Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/arm64/iort.c20
-rw-r--r--drivers/acpi/ec.c36
-rw-r--r--drivers/acpi/nfit/core.c7
-rw-r--r--drivers/acpi/property.c48
-rw-r--r--drivers/acpi/sleep.c26
-rw-r--r--drivers/android/binder.c8
-rw-r--r--drivers/android/binder_alloc.c42
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci_imx.c25
-rw-r--r--drivers/ata/pata_arasan_cf.c1
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/sata_nv.c2
-rw-r--r--drivers/base/Kconfig4
-rw-r--r--drivers/base/core.c308
-rw-r--r--drivers/base/firmware_loader/Kconfig14
-rw-r--r--drivers/base/firmware_loader/builtin/Makefile3
-rw-r--r--drivers/base/firmware_loader/main.c9
-rw-r--r--drivers/base/memory.c40
-rw-r--r--drivers/base/platform.c393
-rw-r--r--drivers/base/power/Makefile1
-rw-r--r--drivers/base/power/qos-test.c117
-rw-r--r--drivers/base/power/qos.c73
-rw-r--r--drivers/base/property.c83
-rw-r--r--drivers/base/soc.c30
-rw-r--r--drivers/base/swnode.c258
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/pktcdvd.c25
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/bus/Kconfig9
-rw-r--r--drivers/bus/Makefile1
-rw-r--r--drivers/bus/ti-pwmss.c (renamed from drivers/pwm/pwm-tipwmss.c)0
-rw-r--r--drivers/cdrom/cdrom.c12
-rw-r--r--drivers/char/Kconfig6
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/hw_random/Kconfig18
-rw-r--r--drivers/char/ipmi/Kconfig98
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c1
-rw-r--r--drivers/char/lp.c4
-rw-r--r--drivers/char/ppdev.c28
-rw-r--r--drivers/char/random.c1
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c12
-rw-r--r--drivers/char/virtio_console.c16
-rw-r--r--drivers/char/xillybus/xillybus_of.c5
-rw-r--r--drivers/clk/Kconfig7
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/at91/sckc.c3
-rw-r--r--drivers/clk/axs10x/i2s_pll_clock.c4
-rw-r--r--drivers/clk/axs10x/pll_clock.c7
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c4
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c4
-rw-r--r--drivers/clk/clk-aspeed.c27
-rw-r--r--drivers/clk/clk-ast2600.c49
-rw-r--r--drivers/clk/clk-bd718x7.c1
-rw-r--r--drivers/clk/clk-bm1880.c969
-rw-r--r--drivers/clk/clk-composite.c13
-rw-r--r--drivers/clk/clk-divider.c2
-rw-r--r--drivers/clk/clk-fixed-rate.c2
-rw-r--r--drivers/clk/clk-gate.c2
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-mux.c2
-rw-r--r--drivers/clk/clk.c27
-rw-r--r--drivers/clk/davinci/pll.c4
-rw-r--r--drivers/clk/davinci/psc.c4
-rw-r--r--drivers/clk/hisilicon/clk-hi3660.c60
-rw-r--r--drivers/clk/hisilicon/clk-hi3670.c152
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c3
-rw-r--r--drivers/clk/hisilicon/reset.c4
-rw-r--r--drivers/clk/imgtec/clk-boston.c3
-rw-r--r--drivers/clk/imx/clk-imx6sll.c8
-rw-r--r--drivers/clk/imx/clk-imx6sx.c12
-rw-r--r--drivers/clk/imx/clk-imx6ul.c8
-rw-r--r--drivers/clk/imx/clk-imx7d.c4
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c9
-rw-r--r--drivers/clk/imx/clk-imx8mm.c150
-rw-r--r--drivers/clk/imx/clk-imx8mn.c166
-rw-r--r--drivers/clk/imx/clk-imx8mq.c77
-rw-r--r--drivers/clk/imx/clk-pll14xx.c72
-rw-r--r--drivers/clk/imx/clk.h3
-rw-r--r--drivers/clk/ingenic/Kconfig12
-rw-r--r--drivers/clk/ingenic/Makefile1
-rw-r--r--drivers/clk/ingenic/tcu.c3
-rw-r--r--drivers/clk/ingenic/x1000-cgu.c274
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c6
-rw-r--r--drivers/clk/mediatek/clk-mt6779.c3
-rw-r--r--drivers/clk/mediatek/clk-mt6797.c3
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c6
-rw-r--r--drivers/clk/mediatek/clk-mt7629.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c6
-rw-r--r--drivers/clk/meson/axg-audio.c2025
-rw-r--r--drivers/clk/meson/axg-audio.h21
-rw-r--r--drivers/clk/mvebu/ap-cpu-clk.c4
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c6
-rw-r--r--drivers/clk/mvebu/armada-xp.c26
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c4
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c1
-rw-r--r--drivers/clk/qcom/Kconfig26
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/clk-rcg.h2
-rw-r--r--drivers/clk/qcom/clk-rcg2.c6
-rw-r--r--drivers/clk/qcom/clk-rpmh.c53
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c3
-rw-r--r--drivers/clk/qcom/common.c5
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c72
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c2450
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c96
-rw-r--r--drivers/clk/qcom/gpucc-msm8998.c338
-rw-r--r--drivers/clk/qcom/q6sstop-qcs404.c223
-rw-r--r--drivers/clk/renesas/Kconfig34
-rw-r--r--drivers/clk/renesas/Makefile5
-rw-r--r--drivers/clk/renesas/clk-mstp.c4
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c457
-rw-r--r--drivers/clk/renesas/r8a774b1-cpg-mssr.c327
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c24
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c25
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c64
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c14
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h1
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c3
-rw-r--r--drivers/clk/rockchip/clk-px30.c70
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c34
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c4
-rw-r--r--drivers/clk/samsung/clk.c3
-rw-r--r--drivers/clk/sprd/common.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c23
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.h4
-rw-r--r--drivers/clk/tegra/Makefile2
-rw-r--r--drivers/clk/tegra/clk-dfll.c56
-rw-r--r--drivers/clk/tegra/clk-dfll.h2
-rw-r--r--drivers/clk/tegra/clk-divider.c11
-rw-r--r--drivers/clk/tegra/clk-emc.c12
-rw-r--r--drivers/clk/tegra/clk-id.h4
-rw-r--r--drivers/clk/tegra/clk-periph.c21
-rw-r--r--drivers/clk/tegra/clk-pll-out.c9
-rw-r--r--drivers/clk/tegra/clk-pll.c86
-rw-r--r--drivers/clk/tegra/clk-sdmmc-mux.c16
-rw-r--r--drivers/clk/tegra/clk-super.c41
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c15
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c8
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c7
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c1
-rw-r--r--drivers/clk/tegra/clk-tegra124.c55
-rw-r--r--drivers/clk/tegra/clk-tegra20-emc.c293
-rw-r--r--drivers/clk/tegra/clk-tegra20.c80
-rw-r--r--drivers/clk/tegra/clk-tegra210.c181
-rw-r--r--drivers/clk/tegra/clk-tegra30.c63
-rw-r--r--drivers/clk/tegra/clk.c112
-rw-r--r--drivers/clk/tegra/clk.h70
-rw-r--r--drivers/clk/ti/adpll.c11
-rw-r--r--drivers/clk/ti/clk-33xx.c4
-rw-r--r--drivers/clk/ti/clk-43xx.c4
-rw-r--r--drivers/clk/ti/clk-44xx.c4
-rw-r--r--drivers/clk/ti/clk-54xx.c11
-rw-r--r--drivers/clk/ti/clk-7xx.c8
-rw-r--r--drivers/clk/ti/clkctrl.c45
-rw-r--r--drivers/clk/ti/clock.h7
-rw-r--r--drivers/clk/ti/divider.c282
-rw-r--r--drivers/clk/uniphier/clk-uniphier-core.c3
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/asm9260_timer.c4
-rw-r--r--drivers/clocksource/renesas-ostm.c189
-rw-r--r--drivers/clocksource/sh_cmt.c13
-rw-r--r--drivers/clocksource/sh_mtu2.c13
-rw-r--r--drivers/clocksource/sh_tmu.c14
-rw-r--r--drivers/clocksource/timer-of.c6
-rw-r--r--drivers/clocksource/timer-riscv.c31
-rw-r--r--drivers/counter/104-quad-8.c33
-rw-r--r--drivers/counter/Kconfig11
-rw-r--r--drivers/counter/Makefile1
-rw-r--r--drivers/counter/counter.c101
-rw-r--r--drivers/counter/ftm-quaddec.c14
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c7
-rw-r--r--drivers/counter/stm32-timer-cnt.c23
-rw-r--r--drivers/counter/ti-eqep.c466
-rw-r--r--drivers/cpufreq/Kconfig.powerpc8
-rw-r--r--drivers/cpufreq/Kconfig.x8616
-rw-r--r--drivers/cpuidle/Kconfig16
-rw-r--r--drivers/cpuidle/Kconfig.arm22
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/cpuidle/poll_state.c1
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c2
-rw-r--r--drivers/dax/bus.c22
-rw-r--r--drivers/dax/pmem/core.c6
-rw-r--r--drivers/devfreq/devfreq.c4
-rw-r--r--drivers/dma-buf/dma-buf.c124
-rw-r--r--drivers/dma-buf/dma-fence.c78
-rw-r--r--drivers/dma-buf/sw_sync.c2
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/dma/Kconfig88
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/at_xdmac.c7
-rw-r--r--drivers/dma/dma-jz4780.c16
-rw-r--r--drivers/dma/dw/platform.c2
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/Kconfig9
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/Makefile3
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c825
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h153
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c376
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.h177
-rw-r--r--drivers/dma/fsl-qdma.c3
-rw-r--r--drivers/dma/iop-adma.c10
-rw-r--r--drivers/dma/k3dma.c7
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c10
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c9
-rw-r--r--drivers/dma/milbeaut-hdmac.c578
-rw-r--r--drivers/dma/milbeaut-xdmac.c415
-rw-r--r--drivers/dma/mmp_pdma.c2
-rw-r--r--drivers/dma/mmp_tdma.c3
-rw-r--r--drivers/dma/owl-dma.c7
-rw-r--r--drivers/dma/sf-pdma/Kconfig6
-rw-r--r--drivers/dma/sf-pdma/Makefile1
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c620
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.h124
-rw-r--r--drivers/dma/sh/rcar-dmac.c47
-rw-r--r--drivers/dma/sprd-dma.c17
-rw-r--r--drivers/dma/ti/edma.c77
-rw-r--r--drivers/dma/uniphier-mdmac.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c649
-rw-r--r--drivers/dma/zx_dma.c8
-rw-r--r--drivers/extcon/extcon-axp288.c38
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c16
-rw-r--r--drivers/extcon/extcon-sm5502.c6
-rw-r--r--drivers/extcon/extcon-sm5502.h2
-rw-r--r--drivers/firewire/core-cdev.c15
-rw-r--r--drivers/firewire/core-iso.c7
-rw-r--r--drivers/firewire/core.h2
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firmware/dmi_scan.c41
-rw-r--r--drivers/firmware/efi/apple-properties.c18
-rw-r--r--drivers/firmware/qcom_scm-32.c5
-rw-r--r--drivers/firmware/qcom_scm-64.c153
-rw-r--r--drivers/firmware/qcom_scm.c6
-rw-r--r--drivers/firmware/qcom_scm.h5
-rw-r--r--drivers/firmware/stratix10-rsu.c42
-rw-r--r--drivers/firmware/stratix10-svc.c18
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/dfl-fme-main.c385
-rw-r--r--drivers/fpga/zynq-fpga.c4
-rw-r--r--drivers/fsi/Kconfig8
-rw-r--r--drivers/fsi/Makefile1
-rw-r--r--drivers/fsi/fsi-core.c67
-rw-r--r--drivers/fsi/fsi-master-aspeed.c544
-rw-r--r--drivers/fsi/fsi-master-hub.c46
-rw-r--r--drivers/fsi/fsi-master.h71
-rw-r--r--drivers/gpio/Kconfig29
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/TODO4
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c5
-rw-r--r--drivers/gpio/gpio-104-idi-48.c4
-rw-r--r--drivers/gpio/gpio-104-idio-16.c4
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c5
-rw-r--r--drivers/gpio/gpio-amd-fch.c2
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c (renamed from drivers/gpio/sgpio-aspeed.c)0
-rw-r--r--drivers/gpio/gpio-aspeed.c7
-rw-r--r--drivers/gpio/gpio-ath79.c10
-rw-r--r--drivers/gpio/gpio-bcm-kona.c6
-rw-r--r--drivers/gpio/gpio-bd70528.c9
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c4
-rw-r--r--drivers/gpio/gpio-dln2.c6
-rw-r--r--drivers/gpio/gpio-em.c39
-rw-r--r--drivers/gpio/gpio-exar.c5
-rw-r--r--drivers/gpio/gpio-f7188x.c5
-rw-r--r--drivers/gpio/gpio-gpio-mm.c5
-rw-r--r--drivers/gpio/gpio-htc-egpio.c42
-rw-r--r--drivers/gpio/gpio-ich.c5
-rw-r--r--drivers/gpio/gpio-kempld.c5
-rw-r--r--drivers/gpio/gpio-lp873x.c2
-rw-r--r--drivers/gpio/gpio-lp87565.c5
-rw-r--r--drivers/gpio/gpio-lynxpoint.c6
-rw-r--r--drivers/gpio/gpio-madera.c5
-rw-r--r--drivers/gpio/gpio-max3191x.c2
-rw-r--r--drivers/gpio/gpio-max77620.c231
-rw-r--r--drivers/gpio/gpio-menz127.c1
-rw-r--r--drivers/gpio/gpio-merrifield.c79
-rw-r--r--drivers/gpio/gpio-mmio.c22
-rw-r--r--drivers/gpio/gpio-mockup.c105
-rw-r--r--drivers/gpio/gpio-moxtet.c4
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c36
-rw-r--r--drivers/gpio/gpio-mvebu.c24
-rw-r--r--drivers/gpio/gpio-mxc.c13
-rw-r--r--drivers/gpio/gpio-mxs.c5
-rw-r--r--drivers/gpio/gpio-omap.c6
-rw-r--r--drivers/gpio/gpio-pca953x.c5
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c4
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c9
-rw-r--r--drivers/gpio/gpio-pisosr.c2
-rw-r--r--drivers/gpio/gpio-pl061.c5
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c5
-rw-r--r--drivers/gpio/gpio-rcar.c7
-rw-r--r--drivers/gpio/gpio-rda.c294
-rw-r--r--drivers/gpio/gpio-reg.c3
-rw-r--r--drivers/gpio/gpio-sa1100.c5
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c7
-rw-r--r--drivers/gpio/gpio-sch.c5
-rw-r--r--drivers/gpio/gpio-sch311x.c5
-rw-r--r--drivers/gpio/gpio-siox.c4
-rw-r--r--drivers/gpio/gpio-stmpe.c5
-rw-r--r--drivers/gpio/gpio-tc3589x.c5
-rw-r--r--drivers/gpio/gpio-tegra.c5
-rw-r--r--drivers/gpio/gpio-tegra186.c384
-rw-r--r--drivers/gpio/gpio-thunderx.c5
-rw-r--r--drivers/gpio/gpio-tpic2810.c2
-rw-r--r--drivers/gpio/gpio-tps65086.c2
-rw-r--r--drivers/gpio/gpio-tps65912.c4
-rw-r--r--drivers/gpio/gpio-tps68470.c6
-rw-r--r--drivers/gpio/gpio-tqmx86.c5
-rw-r--r--drivers/gpio/gpio-ts4900.c5
-rw-r--r--drivers/gpio/gpio-twl4030.c10
-rw-r--r--drivers/gpio/gpio-twl6040.c3
-rw-r--r--drivers/gpio/gpio-uniphier.c5
-rw-r--r--drivers/gpio/gpio-wcove.c7
-rw-r--r--drivers/gpio/gpio-ws16c48.c5
-rw-r--r--drivers/gpio/gpio-xgene.c32
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c320
-rw-r--r--drivers/gpio/gpio-xra1403.c5
-rw-r--r--drivers/gpio/gpio-xtensa.c4
-rw-r--r--drivers/gpio/gpio-zynq.c7
-rw-r--r--drivers/gpio/gpiolib-acpi.c17
-rw-r--r--drivers/gpio/gpiolib-of.c18
-rw-r--r--drivers/gpio/gpiolib.c282
-rw-r--r--drivers/gpio/gpiolib.h1
-rw-r--r--drivers/gpu/drm/Kconfig36
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c147
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c289
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c176
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c274
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c307
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c216
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c443
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c459
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c659
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c209
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c375
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c158
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c318
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/arct_reg_init.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c1389
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c474
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c380
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c258
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h139
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c272
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c108
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c25
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h3
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig28
-rw-r--r--drivers/gpu/drm/amd/display/Makefile7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c381
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h14
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c346
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h66
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c59
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c153
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c186
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c281
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c345
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h187
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c640
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c349
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c116
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c470
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c380
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/Makefile28
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c324
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h19
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/hdcp_types.h96
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c51
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c53
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/Makefile32
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c426
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h442
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c531
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c307
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c305
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c163
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h139
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c328
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h272
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h289
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c98
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c93
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h18
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h18
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h12
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h49
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h92
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h176
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h12
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h27
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h42
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h13
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h10
-rw-r--r--drivers/gpu/drm/amd/include/renoir_ip_offset.h34
-rw-r--r--drivers/gpu/drm/amd/include/vega10_enum.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c45
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c1142
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c523
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c196
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c222
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c91
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c231
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c68
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c41
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h370
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h51
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_types.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h134
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h41
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c543
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c483
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h204
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c370
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c153
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c134
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c16
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c1
-rw-r--r--drivers/gpu/drm/arm/display/Kconfig6
-rw-r--r--drivers/gpu/drm/arm/display/komeda/Makefile2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_component.c221
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c41
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h9
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c105
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c77
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.h20
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c30
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_event.c140
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h17
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c76
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c5
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c16
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c9
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h3
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h10
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c6
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h43
-rw-r--r--drivers/gpu/drm/ast/ast_main.c1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c266
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c5
-rw-r--r--drivers/gpu/drm/bochs/Kconfig2
-rw-r--r--drivers/gpu/drm/bochs/bochs.h1
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c7
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c26
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c3
-rw-r--r--drivers/gpu/drm/bridge/Kconfig3
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c110
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.h17
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c1
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c3
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c1
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c3
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c1
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c1
-rw-r--r--drivers/gpu/drm/bridge/panel.c70
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c1
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c37
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c11
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c21
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c155
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.h39
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c10
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c66
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c1
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h247
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c18
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c2
-rw-r--r--drivers/gpu/drm/drm_blend.c7
-rw-r--r--drivers/gpu/drm/drm_cache.c14
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c3
-rw-r--r--drivers/gpu/drm/drm_connector.c142
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c23
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h3
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c8
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c8
-rw-r--r--drivers/gpu/drm/drm_dp_cec.c29
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c177
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1807
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology_internal.h24
-rw-r--r--drivers/gpu/drm/drm_drv.c17
-rw-r--r--drivers/gpu/drm/drm_dsc.c23
-rw-r--r--drivers/gpu/drm/drm_edid.c222
-rw-r--r--drivers/gpu/drm/drm_edid_load.c2
-rw-r--r--drivers/gpu/drm/drm_encoder.c1
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c62
-rw-r--r--drivers/gpu/drm/drm_gem.c40
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c31
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c84
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c735
-rw-r--r--drivers/gpu/drm/drm_memory.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c11
-rw-r--r--drivers/gpu/drm/drm_mm.c36
-rw-r--r--drivers/gpu/drm/drm_mode_config.c2
-rw-r--r--drivers/gpu/drm/drm_of.c5
-rw-r--r--drivers/gpu/drm/drm_panel.c14
-rw-r--r--drivers/gpu/drm/drm_prime.c9
-rw-r--r--drivers/gpu/drm/drm_print.c60
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c4
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c3
-rw-r--r--drivers/gpu/drm/drm_syncobj.c38
-rw-r--r--drivers/gpu/drm/drm_trace.h14
-rw-r--r--drivers/gpu/drm/drm_vblank.c60
-rw-r--r--drivers/gpu/drm/drm_vram_helper_common.c8
-rw-r--r--drivers/gpu/drm/drm_vram_mm_helper.c297
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c32
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c14
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c6
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c12
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c10
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i915/Kconfig18
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug147
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile49
-rw-r--r--drivers/gpu/drm/i915/Kconfig.unstable29
-rw-r--r--drivers/gpu/drm/i915/Makefile25
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c58
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c1220
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c550
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c839
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c2380
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h66
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c554
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h43
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h63
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c509
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c412
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c332
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h52
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c216
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c297
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c441
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c549
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c87
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h55
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c160
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h18
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c614
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h61
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c56
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c89
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c20
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c99
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h37
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c84
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c38
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h52
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c48
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c165
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c174
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.h29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c82
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c124
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c130
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c42
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c33
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c579
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c30
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c214
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c704
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c306
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c354
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c33
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h13
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h231
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c246
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c234
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c28
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h91
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c160
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c209
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c137
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_hangcheck.c360
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c161
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc_types.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c1500
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h66
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c277
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c787
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.h28
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6_types.h29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c172
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c323
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h131
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c (renamed from drivers/gpu/drm/i915/gt/intel_ringbuffer.c)404
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_types.h51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c1872
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h38
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h93
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c67
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c7
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c71
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c350
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c60
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c207
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c80
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.h14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c1895
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c16
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c138
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c270
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c185
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c56
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c41
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c38
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c76
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c46
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c23
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c48
-rw-r--r--drivers/gpu/drm/i915/i915_active.c389
-rw-r--r--drivers/gpu/drm/i915/i915_active.h330
-rw-r--r--drivers/gpu/drm/i915/i915_active_types.h34
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c522
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c289
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h622
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c406
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c58
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c104
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c413
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h77
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c150
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h8
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c839
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h16
-rw-r--r--drivers/gpu/drm/i915/i915_params.c12
-rw-r--r--drivers/gpu/drm/i915/i915_params.h5
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c80
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c1860
-rw-r--r--drivers/gpu/drm/i915/i915_perf.h32
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h435
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c309
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h8
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h7
-rw-r--r--drivers/gpu/drm/i915/i915_query.c306
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h876
-rw-r--r--drivers/gpu/drm/i915/i915_request.c235
-rw-r--r--drivers/gpu/drm/i915/i915_request.h40
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.h8
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c5
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h18
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h9
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c11
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c67
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.h14
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c162
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h40
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c43
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h34
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c639
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h134
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c4
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c230
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h8
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c272
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h129
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c3167
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h30
-rw-r--r--drivers/gpu/drm/i915/intel_region_lmem.c132
-rw-r--r--drivers/gpu/drm/i915/intel_region_lmem.h16
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c94
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h20
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_tgl.c121
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_tgl.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c90
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c46
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c143
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c404
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c217
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c502
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c23
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c33
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c624
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c56
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c53
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c60
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.h3
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c1
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c5
-rw-r--r--drivers/gpu/drm/lima/Kconfig1
-rw-r--r--drivers/gpu/drm/lima/Makefile4
-rw-r--r--drivers/gpu/drm/lima/lima_device.c5
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c22
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c195
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h32
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.c46
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.h13
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c1
-rw-r--r--drivers/gpu/drm/lima/lima_object.c119
-rw-r--r--drivers/gpu/drm/lima/lima_object.h35
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c6
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c87
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c3
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c4
-rw-r--r--drivers/gpu/drm/mediatek/Makefile2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c111
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c136
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c128
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c67
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h43
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c234
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.c338
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.h49
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c288
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c149
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c32
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c115
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c9
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c327
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h23
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c20
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c17
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h1
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.h1
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c70
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h2
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c6
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c20
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c46
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.h4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c230
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c37
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c129
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c137
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c5
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c5
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c5
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c5
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c26
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c5
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c5
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c5
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c5
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c5
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c5
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c29
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c5
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c5
-rw-r--r--drivers/gpu/drm/panfrost/TODO2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c124
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_issues.h81
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c17
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c4
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c20
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c32
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c62
-rw-r--r--drivers/gpu/drm/radeon/cik.c106
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c218
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/si.c101
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c30
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c29
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c12
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h3
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c19
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c169
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c48
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c12
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c4
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c66
-rw-r--r--drivers/gpu/drm/selftests/Makefile2
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c238
-rw-r--r--drivers/gpu/drm/selftests/test-drm_framebuffer.c2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c14
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c3
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c26
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c10
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c2
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c5
-rw-r--r--drivers/gpu/drm/stm/ltdc.c39
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c35
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h1
-rw-r--r--drivers/gpu/drm/tegra/Kconfig2
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/dc.c30
-rw-r--r--drivers/gpu/drm/tegra/dc.h2
-rw-r--r--drivers/gpu/drm/tegra/dp.c876
-rw-r--r--drivers/gpu/drm/tegra/dp.h177
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c208
-rw-r--r--drivers/gpu/drm/tegra/drm.c417
-rw-r--r--drivers/gpu/drm/tegra/drm.h13
-rw-r--r--drivers/gpu/drm/tegra/falcon.c64
-rw-r--r--drivers/gpu/drm/tegra/falcon.h16
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c81
-rw-r--r--drivers/gpu/drm/tegra/gem.h2
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c12
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c12
-rw-r--r--drivers/gpu/drm/tegra/hub.c6
-rw-r--r--drivers/gpu/drm/tegra/output.c28
-rw-r--r--drivers/gpu/drm/tegra/plane.c104
-rw-r--r--drivers/gpu/drm/tegra/plane.h8
-rw-r--r--drivers/gpu/drm/tegra/sor.c1704
-rw-r--r--drivers/gpu/drm/tegra/sor.h3
-rw-r--r--drivers/gpu/drm/tegra/vic.c138
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c2
-rw-r--r--drivers/gpu/drm/ttm/Makefile4
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c190
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c27
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c243
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c57
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c7
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c4
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c5
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c55
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig2
-rw-r--r--drivers/gpu/drm/vboxvideo/Makefile2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c19
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h27
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_fb.c149
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c119
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c138
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c4
-rw-r--r--drivers/gpu/drm/virtio/Kconfig2
-rw-r--r--drivers/gpu/drm/virtio/Makefile2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c22
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h135
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c183
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c228
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c270
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c61
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c34
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c305
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c227
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c15
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h6
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig1
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h233
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h48
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c488
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c196
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c397
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c77
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h18
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c7
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/cdma.c6
-rw-r--r--drivers/gpu/host1x/channel.c13
-rw-r--r--drivers/gpu/host1x/channel.h1
-rw-r--r--drivers/gpu/host1x/dev.c236
-rw-r--r--drivers/gpu/host1x/dev.h3
-rw-r--r--drivers/gpu/host1x/intr.c1
-rw-r--r--drivers/gpu/host1x/job.c91
-rw-r--r--drivers/gpu/host1x/job.h4
-rw-r--r--drivers/greybus/connection.c3
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-core.c55
-rw-r--r--drivers/hid/hid-google-hammer.c146
-rw-r--r--drivers/hid/hid-hyperv.c34
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-lg-g15.c899
-rw-r--r--drivers/hid/hid-logitech-hidpp.c3
-rw-r--r--drivers/hid/hid-quirks.c8
-rw-r--r--drivers/hid/hid-rmi.c3
-rw-r--r--drivers/hid/hidraw.c14
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c16
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c2
-rw-r--r--drivers/hid/usbhid/hiddev.c11
-rw-r--r--drivers/hv/Makefile1
-rw-r--r--drivers/hv/connection.c87
-rw-r--r--drivers/hv/hv_balloon.c116
-rw-r--r--drivers/hv/hv_debugfs.c178
-rw-r--r--drivers/hv/hv_fcopy.c3
-rw-r--r--drivers/hv/hv_kvp.c3
-rw-r--r--drivers/hv/hv_snapshot.c3
-rw-r--r--drivers/hv/hv_util.c13
-rw-r--r--drivers/hv/hyperv_vmbus.h31
-rw-r--r--drivers/hv/ring_buffer.c2
-rw-r--r--drivers/hv/vmbus_drv.c27
-rw-r--r--drivers/hwmon/Kconfig41
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/ab8500.c65
-rw-r--r--drivers/hwmon/abituguru.c2
-rw-r--r--drivers/hwmon/applesmc.c38
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c7
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c115
-rw-r--r--drivers/hwmon/fschmd.c1
-rw-r--r--drivers/hwmon/ina3221.c163
-rw-r--r--drivers/hwmon/ltc2947-core.c1183
-rw-r--r--drivers/hwmon/ltc2947-i2c.c49
-rw-r--r--drivers/hwmon/ltc2947-spi.c50
-rw-r--r--drivers/hwmon/ltc2947.h12
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/bel-pfe.c131
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c74
-rw-r--r--drivers/hwmon/tmp421.c3
-rw-r--r--drivers/hwmon/tmp513.c772
-rw-r--r--drivers/hwmon/w83793.c3
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c16
-rw-r--r--drivers/hwspinlock/sprd_hwspinlock.c48
-rw-r--r--drivers/hwspinlock/u8500_hsem.c53
-rw-r--r--drivers/hwtracing/coresight/Kconfig1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c312
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c351
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h81
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c36
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c26
-rw-r--r--drivers/hwtracing/coresight/coresight.c51
-rw-r--r--drivers/hwtracing/intel_th/core.c8
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/stm/core.c12
-rw-r--r--drivers/hwtracing/stm/policy.c4
-rw-r--r--drivers/i2c/busses/Kconfig24
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c4
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c38
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c53
-rw-r--r--drivers/i2c/busses/i2c-at91.h13
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c63
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c15
-rw-r--r--drivers/i2c/busses/i2c-i801.c8
-rw-r--r--drivers/i2c/busses/i2c-icy.c9
-rw-r--r--drivers/i2c/busses/i2c-pxa.c75
-rw-r--r--drivers/i2c/busses/i2c-qup.c4
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32.c16
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c32
-rw-r--r--drivers/i2c/busses/i2c-tegra.c4
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/i2c/i2c-core-base.c63
-rw-r--r--drivers/i2c/i2c-core-of.c1
-rw-r--r--drivers/i2c/i2c-smbus.c7
-rw-r--r--drivers/i2c/muxes/Kconfig18
-rw-r--r--drivers/ide/ide-tape.c27
-rw-r--r--drivers/idle/intel_idle.c6
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c6
-rw-r--r--drivers/iio/accel/st_accel_core.c1
-rw-r--r--drivers/iio/adc/Kconfig35
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c1218
-rw-r--r--drivers/iio/adc/ad7292.c350
-rw-r--r--drivers/iio/adc/ad7949.c33
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c3
-rw-r--r--drivers/iio/adc/aspeed_adc.c4
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c4
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c2
-rw-r--r--drivers/iio/adc/cc10001_adc.c4
-rw-r--r--drivers/iio/adc/cpcap-adc.c2
-rw-r--r--drivers/iio/adc/dln2-adc.c20
-rw-r--r--drivers/iio/adc/exynos_adc.c6
-rw-r--r--drivers/iio/adc/hx711.c22
-rw-r--r--drivers/iio/adc/ingenic-adc.c153
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c262
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c4
-rw-r--r--drivers/iio/adc/max1027.c180
-rw-r--r--drivers/iio/adc/mcp320x.c2
-rw-r--r--drivers/iio/adc/men_z188_adc.c1
-rw-r--r--drivers/iio/adc/meson_saradc.c4
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c4
-rw-r--r--drivers/iio/adc/npcm_adc.c4
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c4
-rw-r--r--drivers/iio/adc/sc27xx_adc.c16
-rw-r--r--drivers/iio/adc/spear_adc.c4
-rw-r--r--drivers/iio/adc/stm32-adc-core.c27
-rw-r--r--drivers/iio/adc/stm32-adc.c21
-rw-r--r--drivers/iio/adc/stmpe-adc.c2
-rw-r--r--drivers/iio/adc/twl4030-madc.c18
-rw-r--r--drivers/iio/adc/vf610_adc.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c4
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c8
-rw-r--r--drivers/iio/chemical/sgp30.c2
-rw-r--r--drivers/iio/chemical/sps30.c2
-rw-r--r--drivers/iio/common/cros_ec_sensors/Kconfig2
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c6
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c4
-rw-r--r--drivers/iio/dac/Kconfig4
-rw-r--r--drivers/iio/dac/ad5446.c6
-rw-r--r--drivers/iio/dac/ad7303.c13
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c4
-rw-r--r--drivers/iio/dac/stm32-dac-core.c138
-rw-r--r--drivers/iio/dac/stm32-dac.c94
-rw-r--r--drivers/iio/dac/vf610_dac.c4
-rw-r--r--drivers/iio/gyro/adis16080.c8
-rw-r--r--drivers/iio/gyro/adis16130.c2
-rw-r--r--drivers/iio/gyro/adis16136.c24
-rw-r--r--drivers/iio/gyro/itg3200_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c1
-rw-r--r--drivers/iio/humidity/hdc100x.c19
-rw-r--r--drivers/iio/imu/Kconfig27
-rw-r--r--drivers/iio/imu/Makefile5
-rw-r--r--drivers/iio/imu/adis.c5
-rw-r--r--drivers/iio/imu/adis16400.c22
-rw-r--r--drivers/iio/imu/adis16460.c8
-rw-r--r--drivers/iio/imu/adis16480.c116
-rw-r--r--drivers/iio/imu/fxos8700.h10
-rw-r--r--drivers/iio/imu/fxos8700_core.c649
-rw-r--r--drivers/iio/imu/fxos8700_i2c.c71
-rw-r--r--drivers/iio/imu/fxos8700_spi.c59
-rw-r--r--drivers/iio/imu/inv_mpu6050/Makefile7
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c204
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h19
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c195
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c60
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h74
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c356
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h36
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c11
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c86
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h87
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c109
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c1056
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c10
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c45
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c10
-rw-r--r--drivers/iio/industrialio-core.c19
-rw-r--r--drivers/iio/light/Kconfig22
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adux1020.c849
-rw-r--r--drivers/iio/light/bh1750.c4
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c6
-rw-r--r--drivers/iio/light/tcs3414.c30
-rw-r--r--drivers/iio/light/veml6030.c908
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/pressure/bmp280-core.c130
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c6
-rw-r--r--drivers/iio/pressure/bmp280-spi.c6
-rw-r--r--drivers/iio/pressure/bmp280.h1
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c3
-rw-r--r--drivers/iio/pressure/st_pressure_core.c1
-rw-r--r--drivers/iio/pressure/zpa2326.c16
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c5
-rw-r--r--drivers/iio/proximity/sx9500.c16
-rw-r--r--drivers/iio/temperature/Kconfig11
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/ltc2983.c1557
-rw-r--r--drivers/iio/temperature/max31856.c2
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/cache.c8
-rw-r--r--drivers/infiniband/core/cm.c66
-rw-r--r--drivers/infiniband/core/cm_msgs.h32
-rw-r--r--drivers/infiniband/core/cma.c107
-rw-r--r--drivers/infiniband/core/core_priv.h11
-rw-r--r--drivers/infiniband/core/counters.c40
-rw-r--r--drivers/infiniband/core/device.c51
-rw-r--r--drivers/infiniband/core/ib_core_uverbs.c335
-rw-r--r--drivers/infiniband/core/iwpm_util.h5
-rw-r--r--drivers/infiniband/core/mad.c31
-rw-r--r--drivers/infiniband/core/nldev.c141
-rw-r--r--drivers/infiniband/core/rdma_core.c1
-rw-r--r--drivers/infiniband/core/restrack.c20
-rw-r--r--drivers/infiniband/core/restrack.h1
-rw-r--r--drivers/infiniband/core/rw.c25
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/sysfs.c12
-rw-r--r--drivers/infiniband/core/umem.c12
-rw-r--r--drivers/infiniband/core/umem_odp.c341
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c88
-rw-r--r--drivers/infiniband/core/verbs.c12
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig12
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c28
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c143
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h8
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig19
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile7
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c1312
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h204
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c344
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.h69
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h802
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c282
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h155
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2258
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h233
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cq.c230
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c232
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c101
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1321
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h347
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1082
-rw-r--r--drivers/infiniband/hw/cxgb3/tcb.h632
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c7
-rw-r--r--drivers/infiniband/hw/efa/efa.h13
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h29
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c40
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h19
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c17
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c370
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h2
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c17
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c2
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c146
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h5
-rw-r--r--drivers/infiniband/hw/hns/Kconfig17
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c300
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h55
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c38
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c76
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c21
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c69
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c54
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c86
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c30
-rw-r--r--drivers/infiniband/hw/mlx4/main.c18
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h8
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c37
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c25
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c29
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c24
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c124
-rw-r--r--drivers/infiniband/hw/mlx5/main.c137
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c199
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h80
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c180
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c1021
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c60
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c90
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c74
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c33
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h11
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c9
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h2
-rw-r--r--drivers/infiniband/hw/qedr/main.c5
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h72
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c150
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c643
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h12
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c38
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c119
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c30
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h1
-rw-r--r--drivers/infiniband/sw/siw/siw.h31
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c45
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c35
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c338
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h34
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c5
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c72
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h8
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c47
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h4
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c247
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h58
-rw-r--r--drivers/input/input-poller.c9
-rw-r--r--drivers/input/joystick/Kconfig1
-rw-r--r--drivers/input/joystick/psxpad-spi.c64
-rw-r--r--drivers/input/keyboard/Kconfig28
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adc-keys.c36
-rw-r--r--drivers/input/keyboard/adp5589-keys.c171
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c70
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c6
-rw-r--r--drivers/input/keyboard/gpio_keys.c6
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c72
-rw-r--r--drivers/input/keyboard/imx_sc_key.c187
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c37
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c69
-rw-r--r--drivers/input/misc/Kconfig15
-rw-r--r--drivers/input/misc/apanel.c153
-rw-r--r--drivers/input/misc/bma150.c190
-rw-r--r--drivers/input/misc/cobalt_btns.c73
-rw-r--r--drivers/input/misc/gpio_decoder.c42
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c342
-rw-r--r--drivers/input/misc/kxtj9.c224
-rw-r--r--drivers/input/misc/mma8450.c101
-rw-r--r--drivers/input/misc/rb532_button.c48
-rw-r--r--drivers/input/misc/sgi_btns.c54
-rw-r--r--drivers/input/misc/wistron_btns.c51
-rw-r--r--drivers/input/mouse/Kconfig15
-rw-r--r--drivers/input/mouse/gpio_mouse.c45
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/rmi4/rmi_f54.c63
-rw-r--r--drivers/input/tablet/Kconfig20
-rw-r--r--drivers/input/touchscreen/Kconfig6
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c4
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c30
-rw-r--r--drivers/input/touchscreen/ili210x.c418
-rw-r--r--drivers/input/touchscreen/mms114.c3
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c177
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c38
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c1
-rw-r--r--drivers/input/touchscreen/st1232.c184
-rw-r--r--drivers/input/touchscreen/sur40.c92
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c36
-rw-r--r--drivers/input/touchscreen/ts4800-ts.c68
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c1
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/msm8974.c784
-rw-r--r--drivers/iommu/Kconfig8
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd_iommu.c893
-rw-r--r--drivers/iommu/amd_iommu_types.h3
-rw-r--r--drivers/iommu/arm-smmu-impl.c5
-rw-r--r--drivers/iommu/arm-smmu-qcom.c51
-rw-r--r--drivers/iommu/arm-smmu-v3.c12
-rw-r--r--drivers/iommu/arm-smmu.c223
-rw-r--r--drivers/iommu/arm-smmu.h16
-rw-r--r--drivers/iommu/dma-iommu.c56
-rw-r--r--drivers/iommu/dmar.c5
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/iommu/intel-iommu.c61
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c15
-rw-r--r--drivers/iommu/io-pgtable-arm.c130
-rw-r--r--drivers/iommu/ioasid.c422
-rw-r--r--drivers/iommu/iommu.c73
-rw-r--r--drivers/iommu/ipmmu-vmsa.c223
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu.c90
-rw-r--r--drivers/iommu/mtk_iommu.h2
-rw-r--r--drivers/iommu/mtk_iommu_v1.c2
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/qcom_iommu.c10
-rw-r--r--drivers/iommu/rockchip-iommu.c11
-rw-r--r--drivers/iommu/s390-iommu.c2
-rw-r--r--drivers/iommu/tegra-gart.c2
-rw-r--r--drivers/iommu/tegra-smmu.c38
-rw-r--r--drivers/iommu/virtio-iommu.c5
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c119
-rw-r--r--drivers/irqchip/irq-gic-v2m.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c302
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-ingenic.c85
-rw-r--r--drivers/irqchip/irq-ls-extirq.c197
-rw-r--r--drivers/irqchip/irq-sifive-plic.c11
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c5
-rw-r--r--drivers/irqchip/irq-zevio.c2
-rw-r--r--drivers/irqchip/qcom-pdc.c149
-rw-r--r--drivers/isdn/capi/capi.c31
-rw-r--r--drivers/leds/Kconfig17
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class-flash.c50
-rw-r--r--drivers/leds/led-class.c10
-rw-r--r--drivers/leds/led-triggers.c90
-rw-r--r--drivers/leds/leds-an30259a.c7
-rw-r--r--drivers/leds/leds-bcm6328.c7
-rw-r--r--drivers/leds/leds-bcm6358.c7
-rw-r--r--drivers/leds/leds-el15203000.c357
-rw-r--r--drivers/leds/leds-lm3601x.c4
-rw-r--r--drivers/leds/leds-lm3692x.c47
-rw-r--r--drivers/leds/leds-mlxreg.c4
-rw-r--r--drivers/leds/leds-pca9532.c14
-rw-r--r--drivers/leds/leds-tlc591xx.c90
-rw-r--r--drivers/leds/leds.h6
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c5
-rw-r--r--drivers/macintosh/ans-lcd.c3
-rw-r--r--drivers/mailbox/hi6220-mailbox.c1
-rw-r--r--drivers/mailbox/imx-mailbox.c74
-rw-r--r--drivers/mailbox/omap-mailbox.c2
-rw-r--r--drivers/mailbox/stm32-ipcc.c36
-rw-r--r--drivers/mailbox/tegra-hsp.c4
-rw-r--r--drivers/mcb/mcb-core.c28
-rw-r--r--drivers/mcb/mcb-lpc.c1
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/mcb/mcb-pci.c1
-rw-r--r--drivers/media/cec/cec-adap.c12
-rw-r--r--drivers/media/cec/cec-api.c20
-rw-r--r--drivers/media/cec/cec-core.c5
-rw-r--r--drivers/media/cec/cec-notifier.c5
-rw-r--r--drivers/media/cec/cec-pin.c10
-rw-r--r--drivers/media/common/siano/smscoreapi.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.h4
-rw-r--r--drivers/media/common/siano/smsir.h2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c12
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t2.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c12
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c54
-rw-r--r--drivers/media/dvb-frontends/mt312.c13
-rw-r--r--drivers/media/dvb-frontends/si2168.h47
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h10
-rw-r--r--drivers/media/dvb-frontends/tc90522.c27
-rw-r--r--drivers/media/dvb-frontends/tc90522.h3
-rw-r--r--drivers/media/i2c/Kconfig80
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/ad5820.c35
-rw-r--r--drivers/media/i2c/adv7180.c6
-rw-r--r--drivers/media/i2c/adv7842.c4
-rw-r--r--drivers/media/i2c/bt819.c2
-rw-r--r--drivers/media/i2c/hi556.c1200
-rw-r--r--drivers/media/i2c/imx214.c9
-rw-r--r--drivers/media/i2c/imx290.c884
-rw-r--r--drivers/media/i2c/lm3646.c2
-rw-r--r--drivers/media/i2c/max2175.c4
-rw-r--r--drivers/media/i2c/max2175.h4
-rw-r--r--drivers/media/i2c/mt9m001.c2
-rw-r--r--drivers/media/i2c/ov2659.c139
-rw-r--r--drivers/media/i2c/ov5640.c33
-rw-r--r--drivers/media/i2c/ov5695.c2
-rw-r--r--drivers/media/i2c/ov6650.c266
-rw-r--r--drivers/media/i2c/saa711x_regs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c326
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h36
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h3
-rw-r--r--drivers/media/i2c/st-mipid02.c5
-rw-r--r--drivers/media/i2c/tda1997x_regs.h2
-rw-r--r--drivers/media/i2c/tvp5150_reg.h2
-rw-r--r--drivers/media/i2c/vpx3220.c2
-rw-r--r--drivers/media/mc/mc-device.c65
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c5
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c43
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c1
-rw-r--r--drivers/media/pci/cx88/cx88-video.c11
-rw-r--r--drivers/media/pci/cx88/cx88.h1
-rw-r--r--drivers/media/pci/dm1105/dm1105.c1
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c4
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c166
-rw-r--r--drivers/media/pci/smipcie/smipcie.h1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c2
-rw-r--r--drivers/media/platform/Kconfig17
-rw-r--r--drivers/media/platform/Makefile4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c861
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h43
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe_regs.h10
-rw-r--r--drivers/media/platform/aspeed-video.c58
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c2
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c41
-rw-r--r--drivers/media/platform/coda/coda-common.c13
-rw-r--r--drivers/media/platform/coda/coda.h1
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c6
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c7
-rw-r--r--drivers/media/platform/meson/ao-cec-g12a.c38
-rw-r--r--drivers/media/platform/meson/ao-cec.c32
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c20
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c9
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h9
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c4
-rw-r--r--drivers/media/platform/qcom/venus/core.c56
-rw-r--r--drivers/media/platform/qcom/venus/core.h30
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c247
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c6
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c11
-rw-r--r--drivers/media/platform/qcom/venus/venc.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c17
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c63
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c156
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h6
-rw-r--r--drivers/media/platform/rcar_drif.c1
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c2
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c5
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c26
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c4
-rw-r--r--drivers/media/platform/sunxi/Makefile1
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c1028
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.h237
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c4
-rw-r--r--drivers/media/platform/ti-vpe/csc.c254
-rw-r--r--drivers/media/platform/ti-vpe/csc.h4
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c13
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h2
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h5
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c396
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c4
-rw-r--r--drivers/media/platform/vim2m.c8
-rw-r--r--drivers/media/platform/vimc/Makefile7
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c107
-rw-r--r--drivers/media/platform/vimc/vimc-common.c171
-rw-r--r--drivers/media/platform/vimc/vimc-common.h120
-rw-r--r--drivers/media/platform/vimc/vimc-core.c215
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c182
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c102
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c109
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c19
-rw-r--r--drivers/media/platform/vivid/Makefile2
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c7
-rw-r--r--drivers/media/platform/vivid/vivid-core.c368
-rw-r--r--drivers/media/platform/vivid/vivid-core.h25
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c89
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c62
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c57
-rw-r--r--drivers/media/platform/vivid/vivid-meta-cap.c201
-rw-r--r--drivers/media/platform/vivid/vivid-meta-cap.h29
-rw-r--r--drivers/media/platform/vivid/vivid-meta-out.c174
-rw-r--r--drivers/media/platform/vivid/vivid-meta-out.h25
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h2
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/rc/imon.c64
-rw-r--r--drivers/media/rc/imon_raw.c22
-rw-r--r--drivers/media/rc/ir-rcmm-decoder.c6
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/keymaps/Makefile2
-rw-r--r--drivers/media/rc/keymaps/rc-beelink-gs1.c84
-rw-r--r--drivers/media/rc/keymaps/rc-vega-s9x.c54
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/rc/mceusb.c141
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-main.c1
-rw-r--r--drivers/media/rc/tango-ir.c14
-rw-r--r--drivers/media/tuners/qm1d1c0042.c2
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/tuners/si2157.h33
-rw-r--r--drivers/media/tuners/si2157_priv.h5
-rw-r--r--drivers/media/tuners/tuner-xc2028-types.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c13
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c508
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c172
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c795
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h30
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c37
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c28
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c391
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.h14
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c6
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c5
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c20
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c30
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/sq905.c3
-rw-r--r--drivers/media/usb/gspca/sq905c.c3
-rw-r--r--drivers/media/usb/gspca/stv0680.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000-regs.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-usb-isoc.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000.h2
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c29
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c10
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c28
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c4
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c128
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c199
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c112
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c77
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c190
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c6
-rw-r--r--drivers/memory/mtk-smi.c4
-rw-r--r--drivers/memstick/core/Kconfig18
-rw-r--r--drivers/memstick/host/Kconfig4
-rw-r--r--drivers/memstick/host/jmb38x_ms.c14
-rw-r--r--drivers/mfd/Kconfig7
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/ab8500-core.c138
-rw-r--r--drivers/mfd/ab8500-debugfs.c715
-rw-r--r--drivers/mfd/ab8500-gpadc.c1075
-rw-r--r--drivers/mfd/arizona-core.c6
-rw-r--r--drivers/mfd/cros_ec_dev.c235
-rw-r--r--drivers/mfd/cs5535-mfd.c108
-rw-r--r--drivers/mfd/db8500-prcmu.c84
-rw-r--r--drivers/mfd/intel-lpss-pci.c41
-rw-r--r--drivers/mfd/intel-lpss.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c3
-rw-r--r--drivers/mfd/ipaq-micro.c6
-rw-r--r--drivers/mfd/madera-core.c27
-rw-r--r--drivers/mfd/max77620.c5
-rw-r--r--drivers/mfd/mfd-core.c118
-rw-r--r--drivers/mfd/mt6397-core.c12
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c4
-rw-r--r--drivers/mfd/rk808.c22
-rw-r--r--drivers/mfd/rohm-bd70528.c17
-rw-r--r--drivers/mfd/syscon.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c2
-rw-r--r--drivers/mfd/wm8998-tables.c12
-rw-r--r--drivers/misc/Kconfig17
-rw-r--r--drivers/misc/atmel_tclib.c4
-rw-r--r--drivers/misc/cardreader/Makefile2
-rw-r--r--drivers/misc/cardreader/rts5260.c3
-rw-r--r--drivers/misc/cardreader/rts5261.c792
-rw-r--r--drivers/misc/cardreader/rts5261.h233
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c43
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h1
-rw-r--r--drivers/misc/cxl/flash.c8
-rw-r--r--drivers/misc/eeprom/at24.c9
-rw-r--r--drivers/misc/eeprom/eeprom.c4
-rw-r--r--drivers/misc/fastrpc.c209
-rw-r--r--drivers/misc/genwqe/card_dev.c23
-rw-r--r--drivers/misc/habanalabs/command_submission.c127
-rw-r--r--drivers/misc/habanalabs/debugfs.c112
-rw-r--r--drivers/misc/habanalabs/device.c18
-rw-r--r--drivers/misc/habanalabs/firmware_if.c5
-rw-r--r--drivers/misc/habanalabs/goya/goya.c78
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h2
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c53
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c31
-rw-r--r--drivers/misc/habanalabs/habanalabs.h171
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c73
-rw-r--r--drivers/misc/habanalabs/hw_queue.c249
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h2
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h2
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h7
-rw-r--r--drivers/misc/habanalabs/include/qman_if.h12
-rw-r--r--drivers/misc/habanalabs/memory.c392
-rw-r--r--drivers/misc/habanalabs/mmu.c204
-rw-r--r--drivers/misc/hpilo.h2
-rw-r--r--drivers/misc/ibmvmc.h4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c80
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h4
-rw-r--r--drivers/misc/lkdtm/bugs.c39
-rw-r--r--drivers/misc/lkdtm/core.c3
-rw-r--r--drivers/misc/lkdtm/lkdtm.h3
-rw-r--r--drivers/misc/mei/bus-fixup.c9
-rw-r--r--drivers/misc/mei/bus.c42
-rw-r--r--drivers/misc/mei/client.h36
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c45
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h17
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw-me.c74
-rw-r--r--drivers/misc/mei/hw-me.h12
-rw-r--r--drivers/misc/mei/hw-txe.c10
-rw-r--r--drivers/misc/mei/init.c6
-rw-r--r--drivers/misc/mei/main.c46
-rw-r--r--drivers/misc/mei/mei_dev.h18
-rw-r--r--drivers/misc/mei/pci-me.c16
-rw-r--r--drivers/misc/mic/Kconfig16
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h2
-rw-r--r--drivers/misc/ocxl/trace.h2
-rw-r--r--drivers/misc/pci_endpoint_test.c8
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c11
-rw-r--r--drivers/misc/sram.c28
-rw-r--r--drivers/misc/ti-st/st_core.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/mmc/core/block.c151
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c9
-rw-r--r--drivers/mmc/core/quirks.h7
-rw-r--r--drivers/mmc/core/sdio.c28
-rw-r--r--drivers/mmc/core/sdio_bus.c9
-rw-r--r--drivers/mmc/host/Kconfig21
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c13
-rw-r--r--drivers/mmc/host/bcm2835.c4
-rw-r--r--drivers/mmc/host/cavium-octeon.c15
-rw-r--r--drivers/mmc/host/dw_mmc.c14
-rw-r--r--drivers/mmc/host/jz4740_mmc.c41
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/mmci.c198
-rw-r--r--drivers/mmc/host/mmci.h5
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c46
-rw-r--r--drivers/mmc/host/moxart-mmc.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c31
-rw-r--r--drivers/mmc/host/owl-mmc.c696
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c1
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h14
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c362
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c493
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c12
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c19
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c257
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c53
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci.c15
-rw-r--r--drivers/mmc/host/sdhci_am654.c71
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c26
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.h32
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c2
-rw-r--r--drivers/mmc/host/vub300.c7
-rw-r--r--drivers/mtd/nand/onenand/Makefile2
-rw-r--r--drivers/mtd/nand/onenand/samsung_mtd.c (renamed from drivers/mtd/nand/onenand/samsung.c)0
-rw-r--r--drivers/mtd/ubi/cdev.c36
-rw-r--r--drivers/mtd/ubi/debug.c1
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c31
-rw-r--r--drivers/mtd/ubi/fastmap.c14
-rw-r--r--drivers/mtd/ubi/ubi.h8
-rw-r--r--drivers/mtd/ubi/wl.c32
-rw-r--r--drivers/mtd/ubi/wl.h1
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c192
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c28
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c55
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c12
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c57
-rw-r--r--drivers/net/phy/aquantia.h4
-rw-r--r--drivers/net/phy/bcm-phy-lib.h2
-rw-r--r--drivers/net/phy/dp83869.c49
-rw-r--r--drivers/net/phy/mdio-cavium.h2
-rw-r--r--drivers/net/phy/mdio-i2c.h2
-rw-r--r--drivers/net/phy/mdio-xgene.h2
-rw-r--r--drivers/net/phy/realtek.c9
-rw-r--r--drivers/net/ppp/ppp_generic.c245
-rw-r--r--drivers/net/tap.c12
-rw-r--r--drivers/net/usb/aqc111.h4
-rw-r--r--drivers/net/usb/hso.c5
-rw-r--r--drivers/net/usb/usbnet.c9
-rw-r--r--drivers/net/wan/z85230.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c33
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c25
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c8
-rw-r--r--drivers/ntb/test/ntb_pingpong.c5
-rw-r--r--drivers/nvdimm/btt.c18
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c48
-rw-r--r--drivers/nvdimm/claim.c14
-rw-r--r--drivers/nvdimm/core.c8
-rw-r--r--drivers/nvdimm/dax_devs.c27
-rw-r--r--drivers/nvdimm/dimm_devs.c30
-rw-r--r--drivers/nvdimm/e820.c13
-rw-r--r--drivers/nvdimm/namespace_devs.c114
-rw-r--r--drivers/nvdimm/nd-core.h21
-rw-r--r--drivers/nvdimm/nd.h27
-rw-r--r--drivers/nvdimm/of_pmem.c13
-rw-r--r--drivers/nvdimm/pfn_devs.c64
-rw-r--r--drivers/nvdimm/pmem.c18
-rw-r--r--drivers/nvdimm/region_devs.c235
-rw-r--r--drivers/nvme/host/core.c12
-rw-r--r--drivers/nvmem/Kconfig23
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c120
-rw-r--r--drivers/nvmem/imx-ocotp.c4
-rw-r--r--drivers/nvmem/rockchip-otp.c268
-rw-r--r--drivers/nvmem/sc27xx-efuse.c13
-rw-r--r--drivers/nvmem/sprd-efuse.c424
-rw-r--r--drivers/of/address.c103
-rw-r--r--drivers/of/base.c32
-rw-r--r--drivers/of/device.c9
-rw-r--r--drivers/of/fdt.c4
-rw-r--r--drivers/of/of_private.h14
-rw-r--r--drivers/of/overlay.c37
-rw-r--r--drivers/of/platform.c12
-rw-r--r--drivers/of/property.c340
-rw-r--r--drivers/of/unittest-data/testcases.dts1
-rw-r--r--drivers/of/unittest-data/tests-address.dtsi48
-rw-r--r--drivers/of/unittest.c96
-rw-r--r--drivers/parport/daisy.c40
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c26
-rw-r--r--drivers/pci/Kconfig26
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c2
-rw-r--r--drivers/pci/ats.c207
-rw-r--r--drivers/pci/controller/Kconfig31
-rw-r--r--drivers/pci/controller/Makefile4
-rw-r--r--drivers/pci/controller/cadence/Kconfig45
-rw-r--r--drivers/pci/controller/cadence/Makefile5
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c (renamed from drivers/pci/controller/pcie-cadence-ep.c)96
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c (renamed from drivers/pci/controller/pcie-cadence-host.c)97
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-plat.c174
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c (renamed from drivers/pci/controller/pcie-cadence.c)0
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h (renamed from drivers/pci/controller/pcie-cadence.h)79
-rw-r--r--drivers/pci/controller/dwc/Kconfig6
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c1
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c136
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c41
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h2
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c10
-rw-r--r--drivers/pci/controller/pci-aardvark.c133
-rw-r--r--drivers/pci/controller/pci-ftpci100.c79
-rw-r--r--drivers/pci/controller/pci-host-common.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c218
-rw-r--r--drivers/pci/controller/pci-mvebu.c4
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c1
-rw-r--r--drivers/pci/controller/pci-v3-semi.c74
-rw-r--r--drivers/pci/controller/pci-versatile.c71
-rw-r--r--drivers/pci/controller/pci-xgene.c73
-rw-r--r--drivers/pci/controller/pcie-altera.c41
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c5
-rw-r--r--drivers/pci/controller/pcie-iproc-platform.c9
-rw-r--r--drivers/pci/controller/pcie-iproc.c106
-rw-r--r--drivers/pci/controller/pcie-mediatek.c43
-rw-r--r--drivers/pci/controller/pcie-mobiveil.c146
-rw-r--r--drivers/pci/controller/pcie-rcar.c92
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c158
-rw-r--r--drivers/pci/controller/pcie-rockchip.h7
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c21
-rw-r--r--drivers/pci/controller/pcie-xilinx.c18
-rw-r--r--drivers/pci/controller/vmd.c34
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c10
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c2
-rw-r--r--drivers/pci/hotplug/Kconfig2
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c12
-rw-r--r--drivers/pci/hotplug/pciehp.h8
-rw-r--r--drivers/pci/hotplug/pciehp_core.c36
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c10
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c67
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c131
-rw-r--r--drivers/pci/iov.c9
-rw-r--r--drivers/pci/msi.c25
-rw-r--r--drivers/pci/of.c67
-rw-r--r--drivers/pci/pci-bridge-emul.c25
-rw-r--r--drivers/pci/pci-bridge-emul.h78
-rw-r--r--drivers/pci/pci-driver.c198
-rw-r--r--drivers/pci/pci-sysfs.c28
-rw-r--r--drivers/pci/pci.c390
-rw-r--r--drivers/pci/pci.h48
-rw-r--r--drivers/pci/pcie/Kconfig10
-rw-r--r--drivers/pci/pcie/aer.c88
-rw-r--r--drivers/pci/pcie/aspm.c245
-rw-r--r--drivers/pci/pcie/dpc.c2
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/pcie/portdrv_core.c7
-rw-r--r--drivers/pci/pcie/portdrv_pci.c8
-rw-r--r--drivers/pci/pcie/ptm.c2
-rw-r--r--drivers/pci/probe.c60
-rw-r--r--drivers/pci/proc.c4
-rw-r--r--drivers/pci/quirks.c157
-rw-r--r--drivers/pci/setup-bus.c70
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/phy/allwinner/Kconfig11
-rw-r--r--drivers/phy/allwinner/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun50i-usb3.c190
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c70
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c10
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c4
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c4
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c3
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c9
-rw-r--r--drivers/phy/phy-xgene.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c120
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h96
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c7
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c5
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c7
-rw-r--r--drivers/phy/rockchip/Kconfig8
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c805
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c1
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c23
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c137
-rw-r--r--drivers/phy/tegra/xusb.c93
-rw-r--r--drivers/phy/tegra/xusb.h4
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c3
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c2
-rw-r--r--drivers/pinctrl/Kconfig36
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c5
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c119
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c6
-rw-r--r--drivers/pinctrl/devicetree.c50
-rw-r--r--drivers/pinctrl/devicetree.h7
-rw-r--r--drivers/pinctrl/freescale/Kconfig12
-rw-r--r--drivers/pinctrl/intel/Kconfig7
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c119
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c171
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c454
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c4
-rw-r--r--drivers/pinctrl/meson/Kconfig6
-rw-r--r--drivers/pinctrl/meson/Makefile1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c942
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c38
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h7
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c1
-rw-r--r--drivers/pinctrl/mvebu/Kconfig10
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c40
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c12
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c81
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c32
-rw-r--r--drivers/pinctrl/pinctrl-amd.c3
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c65
-rw-r--r--drivers/pinctrl/pinctrl-at91.c55
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c4
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c54
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c4
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c945
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h144
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c50
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c23
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c29
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c29
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c30
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c382
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c8
-rw-r--r--drivers/pinctrl/pinctrl-rza2.c8
-rw-r--r--drivers/pinctrl/pinctrl-rzn1.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c53
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c21
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c4
-rw-r--r--drivers/pinctrl/pinctrl-u300.c4
-rw-r--r--drivers/pinctrl/pinctrl-xway.c4
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa25x.c13
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa27x.c13
-rw-r--r--drivers/pinctrl/qcom/Kconfig101
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c115
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c1127
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c23
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c121
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c10
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig12
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c32
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c35
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c863
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c57
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c4
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h8
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c41
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c43
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c51
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c4
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c23
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c10
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c3
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c4
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c4
-rw-r--r--drivers/platform/chrome/Kconfig19
-rw-r--r--drivers/platform/chrome/Makefile1
-rw-r--r--drivers/platform/chrome/cros_ec.c84
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c25
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c17
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c267
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c19
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c199
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c5
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/Kconfig2
-rw-r--r--drivers/platform/chrome/wilco_ec/Makefile3
-rw-r--r--drivers/platform/chrome/wilco_ec/core.c28
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c47
-rw-r--r--drivers/platform/chrome/wilco_ec/keyboard_leds.c191
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c91
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c2
-rw-r--r--drivers/platform/goldfish/Kconfig3
-rw-r--r--drivers/platform/mellanox/Kconfig16
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c321
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h103
-rw-r--r--drivers/platform/x86/Kconfig37
-rw-r--r--drivers/platform/x86/Makefile5
-rw-r--r--drivers/platform/x86/acerhdf.c7
-rw-r--r--drivers/platform/x86/asus-laptop.c71
-rw-r--r--drivers/platform/x86/dell-laptop.c26
-rw-r--r--drivers/platform/x86/dell_rbu.c2
-rw-r--r--drivers/platform/x86/hdaps.c40
-rw-r--r--drivers/platform/x86/hp-wmi.c10
-rw-r--r--drivers/platform/x86/huawei-wmi.c876
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_common.c147
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_common.h41
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_microb.c57
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_typec.c (renamed from drivers/platform/x86/intel_cht_int33fe.c)78
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c28
-rw-r--r--drivers/platform/x86/intel_pmc_core.c17
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c48
-rw-r--r--drivers/platform/x86/peaq-wmi.c66
-rw-r--r--drivers/platform/x86/system76_acpi.c384
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c52
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/power/avs/Kconfig12
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c6
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c8
-rw-r--r--drivers/power/reset/mt6323-poweroff.c97
-rw-r--r--drivers/power/supply/Kconfig2
-rw-r--r--drivers/power/supply/ab8500_btemp.c50
-rw-r--r--drivers/power/supply/ab8500_charger.c83
-rw-r--r--drivers/power/supply/ab8500_fg.c49
-rw-r--r--drivers/power/supply/abx500_chargalg.c8
-rw-r--r--drivers/power/supply/axp20x_usb_power.c8
-rw-r--r--drivers/power/supply/bd70528-charger.c1
-rw-r--r--drivers/power/supply/cpcap-battery.c271
-rw-r--r--drivers/power/supply/cpcap-charger.c222
-rw-r--r--drivers/power/supply/test_power.c61
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/rapidio/devices/tsi721.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c52
-rw-r--r--drivers/remoteproc/remoteproc_core.c9
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c3
-rw-r--r--drivers/remoteproc/stm32_rproc.c100
-rw-r--r--drivers/rpmsg/Kconfig2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c53
-rw-r--r--drivers/rpmsg/qcom_glink_smem.c2
-rw-r--r--drivers/rpmsg/rpmsg_char.c16
-rw-r--r--drivers/rtc/Kconfig31
-rw-r--r--drivers/rtc/dev.c33
-rw-r--r--drivers/rtc/interface.c58
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c11
-rw-r--r--drivers/rtc/rtc-armada38x.c10
-rw-r--r--drivers/rtc/rtc-asm9260.c4
-rw-r--r--drivers/rtc/rtc-aspeed.c4
-rw-r--r--drivers/rtc/rtc-at91rm9200.c19
-rw-r--r--drivers/rtc/rtc-at91sam9.c4
-rw-r--r--drivers/rtc/rtc-bd70528.c1
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c5
-rw-r--r--drivers/rtc/rtc-cadence.c4
-rw-r--r--drivers/rtc/rtc-coh901331.c4
-rw-r--r--drivers/rtc/rtc-cros-ec.c22
-rw-r--r--drivers/rtc/rtc-da9063.c3
-rw-r--r--drivers/rtc/rtc-davinci.c4
-rw-r--r--drivers/rtc/rtc-digicolor.c4
-rw-r--r--drivers/rtc/rtc-ds1216.c4
-rw-r--r--drivers/rtc/rtc-ds1286.c4
-rw-r--r--drivers/rtc/rtc-ds1302.c2
-rw-r--r--drivers/rtc/rtc-ds1343.c297
-rw-r--r--drivers/rtc/rtc-ds1347.c102
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-ds1511.c4
-rw-r--r--drivers/rtc/rtc-ds1553.c4
-rw-r--r--drivers/rtc/rtc-ds1685.c116
-rw-r--r--drivers/rtc/rtc-em3027.c4
-rw-r--r--drivers/rtc/rtc-ep93xx.c4
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c24
-rw-r--r--drivers/rtc/rtc-goldfish.c8
-rw-r--r--drivers/rtc/rtc-jz4740.c4
-rw-r--r--drivers/rtc/rtc-lpc24xx.c4
-rw-r--r--drivers/rtc/rtc-lpc32xx.c15
-rw-r--r--drivers/rtc/rtc-m41t80.c7
-rw-r--r--drivers/rtc/rtc-m48t86.c11
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c15
-rw-r--r--drivers/rtc/rtc-meson.c6
-rw-r--r--drivers/rtc/rtc-msm6242.c23
-rw-r--r--drivers/rtc/rtc-mt6397.c107
-rw-r--r--drivers/rtc/rtc-mt7622.c4
-rw-r--r--drivers/rtc/rtc-mv.c4
-rw-r--r--drivers/rtc/rtc-omap.c4
-rw-r--r--drivers/rtc/rtc-pcf2127.c10
-rw-r--r--drivers/rtc/rtc-pcf8523.c18
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pic32.c4
-rw-r--r--drivers/rtc/rtc-pm8xxx.c2
-rw-r--r--drivers/rtc/rtc-r7301.c7
-rw-r--r--drivers/rtc/rtc-rtd119x.c4
-rw-r--r--drivers/rtc/rtc-rv3028.c146
-rw-r--r--drivers/rtc/rtc-rx6110.c16
-rw-r--r--drivers/rtc/rtc-s35390a.c16
-rw-r--r--drivers/rtc/rtc-s3c.c4
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/rtc/rtc-sc27xx.c7
-rw-r--r--drivers/rtc/rtc-sirfsoc.c8
-rw-r--r--drivers/rtc/rtc-spear.c4
-rw-r--r--drivers/rtc/rtc-st-lpc.c5
-rw-r--r--drivers/rtc/rtc-stk17ta8.c4
-rw-r--r--drivers/rtc/rtc-stm32.c4
-rw-r--r--drivers/rtc/rtc-sun6i.c2
-rw-r--r--drivers/rtc/rtc-sunxi.c4
-rw-r--r--drivers/rtc/rtc-tegra.c8
-rw-r--r--drivers/rtc/rtc-tps65910.c21
-rw-r--r--drivers/rtc/rtc-tx4939.c4
-rw-r--r--drivers/rtc/rtc-v3020.c3
-rw-r--r--drivers/rtc/rtc-vr41xx.c8
-rw-r--r--drivers/rtc/rtc-vt8500.c32
-rw-r--r--drivers/rtc/rtc-wilco-ec.c8
-rw-r--r--drivers/rtc/rtc-xgene.c6
-rw-r--r--drivers/rtc/rtc-zynqmp.c7
-rw-r--r--drivers/rtc/sysfs.c5
-rw-r--r--drivers/s390/char/tape_char.c41
-rw-r--r--drivers/s390/crypto/zcrypt_error.h2
-rw-r--r--drivers/s390/scsi/Makefile2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c12
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c8
-rw-r--r--drivers/s390/scsi/zfcp_def.h4
-rw-r--r--drivers/s390/scsi/zfcp_diag.c305
-rw-r--r--drivers/s390/scsi/zfcp_diag.h101
-rw-r--r--drivers/s390/scsi/zfcp_erp.c4
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c73
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h21
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c170
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/sbus/char/envctrl.c4
-rw-r--r--drivers/scsi/3w-xxxx.c4
-rw-r--r--drivers/scsi/NCR5380.c37
-rw-r--r--drivers/scsi/aacraid/aachba.c11
-rw-r--r--drivers/scsi/aacraid/aacraid.h23
-rw-r--r--drivers/scsi/aacraid/comminit.c5
-rw-r--r--drivers/scsi/aacraid/commsup.c21
-rw-r--r--drivers/scsi/aacraid/linit.c35
-rw-r--r--drivers/scsi/aacraid/src.c10
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c6
-rw-r--r--drivers/scsi/arm/acornscsi.c4
-rw-r--r--drivers/scsi/atari_scsi.c6
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/bfa/bfad.c3
-rw-r--r--drivers/scsi/bfa/bfad_attr.c4
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c20
-rw-r--r--drivers/scsi/csiostor/csio_init.c7
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c18
-rw-r--r--drivers/scsi/csiostor/csio_mb.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c28
-rw-r--r--drivers/scsi/cxlflash/main.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c1
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c3
-rw-r--r--drivers/scsi/fnic/vnic_dev.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h67
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c376
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c30
-rw-r--r--drivers/scsi/hosts.c19
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/isci/port_config.c2
-rw-r--r--drivers/scsi/isci/remote_device.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c8
-rw-r--r--drivers/scsi/lpfc/lpfc.h40
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c298
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c18
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c118
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c57
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c200
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h31
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c954
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c149
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c85
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c103
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c391
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h42
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_scsi.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c36
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h15
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c344
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c12
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pcmcia/Kconfig2
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c20
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c133
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c38
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c70
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h24
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c451
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h3
-rw-r--r--drivers/scsi/pmcraid.c4
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_main.c8
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h34
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c106
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c174
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c3
-rw-r--r--drivers/scsi/scsi.c6
-rw-r--r--drivers/scsi/scsi_debug.c9
-rw-r--r--drivers/scsi/scsi_lib.c45
-rw-r--r--drivers/scsi/scsi_logging.c10
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_sysfs.c22
-rw-r--r--drivers/scsi/scsi_trace.c124
-rw-r--r--drivers/scsi/sd.c18
-rw-r--r--drivers/scsi/sg.c150
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h77
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c437
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c22
-rw-r--r--drivers/scsi/sr_vendor.c18
-rw-r--r--drivers/scsi/st.c28
-rw-r--r--drivers/scsi/storvsc_drv.c41
-rw-r--r--drivers/scsi/sun3_scsi.c4
-rw-r--r--drivers/scsi/ufs/Kconfig10
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ti-j721e-ufs.c90
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c5
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c3
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c53
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h3
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c15
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c1
-rw-r--r--drivers/scsi/ufs/ufshcd-dwc.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c1
-rw-r--r--drivers/scsi/ufs/ufshcd.c214
-rw-r--r--drivers/scsi/ufs/ufshcd.h12
-rw-r--r--drivers/scsi/ufs/ufshci.h2
-rw-r--r--drivers/scsi/zorro_esp.c11
-rw-r--r--drivers/soundwire/Kconfig2
-rw-r--r--drivers/soundwire/bus.c7
-rw-r--r--drivers/soundwire/cadence_master.c292
-rw-r--r--drivers/soundwire/cadence_master.h39
-rw-r--r--drivers/soundwire/intel.c201
-rw-r--r--drivers/soundwire/intel_init.c1
-rw-r--r--drivers/soundwire/slave.c98
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c301
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.txt18
-rw-r--r--drivers/staging/board/armadillo800eva.c12
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c4
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_routes.c2
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c21
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c27
-rw-r--r--drivers/staging/exfat/Kconfig9
-rw-r--r--drivers/staging/exfat/TODO61
-rw-r--r--drivers/staging/exfat/exfat.h186
-rw-r--r--drivers/staging/exfat/exfat_blkdev.c28
-rw-r--r--drivers/staging/exfat/exfat_cache.c303
-rw-r--r--drivers/staging/exfat/exfat_core.c1938
-rw-r--r--drivers/staging/exfat/exfat_nls.c192
-rw-r--r--drivers/staging/exfat/exfat_super.c896
-rw-r--r--drivers/staging/fbtft/Kconfig21
-rw-r--r--drivers/staging/fbtft/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_seps525.c213
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c22
-rw-r--r--drivers/staging/fbtft/fbtft-core.c129
-rw-r--r--drivers/staging/fbtft/fbtft.h4
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h11
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c8
-rw-r--r--drivers/staging/fieldbus/anybuss/hms-profinet.c2
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c6
-rw-r--r--drivers/staging/fieldbus/dev_core.c3
-rw-r--r--drivers/staging/fieldbus/fieldbus_dev.h6
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c50
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.h5
-rw-r--r--drivers/staging/fwserial/Kconfig26
-rw-r--r--drivers/staging/gasket/gasket_constants.h3
-rw-r--r--drivers/staging/gasket/gasket_core.c12
-rw-r--r--drivers/staging/gasket/gasket_core.h4
-rw-r--r--drivers/staging/gasket/gasket_ioctl.c16
-rw-r--r--drivers/staging/iio/accel/adis16240.c1
-rw-r--r--drivers/staging/iio/adc/ad7192.c79
-rw-r--r--drivers/staging/iio/frequency/ad9834.c4
-rw-r--r--drivers/staging/isdn/avm/b1.c41
-rw-r--r--drivers/staging/isdn/gigaset/interface.c2
-rw-r--r--drivers/staging/kpc2000/kpc2000_i2c.c204
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c24
-rw-r--r--drivers/staging/media/allegro-dvt/nal-h264.c2
-rw-r--r--drivers/staging/media/hantro/hantro.h20
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c16
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c52
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c120
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h7
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c48
-rw-r--r--drivers/staging/media/hantro/rk3288_vpu_hw.c20
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw.c12
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c11
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c12
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c25
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c51
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c21
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c41
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c10
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c27
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c27
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c38
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c36
-rw-r--r--drivers/staging/media/ipu3/Makefile6
-rw-r--r--drivers/staging/media/ipu3/TODO5
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h5
-rw-r--r--drivers/staging/media/omap4iss/iss.c6
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/Makefile2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c64
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c9
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c147
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c616
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h318
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c102
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.h1
-rw-r--r--drivers/staging/most/Kconfig8
-rw-r--r--drivers/staging/most/cdev/cdev.c1
-rw-r--r--drivers/staging/most/configfs.c124
-rw-r--r--drivers/staging/most/core.c108
-rw-r--r--drivers/staging/most/core.h1
-rw-r--r--drivers/staging/most/net/net.c1
-rw-r--r--drivers/staging/most/sound/sound.c9
-rw-r--r--drivers/staging/most/video/video.c1
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c21
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c23
-rw-r--r--drivers/staging/netlogic/TODO2
-rw-r--r--drivers/staging/netlogic/xlr_net.c3
-rw-r--r--drivers/staging/nvec/Kconfig10
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c6
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c6
-rw-r--r--drivers/staging/octeon/ethernet-tx.c6
-rw-r--r--drivers/staging/octeon/ethernet.c6
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h4
-rw-r--r--drivers/staging/octeon/octeon-stubs.h106
-rw-r--r--drivers/staging/olpc_dcon/Kconfig21
-rw-r--r--drivers/staging/olpc_dcon/Makefile4
-rw-r--r--drivers/staging/olpc_dcon/TODO1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c6
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h5
-rw-r--r--drivers/staging/pi433/Kconfig24
-rw-r--r--drivers/staging/pi433/pi433_if.c12
-rw-r--r--drivers/staging/qlge/TODO3
-rw-r--r--drivers/staging/qlge/qlge.h145
-rw-r--r--drivers/staging/qlge/qlge_dbg.c291
-rw-r--r--drivers/staging/qlge/qlge_main.c909
-rw-r--r--drivers/staging/qlge/qlge_mpi.c1
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c43
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c167
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c26
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c55
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c3
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h1
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c30
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c9
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c8
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c135
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c25
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c47
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c103
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h2
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c20
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c174
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c23
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c159
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c19
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c402
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c1076
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c77
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c41
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c13
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c127
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h26
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h21
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h4
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h7
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h15
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c56
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c59
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c23
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c136
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c8
-rw-r--r--drivers/staging/rts5208/ms.c86
-rw-r--r--drivers/staging/rts5208/ms.h70
-rw-r--r--drivers/staging/rts5208/rtsx.c3
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c4
-rw-r--r--drivers/staging/rts5208/sd.h2
-rw-r--r--drivers/staging/rts5208/xd.c8
-rw-r--r--drivers/staging/rts5208/xd.h6
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c41
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h18
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c4
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c16
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c28
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h11
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c94
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h83
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h17
-rw-r--r--drivers/staging/uwb/rsv.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig12
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c9
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h102
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_cfg.h172
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h28
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c23
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c370
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h32
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c231
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h104
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h96
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c164
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c4
-rw-r--r--drivers/staging/vme/devices/vme_user.c2
-rw-r--r--drivers/staging/vt6655/card.c24
-rw-r--r--drivers/staging/vt6655/card.h2
-rw-r--r--drivers/staging/vt6655/device_main.c14
-rw-r--r--drivers/staging/vt6655/power.c10
-rw-r--r--drivers/staging/vt6655/rf.c5
-rw-r--r--drivers/staging/vt6655/rf.h19
-rw-r--r--drivers/staging/vt6655/rxtx.c5
-rw-r--r--drivers/staging/vt6656/main_usb.c9
-rw-r--r--drivers/staging/vt6656/rxtx.c8
-rw-r--r--drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt97
-rw-r--r--drivers/staging/wfx/Kconfig8
-rw-r--r--drivers/staging/wfx/Makefile24
-rw-r--r--drivers/staging/wfx/TODO17
-rw-r--r--drivers/staging/wfx/bh.c321
-rw-r--r--drivers/staging/wfx/bh.h32
-rw-r--r--drivers/staging/wfx/bus.h36
-rw-r--r--drivers/staging/wfx/bus_sdio.c271
-rw-r--r--drivers/staging/wfx/bus_spi.c267
-rw-r--r--drivers/staging/wfx/data_rx.c213
-rw-r--r--drivers/staging/wfx/data_rx.h19
-rw-r--r--drivers/staging/wfx/data_tx.c837
-rw-r--r--drivers/staging/wfx/data_tx.h93
-rw-r--r--drivers/staging/wfx/debug.c311
-rw-r--r--drivers/staging/wfx/debug.h19
-rw-r--r--drivers/staging/wfx/fwio.c413
-rw-r--r--drivers/staging/wfx/fwio.h15
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h681
-rw-r--r--drivers/staging/wfx/hif_api_general.h437
-rw-r--r--drivers/staging/wfx/hif_api_mib.h557
-rw-r--r--drivers/staging/wfx/hif_rx.c364
-rw-r--r--drivers/staging/wfx/hif_rx.h18
-rw-r--r--drivers/staging/wfx/hif_tx.c493
-rw-r--r--drivers/staging/wfx/hif_tx.h68
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h293
-rw-r--r--drivers/staging/wfx/hwio.c352
-rw-r--r--drivers/staging/wfx/hwio.h80
-rw-r--r--drivers/staging/wfx/key.c268
-rw-r--r--drivers/staging/wfx/key.h22
-rw-r--r--drivers/staging/wfx/main.c491
-rw-r--r--drivers/staging/wfx/main.h47
-rw-r--r--drivers/staging/wfx/queue.c619
-rw-r--r--drivers/staging/wfx/queue.h61
-rw-r--r--drivers/staging/wfx/scan.c294
-rw-r--r--drivers/staging/wfx/scan.h42
-rw-r--r--drivers/staging/wfx/secure_link.h57
-rw-r--r--drivers/staging/wfx/sta.c1684
-rw-r--r--drivers/staging/wfx/sta.h103
-rw-r--r--drivers/staging/wfx/traces.h443
-rw-r--r--drivers/staging/wfx/wfx.h208
-rw-r--r--drivers/staging/wilc1000/Makefile8
-rw-r--r--drivers/staging/wilc1000/cfg80211.c (renamed from drivers/staging/wilc1000/wilc_wfi_cfgoperations.c)246
-rw-r--r--drivers/staging/wilc1000/cfg80211.h (renamed from drivers/staging/wilc1000/wilc_wfi_cfgoperations.h)8
-rw-r--r--drivers/staging/wilc1000/hif.c (renamed from drivers/staging/wilc1000/wilc_hif.c)43
-rw-r--r--drivers/staging/wilc1000/hif.h (renamed from drivers/staging/wilc1000/wilc_hif.h)6
-rw-r--r--drivers/staging/wilc1000/mon.c (renamed from drivers/staging/wilc1000/wilc_mon.c)4
-rw-r--r--drivers/staging/wilc1000/netdev.c (renamed from drivers/staging/wilc1000/wilc_netdev.c)146
-rw-r--r--drivers/staging/wilc1000/netdev.h (renamed from drivers/staging/wilc1000/wilc_wfi_netdevice.h)34
-rw-r--r--drivers/staging/wilc1000/sdio.c (renamed from drivers/staging/wilc1000/wilc_sdio.c)4
-rw-r--r--drivers/staging/wilc1000/spi.c (renamed from drivers/staging/wilc1000/wilc_spi.c)15
-rw-r--r--drivers/staging/wilc1000/wlan.c (renamed from drivers/staging/wilc1000/wilc_wlan.c)4
-rw-r--r--drivers/staging/wilc1000/wlan.h (renamed from drivers/staging/wilc1000/wilc_wlan.h)2
-rw-r--r--drivers/staging/wilc1000/wlan_cfg.c (renamed from drivers/staging/wilc1000/wilc_wlan_cfg.c)30
-rw-r--r--drivers/staging/wilc1000/wlan_cfg.h (renamed from drivers/staging/wilc1000/wilc_wlan_cfg.h)0
-rw-r--r--drivers/staging/wilc1000/wlan_if.h (renamed from drivers/staging/wilc1000/wilc_wlan_if.h)8
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h18
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c64
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c3
-rw-r--r--drivers/target/iscsi/iscsi_target.c24
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c232
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h17
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h3
-rw-r--r--drivers/target/target_core_fabric_lib.c2
-rw-r--r--drivers/target/target_core_tpg.c12
-rw-r--r--drivers/target/target_core_transport.c28
-rw-r--r--drivers/target/target_core_user.c6
-rw-r--r--drivers/target/target_core_xcopy.c1
-rw-r--r--drivers/tee/tee_core.c2
-rw-r--r--drivers/thermal/gov_bang_bang.c2
-rw-r--r--drivers/thunderbolt/cap.c6
-rw-r--r--drivers/thunderbolt/ctl.c8
-rw-r--r--drivers/thunderbolt/eeprom.c11
-rw-r--r--drivers/thunderbolt/icm.c157
-rw-r--r--drivers/thunderbolt/lc.c193
-rw-r--r--drivers/thunderbolt/path.c52
-rw-r--r--drivers/thunderbolt/switch.c586
-rw-r--r--drivers/thunderbolt/tb.c340
-rw-r--r--drivers/thunderbolt/tb.h81
-rw-r--r--drivers/thunderbolt/tb_msgs.h2
-rw-r--r--drivers/thunderbolt/tb_regs.h97
-rw-r--r--drivers/thunderbolt/tunnel.c364
-rw-r--r--drivers/thunderbolt/tunnel.h10
-rw-r--r--drivers/thunderbolt/xdomain.c5
-rw-r--r--drivers/tty/Kconfig40
-rw-r--r--drivers/tty/amiserial.c84
-rw-r--r--drivers/tty/hvc/Kconfig30
-rw-r--r--drivers/tty/hvc/hvc_dcc.c28
-rw-r--r--drivers/tty/rocket.c32
-rw-r--r--drivers/tty/serdev/core.c111
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c84
-rw-r--r--drivers/tty/serial/8250/8250_dw.c83
-rw-r--r--drivers/tty/serial/8250/8250_exar.c19
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c21
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c1
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c2
-rw-r--r--drivers/tty/serial/8250/8250_of.c31
-rw-r--r--drivers/tty/serial/8250/8250_pci.c300
-rw-r--r--drivers/tty/serial/8250/8250_port.c14
-rw-r--r--drivers/tty/serial/8250/Kconfig3
-rw-r--r--drivers/tty/serial/Kconfig106
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c12
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c4
-rw-r--r--drivers/tty/serial/fsl_lpuart.c84
-rw-r--r--drivers/tty/serial/ifx6x60.c3
-rw-r--r--drivers/tty/serial/imx.c7
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/msm_serial.c10
-rw-r--r--drivers/tty/serial/pch_uart.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c68
-rw-r--r--drivers/tty/serial/samsung_tty.c (renamed from drivers/tty/serial/samsung.c)0
-rw-r--r--drivers/tty/serial/serial-tegra.c3
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/sh-sci.c11
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h5
-rw-r--r--drivers/tty/serial/sprd_serial.c33
-rw-r--r--drivers/tty/serial/stm32-usart.c6
-rw-r--r--drivers/tty/serial/uartlite.c97
-rw-r--r--drivers/tty/tty_io.c19
-rw-r--r--drivers/tty/tty_ldisc.c7
-rw-r--r--drivers/tty/vt/keyboard.c2
-rw-r--r--drivers/tty/vt/vc_screen.c3
-rw-r--r--drivers/uio/uio_dmem_genirq.c14
-rw-r--r--drivers/usb/cdns3/Kconfig10
-rw-r--r--drivers/usb/cdns3/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c236
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c79
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c22
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/udc.c75
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c31
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/core/config.c12
-rw-r--r--drivers/usb/core/devio.c35
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/core/hub.c201
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/debugfs.c2
-rw-r--r--drivers/usb/dwc3/Kconfig30
-rw-r--r--drivers/usb/dwc3/core.c37
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/debugfs.c2
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c28
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_acm.c21
-rw-r--r--drivers/usb/gadget/function/f_fs.c12
-rw-r--r--drivers/usb/gadget/function/f_obex.c2
-rw-r--r--drivers/usb/gadget/function/f_serial.c21
-rw-r--r--drivers/usb/gadget/function/f_tcm.c13
-rw-r--r--drivers/usb/gadget/function/u_audio.c2
-rw-r--r--drivers/usb/gadget/function/u_serial.c516
-rw-r--r--drivers/usb/gadget/function/u_serial.h8
-rw-r--r--drivers/usb/gadget/legacy/Kconfig26
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c3
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c3
-rw-r--r--drivers/usb/gadget/legacy/serial.c49
-rw-r--r--drivers/usb/gadget/udc/Kconfig19
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c9
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c10
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.h4
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c12
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c7
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c6
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h2
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c4
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c5
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c21
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c5
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c3
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c3810
-rw-r--r--drivers/usb/host/Kconfig106
-rw-r--r--drivers/usb/host/bcma-hcd.c5
-rw-r--r--drivers/usb/host/fotg210-hcd.c8
-rw-r--r--drivers/usb/host/imx21-dbg.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c5
-rw-r--r--drivers/usb/host/ohci-at91.c8
-rw-r--r--drivers/usb/host/ohci-nxp.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c14
-rw-r--r--drivers/usb/host/pci-quirks.c2
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-ring.c68
-rw-r--r--drivers/usb/host/xhci-tegra.c126
-rw-r--r--drivers/usb/host/xhci-trace.h26
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h29
-rw-r--r--drivers/usb/image/microtek.c3
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c2
-rw-r--r--drivers/usb/misc/Kconfig22
-rw-r--r--drivers/usb/misc/appledisplay.c8
-rw-r--r--drivers/usb/misc/chaoskey.c24
-rw-r--r--drivers/usb/misc/ftdi-elan.c6
-rw-r--r--drivers/usb/misc/idmouse.c36
-rw-r--r--drivers/usb/misc/legousbtower.c303
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig2
-rw-r--r--drivers/usb/misc/usb251xb.c66
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c35
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/musb_debugfs.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c5
-rw-r--r--drivers/usb/phy/phy-keystone.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c5
-rw-r--r--drivers/usb/renesas_usbhs/common.h3
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.c19
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c12
-rw-r--r--drivers/usb/roles/class.c21
-rw-r--r--drivers/usb/serial/Kconfig48
-rw-r--r--drivers/usb/serial/ch341.c97
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/mos7840.c762
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/pl2303.c124
-rw-r--r--drivers/usb/serial/pl2303.h6
-rw-r--r--drivers/usb/storage/ene_ub6250.c2
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/usb/storage/transport.c3
-rw-r--r--drivers/usb/storage/uas.c11
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/Kconfig11
-rw-r--r--drivers/usb/typec/Makefile1
-rw-r--r--drivers/usb/typec/class.c42
-rw-r--r--drivers/usb/typec/hd3ss3220.c269
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c135
-rw-r--r--drivers/usb/typec/tps6598x.c49
-rw-r--r--drivers/usb/typec/ucsi/displayport.c40
-rw-r--r--drivers/usb/typec/ucsi/trace.c11
-rw-r--r--drivers/usb/typec/ucsi/trace.h79
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c609
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h417
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c91
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c166
-rw-r--r--drivers/usb/usbip/Kconfig1
-rw-r--r--drivers/usb/usbip/stub_rx.c50
-rw-r--r--drivers/usb/usbip/stub_tx.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c11
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c32
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h4
-rw-r--r--drivers/vfio/vfio.c39
-rw-r--r--drivers/vhost/net.c12
-rw-r--r--drivers/vhost/scsi.c12
-rw-r--r--drivers/vhost/test.c12
-rw-r--r--drivers/vhost/vsock.c12
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/gpio_backlight.c128
-rw-r--r--drivers/video/backlight/ipaq_micro_bl.c2
-rw-r--r--drivers/video/backlight/lm3630a_bl.c13
-rw-r--r--drivers/video/backlight/pm8941-wled.c424
-rw-r--r--drivers/video/backlight/pwm_bl.c39
-rw-r--r--drivers/video/backlight/qcom-wled.c1296
-rw-r--r--drivers/video/backlight/tosa_bl.c10
-rw-r--r--drivers/video/backlight/tosa_bl.h8
-rw-r--r--drivers/video/backlight/tosa_lcd.c28
-rw-r--r--drivers/video/fbdev/Kconfig1
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c12
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c21
-rw-r--r--drivers/video/fbdev/efifb.c2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c428
-rw-r--r--drivers/video/fbdev/matrox/i2c-matroxfb.c4
-rw-r--r--drivers/video/fbdev/sa1100fb.c13
-rw-r--r--drivers/video/hdmi.c8
-rw-r--r--drivers/video/logo/.gitignore1
-rw-r--r--drivers/video/logo/Makefile15
-rw-r--r--drivers/video/logo/pnmtologo.c514
-rw-r--r--drivers/virt/fsl_hypervisor.c2
-rw-r--r--drivers/w1/masters/sgi_w1.c4
-rw-r--r--drivers/w1/slaves/Kconfig8
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2430.c295
-rw-r--r--drivers/watchdog/Kconfig5
-rw-r--r--drivers/watchdog/acquirewdt.c1
-rw-r--r--drivers/watchdog/advantechwdt.c1
-rw-r--r--drivers/watchdog/alim1535_wdt.c1
-rw-r--r--drivers/watchdog/alim7101_wdt.c1
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/aspeed_wdt.c16
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c1
-rw-r--r--drivers/watchdog/at91sam9_wdt.h34
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c1
-rw-r--r--drivers/watchdog/bd70528_wdt.c4
-rw-r--r--drivers/watchdog/cadence_wdt.c6
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/f71808e_wdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/geodewdt.c1
-rw-r--r--drivers/watchdog/ib700wdt.c1
-rw-r--r--drivers/watchdog/ibmasr.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c30
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c45
-rw-r--r--drivers/watchdog/indydog.c1
-rw-r--r--drivers/watchdog/intel-mid_wdt.c1
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c1
-rw-r--r--drivers/watchdog/iop_wdt.c1
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c108
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/machzwd.c1
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/watchdog/mixcomwd.c1
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/mv64x60_wdt.c1
-rw-r--r--drivers/watchdog/nv_tco.c1
-rw-r--r--drivers/watchdog/pc87413_wdt.c1
-rw-r--r--drivers/watchdog/pcwd.c1
-rw-r--r--drivers/watchdog/pcwd_pci.c1
-rw-r--r--drivers/watchdog/pcwd_usb.c1
-rw-r--r--drivers/watchdog/pika_wdt.c1
-rw-r--r--drivers/watchdog/pnx833x_wdt.c1
-rw-r--r--drivers/watchdog/rc32434_wdt.c1
-rw-r--r--drivers/watchdog/rdc321x_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c1
-rw-r--r--drivers/watchdog/sa1100_wdt.c1
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/sbc60xxwdt.c1
-rw-r--r--drivers/watchdog/sbc7240_wdt.c4
-rw-r--r--drivers/watchdog/sbc_epx_c3.c1
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c1
-rw-r--r--drivers/watchdog/sc1200wdt.c1
-rw-r--r--drivers/watchdog/sc520_wdt.c1
-rw-r--r--drivers/watchdog/sch311x_wdt.c1
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c1
-rw-r--r--drivers/watchdog/sprd_wdt.c6
-rw-r--r--drivers/watchdog/w83627hf_wdt.c11
-rw-r--r--drivers/watchdog/w83877f_wdt.c1
-rw-r--r--drivers/watchdog/w83977f_wdt.c1
-rw-r--r--drivers/watchdog/wafer5823wdt.c1
-rw-r--r--drivers/watchdog/watchdog_dev.c102
-rw-r--r--drivers/watchdog/wdat_wdt.c2
-rw-r--r--drivers/watchdog/wdrtas.c1
-rw-r--r--drivers/watchdog/wdt.c1
-rw-r--r--drivers/watchdog/wdt285.c1
-rw-r--r--drivers/watchdog/wdt977.c1
-rw-r--r--drivers/watchdog/wdt_pci.c1
-rw-r--r--drivers/xen/balloon.c1
-rw-r--r--drivers/xen/gntdev-common.h8
-rw-r--r--drivers/xen/gntdev.c179
-rw-r--r--drivers/xen/platform-pci.c14
-rw-r--r--drivers/xen/swiotlb-xen.c12
3368 files changed, 167085 insertions, 80287 deletions
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 5a7551d060f2..33f71983e001 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1057,8 +1057,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
*/
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
- u64 mask, dmaaddr = 0, size = 0, offset = 0;
- int ret, msb;
+ u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
+ int ret;
/*
* If @dev is expected to be DMA-capable then the bus code that created
@@ -1085,19 +1085,13 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
}
if (!ret) {
- msb = fls64(dmaaddr + size - 1);
/*
- * Round-up to the power-of-two mask or set
- * the mask to the whole 64-bit address space
- * in case the DMA region covers the full
- * memory window.
+ * Limit coherent and dma mask based on size retrieved from
+ * firmware.
*/
- mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
- /*
- * Limit coherent and dma mask based on size
- * retrieved from firmware.
- */
- dev->bus_dma_mask = mask;
+ end = dmaaddr + size - 1;
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
+ dev->bus_dma_limit = end;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
}
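
The replacement above derives the device's DMA limit directly from the highest address reported by firmware and builds the mask with DMA_BIT_MASK(ilog2(end) + 1) instead of the old fls64() round-up. The following stand-alone sketch is illustrative only and not part of the patch; DMA_BIT_MASK() and ilog2() are re-implemented locally so it compiles in userspace, but they behave like the kernel versions for the inputs used here:

/*
 * Illustrative sketch: how bus_dma_limit and the DMA masks relate to a
 * firmware-reported window (example values, not from the patch).
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;			/* floor(log2(v)), valid for v > 0 */
}

int main(void)
{
	uint64_t dmaaddr = 0x80000000ULL;	/* example window base */
	uint64_t size    = 0x40000000ULL;	/* example window size */
	uint64_t end     = dmaaddr + size - 1;	/* 0xbfffffff, the DMA limit */
	uint64_t mask    = DMA_BIT_MASK(ilog2_u64(end) + 1);

	/* The mask rounds the limit up to a power-of-two boundary: 0xffffffff. */
	printf("end=%#llx mask=%#llx\n",
	       (unsigned long long)end, (unsigned long long)mask);
	return 0;
}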
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 4fd84fbdac29..d05be13c1022 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -533,26 +533,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
}
#ifdef CONFIG_PM_SLEEP
-static bool acpi_ec_query_flushed(struct acpi_ec *ec)
+static void __acpi_ec_flush_work(void)
{
- bool flushed;
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
- flushed = !ec->nr_pending_queries;
- spin_unlock_irqrestore(&ec->lock, flags);
- return flushed;
-}
-
-static void __acpi_ec_flush_event(struct acpi_ec *ec)
-{
- /*
- * When ec_freeze_events is true, we need to flush events in
- * the proper position before entering the noirq stage.
- */
- wait_event(ec->wait, acpi_ec_query_flushed(ec));
- if (ec_query_wq)
- flush_workqueue(ec_query_wq);
+ flush_scheduled_work(); /* flush ec->work */
+ flush_workqueue(ec_query_wq); /* flush queries */
}
static void acpi_ec_disable_event(struct acpi_ec *ec)
@@ -562,15 +546,21 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_lock_irqsave(&ec->lock, flags);
__acpi_ec_disable_event(ec);
spin_unlock_irqrestore(&ec->lock, flags);
- __acpi_ec_flush_event(ec);
+
+ /*
+ * When ec_freeze_events is true, we need to flush events in
+ * the proper position before entering the noirq stage.
+ */
+ __acpi_ec_flush_work();
}
void acpi_ec_flush_work(void)
{
- if (first_ec)
- __acpi_ec_flush_event(first_ec);
+ /* Without ec_query_wq there is nothing to flush. */
+ if (!ec_query_wq)
+ return;
- flush_scheduled_work();
+ __acpi_ec_flush_work();
}
#endif /* CONFIG_PM_SLEEP */
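
__acpi_ec_flush_work() now drains two things: work items the EC driver placed on the system workqueue via schedule_work(), and queries queued on the dedicated ec_query_wq. Below is a minimal, hypothetical module sketch of the same flushing pattern; the demo_* names are invented for illustration and the module does nothing useful:

/*
 * Sketch of the flush pattern used above: flush_scheduled_work() drains
 * work submitted with schedule_work(), flush_workqueue() drains a
 * driver-private workqueue.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *w) { }
static DECLARE_WORK(demo_system_work, demo_fn);
static DECLARE_WORK(demo_private_work, demo_fn);

static void demo_flush_all(void)
{
	flush_scheduled_work();		/* system workqueue users */
	flush_workqueue(demo_wq);	/* our dedicated queue */
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;

	schedule_work(&demo_system_work);
	queue_work(demo_wq, &demo_private_work);
	demo_flush_all();		/* both callbacks have completed here */
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");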
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 14e68f202f81..a3320f93616d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1404,7 +1404,6 @@ static const struct attribute_group acpi_nfit_attribute_group = {
};
static const struct attribute_group *acpi_nfit_attribute_groups[] = {
- &nvdimm_bus_attribute_group,
&acpi_nfit_attribute_group,
NULL,
};
@@ -1698,8 +1697,6 @@ static const struct attribute_group acpi_nfit_dimm_attribute_group = {
};
static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
- &nvdimm_attribute_group,
- &nd_device_attribute_group,
&acpi_nfit_dimm_attribute_group,
NULL,
};
@@ -2197,10 +2194,6 @@ static const struct attribute_group acpi_nfit_region_attribute_group = {
};
static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
- &nd_region_attribute_group,
- &nd_mapping_attribute_group,
- &nd_device_attribute_group,
- &nd_numa_attribute_group,
&acpi_nfit_region_attribute_group,
NULL,
};
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3eacf474e1e3..e601c4511a8b 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1317,6 +1317,52 @@ acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
args_count, args);
}
+static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct acpi_device *adev;
+ struct fwnode_handle *parent;
+
+ /* Is this the root node? */
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "\\";
+
+ fwnode_handle_put(parent);
+
+ if (is_acpi_data_node(fwnode)) {
+ const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
+
+ return dn->name;
+ }
+
+ adev = to_acpi_device_node(fwnode);
+ if (WARN_ON(!adev))
+ return NULL;
+
+ return acpi_device_bid(adev);
+}
+
+static const char *
+acpi_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+
+ /* Is this the root node? */
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Is this 2nd node from the root? */
+ parent = fwnode_get_next_parent(parent);
+ if (!parent)
+ return "";
+
+ fwnode_handle_put(parent);
+
+ /* ACPI device or data node. */
+ return ".";
+}
+
static struct fwnode_handle *
acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
{
@@ -1357,6 +1403,8 @@ acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
.get_parent = acpi_node_get_parent, \
.get_next_child_node = acpi_get_next_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
+ .get_name = acpi_fwnode_get_name, \
+ .get_name_prefix = acpi_fwnode_get_name_prefix, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
acpi_graph_get_next_endpoint, \
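
The new get_name()/get_name_prefix() ops let generic code compose a full firmware-node path segment by segment: the ACPI root contributes "\", its immediate children add no prefix, and deeper nodes are separated by ".", yielding paths such as "\_SB.PCI0.RP01". A hedged sketch of such a walk follows; it assumes the generic fwnode_get_name()/fwnode_get_name_prefix() wrappers introduced elsewhere in this series, and demo_print_fwnode_path() is an invented helper, not kernel API:

/*
 * Hypothetical use of the new ops: print a full firmware-node path by
 * recursing towards the root so segments come out in order.
 */
#include <linux/property.h>
#include <linux/printk.h>

static void demo_print_fwnode_path(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *parent;

	parent = fwnode_get_parent(fwnode);
	if (parent) {
		demo_print_fwnode_path(parent);	/* recursion is fine for a sketch */
		fwnode_handle_put(parent);
	}

	pr_cont("%s%s", fwnode_get_name_prefix(fwnode),
		fwnode_get_name(fwnode));
}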
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2af937a8b1c5..6747a279621b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -977,6 +977,16 @@ static int acpi_s2idle_prepare_late(void)
return 0;
}
+static void acpi_s2idle_sync(void)
+{
+ /*
+ * The EC driver uses the system workqueue and an additional special
+ * one, so those need to be flushed too.
+ */
+ acpi_ec_flush_work();
+ acpi_os_wait_events_complete(); /* synchronize Notify handling */
+}
+
static void acpi_s2idle_wake(void)
{
/*
@@ -1001,13 +1011,8 @@ static void acpi_s2idle_wake(void)
* should be missed by canceling the wakeup here.
*/
pm_system_cancel_wakeup();
- /*
- * The EC driver uses the system workqueue and an additional
- * special one, so those need to be flushed too.
- */
- acpi_os_wait_events_complete(); /* synchronize EC GPE processing */
- acpi_ec_flush_work();
- acpi_os_wait_events_complete(); /* synchronize Notify handling */
+
+ acpi_s2idle_sync();
rearm_wake_irq(acpi_sci_irq);
}
@@ -1024,6 +1029,13 @@ static void acpi_s2idle_restore_early(void)
static void acpi_s2idle_restore(void)
{
+ /*
+ * Drain pending events before restoring the working-state configuration
+ * of GPEs.
+ */
+ acpi_os_wait_events_complete(); /* synchronize GPE processing */
+ acpi_s2idle_sync();
+
s2idle_wakeup = false;
acpi_enable_all_runtime_gpes();
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 265d9dd46a5e..e9bc9fcc7ea5 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -65,6 +65,7 @@
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
+#include <linux/sizes.h>
#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>
@@ -92,11 +93,6 @@ static atomic_t binder_last_id;
static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K 0x400
-#endif
-
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
@@ -6058,7 +6054,7 @@ const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
- .compat_ioctl = binder_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
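
Switching .compat_ioctl to compat_ptr_ioctl() works because binder's ioctl arguments are plain pointers that only need compat_ptr() translation before reaching the native handler. A minimal sketch of the same wiring for a hypothetical driver (the demo_* names are invented):

/*
 * Sketch: drivers whose compat path only needs pointer conversion can use
 * the generic compat_ptr_ioctl() helper instead of a hand-written handler.
 */
#include <linux/module.h>
#include <linux/fs.h>

static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;	/* already compat_ptr()'d */

	(void)argp;
	return 0;
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,	/* generic helper */
};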
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index eb76a823fbb2..2d8b9b91dee0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -268,7 +268,6 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
alloc->pages_high = index + 1;
trace_binder_alloc_page_end(alloc, index);
- /* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_read(&mm->mmap_sem);
@@ -277,8 +276,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
return 0;
free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
+ for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
bool ret;
size_t index;
@@ -291,6 +289,8 @@ free_range:
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
+ if (page_addr == start)
+ break;
continue;
err_vm_insert_page_failed:
@@ -298,7 +298,8 @@ err_vm_insert_page_failed:
page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
- ;
+ if (page_addr == start)
+ break;
}
err_no_vma:
if (mm) {
@@ -681,17 +682,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct binder_buffer *buffer;
mutex_lock(&binder_alloc_mmap_lock);
- if (alloc->buffer) {
+ if (alloc->buffer_size) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
+ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+ SZ_4M);
+ mutex_unlock(&binder_alloc_mmap_lock);
alloc->buffer = (void __user *)vma->vm_start;
- mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
- SZ_4M);
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
@@ -722,8 +723,9 @@ err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer = NULL;
+ mutex_lock(&binder_alloc_mmap_lock);
+ alloc->buffer_size = 0;
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -841,14 +843,20 @@ void binder_alloc_print_pages(struct seq_file *m,
int free = 0;
mutex_lock(&alloc->mutex);
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
+ /*
+ * Make sure the binder_alloc is fully initialized, otherwise we might
+ * read inconsistent state.
+ */
+ if (binder_alloc_get_vma(alloc) != NULL) {
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
}
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ec6c64fce74a..4bfd1b14b390 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -910,7 +910,7 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
* value, don't extend it here. This happens on STA2X11, for example.
*
* XXX: manipulating the DMA mask from platform code is completely
- * bogus, platform code should use dev->bus_dma_mask instead..
+ * bogus, platform code should use dev->bus_dma_limit instead..
*/
if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
return 0;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index bfc617cc8ac5..948d2c6557f3 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -11,8 +11,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/ahci_platform.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/libata.h>
@@ -100,7 +100,7 @@ struct imx_ahci_priv {
struct clk *phy_pclk0;
struct clk *phy_pclk1;
void __iomem *phy_base;
- int clkreq_gpio;
+ struct gpio_desc *clkreq_gpiod;
struct regmap *gpr;
bool no_device;
bool first_time;
@@ -980,7 +980,6 @@ static struct scsi_host_template ahci_platform_sht = {
static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
{
- int ret;
struct resource *phy_res;
struct platform_device *pdev = imxpriv->ahci_pdev;
struct device_node *np = dev->of_node;
@@ -1033,20 +1032,12 @@ static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
}
/* Fetch GPIO, then enable the external OSC */
- imxpriv->clkreq_gpio = of_get_named_gpio(np, "clkreq-gpio", 0);
- if (gpio_is_valid(imxpriv->clkreq_gpio)) {
- ret = devm_gpio_request_one(dev, imxpriv->clkreq_gpio,
- GPIOF_OUT_INIT_LOW,
- "SATA CLKREQ");
- if (ret == -EBUSY) {
- dev_info(dev, "clkreq had been initialized.\n");
- } else if (ret) {
- dev_err(dev, "%d unable to get clkreq.\n", ret);
- return ret;
- }
- } else if (imxpriv->clkreq_gpio == -EPROBE_DEFER) {
- return imxpriv->clkreq_gpio;
- }
+ imxpriv->clkreq_gpiod = devm_gpiod_get_optional(dev, "clkreq",
+ GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(imxpriv->clkreq_gpiod))
+ return PTR_ERR(imxpriv->clkreq_gpiod);
+ if (imxpriv->clkreq_gpiod)
+ gpiod_set_consumer_name(imxpriv->clkreq_gpiod, "SATA CLKREQ");
return 0;
}
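A hedged sketch of the gpiod consumer idiom the conversion above relies on (function name and con_id are illustrative): devm_gpiod_get_optional() returns NULL when the line is not described in firmware, an ERR_PTR (including -EPROBE_DEFER) on failure, and a live descriptor otherwise.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_get_clkreq(struct device *dev)
{
        struct gpio_desc *gpiod;

        gpiod = devm_gpiod_get_optional(dev, "clkreq", GPIOD_OUT_LOW);
        if (IS_ERR(gpiod))
                return PTR_ERR(gpiod);  /* covers -EPROBE_DEFER as well */
        if (gpiod)
                gpiod_set_consumer_name(gpiod, "SATA CLKREQ");
        return 0;
}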
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index ebecab8c3f36..135173c8d138 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -219,7 +219,6 @@ struct arasan_cf_dev {
static struct scsi_host_template arasan_cf_sht = {
ATA_BASE_SHT(DRIVER_NAME),
- .sg_tablesize = SG_NONE,
.dma_boundary = 0xFFFFFFFFUL,
};
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index cfd0cf2cbca6..e01a3a6e4d46 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -422,7 +422,7 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
#ifdef ATP867X_DEBUG
atp867x_check_res(pdev);
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
(unsigned long long)(host->iomap[i]));
#endif
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 65ec8dff1c51..f3e62f5528bd 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -2329,7 +2329,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
// Make sure this is a SATA controller by counting the number of bars
// (NVIDIA SATA controllers will always have six bars). Otherwise,
// it's an IDE controller and we ignore it.
- for (bar = 0; bar < 6; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
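Both loops above now bound the walk with PCI_STD_NUM_BARS instead of an open-coded 6 or PCI_ROM_RESOURCE. A small hedged helper (name hypothetical) showing the idiom:

#include <linux/pci.h>

static void foo_dump_bars(struct pci_dev *pdev)
{
        int bar;

        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
                dev_dbg(&pdev->dev, "BAR%d: %pR\n", bar, &pdev->resource[bar]);
}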
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 28b92e3cc570..c3b3b5c0b0da 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -148,6 +148,10 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+config PM_QOS_KUNIT_TEST
+ bool "KUnit Test for PM QoS features"
+ depends on KUNIT
+
config HMEM_REPORTING
bool
default n
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7bd9cd366d41..42a672456432 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -45,6 +45,10 @@ early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
/* Device links support. */
+static LIST_HEAD(wait_for_suppliers);
+static DEFINE_MUTEX(wfs_lock);
+static LIST_HEAD(deferred_sync);
+static unsigned int defer_sync_state_count = 1;
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
@@ -127,6 +131,9 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ continue;
+
if (link->consumer == target)
return 1;
@@ -196,8 +203,11 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_pm_move_last(dev);
device_for_each_child(dev, NULL, device_reorder_to_tail);
- list_for_each_entry(link, &dev->links.consumers, s_node)
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ continue;
device_reorder_to_tail(link->consumer, NULL);
+ }
return 0;
}
@@ -224,7 +234,8 @@ void device_pm_move_to_tail(struct device *dev)
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
- DL_FLAG_AUTOPROBE_CONSUMER)
+ DL_FLAG_AUTOPROBE_CONSUMER | \
+ DL_FLAG_SYNC_STATE_ONLY)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
@@ -292,6 +303,8 @@ struct device_link *device_link_add(struct device *consumer,
if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
+ (flags & DL_FLAG_SYNC_STATE_ONLY &&
+ flags != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@@ -312,11 +325,14 @@ struct device_link *device_link_add(struct device *consumer,
/*
* If the supplier has not been fully registered yet or there is a
- * reverse dependency between the consumer and the supplier already in
- * the graph, return NULL.
+ * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
+ * the supplier already in the graph, return NULL. If the link is a
+ * SYNC_STATE_ONLY link, we don't check for reverse dependencies
+ * because it only affects sync_state() callbacks.
*/
if (!device_pm_initialized(supplier)
- || device_is_dependent(consumer, supplier)) {
+ || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
+ device_is_dependent(consumer, supplier))) {
link = NULL;
goto out;
}
@@ -343,9 +359,14 @@ struct device_link *device_link_add(struct device *consumer,
}
if (flags & DL_FLAG_STATELESS) {
- link->flags |= DL_FLAG_STATELESS;
kref_get(&link->kref);
- goto out;
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !(link->flags & DL_FLAG_STATELESS)) {
+ link->flags |= DL_FLAG_STATELESS;
+ goto reorder;
+ } else {
+ goto out;
+ }
}
/*
@@ -367,6 +388,12 @@ struct device_link *device_link_add(struct device *consumer,
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
+ goto reorder;
+ }
+
goto out;
}
@@ -406,6 +433,13 @@ struct device_link *device_link_add(struct device *consumer,
flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
+ if (flags & DL_FLAG_SYNC_STATE_ONLY) {
+ dev_dbg(consumer,
+ "Linked as a sync state only consumer to %s\n",
+ dev_name(supplier));
+ goto out;
+ }
+reorder:
/*
* Move the consumer and all of the devices depending on it to the end
* of dpm_list and the devices_kset list.
@@ -431,6 +465,70 @@ struct device_link *device_link_add(struct device *consumer,
}
EXPORT_SYMBOL_GPL(device_link_add);
+/**
+ * device_link_wait_for_supplier - Add device to wait_for_suppliers list
+ * @consumer: Consumer device
+ *
+ * Marks the @consumer device as waiting for suppliers to become available by
+ * adding it to the wait_for_suppliers list. The consumer device will never be
+ * probed until it's removed from the wait_for_suppliers list.
+ *
+ * The caller is responsible for adding the links to the supplier devices once
+ * they are available and removing the @consumer device from the
+ * wait_for_suppliers list once links to all the suppliers have been created.
+ *
+ * This function is NOT meant to be called from the probe function of the
+ * consumer but rather from code that creates/adds the consumer device.
+ */
+static void device_link_wait_for_supplier(struct device *consumer,
+ bool need_for_probe)
+{
+ mutex_lock(&wfs_lock);
+ list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers);
+ consumer->links.need_for_probe = need_for_probe;
+ mutex_unlock(&wfs_lock);
+}
+
+static void device_link_wait_for_mandatory_supplier(struct device *consumer)
+{
+ device_link_wait_for_supplier(consumer, true);
+}
+
+static void device_link_wait_for_optional_supplier(struct device *consumer)
+{
+ device_link_wait_for_supplier(consumer, false);
+}
+
+/**
+ * device_link_add_missing_supplier_links - Add links from consumer devices to
+ * supplier devices, leaving any
+ * consumer with inactive suppliers on
+ * the wait_for_suppliers list
+ *
+ * Loops through all consumers waiting on suppliers and tries to add all their
+ * supplier links. If that succeeds, the consumer device is removed from
+ * the wait_for_suppliers list. Otherwise, it is left on the wait_for_suppliers
+ * list. Devices left on the wait_for_suppliers list will not be probed.
+ *
+ * The fwnode add_links callback is expected to return 0 if it has found and
+ * added all the supplier links for the consumer device. It should return an
+ * error if it isn't able to do so.
+ *
+ * The caller of device_link_wait_for_supplier() is expected to call this once
+ * it's aware of potential suppliers becoming available.
+ */
+static void device_link_add_missing_supplier_links(void)
+{
+ struct device *dev, *tmp;
+
+ mutex_lock(&wfs_lock);
+ list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
+ links.needs_suppliers)
+ if (!fwnode_call_int_op(dev->fwnode, add_links, dev))
+ list_del_init(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
+}
+
static void device_link_free(struct device_link *link)
{
while (refcount_dec_not_one(&link->rpm_active))
@@ -565,10 +663,23 @@ int device_links_check_suppliers(struct device *dev)
struct device_link *link;
int ret = 0;
+ /*
+ * Device waiting for supplier to become available is not allowed to
+ * probe.
+ */
+ mutex_lock(&wfs_lock);
+ if (!list_empty(&dev->links.needs_suppliers) &&
+ dev->links.need_for_probe) {
+ mutex_unlock(&wfs_lock);
+ return -EPROBE_DEFER;
+ }
+ mutex_unlock(&wfs_lock);
+
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!(link->flags & DL_FLAG_MANAGED) ||
+ link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;
if (link->status != DL_STATE_AVAILABLE) {
@@ -585,6 +696,128 @@ int device_links_check_suppliers(struct device *dev)
}
/**
+ * __device_links_queue_sync_state - Queue a device for sync_state() callback
+ * @dev: Device to call sync_state() on
+ * @list: List head to queue the @dev on
+ *
+ * Queues a device for a sync_state() callback when the device links write lock
+ * isn't held. This allows the sync_state() execution flow to use device links
+ * APIs. The caller must ensure this function is called with
+ * device_links_write_lock() held.
+ *
+ * This function does a get_device() to make sure the device is not freed while
+ * on this list.
+ *
+ * So the caller must also ensure that device_links_flush_sync_list() is called
+ * as soon as the caller releases device_links_write_lock(). This is necessary
+ * to make sure the sync_state() is called in a timely fashion and the
+ * put_device() is called on this device.
+ */
+static void __device_links_queue_sync_state(struct device *dev,
+ struct list_head *list)
+{
+ struct device_link *link;
+
+ if (dev->state_synced)
+ return;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+ if (link->status != DL_STATE_ACTIVE)
+ return;
+ }
+
+ /*
+ * Set the flag here to avoid adding the same device to a list more
+ * than once. This can happen if new consumers get added to the device
+ * and probed before the list is flushed.
+ */
+ dev->state_synced = true;
+
+ if (WARN_ON(!list_empty(&dev->links.defer_sync)))
+ return;
+
+ get_device(dev);
+ list_add_tail(&dev->links.defer_sync, list);
+}
+
+/**
+ * device_links_flush_sync_list - Call sync_state() on a list of devices
+ * @list: List of devices to call sync_state() on
+ *
+ * Calls sync_state() on all the devices that have been queued for it. This
+ * function is used in conjunction with __device_links_queue_sync_state().
+ */
+static void device_links_flush_sync_list(struct list_head *list)
+{
+ struct device *dev, *tmp;
+
+ list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
+ list_del_init(&dev->links.defer_sync);
+
+ device_lock(dev);
+
+ if (dev->bus->sync_state)
+ dev->bus->sync_state(dev);
+ else if (dev->driver && dev->driver->sync_state)
+ dev->driver->sync_state(dev);
+
+ device_unlock(dev);
+
+ put_device(dev);
+ }
+}
+
+void device_links_supplier_sync_state_pause(void)
+{
+ device_links_write_lock();
+ defer_sync_state_count++;
+ device_links_write_unlock();
+}
+
+void device_links_supplier_sync_state_resume(void)
+{
+ struct device *dev, *tmp;
+ LIST_HEAD(sync_list);
+
+ device_links_write_lock();
+ if (!defer_sync_state_count) {
+ WARN(true, "Unmatched sync_state pause/resume!");
+ goto out;
+ }
+ defer_sync_state_count--;
+ if (defer_sync_state_count)
+ goto out;
+
+ list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
+ /*
+ * Delete from deferred_sync list before queuing it to
+ * sync_list because defer_sync is used for both lists.
+ */
+ list_del_init(&dev->links.defer_sync);
+ __device_links_queue_sync_state(dev, &sync_list);
+ }
+out:
+ device_links_write_unlock();
+
+ device_links_flush_sync_list(&sync_list);
+}
+
+static int sync_state_resume_initcall(void)
+{
+ device_links_supplier_sync_state_resume();
+ return 0;
+}
+late_initcall(sync_state_resume_initcall);
+
+static void __device_links_supplier_defer_sync(struct device *sup)
+{
+ if (list_empty(&sup->links.defer_sync))
+ list_add_tail(&sup->links.defer_sync, &deferred_sync);
+}
+
+/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
*
@@ -598,6 +831,16 @@ int device_links_check_suppliers(struct device *dev)
void device_links_driver_bound(struct device *dev)
{
struct device_link *link;
+ LIST_HEAD(sync_list);
+
+ /*
+ * If a device probes successfully, it's expected to have created all
+ * the device links it needs or to create new device links as it needs
+ * them. So, it no longer needs to wait on any suppliers.
+ */
+ mutex_lock(&wfs_lock);
+ list_del_init(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
device_links_write_lock();
@@ -628,11 +871,19 @@ void device_links_driver_bound(struct device *dev)
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+
+ if (defer_sync_state_count)
+ __device_links_supplier_defer_sync(link->supplier);
+ else
+ __device_links_queue_sync_state(link->supplier,
+ &sync_list);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
device_links_write_unlock();
+
+ device_links_flush_sync_list(&sync_list);
}
static void device_link_drop_managed(struct device_link *link)
@@ -744,6 +995,7 @@ void device_links_driver_cleanup(struct device *dev)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
+ list_del_init(&dev->links.defer_sync);
__device_links_no_driver(dev);
device_links_write_unlock();
@@ -813,7 +1065,8 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!(link->flags & DL_FLAG_MANAGED) ||
+ link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;
status = link->status;
@@ -849,6 +1102,10 @@ static void device_links_purge(struct device *dev)
{
struct device_link *link, *ln;
+ mutex_lock(&wfs_lock);
+ list_del(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
+
/*
* Delete all of the remaining links from this device to any other
* devices (either consumers or suppliers).
@@ -1713,6 +1970,8 @@ void device_initialize(struct device *dev)
#endif
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
+ INIT_LIST_HEAD(&dev->links.needs_suppliers);
+ INIT_LIST_HEAD(&dev->links.defer_sync);
dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -2101,7 +2360,7 @@ int device_add(struct device *dev)
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
- int error = -EINVAL;
+ int error = -EINVAL, fw_ret;
struct kobject *glue_dir = NULL;
dev = get_device(dev);
@@ -2199,6 +2458,32 @@ int device_add(struct device *dev)
BUS_NOTIFY_ADD_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_ADD);
+
+ if (dev->fwnode && !dev->fwnode->dev)
+ dev->fwnode->dev = dev;
+
+ /*
+ * Check if any of the other devices (consumers) have been waiting for
+ * this device (supplier) to be added so that they can create a device
+ * link to it.
+ *
+ * This needs to happen after device_pm_add() because device_link_add()
+ * requires the supplier be registered before it's called.
+ *
+ * But this also needs to happen before bus_probe_device() to make sure
+ * waiting consumers can link to it before the driver is bound to the
+ * device and the driver sync_state callback is called for this device.
+ */
+ device_link_add_missing_supplier_links();
+
+ if (fwnode_has_op(dev->fwnode, add_links)) {
+ fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
+ if (fw_ret == -ENODEV)
+ device_link_wait_for_mandatory_supplier(dev);
+ else if (fw_ret)
+ device_link_wait_for_optional_supplier(dev);
+ }
+
bus_probe_device(dev);
if (parent)
klist_add_tail(&dev->p->knode_parent,
@@ -2343,6 +2628,9 @@ void device_del(struct device *dev)
kill_device(dev);
device_unlock(dev);
+ if (dev->fwnode && dev->fwnode->dev == dev)
+ dev->fwnode->dev = NULL;
+
/* Notify clients of device removal. This call must come
* before dpm_sysfs_remove().
*/
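A hedged sketch of how a supplier driver could use the sync_state() flow wired up above (driver name and action are hypothetical); the core calls the hook once all known consumers have probed, or from the late_initcall resume path if none are waiting.

#include <linux/device.h>
#include <linux/platform_device.h>

static void foo_sync_state(struct device *dev)
{
        /* e.g. drop boot-time resources kept alive for not-yet-probed consumers */
        dev_dbg(dev, "all consumers probed\n");
}

static struct platform_driver foo_driver = {
        .driver = {
                .name           = "foo",
                .sync_state     = foo_sync_state,
        },
};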
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 3f9e274e2ed3..5b24f3959255 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -148,7 +148,7 @@ config FW_LOADER_USER_HELPER_FALLBACK
to be used for all firmware requests which explicitly do not disable
the fallback mechanism. The firmware call which does prohibit a fallback
mechanism is request_firmware_direct(). This option is kept for
- backward compatibility purposes given this precise mechanism can also
+ backward compatibility purposes given this precise mechanism can also
be enabled by setting the proc sysctl value to true:
/proc/sys/kernel/firmware_config/force_sysfs_fallback
@@ -169,5 +169,17 @@ config FW_LOADER_COMPRESS
be compressed with either none or crc32 integrity check type (pass
"-C crc32" option to xz command).
+config FW_CACHE
+ bool "Enable firmware caching during suspend"
+ depends on PM_SLEEP
+ default y if PM_SLEEP
+ help
+ Because firmware caching generates uevent messages that are sent
+ over a netlink socket, it can prevent suspend on many platforms.
+ It is also not always useful, so on such platforms we have the
+ option to disable it.
+
+ If unsure, say Y.
+
endif # FW_LOADER
endmenu
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 37e5ae387400..4a66888e7253 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -8,7 +8,8 @@ fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
-FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME))))
+comma := ,
+FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))))
ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long)
ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index bf44c79beae9..249add8c5e05 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2003 Manuel Estrada Sainz
*
- * Please see Documentation/firmware_class/ for more information.
+ * Please see Documentation/driver-api/firmware/ for more information.
*
*/
@@ -51,7 +51,7 @@ struct firmware_cache {
struct list_head head;
int state;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
/*
* Names of firmware images which have been cached successfully
* will be added into the below list so that device uncache
@@ -504,6 +504,7 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
path);
continue;
}
+ dev_dbg(device, "Loading firmware from %s\n", path);
if (decompress) {
dev_dbg(device, "f/w decompressing %s\n",
fw_priv->fw_name);
@@ -556,7 +557,7 @@ static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
(unsigned int)fw_priv->size);
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
static void fw_name_devm_release(struct device *dev, void *res)
{
struct fw_name_devm *fwn = res;
@@ -1046,7 +1047,7 @@ request_firmware_nowait(
}
EXPORT_SYMBOL(request_firmware_nowait);
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
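Call sites are not affected by the PM_SLEEP to FW_CACHE switch; a minimal, hypothetical request_firmware() user for context (firmware path made up):

#include <linux/firmware.h>

static int foo_load_fw(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "foo/foo-fw.bin", dev);
        if (ret)
                return ret;

        /* ... program fw->data (fw->size bytes) into the device ... */

        release_firmware(fw);
        return 0;
}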
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 84c4e1f72cbd..799b43191dea 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -19,15 +19,12 @@
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
-#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
-static DEFINE_MUTEX(mem_sysfs_mutex);
-
#define MEMORY_CLASS_NAME "memory"
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
@@ -538,12 +535,7 @@ static ssize_t soft_offline_page_store(struct device *dev,
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- if (!pfn_valid(pfn))
- return -ENXIO;
- /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
- if (!pfn_to_online_page(pfn))
- return -EIO;
- ret = soft_offline_page(pfn_to_page(pfn), 0);
+ ret = soft_offline_page(pfn, 0);
return ret == 0 ? count : ret;
}
@@ -705,6 +697,8 @@ static void unregister_memory(struct memory_block *memory)
* Create memory block devices for the given memory area. Start and size
* have to be aligned to memory block granularity. Memory block devices
* will be initialized as offline.
+ *
+ * Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size)
{
@@ -718,7 +712,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
!IS_ALIGNED(size, memory_block_size_bytes())))
return -EINVAL;
- mutex_lock(&mem_sysfs_mutex);
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
if (ret)
@@ -730,11 +723,12 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
for (block_id = start_block_id; block_id != end_block_id;
block_id++) {
mem = find_memory_block_by_id(block_id);
+ if (WARN_ON_ONCE(!mem))
+ continue;
mem->section_count = 0;
unregister_memory(mem);
}
}
- mutex_unlock(&mem_sysfs_mutex);
return ret;
}
@@ -742,6 +736,8 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
* Remove memory block devices for the given memory area. Start and size
* have to be aligned to memory block granularity. Memory block devices
* have to be offline.
+ *
+ * Called under device_hotplug_lock.
*/
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
@@ -754,7 +750,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
!IS_ALIGNED(size, memory_block_size_bytes())))
return;
- mutex_lock(&mem_sysfs_mutex);
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
@@ -763,7 +758,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
unregister_memory_block_under_nodes(mem);
unregister_memory(mem);
}
- mutex_unlock(&mem_sysfs_mutex);
}
/* return true if the memory block is offlined, otherwise, return false */
@@ -797,12 +791,13 @@ static const struct attribute_group *memory_root_attr_groups[] = {
};
/*
- * Initialize the sysfs support for memory devices...
+ * Initialize the sysfs support for memory devices. At the time this function
+ * is called, we cannot have concurrent creation/deletion of memory block
+ * devices, so the device_hotplug_lock is not needed.
*/
void __init memory_dev_init(void)
{
int ret;
- int err;
unsigned long block_sz, nr;
/* Validate the configured memory block size */
@@ -813,24 +808,19 @@ void __init memory_dev_init(void)
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
- goto out;
+ panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
* Create entries for memory sections that were found
* during boot and have been initialized
*/
- mutex_lock(&mem_sysfs_mutex);
for (nr = 0; nr <= __highest_present_section_nr;
nr += sections_per_block) {
- err = add_memory_block(nr);
- if (!ret)
- ret = err;
+ ret = add_memory_block(nr);
+ if (ret)
+ panic("%s() failed to add memory block: %d\n", __func__,
+ ret);
}
- mutex_unlock(&mem_sysfs_mutex);
-
-out:
- if (ret)
- panic("%s() failed: %d\n", __func__, ret);
}
/**
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b230beb6ccb4..7c532548b0a6 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -60,6 +60,7 @@ struct resource *platform_get_resource(struct platform_device *dev,
}
EXPORT_SYMBOL_GPL(platform_get_resource);
+#ifdef CONFIG_HAS_IOMEM
/**
* devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
* device
@@ -68,7 +69,6 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
* resource management
* @index: resource index
*/
-#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
{
@@ -78,9 +78,63 @@ void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
+
+/**
+ * devm_platform_ioremap_resource_wc - write-combined variant of
+ * devm_platform_ioremap_resource()
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @index: resource index
+ */
+void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
+ unsigned int index)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ return devm_ioremap_resource_wc(&pdev->dev, res);
+}
+
+/**
+ * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
+ * a platform device, retrieve the
+ * resource by name
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @name: name of the resource
+ */
+void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ return devm_ioremap_resource(&pdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
-static int __platform_get_irq(struct platform_device *dev, unsigned int num)
+/**
+ * platform_get_irq_optional - get an optional IRQ for a device
+ * @dev: platform device
+ * @num: IRQ number index
+ *
+ * Gets an IRQ for a platform device. Device drivers should check the return
+ * value for errors so as to not pass a negative integer value to the
+ * request_irq() APIs. This is the same as platform_get_irq(), except that it
+ * does not print an error message if an IRQ can not be obtained.
+ *
+ * Example:
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
@@ -89,9 +143,9 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
return dev->archdata.irqs[num];
#else
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
- int ret;
+ int ret;
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
ret = of_irq_get(dev->dev.of_node, num);
if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
@@ -100,8 +154,6 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
if (has_acpi_companion(&dev->dev)) {
if (r && r->flags & IORESOURCE_DISABLED) {
- int ret;
-
ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
if (ret)
return ret;
@@ -134,8 +186,7 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
* allows a common code path across either kind of resource.
*/
if (num == 0 && has_acpi_companion(&dev->dev)) {
- int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
-
+ ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
return ret;
@@ -144,6 +195,7 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
return -ENXIO;
#endif
}
+EXPORT_SYMBOL_GPL(platform_get_irq_optional);
/**
* platform_get_irq - get an IRQ for a device
@@ -165,7 +217,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
{
int ret;
- ret = __platform_get_irq(dev, num);
+ ret = platform_get_irq_optional(dev, num);
if (ret < 0 && ret != -EPROBE_DEFER)
dev_err(&dev->dev, "IRQ index %u not found\n", num);
@@ -174,29 +226,6 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
- * platform_get_irq_optional - get an optional IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
- *
- * Gets an IRQ for a platform device. Device drivers should check the return
- * value for errors so as to not pass a negative integer value to the
- * request_irq() APIs. This is the same as platform_get_irq(), except that it
- * does not print an error message if an IRQ can not be obtained.
- *
- * Example:
- * int irq = platform_get_irq_optional(pdev, 0);
- * if (irq < 0)
- * return irq;
- *
- * Return: IRQ number on success, negative error number on failure.
- */
-int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
-{
- return __platform_get_irq(dev, num);
-}
-EXPORT_SYMBOL_GPL(platform_get_irq_optional);
-
-/**
* platform_irq_count - Count the number of IRQs a platform device uses
* @dev: platform device
*
@@ -206,7 +235,7 @@ int platform_irq_count(struct platform_device *dev)
{
int ret, nr = 0;
- while ((ret = __platform_get_irq(dev, nr)) >= 0)
+ while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
nr++;
if (ret == -EPROBE_DEFER)
@@ -245,10 +274,9 @@ static int __platform_get_irq_byname(struct platform_device *dev,
const char *name)
{
struct resource *r;
+ int ret;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
- int ret;
-
ret = of_irq_get_byname(dev->dev.of_node, name);
if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
@@ -1278,6 +1306,11 @@ struct bus_type platform_bus_type = {
};
EXPORT_SYMBOL_GPL(platform_bus_type);
+static inline int __platform_match(struct device *dev, const void *drv)
+{
+ return platform_match(dev, (struct device_driver *)drv);
+}
+
/**
* platform_find_device_by_driver - Find a platform device with a given
* driver.
@@ -1288,7 +1321,7 @@ struct device *platform_find_device_by_driver(struct device *start,
const struct device_driver *drv)
{
return bus_find_device(&platform_bus_type, start, drv,
- (void *)platform_match);
+ __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
@@ -1296,8 +1329,6 @@ int __init platform_bus_init(void)
{
int error;
- early_platform_cleanup();
-
error = device_register(&platform_bus);
if (error) {
put_device(&platform_bus);
@@ -1309,289 +1340,3 @@ int __init platform_bus_init(void)
of_platform_register_reconfig_notifier();
return error;
}
-
-static __initdata LIST_HEAD(early_platform_driver_list);
-static __initdata LIST_HEAD(early_platform_device_list);
-
-/**
- * early_platform_driver_register - register early platform driver
- * @epdrv: early_platform driver structure
- * @buf: string passed from early_param()
- *
- * Helper function for early_platform_init() / early_platform_init_buffer()
- */
-int __init early_platform_driver_register(struct early_platform_driver *epdrv,
- char *buf)
-{
- char *tmp;
- int n;
-
- /* Simply add the driver to the end of the global list.
- * Drivers will by default be put on the list in compiled-in order.
- */
- if (!epdrv->list.next) {
- INIT_LIST_HEAD(&epdrv->list);
- list_add_tail(&epdrv->list, &early_platform_driver_list);
- }
-
- /* If the user has specified device then make sure the driver
- * gets prioritized. The driver of the last device specified on
- * command line will be put first on the list.
- */
- n = strlen(epdrv->pdrv->driver.name);
- if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
- list_move(&epdrv->list, &early_platform_driver_list);
-
- /* Allow passing parameters after device name */
- if (buf[n] == '\0' || buf[n] == ',')
- epdrv->requested_id = -1;
- else {
- epdrv->requested_id = simple_strtoul(&buf[n + 1],
- &tmp, 10);
-
- if (buf[n] != '.' || (tmp == &buf[n + 1])) {
- epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
- n = 0;
- } else
- n += strcspn(&buf[n + 1], ",") + 1;
- }
-
- if (buf[n] == ',')
- n++;
-
- if (epdrv->bufsize) {
- memcpy(epdrv->buffer, &buf[n],
- min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
- epdrv->buffer[epdrv->bufsize - 1] = '\0';
- }
- }
-
- return 0;
-}
-
-/**
- * early_platform_add_devices - adds a number of early platform devices
- * @devs: array of early platform devices to add
- * @num: number of early platform devices in array
- *
- * Used by early architecture code to register early platform devices and
- * their platform data.
- */
-void __init early_platform_add_devices(struct platform_device **devs, int num)
-{
- struct device *dev;
- int i;
-
- /* simply add the devices to list */
- for (i = 0; i < num; i++) {
- dev = &devs[i]->dev;
-
- if (!dev->devres_head.next) {
- pm_runtime_early_init(dev);
- INIT_LIST_HEAD(&dev->devres_head);
- list_add_tail(&dev->devres_head,
- &early_platform_device_list);
- }
- }
-}
-
-/**
- * early_platform_driver_register_all - register early platform drivers
- * @class_str: string to identify early platform driver class
- *
- * Used by architecture code to register all early platform drivers
- * for a certain class. If omitted then only early platform drivers
- * with matching kernel command line class parameters will be registered.
- */
-void __init early_platform_driver_register_all(char *class_str)
-{
- /* The "class_str" parameter may or may not be present on the kernel
- * command line. If it is present then there may be more than one
- * matching parameter.
- *
- * Since we register our early platform drivers using early_param()
- * we need to make sure that they also get registered in the case
- * when the parameter is missing from the kernel command line.
- *
- * We use parse_early_options() to make sure the early_param() gets
- * called at least once. The early_param() may be called more than
- * once since the name of the preferred device may be specified on
- * the kernel command line. early_platform_driver_register() handles
- * this case for us.
- */
- parse_early_options(class_str);
-}
-
-/**
- * early_platform_match - find early platform device matching driver
- * @epdrv: early platform driver structure
- * @id: id to match against
- */
-static struct platform_device * __init
-early_platform_match(struct early_platform_driver *epdrv, int id)
-{
- struct platform_device *pd;
-
- list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
- if (platform_match(&pd->dev, &epdrv->pdrv->driver))
- if (pd->id == id)
- return pd;
-
- return NULL;
-}
-
-/**
- * early_platform_left - check if early platform driver has matching devices
- * @epdrv: early platform driver structure
- * @id: return true if id or above exists
- */
-static int __init early_platform_left(struct early_platform_driver *epdrv,
- int id)
-{
- struct platform_device *pd;
-
- list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
- if (platform_match(&pd->dev, &epdrv->pdrv->driver))
- if (pd->id >= id)
- return 1;
-
- return 0;
-}
-
-/**
- * early_platform_driver_probe_id - probe drivers matching class_str and id
- * @class_str: string to identify early platform driver class
- * @id: id to match against
- * @nr_probe: number of platform devices to successfully probe before exiting
- */
-static int __init early_platform_driver_probe_id(char *class_str,
- int id,
- int nr_probe)
-{
- struct early_platform_driver *epdrv;
- struct platform_device *match;
- int match_id;
- int n = 0;
- int left = 0;
-
- list_for_each_entry(epdrv, &early_platform_driver_list, list) {
- /* only use drivers matching our class_str */
- if (strcmp(class_str, epdrv->class_str))
- continue;
-
- if (id == -2) {
- match_id = epdrv->requested_id;
- left = 1;
-
- } else {
- match_id = id;
- left += early_platform_left(epdrv, id);
-
- /* skip requested id */
- switch (epdrv->requested_id) {
- case EARLY_PLATFORM_ID_ERROR:
- case EARLY_PLATFORM_ID_UNSET:
- break;
- default:
- if (epdrv->requested_id == id)
- match_id = EARLY_PLATFORM_ID_UNSET;
- }
- }
-
- switch (match_id) {
- case EARLY_PLATFORM_ID_ERROR:
- pr_warn("%s: unable to parse %s parameter\n",
- class_str, epdrv->pdrv->driver.name);
- /* fall-through */
- case EARLY_PLATFORM_ID_UNSET:
- match = NULL;
- break;
- default:
- match = early_platform_match(epdrv, match_id);
- }
-
- if (match) {
- /*
- * Set up a sensible init_name to enable
- * dev_name() and others to be used before the
- * rest of the driver core is initialized.
- */
- if (!match->dev.init_name && slab_is_available()) {
- if (match->id != -1)
- match->dev.init_name =
- kasprintf(GFP_KERNEL, "%s.%d",
- match->name,
- match->id);
- else
- match->dev.init_name =
- kasprintf(GFP_KERNEL, "%s",
- match->name);
-
- if (!match->dev.init_name)
- return -ENOMEM;
- }
-
- if (epdrv->pdrv->probe(match))
- pr_warn("%s: unable to probe %s early.\n",
- class_str, match->name);
- else
- n++;
- }
-
- if (n >= nr_probe)
- break;
- }
-
- if (left)
- return n;
- else
- return -ENODEV;
-}
-
-/**
- * early_platform_driver_probe - probe a class of registered drivers
- * @class_str: string to identify early platform driver class
- * @nr_probe: number of platform devices to successfully probe before exiting
- * @user_only: only probe user specified early platform devices
- *
- * Used by architecture code to probe registered early platform drivers
- * within a certain class. For probe to happen a registered early platform
- * device matching a registered early platform driver is needed.
- */
-int __init early_platform_driver_probe(char *class_str,
- int nr_probe,
- int user_only)
-{
- int k, n, i;
-
- n = 0;
- for (i = -2; n < nr_probe; i++) {
- k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
-
- if (k < 0)
- break;
-
- n += k;
-
- if (user_only)
- break;
- }
-
- return n;
-}
-
-/**
- * early_platform_cleanup - clean up early platform code
- */
-void __init early_platform_cleanup(void)
-{
- struct platform_device *pd, *pd2;
-
- /* clean up the devres list used to chain devices */
- list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
- dev.devres_head) {
- list_del(&pd->dev.devres_head);
- memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
- }
-}
-
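A minimal sketch (resource name hypothetical) of the helpers exported above, which fold the platform_get_resource()/platform_get_resource_byname() and devm_ioremap_resource() pair into one call:

#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        void __iomem *regs;

        regs = devm_platform_ioremap_resource_byname(pdev, "ctrl");
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        return 0;
}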
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index ec5bb190b9d0..8fdd0073eeeb 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
+obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
new file mode 100644
index 000000000000..3115db08d56b
--- /dev/null
+++ b/drivers/base/power/qos-test.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+#include <kunit/test.h>
+#include <linux/pm_qos.h>
+
+/* Basic test for aggregating two "min" requests */
+static void freq_qos_test_min(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+
+ ret = freq_qos_remove_request(&req2);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+}
+
+/* Test that requests for MAX_DEFAULT_VALUE have no effect */
+static void freq_qos_test_maxdef(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX),
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* Add max 1000 */
+ ret = freq_qos_update_request(&req1, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Add max 2000, no impact */
+ ret = freq_qos_update_request(&req2, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Remove max 1000, new max 2000 */
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000);
+}
+
+/*
+ * Test that a freq_qos_request can be added again after removal
+ *
+ * This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency
+ * QoS requests after removal")
+ */
+static void freq_qos_test_readd(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req, 0, sizeof(req));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ /* Remove */
+ ret = freq_qos_remove_request(&req);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add again */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+}
+
+static struct kunit_case pm_qos_test_cases[] = {
+ KUNIT_CASE(freq_qos_test_min),
+ KUNIT_CASE(freq_qos_test_maxdef),
+ KUNIT_CASE(freq_qos_test_readd),
+ {},
+};
+
+static struct kunit_suite pm_qos_test_module = {
+ .name = "qos-kunit-test",
+ .test_cases = pm_qos_test_cases,
+};
+kunit_test_suite(pm_qos_test_module);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 350dcafd751f..8e93167f1783 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -115,10 +115,20 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
spin_lock_irqsave(&dev->power.lock, flags);
- if (type == DEV_PM_QOS_RESUME_LATENCY) {
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
: pm_qos_read_value(&qos->resume_latency);
- } else {
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
+ break;
+ default:
WARN_ON(1);
ret = 0;
}
@@ -159,6 +169,10 @@ static int apply_constraint(struct dev_pm_qos_request *req,
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_apply(&req->data.freq, action, value);
+ break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -209,6 +223,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
+ freq_constraints_init(&qos->freq);
+
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -269,6 +285,20 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
memset(req, 0, sizeof(*req));
}
+ c = &qos->freq.min_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->freq.max_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -314,11 +344,22 @@ static int __dev_pm_qos_add_request(struct device *dev,
ret = dev_pm_qos_constraints_allocate(dev);
trace_dev_pm_qos_add_request(dev_name(dev), type, value);
- if (!ret) {
- req->dev = dev;
- req->type = type;
+ if (ret)
+ return ret;
+
+ req->dev = dev;
+ req->type = type;
+ if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MIN, value);
+ else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MAX, value);
+ else
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
- }
+
return ret;
}
@@ -382,6 +423,10 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ curr_value = req->data.freq.pnode.prio;
+ break;
case DEV_PM_QOS_FLAGS:
curr_value = req->data.flr.flags;
break;
@@ -507,6 +552,14 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
default:
WARN_ON(1);
ret = -EINVAL;
@@ -546,6 +599,14 @@ int dev_pm_qos_remove_notifier(struct device *dev,
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier);
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
default:
WARN_ON(1);
ret = -EINVAL;
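A hedged example of the new per-device frequency request types handled above (device, storage and value are illustrative; the value is assumed to be in kHz, matching freq_qos usage):

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_min_freq_req;

static int foo_pin_min_freq(struct device *dev)
{
        return dev_pm_qos_add_request(dev, &foo_min_freq_req,
                                      DEV_PM_QOS_MIN_FREQUENCY, 800000);
}

static void foo_unpin_min_freq(void)
{
        dev_pm_qos_remove_request(&foo_min_freq_req);
}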
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 81bd01ed4042..511f6d7acdfe 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -557,6 +557,42 @@ int device_add_properties(struct device *dev,
EXPORT_SYMBOL_GPL(device_add_properties);
/**
+ * fwnode_get_name - Return the name of a node
+ * @fwnode: The firmware node
+ *
+ * Returns a pointer to the node name.
+ */
+const char *fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name);
+}
+
+/**
+ * fwnode_get_name_prefix - Return the prefix of node for printing purposes
+ * @fwnode: The firmware node
+ *
+ * Returns the prefix of a node, intended to be printed right before the node.
+ * The prefix also works as a separator between the nodes.
+ */
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name_prefix);
+}
+
+/**
+ * fwnode_get_parent - Return parent firmware node
+ * @fwnode: Firmware whose parent is retrieved
+ *
+ * Return parent firmware node of the given node if possible or %NULL if no
+ * parent was available.
+ */
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_parent);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_parent);
+
+/**
* fwnode_get_next_parent - Iterate to the node's parent
* @fwnode: Firmware whose parent is retrieved
*
@@ -578,17 +614,50 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
- * fwnode_get_parent - Return parent firwmare node
- * @fwnode: Firmware whose parent is retrieved
+ * fwnode_count_parents - Return the number of parents a node has
+ * @fwnode: The node the parents of which are to be counted
*
- * Return parent firmware node of the given node if possible or %NULL if no
- * parent was available.
+ * Returns the number of parents a node has.
*/
-struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
{
- return fwnode_call_ptr_op(fwnode, get_parent);
+ struct fwnode_handle *__fwnode;
+ unsigned int count;
+
+ __fwnode = fwnode_get_parent(fwnode);
+
+ for (count = 0; __fwnode; count++)
+ __fwnode = fwnode_get_next_parent(__fwnode);
+
+ return count;
}
-EXPORT_SYMBOL_GPL(fwnode_get_parent);
+EXPORT_SYMBOL_GPL(fwnode_count_parents);
+
+/**
+ * fwnode_get_nth_parent - Return an nth parent of a node
+ * @fwnode: The node the parent of which is requested
+ * @depth: Distance of the parent from the node
+ *
+ * Returns the nth parent of a node. If there is no parent at the requested
+ * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
+ * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
+ *
+ * The caller is responsible for calling fwnode_handle_put() for the returned
+ * node.
+ */
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
+ unsigned int depth)
+{
+ unsigned int i;
+
+ fwnode_handle_get(fwnode);
+
+ for (i = 0; i < depth && fwnode; i++)
+ fwnode = fwnode_get_next_parent(fwnode);
+
+ return fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
* fwnode_get_next_child_node - Return the next child node handle for a node
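A small hedged sketch of the ancestry helpers added above (caller context hypothetical); note that fwnode_get_nth_parent() returns a reference that must be dropped:

#include <linux/printk.h>
#include <linux/property.h>

static void foo_show_root(struct fwnode_handle *fwnode)
{
        unsigned int depth = fwnode_count_parents(fwnode);
        struct fwnode_handle *root = fwnode_get_nth_parent(fwnode, depth);

        pr_info("%s is %u levels below %s\n", fwnode_get_name(fwnode), depth,
                fwnode_get_name(root));
        fwnode_handle_put(root);
}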
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 7c0c5ca5953d..4af11a423475 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -104,15 +104,12 @@ static const struct attribute_group soc_attr_group = {
.is_visible = soc_attribute_mode,
};
-static const struct attribute_group *soc_attr_groups[] = {
- &soc_attr_group,
- NULL,
-};
-
static void soc_release(struct device *dev)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
+ kfree(soc_dev->dev.groups);
kfree(soc_dev);
}
@@ -121,6 +118,7 @@ static struct soc_device_attribute *early_soc_dev_attr;
struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
{
struct soc_device *soc_dev;
+ const struct attribute_group **soc_attr_groups;
int ret;
if (!soc_bus_type.p) {
@@ -136,10 +134,18 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
goto out1;
}
+ soc_attr_groups = kcalloc(3, sizeof(*soc_attr_groups), GFP_KERNEL);
+ if (!soc_attr_groups) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+ soc_attr_groups[0] = &soc_attr_group;
+ soc_attr_groups[1] = soc_dev_attr->custom_attr_group;
+
/* Fetch a unique (reclaimable) SOC ID. */
ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
- goto out2;
+ goto out3;
soc_dev->soc_dev_num = ret;
soc_dev->attr = soc_dev_attr;
@@ -150,15 +156,15 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
ret = device_register(&soc_dev->dev);
- if (ret)
- goto out3;
+ if (ret) {
+ put_device(&soc_dev->dev);
+ return ERR_PTR(ret);
+ }
return soc_dev;
out3:
- ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
- put_device(&soc_dev->dev);
- soc_dev = NULL;
+ kfree(soc_attr_groups);
out2:
kfree(soc_dev);
out1:
@@ -169,8 +175,6 @@ EXPORT_SYMBOL_GPL(soc_device_register);
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
{
- ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
-
device_unregister(&soc_dev->dev);
early_soc_dev_attr = NULL;
}
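A hedged sketch of the custom_attr_group hook used above (SoC name and attribute group are made up; the field is assumed to be added to struct soc_device_attribute by the matching include/linux/sys_soc.h change, which is outside this diff):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/sys_soc.h>

static const struct attribute_group foo_soc_group = {
        .name = "foo",
        /* .attrs = platform-specific attributes */
};

static struct soc_device_attribute foo_soc_attr = {
        .family = "Foo SoC",
        .custom_attr_group = &foo_soc_group,
};

static int __init foo_soc_init(void)
{
        return PTR_ERR_OR_ZERO(soc_device_register(&foo_soc_attr));
}
device_initcall(foo_soc_init);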
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index a1f3f0994f9f..d8d0dc0ca5ac 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -71,9 +71,9 @@ software_node_to_swnode(const struct software_node *node)
return swnode;
}
-const struct software_node *to_software_node(struct fwnode_handle *fwnode)
+const struct software_node *to_software_node(const struct fwnode_handle *fwnode)
{
- struct swnode *swnode = to_swnode(fwnode);
+ const struct swnode *swnode = to_swnode(fwnode);
return swnode ? swnode->node : NULL;
}
@@ -103,71 +103,15 @@ property_entry_get(const struct property_entry *prop, const char *name)
return NULL;
}
-static void
-property_set_pointer(struct property_entry *prop, const void *pointer)
-{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- prop->pointer.u8_data = pointer;
- else
- prop->value.u8_data = *((u8 *)pointer);
- break;
- case DEV_PROP_U16:
- if (prop->is_array)
- prop->pointer.u16_data = pointer;
- else
- prop->value.u16_data = *((u16 *)pointer);
- break;
- case DEV_PROP_U32:
- if (prop->is_array)
- prop->pointer.u32_data = pointer;
- else
- prop->value.u32_data = *((u32 *)pointer);
- break;
- case DEV_PROP_U64:
- if (prop->is_array)
- prop->pointer.u64_data = pointer;
- else
- prop->value.u64_data = *((u64 *)pointer);
- break;
- case DEV_PROP_STRING:
- if (prop->is_array)
- prop->pointer.str = pointer;
- else
- prop->value.str = pointer;
- break;
- default:
- break;
- }
-}
-
static const void *property_get_pointer(const struct property_entry *prop)
{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- return prop->pointer.u8_data;
- return &prop->value.u8_data;
- case DEV_PROP_U16:
- if (prop->is_array)
- return prop->pointer.u16_data;
- return &prop->value.u16_data;
- case DEV_PROP_U32:
- if (prop->is_array)
- return prop->pointer.u32_data;
- return &prop->value.u32_data;
- case DEV_PROP_U64:
- if (prop->is_array)
- return prop->pointer.u64_data;
- return &prop->value.u64_data;
- case DEV_PROP_STRING:
- if (prop->is_array)
- return prop->pointer.str;
- return &prop->value.str;
- default:
+ if (!prop->length)
return NULL;
- }
+
+ if (prop->is_array)
+ return prop->pointer;
+
+ return &prop->value;
}
static const void *property_entry_find(const struct property_entry *props,
@@ -187,66 +131,6 @@ static const void *property_entry_find(const struct property_entry *props,
return pointer;
}
-static int property_entry_read_u8_array(const struct property_entry *props,
- const char *propname,
- u8 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u16_array(const struct property_entry *props,
- const char *propname,
- u16 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u32_array(const struct property_entry *props,
- const char *propname,
- u32 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u64_array(const struct property_entry *props,
- const char *propname,
- u64 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
static int
property_entry_count_elems_of_size(const struct property_entry *props,
const char *propname, size_t length)
@@ -265,49 +149,45 @@ static int property_entry_read_int_array(const struct property_entry *props,
unsigned int elem_size, void *val,
size_t nval)
{
+ const void *pointer;
+ size_t length;
+
if (!val)
return property_entry_count_elems_of_size(props, name,
elem_size);
- switch (elem_size) {
- case sizeof(u8):
- return property_entry_read_u8_array(props, name, val, nval);
- case sizeof(u16):
- return property_entry_read_u16_array(props, name, val, nval);
- case sizeof(u32):
- return property_entry_read_u32_array(props, name, val, nval);
- case sizeof(u64):
- return property_entry_read_u64_array(props, name, val, nval);
- }
- return -ENXIO;
+ if (!is_power_of_2(elem_size) || elem_size > sizeof(u64))
+ return -ENXIO;
+
+ length = nval * elem_size;
+
+ pointer = property_entry_find(props, name, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(val, pointer, length);
+ return 0;
}
static int property_entry_read_string_array(const struct property_entry *props,
const char *propname,
const char **strings, size_t nval)
{
- const struct property_entry *prop;
const void *pointer;
- size_t array_len, length;
+ size_t length;
+ int array_len;
/* Find out the array length. */
- prop = property_entry_get(props, propname);
- if (!prop)
- return -EINVAL;
-
- if (prop->is_array)
- /* Find the length of an array. */
- array_len = property_entry_count_elems_of_size(props, propname,
- sizeof(const char *));
- else
- /* The array length for a non-array string property is 1. */
- array_len = 1;
+ array_len = property_entry_count_elems_of_size(props, propname,
+ sizeof(const char *));
+ if (array_len < 0)
+ return array_len;
/* Return how many there are if strings is NULL. */
if (!strings)
return array_len;
- array_len = min(nval, array_len);
+ array_len = min_t(size_t, nval, array_len);
length = array_len * sizeof(*strings);
pointer = property_entry_find(props, propname, length);
@@ -322,13 +202,15 @@ static int property_entry_read_string_array(const struct property_entry *props,
static void property_entry_free_data(const struct property_entry *p)
{
const void *pointer = property_get_pointer(p);
+ const char * const *src_str;
size_t i, nval;
if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer.str) {
+ if (p->type == DEV_PROP_STRING && p->pointer) {
+ src_str = p->pointer;
nval = p->length / sizeof(const char *);
for (i = 0; i < nval; i++)
- kfree(p->pointer.str[i]);
+ kfree(src_str[i]);
}
kfree(pointer);
} else if (p->type == DEV_PROP_STRING) {
@@ -337,29 +219,29 @@ static void property_entry_free_data(const struct property_entry *p)
kfree(p->name);
}
-static int property_copy_string_array(struct property_entry *dst,
- const struct property_entry *src)
+static const char * const *
+property_copy_string_array(const struct property_entry *src)
{
const char **d;
+ const char * const *src_str = src->pointer;
size_t nval = src->length / sizeof(*d);
int i;
d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
if (!d)
- return -ENOMEM;
+ return NULL;
for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL);
- if (!d[i] && src->pointer.str[i]) {
+ d[i] = kstrdup(src_str[i], GFP_KERNEL);
+ if (!d[i] && src_str[i]) {
while (--i >= 0)
kfree(d[i]);
kfree(d);
- return -ENOMEM;
+ return NULL;
}
}
- dst->pointer.str = d;
- return 0;
+ return d;
}
static int property_entry_copy_data(struct property_entry *dst,
@@ -367,36 +249,35 @@ static int property_entry_copy_data(struct property_entry *dst,
{
const void *pointer = property_get_pointer(src);
const void *new;
- int error;
if (src->is_array) {
if (!src->length)
return -ENODATA;
if (src->type == DEV_PROP_STRING) {
- error = property_copy_string_array(dst, src);
- if (error)
- return error;
- new = dst->pointer.str;
+ new = property_copy_string_array(src);
+ if (!new)
+ return -ENOMEM;
} else {
new = kmemdup(pointer, src->length, GFP_KERNEL);
if (!new)
return -ENOMEM;
}
+
+ dst->is_array = true;
+ dst->pointer = new;
} else if (src->type == DEV_PROP_STRING) {
new = kstrdup(src->value.str, GFP_KERNEL);
if (!new && src->value.str)
return -ENOMEM;
+
+ dst->value.str = new;
} else {
- new = pointer;
+ dst->value = src->value;
}
dst->length = src->length;
- dst->is_array = src->is_array;
dst->type = src->type;
-
- property_set_pointer(dst, new);
-
dst->name = kstrdup(src->name, GFP_KERNEL);
if (!dst->name)
goto out_free_data;
@@ -515,12 +396,47 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
propname, val, nval);
}
+static const char *
+software_node_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct swnode *swnode = to_swnode(fwnode);
+
+ if (!swnode)
+ return "(null)";
+
+ return kobject_name(&swnode->kobj);
+}
+
+static const char *
+software_node_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ const char *prefix;
+
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Figure out the prefix from the parents. */
+ while (is_software_node(parent))
+ parent = fwnode_get_next_parent(parent);
+
+ prefix = fwnode_get_name_prefix(parent);
+ fwnode_handle_put(parent);
+
+ /* Guess something if prefix was NULL. */
+ return prefix ?: "/";
+}
+
static struct fwnode_handle *
software_node_get_parent(const struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
- return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL;
+ if (!swnode || !swnode->parent)
+ return NULL;
+
+ return fwnode_handle_get(&swnode->parent->fwnode);
}
static struct fwnode_handle *
@@ -612,6 +528,8 @@ static const struct fwnode_operations software_node_ops = {
.property_present = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
+ .get_name = software_node_get_name,
+ .get_name_prefix = software_node_get_name_prefix,
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
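The property_entry rework above folds the four per-width readers into one integer path and drops the typed pointer union, but the consumer-facing API is unchanged. A small consumer-side sketch (hypothetical driver; device_add_properties() is assumed as the way the entries get attached):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/property.h>

static const u32 demo_rates[] = { 100, 200, 400 };

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32_ARRAY("clock-rates", demo_rates),
	PROPERTY_ENTRY_STRING("label", "demo"),
	{ }	/* sentinel */
};

static int demo_setup(struct device *dev)
{
	u32 rates[ARRAY_SIZE(demo_rates)];
	const char *label;
	int ret;

	ret = device_add_properties(dev, demo_props);
	if (ret)
		return ret;

	/* Both reads funnel into property_entry_read_*_array() above. */
	ret = device_property_read_u32_array(dev, "clock-rates", rates,
					     ARRAY_SIZE(rates));
	if (ret)
		return ret;

	return device_property_read_string(dev, "label", &label);
}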
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index bd7d3bb8b890..1553d41f0b91 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -857,7 +857,7 @@ static void fd_calibrate( void )
}
if (ATARIHW_PRESENT(FDCSPEED))
- dma_wd.fdc_speed = 0; /* always seek with 8 Mhz */;
+ dma_wd.fdc_speed = 0; /* always seek with 8 Mhz */
DPRINT(("fd_calibrate\n"));
SET_IRQ_HANDLER( fd_calibrate_done );
/* we can't verify, since the speed may be incorrect */
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f86cea4c0f8d..840c3aef3c5c 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -884,7 +884,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
start_new_tl_epoch(connection);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
break;
- };
+ }
return rv;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 76457003f140..ee67bf929fac 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2663,6 +2663,28 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
return ret;
}
+#ifdef CONFIG_COMPAT
+static int pkt_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ /* compatible */
+ case CDROMEJECT:
+ case CDROMMULTISESSION:
+ case CDROMREADTOCENTRY:
+ case SCSI_IOCTL_SEND_COMMAND:
+ return pkt_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg));
+
+
+ /* FIXME: no handler so far */
+ case CDROM_LAST_WRITTEN:
+ /* handled in compat_blkdev_driver_ioctl */
+ case CDROM_SEND_PACKET:
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
+
static unsigned int pkt_check_events(struct gendisk *disk,
unsigned int clearing)
{
@@ -2684,6 +2706,9 @@ static const struct block_device_operations pktcdvd_ops = {
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pkt_compat_ioctl,
+#endif
.check_events = pkt_check_events,
};
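pkt_compat_ioctl() keeps a per-driver handler because some of the commands need more than pointer translation; the character-device hunks further down (ipmi_watchdog, ppdev, random, and the vTPM proxy) instead switch to the new generic compat_ptr_ioctl() helper, which is enough when every command takes a plain user pointer. A sketch of that fops-side pattern (illustrative driver and command):

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define DEMO_GET_VERSION	_IOR('d', 0, int)	/* illustrative command */

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int __user *argp = (int __user *)arg;

	switch (cmd) {
	case DEMO_GET_VERSION:
		/* argument is a plain pointer, so compat needs no fixups */
		return put_user(1, argp);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_ioctl,
	/* replaces a hand-written wrapper that only called compat_ptr(arg) */
	.compat_ioctl	= compat_ptr_ioctl,
};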
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b2fd630de85..571612e233fe 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -634,7 +634,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
case VD_OP_GET_EFI:
case VD_OP_SET_EFI:
return -EOPNOTSUPP;
- };
+ }
map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6b331061d34b..97ab5ad171d4 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -150,6 +150,15 @@ config TEGRA_GMI
Driver for the Tegra Generic Memory Interface bus which can be used
to attach devices such as NOR, UART, FPGA and more.
+config TI_PWMSS
+ bool
+ default y if (ARCH_OMAP2PLUS) && (PWM_TIECAP || PWM_TIEHRPWM || TI_EQEP)
+ help
+ PWM Subsystem driver support for AM33xx SOC.
+
+ PWM submodules require PWM config space access from submodule
+ drivers and require common parent driver support.
+
config TI_SYSC
bool "TI sysc interconnect target module driver"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 16b43d3468c6..1320bcf9fa9d 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
+obj-$(CONFIG_TI_PWMSS) += ti-pwmss.o
obj-$(CONFIG_TI_SYSC) += ti-sysc.o
obj-$(CONFIG_TS_NBUS) += ts-nbus.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/bus/ti-pwmss.c
index e9c26c94251b..e9c26c94251b 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/bus/ti-pwmss.c
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index ac42ae4651ce..eebdcbef0578 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -996,6 +996,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
tracks->xa = 0;
tracks->error = 0;
cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
+ tracks->error = CDS_NO_INFO;
+ return;
+ }
+
/* Grab the TOC header so we can see how many tracks there are */
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
if (ret) {
@@ -1162,7 +1168,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
ret = open_for_data(cdi);
if (ret)
goto err;
- cdrom_mmc3_profile(cdi);
+ if (CDROM_CAN(CDC_GENERIC_PACKET))
+ cdrom_mmc3_profile(cdi);
if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
@@ -2882,6 +2889,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
it doesn't give enough information or fails. then we return
the toc contents. */
use_toc:
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+
toc.cdte_format = CDROM_MSF;
toc.cdte_track = CDROM_LEADOUT;
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index df0fc997dc3e..26956c006987 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -439,8 +439,8 @@ config RAW_DRIVER
Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
See the raw(8) manpage for more details.
- Applications should preferably open the device (eg /dev/hda1)
- with the O_DIRECT flag.
+ Applications should preferably open the device (eg /dev/hda1)
+ with the O_DIRECT flag.
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
@@ -559,4 +559,4 @@ config RANDOM_TRUST_BOOTLOADER
device randomness. Say Y here to assume the entropy provided by the
booloader is trustworthy so it will be added to the kernel's entropy
pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool.
\ No newline at end of file
+ only mixes the entropy pool.
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 812d6aa6e013..bc54235a7022 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -63,7 +63,7 @@ config AGP_AMD64
This option gives you AGP support for the GLX component of
X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
You still need an external AGP bridge like the AMD 8151, VIA
- K8T400M, SiS755. It may also support other AGP bridges when loaded
+ K8T400M, SiS755. It may also support other AGP bridges when loaded
with agp_try_unsupported=1.
config AGP_INTEL
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 3daae8ddd511..8486c29d8324 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -386,17 +386,17 @@ config HW_RANDOM_MESON
If unsure, say Y.
config HW_RANDOM_CAVIUM
- tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Cavium SoCs.
+ tristate "Cavium ThunderX Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Cavium SoCs.
- To compile this driver as a module, choose M here: the
- module will be called cavium_rng.
+ To compile this driver as a module, choose M here: the
+ module will be called cavium_rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_MTK
tristate "Mediatek Random Number Generator support"
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 4bad0614109b..7dc2c3ec4051 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -4,38 +4,38 @@
#
menuconfig IPMI_HANDLER
- tristate 'IPMI top-level message handler'
- depends on HAS_IOMEM
- select IPMI_DMI_DECODE if DMI
- help
- This enables the central IPMI message handler, required for IPMI
- to work.
+ tristate 'IPMI top-level message handler'
+ depends on HAS_IOMEM
+ select IPMI_DMI_DECODE if DMI
+ help
+ This enables the central IPMI message handler, required for IPMI
+ to work.
- IPMI is a standard for managing sensors (temperature,
- voltage, etc.) in a system.
+ IPMI is a standard for managing sensors (temperature,
+ voltage, etc.) in a system.
- See <file:Documentation/IPMI.txt> for more details on the driver.
+ See <file:Documentation/IPMI.txt> for more details on the driver.
- If unsure, say N.
+ If unsure, say N.
config IPMI_DMI_DECODE
- select IPMI_PLAT_DATA
- bool
+ select IPMI_PLAT_DATA
+ bool
config IPMI_PLAT_DATA
- bool
+ bool
if IPMI_HANDLER
config IPMI_PANIC_EVENT
- bool 'Generate a panic event to all BMCs on a panic'
- help
- When a panic occurs, this will cause the IPMI message handler to,
- by default, generate an IPMI event describing the panic to each
- interface registered with the message handler. This is always
- available, the module parameter for ipmi_msghandler named
- panic_op can be set to "event" to chose this value, this config
- simply causes the default value to be set to "event".
+ bool 'Generate a panic event to all BMCs on a panic'
+ help
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate an IPMI event describing the panic to each
+ interface registered with the message handler. This is always
+ available, the module parameter for ipmi_msghandler named
+ panic_op can be set to "event" to chose this value, this config
+ simply causes the default value to be set to "event".
config IPMI_PANIC_STRING
bool 'Generate OEM events containing the panic string'
@@ -54,43 +54,43 @@ config IPMI_PANIC_STRING
causes the default value to be set to "string".
config IPMI_DEVICE_INTERFACE
- tristate 'Device interface for IPMI'
- help
- This provides an IOCTL interface to the IPMI message handler so
- userland processes may use IPMI. It supports poll() and select().
+ tristate 'Device interface for IPMI'
+ help
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
config IPMI_SI
- tristate 'IPMI System Interface handler'
- select IPMI_PLAT_DATA
- help
- Provides a driver for System Interfaces (KCS, SMIC, BT).
- Currently, only KCS and SMIC are supported. If
- you are using IPMI, you should probably say "y" here.
+ tristate 'IPMI System Interface handler'
+ select IPMI_PLAT_DATA
+ help
+ Provides a driver for System Interfaces (KCS, SMIC, BT).
+ Currently, only KCS and SMIC are supported. If
+ you are using IPMI, you should probably say "y" here.
config IPMI_SSIF
- tristate 'IPMI SMBus handler (SSIF)'
- select I2C
- help
- Provides a driver for a SMBus interface to a BMC, meaning that you
- have a driver that must be accessed over an I2C bus instead of a
- standard interface. This module requires I2C support.
+ tristate 'IPMI SMBus handler (SSIF)'
+ select I2C
+ help
+ Provides a driver for a SMBus interface to a BMC, meaning that you
+ have a driver that must be accessed over an I2C bus instead of a
+ standard interface. This module requires I2C support.
config IPMI_POWERNV
- depends on PPC_POWERNV
- tristate 'POWERNV (OPAL firmware) IPMI interface'
- help
- Provides a driver for OPAL firmware-based IPMI interfaces.
+ depends on PPC_POWERNV
+ tristate 'POWERNV (OPAL firmware) IPMI interface'
+ help
+ Provides a driver for OPAL firmware-based IPMI interfaces.
config IPMI_WATCHDOG
- tristate 'IPMI Watchdog Timer'
- help
- This enables the IPMI watchdog timer.
+ tristate 'IPMI Watchdog Timer'
+ help
+ This enables the IPMI watchdog timer.
config IPMI_POWEROFF
- tristate 'IPMI Poweroff'
- help
- This enables a function to power off the system with IPMI if
- the IPMI management controller is capable of this.
+ tristate 'IPMI Poweroff'
+ help
+ This enables a function to power off the system with IPMI if
+ the IPMI management controller is capable of this.
endif # IPMI_HANDLER
@@ -126,7 +126,7 @@ config NPCM7XX_KCS_IPMI_BMC
config ASPEED_BT_IPMI_BMC
depends on ARCH_ASPEED || COMPILE_TEST
- depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+ depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
tristate "BT IPMI bmc driver"
help
Provides a driver for the BT (Block Transfer) IPMI interface
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 74c6d1f34132..55986e10a124 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -893,6 +893,7 @@ static const struct file_operations ipmi_wdog_fops = {
.poll = ipmi_poll,
.write = ipmi_write,
.unlocked_ioctl = ipmi_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 7c9269e3477a..bd95aba1f9fe 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -713,6 +713,10 @@ static int lp_set_timeout64(unsigned int minor, void __user *arg)
if (copy_from_user(karg, arg, sizeof(karg)))
return -EFAULT;
+ /* sparc64 suseconds_t is 32-bit only */
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ karg[1] >>= 32;
+
return lp_set_timeout(minor, karg[0], karg[1]);
}
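The shift above exists because sparc64 defines suseconds_t as a 32-bit int; on that big-endian ABI the microseconds value therefore sits in the upper half of the 64-bit slot the kernel copies in, while compat (32-bit) callers already pass two clean 32-bit words. A sketch of the unpacking with that layout assumption spelled out:

#include <linux/compat.h>
#include <linux/kernel.h>

/*
 * Assumption: on native sparc64 the 32-bit microseconds field occupies
 * the upper 32 bits of the 64-bit word copied from user space.
 */
static s64 demo_usecs_from_slot(s64 slot)
{
	if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
		return slot >> 32;	/* native sparc64 caller */

	return slot;			/* everyone else, including compat */
}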
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index c86f18aa8985..2c2381a806ae 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -619,20 +619,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(time32, argp, sizeof(time32)))
return -EFAULT;
+ if ((time32[0] < 0) || (time32[1] < 0))
+ return -EINVAL;
+
return pp_set_timeout(pp->pdev, time32[0], time32[1]);
case PPSETTIME64:
if (copy_from_user(time64, argp, sizeof(time64)))
return -EFAULT;
+ if ((time64[0] < 0) || (time64[1] < 0))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] >>= 32;
+
return pp_set_timeout(pp->pdev, time64[0], time64[1]);
case PPGETTIME32:
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time32[0] = ts.tv_sec;
time32[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time32[0] < 0) || (time32[1] < 0))
- return -EINVAL;
if (copy_to_user(argp, time32, sizeof(time32)))
return -EFAULT;
@@ -643,8 +650,9 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time64[0] = ts.tv_sec;
time64[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time64[0] < 0) || (time64[1] < 0))
- return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] <<= 32;
if (copy_to_user(argp, time64, sizeof(time64)))
return -EFAULT;
@@ -670,14 +678,6 @@ static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-#ifdef CONFIG_COMPAT
-static long pp_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static int pp_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
@@ -786,9 +786,7 @@ static const struct file_operations pp_fops = {
.write = pp_write,
.poll = pp_poll,
.unlocked_ioctl = pp_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pp_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = pp_open,
.release = pp_release,
};
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 01b8868b9bed..909e0c3d82ea 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2166,6 +2166,7 @@ const struct file_operations random_fops = {
.write = random_write,
.poll = random_poll,
.unlocked_ioctl = random_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
};
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 2f6e087ec496..91c772e38bb5 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -670,20 +670,10 @@ static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vtpmx_fops_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vtpmx_fops_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vtpmx_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = vtpmx_fops_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vtpmx_fops_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 3259426f01dc..4df9b40d6342 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -919,6 +919,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
.pos = *ppos,
.u.data = &sgl,
};
+ unsigned int occupancy;
/*
* Rproc_serial does not yet support splice. To support splice
@@ -929,21 +930,18 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
if (is_rproc_serial(port->out_vq->vdev))
return -EINVAL;
- /*
- * pipe->nrbufs == 0 means there are no data to transfer,
- * so this returns just 0 for no data.
- */
pipe_lock(pipe);
- if (!pipe->nrbufs) {
- ret = 0;
+ ret = 0;
+ if (pipe_empty(pipe->head, pipe->tail))
goto error_out;
- }
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
- buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
+ occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ buf = alloc_buf(port->portdev->vdev, 0, occupancy);
+
if (!buf) {
ret = -ENOMEM;
goto error_out;
@@ -951,7 +949,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
sgl.n = 0;
sgl.len = 0;
- sgl.size = pipe->nrbufs;
+ sgl.size = occupancy;
sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
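The conversion above follows the pipe ring rework, which replaces the pipe->nrbufs counter with head and tail indices. A sketch of what the two helpers used here compute, assuming the upstream definition where head and tail are free-running unsigned indices into a power-of-two ring:

#include <linux/types.h>

static inline unsigned int demo_pipe_occupancy(unsigned int head,
					       unsigned int tail)
{
	/* unsigned subtraction stays correct across index wrap-around */
	return head - tail;
}

static inline bool demo_pipe_empty(unsigned int head, unsigned int tail)
{
	return demo_pipe_occupancy(head, tail) == 0;
}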
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index bfafd8f5e826..96b6de8a30e5 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -116,7 +116,6 @@ static int xilly_drv_probe(struct platform_device *op)
struct xilly_endpoint *endpoint;
int rc;
int irq;
- struct resource *res;
struct xilly_endpoint_hardware *ephw = &of_hw;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
@@ -129,9 +128,7 @@ static int xilly_drv_probe(struct platform_device *op)
dev_set_drvdata(dev, endpoint);
- res = platform_get_resource(op, IORESOURCE_MEM, 0);
- endpoint->registers = devm_ioremap_resource(dev, res);
-
+ endpoint->registers = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(endpoint->registers))
return PTR_ERR(endpoint->registers);
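This hunk, like several of the clock-driver conversions below, folds the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(), which looks up the nth MEM resource itself. A probe-side sketch (illustrative driver):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* looks up MEM resource 0 and ioremaps it, device-managed */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	writel(0x1, base);	/* illustrative register write */
	return 0;
}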
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c44247d0b83e..dc920daa6dbb 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -136,6 +136,13 @@ config COMMON_CLK_SI570
This driver supports Silicon Labs 570/571/598/599 programmable
clock generators.
+config COMMON_CLK_BM1880
+ bool "Clock driver for Bitmain BM1880 SoC"
+ depends on ARCH_BITMAIN || COMPILE_TEST
+ default ARCH_BITMAIN
+ help
+ This driver supports the clocks on Bitmain BM1880 SoC.
+
config COMMON_CLK_CDCE706
tristate "Clock driver for TI CDCE706 clock synthesizer"
depends on I2C
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 0138fb14e6f8..0696a0c1ab58 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o
obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o
obj-$(CONFIG_COMMON_CLK_BD718XX) += clk-bd718x7.o
+obj-$(CONFIG_COMMON_CLK_BM1880) += clk-bm1880.o
obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index fac0ca56d42d..15dc4cd86d76 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -487,8 +487,7 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
if (IS_ERR(slow_osc))
goto unregister_slow_rc;
- clk_data = kzalloc(sizeof(*clk_data) + (2 * sizeof(struct clk_hw *)),
- GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, 2), GFP_KERNEL);
if (!clk_data)
goto unregister_slow_osc;
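struct_size(clk_data, hws, 2) replaces the open-coded "sizeof(struct) + n * sizeof(element)" arithmetic for a structure ending in a flexible array, and saturates rather than wrapping if the multiplication would overflow. The same idiom in isolation (illustrative structure):

#include <linux/clk-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_clk_data {
	unsigned int num;
	struct clk_hw *hws[];	/* flexible array member */
};

static struct demo_clk_data *demo_alloc(unsigned int nhws)
{
	/* sizeof(*d) + nhws * sizeof(d->hws[0]), overflow-checked */
	struct demo_clk_data *d = kzalloc(struct_size(d, hws, nhws),
					  GFP_KERNEL);

	if (d)
		d->num = nhws;
	return d;
}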
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 71c2e9519ca8..e9da0e69bf6c 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -172,14 +172,12 @@ static int i2s_pll_clk_probe(struct platform_device *pdev)
struct clk *clk;
struct i2s_pll_clk *pll_clk;
struct clk_init_data init;
- struct resource *mem;
pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
if (!pll_clk)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pll_clk->base = devm_ioremap_resource(dev, mem);
+ pll_clk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pll_clk->base))
return PTR_ERR(pll_clk->base);
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index aba787b2e771..500345d99adb 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -221,7 +221,6 @@ static int axs10x_pll_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *parent_name;
struct axs10x_pll_clk *pll_clk;
- struct resource *mem;
struct clk_init_data init = { };
int ret;
@@ -229,13 +228,11 @@ static int axs10x_pll_clk_probe(struct platform_device *pdev)
if (!pll_clk)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pll_clk->base = devm_ioremap_resource(dev, mem);
+ pll_clk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pll_clk->base))
return PTR_ERR(pll_clk->base);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pll_clk->lock = devm_ioremap_resource(dev, mem);
+ pll_clk->lock = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pll_clk->lock))
return PTR_ERR(pll_clk->lock);
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index b6d07ca0164f..290a2846a86b 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -19,7 +19,6 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
struct clk_hw_onecell_data *onecell;
const char *parent;
struct clk *parent_clk;
- struct resource *res;
void __iomem *reg, *gate;
parent_clk = devm_clk_get(dev, NULL);
@@ -27,8 +26,7 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
return PTR_ERR(parent_clk);
parent = __clk_get_name(parent_clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 802e488fd3c3..ded13ccf768e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -2192,7 +2192,6 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct clk_hw **hws;
struct bcm2835_cprman *cprman;
- struct resource *res;
const struct bcm2835_clk_desc *desc;
const size_t asize = ARRAY_SIZE(clk_desc_array);
const struct cprman_plat_data *pdata;
@@ -2211,8 +2210,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
spin_lock_init(&cprman->regs_lock);
cprman->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cprman->regs = devm_ioremap_resource(dev, res);
+ cprman->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cprman->regs))
return PTR_ERR(cprman->regs);
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index abf06fb6453e..411ff5fb2c07 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -14,7 +14,7 @@
#include "clk-aspeed.h"
-#define ASPEED_NUM_CLKS 36
+#define ASPEED_NUM_CLKS 38
#define ASPEED_RESET2_OFFSET 32
@@ -28,6 +28,7 @@
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
+#define ASPEED_MAC_CLK_DLY 0x48
#define ASPEED_STRAP 0x70
#define CLKIN_25MHZ_EN BIT(23)
#define AST2400_CLK_SOURCE_SEL BIT(18)
@@ -462,6 +463,30 @@ static int aspeed_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_MAC] = hw;
+ if (of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2500-scu")) {
+ /* RMII 50MHz RCLK */
+ hw = clk_hw_register_fixed_rate(dev, "mac12rclk", "hpll", 0,
+ 50000000);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ /* RMII1 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac1rclk", "mac12rclk", 0,
+ scu_base + ASPEED_MAC_CLK_DLY, 29, 0,
+ &aspeed_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_MAC1RCLK] = hw;
+
+ /* RMII2 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac2rclk", "mac12rclk", 0,
+ scu_base + ASPEED_MAC_CLK_DLY, 30, 0,
+ &aspeed_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_MAC2RCLK] = hw;
+ }
+
/* LPC Host (LHCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 20, 3, 0,
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index b1318e6b655b..392d01705b97 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -15,7 +15,7 @@
#include "clk-aspeed.h"
-#define ASPEED_G6_NUM_CLKS 67
+#define ASPEED_G6_NUM_CLKS 71
#define ASPEED_G6_SILICON_REV 0x004
@@ -40,6 +40,9 @@
#define ASPEED_G6_STRAP1 0x500
+#define ASPEED_MAC12_CLK_DLY 0x340
+#define ASPEED_MAC34_CLK_DLY 0x350
+
/* Globally visible clocks */
static DEFINE_SPINLOCK(aspeed_g6_clk_lock);
@@ -116,8 +119,6 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
[ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", NULL, 0 }, /* FSI */
};
-static const char * const eclk_parent_names[] = { "mpll", "hpll", "dpll" };
-
static const struct clk_div_table ast2600_eclk_div_table[] = {
{ 0x0, 2 },
{ 0x1, 2 },
@@ -486,6 +487,11 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_SDIO] = hw;
+ /* MAC1/2 RMII 50MHz RCLK */
+ hw = clk_hw_register_fixed_rate(dev, "mac12rclk", "hpll", 0, 50000000);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
/* MAC1/2 AHB bus clock divider */
hw = clk_hw_register_divider_table(dev, "mac12", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 16, 3, 0,
@@ -495,6 +501,27 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC12] = hw;
+ /* RMII1 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac1rclk", "mac12rclk", 0,
+ scu_g6_base + ASPEED_MAC12_CLK_DLY, 29, 0,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_MAC1RCLK] = hw;
+
+ /* RMII2 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac2rclk", "mac12rclk", 0,
+ scu_g6_base + ASPEED_MAC12_CLK_DLY, 30, 0,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_MAC2RCLK] = hw;
+
+ /* MAC3/4 RMII 50MHz RCLK */
+ hw = clk_hw_register_fixed_rate(dev, "mac34rclk", "hclk", 0, 50000000);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
/* MAC3/4 AHB bus clock divider */
hw = clk_hw_register_divider_table(dev, "mac34", "hpll", 0,
scu_g6_base + 0x310, 24, 3, 0,
@@ -504,6 +531,22 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC34] = hw;
+ /* RMII3 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac3rclk", "mac34rclk", 0,
+ scu_g6_base + ASPEED_MAC34_CLK_DLY, 29, 0,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_MAC3RCLK] = hw;
+
+ /* RMII4 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac4rclk", "mac34rclk", 0,
+ scu_g6_base + ASPEED_MAC34_CLK_DLY, 30, 0,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_MAC4RCLK] = hw;
+
/* LPC Host (LHCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
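The mac*rclk gates added above (for both the AST2500 and AST2600 SCUs) let the Ethernet MAC driver switch its 50 MHz RMII reference clock on only when the interface actually runs in RMII mode. A consumer-side sketch; the "RCLK" clock-names entry is an assumption for illustration, the actual binding is not part of this diff:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_mac_probe(struct platform_device *pdev)
{
	struct clk *rclk;

	/* "RCLK" is an assumed clock-names entry for the RMII gate */
	rclk = devm_clk_get_optional(&pdev->dev, "RCLK");
	if (IS_ERR(rclk))
		return PTR_ERR(rclk);

	/* ungate the 50 MHz RMII reference before starting the MAC */
	return clk_prepare_enable(rclk);
}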
diff --git a/drivers/clk/clk-bd718x7.c b/drivers/clk/clk-bd718x7.c
index ae6e5baee330..00926c587390 100644
--- a/drivers/clk/clk-bd718x7.c
+++ b/drivers/clk/clk-bd718x7.c
@@ -133,3 +133,4 @@ module_platform_driver(bd71837_clk);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD71837/BD71847/BD70528 chip clk driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd718xx-clk");
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
new file mode 100644
index 000000000000..4cd175afce9b
--- /dev/null
+++ b/drivers/clk/clk-bm1880.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Bitmain BM1880 SoC clock driver
+ *
+ * Copyright (c) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/bm1880-clock.h>
+
+#define BM1880_CLK_MPLL_CTL 0x00
+#define BM1880_CLK_SPLL_CTL 0x04
+#define BM1880_CLK_FPLL_CTL 0x08
+#define BM1880_CLK_DDRPLL_CTL 0x0c
+
+#define BM1880_CLK_ENABLE0 0x00
+#define BM1880_CLK_ENABLE1 0x04
+#define BM1880_CLK_SELECT 0x20
+#define BM1880_CLK_DIV0 0x40
+#define BM1880_CLK_DIV1 0x44
+#define BM1880_CLK_DIV2 0x48
+#define BM1880_CLK_DIV3 0x4c
+#define BM1880_CLK_DIV4 0x50
+#define BM1880_CLK_DIV5 0x54
+#define BM1880_CLK_DIV6 0x58
+#define BM1880_CLK_DIV7 0x5c
+#define BM1880_CLK_DIV8 0x60
+#define BM1880_CLK_DIV9 0x64
+#define BM1880_CLK_DIV10 0x68
+#define BM1880_CLK_DIV11 0x6c
+#define BM1880_CLK_DIV12 0x70
+#define BM1880_CLK_DIV13 0x74
+#define BM1880_CLK_DIV14 0x78
+#define BM1880_CLK_DIV15 0x7c
+#define BM1880_CLK_DIV16 0x80
+#define BM1880_CLK_DIV17 0x84
+#define BM1880_CLK_DIV18 0x88
+#define BM1880_CLK_DIV19 0x8c
+#define BM1880_CLK_DIV20 0x90
+#define BM1880_CLK_DIV21 0x94
+#define BM1880_CLK_DIV22 0x98
+#define BM1880_CLK_DIV23 0x9c
+#define BM1880_CLK_DIV24 0xa0
+#define BM1880_CLK_DIV25 0xa4
+#define BM1880_CLK_DIV26 0xa8
+#define BM1880_CLK_DIV27 0xac
+#define BM1880_CLK_DIV28 0xb0
+
+#define to_bm1880_pll_clk(_hw) container_of(_hw, struct bm1880_pll_hw_clock, hw)
+#define to_bm1880_div_clk(_hw) container_of(_hw, struct bm1880_div_hw_clock, hw)
+
+static DEFINE_SPINLOCK(bm1880_clk_lock);
+
+struct bm1880_clock_data {
+ void __iomem *pll_base;
+ void __iomem *sys_base;
+ struct clk_hw_onecell_data hw_data;
+};
+
+struct bm1880_gate_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent;
+ u32 gate_reg;
+ s8 gate_shift;
+ unsigned long flags;
+};
+
+struct bm1880_mux_clock {
+ unsigned int id;
+ const char *name;
+ const char * const *parents;
+ s8 num_parents;
+ u32 reg;
+ s8 shift;
+ unsigned long flags;
+};
+
+struct bm1880_div_clock {
+ unsigned int id;
+ const char *name;
+ u32 reg;
+ u8 shift;
+ u8 width;
+ u32 initval;
+ const struct clk_div_table *table;
+ unsigned long flags;
+};
+
+struct bm1880_div_hw_clock {
+ struct bm1880_div_clock div;
+ void __iomem *base;
+ spinlock_t *lock;
+ struct clk_hw hw;
+ struct clk_init_data init;
+};
+
+struct bm1880_composite_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent;
+ const char * const *parents;
+ unsigned int num_parents;
+ unsigned long flags;
+
+ u32 gate_reg;
+ u32 mux_reg;
+ u32 div_reg;
+
+ s8 gate_shift;
+ s8 mux_shift;
+ s8 div_shift;
+ s8 div_width;
+ s16 div_initval;
+ const struct clk_div_table *table;
+};
+
+struct bm1880_pll_clock {
+ unsigned int id;
+ const char *name;
+ u32 reg;
+ unsigned long flags;
+};
+
+struct bm1880_pll_hw_clock {
+ struct bm1880_pll_clock pll;
+ void __iomem *base;
+ struct clk_hw hw;
+ struct clk_init_data init;
+};
+
+static const struct clk_ops bm1880_pll_ops;
+static const struct clk_ops bm1880_clk_div_ops;
+
+#define GATE_DIV(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \
+ _div_shift, _div_width, _div_initval, _table, \
+ _flags) { \
+ .id = _id, \
+ .parent = _parent, \
+ .name = _name, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .div_reg = _div_reg, \
+ .div_shift = _div_shift, \
+ .div_width = _div_width, \
+ .div_initval = _div_initval, \
+ .table = _table, \
+ .mux_shift = -1, \
+ .flags = _flags, \
+ }
+
+#define GATE_MUX(_id, _name, _parents, _gate_reg, _gate_shift, \
+ _mux_reg, _mux_shift, _flags) { \
+ .id = _id, \
+ .parents = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .name = _name, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .div_shift = -1, \
+ .mux_reg = _mux_reg, \
+ .mux_shift = _mux_shift, \
+ .flags = _flags, \
+ }
+
+#define CLK_PLL(_id, _name, _parent, _reg, _flags) { \
+ .pll.id = _id, \
+ .pll.name = _name, \
+ .pll.reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, \
+ &bm1880_pll_ops, \
+ _flags), \
+ }
+
+#define CLK_DIV(_id, _name, _parent, _reg, _shift, _width, _initval, \
+ _table, _flags) { \
+ .div.id = _id, \
+ .div.name = _name, \
+ .div.reg = _reg, \
+ .div.shift = _shift, \
+ .div.width = _width, \
+ .div.initval = _initval, \
+ .div.table = _table, \
+ .hw.init = CLK_HW_INIT_HW(_name, _parent, \
+ &bm1880_clk_div_ops, \
+ _flags), \
+ }
+
+static struct clk_parent_data bm1880_pll_parent[] = {
+ { .fw_name = "osc", .name = "osc" },
+};
+
+/*
+ * All PLL clocks are marked as CRITICAL, hence they are very crucial
+ * for the functioning of the SoC
+ */
+static struct bm1880_pll_hw_clock bm1880_pll_clks[] = {
+ CLK_PLL(BM1880_CLK_MPLL, "clk_mpll", bm1880_pll_parent,
+ BM1880_CLK_MPLL_CTL, 0),
+ CLK_PLL(BM1880_CLK_SPLL, "clk_spll", bm1880_pll_parent,
+ BM1880_CLK_SPLL_CTL, 0),
+ CLK_PLL(BM1880_CLK_FPLL, "clk_fpll", bm1880_pll_parent,
+ BM1880_CLK_FPLL_CTL, 0),
+ CLK_PLL(BM1880_CLK_DDRPLL, "clk_ddrpll", bm1880_pll_parent,
+ BM1880_CLK_DDRPLL_CTL, 0),
+};
+
+/*
+ * Clocks marked as CRITICAL are needed for the proper functioning
+ * of the SoC.
+ */
+static const struct bm1880_gate_clock bm1880_gate_clks[] = {
+ { BM1880_CLK_AHB_ROM, "clk_ahb_rom", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 2, 0 },
+ { BM1880_CLK_AXI_SRAM, "clk_axi_sram", "clk_axi1",
+ BM1880_CLK_ENABLE0, 3, 0 },
+ /*
+ * Since this clock is sourcing the DDR memory, let's mark it as
+ * critical to avoid gating.
+ */
+ { BM1880_CLK_DDR_AXI, "clk_ddr_axi", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 4, CLK_IS_CRITICAL },
+ { BM1880_CLK_APB_EFUSE, "clk_apb_efuse", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 6, 0 },
+ { BM1880_CLK_AXI5_EMMC, "clk_axi5_emmc", "clk_axi5",
+ BM1880_CLK_ENABLE0, 7, 0 },
+ { BM1880_CLK_AXI5_SD, "clk_axi5_sd", "clk_axi5",
+ BM1880_CLK_ENABLE0, 10, 0 },
+ { BM1880_CLK_AXI4_ETH0, "clk_axi4_eth0", "clk_axi4",
+ BM1880_CLK_ENABLE0, 14, 0 },
+ { BM1880_CLK_AXI4_ETH1, "clk_axi4_eth1", "clk_axi4",
+ BM1880_CLK_ENABLE0, 16, 0 },
+ { BM1880_CLK_AXI1_GDMA, "clk_axi1_gdma", "clk_axi1",
+ BM1880_CLK_ENABLE0, 17, 0 },
+ /* Don't gate GPIO clocks as it is not owned by the GPIO driver */
+ { BM1880_CLK_APB_GPIO, "clk_apb_gpio", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 18, CLK_IGNORE_UNUSED },
+ { BM1880_CLK_APB_GPIO_INTR, "clk_apb_gpio_intr", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 19, CLK_IGNORE_UNUSED },
+ { BM1880_CLK_AXI1_MINER, "clk_axi1_miner", "clk_axi1",
+ BM1880_CLK_ENABLE0, 21, 0 },
+ { BM1880_CLK_AHB_SF, "clk_ahb_sf", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 22, 0 },
+ /*
+ * Not sure which module this clock is sourcing but gating this clock
+ * prevents the system from booting. So, let's mark it as critical.
+ */
+ { BM1880_CLK_SDMA_AXI, "clk_sdma_axi", "clk_axi5",
+ BM1880_CLK_ENABLE0, 23, CLK_IS_CRITICAL },
+ { BM1880_CLK_APB_I2C, "clk_apb_i2c", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 25, 0 },
+ { BM1880_CLK_APB_WDT, "clk_apb_wdt", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 26, 0 },
+ { BM1880_CLK_APB_JPEG, "clk_apb_jpeg", "clk_axi6",
+ BM1880_CLK_ENABLE0, 27, 0 },
+ { BM1880_CLK_AXI5_NF, "clk_axi5_nf", "clk_axi5",
+ BM1880_CLK_ENABLE0, 29, 0 },
+ { BM1880_CLK_APB_NF, "clk_apb_nf", "clk_axi6",
+ BM1880_CLK_ENABLE0, 30, 0 },
+ { BM1880_CLK_APB_PWM, "clk_apb_pwm", "clk_mux_axi6",
+ BM1880_CLK_ENABLE1, 0, 0 },
+ { BM1880_CLK_RV, "clk_rv", "clk_mux_rv",
+ BM1880_CLK_ENABLE1, 1, 0 },
+ { BM1880_CLK_APB_SPI, "clk_apb_spi", "clk_mux_axi6",
+ BM1880_CLK_ENABLE1, 2, 0 },
+ { BM1880_CLK_UART_500M, "clk_uart_500m", "clk_div_uart_500m",
+ BM1880_CLK_ENABLE1, 4, 0 },
+ { BM1880_CLK_APB_UART, "clk_apb_uart", "clk_axi6",
+ BM1880_CLK_ENABLE1, 5, 0 },
+ { BM1880_CLK_APB_I2S, "clk_apb_i2s", "clk_axi6",
+ BM1880_CLK_ENABLE1, 6, 0 },
+ { BM1880_CLK_AXI4_USB, "clk_axi4_usb", "clk_axi4",
+ BM1880_CLK_ENABLE1, 7, 0 },
+ { BM1880_CLK_APB_USB, "clk_apb_usb", "clk_axi6",
+ BM1880_CLK_ENABLE1, 8, 0 },
+ { BM1880_CLK_12M_USB, "clk_12m_usb", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE1, 11, 0 },
+ { BM1880_CLK_APB_VIDEO, "clk_apb_video", "clk_axi6",
+ BM1880_CLK_ENABLE1, 12, 0 },
+ { BM1880_CLK_APB_VPP, "clk_apb_vpp", "clk_axi6",
+ BM1880_CLK_ENABLE1, 15, 0 },
+ { BM1880_CLK_AXI6, "clk_axi6", "clk_mux_axi6",
+ BM1880_CLK_ENABLE1, 21, 0 },
+};
+
+static const char * const clk_a53_parents[] = { "clk_spll", "clk_mpll" };
+static const char * const clk_rv_parents[] = { "clk_div_1_rv", "clk_div_0_rv" };
+static const char * const clk_axi1_parents[] = { "clk_div_1_axi1", "clk_div_0_axi1" };
+static const char * const clk_axi6_parents[] = { "clk_div_1_axi6", "clk_div_0_axi6" };
+
+static const struct bm1880_mux_clock bm1880_mux_clks[] = {
+ { BM1880_CLK_MUX_RV, "clk_mux_rv", clk_rv_parents, 2,
+ BM1880_CLK_SELECT, 1, 0 },
+ { BM1880_CLK_MUX_AXI6, "clk_mux_axi6", clk_axi6_parents, 2,
+ BM1880_CLK_SELECT, 3, 0 },
+};
+
+static const struct clk_div_table bm1880_div_table_0[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 0, 0 }
+};
+
+static const struct clk_div_table bm1880_div_table_1[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 127, 128 }, { 0, 0 }
+};
+
+static const struct clk_div_table bm1880_div_table_2[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 127, 128 }, { 255, 256 }, { 0, 0 }
+};
+
+static const struct clk_div_table bm1880_div_table_3[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 127, 128 }, { 255, 256 }, { 511, 512 }, { 0, 0 }
+};
+
+static const struct clk_div_table bm1880_div_table_4[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 127, 128 }, { 255, 256 }, { 511, 512 }, { 65535, 65536 },
+ { 0, 0 }
+};
+
+/*
+ * Clocks marked as CRITICAL are needed for the proper functioning
+ * of the SoC.
+ */
+static struct bm1880_div_hw_clock bm1880_div_clks[] = {
+ CLK_DIV(BM1880_CLK_DIV_0_RV, "clk_div_0_rv", &bm1880_pll_clks[1].hw,
+ BM1880_CLK_DIV12, 16, 5, 1, bm1880_div_table_0, 0),
+ CLK_DIV(BM1880_CLK_DIV_1_RV, "clk_div_1_rv", &bm1880_pll_clks[2].hw,
+ BM1880_CLK_DIV13, 16, 5, 1, bm1880_div_table_0, 0),
+ CLK_DIV(BM1880_CLK_DIV_UART_500M, "clk_div_uart_500m", &bm1880_pll_clks[2].hw,
+ BM1880_CLK_DIV15, 16, 7, 3, bm1880_div_table_1, 0),
+ CLK_DIV(BM1880_CLK_DIV_0_AXI1, "clk_div_0_axi1", &bm1880_pll_clks[0].hw,
+ BM1880_CLK_DIV21, 16, 5, 2, bm1880_div_table_0,
+ 0),
+ CLK_DIV(BM1880_CLK_DIV_1_AXI1, "clk_div_1_axi1", &bm1880_pll_clks[2].hw,
+ BM1880_CLK_DIV22, 16, 5, 3, bm1880_div_table_0,
+ 0),
+ CLK_DIV(BM1880_CLK_DIV_0_AXI6, "clk_div_0_axi6", &bm1880_pll_clks[2].hw,
+ BM1880_CLK_DIV27, 16, 5, 15, bm1880_div_table_0,
+ 0),
+ CLK_DIV(BM1880_CLK_DIV_1_AXI6, "clk_div_1_axi6", &bm1880_pll_clks[0].hw,
+ BM1880_CLK_DIV28, 16, 5, 11, bm1880_div_table_0,
+ 0),
+ CLK_DIV(BM1880_CLK_DIV_12M_USB, "clk_div_12m_usb", &bm1880_pll_clks[2].hw,
+ BM1880_CLK_DIV18, 16, 7, 125, bm1880_div_table_1, 0),
+};
+
+/*
+ * Clocks marked as CRITICAL are all needed for the proper functioning
+ * of the SoC.
+ */
+static struct bm1880_composite_clock bm1880_composite_clks[] = {
+ /*
+ * Since clk_a53 and clk_50m_a53 clocks are sourcing the CPU core,
+ * let's mark them as critical to avoid gating.
+ */
+ GATE_MUX(BM1880_CLK_A53, "clk_a53", clk_a53_parents,
+ BM1880_CLK_ENABLE0, 0, BM1880_CLK_SELECT, 0,
+ CLK_IS_CRITICAL),
+ GATE_DIV(BM1880_CLK_50M_A53, "clk_50m_a53", "clk_fpll",
+ BM1880_CLK_ENABLE0, 1, BM1880_CLK_DIV0, 16, 5, 30,
+ bm1880_div_table_0, CLK_IS_CRITICAL),
+ GATE_DIV(BM1880_CLK_EFUSE, "clk_efuse", "clk_fpll",
+ BM1880_CLK_ENABLE0, 5, BM1880_CLK_DIV1, 16, 7, 60,
+ bm1880_div_table_1, 0),
+ GATE_DIV(BM1880_CLK_EMMC, "clk_emmc", "clk_fpll",
+ BM1880_CLK_ENABLE0, 8, BM1880_CLK_DIV2, 16, 5, 15,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_100K_EMMC, "clk_100k_emmc", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE0, 9, BM1880_CLK_DIV3, 16, 8, 120,
+ bm1880_div_table_2, 0),
+ GATE_DIV(BM1880_CLK_SD, "clk_sd", "clk_fpll",
+ BM1880_CLK_ENABLE0, 11, BM1880_CLK_DIV4, 16, 5, 15,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_100K_SD, "clk_100k_sd", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE0, 12, BM1880_CLK_DIV5, 16, 8, 120,
+ bm1880_div_table_2, 0),
+ GATE_DIV(BM1880_CLK_500M_ETH0, "clk_500m_eth0", "clk_fpll",
+ BM1880_CLK_ENABLE0, 13, BM1880_CLK_DIV6, 16, 5, 3,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_500M_ETH1, "clk_500m_eth1", "clk_fpll",
+ BM1880_CLK_ENABLE0, 15, BM1880_CLK_DIV7, 16, 5, 3,
+ bm1880_div_table_0, 0),
+ /* Don't gate GPIO clocks as it is not owned by the GPIO driver */
+ GATE_DIV(BM1880_CLK_GPIO_DB, "clk_gpio_db", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE0, 20, BM1880_CLK_DIV8, 16, 16, 120,
+ bm1880_div_table_4, CLK_IGNORE_UNUSED),
+ GATE_DIV(BM1880_CLK_SDMA_AUD, "clk_sdma_aud", "clk_fpll",
+ BM1880_CLK_ENABLE0, 24, BM1880_CLK_DIV9, 16, 7, 61,
+ bm1880_div_table_1, 0),
+ GATE_DIV(BM1880_CLK_JPEG_AXI, "clk_jpeg_axi", "clk_fpll",
+ BM1880_CLK_ENABLE0, 28, BM1880_CLK_DIV10, 16, 5, 4,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_NF, "clk_nf", "clk_fpll",
+ BM1880_CLK_ENABLE0, 31, BM1880_CLK_DIV11, 16, 5, 30,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_TPU_AXI, "clk_tpu_axi", "clk_spll",
+ BM1880_CLK_ENABLE1, 3, BM1880_CLK_DIV14, 16, 5, 1,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_125M_USB, "clk_125m_usb", "clk_fpll",
+ BM1880_CLK_ENABLE1, 9, BM1880_CLK_DIV16, 16, 5, 12,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_33K_USB, "clk_33k_usb", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE1, 10, BM1880_CLK_DIV17, 16, 9, 363,
+ bm1880_div_table_3, 0),
+ GATE_DIV(BM1880_CLK_VIDEO_AXI, "clk_video_axi", "clk_fpll",
+ BM1880_CLK_ENABLE1, 13, BM1880_CLK_DIV19, 16, 5, 4,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_VPP_AXI, "clk_vpp_axi", "clk_fpll",
+ BM1880_CLK_ENABLE1, 14, BM1880_CLK_DIV20, 16, 5, 4,
+ bm1880_div_table_0, 0),
+ GATE_MUX(BM1880_CLK_AXI1, "clk_axi1", clk_axi1_parents,
+ BM1880_CLK_ENABLE1, 15, BM1880_CLK_SELECT, 2, 0),
+ GATE_DIV(BM1880_CLK_AXI2, "clk_axi2", "clk_fpll",
+ BM1880_CLK_ENABLE1, 17, BM1880_CLK_DIV23, 16, 5, 3,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_AXI3, "clk_axi3", "clk_mux_rv",
+ BM1880_CLK_ENABLE1, 18, BM1880_CLK_DIV24, 16, 5, 2,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_AXI4, "clk_axi4", "clk_fpll",
+ BM1880_CLK_ENABLE1, 19, BM1880_CLK_DIV25, 16, 5, 6,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_AXI5, "clk_axi5", "clk_fpll",
+ BM1880_CLK_ENABLE1, 20, BM1880_CLK_DIV26, 16, 5, 15,
+ bm1880_div_table_0, 0),
+};
+
+static unsigned long bm1880_pll_rate_calc(u32 regval, unsigned long parent_rate)
+{
+ u64 numerator;
+ u32 fbdiv, fref, refdiv;
+ u32 postdiv1, postdiv2, denominator;
+
+ fbdiv = (regval >> 16) & 0xfff;
+ fref = parent_rate;
+ refdiv = regval & 0x1f;
+ postdiv1 = (regval >> 8) & 0x7;
+ postdiv2 = (regval >> 12) & 0x7;
+
+ numerator = parent_rate * fbdiv;
+ denominator = refdiv * postdiv1 * postdiv2;
+ do_div(numerator, denominator);
+
+ return (unsigned long)numerator;
+}
+
+static unsigned long bm1880_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw);
+ unsigned long rate;
+ u32 regval;
+
+ regval = readl(pll_hw->base + pll_hw->pll.reg);
+ rate = bm1880_pll_rate_calc(regval, parent_rate);
+
+ return rate;
+}
+
+static const struct clk_ops bm1880_pll_ops = {
+ .recalc_rate = bm1880_pll_recalc_rate,
+};
+
+static struct clk_hw *bm1880_clk_register_pll(struct bm1880_pll_hw_clock *pll_clk,
+ void __iomem *sys_base)
+{
+ struct clk_hw *hw;
+ int err;
+
+ pll_clk->base = sys_base;
+ hw = &pll_clk->hw;
+
+ err = clk_hw_register(NULL, hw);
+ if (err)
+ return ERR_PTR(err);
+
+ return hw;
+}
+
+static void bm1880_clk_unregister_pll(struct clk_hw *hw)
+{
+ struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw);
+
+ clk_hw_unregister(hw);
+ kfree(pll_hw);
+}
+
+static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *pll_base = data->pll_base;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ struct bm1880_pll_hw_clock *bm1880_clk = &clks[i];
+
+ hw = bm1880_clk_register_pll(bm1880_clk, pll_base);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, bm1880_clk->pll.name);
+ goto err_clk;
+ }
+
+ data->hw_data.hws[clks[i].pll.id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ bm1880_clk_unregister_pll(data->hw_data.hws[clks[i].pll.id]);
+
+ return PTR_ERR(hw);
+}
+
+static int bm1880_clk_register_mux(const struct bm1880_mux_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *sys_base = data->sys_base;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ hw = clk_hw_register_mux(NULL, clks[i].name,
+ clks[i].parents,
+ clks[i].num_parents,
+ clks[i].flags,
+ sys_base + clks[i].reg,
+ clks[i].shift, 1, 0,
+ &bm1880_clk_lock);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ goto err_clk;
+ }
+
+ data->hw_data.hws[clks[i].id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ clk_hw_unregister_mux(data->hw_data.hws[clks[i].id]);
+
+ return PTR_ERR(hw);
+}
+
+static unsigned long bm1880_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
+ struct bm1880_div_clock *div = &div_hw->div;
+ void __iomem *reg_addr = div_hw->base + div->reg;
+ unsigned int val;
+ unsigned long rate;
+
+ if (!(readl(reg_addr) & BIT(3))) {
+ val = div->initval;
+ } else {
+ val = readl(reg_addr) >> div->shift;
+ val &= clk_div_mask(div->width);
+ }
+
+ rate = divider_recalc_rate(hw, parent_rate, val, div->table,
+ div->flags, div->width);
+
+ return rate;
+}
+
+static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
+ struct bm1880_div_clock *div = &div_hw->div;
+ void __iomem *reg_addr = div_hw->base + div->reg;
+
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 val;
+
+ val = readl(reg_addr) >> div->shift;
+ val &= clk_div_mask(div->width);
+
+ return divider_ro_round_rate(hw, rate, prate, div->table,
+ div->width, div->flags,
+ val);
+ }
+
+ return divider_round_rate(hw, rate, prate, div->table,
+ div->width, div->flags);
+}
+
+static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
+ struct bm1880_div_clock *div = &div_hw->div;
+ void __iomem *reg_addr = div_hw->base + div->reg;
+ unsigned long flags = 0;
+ int value;
+ u32 val;
+
+ value = divider_get_val(rate, parent_rate, div->table,
+ div->width, div_hw->div.flags);
+ if (value < 0)
+ return value;
+
+ if (div_hw->lock)
+ spin_lock_irqsave(div_hw->lock, flags);
+ else
+ __acquire(div_hw->lock);
+
+ val = readl(reg_addr);
+ val &= ~(clk_div_mask(div->width) << div_hw->div.shift);
+ val |= (u32)value << div->shift;
+ writel(val, reg_addr);
+
+ if (div_hw->lock)
+ spin_unlock_irqrestore(div_hw->lock, flags);
+ else
+ __release(div_hw->lock);
+
+ return 0;
+}
+
+static const struct clk_ops bm1880_clk_div_ops = {
+ .recalc_rate = bm1880_clk_div_recalc_rate,
+ .round_rate = bm1880_clk_div_round_rate,
+ .set_rate = bm1880_clk_div_set_rate,
+};
+
+static struct clk_hw *bm1880_clk_register_div(struct bm1880_div_hw_clock *div_clk,
+ void __iomem *sys_base)
+{
+ struct clk_hw *hw;
+ int err;
+
+ div_clk->div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+ div_clk->base = sys_base;
+ div_clk->lock = &bm1880_clk_lock;
+
+ hw = &div_clk->hw;
+ err = clk_hw_register(NULL, hw);
+ if (err)
+ return ERR_PTR(err);
+
+ return hw;
+}
+
+static void bm1880_clk_unregister_div(struct clk_hw *hw)
+{
+ struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
+
+ clk_hw_unregister(hw);
+ kfree(div_hw);
+}
+
+static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *sys_base = data->sys_base;
+ unsigned int i, id;
+
+ for (i = 0; i < num_clks; i++) {
+ struct bm1880_div_hw_clock *bm1880_clk = &clks[i];
+
+ hw = bm1880_clk_register_div(bm1880_clk, sys_base);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, bm1880_clk->div.name);
+ goto err_clk;
+ }
+
+ id = clks[i].div.id;
+ data->hw_data.hws[id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ bm1880_clk_unregister_div(data->hw_data.hws[clks[i].div.id]);
+
+ return PTR_ERR(hw);
+}
+
+static int bm1880_clk_register_gate(const struct bm1880_gate_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *sys_base = data->sys_base;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ hw = clk_hw_register_gate(NULL, clks[i].name,
+ clks[i].parent,
+ clks[i].flags,
+ sys_base + clks[i].gate_reg,
+ clks[i].gate_shift, 0,
+ &bm1880_clk_lock);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ goto err_clk;
+ }
+
+ data->hw_data.hws[clks[i].id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ clk_hw_unregister_gate(data->hw_data.hws[clks[i].id]);
+
+ return PTR_ERR(hw);
+}
+
+static struct clk_hw *bm1880_clk_register_composite(struct bm1880_composite_clock *clks,
+ void __iomem *sys_base)
+{
+ struct clk_hw *hw;
+ struct clk_mux *mux = NULL;
+ struct clk_gate *gate = NULL;
+ struct bm1880_div_hw_clock *div_hws = NULL;
+ struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *div_hw = NULL;
+ const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, *div_ops = NULL;
+ const char * const *parent_names;
+ const char *parent;
+ int num_parents;
+ int ret;
+
+ if (clks->mux_shift >= 0) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = sys_base + clks->mux_reg;
+ mux->mask = 1;
+ mux->shift = clks->mux_shift;
+ mux_hw = &mux->hw;
+ mux_ops = &clk_mux_ops;
+ mux->lock = &bm1880_clk_lock;
+
+ parent_names = clks->parents;
+ num_parents = clks->num_parents;
+ } else {
+ parent = clks->parent;
+ parent_names = &parent;
+ num_parents = 1;
+ }
+
+ if (clks->gate_shift >= 0) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ gate->reg = sys_base + clks->gate_reg;
+ gate->bit_idx = clks->gate_shift;
+ gate->lock = &bm1880_clk_lock;
+
+ gate_hw = &gate->hw;
+ gate_ops = &clk_gate_ops;
+ }
+
+ if (clks->div_shift >= 0) {
+ div_hws = kzalloc(sizeof(*div_hws), GFP_KERNEL);
+ if (!div_hws) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ div_hws->base = sys_base;
+ div_hws->div.reg = clks->div_reg;
+ div_hws->div.shift = clks->div_shift;
+ div_hws->div.width = clks->div_width;
+ div_hws->div.table = clks->table;
+ div_hws->div.initval = clks->div_initval;
+ div_hws->lock = &bm1880_clk_lock;
+ div_hws->div.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO;
+
+ div_hw = &div_hws->hw;
+ div_ops = &bm1880_clk_div_ops;
+ }
+
+ hw = clk_hw_register_composite(NULL, clks->name, parent_names,
+ num_parents, mux_hw, mux_ops, div_hw,
+ div_ops, gate_hw, gate_ops,
+ clks->flags);
+
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_out;
+ }
+
+ return hw;
+
+err_out:
+ kfree(div_hws);
+ kfree(gate);
+ kfree(mux);
+
+ return ERR_PTR(ret);
+}
+
+static int bm1880_clk_register_composites(struct bm1880_composite_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *sys_base = data->sys_base;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ struct bm1880_composite_clock *bm1880_clk = &clks[i];
+
+ hw = bm1880_clk_register_composite(bm1880_clk, sys_base);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, bm1880_clk->name);
+ goto err_clk;
+ }
+
+ data->hw_data.hws[clks[i].id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ clk_hw_unregister_composite(data->hw_data.hws[clks[i].id]);
+
+ return PTR_ERR(hw);
+}
+
+static int bm1880_clk_probe(struct platform_device *pdev)
+{
+ struct bm1880_clock_data *clk_data;
+ void __iomem *pll_base, *sys_base;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_clks, i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pll_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pll_base))
+ return PTR_ERR(pll_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ sys_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sys_base))
+ return PTR_ERR(sys_base);
+
+ num_clks = ARRAY_SIZE(bm1880_pll_clks) +
+ ARRAY_SIZE(bm1880_div_clks) +
+ ARRAY_SIZE(bm1880_mux_clks) +
+ ARRAY_SIZE(bm1880_composite_clks) +
+ ARRAY_SIZE(bm1880_gate_clks);
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws,
+ num_clks), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->pll_base = pll_base;
+ clk_data->sys_base = sys_base;
+
+ for (i = 0; i < num_clks; i++)
+ clk_data->hw_data.hws[i] = ERR_PTR(-ENOENT);
+
+ clk_data->hw_data.num = num_clks;
+
+ bm1880_clk_register_plls(bm1880_pll_clks,
+ ARRAY_SIZE(bm1880_pll_clks),
+ clk_data);
+
+ bm1880_clk_register_divs(bm1880_div_clks,
+ ARRAY_SIZE(bm1880_div_clks),
+ clk_data);
+
+ bm1880_clk_register_mux(bm1880_mux_clks,
+ ARRAY_SIZE(bm1880_mux_clks),
+ clk_data);
+
+ bm1880_clk_register_composites(bm1880_composite_clks,
+ ARRAY_SIZE(bm1880_composite_clks),
+ clk_data);
+
+ bm1880_clk_register_gate(bm1880_gate_clks,
+ ARRAY_SIZE(bm1880_gate_clks),
+ clk_data);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &clk_data->hw_data);
+}
+
+static const struct of_device_id bm1880_of_match[] = {
+ { .compatible = "bitmain,bm1880-clk", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bm1880_of_match);
+
+static struct platform_driver bm1880_clk_driver = {
+ .driver = {
+ .name = "bm1880-clk",
+ .of_match_table = bm1880_of_match,
+ },
+ .probe = bm1880_clk_probe,
+};
+module_platform_driver(bm1880_clk_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Clock driver for Bitmain BM1880 SoC");
+MODULE_LICENSE("GPL v2");
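
The new driver's bm1880_pll_rate_calc() above decodes the PLL control register into fbdiv [27:16], refdiv [4:0], postdiv1 [10:8] and postdiv2 [14:12], then computes rate = fref * fbdiv / (refdiv * postdiv1 * postdiv2). A minimal standalone sketch of that arithmetic follows; it is not part of the patch, and the 25 MHz reference and the register value are assumed example inputs:

#include <stdint.h>
#include <stdio.h>

/* Same field extraction as bm1880_pll_rate_calc(), in plain C for illustration. */
static uint64_t pll_rate(uint32_t regval, uint64_t fref)
{
	uint32_t fbdiv = (regval >> 16) & 0xfff;
	uint32_t refdiv = regval & 0x1f;
	uint32_t postdiv1 = (regval >> 8) & 0x7;
	uint32_t postdiv2 = (regval >> 12) & 0x7;

	return fref * fbdiv / (refdiv * postdiv1 * postdiv2);
}

int main(void)
{
	/* assumed example: fbdiv = 96, postdiv2 = 1, postdiv1 = 2, refdiv = 1 */
	uint32_t regval = (96u << 16) | (1u << 12) | (2u << 8) | 1u;

	/* 25 MHz * 96 / (1 * 2 * 1) = 1.2 GHz */
	printf("%llu Hz\n", (unsigned long long)pll_rate(regval, 25000000ULL));
	return 0;
}
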
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 4f13a681ddfc..3e9c3e608769 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -207,7 +207,7 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
unsigned long flags)
{
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk_composite *composite;
struct clk_ops *clk_composite_ops;
int ret;
@@ -343,3 +343,14 @@ void clk_unregister_composite(struct clk *clk)
clk_unregister(clk);
kfree(composite);
}
+
+void clk_hw_unregister_composite(struct clk_hw *hw)
+{
+ struct clk_composite *composite;
+
+ composite = to_clk_composite(hw);
+
+ clk_hw_unregister(hw);
+ kfree(composite);
+}
+EXPORT_SYMBOL_GPL(clk_hw_unregister_composite);
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 3f9ff78c4a2a..098b2b01f0af 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -471,7 +471,7 @@ static struct clk_hw *_register_divider(struct device *dev, const char *name,
{
struct clk_divider *div;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index a7e4aef7a376..2c4486c09040 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -58,7 +58,7 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
{
struct clk_fixed_rate *fixed;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
/* allocate fixed-rate clock */
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 1b99fc962745..670053c58c1a 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -141,7 +141,7 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
{
struct clk_gate *gate;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 9d930edd6516..13304cf5f2a8 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -280,7 +280,7 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
else
clk = clk_register_gpio_gate(&pdev->dev, node->name,
parent_names ? parent_names[0] : NULL, gpiod,
- 0);
+ CLK_SET_RATE_PARENT);
if (IS_ERR(clk))
return PTR_ERR(clk);
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 66e91f740508..570b6e5b603b 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -153,7 +153,7 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
{
struct clk_mux *mux;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
u8 width = 0;
int ret;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1c677d7f7f53..b68e200829f2 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1187,7 +1187,7 @@ static void clk_core_disable_unprepare(struct clk_core *core)
clk_core_unprepare_lock(core);
}
-static void clk_unprepare_unused_subtree(struct clk_core *core)
+static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
@@ -1217,7 +1217,7 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
clk_pm_runtime_put(core);
}
-static void clk_disable_unused_subtree(struct clk_core *core)
+static void __init clk_disable_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
unsigned long flags;
@@ -1263,7 +1263,7 @@ unprepare_out:
clk_core_disable_unprepare(core->parent);
}
-static bool clk_ignore_unused;
+static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
clk_ignore_unused = true;
@@ -1271,7 +1271,7 @@ static int __init clk_ignore_unused_setup(char *__unused)
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
-static int clk_disable_unused(void)
+static int __init clk_disable_unused(void)
{
struct clk_core *core;
@@ -1674,6 +1674,24 @@ static int clk_fetch_parent_index(struct clk_core *core,
return i;
}
+/**
+ * clk_hw_get_parent_index - return the index of the parent clock
+ * @hw: clk_hw associated with the clk being consumed
+ *
+ * Fetches and returns the index of the parent clock. Returns -EINVAL if the given
+ * clock does not have a current parent.
+ */
+int clk_hw_get_parent_index(struct clk_hw *hw)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+
+ if (WARN_ON(parent == NULL))
+ return -EINVAL;
+
+ return clk_fetch_parent_index(hw->core, parent->core);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
+
/*
* Update the orphan status of @core and all its children.
*/
@@ -3879,6 +3897,7 @@ void clk_unregister(struct clk *clk)
__func__, clk->core->name);
kref_put(&clk->core->ref, __clk_release);
+ free_clk(clk);
unlock:
clk_prepare_unlock();
}
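
Besides the __init/__initdata annotations and the free_clk() fix in clk_unregister(), the clk.c hunk exports a new clk_hw_get_parent_index() helper. A hedged sketch of how a provider op might use it; this is hypothetical driver code, not taken from the series:

/* Kernel-context fragment; assumes <linux/clk-provider.h>. */
static int example_pick_settings(struct clk_hw *hw)
{
	int idx = clk_hw_get_parent_index(hw);

	/* -EINVAL means the clock currently has no parent */
	if (idx < 0)
		return idx;

	/* choose register settings that are only valid for parent input 'idx' */
	return idx;
}
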
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 1ac11b6a47a3..8a23d5dfd1f8 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -910,7 +910,6 @@ static int davinci_pll_probe(struct platform_device *pdev)
struct davinci_pll_platform_data *pdata;
const struct of_device_id *of_id;
davinci_pll_init pll_init = NULL;
- struct resource *res;
void __iomem *base;
of_id = of_match_device(davinci_pll_of_match, dev);
@@ -930,8 +929,7 @@ static int davinci_pll_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
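
Several hunks in this series (davinci pll/psc, hisilicon reset) make the same conversion: the platform_get_resource() plus devm_ioremap_resource() pair collapses into a single devm_platform_ioremap_resource() call. A minimal probe sketch of the resulting pattern, for a hypothetical driver and for illustration only:

/* Kernel-context fragment; assumes <linux/platform_device.h>. */
static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* maps IORESOURCE_MEM index 0 and registers it for devres cleanup */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... register clocks using 'base' ... */
	return 0;
}
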
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 5b69e24a224f..7387e7f6276e 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -531,7 +531,6 @@ static int davinci_psc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
const struct davinci_psc_init_data *init_data = NULL;
- struct resource *res;
void __iomem *base;
int ret;
@@ -546,8 +545,7 @@ static int davinci_psc_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
index 5b3ad26dcc77..41f61726ab19 100644
--- a/drivers/clk/hisilicon/clk-hi3660.c
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -333,49 +333,49 @@ static const struct hisi_mux_clock hi3660_crgctrl_mux_clks[] = {
static const struct hisi_divider_clock hi3660_crgctrl_divider_clks[] = {
{ HI3660_CLK_DIV_UART0, "clk_div_uart0", "clk_andgt_uart0",
- CLK_SET_RATE_PARENT, 0xb0, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb0, 4, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_UART1, "clk_div_uart1", "clk_andgt_uart1",
- CLK_SET_RATE_PARENT, 0xb0, 8, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb0, 8, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_UARTH, "clk_div_uarth", "clk_andgt_uarth",
- CLK_SET_RATE_PARENT, 0xb0, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb0, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_MMC, "clk_div_mmc", "clk_andgt_mmc",
- CLK_SET_RATE_PARENT, 0xb4, 3, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb4, 3, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_SD, "clk_div_sd", "clk_andgt_sd",
- CLK_SET_RATE_PARENT, 0xb8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb8, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_EDC0, "clk_div_edc0", "clk_andgt_edc0",
- CLK_SET_RATE_PARENT, 0xbc, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xbc, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_LDI0, "clk_div_ldi0", "clk_andgt_ldi0",
- CLK_SET_RATE_PARENT, 0xbc, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xbc, 10, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_SDIO, "clk_div_sdio", "clk_andgt_sdio",
- CLK_SET_RATE_PARENT, 0xc0, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xc0, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_LDI1, "clk_div_ldi1", "clk_andgt_ldi1",
- CLK_SET_RATE_PARENT, 0xc0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xc0, 8, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi",
- CLK_SET_RATE_PARENT, 0xc4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xc4, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_VENC, "clk_div_venc", "clk_andgt_venc",
- CLK_SET_RATE_PARENT, 0xc8, 6, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xc8, 6, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_VDEC, "clk_div_vdec", "clk_andgt_vdec",
- CLK_SET_RATE_PARENT, 0xcc, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xcc, 0, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_vivobus_andgt",
- CLK_SET_RATE_PARENT, 0xd0, 7, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xd0, 7, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xe8, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xe8, 4, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_UFSPHY, "clk_div_ufsphy_cfg", "clk_gate_ufsphy_gt",
- CLK_SET_RATE_PARENT, 0xe8, 9, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xe8, 9, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_CFGBUS, "clk_div_cfgbus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0xec, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xec, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_MMC0BUS, "clk_div_mmc0bus", "autodiv_emmc0bus",
- CLK_SET_RATE_PARENT, 0xec, 2, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xec, 2, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_MMC1BUS, "clk_div_mmc1bus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0xec, 3, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xec, 3, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_UFSPERI, "clk_div_ufsperi", "clk_gate_ufs_subsys",
- CLK_SET_RATE_PARENT, 0xec, 14, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xec, 14, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_AOMM, "clk_div_aomm", "clk_aomm_andgt",
- CLK_SET_RATE_PARENT, 0x100, 7, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x100, 7, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_ISP_SNCLK, "clk_isp_snclk_div", "clk_isp_snclk_fac",
- CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_mux_ioperi",
- CLK_SET_RATE_PARENT, 0x108, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 11, 4, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_pmuctrl */
@@ -420,13 +420,13 @@ static const struct hisi_gate_clock hi3660_sctrl_gate_clks[] = {
{ HI3660_PCLK_MMBUF_ANDGT, "pclk_mmbuf_andgt", "clk_sw_mmbuf",
CLK_SET_RATE_PARENT, 0x258, 7, CLK_GATE_HIWORD_MASK, },
{ HI3660_CLK_MMBUF_PLL_ANDGT, "clk_mmbuf_pll_andgt", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x260, 11, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x260, 11, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_FLL_MMBUF_ANDGT, "clk_fll_mmbuf_andgt", "clk_fll_src",
- CLK_SET_RATE_PARENT, 0x260, 12, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x260, 12, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_SYS_MMBUF_ANDGT, "clk_sys_mmbuf_andgt", "clkin_sys",
- CLK_SET_RATE_PARENT, 0x260, 13, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x260, 13, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_GATE_PCIEPHY_GT, "clk_gate_pciephy_gt", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x268, 11, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x268, 11, CLK_DIVIDER_HIWORD_MASK, },
};
static const char *const
@@ -446,13 +446,13 @@ static const struct hisi_mux_clock hi3660_sctrl_mux_clks[] = {
static const struct hisi_divider_clock hi3660_sctrl_divider_clks[] = {
{ HI3660_CLK_DIV_AOBUS, "clk_div_aobus", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_PCLK_DIV_MMBUF, "pclk_div_mmbuf", "pclk_mmbuf_andgt",
- CLK_SET_RATE_PARENT, 0x258, 10, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x258, 10, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_ACLK_DIV_MMBUF, "aclk_div_mmbuf", "clk_mmbuf_pll_andgt",
- CLK_SET_RATE_PARENT, 0x258, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x258, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_PCIEPHY, "clk_div_pciephy", "clk_gate_pciephy_gt",
- CLK_SET_RATE_PARENT, 0x268, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x268, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_iomcu */
diff --git a/drivers/clk/hisilicon/clk-hi3670.c b/drivers/clk/hisilicon/clk-hi3670.c
index fd8c837a6ea3..4d05a71683a5 100644
--- a/drivers/clk/hisilicon/clk-hi3670.c
+++ b/drivers/clk/hisilicon/clk-hi3670.c
@@ -295,61 +295,61 @@ static const struct hisi_gate_clock hi3670_crgctrl_gate_sep_clks[] = {
static const struct hisi_gate_clock hi3670_crgctrl_gate_clks[] = {
{ HI3670_AUTODIV_SYSBUS, "autodiv_sysbus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0x404, 5, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x404, 5, CLK_GATE_HIWORD_MASK, },
{ HI3670_AUTODIV_EMMC0BUS, "autodiv_emmc0bus", "autodiv_sysbus",
- CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, },
{ HI3670_PCLK_ANDGT_MMC1_PCIE, "pclk_andgt_mmc1_pcie", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xf8, 13, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xf8, 13, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_GATE_VCODECBUS_GT, "clk_gate_vcodecbus_gt", "clk_mux_vcodecbus",
- CLK_SET_RATE_PARENT, 0x0F0, 8, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0F0, 8, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_SD, "clk_andgt_sd", "clk_mux_sd_pll",
- CLK_SET_RATE_PARENT, 0xF4, 3, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 3, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_SD_SYS_GT, "clk_sd_sys_gt", "clkin_sys",
- CLK_SET_RATE_PARENT, 0xF4, 5, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 5, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_SDIO, "clk_andgt_sdio", "clk_mux_sdio_pll",
- CLK_SET_RATE_PARENT, 0xF4, 8, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 8, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_SDIO_SYS_GT, "clk_sdio_sys_gt", "clkin_sys",
- CLK_SET_RATE_PARENT, 0xF4, 6, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 6, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_A53HPM_ANDGT, "clk_a53hpm_andgt", "clk_mux_a53hpm",
- CLK_SET_RATE_PARENT, 0x0F4, 7, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0F4, 7, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_320M_PLL_GT, "clk_320m_pll_gt", "clk_mux_320m",
- CLK_SET_RATE_PARENT, 0xF8, 10, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF8, 10, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_UARTH, "clk_andgt_uarth", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF4, 11, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 11, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_UARTL, "clk_andgt_uartl", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF4, 10, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 10, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_UART0, "clk_andgt_uart0", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF4, 9, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 9, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_SPI, "clk_andgt_spi", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF4, 13, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 13, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_PCIEAXI, "clk_andgt_pcieaxi", "clk_mux_pcieaxi",
- CLK_SET_RATE_PARENT, 0xfc, 15, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xfc, 15, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_DIV_AO_ASP_GT, "clk_div_ao_asp_gt", "clk_mux_ao_asp",
- CLK_SET_RATE_PARENT, 0xF4, 4, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 4, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_GATE_CSI_TRANS, "clk_gate_csi_trans", "clk_ppll2",
- CLK_SET_RATE_PARENT, 0xF4, 14, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 14, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_GATE_DSI_TRANS, "clk_gate_dsi_trans", "clk_ppll2",
- CLK_SET_RATE_PARENT, 0xF4, 1, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 1, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_PTP, "clk_andgt_ptp", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF8, 5, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF8, 5, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_OUT0, "clk_andgt_out0", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0xF0, 10, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF0, 10, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_OUT1, "clk_andgt_out1", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0xF0, 11, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF0, 11, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLKGT_DP_AUDIO_PLL_AO, "clkgt_dp_audio_pll_ao", "clk_ppll6",
- CLK_SET_RATE_PARENT, 0xF8, 15, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF8, 15, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_VDEC, "clk_andgt_vdec", "clk_mux_vdec",
- CLK_SET_RATE_PARENT, 0xF0, 13, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF0, 13, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_VENC, "clk_andgt_venc", "clk_mux_venc",
- CLK_SET_RATE_PARENT, 0xF0, 9, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF0, 9, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ISP_SNCLK_ANGT, "clk_isp_snclk_angt", "clk_div_a53hpm",
- CLK_SET_RATE_PARENT, 0x108, 2, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 2, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_RXDPHY, "clk_andgt_rxdphy", "clk_div_a53hpm",
- CLK_SET_RATE_PARENT, 0x0F0, 12, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0F0, 12, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_ICS, "clk_andgt_ics", "clk_mux_ics",
- CLK_SET_RATE_PARENT, 0xf0, 14, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xf0, 14, CLK_GATE_HIWORD_MASK, },
{ HI3670_AUTODIV_DMABUS, "autodiv_dmabus", "autodiv_sysbus",
- CLK_SET_RATE_PARENT, 0x404, 3, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x404, 3, CLK_GATE_HIWORD_MASK, },
};
static const char *const
@@ -485,57 +485,57 @@ static const struct hisi_mux_clock hi3670_crgctrl_mux_clks[] = {
static const struct hisi_divider_clock hi3670_crgctrl_divider_clks[] = {
{ HI3670_CLK_DIV_CFGBUS, "clk_div_cfgbus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0xEC, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xEC, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_MMC0BUS, "clk_div_mmc0bus", "autodiv_emmc0bus",
- CLK_SET_RATE_PARENT, 0x0EC, 2, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0EC, 2, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_MMC1BUS, "clk_div_mmc1bus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0x0EC, 3, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0EC, 3, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_PCLK_DIV_MMC1_PCIE, "pclk_div_mmc1_pcie", "pclk_andgt_mmc1_pcie",
- CLK_SET_RATE_PARENT, 0xb4, 6, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb4, 6, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_VCODECBUS, "clk_div_vcodecbus", "clk_gate_vcodecbus_gt",
- CLK_SET_RATE_PARENT, 0x0BC, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0BC, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_SD, "clk_div_sd", "clk_andgt_sd",
- CLK_SET_RATE_PARENT, 0xB8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xB8, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_SDIO, "clk_div_sdio", "clk_andgt_sdio",
- CLK_SET_RATE_PARENT, 0xC0, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xC0, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_UARTH, "clk_div_uarth", "clk_andgt_uarth",
- CLK_SET_RATE_PARENT, 0xB0, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xB0, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_UARTL, "clk_div_uartl", "clk_andgt_uartl",
- CLK_SET_RATE_PARENT, 0xB0, 8, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xB0, 8, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_UART0, "clk_div_uart0", "clk_andgt_uart0",
- CLK_SET_RATE_PARENT, 0xB0, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xB0, 4, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xE8, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xE8, 4, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi",
- CLK_SET_RATE_PARENT, 0xC4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xC4, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_PCIEAXI, "clk_div_pcieaxi", "clk_andgt_pcieaxi",
- CLK_SET_RATE_PARENT, 0xb4, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb4, 0, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_AO_ASP, "clk_div_ao_asp", "clk_div_ao_asp_gt",
- CLK_SET_RATE_PARENT, 0x108, 6, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 6, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CSI_TRANS, "clk_div_csi_trans", "clk_gate_csi_trans",
- CLK_SET_RATE_PARENT, 0xD4, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xD4, 0, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_DSI_TRANS, "clk_div_dsi_trans", "clk_gate_dsi_trans",
- CLK_SET_RATE_PARENT, 0xD4, 10, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xD4, 10, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_PTP, "clk_div_ptp", "clk_andgt_ptp",
- CLK_SET_RATE_PARENT, 0xD8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xD8, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CLKOUT0_PLL, "clk_div_clkout0_pll", "clk_andgt_out0",
- CLK_SET_RATE_PARENT, 0xe0, 4, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xe0, 4, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CLKOUT1_PLL, "clk_div_clkout1_pll", "clk_andgt_out1",
- CLK_SET_RATE_PARENT, 0xe0, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xe0, 10, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLKDIV_DP_AUDIO_PLL_AO, "clkdiv_dp_audio_pll_ao", "clkgt_dp_audio_pll_ao",
- CLK_SET_RATE_PARENT, 0xBC, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xBC, 11, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_VDEC, "clk_div_vdec", "clk_andgt_vdec",
- CLK_SET_RATE_PARENT, 0xC4, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xC4, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_VENC, "clk_div_venc", "clk_andgt_venc",
- CLK_SET_RATE_PARENT, 0xC0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xC0, 8, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_ISP_SNCLK_DIV0, "clk_isp_snclk_div0", "clk_isp_snclk_fac",
- CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_ISP_SNCLK_DIV1, "clk_isp_snclk_div1", "clk_isp_snclk_fac",
- CLK_SET_RATE_PARENT, 0x10C, 14, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x10C, 14, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_ISP_SNCLK_DIV2, "clk_isp_snclk_div2", "clk_isp_snclk_fac",
- CLK_SET_RATE_PARENT, 0x10C, 11, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x10C, 11, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_ICS, "clk_div_ics", "clk_andgt_ics",
- CLK_SET_RATE_PARENT, 0xE4, 9, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xE4, 9, 6, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_pmuctrl */
@@ -608,12 +608,12 @@ static const struct hisi_gate_clock hi3670_sctrl_gate_sep_clks[] = {
static const struct hisi_gate_clock hi3670_sctrl_gate_clks[] = {
{ HI3670_CLK_ANDGT_IOPERI, "clk_andgt_ioperi", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x270, 6, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x270, 6, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLKANDGT_ASP_SUBSYS_PERI, "clkandgt_asp_subsys_peri",
"clk_ppll0",
- CLK_SET_RATE_PARENT, 0x268, 3, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x268, 3, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANGT_ASP_SUBSYS, "clk_angt_asp_subsys", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x258, 0, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x258, 0, CLK_GATE_HIWORD_MASK, },
};
static const char *const
@@ -650,19 +650,19 @@ static const struct hisi_mux_clock hi3670_sctrl_mux_clks[] = {
static const struct hisi_divider_clock hi3670_sctrl_divider_clks[] = {
{ HI3670_CLK_DIV_AOBUS, "clk_div_aobus", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_UFS_SUBSYS, "clk_div_ufs_subsys", "clk_mux_ufs_subsys",
- CLK_SET_RATE_PARENT, 0x274, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x274, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_andgt_ioperi",
- CLK_SET_RATE_PARENT, 0x270, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x270, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CLKOUT0_TCXO, "clk_div_clkout0_tcxo", "clkin_sys",
- CLK_SET_RATE_PARENT, 0x254, 6, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x254, 6, 3, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CLKOUT1_TCXO, "clk_div_clkout1_tcxo", "clkin_sys",
- CLK_SET_RATE_PARENT, 0x254, 9, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x254, 9, 3, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_ASP_SUBSYS_PERI_DIV, "clk_asp_subsys_peri_div", "clkandgt_asp_subsys_peri",
- CLK_SET_RATE_PARENT, 0x268, 0, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x268, 0, 3, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_ASP_SUBSYS, "clk_div_asp_subsys", "clk_angt_asp_subsys",
- CLK_SET_RATE_PARENT, 0x250, 0, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x250, 0, 3, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_iomcu */
@@ -732,17 +732,17 @@ static const struct hisi_gate_clock hi3670_media1_gate_sep_clks[] = {
static const struct hisi_gate_clock hi3670_media1_gate_clks[] = {
{ HI3670_CLK_GATE_VIVOBUS_ANDGT, "clk_gate_vivobus_andgt", "clk_mux_vivobus",
- CLK_SET_RATE_PARENT, 0x84, 3, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 3, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_EDC0, "clk_andgt_edc0", "clk_mux_edc0",
- CLK_SET_RATE_PARENT, 0x84, 7, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 7, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_LDI0, "clk_andgt_ldi0", "clk_mux_ldi0",
- CLK_SET_RATE_PARENT, 0x84, 9, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 9, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_LDI1, "clk_andgt_ldi1", "clk_mux_ldi1",
- CLK_SET_RATE_PARENT, 0x84, 8, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 8, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_MMBUF_PLL_ANDGT, "clk_mmbuf_pll_andgt", "clk_sw_mmbuf",
- CLK_SET_RATE_PARENT, 0x84, 14, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 14, CLK_GATE_HIWORD_MASK, },
{ HI3670_PCLK_MMBUF_ANDGT, "pclk_mmbuf_andgt", "aclk_div_mmbuf",
- CLK_SET_RATE_PARENT, 0x84, 15, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 15, CLK_GATE_HIWORD_MASK, },
};
static const char *const
@@ -799,17 +799,17 @@ static const struct hisi_mux_clock hi3670_media1_mux_clks[] = {
static const struct hisi_divider_clock hi3670_media1_divider_clks[] = {
{ HI3670_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_gate_vivobus_andgt",
- CLK_SET_RATE_PARENT, 0x74, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x74, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_EDC0, "clk_div_edc0", "clk_andgt_edc0",
- CLK_SET_RATE_PARENT, 0x68, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x68, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_LDI0, "clk_div_ldi0", "clk_andgt_ldi0",
- CLK_SET_RATE_PARENT, 0x60, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x60, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_LDI1, "clk_div_ldi1", "clk_andgt_ldi1",
- CLK_SET_RATE_PARENT, 0x64, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x64, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_ACLK_DIV_MMBUF, "aclk_div_mmbuf", "clk_mmbuf_pll_andgt",
- CLK_SET_RATE_PARENT, 0x7C, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x7C, 10, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_PCLK_DIV_MMBUF, "pclk_div_mmbuf", "pclk_mmbuf_andgt",
- CLK_SET_RATE_PARENT, 0x78, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x78, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_media2 */
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index b2c5b6bbb1c1..e7cdf72d4b06 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -86,7 +86,8 @@ static void __init hi6220_clk_ao_init(struct device_node *np)
hisi_clk_register_gate_sep(hi6220_separated_gate_clks_ao,
ARRAY_SIZE(hi6220_separated_gate_clks_ao), clk_data_ao);
}
-CLK_OF_DECLARE(hi6220_clk_ao, "hisilicon,hi6220-aoctrl", hi6220_clk_ao_init);
+/* Allow reset driver to probe as well */
+CLK_OF_DECLARE_DRIVER(hi6220_clk_ao, "hisilicon,hi6220-aoctrl", hi6220_clk_ao_init);
/* clocks in sysctrl */
diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c
index 2e22fea2a2e7..93cee17db8b1 100644
--- a/drivers/clk/hisilicon/reset.c
+++ b/drivers/clk/hisilicon/reset.c
@@ -90,14 +90,12 @@ static const struct reset_control_ops hisi_reset_ops = {
struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev)
{
struct hisi_reset_controller *rstc;
- struct resource *res;
rstc = devm_kmalloc(&pdev->dev, sizeof(*rstc), GFP_KERNEL);
if (!rstc)
return NULL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rstc->membase = devm_ioremap_resource(&pdev->dev, res);
+ rstc->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rstc->membase))
return NULL;
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index 33ab4ff61165..b00cbd045af5 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -58,8 +58,7 @@ static void __init clk_boston_setup(struct device_node *np)
cpu_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK1DIV);
cpu_freq = mult_frac(in_freq, mul, cpu_div);
- onecell = kzalloc(sizeof(*onecell) +
- (BOSTON_CLK_COUNT * sizeof(struct clk_hw *)),
+ onecell = kzalloc(struct_size(onecell, hws, BOSTON_CLK_COUNT),
GFP_KERNEL);
if (!onecell)
return;
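
The clk-boston change swaps the open-coded allocation size for struct_size(), which evaluates sizeof(*onecell) + count * sizeof(onecell->hws[0]) and saturates on overflow so the allocation fails cleanly. A small sketch of the idiom with a hypothetical structure mirroring clk_hw_onecell_data (assumes <linux/overflow.h>, <linux/slab.h> and <linux/clk-provider.h>):

struct cells {
	unsigned int num;
	struct clk_hw *hws[];
};

static struct cells *alloc_cells(unsigned int n)
{
	/* struct_size() returns SIZE_MAX on overflow, so kzalloc() simply fails */
	struct cells *c = kzalloc(struct_size(c, hws, n), GFP_KERNEL);

	if (c)
		c->num = n;
	return c;
}
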
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 5f3e92c09a5e..8e8288bda4d0 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -107,12 +107,12 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
hws[IMX6SLL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- hws[IMX6SLL_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil"));
- hws[IMX6SLL_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc"));
+ hws[IMX6SLL_CLK_CKIL] = imx_obtain_fixed_clk_hw(ccm_node, "ckil");
+ hws[IMX6SLL_CLK_OSC] = imx_obtain_fixed_clk_hw(ccm_node, "osc");
/* ipp_di clock is external input */
- hws[IMX6SLL_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0"));
- hws[IMX6SLL_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1"));
+ hws[IMX6SLL_CLK_IPP_DI0] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di0");
+ hws[IMX6SLL_CLK_IPP_DI1] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di1");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index c4685c01929a..89ba71271e5c 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -139,16 +139,16 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
hws[IMX6SX_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- hws[IMX6SX_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil"));
- hws[IMX6SX_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc"));
+ hws[IMX6SX_CLK_CKIL] = imx_obtain_fixed_clk_hw(ccm_node, "ckil");
+ hws[IMX6SX_CLK_OSC] = imx_obtain_fixed_clk_hw(ccm_node, "osc");
/* ipp_di clock is external input */
- hws[IMX6SX_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0"));
- hws[IMX6SX_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1"));
+ hws[IMX6SX_CLK_IPP_DI0] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di0");
+ hws[IMX6SX_CLK_IPP_DI1] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di1");
/* Clock source from external clock via CLK1/2 PAD */
- hws[IMX6SX_CLK_ANACLK1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "anaclk1"));
- hws[IMX6SX_CLK_ANACLK2] = __clk_get_hw(of_clk_get_by_name(ccm_node, "anaclk2"));
+ hws[IMX6SX_CLK_ANACLK1] = imx_obtain_fixed_clk_hw(ccm_node, "anaclk1");
+ hws[IMX6SX_CLK_ANACLK2] = imx_obtain_fixed_clk_hw(ccm_node, "anaclk2");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index bc931988fe7b..dafc8806b03e 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -126,12 +126,12 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- hws[IMX6UL_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil"));
- hws[IMX6UL_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc"));
+ hws[IMX6UL_CLK_CKIL] = imx_obtain_fixed_clk_hw(ccm_node, "ckil");
+ hws[IMX6UL_CLK_OSC] = imx_obtain_fixed_clk_hw(ccm_node, "osc");
/* ipp_di clock is external input */
- hws[IMX6UL_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0"));
- hws[IMX6UL_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1"));
+ hws[IMX6UL_CLK_IPP_DI0] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di0");
+ hws[IMX6UL_CLK_IPP_DI1] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di1");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index fbea774ef687..0c9f7adb41ae 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -403,8 +403,8 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws = clk_hw_data->hws;
hws[IMX7D_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- hws[IMX7D_OSC_24M_CLK] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc"));
- hws[IMX7D_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil"));
+ hws[IMX7D_OSC_24M_CLK] = imx_obtain_fixed_clk_hw(ccm_node, "osc");
+ hws[IMX7D_CKIL] = imx_obtain_fixed_clk_hw(ccm_node, "ckil");
np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 2022d9bead91..3fdf3d494f0a 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -24,11 +24,11 @@ static const char * const spll_pfd_sels[] = { "spll_pfd0", "spll_pfd1", "spll_pf
static const char * const spll_sels[] = { "spll", "spll_pfd_sel", };
static const char * const apll_pfd_sels[] = { "apll_pfd0", "apll_pfd1", "apll_pfd2", "apll_pfd3", };
static const char * const apll_sels[] = { "apll", "apll_pfd_sel", };
-static const char * const scs_sels[] = { "dummy", "sosc", "sirc", "firc", "dummy", "apll_sel", "spll_sel", "upll", };
-static const char * const ddr_sels[] = { "apll_pfd_sel", "upll", };
+static const char * const scs_sels[] = { "dummy", "sosc", "sirc", "firc", "dummy", "apll_sel", "spll_sel", "dummy", };
+static const char * const ddr_sels[] = { "apll_pfd_sel", "dummy", "dummy", "dummy", };
static const char * const nic_sels[] = { "firc", "ddr_clk", };
static const char * const periph_plat_sels[] = { "dummy", "nic1_bus_clk", "nic1_clk", "ddr_clk", "apll_pfd2", "apll_pfd1", "apll_pfd0", "upll", };
-static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "mpll", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", };
+static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "dummy", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", };
static const char * const arm_sels[] = { "divcore", "dummy", "dummy", "hsrun_divcore", };
/* used by sosc/sirc/firc/ddr/spll/apll dividers */
@@ -75,7 +75,6 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
clks[IMX7ULP_CLK_SOSC] = imx_obtain_fixed_clk_hw(np, "sosc");
clks[IMX7ULP_CLK_SIRC] = imx_obtain_fixed_clk_hw(np, "sirc");
clks[IMX7ULP_CLK_FIRC] = imx_obtain_fixed_clk_hw(np, "firc");
- clks[IMX7ULP_CLK_MIPI_PLL] = imx_obtain_fixed_clk_hw(np, "mpll");
clks[IMX7ULP_CLK_UPLL] = imx_obtain_fixed_clk_hw(np, "upll");
/* SCG1 */
@@ -118,7 +117,7 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
clks[IMX7ULP_CLK_SYS_SEL] = imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
clks[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
clks[IMX7ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
- clks[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 1, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ clks[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
clks[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
clks[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 172589e94f60..030b15d7c0ce 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -26,73 +26,6 @@ static u32 share_count_disp;
static u32 share_count_pdm;
static u32 share_count_nand;
-static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = {
- PLL_1416X_RATE(1800000000U, 225, 3, 0),
- PLL_1416X_RATE(1600000000U, 200, 3, 0),
- PLL_1416X_RATE(1200000000U, 300, 3, 1),
- PLL_1416X_RATE(1000000000U, 250, 3, 1),
- PLL_1416X_RATE(800000000U, 200, 3, 1),
- PLL_1416X_RATE(750000000U, 250, 2, 2),
- PLL_1416X_RATE(700000000U, 350, 3, 2),
- PLL_1416X_RATE(600000000U, 300, 3, 2),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = {
- PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
- PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
- PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_drampll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
-};
-
-static struct imx_pll14xx_clk imx8mm_audio_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_audiopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_audiopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_video_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_videopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_videopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_dram_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_drampll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_drampll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_arm_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_gpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_vpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_sys_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
static const char *pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
@@ -101,8 +34,6 @@ static const char *dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
-static const char *sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
-static const char *sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
/* CCM ROOT */
@@ -392,20 +323,18 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mm_audio_pll);
- clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mm_audio_pll);
- clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mm_video_pll);
- clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mm_dram_pll);
- clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mm_gpu_pll);
- clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mm_vpu_pll);
- clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mm_arm_pll);
- clks[IMX8MM_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mm_sys_pll);
- clks[IMX8MM_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mm_sys_pll);
- clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mm_sys_pll);
+ clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+ clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+ clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
+ clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+ clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ clks[IMX8MM_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
+ clks[IMX8MM_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
+ clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
/* PLL bypass out */
clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
@@ -415,8 +344,6 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
/* PLL out gate */
@@ -427,29 +354,48 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
- clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 11);
- clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 11);
clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
- /* SYS PLL fixed output */
- clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
- clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
- clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
- clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
- clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
- clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
- clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
- clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ /* SYS PLL1 fixed output */
+ clks[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+ clks[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+ clks[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+ clks[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+ clks[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+ clks[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+ clks[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+ clks[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+ clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+
+ clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+ clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+ clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+ clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+ clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+ clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+ clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+ clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
- clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
- clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
- clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
- clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
- clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
- clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
- clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
- clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ /* SYS PLL2 fixed output */
+ clks[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+ clks[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+ clks[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+ clks[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+ clks[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+ clks[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+ clks[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+ clks[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+ clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+
+ clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+ clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+ clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+ clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+ clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+ clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+ clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+ clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
np = dev->of_node;
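
For reference, the *_cg clocks added above are plain branch gates fed directly from the fixed-rate sys_pll1 and sys_pll2, and the rates encoded in the clock names follow from the fixed-factor divisors hung off those gates. A minimal standalone sketch of that arithmetic (illustrative only, not driver code; it assumes sys_pll1 runs at 800 MHz and sys_pll2 at 1000 MHz, as registered with imx_clk_fixed() in the i.MX8MN/8MQ hunks below and implied by the _800m/_1000m names):

#include <stdio.h>

int main(void)
{
	/* Assumed fixed PLL rates; see imx_clk_fixed("sys_pll1"/"sys_pll2") below */
	const unsigned long sys_pll1 = 800000000UL;
	const unsigned long sys_pll2 = 1000000000UL;
	/* Divisors used by the imx_clk_fixed_factor(..., 1, N) entries above */
	const unsigned int divs[] = { 20, 10, 8, 6, 5, 4, 3, 2, 1 };
	unsigned int i;

	for (i = 0; i < sizeof(divs) / sizeof(divs[0]); i++)
		printf("sys_pll1 / %2u = %lu Hz, sys_pll2 / %2u = %lu Hz\n",
		       divs[i], sys_pll1 / divs[i],
		       divs[i], sys_pll2 / divs[i]);

	return 0;
}

For example, 800 MHz / 6 gives the ~133 MHz behind the sys_pll1_133m name and 1000 MHz / 3 the ~333 MHz behind sys_pll2_333m; the divide-by-1 branches (sys_pll1_800m, sys_pll2_1000m) remain parented on sys_pllX_out.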
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 58b5acee3830..9f5a5a56b45e 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -25,89 +25,6 @@ static u32 share_count_disp;
static u32 share_count_pdm;
static u32 share_count_nand;
-enum {
- ARM_PLL,
- GPU_PLL,
- VPU_PLL,
- SYS_PLL1,
- SYS_PLL2,
- SYS_PLL3,
- DRAM_PLL,
- AUDIO_PLL1,
- AUDIO_PLL2,
- VIDEO_PLL2,
- NR_PLLS,
-};
-
-static const struct imx_pll14xx_rate_table imx8mn_pll1416x_tbl[] = {
- PLL_1416X_RATE(1800000000U, 225, 3, 0),
- PLL_1416X_RATE(1600000000U, 200, 3, 0),
- PLL_1416X_RATE(1500000000U, 375, 3, 1),
- PLL_1416X_RATE(1400000000U, 350, 3, 1),
- PLL_1416X_RATE(1200000000U, 300, 3, 1),
- PLL_1416X_RATE(1000000000U, 250, 3, 1),
- PLL_1416X_RATE(800000000U, 200, 3, 1),
- PLL_1416X_RATE(750000000U, 250, 2, 2),
- PLL_1416X_RATE(700000000U, 350, 3, 2),
- PLL_1416X_RATE(600000000U, 300, 3, 2),
-};
-
-static const struct imx_pll14xx_rate_table imx8mn_audiopll_tbl[] = {
- PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
- PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
-};
-
-static const struct imx_pll14xx_rate_table imx8mn_videopll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
- PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
-};
-
-static const struct imx_pll14xx_rate_table imx8mn_drampll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
-};
-
-static struct imx_pll14xx_clk imx8mn_audio_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mn_audiopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_audiopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_video_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mn_videopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_videopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_dram_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mn_drampll_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_drampll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_arm_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mn_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_gpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mn_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_vpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mn_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_sys_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mn_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl),
-};
-
static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
@@ -116,8 +33,6 @@ static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_se
static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
-static const char * const sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
-static const char * const sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m",
@@ -405,20 +320,18 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
clks[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mn_audio_pll);
- clks[IMX8MN_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mn_audio_pll);
- clks[IMX8MN_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mn_video_pll);
- clks[IMX8MN_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mn_dram_pll);
- clks[IMX8MN_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mn_gpu_pll);
- clks[IMX8MN_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mn_vpu_pll);
- clks[IMX8MN_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mn_arm_pll);
- clks[IMX8MN_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mn_sys_pll);
- clks[IMX8MN_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mn_sys_pll);
- clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mn_sys_pll);
+ clks[IMX8MN_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ clks[IMX8MN_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+ clks[IMX8MN_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+ clks[IMX8MN_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
+ clks[IMX8MN_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+ clks[IMX8MN_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ clks[IMX8MN_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ clks[IMX8MN_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
+ clks[IMX8MN_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
+ clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
/* PLL bypass out */
clks[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
@@ -428,8 +341,6 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
clks[IMX8MN_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MN_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MN_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
/* PLL out gate */
@@ -440,29 +351,48 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
clks[IMX8MN_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
clks[IMX8MN_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
clks[IMX8MN_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
- clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 11);
- clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 11);
clks[IMX8MN_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
- /* SYS PLL fixed output */
- clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
- clks[IMX8MN_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
- clks[IMX8MN_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
- clks[IMX8MN_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
- clks[IMX8MN_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
- clks[IMX8MN_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
- clks[IMX8MN_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
- clks[IMX8MN_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ /* SYS PLL1 fixed output */
+ clks[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+ clks[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+ clks[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+ clks[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+ clks[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+ clks[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+ clks[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+ clks[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+ clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+
+ clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+ clks[IMX8MN_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+ clks[IMX8MN_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+ clks[IMX8MN_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+ clks[IMX8MN_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+ clks[IMX8MN_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+ clks[IMX8MN_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+ clks[IMX8MN_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
clks[IMX8MN_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
- clks[IMX8MN_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
- clks[IMX8MN_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
- clks[IMX8MN_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
- clks[IMX8MN_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
- clks[IMX8MN_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
- clks[IMX8MN_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
- clks[IMX8MN_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
- clks[IMX8MN_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ /* SYS PLL2 fixed output */
+ clks[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+ clks[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+ clks[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+ clks[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+ clks[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+ clks[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+ clks[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+ clks[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+ clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+
+ clks[IMX8MN_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+ clks[IMX8MN_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+ clks[IMX8MN_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+ clks[IMX8MN_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+ clks[IMX8MN_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+ clks[IMX8MN_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+ clks[IMX8MN_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+ clks[IMX8MN_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
clks[IMX8MN_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
np = dev->of_node;
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 41fc9c63356e..5f10a606d836 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -34,10 +34,9 @@ static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_
static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
-static const char * const sys1_pll_out_sels[] = {"sys1_pll1_ref_sel", };
-static const char * const sys2_pll_out_sels[] = {"sys1_pll1_ref_sel", "sys2_pll1_ref_sel", };
-static const char * const sys3_pll_out_sels[] = {"sys3_pll1_ref_sel", "sys2_pll1_ref_sel", };
+static const char * const sys3_pll_out_sels[] = {"sys3_pll1_ref_sel", };
static const char * const dram_pll_out_sels[] = {"dram_pll1_ref_sel", };
+static const char * const video2_pll_out_sels[] = {"video2_pll1_ref_sel", };
/* CCM ROOT */
static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
@@ -307,10 +306,9 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_SYS1_PLL1_REF_SEL] = imx_clk_mux("sys1_pll1_ref_sel", base + 0x30, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_SYS2_PLL1_REF_SEL] = imx_clk_mux("sys2_pll1_ref_sel", base + 0x3c, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_SYS3_PLL1_REF_SEL] = imx_clk_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_DRAM_PLL1_REF_SEL] = imx_clk_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_VIDEO2_PLL1_REF_SEL] = imx_clk_mux("video2_pll1_ref_sel", base + 0x54, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_ARM_PLL_REF_DIV] = imx_clk_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
clks[IMX8MQ_GPU_PLL_REF_DIV] = imx_clk_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
@@ -342,30 +340,53 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
- clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_sccg_pll("sys1_pll_out", sys1_pll_out_sels, ARRAY_SIZE(sys1_pll_out_sels), 0, 0, 0, base + 0x30, CLK_IS_CRITICAL);
- clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_sccg_pll("sys2_pll_out", sys2_pll_out_sels, ARRAY_SIZE(sys2_pll_out_sels), 0, 0, 1, base + 0x3c, CLK_IS_CRITICAL);
- clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 1, base + 0x48, CLK_IS_CRITICAL);
+ clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_fixed("sys1_pll_out", 800000000);
+ clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_fixed("sys2_pll_out", 1000000000);
+ clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL);
- /* SYS PLL fixed output */
- clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
- clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
- clks[IMX8MQ_SYS1_PLL_100M] = imx_clk_fixed_factor("sys1_pll_100m", "sys1_pll_out", 1, 8);
- clks[IMX8MQ_SYS1_PLL_133M] = imx_clk_fixed_factor("sys1_pll_133m", "sys1_pll_out", 1, 6);
- clks[IMX8MQ_SYS1_PLL_160M] = imx_clk_fixed_factor("sys1_pll_160m", "sys1_pll_out", 1, 5);
- clks[IMX8MQ_SYS1_PLL_200M] = imx_clk_fixed_factor("sys1_pll_200m", "sys1_pll_out", 1, 4);
- clks[IMX8MQ_SYS1_PLL_266M] = imx_clk_fixed_factor("sys1_pll_266m", "sys1_pll_out", 1, 3);
- clks[IMX8MQ_SYS1_PLL_400M] = imx_clk_fixed_factor("sys1_pll_400m", "sys1_pll_out", 1, 2);
- clks[IMX8MQ_SYS1_PLL_800M] = imx_clk_fixed_factor("sys1_pll_800m", "sys1_pll_out", 1, 1);
-
- clks[IMX8MQ_SYS2_PLL_50M] = imx_clk_fixed_factor("sys2_pll_50m", "sys2_pll_out", 1, 20);
- clks[IMX8MQ_SYS2_PLL_100M] = imx_clk_fixed_factor("sys2_pll_100m", "sys2_pll_out", 1, 10);
- clks[IMX8MQ_SYS2_PLL_125M] = imx_clk_fixed_factor("sys2_pll_125m", "sys2_pll_out", 1, 8);
- clks[IMX8MQ_SYS2_PLL_166M] = imx_clk_fixed_factor("sys2_pll_166m", "sys2_pll_out", 1, 6);
- clks[IMX8MQ_SYS2_PLL_200M] = imx_clk_fixed_factor("sys2_pll_200m", "sys2_pll_out", 1, 5);
- clks[IMX8MQ_SYS2_PLL_250M] = imx_clk_fixed_factor("sys2_pll_250m", "sys2_pll_out", 1, 4);
- clks[IMX8MQ_SYS2_PLL_333M] = imx_clk_fixed_factor("sys2_pll_333m", "sys2_pll_out", 1, 3);
- clks[IMX8MQ_SYS2_PLL_500M] = imx_clk_fixed_factor("sys2_pll_500m", "sys2_pll_out", 1, 2);
- clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_out", 1, 1);
+ clks[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_sccg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
+
+ /* SYS PLL1 fixed output */
+ clks[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
+ clks[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
+ clks[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
+ clks[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
+ clks[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
+ clks[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
+ clks[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
+ clks[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
+ clks[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
+
+ clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
+ clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
+ clks[IMX8MQ_SYS1_PLL_100M] = imx_clk_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
+ clks[IMX8MQ_SYS1_PLL_133M] = imx_clk_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
+ clks[IMX8MQ_SYS1_PLL_160M] = imx_clk_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
+ clks[IMX8MQ_SYS1_PLL_200M] = imx_clk_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
+ clks[IMX8MQ_SYS1_PLL_266M] = imx_clk_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
+ clks[IMX8MQ_SYS1_PLL_400M] = imx_clk_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
+ clks[IMX8MQ_SYS1_PLL_800M] = imx_clk_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
+
+ /* SYS PLL2 fixed output */
+ clks[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
+ clks[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
+ clks[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
+ clks[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
+ clks[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
+ clks[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
+ clks[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
+ clks[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
+ clks[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
+
+ clks[IMX8MQ_SYS2_PLL_50M] = imx_clk_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
+ clks[IMX8MQ_SYS2_PLL_100M] = imx_clk_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
+ clks[IMX8MQ_SYS2_PLL_125M] = imx_clk_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
+ clks[IMX8MQ_SYS2_PLL_166M] = imx_clk_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
+ clks[IMX8MQ_SYS2_PLL_200M] = imx_clk_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
+ clks[IMX8MQ_SYS2_PLL_250M] = imx_clk_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
+ clks[IMX8MQ_SYS2_PLL_333M] = imx_clk_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
+ clks[IMX8MQ_SYS2_PLL_500M] = imx_clk_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
+ clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 7a815ec76aa5..5c458199060a 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -41,6 +41,38 @@ struct clk_pll14xx {
#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)
+static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
+ PLL_1416X_RATE(1800000000U, 225, 3, 0),
+ PLL_1416X_RATE(1600000000U, 200, 3, 0),
+ PLL_1416X_RATE(1500000000U, 375, 3, 1),
+ PLL_1416X_RATE(1400000000U, 350, 3, 1),
+ PLL_1416X_RATE(1200000000U, 300, 3, 1),
+ PLL_1416X_RATE(1000000000U, 250, 3, 1),
+ PLL_1416X_RATE(800000000U, 200, 3, 1),
+ PLL_1416X_RATE(750000000U, 250, 2, 2),
+ PLL_1416X_RATE(700000000U, 350, 3, 2),
+ PLL_1416X_RATE(600000000U, 300, 3, 2),
+};
+
+static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+ PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+ PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
+ PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
+};
+
+struct imx_pll14xx_clk imx_1443x_pll = {
+ .type = PLL_1443X,
+ .rate_table = imx_pll1443x_tbl,
+ .rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
+};
+
+struct imx_pll14xx_clk imx_1416x_pll = {
+ .type = PLL_1416X,
+ .rate_table = imx_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
+};
+
static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
struct clk_pll14xx *pll, unsigned long rate)
{
@@ -112,43 +144,17 @@ static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
return fvco;
}
-static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate,
+static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
u32 pll_div)
{
u32 old_mdiv, old_pdiv;
- old_mdiv = (pll_div >> MDIV_SHIFT) & MDIV_MASK;
- old_pdiv = (pll_div >> PDIV_SHIFT) & PDIV_MASK;
+ old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
+ old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
}
-static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate,
- u32 pll_div_ctl0, u32 pll_div_ctl1)
-{
- u32 old_mdiv, old_pdiv, old_kdiv;
-
- old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK;
- old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK;
- old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK;
-
- return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
- rate->kdiv != old_kdiv;
-}
-
-static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate,
- u32 pll_div_ctl0, u32 pll_div_ctl1)
-{
- u32 old_mdiv, old_pdiv, old_kdiv;
-
- old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK;
- old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK;
- old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK;
-
- return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
- rate->kdiv != old_kdiv;
-}
-
static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
u32 val;
@@ -174,7 +180,7 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
tmp = readl_relaxed(pll->base + 4);
- if (!clk_pll1416x_mp_change(rate, tmp)) {
+ if (!clk_pll14xx_mp_change(rate, tmp)) {
tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
tmp |= rate->sdiv << SDIV_SHIFT;
writel_relaxed(tmp, pll->base + 4);
@@ -239,13 +245,15 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
}
tmp = readl_relaxed(pll->base + 4);
- div_val = readl_relaxed(pll->base + 8);
- if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) {
+ if (!clk_pll14xx_mp_change(rate, tmp)) {
tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
tmp |= rate->sdiv << SDIV_SHIFT;
writel_relaxed(tmp, pll->base + 4);
+ tmp = rate->kdiv << KDIV_SHIFT;
+ writel_relaxed(tmp, pll->base + 8);
+
return 0;
}
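
The shared imx_pll1416x_tbl added above can be sanity-checked against a 24 MHz reference. The relation used here, fout = fref * mdiv / (pdiv * 2^sdiv), is an assumption taken from how the 1416x recalc path divides fvco by pdiv << sdiv; a standalone sketch:

#include <stdio.h>

/* Illustrative check of a few PLL_1416X_RATE(rate, m, p, s) entries
 * against a 24 MHz reference, assuming fout = fref * m / (p << s).
 */
struct entry { unsigned long rate; unsigned int m, p, s; };

int main(void)
{
	const unsigned long fref = 24000000UL;
	const struct entry tbl[] = {
		{ 1800000000UL, 225, 3, 0 },
		{ 1600000000UL, 200, 3, 0 },
		{ 1200000000UL, 300, 3, 1 },
		{  800000000UL, 200, 3, 1 },
		{  600000000UL, 300, 3, 2 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		unsigned long long fout = (unsigned long long)fref * tbl[i].m
					  / (tbl[i].p << tbl[i].s);
		printf("table %lu Hz, computed %llu Hz\n", tbl[i].rate, fout);
	}
	return 0;
}

Each computed value matches the table's nominal rate, e.g. 24 MHz * 300 / (3 << 1) = 1.2 GHz.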
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index f7a389a50401..bc5bb6ac8636 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -50,6 +50,9 @@ struct imx_pll14xx_clk {
int flags;
};
+extern struct imx_pll14xx_clk imx_1416x_pll;
+extern struct imx_pll14xx_clk imx_1443x_pll;
+
#define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk
diff --git a/drivers/clk/ingenic/Kconfig b/drivers/clk/ingenic/Kconfig
index 1cb489959a99..b4555b465ea6 100644
--- a/drivers/clk/ingenic/Kconfig
+++ b/drivers/clk/ingenic/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Ingenic SoCs drivers"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
config INGENIC_CGU_COMMON
bool
@@ -45,6 +45,16 @@ config INGENIC_CGU_JZ4780
If building for a JZ4780 SoC, you want to say Y here.
+config INGENIC_CGU_X1000
+ bool "Ingenic X1000 CGU driver"
+ default MACH_X1000
+ select INGENIC_CGU_COMMON
+ help
+ Support the clocks provided by the CGU hardware on Ingenic X1000
+ and compatible SoCs.
+
+ If building for an X1000 SoC, you want to say Y here.
+
config INGENIC_TCU_CLK
bool "Ingenic JZ47xx TCU clocks driver"
default MACH_INGENIC
diff --git a/drivers/clk/ingenic/Makefile b/drivers/clk/ingenic/Makefile
index 097220b05131..8b1dad9b74a7 100644
--- a/drivers/clk/ingenic/Makefile
+++ b/drivers/clk/ingenic/Makefile
@@ -4,4 +4,5 @@ obj-$(CONFIG_INGENIC_CGU_JZ4740) += jz4740-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4725B) += jz4725b-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4770) += jz4770-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4780) += jz4780-cgu.o
+obj-$(CONFIG_INGENIC_CGU_X1000) += x1000-cgu.o
obj-$(CONFIG_INGENIC_TCU_CLK) += tcu.o
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index a1a5f9cb439e..ad7daa494fd4 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -358,8 +358,7 @@ static int __init ingenic_tcu_probe(struct device_node *np)
}
}
- tcu->clocks = kzalloc(sizeof(*tcu->clocks) +
- sizeof(*tcu->clocks->hws) * TCU_CLK_COUNT,
+ tcu->clocks = kzalloc(struct_size(tcu->clocks, hws, TCU_CLK_COUNT),
GFP_KERNEL);
if (!tcu->clocks) {
ret = -ENOMEM;
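
The struct_size(tcu->clocks, hws, TCU_CLK_COUNT) form above computes the same allocation size as the open-coded sizeof sum it replaces, with overflow checking added. A rough model of the semantics (paraphrasing include/linux/overflow.h; the real kernel macro saturates to SIZE_MAX on overflow instead of wrapping):

#include <stddef.h>
#include <stdio.h>

/* Rough model: struct_size(p, member, n) == sizeof(*p) + n * sizeof(*p->member)
 * for a struct ending in a flexible array member.
 */
struct clocks {
	unsigned long data;
	void *hws[];		/* flexible array, like clk_hw_onecell_data */
};

#define STRUCT_SIZE(p, member, n) \
	(sizeof(*(p)) + (size_t)(n) * sizeof(*(p)->member))

int main(void)
{
	struct clocks *c = NULL;	/* sizeof() never dereferences it */

	printf("%zu bytes for 8 entries\n", STRUCT_SIZE(c, hws, 8));
	return 0;
}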
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
new file mode 100644
index 000000000000..b22d87b3f555
--- /dev/null
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * X1000 SoC CGU driver
+ * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <dt-bindings/clock/x1000-cgu.h>
+#include "cgu.h"
+#include "pm.h"
+
+/* CGU register offsets */
+#define CGU_REG_CPCCR 0x00
+#define CGU_REG_APLL 0x10
+#define CGU_REG_MPLL 0x14
+#define CGU_REG_CLKGR 0x20
+#define CGU_REG_OPCR 0x24
+#define CGU_REG_DDRCDR 0x2c
+#define CGU_REG_MACCDR 0x54
+#define CGU_REG_I2SCDR 0x60
+#define CGU_REG_LPCDR 0x64
+#define CGU_REG_MSC0CDR 0x68
+#define CGU_REG_I2SCDR1 0x70
+#define CGU_REG_SSICDR 0x74
+#define CGU_REG_CIMCDR 0x7c
+#define CGU_REG_PCMCDR 0x84
+#define CGU_REG_MSC1CDR 0xa4
+#define CGU_REG_CMP_INTR 0xb0
+#define CGU_REG_CMP_INTRE 0xb4
+#define CGU_REG_DRCG 0xd0
+#define CGU_REG_CPCSR 0xd4
+#define CGU_REG_PCMCDR1 0xe0
+#define CGU_REG_MACPHYC 0xe8
+
+/* bits within the OPCR register */
+#define OPCR_SPENDN0 BIT(7)
+#define OPCR_SPENDN1 BIT(6)
+
+static struct ingenic_cgu *cgu;
+
+static const s8 pll_od_encoding[8] = {
+ 0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
+};
+
+static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
+
+ /* External clocks */
+
+ [X1000_CLK_EXCLK] = { "ext", CGU_CLK_EXT },
+ [X1000_CLK_RTCLK] = { "rtc", CGU_CLK_EXT },
+
+ /* PLLs */
+
+ [X1000_CLK_APLL] = {
+ "apll", CGU_CLK_PLL,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_APLL,
+ .m_shift = 24,
+ .m_bits = 7,
+ .m_offset = 1,
+ .n_shift = 18,
+ .n_bits = 5,
+ .n_offset = 1,
+ .od_shift = 16,
+ .od_bits = 2,
+ .od_max = 8,
+ .od_encoding = pll_od_encoding,
+ .bypass_bit = 9,
+ .enable_bit = 8,
+ .stable_bit = 10,
+ },
+ },
+
+ [X1000_CLK_MPLL] = {
+ "mpll", CGU_CLK_PLL,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_MPLL,
+ .m_shift = 24,
+ .m_bits = 7,
+ .m_offset = 1,
+ .n_shift = 18,
+ .n_bits = 5,
+ .n_offset = 1,
+ .od_shift = 16,
+ .od_bits = 2,
+ .od_max = 8,
+ .od_encoding = pll_od_encoding,
+ .bypass_bit = 6,
+ .enable_bit = 7,
+ .stable_bit = 0,
+ },
+ },
+
+ /* Muxes & dividers */
+
+ [X1000_CLK_SCLKA] = {
+ "sclk_a", CGU_CLK_MUX,
+ .parents = { -1, X1000_CLK_EXCLK, X1000_CLK_APLL, -1 },
+ .mux = { CGU_REG_CPCCR, 30, 2 },
+ },
+
+ [X1000_CLK_CPUMUX] = {
+ "cpu_mux", CGU_CLK_MUX,
+ .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 28, 2 },
+ },
+
+ [X1000_CLK_CPU] = {
+ "cpu", CGU_CLK_DIV,
+ .parents = { X1000_CLK_CPUMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
+ },
+
+ [X1000_CLK_L2CACHE] = {
+ "l2cache", CGU_CLK_DIV,
+ .parents = { X1000_CLK_CPUMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 },
+ },
+
+ [X1000_CLK_AHB0] = {
+ "ahb0", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 26, 2 },
+ .div = { CGU_REG_CPCCR, 8, 1, 4, 21, -1, -1 },
+ },
+
+ [X1000_CLK_AHB2PMUX] = {
+ "ahb2_apb_mux", CGU_CLK_MUX,
+ .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 24, 2 },
+ },
+
+ [X1000_CLK_AHB2] = {
+ "ahb2", CGU_CLK_DIV,
+ .parents = { X1000_CLK_AHB2PMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 12, 1, 4, 20, -1, -1 },
+ },
+
+ [X1000_CLK_PCLK] = {
+ "pclk", CGU_CLK_DIV,
+ .parents = { X1000_CLK_AHB2PMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 16, 1, 4, 20, -1, -1 },
+ },
+
+ [X1000_CLK_DDR] = {
+ "ddr", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
+ .mux = { CGU_REG_DDRCDR, 30, 2 },
+ .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 31 },
+ },
+
+ [X1000_CLK_MAC] = {
+ "mac", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL},
+ .mux = { CGU_REG_MACCDR, 31, 1 },
+ .div = { CGU_REG_MACCDR, 0, 1, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 25 },
+ },
+
+ [X1000_CLK_MSCMUX] = {
+ "msc_mux", CGU_CLK_MUX,
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL},
+ .mux = { CGU_REG_MSC0CDR, 31, 1 },
+ },
+
+ [X1000_CLK_MSC0] = {
+ "msc0", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1000_CLK_MSCMUX, -1, -1, -1 },
+ .div = { CGU_REG_MSC0CDR, 0, 2, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 4 },
+ },
+
+ [X1000_CLK_MSC1] = {
+ "msc1", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1000_CLK_MSCMUX, -1, -1, -1 },
+ .div = { CGU_REG_MSC1CDR, 0, 2, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 5 },
+ },
+
+ [X1000_CLK_SSIPLL] = {
+ "ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL, -1, -1 },
+ .mux = { CGU_REG_SSICDR, 31, 1 },
+ .div = { CGU_REG_SSICDR, 0, 1, 8, 29, 28, 27 },
+ },
+
+ [X1000_CLK_SSIMUX] = {
+ "ssi_mux", CGU_CLK_MUX,
+ .parents = { X1000_CLK_EXCLK, X1000_CLK_SSIPLL, -1, -1 },
+ .mux = { CGU_REG_SSICDR, 30, 1 },
+ },
+
+ /* Gate-only clocks */
+
+ [X1000_CLK_SFC] = {
+ "sfc", CGU_CLK_GATE,
+ .parents = { X1000_CLK_SSIPLL, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 2 },
+ },
+
+ [X1000_CLK_I2C0] = {
+ "i2c0", CGU_CLK_GATE,
+ .parents = { X1000_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 7 },
+ },
+
+ [X1000_CLK_I2C1] = {
+ "i2c1", CGU_CLK_GATE,
+ .parents = { X1000_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 8 },
+ },
+
+ [X1000_CLK_I2C2] = {
+ "i2c2", CGU_CLK_GATE,
+ .parents = { X1000_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 9 },
+ },
+
+ [X1000_CLK_UART0] = {
+ "uart0", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 14 },
+ },
+
+ [X1000_CLK_UART1] = {
+ "uart1", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 15 },
+ },
+
+ [X1000_CLK_UART2] = {
+ "uart2", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 16 },
+ },
+
+ [X1000_CLK_SSI] = {
+ "ssi", CGU_CLK_GATE,
+ .parents = { X1000_CLK_SSIMUX, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 19 },
+ },
+
+ [X1000_CLK_PDMA] = {
+ "pdma", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 21 },
+ },
+};
+
+static void __init x1000_cgu_init(struct device_node *np)
+{
+ int retval;
+
+ cgu = ingenic_cgu_new(x1000_cgu_clocks,
+ ARRAY_SIZE(x1000_cgu_clocks), np);
+ if (!cgu) {
+ pr_err("%s: failed to initialise CGU\n", __func__);
+ return;
+ }
+
+ retval = ingenic_cgu_register_clocks(cgu);
+ if (retval) {
+ pr_err("%s: failed to register CGU Clocks\n", __func__);
+ return;
+ }
+
+ ingenic_cgu_register_syscore_ops(cgu);
+}
+CLK_OF_DECLARE(x1000_cgu, "ingenic,x1000-cgu", x1000_cgu_init);
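
In the pll_od_encoding[] table added above, index od - 1 holds the register code for post-divider OD, and -1 marks divider values the hardware cannot produce, so only OD = 1, 2, 4 and 8 are selectable. A small standalone sketch of that lookup (illustrative; the real decoding lives in the shared Ingenic cgu.c code):

#include <stdio.h>

/* Same contents as the x1000-cgu.c table above. */
static const signed char pll_od_encoding[8] = {
	0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
};

int main(void)
{
	unsigned int od;

	for (od = 1; od <= 8; od++) {
		signed char code = pll_od_encoding[od - 1];

		if (code < 0)
			printf("OD=%u: unsupported\n", od);
		else
			printf("OD=%u: register code 0x%x\n", od,
			       (unsigned int)code);
	}
	return 0;
}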
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 354c26f663b4..a3bd9a107209 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -1306,9 +1306,8 @@ static int clk_mt2712_top_probe(struct platform_device *pdev)
int r, i;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
pr_err("%s(): ioremap failed\n", __func__);
return PTR_ERR(base);
@@ -1394,9 +1393,8 @@ static int clk_mt2712_mcu_probe(struct platform_device *pdev)
int r;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
pr_err("%s(): ioremap failed\n", __func__);
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 608a9a6621a3..9766cccf5844 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -1225,12 +1225,11 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
static int clk_mt6779_top_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *base;
struct clk_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index f62b0428da0e..f35389a11af1 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -385,9 +385,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
struct clk_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 8190dab179d7..ef5947e15c75 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -614,9 +614,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
struct clk_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -695,9 +694,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
void __iomem *base;
int r;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index d6233994af5a..b73bdf152836 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -574,9 +574,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
struct clk_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -626,9 +625,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
void __iomem *base;
int r;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 51c8d5c9a030..5046852eb0fd 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -1189,11 +1189,10 @@ CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
static int clk_mt8183_top_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1262,9 +1261,8 @@ static int clk_mt8183_mcu_probe(struct platform_device *pdev)
struct clk_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
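
All of the MediaTek conversions above fold the platform_get_resource() + devm_ioremap_resource() pair into a single devm_platform_ioremap_resource() call. Roughly paraphrased from drivers/base/platform.c (details may vary by kernel version), the helper is:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Paraphrase of the helper used in the conversions above; see
 * drivers/base/platform.c in the matching tree for the authoritative version.
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}

Error handling is unchanged: devm_ioremap_resource() still returns an ERR_PTR() on failure, which is why the IS_ERR(base) checks in the hunks above stay as they were.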
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 18b23cdf679c..53715e36326c 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -20,12 +20,7 @@
#include "clk-phase.h"
#include "sclk-div.h"
-#define AUD_MST_IN_COUNT 8
-#define AUD_SLV_SCLK_COUNT 10
-#define AUD_SLV_LRCLK_COUNT 10
-
-#define AUD_GATE(_name, _reg, _bit, _phws, _iflags) \
-struct clk_regmap aud_##_name = { \
+#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
.bit_idx = (_bit), \
@@ -33,14 +28,13 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
+ .parent_names = (const char *[]){ #_pname }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
}, \
}
-#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pdata, _iflags) \
-struct clk_regmap aud_##_name = { \
+#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pdata, _iflags) { \
.data = &(struct clk_regmap_mux_data){ \
.offset = (_reg), \
.mask = (_mask), \
@@ -56,8 +50,7 @@ struct clk_regmap aud_##_name = { \
}, \
}
-#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _phws, _iflags) \
-struct clk_regmap aud_##_name = { \
+#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) { \
.data = &(struct clk_regmap_div_data){ \
.offset = (_reg), \
.shift = (_shift), \
@@ -67,137 +60,27 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data){ \
.name = "aud_"#_name, \
.ops = &clk_regmap_divider_ops, \
- .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
+ .parent_names = (const char *[]){ #_pname }, \
.num_parents = 1, \
.flags = (_iflags), \
}, \
}
-#define AUD_PCLK_GATE(_name, _bit) \
-struct clk_regmap aud_##_name = { \
+#define AUD_PCLK_GATE(_name, _reg, _bit) { \
.data = &(struct clk_regmap_gate_data){ \
- .offset = (AUDIO_CLK_GATE_EN), \
+ .offset = (_reg), \
.bit_idx = (_bit), \
}, \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "pclk", \
- }, \
+ .parent_names = (const char *[]){ "aud_top" }, \
.num_parents = 1, \
}, \
}
-/* Audio peripheral clocks */
-static AUD_PCLK_GATE(ddr_arb, 0);
-static AUD_PCLK_GATE(pdm, 1);
-static AUD_PCLK_GATE(tdmin_a, 2);
-static AUD_PCLK_GATE(tdmin_b, 3);
-static AUD_PCLK_GATE(tdmin_c, 4);
-static AUD_PCLK_GATE(tdmin_lb, 5);
-static AUD_PCLK_GATE(tdmout_a, 6);
-static AUD_PCLK_GATE(tdmout_b, 7);
-static AUD_PCLK_GATE(tdmout_c, 8);
-static AUD_PCLK_GATE(frddr_a, 9);
-static AUD_PCLK_GATE(frddr_b, 10);
-static AUD_PCLK_GATE(frddr_c, 11);
-static AUD_PCLK_GATE(toddr_a, 12);
-static AUD_PCLK_GATE(toddr_b, 13);
-static AUD_PCLK_GATE(toddr_c, 14);
-static AUD_PCLK_GATE(loopback, 15);
-static AUD_PCLK_GATE(spdifin, 16);
-static AUD_PCLK_GATE(spdifout, 17);
-static AUD_PCLK_GATE(resample, 18);
-static AUD_PCLK_GATE(power_detect, 19);
-static AUD_PCLK_GATE(spdifout_b, 21);
-
-/* Audio Master Clocks */
-static const struct clk_parent_data mst_mux_parent_data[] = {
- { .fw_name = "mst_in0", },
- { .fw_name = "mst_in1", },
- { .fw_name = "mst_in2", },
- { .fw_name = "mst_in3", },
- { .fw_name = "mst_in4", },
- { .fw_name = "mst_in5", },
- { .fw_name = "mst_in6", },
- { .fw_name = "mst_in7", },
-};
-
-#define AUD_MST_MUX(_name, _reg, _flag) \
- AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
- mst_mux_parent_data, 0)
-
-#define AUD_MST_MCLK_MUX(_name, _reg) \
- AUD_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
-
-#define AUD_MST_SYS_MUX(_name, _reg) \
- AUD_MST_MUX(_name, _reg, 0)
-
-static AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AUD_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AUD_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AUD_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AUD_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-static AUD_MST_MCLK_MUX(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
-
-#define AUD_MST_DIV(_name, _reg, _flag) \
- AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
- aud_##_name##_sel, CLK_SET_RATE_PARENT) \
-
-#define AUD_MST_MCLK_DIV(_name, _reg) \
- AUD_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
-
-#define AUD_MST_SYS_DIV(_name, _reg) \
- AUD_MST_DIV(_name, _reg, 0)
-
-static AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AUD_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AUD_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AUD_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AUD_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-static AUD_MST_MCLK_DIV(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
-
-#define AUD_MST_MCLK_GATE(_name, _reg) \
- AUD_GATE(_name, _reg, 31, aud_##_name##_div, \
- CLK_SET_RATE_PARENT)
-
-static AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AUD_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AUD_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AUD_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AUD_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-static AUD_MST_MCLK_GATE(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
-
-/* Sample Clocks */
-#define AUD_MST_SCLK_PRE_EN(_name, _reg) \
- AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
- aud_mst_##_name##_mclk, 0)
-
-static AUD_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
#define AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
- _hi_shift, _hi_width, _phws, _iflags) \
-struct clk_regmap aud_##_name = { \
+ _hi_shift, _hi_width, _pname, _iflags) { \
.data = &(struct meson_sclk_div_data) { \
.div = { \
.reg_off = (_reg), \
@@ -213,38 +96,14 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &meson_sclk_div_ops, \
- .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
+ .parent_names = (const char *[]){ #_pname }, \
.num_parents = 1, \
.flags = (_iflags), \
}, \
}
-#define AUD_MST_SCLK_DIV(_name, _reg) \
- AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
- aud_mst_##_name##_sclk_pre_en, \
- CLK_SET_RATE_PARENT)
-
-static AUD_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AUD_MST_SCLK_POST_EN(_name, _reg) \
- AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
- aud_mst_##_name##_sclk_div, CLK_SET_RATE_PARENT)
-
-static AUD_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
-
#define AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
- _phws, _iflags) \
-struct clk_regmap aud_##_name = { \
+ _pname, _iflags) { \
.data = &(struct meson_clk_triphase_data) { \
.ph0 = { \
.reg_off = (_reg), \
@@ -265,52 +124,91 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &meson_clk_triphase_ops, \
- .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
+ .parent_names = (const char *[]){ #_pname }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
}, \
}
+#define AUD_PHASE(_name, _reg, _width, _shift, _pname, _iflags) { \
+ .data = &(struct meson_clk_phase_data) { \
+ .ph = { \
+ .reg_off = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "aud_"#_name, \
+ .ops = &meson_clk_phase_ops, \
+ .parent_names = (const char *[]){ #_pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+/* Audio Master Clocks */
+static const struct clk_parent_data mst_mux_parent_data[] = {
+ { .fw_name = "mst_in0", },
+ { .fw_name = "mst_in1", },
+ { .fw_name = "mst_in2", },
+ { .fw_name = "mst_in3", },
+ { .fw_name = "mst_in4", },
+ { .fw_name = "mst_in5", },
+ { .fw_name = "mst_in6", },
+ { .fw_name = "mst_in7", },
+};
+
+#define AUD_MST_MUX(_name, _reg, _flag) \
+ AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
+ mst_mux_parent_data, 0)
+#define AUD_MST_DIV(_name, _reg, _flag) \
+ AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
+ aud_##_name##_sel, CLK_SET_RATE_PARENT)
+#define AUD_MST_MCLK_GATE(_name, _reg) \
+ AUD_GATE(_name, _reg, 31, aud_##_name##_div, \
+ CLK_SET_RATE_PARENT)
+
+#define AUD_MST_MCLK_MUX(_name, _reg) \
+ AUD_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
+#define AUD_MST_MCLK_DIV(_name, _reg) \
+ AUD_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
+
+#define AUD_MST_SYS_MUX(_name, _reg) \
+ AUD_MST_MUX(_name, _reg, 0)
+#define AUD_MST_SYS_DIV(_name, _reg) \
+ AUD_MST_DIV(_name, _reg, 0)
+
+/* Sample Clocks */
+#define AUD_MST_SCLK_PRE_EN(_name, _reg) \
+ AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
+ aud_mst_##_name##_mclk, 0)
+#define AUD_MST_SCLK_DIV(_name, _reg) \
+ AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
+ aud_mst_##_name##_sclk_pre_en, \
+ CLK_SET_RATE_PARENT)
+#define AUD_MST_SCLK_POST_EN(_name, _reg) \
+ AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
+ aud_mst_##_name##_sclk_div, CLK_SET_RATE_PARENT)
#define AUD_MST_SCLK(_name, _reg) \
AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
aud_mst_##_name##_sclk_post_en, CLK_SET_RATE_PARENT)
-static AUD_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
-static AUD_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
-static AUD_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
-static AUD_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
-static AUD_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
-static AUD_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
-
#define AUD_MST_LRCLK_DIV(_name, _reg) \
AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
- aud_mst_##_name##_sclk_post_en, 0) \
-
-static AUD_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
-
+ aud_mst_##_name##_sclk_post_en, 0)
#define AUD_MST_LRCLK(_name, _reg) \
AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
aud_mst_##_name##_lrclk_div, CLK_SET_RATE_PARENT)
-static AUD_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
-static AUD_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
-static AUD_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
-static AUD_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
-static AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
-static AUD_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
-
+/* TDM bit clock sources */
static const struct clk_parent_data tdm_sclk_parent_data[] = {
- { .hw = &aud_mst_a_sclk.hw, },
- { .hw = &aud_mst_b_sclk.hw, },
- { .hw = &aud_mst_c_sclk.hw, },
- { .hw = &aud_mst_d_sclk.hw, },
- { .hw = &aud_mst_e_sclk.hw, },
- { .hw = &aud_mst_f_sclk.hw, },
+ { .name = "aud_mst_a_sclk", .index = -1, },
+ { .name = "aud_mst_b_sclk", .index = -1, },
+ { .name = "aud_mst_c_sclk", .index = -1, },
+ { .name = "aud_mst_d_sclk", .index = -1, },
+ { .name = "aud_mst_e_sclk", .index = -1, },
+ { .name = "aud_mst_f_sclk", .index = -1, },
{ .fw_name = "slv_sclk0", },
{ .fw_name = "slv_sclk1", },
{ .fw_name = "slv_sclk2", },
@@ -323,78 +221,14 @@ static const struct clk_parent_data tdm_sclk_parent_data[] = {
{ .fw_name = "slv_sclk9", },
};
-#define AUD_TDM_SCLK_MUX(_name, _reg) \
- AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
- CLK_MUX_ROUND_CLOSEST, \
- tdm_sclk_parent_data, 0)
-
-static AUD_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AUD_TDM_SCLK_PRE_EN(_name, _reg) \
- AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
- aud_tdm##_name##_sclk_sel, CLK_SET_RATE_PARENT)
-
-static AUD_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AUD_TDM_SCLK_POST_EN(_name, _reg) \
- AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
- aud_tdm##_name##_sclk_pre_en, CLK_SET_RATE_PARENT)
-
-static AUD_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AUD_TDM_SCLK(_name, _reg) \
- struct clk_regmap aud_tdm##_name##_sclk = { \
- .data = &(struct meson_clk_phase_data) { \
- .ph = { \
- .reg_off = (_reg), \
- .shift = 29, \
- .width = 1, \
- }, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "aud_tdm"#_name"_sclk", \
- .ops = &meson_clk_phase_ops, \
- .parent_hws = (const struct clk_hw *[]) { \
- &aud_tdm##_name##_sclk_post_en.hw \
- }, \
- .num_parents = 1, \
- .flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \
- }, \
-}
-
-static AUD_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
+/* TDM sample clock sources */
static const struct clk_parent_data tdm_lrclk_parent_data[] = {
- { .hw = &aud_mst_a_lrclk.hw, },
- { .hw = &aud_mst_b_lrclk.hw, },
- { .hw = &aud_mst_c_lrclk.hw, },
- { .hw = &aud_mst_d_lrclk.hw, },
- { .hw = &aud_mst_e_lrclk.hw, },
- { .hw = &aud_mst_f_lrclk.hw, },
+ { .name = "aud_mst_a_lrclk", .index = -1, },
+ { .name = "aud_mst_b_lrclk", .index = -1, },
+ { .name = "aud_mst_c_lrclk", .index = -1, },
+ { .name = "aud_mst_d_lrclk", .index = -1, },
+ { .name = "aud_mst_e_lrclk", .index = -1, },
+ { .name = "aud_mst_f_lrclk", .index = -1, },
{ .fw_name = "slv_lrclk0", },
{ .fw_name = "slv_lrclk1", },
{ .fw_name = "slv_lrclk2", },
@@ -407,69 +241,536 @@ static const struct clk_parent_data tdm_lrclk_parent_data[] = {
{ .fw_name = "slv_lrclk9", },
};
-#define AUD_TDM_LRLCK(_name, _reg) \
- AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
- CLK_MUX_ROUND_CLOSEST, \
- tdm_lrclk_parent_data, 0)
+#define AUD_TDM_SCLK_MUX(_name, _reg) \
+ AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
+ CLK_MUX_ROUND_CLOSEST, tdm_sclk_parent_data, 0)
+#define AUD_TDM_SCLK_PRE_EN(_name, _reg) \
+ AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
+ aud_tdm##_name##_sclk_sel, CLK_SET_RATE_PARENT)
+#define AUD_TDM_SCLK_POST_EN(_name, _reg) \
+ AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
+ aud_tdm##_name##_sclk_pre_en, CLK_SET_RATE_PARENT)
+#define AUD_TDM_SCLK(_name, _reg) \
+ AUD_PHASE(tdm##_name##_sclk, _reg, 1, 29, \
+ aud_tdm##_name##_sclk_post_en, \
+ CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)
+
+#define AUD_TDM_LRLCK(_name, _reg) \
+ AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
+ CLK_MUX_ROUND_CLOSEST, tdm_lrclk_parent_data, 0)
+
+/* Pad master clock sources */
+static const struct clk_parent_data mclk_pad_ctrl_parent_data[] = {
+ { .name = "aud_mst_a_mclk", .index = -1, },
+ { .name = "aud_mst_b_mclk", .index = -1, },
+ { .name = "aud_mst_c_mclk", .index = -1, },
+ { .name = "aud_mst_d_mclk", .index = -1, },
+ { .name = "aud_mst_e_mclk", .index = -1, },
+ { .name = "aud_mst_f_mclk", .index = -1, },
+};
+
+/* Pad bit clock sources */
+static const struct clk_parent_data sclk_pad_ctrl_parent_data[] = {
+ { .name = "aud_mst_a_sclk", .index = -1, },
+ { .name = "aud_mst_b_sclk", .index = -1, },
+ { .name = "aud_mst_c_sclk", .index = -1, },
+ { .name = "aud_mst_d_sclk", .index = -1, },
+ { .name = "aud_mst_e_sclk", .index = -1, },
+ { .name = "aud_mst_f_sclk", .index = -1, },
+};
-static AUD_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+/* Pad sample clock sources */
+static const struct clk_parent_data lrclk_pad_ctrl_parent_data[] = {
+ { .name = "aud_mst_a_lrclk", .index = -1, },
+ { .name = "aud_mst_b_lrclk", .index = -1, },
+ { .name = "aud_mst_c_lrclk", .index = -1, },
+ { .name = "aud_mst_d_lrclk", .index = -1, },
+ { .name = "aud_mst_e_lrclk", .index = -1, },
+ { .name = "aud_mst_f_lrclk", .index = -1, },
+};
-/* G12a Pad control */
#define AUD_TDM_PAD_CTRL(_name, _reg, _shift, _parents) \
- AUD_MUX(tdm_##_name, _reg, 0x7, _shift, 0, _parents, \
+ AUD_MUX(_name, _reg, 0x7, _shift, 0, _parents, \
CLK_SET_RATE_NO_REPARENT)
-static const struct clk_parent_data mclk_pad_ctrl_parent_data[] = {
- { .hw = &aud_mst_a_mclk.hw },
- { .hw = &aud_mst_b_mclk.hw },
- { .hw = &aud_mst_c_mclk.hw },
- { .hw = &aud_mst_d_mclk.hw },
- { .hw = &aud_mst_e_mclk.hw },
- { .hw = &aud_mst_f_mclk.hw },
+/* Common Clocks */
+static struct clk_regmap ddr_arb =
+ AUD_PCLK_GATE(ddr_arb, AUDIO_CLK_GATE_EN, 0);
+static struct clk_regmap pdm =
+ AUD_PCLK_GATE(pdm, AUDIO_CLK_GATE_EN, 1);
+static struct clk_regmap tdmin_a =
+ AUD_PCLK_GATE(tdmin_a, AUDIO_CLK_GATE_EN, 2);
+static struct clk_regmap tdmin_b =
+ AUD_PCLK_GATE(tdmin_b, AUDIO_CLK_GATE_EN, 3);
+static struct clk_regmap tdmin_c =
+ AUD_PCLK_GATE(tdmin_c, AUDIO_CLK_GATE_EN, 4);
+static struct clk_regmap tdmin_lb =
+ AUD_PCLK_GATE(tdmin_lb, AUDIO_CLK_GATE_EN, 5);
+static struct clk_regmap tdmout_a =
+ AUD_PCLK_GATE(tdmout_a, AUDIO_CLK_GATE_EN, 6);
+static struct clk_regmap tdmout_b =
+ AUD_PCLK_GATE(tdmout_b, AUDIO_CLK_GATE_EN, 7);
+static struct clk_regmap tdmout_c =
+ AUD_PCLK_GATE(tdmout_c, AUDIO_CLK_GATE_EN, 8);
+static struct clk_regmap frddr_a =
+ AUD_PCLK_GATE(frddr_a, AUDIO_CLK_GATE_EN, 9);
+static struct clk_regmap frddr_b =
+ AUD_PCLK_GATE(frddr_b, AUDIO_CLK_GATE_EN, 10);
+static struct clk_regmap frddr_c =
+ AUD_PCLK_GATE(frddr_c, AUDIO_CLK_GATE_EN, 11);
+static struct clk_regmap toddr_a =
+ AUD_PCLK_GATE(toddr_a, AUDIO_CLK_GATE_EN, 12);
+static struct clk_regmap toddr_b =
+ AUD_PCLK_GATE(toddr_b, AUDIO_CLK_GATE_EN, 13);
+static struct clk_regmap toddr_c =
+ AUD_PCLK_GATE(toddr_c, AUDIO_CLK_GATE_EN, 14);
+static struct clk_regmap loopback =
+ AUD_PCLK_GATE(loopback, AUDIO_CLK_GATE_EN, 15);
+static struct clk_regmap spdifin =
+ AUD_PCLK_GATE(spdifin, AUDIO_CLK_GATE_EN, 16);
+static struct clk_regmap spdifout =
+ AUD_PCLK_GATE(spdifout, AUDIO_CLK_GATE_EN, 17);
+static struct clk_regmap resample =
+ AUD_PCLK_GATE(resample, AUDIO_CLK_GATE_EN, 18);
+static struct clk_regmap power_detect =
+ AUD_PCLK_GATE(power_detect, AUDIO_CLK_GATE_EN, 19);
+
+static struct clk_regmap spdifout_clk_sel =
+ AUD_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static struct clk_regmap pdm_dclk_sel =
+ AUD_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static struct clk_regmap spdifin_clk_sel =
+ AUD_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static struct clk_regmap pdm_sysclk_sel =
+ AUD_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static struct clk_regmap spdifout_b_clk_sel =
+ AUD_MST_MCLK_MUX(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+static struct clk_regmap spdifout_clk_div =
+ AUD_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static struct clk_regmap pdm_dclk_div =
+ AUD_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static struct clk_regmap spdifin_clk_div =
+ AUD_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static struct clk_regmap pdm_sysclk_div =
+ AUD_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static struct clk_regmap spdifout_b_clk_div =
+ AUD_MST_MCLK_DIV(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+static struct clk_regmap spdifout_clk =
+ AUD_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static struct clk_regmap spdifin_clk =
+ AUD_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static struct clk_regmap pdm_dclk =
+ AUD_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static struct clk_regmap pdm_sysclk =
+ AUD_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static struct clk_regmap spdifout_b_clk =
+ AUD_MST_MCLK_GATE(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+static struct clk_regmap mst_a_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static struct clk_regmap mst_b_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static struct clk_regmap mst_c_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static struct clk_regmap mst_d_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static struct clk_regmap mst_e_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static struct clk_regmap mst_f_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+static struct clk_regmap mst_a_sclk_div =
+ AUD_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static struct clk_regmap mst_b_sclk_div =
+ AUD_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static struct clk_regmap mst_c_sclk_div =
+ AUD_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static struct clk_regmap mst_d_sclk_div =
+ AUD_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static struct clk_regmap mst_e_sclk_div =
+ AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static struct clk_regmap mst_f_sclk_div =
+ AUD_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+static struct clk_regmap mst_a_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static struct clk_regmap mst_b_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static struct clk_regmap mst_c_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static struct clk_regmap mst_d_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static struct clk_regmap mst_e_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static struct clk_regmap mst_f_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+static struct clk_regmap mst_a_sclk =
+ AUD_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static struct clk_regmap mst_b_sclk =
+ AUD_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static struct clk_regmap mst_c_sclk =
+ AUD_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static struct clk_regmap mst_d_sclk =
+ AUD_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static struct clk_regmap mst_e_sclk =
+ AUD_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static struct clk_regmap mst_f_sclk =
+ AUD_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+static struct clk_regmap mst_a_lrclk_div =
+ AUD_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static struct clk_regmap mst_b_lrclk_div =
+ AUD_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static struct clk_regmap mst_c_lrclk_div =
+ AUD_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static struct clk_regmap mst_d_lrclk_div =
+ AUD_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static struct clk_regmap mst_e_lrclk_div =
+ AUD_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static struct clk_regmap mst_f_lrclk_div =
+ AUD_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+static struct clk_regmap mst_a_lrclk =
+ AUD_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static struct clk_regmap mst_b_lrclk =
+ AUD_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static struct clk_regmap mst_c_lrclk =
+ AUD_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static struct clk_regmap mst_d_lrclk =
+ AUD_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static struct clk_regmap mst_e_lrclk =
+ AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static struct clk_regmap mst_f_lrclk =
+ AUD_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+static struct clk_regmap tdmin_a_sclk_sel =
+ AUD_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_sclk_sel =
+ AUD_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_sclk_sel =
+ AUD_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_sclk_sel =
+ AUD_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_sclk_sel =
+ AUD_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_sclk_sel =
+ AUD_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_sclk_sel =
+ AUD_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static struct clk_regmap tdmin_a_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static struct clk_regmap tdmin_a_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static struct clk_regmap tdmin_a_sclk =
+ AUD_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_sclk =
+ AUD_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_sclk =
+ AUD_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_sclk =
+ AUD_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_sclk =
+ AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_sclk =
+ AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_sclk =
+ AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static struct clk_regmap tdmin_a_lrclk =
+ AUD_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_lrclk =
+ AUD_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_lrclk =
+ AUD_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_lrclk =
+ AUD_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_lrclk =
+ AUD_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_lrclk =
+ AUD_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_lrclk =
+ AUD_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+/* AXG/G12A Clocks */
+static struct clk_hw axg_aud_top = {
+ .init = &(struct clk_init_data) {
+ /* Provide aud_top signal name on axg and g12a */
+ .name = "aud_top",
+ .ops = &(const struct clk_ops) {},
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "pclk",
+ },
+ .num_parents = 1,
+ },
};
-static AUD_TDM_PAD_CTRL(mclk_pad_0, AUDIO_MST_PAD_CTRL0, 0,
- mclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(mclk_pad_1, AUDIO_MST_PAD_CTRL0, 4,
- mclk_pad_ctrl_parent_data);
+static struct clk_regmap mst_a_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static struct clk_regmap mst_b_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static struct clk_regmap mst_c_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static struct clk_regmap mst_d_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static struct clk_regmap mst_e_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static struct clk_regmap mst_f_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+
+static struct clk_regmap mst_a_mclk_div =
+ AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static struct clk_regmap mst_b_mclk_div =
+ AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static struct clk_regmap mst_c_mclk_div =
+ AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static struct clk_regmap mst_d_mclk_div =
+ AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static struct clk_regmap mst_e_mclk_div =
+ AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static struct clk_regmap mst_f_mclk_div =
+ AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+
+static struct clk_regmap mst_a_mclk =
+ AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static struct clk_regmap mst_b_mclk =
+ AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static struct clk_regmap mst_c_mclk =
+ AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static struct clk_regmap mst_d_mclk =
+ AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static struct clk_regmap mst_e_mclk =
+ AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static struct clk_regmap mst_f_mclk =
+ AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+
+/* G12a clocks */
+static struct clk_regmap g12a_tdm_mclk_pad_0 = AUD_TDM_PAD_CTRL(
+ mclk_pad_0, AUDIO_MST_PAD_CTRL0, 0, mclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_mclk_pad_1 = AUD_TDM_PAD_CTRL(
+ mclk_pad_1, AUDIO_MST_PAD_CTRL0, 4, mclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_lrclk_pad_0 = AUD_TDM_PAD_CTRL(
+ lrclk_pad_0, AUDIO_MST_PAD_CTRL1, 16, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_lrclk_pad_1 = AUD_TDM_PAD_CTRL(
+ lrclk_pad_1, AUDIO_MST_PAD_CTRL1, 20, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_lrclk_pad_2 = AUD_TDM_PAD_CTRL(
+ lrclk_pad_2, AUDIO_MST_PAD_CTRL1, 24, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_sclk_pad_0 = AUD_TDM_PAD_CTRL(
+ sclk_pad_0, AUDIO_MST_PAD_CTRL1, 0, sclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_sclk_pad_1 = AUD_TDM_PAD_CTRL(
+ sclk_pad_1, AUDIO_MST_PAD_CTRL1, 4, sclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_sclk_pad_2 = AUD_TDM_PAD_CTRL(
+ sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8, sclk_pad_ctrl_parent_data);
+
+/* G12a/SM1 clocks */
+static struct clk_regmap toram =
+ AUD_PCLK_GATE(toram, AUDIO_CLK_GATE_EN, 20);
+static struct clk_regmap spdifout_b =
+ AUD_PCLK_GATE(spdifout_b, AUDIO_CLK_GATE_EN, 21);
+static struct clk_regmap eqdrc =
+ AUD_PCLK_GATE(eqdrc, AUDIO_CLK_GATE_EN, 22);
+
+/* SM1 Clocks */
+static struct clk_regmap sm1_clk81_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AUDIO_CLK81_EN,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_clk81_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "pclk",
+ },
+ .num_parents = 1,
+ },
+};
-static const struct clk_parent_data lrclk_pad_ctrl_parent_data[] = {
- { .hw = &aud_mst_a_lrclk.hw },
- { .hw = &aud_mst_b_lrclk.hw },
- { .hw = &aud_mst_c_lrclk.hw },
- { .hw = &aud_mst_d_lrclk.hw },
- { .hw = &aud_mst_e_lrclk.hw },
- { .hw = &aud_mst_f_lrclk.hw },
+static struct clk_regmap sm1_sysclk_a_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_sysclk_a_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_clk81_en.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
};
-static AUD_TDM_PAD_CTRL(lrclk_pad_0, AUDIO_MST_PAD_CTRL1, 16,
- lrclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(lrclk_pad_1, AUDIO_MST_PAD_CTRL1, 20,
- lrclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(lrclk_pad_2, AUDIO_MST_PAD_CTRL1, 24,
- lrclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_sysclk_a_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_sysclk_a_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_sysclk_a_div.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
-static const struct clk_parent_data sclk_pad_ctrl_parent_data[] = {
- { .hw = &aud_mst_a_sclk.hw },
- { .hw = &aud_mst_b_sclk.hw },
- { .hw = &aud_mst_c_sclk.hw },
- { .hw = &aud_mst_d_sclk.hw },
- { .hw = &aud_mst_e_sclk.hw },
- { .hw = &aud_mst_f_sclk.hw },
+static struct clk_regmap sm1_sysclk_b_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .shift = 16,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_sysclk_b_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_clk81_en.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
};
-static AUD_TDM_PAD_CTRL(sclk_pad_0, AUDIO_MST_PAD_CTRL1, 0,
- sclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(sclk_pad_1, AUDIO_MST_PAD_CTRL1, 4,
- sclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8,
- sclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_sysclk_b_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_sysclk_b_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_sysclk_b_div.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct clk_hw *sm1_aud_top_parents[] = {
+ &sm1_sysclk_a_en.hw,
+ &sm1_sysclk_b_en.hw,
+};
+
+static struct clk_regmap sm1_aud_top = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .mask = 0x1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "aud_top",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = sm1_aud_top_parents,
+ .num_parents = ARRAY_SIZE(sm1_aud_top_parents),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap resample_b =
+ AUD_PCLK_GATE(resample_b, AUDIO_CLK_GATE_EN, 26);
+static struct clk_regmap tovad =
+ AUD_PCLK_GATE(tovad, AUDIO_CLK_GATE_EN, 27);
+static struct clk_regmap locker =
+ AUD_PCLK_GATE(locker, AUDIO_CLK_GATE_EN, 28);
+static struct clk_regmap spdifin_lb =
+ AUD_PCLK_GATE(spdifin_lb, AUDIO_CLK_GATE_EN, 29);
+static struct clk_regmap frddr_d =
+ AUD_PCLK_GATE(frddr_d, AUDIO_CLK_GATE_EN1, 0);
+static struct clk_regmap toddr_d =
+ AUD_PCLK_GATE(toddr_d, AUDIO_CLK_GATE_EN1, 1);
+static struct clk_regmap loopback_b =
+ AUD_PCLK_GATE(loopback_b, AUDIO_CLK_GATE_EN1, 2);
+
+static struct clk_regmap sm1_mst_a_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
+static struct clk_regmap sm1_mst_b_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_SM1_MCLK_B_CTRL);
+static struct clk_regmap sm1_mst_c_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_SM1_MCLK_C_CTRL);
+static struct clk_regmap sm1_mst_d_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_SM1_MCLK_D_CTRL);
+static struct clk_regmap sm1_mst_e_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
+static struct clk_regmap sm1_mst_f_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+
+static struct clk_regmap sm1_mst_a_mclk_div =
+ AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
+static struct clk_regmap sm1_mst_b_mclk_div =
+ AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_SM1_MCLK_B_CTRL);
+static struct clk_regmap sm1_mst_c_mclk_div =
+ AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_SM1_MCLK_C_CTRL);
+static struct clk_regmap sm1_mst_d_mclk_div =
+ AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_SM1_MCLK_D_CTRL);
+static struct clk_regmap sm1_mst_e_mclk_div =
+ AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
+static struct clk_regmap sm1_mst_f_mclk_div =
+ AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+
+static struct clk_regmap sm1_mst_a_mclk =
+ AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
+static struct clk_regmap sm1_mst_b_mclk =
+ AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_SM1_MCLK_B_CTRL);
+static struct clk_regmap sm1_mst_c_mclk =
+ AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_SM1_MCLK_C_CTRL);
+static struct clk_regmap sm1_mst_d_mclk =
+ AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_SM1_MCLK_D_CTRL);
+static struct clk_regmap sm1_mst_e_mclk =
+ AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
+static struct clk_regmap sm1_mst_f_mclk =
+ AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+
+static struct clk_regmap sm1_tdm_mclk_pad_0 = AUD_TDM_PAD_CTRL(
+ tdm_mclk_pad_0, AUDIO_SM1_MST_PAD_CTRL0, 0, mclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_mclk_pad_1 = AUD_TDM_PAD_CTRL(
+ tdm_mclk_pad_1, AUDIO_SM1_MST_PAD_CTRL0, 4, mclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_lrclk_pad_0 = AUD_TDM_PAD_CTRL(
+ tdm_lrclk_pad_0, AUDIO_SM1_MST_PAD_CTRL1, 16, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_lrclk_pad_1 = AUD_TDM_PAD_CTRL(
+ tdm_lrclk_pad_1, AUDIO_SM1_MST_PAD_CTRL1, 20, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_lrclk_pad_2 = AUD_TDM_PAD_CTRL(
+ tdm_lrclk_pad_2, AUDIO_SM1_MST_PAD_CTRL1, 24, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_sclk_pad_0 = AUD_TDM_PAD_CTRL(
+ tdm_sclk_pad_0, AUDIO_SM1_MST_PAD_CTRL1, 0, sclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_sclk_pad_1 = AUD_TDM_PAD_CTRL(
+ tdm_sclk_pad_1, AUDIO_SM1_MST_PAD_CTRL1, 4, sclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_sclk_pad_2 = AUD_TDM_PAD_CTRL(
+ tdm_sclk_pad_2, AUDIO_SM1_MST_PAD_CTRL1, 8, sclk_pad_ctrl_parent_data);
/*
* Array of all clocks provided by this provider
@@ -477,127 +778,128 @@ static AUD_TDM_PAD_CTRL(sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8,
*/
static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
.hws = {
- [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw,
- [AUD_CLKID_PDM] = &aud_pdm.hw,
- [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw,
- [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw,
- [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw,
- [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw,
- [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw,
- [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw,
- [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw,
- [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw,
- [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw,
- [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw,
- [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw,
- [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw,
- [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw,
- [AUD_CLKID_LOOPBACK] = &aud_loopback.hw,
- [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw,
- [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw,
- [AUD_CLKID_RESAMPLE] = &aud_resample.hw,
- [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw,
- [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw,
- [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw,
- [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw,
- [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw,
- [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw,
- [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw,
- [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw,
- [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw,
- [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw,
- [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw,
- [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw,
- [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw,
- [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw,
- [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw,
- [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw,
- [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw,
- [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw,
- [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw,
- [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw,
- [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw,
- [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw,
- [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw,
- [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw,
- [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw,
- [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw,
- [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw,
- [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw,
- [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw,
- [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw,
- [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw,
- [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw,
- [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw,
- [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw,
- [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw,
- [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw,
- [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw,
- [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw,
- [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw,
- [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw,
- [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw,
- [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw,
- [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw,
- [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw,
- [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw,
- [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw,
- [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw,
- [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw,
- [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw,
- [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw,
- [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw,
- [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw,
- [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw,
- [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw,
- [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw,
- [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw,
- [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw,
- [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw,
- [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw,
- [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw,
- [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw,
- [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw,
- [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw,
- [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw,
- [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw,
- [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw,
- [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw,
- [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw,
- [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw,
- [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw,
- [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw,
- [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw,
- [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw,
- [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw,
- [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw,
- [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw,
- [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw,
- [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw,
- [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw,
- [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw,
+ [AUD_CLKID_DDR_ARB] = &ddr_arb.hw,
+ [AUD_CLKID_PDM] = &pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &power_detect.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &spdifout_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &tdmout_c_lrclk.hw,
+ [AUD_CLKID_TOP] = &axg_aud_top,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -609,284 +911,596 @@ static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
*/
static struct clk_hw_onecell_data g12a_audio_hw_onecell_data = {
.hws = {
- [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw,
- [AUD_CLKID_PDM] = &aud_pdm.hw,
- [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw,
- [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw,
- [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw,
- [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw,
- [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw,
- [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw,
- [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw,
- [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw,
- [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw,
- [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw,
- [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw,
- [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw,
- [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw,
- [AUD_CLKID_LOOPBACK] = &aud_loopback.hw,
- [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw,
- [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw,
- [AUD_CLKID_RESAMPLE] = &aud_resample.hw,
- [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw,
- [AUD_CLKID_SPDIFOUT_B] = &aud_spdifout_b.hw,
- [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw,
- [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw,
- [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw,
- [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw,
- [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw,
- [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw,
- [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw,
- [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw,
- [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw,
- [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw,
- [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw,
- [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw,
- [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw,
- [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw,
- [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw,
- [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw,
- [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw,
- [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw,
- [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw,
- [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw,
- [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw,
- [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &aud_spdifout_b_clk_sel.hw,
- [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &aud_spdifout_b_clk_div.hw,
- [AUD_CLKID_SPDIFOUT_B_CLK] = &aud_spdifout_b_clk.hw,
- [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw,
- [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw,
- [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw,
- [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw,
- [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw,
- [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw,
- [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw,
- [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw,
- [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw,
- [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw,
- [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw,
- [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw,
- [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw,
- [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw,
- [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw,
- [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw,
- [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw,
- [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw,
- [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw,
- [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw,
- [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw,
- [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw,
- [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw,
- [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw,
- [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw,
- [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw,
- [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw,
- [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw,
- [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw,
- [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw,
- [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw,
- [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw,
- [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw,
- [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw,
- [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw,
- [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw,
- [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw,
- [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw,
- [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw,
- [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw,
- [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw,
- [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw,
- [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw,
- [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw,
- [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw,
- [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw,
- [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw,
- [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw,
- [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw,
- [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw,
- [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw,
- [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw,
- [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw,
- [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw,
- [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw,
- [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw,
- [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw,
- [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw,
- [AUD_CLKID_TDM_MCLK_PAD0] = &aud_tdm_mclk_pad_0.hw,
- [AUD_CLKID_TDM_MCLK_PAD1] = &aud_tdm_mclk_pad_1.hw,
- [AUD_CLKID_TDM_LRCLK_PAD0] = &aud_tdm_lrclk_pad_0.hw,
- [AUD_CLKID_TDM_LRCLK_PAD1] = &aud_tdm_lrclk_pad_1.hw,
- [AUD_CLKID_TDM_LRCLK_PAD2] = &aud_tdm_lrclk_pad_2.hw,
- [AUD_CLKID_TDM_SCLK_PAD0] = &aud_tdm_sclk_pad_0.hw,
- [AUD_CLKID_TDM_SCLK_PAD1] = &aud_tdm_sclk_pad_1.hw,
- [AUD_CLKID_TDM_SCLK_PAD2] = &aud_tdm_sclk_pad_2.hw,
+ [AUD_CLKID_DDR_ARB] = &ddr_arb.hw,
+ [AUD_CLKID_PDM] = &pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &power_detect.hw,
+ [AUD_CLKID_SPDIFOUT_B] = &spdifout_b.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &spdifout_clk.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &spdifout_b_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &spdifout_b_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK] = &spdifout_b_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &tdmout_c_lrclk.hw,
+ [AUD_CLKID_TDM_MCLK_PAD0] = &g12a_tdm_mclk_pad_0.hw,
+ [AUD_CLKID_TDM_MCLK_PAD1] = &g12a_tdm_mclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD0] = &g12a_tdm_lrclk_pad_0.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD1] = &g12a_tdm_lrclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD2] = &g12a_tdm_lrclk_pad_2.hw,
+ [AUD_CLKID_TDM_SCLK_PAD0] = &g12a_tdm_sclk_pad_0.hw,
+ [AUD_CLKID_TDM_SCLK_PAD1] = &g12a_tdm_sclk_pad_1.hw,
+ [AUD_CLKID_TDM_SCLK_PAD2] = &g12a_tdm_sclk_pad_2.hw,
+ [AUD_CLKID_TOP] = &axg_aud_top,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
+/*
+ * Array of all SM1 clocks provided by this provider
+ * The input clocks of the controller will be populated at runtime
+ */
+static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = {
+ .hws = {
+ [AUD_CLKID_DDR_ARB] = &ddr_arb.hw,
+ [AUD_CLKID_PDM] = &pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &resample.hw,
+ [AUD_CLKID_SPDIFOUT_B] = &spdifout_b.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &sm1_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &sm1_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &sm1_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &sm1_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &sm1_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &sm1_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &sm1_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &sm1_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &sm1_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &sm1_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &sm1_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &sm1_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &sm1_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &sm1_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &sm1_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &sm1_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &sm1_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &sm1_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &spdifout_clk.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &spdifout_b_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &spdifout_b_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK] = &spdifout_b_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &tdmout_c_lrclk.hw,
+ [AUD_CLKID_TDM_MCLK_PAD0] = &sm1_tdm_mclk_pad_0.hw,
+ [AUD_CLKID_TDM_MCLK_PAD1] = &sm1_tdm_mclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD0] = &sm1_tdm_lrclk_pad_0.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD1] = &sm1_tdm_lrclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD2] = &sm1_tdm_lrclk_pad_2.hw,
+ [AUD_CLKID_TDM_SCLK_PAD0] = &sm1_tdm_sclk_pad_0.hw,
+ [AUD_CLKID_TDM_SCLK_PAD1] = &sm1_tdm_sclk_pad_1.hw,
+ [AUD_CLKID_TDM_SCLK_PAD2] = &sm1_tdm_sclk_pad_2.hw,
+ [AUD_CLKID_TOP] = &sm1_aud_top.hw,
+ [AUD_CLKID_TORAM] = &toram.hw,
+ [AUD_CLKID_EQDRC] = &eqdrc.hw,
+ [AUD_CLKID_RESAMPLE_B] = &resample_b.hw,
+ [AUD_CLKID_TOVAD] = &tovad.hw,
+ [AUD_CLKID_LOCKER] = &locker.hw,
+ [AUD_CLKID_SPDIFIN_LB] = &spdifin_lb.hw,
+ [AUD_CLKID_FRDDR_D] = &frddr_d.hw,
+ [AUD_CLKID_TODDR_D] = &toddr_d.hw,
+ [AUD_CLKID_LOOPBACK_B] = &loopback_b.hw,
+ [AUD_CLKID_CLK81_EN] = &sm1_clk81_en.hw,
+ [AUD_CLKID_SYSCLK_A_DIV] = &sm1_sysclk_a_div.hw,
+ [AUD_CLKID_SYSCLK_A_EN] = &sm1_sysclk_a_en.hw,
+ [AUD_CLKID_SYSCLK_B_DIV] = &sm1_sysclk_b_div.hw,
+ [AUD_CLKID_SYSCLK_B_EN] = &sm1_sysclk_b_en.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
+
/* Convenience table to populate regmap in .probe()
* Note that this table is shared between both AXG and G12A,
* with spdifout_b clocks being exclusive to G12A. Since those
* clocks are not declared within the AXG onecell table, we do not
* feel the need to have separate AXG/G12A regmap tables.
*/
-static struct clk_regmap *const aud_clk_regmaps[] = {
- &aud_ddr_arb,
- &aud_pdm,
- &aud_tdmin_a,
- &aud_tdmin_b,
- &aud_tdmin_c,
- &aud_tdmin_lb,
- &aud_tdmout_a,
- &aud_tdmout_b,
- &aud_tdmout_c,
- &aud_frddr_a,
- &aud_frddr_b,
- &aud_frddr_c,
- &aud_toddr_a,
- &aud_toddr_b,
- &aud_toddr_c,
- &aud_loopback,
- &aud_spdifin,
- &aud_spdifout,
- &aud_resample,
- &aud_power_detect,
- &aud_spdifout_b,
- &aud_mst_a_mclk_sel,
- &aud_mst_b_mclk_sel,
- &aud_mst_c_mclk_sel,
- &aud_mst_d_mclk_sel,
- &aud_mst_e_mclk_sel,
- &aud_mst_f_mclk_sel,
- &aud_mst_a_mclk_div,
- &aud_mst_b_mclk_div,
- &aud_mst_c_mclk_div,
- &aud_mst_d_mclk_div,
- &aud_mst_e_mclk_div,
- &aud_mst_f_mclk_div,
- &aud_mst_a_mclk,
- &aud_mst_b_mclk,
- &aud_mst_c_mclk,
- &aud_mst_d_mclk,
- &aud_mst_e_mclk,
- &aud_mst_f_mclk,
- &aud_spdifout_clk_sel,
- &aud_spdifout_clk_div,
- &aud_spdifout_clk,
- &aud_spdifin_clk_sel,
- &aud_spdifin_clk_div,
- &aud_spdifin_clk,
- &aud_pdm_dclk_sel,
- &aud_pdm_dclk_div,
- &aud_pdm_dclk,
- &aud_pdm_sysclk_sel,
- &aud_pdm_sysclk_div,
- &aud_pdm_sysclk,
- &aud_mst_a_sclk_pre_en,
- &aud_mst_b_sclk_pre_en,
- &aud_mst_c_sclk_pre_en,
- &aud_mst_d_sclk_pre_en,
- &aud_mst_e_sclk_pre_en,
- &aud_mst_f_sclk_pre_en,
- &aud_mst_a_sclk_div,
- &aud_mst_b_sclk_div,
- &aud_mst_c_sclk_div,
- &aud_mst_d_sclk_div,
- &aud_mst_e_sclk_div,
- &aud_mst_f_sclk_div,
- &aud_mst_a_sclk_post_en,
- &aud_mst_b_sclk_post_en,
- &aud_mst_c_sclk_post_en,
- &aud_mst_d_sclk_post_en,
- &aud_mst_e_sclk_post_en,
- &aud_mst_f_sclk_post_en,
- &aud_mst_a_sclk,
- &aud_mst_b_sclk,
- &aud_mst_c_sclk,
- &aud_mst_d_sclk,
- &aud_mst_e_sclk,
- &aud_mst_f_sclk,
- &aud_mst_a_lrclk_div,
- &aud_mst_b_lrclk_div,
- &aud_mst_c_lrclk_div,
- &aud_mst_d_lrclk_div,
- &aud_mst_e_lrclk_div,
- &aud_mst_f_lrclk_div,
- &aud_mst_a_lrclk,
- &aud_mst_b_lrclk,
- &aud_mst_c_lrclk,
- &aud_mst_d_lrclk,
- &aud_mst_e_lrclk,
- &aud_mst_f_lrclk,
- &aud_tdmin_a_sclk_sel,
- &aud_tdmin_b_sclk_sel,
- &aud_tdmin_c_sclk_sel,
- &aud_tdmin_lb_sclk_sel,
- &aud_tdmout_a_sclk_sel,
- &aud_tdmout_b_sclk_sel,
- &aud_tdmout_c_sclk_sel,
- &aud_tdmin_a_sclk_pre_en,
- &aud_tdmin_b_sclk_pre_en,
- &aud_tdmin_c_sclk_pre_en,
- &aud_tdmin_lb_sclk_pre_en,
- &aud_tdmout_a_sclk_pre_en,
- &aud_tdmout_b_sclk_pre_en,
- &aud_tdmout_c_sclk_pre_en,
- &aud_tdmin_a_sclk_post_en,
- &aud_tdmin_b_sclk_post_en,
- &aud_tdmin_c_sclk_post_en,
- &aud_tdmin_lb_sclk_post_en,
- &aud_tdmout_a_sclk_post_en,
- &aud_tdmout_b_sclk_post_en,
- &aud_tdmout_c_sclk_post_en,
- &aud_tdmin_a_sclk,
- &aud_tdmin_b_sclk,
- &aud_tdmin_c_sclk,
- &aud_tdmin_lb_sclk,
- &aud_tdmout_a_sclk,
- &aud_tdmout_b_sclk,
- &aud_tdmout_c_sclk,
- &aud_tdmin_a_lrclk,
- &aud_tdmin_b_lrclk,
- &aud_tdmin_c_lrclk,
- &aud_tdmin_lb_lrclk,
- &aud_tdmout_a_lrclk,
- &aud_tdmout_b_lrclk,
- &aud_tdmout_c_lrclk,
- &aud_spdifout_b_clk_sel,
- &aud_spdifout_b_clk_div,
- &aud_spdifout_b_clk,
- &aud_tdm_mclk_pad_0,
- &aud_tdm_mclk_pad_1,
- &aud_tdm_lrclk_pad_0,
- &aud_tdm_lrclk_pad_1,
- &aud_tdm_lrclk_pad_2,
- &aud_tdm_sclk_pad_0,
- &aud_tdm_sclk_pad_1,
- &aud_tdm_sclk_pad_2,
+static struct clk_regmap *const axg_clk_regmaps[] = {
+ &ddr_arb,
+ &pdm,
+ &tdmin_a,
+ &tdmin_b,
+ &tdmin_c,
+ &tdmin_lb,
+ &tdmout_a,
+ &tdmout_b,
+ &tdmout_c,
+ &frddr_a,
+ &frddr_b,
+ &frddr_c,
+ &toddr_a,
+ &toddr_b,
+ &toddr_c,
+ &loopback,
+ &spdifin,
+ &spdifout,
+ &resample,
+ &power_detect,
+ &spdifout_b,
+ &mst_a_mclk_sel,
+ &mst_b_mclk_sel,
+ &mst_c_mclk_sel,
+ &mst_d_mclk_sel,
+ &mst_e_mclk_sel,
+ &mst_f_mclk_sel,
+ &mst_a_mclk_div,
+ &mst_b_mclk_div,
+ &mst_c_mclk_div,
+ &mst_d_mclk_div,
+ &mst_e_mclk_div,
+ &mst_f_mclk_div,
+ &mst_a_mclk,
+ &mst_b_mclk,
+ &mst_c_mclk,
+ &mst_d_mclk,
+ &mst_e_mclk,
+ &mst_f_mclk,
+ &spdifout_clk_sel,
+ &spdifout_clk_div,
+ &spdifout_clk,
+ &spdifin_clk_sel,
+ &spdifin_clk_div,
+ &spdifin_clk,
+ &pdm_dclk_sel,
+ &pdm_dclk_div,
+ &pdm_dclk,
+ &pdm_sysclk_sel,
+ &pdm_sysclk_div,
+ &pdm_sysclk,
+ &mst_a_sclk_pre_en,
+ &mst_b_sclk_pre_en,
+ &mst_c_sclk_pre_en,
+ &mst_d_sclk_pre_en,
+ &mst_e_sclk_pre_en,
+ &mst_f_sclk_pre_en,
+ &mst_a_sclk_div,
+ &mst_b_sclk_div,
+ &mst_c_sclk_div,
+ &mst_d_sclk_div,
+ &mst_e_sclk_div,
+ &mst_f_sclk_div,
+ &mst_a_sclk_post_en,
+ &mst_b_sclk_post_en,
+ &mst_c_sclk_post_en,
+ &mst_d_sclk_post_en,
+ &mst_e_sclk_post_en,
+ &mst_f_sclk_post_en,
+ &mst_a_sclk,
+ &mst_b_sclk,
+ &mst_c_sclk,
+ &mst_d_sclk,
+ &mst_e_sclk,
+ &mst_f_sclk,
+ &mst_a_lrclk_div,
+ &mst_b_lrclk_div,
+ &mst_c_lrclk_div,
+ &mst_d_lrclk_div,
+ &mst_e_lrclk_div,
+ &mst_f_lrclk_div,
+ &mst_a_lrclk,
+ &mst_b_lrclk,
+ &mst_c_lrclk,
+ &mst_d_lrclk,
+ &mst_e_lrclk,
+ &mst_f_lrclk,
+ &tdmin_a_sclk_sel,
+ &tdmin_b_sclk_sel,
+ &tdmin_c_sclk_sel,
+ &tdmin_lb_sclk_sel,
+ &tdmout_a_sclk_sel,
+ &tdmout_b_sclk_sel,
+ &tdmout_c_sclk_sel,
+ &tdmin_a_sclk_pre_en,
+ &tdmin_b_sclk_pre_en,
+ &tdmin_c_sclk_pre_en,
+ &tdmin_lb_sclk_pre_en,
+ &tdmout_a_sclk_pre_en,
+ &tdmout_b_sclk_pre_en,
+ &tdmout_c_sclk_pre_en,
+ &tdmin_a_sclk_post_en,
+ &tdmin_b_sclk_post_en,
+ &tdmin_c_sclk_post_en,
+ &tdmin_lb_sclk_post_en,
+ &tdmout_a_sclk_post_en,
+ &tdmout_b_sclk_post_en,
+ &tdmout_c_sclk_post_en,
+ &tdmin_a_sclk,
+ &tdmin_b_sclk,
+ &tdmin_c_sclk,
+ &tdmin_lb_sclk,
+ &tdmout_a_sclk,
+ &tdmout_b_sclk,
+ &tdmout_c_sclk,
+ &tdmin_a_lrclk,
+ &tdmin_b_lrclk,
+ &tdmin_c_lrclk,
+ &tdmin_lb_lrclk,
+ &tdmout_a_lrclk,
+ &tdmout_b_lrclk,
+ &tdmout_c_lrclk,
+ &spdifout_b_clk_sel,
+ &spdifout_b_clk_div,
+ &spdifout_b_clk,
+ &g12a_tdm_mclk_pad_0,
+ &g12a_tdm_mclk_pad_1,
+ &g12a_tdm_lrclk_pad_0,
+ &g12a_tdm_lrclk_pad_1,
+ &g12a_tdm_lrclk_pad_2,
+ &g12a_tdm_sclk_pad_0,
+ &g12a_tdm_sclk_pad_1,
+ &g12a_tdm_sclk_pad_2,
+ &toram,
+ &eqdrc,
+};
+
+static struct clk_regmap *const sm1_clk_regmaps[] = {
+ &ddr_arb,
+ &pdm,
+ &tdmin_a,
+ &tdmin_b,
+ &tdmin_c,
+ &tdmin_lb,
+ &tdmout_a,
+ &tdmout_b,
+ &tdmout_c,
+ &frddr_a,
+ &frddr_b,
+ &frddr_c,
+ &toddr_a,
+ &toddr_b,
+ &toddr_c,
+ &loopback,
+ &spdifin,
+ &spdifout,
+ &resample,
+ &spdifout_b,
+ &sm1_mst_a_mclk_sel,
+ &sm1_mst_b_mclk_sel,
+ &sm1_mst_c_mclk_sel,
+ &sm1_mst_d_mclk_sel,
+ &sm1_mst_e_mclk_sel,
+ &sm1_mst_f_mclk_sel,
+ &sm1_mst_a_mclk_div,
+ &sm1_mst_b_mclk_div,
+ &sm1_mst_c_mclk_div,
+ &sm1_mst_d_mclk_div,
+ &sm1_mst_e_mclk_div,
+ &sm1_mst_f_mclk_div,
+ &sm1_mst_a_mclk,
+ &sm1_mst_b_mclk,
+ &sm1_mst_c_mclk,
+ &sm1_mst_d_mclk,
+ &sm1_mst_e_mclk,
+ &sm1_mst_f_mclk,
+ &spdifout_clk_sel,
+ &spdifout_clk_div,
+ &spdifout_clk,
+ &spdifin_clk_sel,
+ &spdifin_clk_div,
+ &spdifin_clk,
+ &pdm_dclk_sel,
+ &pdm_dclk_div,
+ &pdm_dclk,
+ &pdm_sysclk_sel,
+ &pdm_sysclk_div,
+ &pdm_sysclk,
+ &mst_a_sclk_pre_en,
+ &mst_b_sclk_pre_en,
+ &mst_c_sclk_pre_en,
+ &mst_d_sclk_pre_en,
+ &mst_e_sclk_pre_en,
+ &mst_f_sclk_pre_en,
+ &mst_a_sclk_div,
+ &mst_b_sclk_div,
+ &mst_c_sclk_div,
+ &mst_d_sclk_div,
+ &mst_e_sclk_div,
+ &mst_f_sclk_div,
+ &mst_a_sclk_post_en,
+ &mst_b_sclk_post_en,
+ &mst_c_sclk_post_en,
+ &mst_d_sclk_post_en,
+ &mst_e_sclk_post_en,
+ &mst_f_sclk_post_en,
+ &mst_a_sclk,
+ &mst_b_sclk,
+ &mst_c_sclk,
+ &mst_d_sclk,
+ &mst_e_sclk,
+ &mst_f_sclk,
+ &mst_a_lrclk_div,
+ &mst_b_lrclk_div,
+ &mst_c_lrclk_div,
+ &mst_d_lrclk_div,
+ &mst_e_lrclk_div,
+ &mst_f_lrclk_div,
+ &mst_a_lrclk,
+ &mst_b_lrclk,
+ &mst_c_lrclk,
+ &mst_d_lrclk,
+ &mst_e_lrclk,
+ &mst_f_lrclk,
+ &tdmin_a_sclk_sel,
+ &tdmin_b_sclk_sel,
+ &tdmin_c_sclk_sel,
+ &tdmin_lb_sclk_sel,
+ &tdmout_a_sclk_sel,
+ &tdmout_b_sclk_sel,
+ &tdmout_c_sclk_sel,
+ &tdmin_a_sclk_pre_en,
+ &tdmin_b_sclk_pre_en,
+ &tdmin_c_sclk_pre_en,
+ &tdmin_lb_sclk_pre_en,
+ &tdmout_a_sclk_pre_en,
+ &tdmout_b_sclk_pre_en,
+ &tdmout_c_sclk_pre_en,
+ &tdmin_a_sclk_post_en,
+ &tdmin_b_sclk_post_en,
+ &tdmin_c_sclk_post_en,
+ &tdmin_lb_sclk_post_en,
+ &tdmout_a_sclk_post_en,
+ &tdmout_b_sclk_post_en,
+ &tdmout_c_sclk_post_en,
+ &tdmin_a_sclk,
+ &tdmin_b_sclk,
+ &tdmin_c_sclk,
+ &tdmin_lb_sclk,
+ &tdmout_a_sclk,
+ &tdmout_b_sclk,
+ &tdmout_c_sclk,
+ &tdmin_a_lrclk,
+ &tdmin_b_lrclk,
+ &tdmin_c_lrclk,
+ &tdmin_lb_lrclk,
+ &tdmout_a_lrclk,
+ &tdmout_b_lrclk,
+ &tdmout_c_lrclk,
+ &spdifout_b_clk_sel,
+ &spdifout_b_clk_div,
+ &spdifout_b_clk,
+ &sm1_tdm_mclk_pad_0,
+ &sm1_tdm_mclk_pad_1,
+ &sm1_tdm_lrclk_pad_0,
+ &sm1_tdm_lrclk_pad_1,
+ &sm1_tdm_lrclk_pad_2,
+ &sm1_tdm_sclk_pad_0,
+ &sm1_tdm_sclk_pad_1,
+ &sm1_tdm_sclk_pad_2,
+ &sm1_aud_top,
+ &toram,
+ &eqdrc,
+ &resample_b,
+ &tovad,
+ &locker,
+ &spdifin_lb,
+ &frddr_d,
+ &toddr_d,
+ &loopback_b,
+ &sm1_clk81_en,
+ &sm1_sysclk_a_div,
+ &sm1_sysclk_a_en,
+ &sm1_sysclk_b_div,
+ &sm1_sysclk_b_en,
};
static int devm_clk_get_enable(struct device *dev, char *id)
@@ -1001,10 +1615,12 @@ static const struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = AUDIO_CLK_PDMIN_CTRL1,
+ .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
struct audioclk_data {
+ struct clk_regmap *const *regmap_clks;
+ unsigned int regmap_clk_num;
struct clk_hw_onecell_data *hw_onecell_data;
unsigned int reset_offset;
unsigned int reset_num;
@@ -1016,7 +1632,6 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
const struct audioclk_data *data;
struct axg_audio_reset_data *rst;
struct regmap *map;
- struct resource *res;
void __iomem *regs;
struct clk_hw *hw;
int ret, i;
@@ -1025,8 +1640,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -1048,8 +1662,8 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(aud_clk_regmaps); i++)
- aud_clk_regmaps[i]->map = map;
+ for (i = 0; i < data->regmap_clk_num; i++)
+ data->regmap_clks[i]->map = map;
/* Take care to skip the registered input clocks */
for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) {
@@ -1093,15 +1707,27 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
static const struct audioclk_data axg_audioclk_data = {
+ .regmap_clks = axg_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
.hw_onecell_data = &axg_audio_hw_onecell_data,
};
static const struct audioclk_data g12a_audioclk_data = {
+ .regmap_clks = axg_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
.hw_onecell_data = &g12a_audio_hw_onecell_data,
.reset_offset = AUDIO_SW_RESET,
.reset_num = 26,
};
+static const struct audioclk_data sm1_audioclk_data = {
+ .regmap_clks = sm1_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(sm1_clk_regmaps),
+ .hw_onecell_data = &sm1_audio_hw_onecell_data,
+ .reset_offset = AUDIO_SM1_SW_RESET0,
+ .reset_num = 39,
+};
+
static const struct of_device_id clkc_match_table[] = {
{
.compatible = "amlogic,axg-audio-clkc",
@@ -1109,6 +1735,9 @@ static const struct of_device_id clkc_match_table[] = {
}, {
.compatible = "amlogic,g12a-audio-clkc",
.data = &g12a_audioclk_data
+ }, {
+ .compatible = "amlogic,sm1-audio-clkc",
+ .data = &sm1_audioclk_data
}, {}
};
MODULE_DEVICE_TABLE(of, clkc_match_table);
@@ -1122,6 +1751,6 @@ static struct platform_driver axg_audio_driver = {
};
module_platform_driver(axg_audio_driver);
-MODULE_DESCRIPTION("Amlogic AXG/G12A Audio Clock driver");
+MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
index c00e28b2e1a9..fd65a7d0704b 100644
--- a/drivers/clk/meson/axg-audio.h
+++ b/drivers/clk/meson/axg-audio.h
@@ -50,6 +50,20 @@
#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
+/* SM1 introduces new registers and shifts some offsets :( */
+#define AUDIO_CLK_GATE_EN1 0x004
+#define AUDIO_SM1_MCLK_A_CTRL 0x008
+#define AUDIO_SM1_MCLK_B_CTRL 0x00C
+#define AUDIO_SM1_MCLK_C_CTRL 0x010
+#define AUDIO_SM1_MCLK_D_CTRL 0x014
+#define AUDIO_SM1_MCLK_E_CTRL 0x018
+#define AUDIO_SM1_MCLK_F_CTRL 0x01C
+#define AUDIO_SM1_MST_PAD_CTRL0 0x020
+#define AUDIO_SM1_MST_PAD_CTRL1 0x024
+#define AUDIO_SM1_SW_RESET0 0x028
+#define AUDIO_SM1_SW_RESET1 0x02C
+#define AUDIO_CLK81_CTRL 0x030
+#define AUDIO_CLK81_EN 0x034
/*
* CLKID index values
* These indices are entirely contrived and do not map onto the hardware.
@@ -115,10 +129,15 @@
#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153
#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154
+#define AUD_CLKID_CLK81_EN 173
+#define AUD_CLKID_SYSCLK_A_DIV 174
+#define AUD_CLKID_SYSCLK_B_DIV 175
+#define AUD_CLKID_SYSCLK_A_EN 176
+#define AUD_CLKID_SYSCLK_B_EN 177
/* include the CLKIDs which are part of the DT bindings */
#include <dt-bindings/clock/axg-audio-clkc.h>
-#define NR_CLKS 163
+#define NR_CLKS 178
#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index af5e5acad370..6b394302c76a 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -274,8 +274,8 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
if (!ap_cpu_clk)
return -ENOMEM;
- ap_cpu_data = devm_kzalloc(dev, sizeof(*ap_cpu_data) +
- sizeof(struct clk_hw *) * nclusters,
+ ap_cpu_data = devm_kzalloc(dev, struct_size(ap_cpu_data, hws,
+ nclusters),
GFP_KERNEL);
if (!ap_cpu_data)
return -ENOMEM;
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 5fc6d486a381..f5746f9ea929 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -303,6 +303,7 @@ PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);
+static PERIPH_GATE(pcie, 14);
static struct clk_periph_data data_sb[] = {
REF_CLK_MUX_DD(gbe_50),
@@ -318,6 +319,7 @@ static struct clk_periph_data data_sb[] = {
REF_CLK_FULL_DD(sdio),
REF_CLK_FULL_DD(usb32_usb2_sys),
REF_CLK_FULL_DD(usb32_ss_sys),
+ REF_CLK_GATE(pcie, "gbe_core"),
{ },
};
@@ -712,8 +714,8 @@ static int __maybe_unused armada_3700_periph_clock_resume(struct device *dev)
}
static const struct dev_pm_ops armada_3700_periph_clock_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend,
- armada_3700_periph_clock_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend,
+ armada_3700_periph_clock_resume)
};
static int armada_3700_periph_clock_probe(struct platform_device *pdev)
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index fa1568279c23..45665655a258 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -50,12 +50,6 @@ static u32 __init axp_get_tclk_freq(void __iomem *sar)
return 250000000;
}
-/* MV98DX3236 TCLK frequency is fixed to 200MHz */
-static u32 __init mv98dx3236_get_tclk_freq(void __iomem *sar)
-{
- return 200000000;
-}
-
static const u32 axp_cpu_freqs[] __initconst = {
1000000000,
1066000000,
@@ -93,12 +87,6 @@ static u32 __init axp_get_cpu_freq(void __iomem *sar)
return cpu_freq;
}
-/* MV98DX3236 CLK frequency is fixed to 800MHz */
-static u32 __init mv98dx3236_get_cpu_freq(void __iomem *sar)
-{
- return 800000000;
-}
-
static const int axp_nbclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 2}, {2, 2},
{1, 2}, {1, 2}, {1, 1}, {2, 3},
@@ -168,11 +156,6 @@ static const struct coreclk_soc_desc axp_coreclks = {
.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
};
-static const struct coreclk_soc_desc mv98dx3236_coreclks = {
- .get_tclk_freq = mv98dx3236_get_tclk_freq,
- .get_cpu_freq = mv98dx3236_get_cpu_freq,
-};
-
/*
* Clock Gating Control
*/
@@ -210,15 +193,6 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
{ }
};
-static const struct clk_gating_soc_desc mv98dx3236_gating_desc[] __initconst = {
- { "ge1", NULL, 3, 0 },
- { "ge0", NULL, 4, 0 },
- { "pex00", NULL, 5, 0 },
- { "sdio", NULL, 17, 0 },
- { "xor0", NULL, 22, 0 },
- { }
-};
-
static void __init axp_clk_init(struct device_node *np)
{
struct device_node *cgnp =
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 808463276145..84c8900542e4 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -235,8 +235,8 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
if (ret)
return ret;
- cp110_clk_data = devm_kzalloc(dev, sizeof(*cp110_clk_data) +
- sizeof(struct clk_hw *) * CP110_CLK_NUM,
+ cp110_clk_data = devm_kzalloc(dev, struct_size(cp110_clk_data, hws,
+ CP110_CLK_NUM),
GFP_KERNEL);
if (!cp110_clk_data)
return -ENOMEM;
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 287fdeae7c7c..7b123105b5de 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -459,6 +459,7 @@ struct dummy_clk {
};
static struct dummy_clk dummy_clks[] __initdata = {
DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
+ DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 32dbb4f09492..3b33ef129274 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -220,6 +220,15 @@ config MSM_GCC_8998
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, UFS, SD/eMMC, PCIe, etc.
+config MSM_GPUCC_8998
+ tristate "MSM8998 Graphics Clock Controller"
+ select MSM_GCC_8998
+ select QCOM_GDSC
+ help
+ Support for the graphics clock controller on MSM8998 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
help
@@ -227,6 +236,15 @@ config QCS_GCC_404
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SC_GCC_7180
+ tristate "SC7180 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SC7180 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
config SDM_CAMCC_845
tristate "SDM845 Camera Clock Controller"
select SDM_GCC_845
@@ -248,6 +266,14 @@ config QCS_TURING_404
Support for the Turing Clock Controller on QCS404, provides clocks
and resets for the Turing subsystem.
+config QCS_Q6SSTOP_404
+ tristate "QCS404 Q6SSTOP Clock Controller"
+ select QCS_GCC_404
+ help
+ Support for the Q6SSTOP clock controller on QCS404 devices.
+ Say Y if you want to use the Q6SSTOP branch clocks of the WCSS clock
+ controller to reset the Q6SSTOP subsystem.
+
config SDM_GCC_845
tristate "SDM845 Global Clock Controller"
select QCOM_GDSC
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 4a813b4055d0..d899661d0f44 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8998) += gcc-msm8998.o
+obj-$(CONFIG_MSM_GPUCC_8998) += gpucc-msm8998.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
@@ -42,7 +43,9 @@ obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index c25b57c3cbc8..78358b81d249 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -168,7 +168,7 @@ struct clk_rcg_dfs_data {
};
#define DEFINE_RCG_DFS(r) \
- { .rcg = &r##_src, .init = &r##_init }
+ { .rcg = &r, .init = &r##_init }
extern int qcom_cc_register_rcg_dfs(struct regmap *regmap,
const struct clk_rcg_dfs_data *rcgs,
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b98b81ef43a1..8f4b9bec2956 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -206,7 +206,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
break;
default:
return -EINVAL;
- };
+ }
if (!f)
return -EINVAL;
@@ -220,6 +220,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
if (clk_flags & CLK_SET_RATE_PARENT) {
rate = f->freq;
if (f->pre_div) {
+ if (!rate)
+ rate = req->rate;
rate /= 2;
rate *= f->pre_div + 1;
}
@@ -319,7 +321,7 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
break;
default:
return -EINVAL;
- };
+ }
if (!f)
return -EINVAL;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 96a36f6ff667..2dbbe47e8d4f 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -334,13 +334,14 @@ static const struct clk_ops clk_rpmh_bcm_ops = {
.recalc_rate = clk_rpmh_bcm_recalc_rate,
};
-/* Resource name must match resource id present in cmd-db. */
+/* Resource name must match resource id present in cmd-db */
DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1);
DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
static struct clk_hw *sdm845_rpmh_clocks[] = {
@@ -364,26 +365,19 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
};
-DEFINE_CLK_RPMH_ARC(sm8150, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
-DEFINE_CLK_RPMH_VRM(sm8150, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
-DEFINE_CLK_RPMH_VRM(sm8150, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
-DEFINE_CLK_RPMH_VRM(sm8150, rf_clk1, rf_clk1_ao, "rfclka1", 1);
-DEFINE_CLK_RPMH_VRM(sm8150, rf_clk2, rf_clk2_ao, "rfclka2", 1);
-DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1);
-
static struct clk_hw *sm8150_rpmh_clocks[] = {
- [RPMH_CXO_CLK] = &sm8150_bi_tcxo.hw,
- [RPMH_CXO_CLK_A] = &sm8150_bi_tcxo_ao.hw,
- [RPMH_LN_BB_CLK2] = &sm8150_ln_bb_clk2.hw,
- [RPMH_LN_BB_CLK2_A] = &sm8150_ln_bb_clk2_ao.hw,
- [RPMH_LN_BB_CLK3] = &sm8150_ln_bb_clk3.hw,
- [RPMH_LN_BB_CLK3_A] = &sm8150_ln_bb_clk3_ao.hw,
- [RPMH_RF_CLK1] = &sm8150_rf_clk1.hw,
- [RPMH_RF_CLK1_A] = &sm8150_rf_clk1_ao.hw,
- [RPMH_RF_CLK2] = &sm8150_rf_clk2.hw,
- [RPMH_RF_CLK2_A] = &sm8150_rf_clk2_ao.hw,
- [RPMH_RF_CLK3] = &sm8150_rf_clk3.hw,
- [RPMH_RF_CLK3_A] = &sm8150_rf_clk3_ao.hw,
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sm8150 = {
@@ -391,6 +385,24 @@ static const struct clk_rpmh_desc clk_rpmh_sm8150 = {
.num_clks = ARRAY_SIZE(sm8150_rpmh_clocks),
};
+static struct clk_hw *sc7180_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
+ .clks = sc7180_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sc7180_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -471,6 +483,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
+ { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index fef5e8157061..930fa4a4c52a 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -648,6 +648,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
};
/* msm8998 */
+DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
@@ -670,6 +671,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
static struct clk_smd_rpm *msm8998_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
[RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
[RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk,
[RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk,
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 28ddc747d703..60d2a78d1395 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -29,6 +29,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
if (!f)
return NULL;
+ if (!f->freq)
+ return f;
+
for (; f->freq; f++)
if (rate <= f->freq)
return f;
@@ -218,7 +221,7 @@ static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
return ERR_PTR(-EINVAL);
}
- return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
+ return cc->rclks[idx] ? &cc->rclks[idx]->hw : NULL;
}
int qcom_cc_really_probe(struct platform_device *pdev,
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 091acd59c1d6..cf31b5d03270 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1266,6 +1266,72 @@ static struct clk_branch gcc_bimc_mss_q6_axi_clk = {
},
};
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk_src",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_ahb_clk = {
.halt_reg = 0x17004,
.halt_check = BRANCH_HALT_VOTED,
@@ -2832,6 +2898,11 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_USB3_CLKREF_CLK] = &gcc_usb3_clkref_clk.clkr,
[GCC_PCIE_CLKREF_CLK] = &gcc_pcie_clkref_clk.clkr,
[GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
};
static struct gdsc *gcc_msm8998_gdscs[] = {
@@ -2928,6 +2999,7 @@ static const struct qcom_reset_map gcc_msm8998_resets[] = {
[GCC_GPU_BCR] = { 0x71000 },
[GCC_SPSS_BCR] = { 0x72000 },
[GCC_OBT_ODT_BCR] = { 0x73000 },
+ [GCC_MSS_RESTART] = { 0x79000 },
[GCC_VS_BCR] = { 0x7a000 },
[GCC_MSS_VS_RESET] = { 0x7a100 },
[GCC_GPU_VS_RESET] = { 0x7a104 },
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
new file mode 100644
index 000000000000..38424e63bcae
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -0,0 +1,2450 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL1_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_pll0_main_div_cdiv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pll0_main_div_cdiv",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x01000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x27000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao", .name = "bi_tcxo_ao" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL1_OUT_MAIN, 4 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll1.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17164,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17294,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x12028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x12010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_prim_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_usb3_prim_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x8201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_hf_axi_clk = {
+ .halt_reg = 0xb080,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_throttle_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x502c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality the AHB clock needs to be left enabled */
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x4452c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4452c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_hf_axi_clk = {
+ .halt_reg = 0xb084,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_axi_clk = {
+ .halt_reg = 0x73008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x73008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_dma_cfg_ahb_clk = {
+ .halt_reg = 0x73018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x73018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_dma_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_dsp_cfg_ahb_clk = {
+ .halt_reg = 0x7301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_dsp_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_cfg_ahb_clk = {
+ .halt_reg = 0x4d004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_dma_clk = {
+ .halt_reg = 0x4d1a0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4d1a0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4d1a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_dma_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qspi_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x173c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17620,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x12008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x1200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality the SYS NOC clock needs to be left enabled */
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77038,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77094,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77094,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7708c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7708c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_axi_clk = {
+ .halt_reg = 0xb07c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
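+/*
+ * GDSCs (globally distributed switch controllers) are the power domains
+ * owned by this clock controller; the common qcom gdsc code exposes them
+ * as generic power domains (summary only, see gdsc.c).
+ */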
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x0f004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc *gcc_sc7180_gdscs[] = {
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
+static struct clk_hw *gcc_sc7180_hws[] = {
+ [GCC_GPLL0_MAIN_DIV_CDIV] = &gcc_pll0_main_div_cdiv.hw,
+};
+
+static struct clk_regmap *gcc_sc7180_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+ [GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
+ [GCC_NPU_BWMON_DMA_CFG_AHB_CLK] = &gcc_npu_bwmon_dma_cfg_ahb_clk.clkr,
+ [GCC_NPU_BWMON_DSP_CFG_AHB_CLK] = &gcc_npu_bwmon_dsp_cfg_ahb_clk.clkr,
+ [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+ [GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_GPLL0_DIV_CLK_SRC] = &gcc_video_gpll0_div_clk_src.clkr,
+ [GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL1] = &gpll1.clkr,
+};
+
+static const struct qcom_reset_map gcc_sc7180_resets[] = {
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x26000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x26004 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+};
+
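+/*
+ * QUP serial-engine RCGs that support hardware Dynamic Frequency Switching.
+ * qcom_cc_register_rcg_dfs(), called from probe below, checks whether DFS is
+ * enabled and, if so, switches these clocks to the DFS-aware ops so rates are
+ * taken from the per-perf-level registers (rough summary of clk-rcg2.c).
+ */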
+static struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
+static const struct regmap_config gcc_sc7180_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x18208c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sc7180_desc = {
+ .config = &gcc_sc7180_regmap_config,
+ .clk_hws = gcc_sc7180_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_sc7180_hws),
+ .clks = gcc_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sc7180_clocks),
+ .resets = gcc_sc7180_resets,
+ .num_resets = ARRAY_SIZE(gcc_sc7180_resets),
+ .gdscs = gcc_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sc7180_gdscs),
+};
+
+static const struct of_device_id gcc_sc7180_match_table[] = {
+ { .compatible = "qcom,gcc-sc7180" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sc7180_match_table);
+
+static int gcc_sc7180_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sc7180_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Disable the GPLL0 active input to MM blocks, NPU
+ * and GPU via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /*
+ * Keep the following clocks always on:
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK,
+ * GCC_GPU_CFG_AHB_CLK
+ */
+ regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_sc7180_desc, regmap);
+}
+
+static struct platform_driver gcc_sc7180_driver = {
+ .probe = gcc_sc7180_probe,
+ .driver = {
+ .name = "gcc-sc7180",
+ .of_match_table = gcc_sc7180_match_table,
+ },
+};
+
+static int __init gcc_sc7180_init(void)
+{
+ return platform_driver_register(&gcc_sc7180_driver);
+}
+core_initcall(gcc_sc7180_init);
+
+static void __exit gcc_sc7180_exit(void)
+{
+ platform_driver_unregister(&gcc_sc7180_driver);
+}
+module_exit(gcc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI GCC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 95be125c3bdd..d2142fe46a8e 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -408,7 +408,7 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
{ }
};
-static struct clk_init_data gcc_qupv3_wrap0_s0_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -421,10 +421,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s1_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -437,10 +437,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s2_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -453,10 +453,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s3_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -469,10 +469,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s4_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -485,10 +485,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s5_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -501,10 +501,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s6_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.name = "gcc_qupv3_wrap0_s6_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -517,10 +517,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s7_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.name = "gcc_qupv3_wrap0_s7_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -533,10 +533,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s0_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.name = "gcc_qupv3_wrap1_s0_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -549,10 +549,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s1_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.name = "gcc_qupv3_wrap1_s1_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -565,10 +565,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s2_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.name = "gcc_qupv3_wrap1_s2_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -581,10 +581,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s3_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.name = "gcc_qupv3_wrap1_s3_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -597,10 +597,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s4_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.name = "gcc_qupv3_wrap1_s4_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -613,10 +613,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s5_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.name = "gcc_qupv3_wrap1_s5_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -629,10 +629,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s6_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.name = "gcc_qupv3_wrap1_s6_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -645,10 +645,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s7_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.name = "gcc_qupv3_wrap1_s7_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -661,7 +661,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
};
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
@@ -3577,22 +3577,22 @@ static const struct of_device_id gcc_sdm845_match_table[] = {
MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
};
static int gcc_sdm845_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
new file mode 100644
index 000000000000..e5e2492b20c5
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Jeffrey Hugo
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gpucc-msm8998.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPUPLL0_OUT_EVEN,
+};
+
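+/*
+ * Note on clk_parent_data below (assumed common clk framework behaviour):
+ * .fw_name is matched against the "clock-names" DT property, while .name is
+ * the legacy global clock-name fallback used when no DT link exists.
+ */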
+/* Instead of going directly to the block, XO is routed through this branch */
+static struct clk_branch gpucc_cxo_clk = {
+ .halt_reg = 0x1020,
+ .clkr = {
+ .enable_reg = 0x1020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cxo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll gpupll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpupll0",
+ .parent_hws = (const struct clk_hw *[]){ &gpucc_cxo_clk.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpupll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpupll0_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &gpupll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct parent_map gpu_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+};
+
+static const struct clk_parent_data gpu_xo_gpll0[] = {
+ { .hw = &gpucc_cxo_clk.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+};
+
+static const struct parent_map gpu_xo_gpupll0_map[] = {
+ { P_XO, 0 },
+ { P_GPUPLL0_OUT_EVEN, 1 },
+};
+
+static const struct clk_parent_data gpu_xo_gpupll0[] = {
+ { .hw = &gpucc_cxo_clk.clkr.hw },
+ { .hw = &gpupll0_out_even.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+ .cmd_rcgr = 0x1030,
+ .hid_width = 5,
+ .parent_map = gpu_xo_gpll0_map,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk_src",
+ .parent_data = gpu_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ { .src = P_GPUPLL0_OUT_EVEN, .pre_div = 3 },
+ { }
+};
+
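+/*
+ * Only the source and pre-divider are fixed in the table above; the actual
+ * GPU rate is expected to be programmed on GPUPLL0_OUT_EVEN through
+ * CLK_SET_RATE_PARENT on gfx3d_clk_src (an inference from the flags below,
+ * not something stated in this patch).
+ */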
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x1070,
+ .hid_width = 5,
+ .parent_map = gpu_xo_gpupll0_map,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_data = gpu_xo_gpupll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ },
+};
+
+static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+ .cmd_rcgr = 0x10b0,
+ .hid_width = 5,
+ .parent_map = gpu_xo_gpll0_map,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk_src",
+ .parent_data = gpu_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gfx3d_isense_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_isense_clk_src = {
+ .cmd_rcgr = 0x1100,
+ .hid_width = 5,
+ .parent_map = gpu_xo_gpll0_map,
+ .freq_tbl = ftbl_gfx3d_isense_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_isense_clk_src",
+ .parent_data = gpu_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch rbcpr_clk = {
+ .halt_reg = 0x1054,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk",
+ .parent_hws = (const struct clk_hw *[]){ &rbcpr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_clk = {
+ .halt_reg = 0x1098,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk",
+ .parent_hws = (const struct clk_hw *[]){ &gfx3d_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch rbbmtimer_clk = {
+ .halt_reg = 0x10d0,
+ .clkr = {
+ .enable_reg = 0x10d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &rbbmtimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_isense_clk = {
+ .halt_reg = 0x1124,
+ .clkr = {
+ .enable_reg = 0x1124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_isense_clk",
+ .parent_hws = (const struct clk_hw *[]){ &gfx3d_isense_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x1004,
+ .pd = {
+ .name = "gpu_cx",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x1094,
+ .clamp_io_ctrl = 0x130,
+ .pd = {
+ .name = "gpu_gx",
+ },
+ .parent = &gpu_cx_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET,
+};
+
+static struct clk_regmap *gpucc_msm8998_clocks[] = {
+ [GPUPLL0] = &gpupll0.clkr,
+ [GPUPLL0_OUT_EVEN] = &gpupll0_out_even.clkr,
+ [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+ [GFX3D_ISENSE_CLK_SRC] = &gfx3d_isense_clk_src.clkr,
+ [RBCPR_CLK] = &rbcpr_clk.clkr,
+ [GFX3D_CLK] = &gfx3d_clk.clkr,
+ [RBBMTIMER_CLK] = &rbbmtimer_clk.clkr,
+ [GFX3D_ISENSE_CLK] = &gfx3d_isense_clk.clkr,
+ [GPUCC_CXO_CLK] = &gpucc_cxo_clk.clkr,
+};
+
+static struct gdsc *gpucc_msm8998_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpucc_msm8998_resets[] = {
+ [GPU_CX_BCR] = { 0x1000 },
+ [RBCPR_BCR] = { 0x1050 },
+ [GPU_GX_BCR] = { 0x1090 },
+ [GPU_ISENSE_BCR] = { 0x1120 },
+};
+
+static const struct regmap_config gpucc_msm8998_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpucc_msm8998_desc = {
+ .config = &gpucc_msm8998_regmap_config,
+ .clks = gpucc_msm8998_clocks,
+ .num_clks = ARRAY_SIZE(gpucc_msm8998_clocks),
+ .resets = gpucc_msm8998_resets,
+ .num_resets = ARRAY_SIZE(gpucc_msm8998_resets),
+ .gdscs = gpucc_msm8998_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpucc_msm8998_gdscs),
+};
+
+static const struct of_device_id gpucc_msm8998_match_table[] = {
+ { .compatible = "qcom,msm8998-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpucc_msm8998_match_table);
+
+static int gpucc_msm8998_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpucc_msm8998_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* force periph logic on to avoid perf counter corruption */
+ regmap_write_bits(regmap, gfx3d_clk.clkr.enable_reg, BIT(13), BIT(13));
+ /* tweak droop detector (GPUCC_GPU_DD_WRAP_CTRL) to reduce leakage */
+ regmap_write_bits(regmap, gfx3d_clk.clkr.enable_reg, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &gpucc_msm8998_desc, regmap);
+}
+
+static struct platform_driver gpucc_msm8998_driver = {
+ .probe = gpucc_msm8998_probe,
+ .driver = {
+ .name = "gpucc-msm8998",
+ .of_match_table = gpucc_msm8998_match_table,
+ },
+};
+module_platform_driver(gpucc_msm8998_driver);
+
+MODULE_DESCRIPTION("QCOM GPUCC MSM8998 Driver");
+MODULE_LICENSE("GPL v2");
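For context on the flag choices above: gfx3d_clk carries CLK_SET_RATE_PARENT, so rate requests land on gfx3d_clk_src, which in turn propagates them to gpupll0_out_even and keeps the PLL powered around reprogramming via CLK_OPS_PARENT_ENABLE. A minimal consumer-side sketch of what that enables, assuming a hypothetical "gfx3d" consumer clock name (illustration only, not part of this patch):

	struct clk *gfx3d = devm_clk_get(dev, "gfx3d");	/* "gfx3d" is an assumed consumer name */

	if (IS_ERR(gfx3d))
		return PTR_ERR(gfx3d);

	/* One call walks gfx3d_clk -> gfx3d_clk_src -> gpupll0_out_even */
	clk_set_rate(gfx3d, 710000000);			/* 710 MHz is an illustrative rate */
	clk_prepare_enable(gfx3d);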
diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c
new file mode 100644
index 000000000000..723f932fbf7d
--- /dev/null
+++ b/drivers/clk/qcom/q6sstop-qcs404.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,q6sstopcc-qcs404.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+#include "reset.h"
+
+static struct clk_branch lcc_ahbfabric_cbc_clk = {
+ .halt_reg = 0x1b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_ahbfabric_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_ahbs_cbc_clk = {
+ .halt_reg = 0x22000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x22000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_ahbs_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_tcm_slave_cbc_clk = {
+ .halt_reg = 0x1c000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x1c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_tcm_slave_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_ahbm_cbc_clk = {
+ .halt_reg = 0x22004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x22004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_ahbm_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_axim_cbc_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_axim_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_bcr_sleep_clk = {
+ .halt_reg = 0x6004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x6004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_bcr_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* TCSR clock */
+static struct clk_branch tcsr_lcc_csr_cbcr_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_lcc_csr_cbcr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct regmap_config q6sstop_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static struct clk_regmap *q6sstop_qcs404_clocks[] = {
+ [LCC_AHBFABRIC_CBC_CLK] = &lcc_ahbfabric_cbc_clk.clkr,
+ [LCC_Q6SS_AHBS_CBC_CLK] = &lcc_q6ss_ahbs_cbc_clk.clkr,
+ [LCC_Q6SS_TCM_SLAVE_CBC_CLK] = &lcc_q6ss_tcm_slave_cbc_clk.clkr,
+ [LCC_Q6SS_AHBM_CBC_CLK] = &lcc_q6ss_ahbm_cbc_clk.clkr,
+ [LCC_Q6SS_AXIM_CBC_CLK] = &lcc_q6ss_axim_cbc_clk.clkr,
+ [LCC_Q6SS_BCR_SLEEP_CLK] = &lcc_q6ss_bcr_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map q6sstop_qcs404_resets[] = {
+ [Q6SSTOP_BCR_RESET] = { 0x6000 },
+};
+
+static const struct qcom_cc_desc q6sstop_qcs404_desc = {
+ .config = &q6sstop_regmap_config,
+ .clks = q6sstop_qcs404_clocks,
+ .num_clks = ARRAY_SIZE(q6sstop_qcs404_clocks),
+ .resets = q6sstop_qcs404_resets,
+ .num_resets = ARRAY_SIZE(q6sstop_qcs404_resets),
+};
+
+static struct clk_regmap *tcsr_qcs404_clocks[] = {
+ [TCSR_Q6SS_LCC_CBCR_CLK] = &tcsr_lcc_csr_cbcr_clk.clkr,
+};
+
+static const struct qcom_cc_desc tcsr_qcs404_desc = {
+ .config = &q6sstop_regmap_config,
+ .clks = tcsr_qcs404_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_qcs404_clocks),
+};
+
+static const struct of_device_id q6sstopcc_qcs404_match_table[] = {
+ { .compatible = "qcom,qcs404-q6sstopcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, q6sstopcc_qcs404_match_table);
+
+static int q6sstopcc_qcs404_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ q6sstop_regmap_config.name = "q6sstop_tcsr";
+ desc = &tcsr_qcs404_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ q6sstop_regmap_config.name = "q6sstop_cc";
+ desc = &q6sstop_qcs404_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int q6sstopcc_qcs404_remove(struct platform_device *pdev)
+{
+ pm_clk_destroy(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops q6sstopcc_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver q6sstopcc_qcs404_driver = {
+ .probe = q6sstopcc_qcs404_probe,
+ .remove = q6sstopcc_qcs404_remove,
+ .driver = {
+ .name = "qcs404-q6sstopcc",
+ .of_match_table = q6sstopcc_qcs404_match_table,
+ .pm = &q6sstopcc_pm_ops,
+ },
+};
+
+module_platform_driver(q6sstopcc_qcs404_driver);
+
+MODULE_DESCRIPTION("QTI QCS404 Q6SSTOP Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
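A note on the PM wiring above: pm_clk_add(&pdev->dev, NULL) puts the device's unnamed interface clock on the PM clock list, and SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL) lets the runtime PM core gate it automatically. A rough sketch of the effect from a caller's perspective (illustration only, not part of this patch):

	pm_runtime_get_sync(dev);	/* runtime resume -> pm_clk_resume() enables the listed clocks */
	/* ... access Q6SSTOP / TCSR registers ... */
	pm_runtime_put(dev);		/* device idles -> pm_clk_suspend() disables them again */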
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index b879e3e3a6b4..4cd846bc98cc 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -12,6 +12,7 @@ config CLK_RENESAS
select CLK_R8A7745 if ARCH_R8A7745
select CLK_R8A77470 if ARCH_R8A77470
select CLK_R8A774A1 if ARCH_R8A774A1
+ select CLK_R8A774B1 if ARCH_R8A774B1
select CLK_R8A774C0 if ARCH_R8A774C0
select CLK_R8A7778 if ARCH_R8A7778
select CLK_R8A7779 if ARCH_R8A7779
@@ -20,7 +21,8 @@ config CLK_RENESAS
select CLK_R8A7792 if ARCH_R8A7792
select CLK_R8A7794 if ARCH_R8A7794
select CLK_R8A7795 if ARCH_R8A7795
- select CLK_R8A7796 if ARCH_R8A7796
+ select CLK_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select CLK_R8A77961 if ARCH_R8A77961
select CLK_R8A77965 if ARCH_R8A77965
select CLK_R8A77970 if ARCH_R8A77970
select CLK_R8A77980 if ARCH_R8A77980
@@ -31,17 +33,6 @@ config CLK_RENESAS
if CLK_RENESAS
-config CLK_RENESAS_LEGACY
- bool "Legacy DT clock support"
- depends on CLK_R8A7790 || CLK_R8A7791 || CLK_R8A7792 || CLK_R8A7794
- help
- Enable backward compatibility with old device trees describing a
- hierarchical representation of the various CPG and MSTP clocks.
-
- Say Y if you want your kernel to work with old DTBs.
- It is safe to say N if you use the DTS that is supplied with the
- current kernel source tree.
-
# SoC
config CLK_EMEV2
bool "Emma Mobile EV2 clock support" if COMPILE_TEST
@@ -80,6 +71,10 @@ config CLK_R8A774A1
bool "RZ/G2M clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A774B1
+ bool "RZ/G2N clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A774C0
bool "RZ/G2E clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
@@ -94,24 +89,20 @@ config CLK_R8A7779
config CLK_R8A7790
bool "R-Car H2 clock support" if COMPILE_TEST
- select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
config CLK_R8A7791
bool "R-Car M2-W/N clock support" if COMPILE_TEST
- select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
config CLK_R8A7792
bool "R-Car V2H clock support" if COMPILE_TEST
- select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
config CLK_R8A7794
bool "R-Car E2 clock support" if COMPILE_TEST
- select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
@@ -119,10 +110,14 @@ config CLK_R8A7795
bool "R-Car H3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
-config CLK_R8A7796
+config CLK_R8A77960
bool "R-Car M3-W clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77961
+ bool "R-Car M3-W+ clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77965
bool "R-Car M3-N clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
@@ -155,11 +150,6 @@ config CLK_SH73A0
# Family
-config CLK_RCAR_GEN2
- bool "R-Car Gen2 legacy clock support" if COMPILE_TEST
- select CLK_RENESAS_CPG_MSTP
- select CLK_RENESAS_DIV6
-
config CLK_RCAR_GEN2_CPG
bool "R-Car Gen2 CPG clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSSR
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index c793e3cc9452..4a722bc5aac7 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o
obj-$(CONFIG_CLK_R8A774A1) += r8a774a1-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A774B1) += r8a774b1-cpg-mssr.o
obj-$(CONFIG_CLK_R8A774C0) += r8a774c0-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7778) += clk-r8a7778.o
obj-$(CONFIG_CLK_R8A7779) += clk-r8a7779.o
@@ -17,7 +18,8 @@ obj-$(CONFIG_CLK_R8A7791) += r8a7791-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o
-obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77960) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77961) += r8a7796-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77965) += r8a77965-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
@@ -27,7 +29,6 @@ obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
-obj-$(CONFIG_CLK_RCAR_GEN2) += clk-rcar-gen2.o
obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index e326e6dc09fc..003e9ce45757 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -189,10 +189,8 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
unsigned int i;
group = kzalloc(struct_size(group, clks, MSTP_MAX_CLOCKS), GFP_KERNEL);
- if (group == NULL) {
- kfree(group);
+ if (!group)
return;
- }
clks = group->clks;
spin_lock_init(&group->lock);
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
deleted file mode 100644
index da9fe3f032eb..000000000000
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ /dev/null
@@ -1,457 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * rcar_gen2 Core CPG Clocks
- *
- * Copyright (C) 2013 Ideas On Board SPRL
- *
- * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/renesas.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/soc/renesas/rcar-rst.h>
-
-struct rcar_gen2_cpg {
- struct clk_onecell_data data;
- spinlock_t lock;
- void __iomem *reg;
-};
-
-#define CPG_FRQCRB 0x00000004
-#define CPG_FRQCRB_KICK BIT(31)
-#define CPG_SDCKCR 0x00000074
-#define CPG_PLL0CR 0x000000d8
-#define CPG_FRQCRC 0x000000e0
-#define CPG_FRQCRC_ZFC_MASK (0x1f << 8)
-#define CPG_FRQCRC_ZFC_SHIFT 8
-#define CPG_ADSPCKCR 0x0000025c
-#define CPG_RCANCKCR 0x00000270
-
-/* -----------------------------------------------------------------------------
- * Z Clock
- *
- * Traits of this clock:
- * prepare - clk_prepare only ensures that parents are prepared
- * enable - clk_enable only ensures that parents are enabled
- * rate - rate is adjustable. clk->rate = parent->rate * mult / 32
- * parent - fixed parent. No clk_set_parent support
- */
-
-struct cpg_z_clk {
- struct clk_hw hw;
- void __iomem *reg;
- void __iomem *kick_reg;
-};
-
-#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
-
-static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- unsigned int val;
-
- val = (readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK) >> CPG_FRQCRC_ZFC_SHIFT;
- mult = 32 - val;
-
- return div_u64((u64)parent_rate * mult, 32);
-}
-
-static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- unsigned long prate = *parent_rate;
- unsigned int mult;
-
- if (!prate)
- prate = 1;
-
- mult = div_u64((u64)rate * 32, prate);
- mult = clamp(mult, 1U, 32U);
-
- return *parent_rate / 32 * mult;
-}
-
-static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val, kick;
- unsigned int i;
-
- mult = div_u64((u64)rate * 32, parent_rate);
- mult = clamp(mult, 1U, 32U);
-
- if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
- return -EBUSY;
-
- val = readl(zclk->reg);
- val &= ~CPG_FRQCRC_ZFC_MASK;
- val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
- writel(val, zclk->reg);
-
- /*
- * Set KICK bit in FRQCRB to update hardware setting and wait for
- * clock change completion.
- */
- kick = readl(zclk->kick_reg);
- kick |= CPG_FRQCRB_KICK;
- writel(kick, zclk->kick_reg);
-
- /*
- * Note: There is no HW information about the worst case latency.
- *
- * Using experimental measurements, it seems that no more than
- * ~10 iterations are needed, independently of the CPU rate.
- * Since this value might be dependent on external xtal rate, pll1
- * rate or even the other emulation clocks rate, use 1000 as a
- * "super" safe value.
- */
- for (i = 1000; i; i--) {
- if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
- return 0;
-
- cpu_relax();
- }
-
- return -ETIMEDOUT;
-}
-
-static const struct clk_ops cpg_z_clk_ops = {
- .recalc_rate = cpg_z_clk_recalc_rate,
- .round_rate = cpg_z_clk_round_rate,
- .set_rate = cpg_z_clk_set_rate,
-};
-
-static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
-{
- static const char *parent_name = "pll0";
- struct clk_init_data init;
- struct cpg_z_clk *zclk;
- struct clk *clk;
-
- zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
- if (!zclk)
- return ERR_PTR(-ENOMEM);
-
- init.name = "z";
- init.ops = &cpg_z_clk_ops;
- init.flags = 0;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- zclk->reg = cpg->reg + CPG_FRQCRC;
- zclk->kick_reg = cpg->reg + CPG_FRQCRB;
- zclk->hw.init = &init;
-
- clk = clk_register(NULL, &zclk->hw);
- if (IS_ERR(clk))
- kfree(zclk);
-
- return clk;
-}
-
-static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
- struct device_node *np)
-{
- const char *parent_name = of_clk_get_parent_name(np, 1);
- struct clk_fixed_factor *fixed;
- struct clk_gate *gate;
- struct clk *clk;
-
- fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
- if (!fixed)
- return ERR_PTR(-ENOMEM);
-
- fixed->mult = 1;
- fixed->div = 6;
-
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate) {
- kfree(fixed);
- return ERR_PTR(-ENOMEM);
- }
-
- gate->reg = cpg->reg + CPG_RCANCKCR;
- gate->bit_idx = 8;
- gate->flags = CLK_GATE_SET_TO_DISABLE;
- gate->lock = &cpg->lock;
-
- clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL,
- &fixed->hw, &clk_fixed_factor_ops,
- &gate->hw, &clk_gate_ops, 0);
- if (IS_ERR(clk)) {
- kfree(gate);
- kfree(fixed);
- }
-
- return clk;
-}
-
-/* ADSP divisors */
-static const struct clk_div_table cpg_adsp_div_table[] = {
- { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 },
- { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
- { 10, 36 }, { 11, 48 }, { 0, 0 },
-};
-
-static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
-{
- const char *parent_name = "pll1";
- struct clk_divider *div;
- struct clk_gate *gate;
- struct clk *clk;
-
- div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div)
- return ERR_PTR(-ENOMEM);
-
- div->reg = cpg->reg + CPG_ADSPCKCR;
- div->width = 4;
- div->table = cpg_adsp_div_table;
- div->lock = &cpg->lock;
-
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate) {
- kfree(div);
- return ERR_PTR(-ENOMEM);
- }
-
- gate->reg = cpg->reg + CPG_ADSPCKCR;
- gate->bit_idx = 8;
- gate->flags = CLK_GATE_SET_TO_DISABLE;
- gate->lock = &cpg->lock;
-
- clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL,
- &div->hw, &clk_divider_ops,
- &gate->hw, &clk_gate_ops, 0);
- if (IS_ERR(clk)) {
- kfree(gate);
- kfree(div);
- }
-
- return clk;
-}
-
-/* -----------------------------------------------------------------------------
- * CPG Clock Data
- */
-
-/*
- * MD EXTAL PLL0 PLL1 PLL3
- * 14 13 19 (MHz) *1 *1
- *---------------------------------------------------
- * 0 0 0 15 x 1 x172/2 x208/2 x106
- * 0 0 1 15 x 1 x172/2 x208/2 x88
- * 0 1 0 20 x 1 x130/2 x156/2 x80
- * 0 1 1 20 x 1 x130/2 x156/2 x66
- * 1 0 0 26 / 2 x200/2 x240/2 x122
- * 1 0 1 26 / 2 x200/2 x240/2 x102
- * 1 1 0 30 / 2 x172/2 x208/2 x106
- * 1 1 1 30 / 2 x172/2 x208/2 x88
- *
- * *1 : Table 7.6 indicates VCO output (PLLx = VCO/2)
- */
-#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
- (((md) & BIT(13)) >> 12) | \
- (((md) & BIT(19)) >> 19))
-struct cpg_pll_config {
- unsigned int extal_div;
- unsigned int pll1_mult;
- unsigned int pll3_mult;
- unsigned int pll0_mult; /* For R-Car V2H and E2 only */
-};
-
-static const struct cpg_pll_config cpg_pll_configs[8] __initconst = {
- { 1, 208, 106, 200 }, { 1, 208, 88, 200 },
- { 1, 156, 80, 150 }, { 1, 156, 66, 150 },
- { 2, 240, 122, 230 }, { 2, 240, 102, 230 },
- { 2, 208, 106, 200 }, { 2, 208, 88, 200 },
-};
-
-/* SDHI divisors */
-static const struct clk_div_table cpg_sdh_div_table[] = {
- { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 },
- { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
- { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 },
-};
-
-static const struct clk_div_table cpg_sd01_div_table[] = {
- { 4, 8 },
- { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
- { 10, 36 }, { 11, 48 }, { 12, 10 }, { 0, 0 },
-};
-
-/* -----------------------------------------------------------------------------
- * Initialization
- */
-
-static u32 cpg_mode __initdata;
-
-static const char * const pll0_mult_match[] = {
- "renesas,r8a7792-cpg-clocks",
- "renesas,r8a7794-cpg-clocks",
- NULL
-};
-
-static struct clk * __init
-rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
- const struct cpg_pll_config *config,
- const char *name)
-{
- const struct clk_div_table *table = NULL;
- const char *parent_name;
- unsigned int shift;
- unsigned int mult = 1;
- unsigned int div = 1;
-
- if (!strcmp(name, "main")) {
- parent_name = of_clk_get_parent_name(np, 0);
- div = config->extal_div;
- } else if (!strcmp(name, "pll0")) {
- /* PLL0 is a configurable multiplier clock. Register it as a
- * fixed factor clock for now as there's no generic multiplier
- * clock implementation and we currently have no need to change
- * the multiplier value.
- */
- if (of_device_compatible_match(np, pll0_mult_match)) {
- /* R-Car V2H and E2 do not have PLL0CR */
- mult = config->pll0_mult;
- div = 3;
- } else {
- u32 value = readl(cpg->reg + CPG_PLL0CR);
- mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
- }
- parent_name = "main";
- } else if (!strcmp(name, "pll1")) {
- parent_name = "main";
- mult = config->pll1_mult / 2;
- } else if (!strcmp(name, "pll3")) {
- parent_name = "main";
- mult = config->pll3_mult;
- } else if (!strcmp(name, "lb")) {
- parent_name = "pll1";
- div = cpg_mode & BIT(18) ? 36 : 24;
- } else if (!strcmp(name, "qspi")) {
- parent_name = "pll1_div2";
- div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
- ? 8 : 10;
- } else if (!strcmp(name, "sdh")) {
- parent_name = "pll1";
- table = cpg_sdh_div_table;
- shift = 8;
- } else if (!strcmp(name, "sd0")) {
- parent_name = "pll1";
- table = cpg_sd01_div_table;
- shift = 4;
- } else if (!strcmp(name, "sd1")) {
- parent_name = "pll1";
- table = cpg_sd01_div_table;
- shift = 0;
- } else if (!strcmp(name, "z")) {
- return cpg_z_clk_register(cpg);
- } else if (!strcmp(name, "rcan")) {
- return cpg_rcan_clk_register(cpg, np);
- } else if (!strcmp(name, "adsp")) {
- return cpg_adsp_clk_register(cpg);
- } else {
- return ERR_PTR(-EINVAL);
- }
-
- if (!table)
- return clk_register_fixed_factor(NULL, name, parent_name, 0,
- mult, div);
- else
- return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + CPG_SDCKCR, shift,
- 4, 0, table, &cpg->lock);
-}
-
-/*
- * Reset register definitions.
- */
-#define MODEMR 0xe6160060
-
-static u32 __init rcar_gen2_read_mode_pins(void)
-{
- void __iomem *modemr = ioremap_nocache(MODEMR, 4);
- u32 mode;
-
- BUG_ON(!modemr);
- mode = ioread32(modemr);
- iounmap(modemr);
-
- return mode;
-}
-
-static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
-{
- const struct cpg_pll_config *config;
- struct rcar_gen2_cpg *cpg;
- struct clk **clks;
- unsigned int i;
- int num_clks;
-
- if (rcar_rst_read_mode_pins(&cpg_mode)) {
- /* Backward-compatibility with old DT */
- pr_warn("%pOF: failed to obtain mode pins from RST\n", np);
- cpg_mode = rcar_gen2_read_mode_pins();
- }
-
- num_clks = of_property_count_strings(np, "clock-output-names");
- if (num_clks < 0) {
- pr_err("%s: failed to count clocks\n", __func__);
- return;
- }
-
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
- clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
- if (cpg == NULL || clks == NULL) {
- /* We're leaking memory on purpose, there's no point in cleaning
- * up as the system won't boot anyway.
- */
- return;
- }
-
- spin_lock_init(&cpg->lock);
-
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
-
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
- return;
-
- config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
-
- for (i = 0; i < num_clks; ++i) {
- const char *name;
- struct clk *clk;
-
- of_property_read_string_index(np, "clock-output-names", i,
- &name);
-
- clk = rcar_gen2_cpg_register_clock(np, cpg, config, name);
- if (IS_ERR(clk))
- pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
- __func__, np, name, PTR_ERR(clk));
- else
- cpg->data.clks[i] = clk;
- }
-
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
-
- cpg_mstp_add_clk_domain(np);
-}
-CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",
- rcar_gen2_cpg_clocks_init);
diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
new file mode 100644
index 000000000000..c9af70917312
--- /dev/null
+++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a774b1 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ *
+ * Based on r8a7796-cpg-mssr.c
+ *
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a774b1-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A774B1_CLK_CANFD,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL4,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_RINT,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+
+ DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
+
+ /* Core Clock Outputs */
+ DEF_GEN3_Z("z", R8A774B1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
+ DEF_FIXED("ztr", R8A774B1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A774B1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A774B1_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A774B1_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("s0d1", R8A774B1_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d2", R8A774B1_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A774B1_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A774B1_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6", R8A774B1_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d8", R8A774B1_CLK_S0D8, CLK_S0, 8, 1),
+ DEF_FIXED("s0d12", R8A774B1_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s1d2", R8A774B1_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A774B1_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A774B1_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A774B1_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A774B1_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A774B1_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A774B1_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A774B1_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A774B1_CLK_SD0, CLK_SDSRC, 0x074),
+ DEF_GEN3_SD("sd1", R8A774B1_CLK_SD1, CLK_SDSRC, 0x078),
+ DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, CLK_SDSRC, 0x268),
+ DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, CLK_SDSRC, 0x26c),
+
+ DEF_FIXED("cl", R8A774B1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A774B1_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A774B1_CLK_CPEX, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("canfd", R8A774B1_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("csi0", R8A774B1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+ DEF_DIV6P1("mso", R8A774B1_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+ DEF_DIV6P1("hdmi", R8A774B1_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
+
+ DEF_GEN3_OSC("osc", R8A774B1_CLK_OSC, CLK_EXTAL, 8),
+
+ DEF_BASE("r", R8A774B1_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
+};
+
+static const struct mssr_mod_clk r8a774b1_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A774B1_CLK_S0D6),
+ DEF_MOD("tmu3", 122, R8A774B1_CLK_S3D2),
+ DEF_MOD("tmu2", 123, R8A774B1_CLK_S3D2),
+ DEF_MOD("tmu1", 124, R8A774B1_CLK_S3D2),
+ DEF_MOD("tmu0", 125, R8A774B1_CLK_CP),
+ DEF_MOD("fdp1-0", 119, R8A774B1_CLK_S0D1),
+ DEF_MOD("scif5", 202, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif4", 203, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A774B1_CLK_S3D4),
+ DEF_MOD("msiof3", 208, R8A774B1_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A774B1_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A774B1_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A774B1_CLK_MSO),
+ DEF_MOD("sys-dmac2", 217, R8A774B1_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A774B1_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A774B1_CLK_S0D3),
+ DEF_MOD("cmt3", 300, R8A774B1_CLK_R),
+ DEF_MOD("cmt2", 301, R8A774B1_CLK_R),
+ DEF_MOD("cmt1", 302, R8A774B1_CLK_R),
+ DEF_MOD("cmt0", 303, R8A774B1_CLK_R),
+ DEF_MOD("tpu0", 304, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif2", 310, R8A774B1_CLK_S3D4),
+ DEF_MOD("sdif3", 311, R8A774B1_CLK_SD3),
+ DEF_MOD("sdif2", 312, R8A774B1_CLK_SD2),
+ DEF_MOD("sdif1", 313, R8A774B1_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A774B1_CLK_SD0),
+ DEF_MOD("pcie1", 318, R8A774B1_CLK_S3D1),
+ DEF_MOD("pcie0", 319, R8A774B1_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A774B1_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A774B1_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A774B1_CLK_S3D1),
+ DEF_MOD("rwdt", 402, R8A774B1_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A774B1_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A774B1_CLK_S0D3),
+ DEF_MOD("audmac1", 501, R8A774B1_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A774B1_CLK_S1D2),
+ DEF_MOD("hscif4", 516, R8A774B1_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A774B1_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A774B1_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A774B1_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A774B1_CLK_S3D1),
+ DEF_MOD("thermal", 522, R8A774B1_CLK_CP),
+ DEF_MOD("pwm", 523, R8A774B1_CLK_S0D12),
+ DEF_MOD("fcpvd1", 602, R8A774B1_CLK_S0D2),
+ DEF_MOD("fcpvd0", 603, R8A774B1_CLK_S0D2),
+ DEF_MOD("fcpvb0", 607, R8A774B1_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A774B1_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A774B1_CLK_S0D1),
+ DEF_MOD("fcpcs", 619, R8A774B1_CLK_S0D2),
+ DEF_MOD("vspd1", 622, R8A774B1_CLK_S0D2),
+ DEF_MOD("vspd0", 623, R8A774B1_CLK_S0D2),
+ DEF_MOD("vspb", 626, R8A774B1_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A774B1_CLK_S0D1),
+ DEF_MOD("ehci1", 702, R8A774B1_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A774B1_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A774B1_CLK_S3D2),
+ DEF_MOD("csi20", 714, R8A774B1_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A774B1_CLK_CSI0),
+ DEF_MOD("du3", 721, R8A774B1_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A774B1_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A774B1_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A774B1_CLK_S2D1),
+ DEF_MOD("hdmi0", 729, R8A774B1_CLK_HDMI),
+ DEF_MOD("vin7", 804, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin6", 805, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin5", 806, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin4", 807, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin3", 808, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin2", 809, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin1", 810, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin0", 811, R8A774B1_CLK_S0D2),
+ DEF_MOD("etheravb", 812, R8A774B1_CLK_S0D6),
+ DEF_MOD("sata0", 815, R8A774B1_CLK_S3D2),
+ DEF_MOD("gpio7", 905, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio6", 906, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A774B1_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A774B1_CLK_S3D2),
+ DEF_MOD("can-if1", 915, R8A774B1_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A774B1_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A774B1_CLK_S0D6),
+ DEF_MOD("i2c5", 919, R8A774B1_CLK_S0D6),
+ DEF_MOD("i2c-dvfs", 926, R8A774B1_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A774B1_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A774B1_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A774B1_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A774B1_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A774B1_CLK_S3D2),
+ DEF_MOD("ssi-all", 1005, R8A774B1_CLK_S3D4),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A774B1_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a774b1_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3 PLL4 OSC
+ * 14 13 19 17 (MHz)
+ *-----------------------------------------------------------------
+ * 0 0 0 0 16.66 x 1 x180 x192 x192 x144 /16
+ * 0 0 0 1 16.66 x 1 x180 x192 x128 x144 /16
+ * 0 0 1 0 Prohibited setting
+ * 0 0 1 1 16.66 x 1 x180 x192 x192 x144 /16
+ * 0 1 0 0 20 x 1 x150 x160 x160 x120 /19
+ * 0 1 0 1 20 x 1 x150 x160 x106 x120 /19
+ * 0 1 1 0 Prohibited setting
+ * 0 1 1 1 20 x 1 x150 x160 x160 x120 /19
+ * 1 0 0 0 25 x 1 x120 x128 x128 x96 /24
+ * 1 0 0 1 25 x 1 x120 x128 x84 x96 /24
+ * 1 0 1 0 Prohibited setting
+ * 1 0 1 1 25 x 1 x120 x128 x128 x96 /24
+ * 1 1 0 0 33.33 / 2 x180 x192 x192 x144 /32
+ * 1 1 0 1 33.33 / 2 x180 x192 x128 x144 /32
+ * 1 1 1 0 Prohibited setting
+ * 1 1 1 1 33.33 / 2 x180 x192 x192 x144 /32
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
+ (((md) & BIT(13)) >> 11) | \
+ (((md) & BIT(19)) >> 18) | \
+ (((md) & BIT(17)) >> 17))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 192, 1, 128, 1, 16, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 160, 1, 106, 1, 19, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 128, 1, 128, 1, 24, },
+ { 1, 128, 1, 84, 1, 24, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, 24, },
+ { 2, 192, 1, 192, 1, 32, },
+ { 2, 192, 1, 128, 1, 32, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, 32, },
+};
+
+static int __init r8a774b1_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a774b1_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a774b1_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a774b1_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a774b1_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a774b1_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a774b1_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a774b1_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a774b1_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
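To cross-check the mode-pin table against the array above, note that CPG_PLL_CONFIG_INDEX() packs MD14, MD13, MD19 and MD17 into bits 3..0 of the index. A worked example (the values come straight from the table; the computation is only an illustration):

	/*
	 * MD14=1 MD13=0 MD19=1 MD17=1
	 *   index = (1 << 3) | (0 << 2) | (1 << 1) | (1 << 0) = 11
	 *   cpg_pll_configs[11] = { 1, 128, 1, 128, 1, 24 }
	 *   => EXTAL 25 MHz / 1, PLL1 x128, PLL3 x128, OSC prediv /24
	 */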
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 90cc6a102602..e8420d3ada94 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * r8a7796 Clock Pulse Generator / Module Standby and Software Reset
+ * r8a7796 (R-Car M3-W/W+) Clock Pulse Generator / Module Standby and Software
+ * Reset
*
- * Copyright (C) 2016 Glider bvba
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2016-2019 Glider bvba
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*
* Based on r8a7795-cpg-mssr.c
*
@@ -14,6 +15,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7796-cpg-mssr.h>
@@ -116,7 +118,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_BASE("r", R8A7796_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
};
-static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
+static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("fdp1-0", 119, R8A7796_CLK_S0D1),
DEF_MOD("scif5", 202, R8A7796_CLK_S3D4),
DEF_MOD("scif4", 203, R8A7796_CLK_S3D4),
@@ -304,6 +306,14 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
{ 2, 192, 1, 192, 1, 32, },
};
+ /*
+ * Fixups for R-Car M3-W+
+ */
+
+static const unsigned int r8a77961_mod_nullify[] __initconst = {
+ MOD_CLK_ID(617), /* FCPCI0 */
+};
+
static int __init r8a7796_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
@@ -320,6 +330,12 @@ static int __init r8a7796_cpg_mssr_init(struct device *dev)
return -EINVAL;
}
+ if (of_device_is_compatible(dev->of_node, "renesas,r8a77961-cpg-mssr"))
+ mssr_mod_nullify(r8a7796_mod_clks,
+ ARRAY_SIZE(r8a7796_mod_clks),
+ r8a77961_mod_nullify,
+ ARRAY_SIZE(r8a77961_mod_nullify));
+
return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
}
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index b4e8c5b7d515..b3af4da2ca74 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -323,7 +323,7 @@ static int __init r8a77965_cpg_mssr_init(struct device *dev)
}
return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
-};
+}
const struct cpg_mssr_info r8a77965_cpg_mssr_info __initconst = {
/* Core Clocks */
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index f596a2dafcf4..d4fa3dc3e2a2 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -63,19 +63,22 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
return div_u64((u64)parent_rate * mult, 32);
}
-static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cpg_z_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long prate = *parent_rate;
- unsigned int mult;
+ unsigned long prate = req->best_parent_rate;
+ unsigned int min_mult, max_mult, mult;
- if (!prate)
- prate = 1;
+ min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
- mult = div_u64((u64)rate * 32, prate);
- mult = clamp(mult, 1U, 32U);
+ mult = div64_ul(req->rate * 32ULL, prate);
+ mult = clamp(mult, min_mult, max_mult);
- return *parent_rate / 32 * mult;
+ req->rate = div_u64((u64)prate * mult, 32);
+ return 0;
}
static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -86,7 +89,7 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
u32 val, kick;
unsigned int i;
- mult = div_u64((u64)rate * 32, parent_rate);
+ mult = div64_ul(rate * 32ULL, parent_rate);
mult = clamp(mult, 1U, 32U);
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
@@ -126,7 +129,7 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cpg_z_clk_ops = {
.recalc_rate = cpg_z_clk_recalc_rate,
- .round_rate = cpg_z_clk_round_rate,
+ .determine_rate = cpg_z_clk_determine_rate,
.set_rate = cpg_z_clk_set_rate,
};
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index d25c8ba00a65..c97b647db9b6 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -114,18 +114,24 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
32 * zclk->fixed_div);
}
-static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cpg_z_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int min_mult, max_mult, mult;
unsigned long prate;
- unsigned int mult;
- prate = *parent_rate / zclk->fixed_div;
- mult = div_u64(rate * 32ULL, prate);
- mult = clamp(mult, 1U, 32U);
+ prate = req->best_parent_rate / zclk->fixed_div;
+ min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
+
+ mult = div64_ul(req->rate * 32ULL, prate);
+ mult = clamp(mult, min_mult, max_mult);
- return (u64)prate * mult / 32;
+ req->rate = div_u64((u64)prate * mult, 32);
+ return 0;
}
static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -172,7 +178,7 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cpg_z_clk_ops = {
.recalc_rate = cpg_z_clk_recalc_rate,
- .round_rate = cpg_z_clk_round_rate,
+ .determine_rate = cpg_z_clk_determine_rate,
.set_rate = cpg_z_clk_set_rate,
};
@@ -309,44 +315,44 @@ static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
clock->div_table[clock->cur_div_idx].div);
}
-static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
- unsigned long rate,
- unsigned long parent_rate)
+static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long calc_rate, diff, diff_min = ULONG_MAX;
- unsigned int i, best_div = 0;
+ unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned long calc_rate, diff;
+ unsigned int i;
for (i = 0; i < clock->div_num; i++) {
- calc_rate = DIV_ROUND_CLOSEST(parent_rate,
+ calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
clock->div_table[i].div);
- diff = calc_rate > rate ? calc_rate - rate : rate - calc_rate;
+ if (calc_rate < req->min_rate || calc_rate > req->max_rate)
+ continue;
+
+ diff = calc_rate > req->rate ? calc_rate - req->rate
+ : req->rate - calc_rate;
if (diff < diff_min) {
- best_div = clock->div_table[i].div;
+ best_rate = calc_rate;
diff_min = diff;
}
}
- return best_div;
-}
-
-static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate);
+ if (best_rate == ULONG_MAX)
+ return -EINVAL;
- return DIV_ROUND_CLOSEST(*parent_rate, div);
+ req->rate = best_rate;
+ return 0;
}
static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+ unsigned long parent_rate)
{
struct sd_clock *clock = to_sd_clock(hw);
- unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
unsigned int i;
for (i = 0; i < clock->div_num; i++)
- if (div == clock->div_table[i].div)
+ if (rate == DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[i].div))
break;
if (i >= clock->div_num)
@@ -366,7 +372,7 @@ static const struct clk_ops cpg_sd_clock_ops = {
.disable = cpg_sd_clock_disable,
.is_enabled = cpg_sd_clock_is_enabled,
.recalc_rate = cpg_sd_clock_recalc_rate,
- .round_rate = cpg_sd_clock_round_rate,
+ .determine_rate = cpg_sd_clock_determine_rate,
.set_rate = cpg_sd_clock_set_rate,
};
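For reference, the .determine_rate conversions above keep the old round_rate arithmetic but additionally honour the min_rate/max_rate bounds carried in the request. A numeric sketch for the Z clock (illustrative numbers only, not taken from the patch):

	/*
	 * prate = best_parent_rate / fixed_div = 1500 MHz, req->rate = 1000 MHz:
	 *   mult      = 1000 * 32 / 1500 = 21        (then clamped to [min_mult, max_mult])
	 *   req->rate = 1500 * 21 / 32   = 984.375 MHz
	 * If the [min_rate, max_rate] window leaves no usable multiplier
	 * (max_mult < min_mult), the request is rejected with -EINVAL.
	 */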
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 132cc96895e3..a2663fbbd7a5 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -702,6 +702,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a774a1_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A774B1
+ {
+ .compatible = "renesas,r8a774b1-cpg-mssr",
+ .data = &r8a774b1_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A774C0
{
.compatible = "renesas,r8a774c0-cpg-mssr",
@@ -743,12 +749,18 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7795_cpg_mssr_info,
},
#endif
-#ifdef CONFIG_CLK_R8A7796
+#ifdef CONFIG_CLK_R8A77960
{
.compatible = "renesas,r8a7796-cpg-mssr",
.data = &r8a7796_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77961
+ {
+ .compatible = "renesas,r8a77961-cpg-mssr",
+ .data = &r8a7796_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77965
{
.compatible = "renesas,r8a77965-cpg-mssr",
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 4ddcdf3bfb95..3b852ba0ecec 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -159,6 +159,7 @@ extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77470_cpg_mssr_info;
extern const struct cpg_mssr_info r8a774a1_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a774b1_cpg_mssr_info;
extern const struct cpg_mssr_info r8a774c0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7790_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7791_cpg_mssr_info;
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index ba9f00dc9740..b333fc28c94b 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -139,12 +139,11 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-const struct clk_ops clk_half_divider_ops = {
+static const struct clk_ops clk_half_divider_ops = {
.recalc_rate = clk_half_divider_recalc_rate,
.round_rate = clk_half_divider_round_rate,
.set_rate = clk_half_divider_set_rate,
};
-EXPORT_SYMBOL_GPL(clk_half_divider_ops);
/**
* Register a clock branch.
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
index 3a501896b280..6fb9c98b7d24 100644
--- a/drivers/clk/rockchip/clk-px30.c
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -167,6 +167,10 @@ PNAME(mux_uart5_p) = { "clk_uart5_src", "clk_uart5_np5", "clk_uart5_frac" };
PNAME(mux_cif_out_p) = { "xin24m", "dummy_cpll", "npll", "usb480m" };
PNAME(mux_dclk_vopb_p) = { "dclk_vopb_src", "dclk_vopb_frac", "xin24m" };
PNAME(mux_dclk_vopl_p) = { "dclk_vopl_src", "dclk_vopl_frac", "xin24m" };
+PNAME(mux_nandc_p) = { "clk_nandc_div", "clk_nandc_div50" };
+PNAME(mux_sdio_p) = { "clk_sdio_div", "clk_sdio_div50" };
+PNAME(mux_emmc_p) = { "clk_emmc_div", "clk_emmc_div50" };
+PNAME(mux_sdmmc_p) = { "clk_sdmmc_div", "clk_sdmmc_div50" };
PNAME(mux_gmac_p) = { "clk_gmac_src", "gmac_clkin" };
PNAME(mux_gmac_rmii_sel_p) = { "clk_gmac_rx_tx_div20", "clk_gmac_rx_tx_div2" };
PNAME(mux_rtc32k_pmu_p) = { "xin32k", "pmu_pvtm_32k", "clk_rtc32k_frac", };
@@ -460,16 +464,40 @@ static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
/* PD_MMC_NAND */
GATE(HCLK_MMC_NAND, "hclk_mmc_nand", "hclk_peri_pre", 0,
PX30_CLKGATE_CON(6), 0, GFLAGS),
- COMPOSITE(SCLK_NANDC, "clk_nandc", mux_gpll_cpll_npll_p, 0,
+ COMPOSITE(SCLK_NANDC_DIV, "clk_nandc_div", mux_gpll_cpll_npll_p, 0,
PX30_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 11, GFLAGS),
+ COMPOSITE(SCLK_NANDC_DIV50, "clk_nandc_div50", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(15), 6, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 12, GFLAGS),
+ COMPOSITE_NODIV(SCLK_NANDC, "clk_nandc", mux_nandc_p,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(15), 15, 1, MFLAGS,
PX30_CLKGATE_CON(5), 13, GFLAGS),
- COMPOSITE(SCLK_SDIO, "clk_sdio", mux_gpll_cpll_npll_xin24m_p, 0,
+ COMPOSITE(SCLK_SDIO_DIV, "clk_sdio_div", mux_gpll_cpll_npll_xin24m_p, 0,
PX30_CLKSEL_CON(18), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 1, GFLAGS),
+ COMPOSITE_DIV_OFFSET(SCLK_SDIO_DIV50, "clk_sdio_div50",
+ mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(18), 14, 2, MFLAGS,
+ PX30_CLKSEL_CON(19), 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 2, GFLAGS),
+ COMPOSITE_NODIV(SCLK_SDIO, "clk_sdio", mux_sdio_p,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(19), 15, 1, MFLAGS,
PX30_CLKGATE_CON(6), 3, GFLAGS),
- COMPOSITE(SCLK_EMMC, "clk_emmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ COMPOSITE(SCLK_EMMC_DIV, "clk_emmc_div", mux_gpll_cpll_npll_xin24m_p, 0,
PX30_CLKSEL_CON(20), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_DIV_OFFSET(SCLK_EMMC_DIV50, "clk_emmc_div50", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(20), 14, 2, MFLAGS,
+ PX30_CLKSEL_CON(21), 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE_NODIV(SCLK_EMMC, "clk_emmc", mux_emmc_p,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(21), 15, 1, MFLAGS,
PX30_CLKGATE_CON(6), 6, GFLAGS),
COMPOSITE(SCLK_SFC, "clk_sfc", mux_gpll_cpll_p, 0,
@@ -494,8 +522,16 @@ static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
/* PD_SDCARD */
GATE(0, "hclk_sdmmc_pre", "hclk_peri_pre", 0,
PX30_CLKGATE_CON(6), 12, GFLAGS),
- COMPOSITE(SCLK_SDMMC, "clk_sdmmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ COMPOSITE(SCLK_SDMMC_DIV, "clk_sdmmc_div", mux_gpll_cpll_npll_xin24m_p, 0,
PX30_CLKSEL_CON(16), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 13, GFLAGS),
+ COMPOSITE_DIV_OFFSET(SCLK_SDMMC_DIV50, "clk_sdmmc_div50", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(16), 14, 2, MFLAGS,
+ PX30_CLKSEL_CON(17), 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 14, GFLAGS),
+ COMPOSITE_NODIV(SCLK_SDMMC, "clk_sdmmc", mux_sdmmc_p,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(17), 15, 1, MFLAGS,
PX30_CLKGATE_CON(6), 15, GFLAGS),
/* PD_USB */
@@ -763,29 +799,29 @@ static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
GATE(0, "pclk_ddrphy", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 3, GFLAGS),
GATE(PCLK_MIPIDSIPHY, "pclk_mipidsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 4, GFLAGS),
GATE(PCLK_MIPICSIPHY, "pclk_mipicsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 5, GFLAGS),
- GATE(PCLK_USB_GRF, "pclk_usb_grf", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(PCLK_USB_GRF, "pclk_usb_grf", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 6, GFLAGS),
GATE(0, "pclk_cpu_hoost", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 7, GFLAGS),
/* PD_VI */
- GATE(0, "aclk_vi_niu", "aclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 15, GFLAGS),
+ GATE(0, "aclk_vi_niu", "aclk_vi_pre", 0, PX30_CLKGATE_CON(4), 15, GFLAGS),
GATE(ACLK_CIF, "aclk_cif", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 1, GFLAGS),
GATE(ACLK_ISP, "aclk_isp", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 3, GFLAGS),
- GATE(0, "hclk_vi_niu", "hclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(0, "hclk_vi_niu", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 0, GFLAGS),
GATE(HCLK_CIF, "hclk_cif", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 2, GFLAGS),
GATE(HCLK_ISP, "hclk_isp", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 4, GFLAGS),
/* PD_VO */
- GATE(0, "aclk_vo_niu", "aclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 0, GFLAGS),
+ GATE(0, "aclk_vo_niu", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 0, GFLAGS),
GATE(ACLK_VOPB, "aclk_vopb", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 3, GFLAGS),
GATE(ACLK_RGA, "aclk_rga", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 7, GFLAGS),
GATE(ACLK_VOPL, "aclk_vopl", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 5, GFLAGS),
- GATE(0, "hclk_vo_niu", "hclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(0, "hclk_vo_niu", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 1, GFLAGS),
GATE(HCLK_VOPB, "hclk_vopb", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 4, GFLAGS),
GATE(HCLK_RGA, "hclk_rga", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 8, GFLAGS),
GATE(HCLK_VOPL, "hclk_vopl", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 6, GFLAGS),
- GATE(0, "pclk_vo_niu", "pclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 2, GFLAGS),
+ GATE(0, "pclk_vo_niu", "pclk_vo_pre", 0, PX30_CLKGATE_CON(3), 2, GFLAGS),
GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vo_pre", 0, PX30_CLKGATE_CON(3), 9, GFLAGS),
/* PD_BUS */
@@ -940,7 +976,7 @@ static struct rockchip_clk_branch px30_clk_pmu_branches[] __initdata = {
GATE(0, "pclk_cru_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 8, GFLAGS),
};
-static const char *const px30_pmucru_critical_clocks[] __initconst = {
+static const char *const px30_cru_critical_clocks[] __initconst = {
"aclk_bus_pre",
"pclk_bus_pre",
"hclk_bus_pre",
@@ -950,10 +986,16 @@ static const char *const px30_pmucru_critical_clocks[] __initconst = {
"pclk_top_pre",
"pclk_pmu_pre",
"hclk_usb_niu",
+ "pclk_vo_niu",
+ "aclk_vo_niu",
+ "hclk_vo_niu",
+ "aclk_vi_niu",
+ "hclk_vi_niu",
"pll_npll",
"usb480m",
"clk_uart2",
"pclk_uart2",
+ "pclk_usb_grf",
};
static void __init px30_clk_init(struct device_node *np)
@@ -985,6 +1027,9 @@ static void __init px30_clk_init(struct device_node *np)
&px30_cpuclk_data, px30_cpuclk_rates,
ARRAY_SIZE(px30_cpuclk_rates));
+ rockchip_clk_protect_critical(px30_cru_critical_clocks,
+ ARRAY_SIZE(px30_cru_critical_clocks));
+
rockchip_register_softrst(np, 12, reg_base + PX30_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
@@ -1017,9 +1062,6 @@ static void __init px30_pmu_clk_init(struct device_node *np)
rockchip_clk_register_branches(ctx, px30_clk_pmu_branches,
ARRAY_SIZE(px30_clk_pmu_branches));
- rockchip_clk_protect_critical(px30_pmucru_critical_clocks,
- ARRAY_SIZE(px30_pmucru_critical_clocks));
-
rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(px30_cru_pmu, "rockchip,px30-pmucru", px30_pmu_clk_init);
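The net effect of the MMC/NAND changes above is that each of these clocks now has two independently programmable divider branches feeding a mux. A sketch of the resulting topology for one of them (layout is illustrative; the names and flags are the ones added above):

	/*
	 *   gpll/cpll/npll/xin24m --+-- clk_sdmmc_div ----+
	 *                           +-- clk_sdmmc_div50 --+-- clk_sdmmc (mux)
	 *
	 * CLK_SET_RATE_PARENT on the mux pushes clk_set_rate() into the branch
	 * that is currently selected, while CLK_SET_RATE_NO_REPARENT prevents a
	 * rate request from silently switching branches.
	 */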
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 31466cd1842f..3a991ca1ee36 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -165,6 +165,8 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
GATE_BUS_CPU,
GATE_SCLK_CPU,
CLKOUT_CMU_CPU,
+ APLL_CON0,
+ KPLL_CON0,
CPLL_CON0,
DPLL_CON0,
EPLL_CON0,
@@ -611,7 +613,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
MUX(0, "mout_aclk66", mout_group1_p, SRC_TOP1, 8, 2),
MUX(0, "mout_aclk166", mout_group1_p, SRC_TOP1, 24, 2),
- MUX(0, "mout_aclk_g3d", mout_group5_p, SRC_TOP2, 16, 1),
+ MUX_F(0, "mout_aclk_g3d", mout_group5_p, SRC_TOP2, 16, 1,
+ CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_user_aclk400_isp", mout_user_aclk400_isp_p,
SRC_TOP3, 0, 1),
@@ -653,8 +656,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
SRC_TOP5, 8, 1),
MUX(0, "mout_user_aclk266_g2d", mout_user_aclk266_g2d_p,
SRC_TOP5, 12, 1),
- MUX(CLK_MOUT_G3D, "mout_user_aclk_g3d", mout_user_aclk_g3d_p,
- SRC_TOP5, 16, 1),
+ MUX_F(CLK_MOUT_G3D, "mout_user_aclk_g3d", mout_user_aclk_g3d_p,
+ SRC_TOP5, 16, 1, CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_user_aclk300_jpeg", mout_user_aclk300_jpeg_p,
SRC_TOP5, 20, 1),
MUX(CLK_MOUT_USER_ACLK300_DISP1, "mout_user_aclk300_disp1",
@@ -663,7 +666,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
mout_user_aclk300_gscl_p, SRC_TOP5, 28, 1),
MUX(0, "mout_sclk_mpll", mout_mpll_p, SRC_TOP6, 0, 1),
- MUX(CLK_MOUT_VPLL, "mout_sclk_vpll", mout_vpll_p, SRC_TOP6, 4, 1),
+ MUX_F(CLK_MOUT_VPLL, "mout_sclk_vpll", mout_vpll_p, SRC_TOP6, 4, 1,
+ CLK_SET_RATE_PARENT, 0),
MUX(CLK_MOUT_SCLK_SPLL, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1),
MUX(0, "mout_sclk_ipll", mout_ipll_p, SRC_TOP6, 12, 1),
MUX(0, "mout_sclk_rpll", mout_rpll_p, SRC_TOP6, 16, 1),
@@ -707,7 +711,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
SRC_TOP12, 8, 1),
MUX(0, "mout_sw_aclk266_g2d", mout_sw_aclk266_g2d_p,
SRC_TOP12, 12, 1),
- MUX(0, "mout_sw_aclk_g3d", mout_sw_aclk_g3d_p, SRC_TOP12, 16, 1),
+ MUX_F(0, "mout_sw_aclk_g3d", mout_sw_aclk_g3d_p, SRC_TOP12, 16, 1,
+ CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_sw_aclk300_jpeg", mout_sw_aclk300_jpeg_p,
SRC_TOP12, 20, 1),
MUX(CLK_MOUT_SW_ACLK300, "mout_sw_aclk300_disp1",
@@ -804,8 +809,8 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV_TOP2, 8, 3),
DIV(CLK_DOUT_ACLK266_G2D, "dout_aclk266_g2d", "mout_aclk266_g2d",
DIV_TOP2, 12, 3),
- DIV(CLK_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2,
- 16, 3),
+ DIV_F(CLK_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2,
+ 16, 3, CLK_SET_RATE_PARENT, 0),
DIV(CLK_DOUT_ACLK300_JPEG, "dout_aclk300_jpeg", "mout_aclk300_jpeg",
DIV_TOP2, 20, 3),
DIV(CLK_DOUT_ACLK300_DISP1, "dout_aclk300_disp1",
@@ -1253,7 +1258,8 @@ static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
};
static const struct samsung_gate_clock exynos5x_g3d_gate_clks[] __initconst = {
- GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+ GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9,
+ CLK_SET_RATE_PARENT, 0),
};
static struct exynos5_subcmu_reg_dump exynos5x_g3d_suspend_regs[] = {
@@ -1437,6 +1443,17 @@ static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = {
PLL_36XX_RATE(24 * MHZ, 32768001U, 131, 3, 5, 4719),
};
+static const struct samsung_pll_rate_table exynos5420_vpll_24mhz_tbl[] = {
+ PLL_35XX_RATE(24 * MHZ, 600000000U, 200, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 543000000U, 181, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 480000000U, 160, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 420000000U, 140, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 350000000U, 175, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 266000000U, 266, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 177000000U, 118, 2, 3),
+ PLL_35XX_RATE(24 * MHZ, 100000000U, 200, 3, 4),
+};
+
static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
[apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
APLL_CON0, NULL),
@@ -1561,6 +1578,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl;
exynos5x_plls[epll].rate_table = exynos5420_epll_24mhz_tbl;
exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
+ exynos5x_plls[vpll].rate_table = exynos5420_vpll_24mhz_tbl;
}
if (soc == EXYNOS5420)
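Editor's note: the new VPLL rate table above can be sanity-checked against the usual pll_35xx relation fout = fin * m / (p * 2^s). A small illustrative check (plain C, not part of the patch; the relation is assumed from the standard Samsung PLL_35XX encoding):

#include <stdio.h>

int main(void)
{
	/* fin is the 24 MHz reference; m/p/s taken from the table above */
	const unsigned long long fin = 24000000ULL;
	const struct { unsigned long long fout, m, p, s; } t[] = {
		{ 600000000ULL, 200, 2, 2 },
		{ 350000000ULL, 175, 3, 2 },
		{ 266000000ULL, 266, 3, 3 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("%llu ?= %llu\n", t[i].fout,
		       fin * t[i].m / (t[i].p << t[i].s));
	return 0;
}

Each computed value matches the declared rate, e.g. 24 MHz * 200 / (2 * 4) = 600 MHz.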
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 1281672cb00e..7dad9098e897 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -238,7 +238,6 @@ static SIMPLE_DEV_PM_OPS(s3c24xx_dclk_pm_ops,
static int s3c24xx_dclk_probe(struct platform_device *pdev)
{
struct s3c24xx_dclk *s3c24xx_dclk;
- struct resource *mem;
struct s3c24xx_dclk_drv_data *dclk_variant;
struct clk_hw **clk_table;
int ret, i;
@@ -257,8 +256,7 @@ static int s3c24xx_dclk_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, s3c24xx_dclk);
spin_lock_init(&s3c24xx_dclk->dclk_lock);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s3c24xx_dclk->base = devm_ioremap_resource(&pdev->dev, mem);
+ s3c24xx_dclk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(s3c24xx_dclk->base))
return PTR_ERR(s3c24xx_dclk->base);
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index e544a38106dd..dad31308c071 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -60,8 +60,7 @@ struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
struct samsung_clk_provider *ctx;
int i;
- ctx = kzalloc(sizeof(struct samsung_clk_provider) +
- sizeof(*ctx->clk_data.hws) * nr_clks, GFP_KERNEL);
+ ctx = kzalloc(struct_size(ctx, clk_data.hws, nr_clks), GFP_KERNEL);
if (!ctx)
panic("could not allocate clock provider context.\n");
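Editor's note: the conversion above replaces the open-coded size calculation with struct_size() from <linux/overflow.h>. An illustrative summary of what the macro does here (comment-only sketch, not the kernel implementation):

/*
 * For a structure that ends in a flexible array member, e.g.
 *
 *     struct samsung_clk_provider {
 *             ...
 *             struct clk_hw_onecell_data clk_data;   // ends in hws[]
 *     };
 *
 * struct_size(ctx, clk_data.hws, nr_clks) evaluates to
 *
 *     sizeof(*ctx) + nr_clks * sizeof(*ctx->clk_data.hws)
 *
 * but saturates to SIZE_MAX on arithmetic overflow, so kzalloc() fails
 * cleanly instead of returning a buffer that is too small.
 */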
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index 9d56eac43832..c0af4779892b 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -42,17 +42,15 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
struct regmap *regmap;
- struct resource *res;
if (of_find_property(node, "sprd,syscon", NULL)) {
regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
- if (IS_ERR_OR_NULL(regmap)) {
+ if (IS_ERR(regmap)) {
pr_err("%s: failed to get syscon regmap\n", __func__);
return PTR_ERR(regmap);
}
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index d89353a3cdec..f2497d0a4683 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -203,12 +203,21 @@ static struct ccu_nkmp pll_hsic_clk = {
* hardcode it to match with the clock names.
*/
#define SUN50I_H6_PLL_AUDIO_REG 0x078
+
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 541900800, .pattern = 0xc001288d, .m = 1, .n = 22 },
+ { .rate = 589824000, .pattern = 0xc00126e9, .m = 1, .n = 24 },
+};
+
static struct ccu_nm pll_audio_base_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table,
+ BIT(24), 0x178, BIT(31)),
.common = {
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
.reg = 0x078,
.hw.init = CLK_HW_INIT("pll-audio-base", "osc24M",
&ccu_nm_ops,
@@ -290,7 +299,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
0, 3, /* M */
24, 1, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
0x67c, BIT(0), 0);
@@ -753,12 +762,12 @@ static const struct clk_hw *clk_parent_pll_audio[] = {
};
/*
- * The divider of pll-audio is fixed to 8 now, as pll-audio-4x has a
- * fixed post-divider 2.
+ * The divider of pll-audio is fixed to 24 for now, so 24576000 and 22579200
+ * rates can be set exactly in conjunction with sigma-delta modulation.
*/
static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio",
clk_parent_pll_audio,
- 8, 1, CLK_SET_RATE_PARENT);
+ 24, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x",
clk_parent_pll_audio,
4, 1, CLK_SET_RATE_PARENT);
@@ -1215,12 +1224,12 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
}
/*
- * Force the post-divider of pll-audio to 8 and the output divider
- * of it to 1, to make the clock name represents the real frequency.
+ * Force the post-divider of pll-audio to 12 and the output divider
+ * of it to 2, so 24576000 and 22579200 rates can be set exactly.
*/
val = readl(reg + SUN50I_H6_PLL_AUDIO_REG);
val &= ~(GENMASK(21, 16) | BIT(0));
- writel(val | (7 << 16), reg + SUN50I_H6_PLL_AUDIO_REG);
+ writel(val | (11 << 16) | BIT(0), reg + SUN50I_H6_PLL_AUDIO_REG);
/*
* First clock parent (osc32K) is unusable for CEC. But since there
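Editor's note: a quick arithmetic check of the audio PLL change above. With the post-divider forced to 12 and the output divider to 2, pll-audio is pll-audio-base / 24, so the two sigma-delta table rates land exactly on the common audio base rates (illustrative C, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* the two rates from pll_audio_sdm_table[] */
	const unsigned long base[] = { 541900800UL, 589824000UL };
	unsigned int i;

	/* prints 22579200 (44.1 kHz family) and 24576000 (48 kHz family) */
	for (i = 0; i < 2; i++)
		printf("%lu -> %lu\n", base[i], base[i] / 24);
	return 0;
}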
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index b6e2680ef354..d8c38447e11b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -48,10 +48,6 @@
/* Some more module clocks are exported */
-#define CLK_MBUS 113
-
-/* And the GPU module clock is exported */
-
#define CLK_NUMBER_H3 (CLK_GPU + 1)
#define CLK_NUMBER_H5 (CLK_BUS_SCR1 + 1)
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 4812e45c2214..df966ca06788 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -17,7 +17,9 @@ obj-y += clk-tegra-fixed.o
obj-y += clk-tegra-super-gen4.o
obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index f8688c2ddf1a..c051d92c2bbf 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1487,6 +1487,7 @@ static int dfll_init(struct tegra_dfll *td)
td->last_unrounded_rate = 0;
pm_runtime_enable(td->dev);
+ pm_runtime_irq_safe(td->dev);
pm_runtime_get_sync(td->dev);
dfll_set_mode(td, DFLL_DISABLED);
@@ -1513,6 +1514,61 @@ di_err1:
return ret;
}
+/**
+ * tegra_dfll_suspend - check that the DFLL is disabled
+ * @dev: DFLL instance
+ *
+ * The DFLL clock should already have been disabled by the CPUFreq driver,
+ * so verify that it is disabled and then disable all clocks needed by the
+ * DFLL.
+ */
+int tegra_dfll_suspend(struct device *dev)
+{
+ struct tegra_dfll *td = dev_get_drvdata(dev);
+
+ if (dfll_is_running(td)) {
+ dev_err(td->dev, "DFLL still enabled while suspending\n");
+ return -EBUSY;
+ }
+
+ reset_control_assert(td->dvco_rst);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_suspend);
+
+/**
+ * tegra_dfll_resume - reinitialize DFLL on resume
+ * @dev: DFLL instance
+ *
+ * The DFLL is disabled and held in reset across suspend, so reinitialize
+ * the DFLL IP block here so that it can be used again. The DFLL clock is
+ * enabled later, in closed-loop mode, by the CPUFreq driver before it
+ * switches its clock source to the DFLL output.
+ */
+int tegra_dfll_resume(struct device *dev)
+{
+ struct tegra_dfll *td = dev_get_drvdata(dev);
+
+ reset_control_deassert(td->dvco_rst);
+
+ pm_runtime_get_sync(td->dev);
+
+ dfll_set_mode(td, DFLL_DISABLED);
+ dfll_set_default_params(td);
+
+ if (td->soc->init_clock_trimmers)
+ td->soc->init_clock_trimmers();
+
+ dfll_set_open_loop_config(td);
+
+ dfll_init_out_if(td);
+
+ pm_runtime_put_sync(td->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_resume);
+
/*
* DT data fetch
*/
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
index 1b14ebe7268b..fb209eb5f365 100644
--- a/drivers/clk/tegra/clk-dfll.h
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -42,5 +42,7 @@ int tegra_dfll_register(struct platform_device *pdev,
struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev);
int tegra_dfll_runtime_suspend(struct device *dev);
int tegra_dfll_runtime_resume(struct device *dev);
+int tegra_dfll_suspend(struct device *dev);
+int tegra_dfll_resume(struct device *dev);
#endif /* __DRIVERS_CLK_TEGRA_CLK_DFLL_H */
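Editor's note: the two new entry points declared above are wired into system sleep by the clk-tegra124-dfll-fcpu.c hunk later in this patch. The shape of that wiring, shown here as a short sketch for readability:

#include <linux/pm.h>
#include "clk-dfll.h"

/* Sketch: how the new helpers plug into the DFLL platform driver's PM ops.
 * tegra_dfll_suspend() can veto suspend with -EBUSY if CPUFreq left the
 * DFLL running; tegra_dfll_resume() reinitializes the IP block afterwards. */
static const struct dev_pm_ops dfll_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
			   tegra_dfll_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dfll_suspend, tegra_dfll_resume)
};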
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index e76731fb7d69..ca0de5f11f84 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -109,10 +109,21 @@ static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static void clk_divider_restore_context(struct clk_hw *hw)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+ unsigned long rate = clk_hw_get_rate(hw);
+
+ if (clk_frac_div_set_rate(hw, rate, parent_rate) < 0)
+ WARN_ON(1);
+}
+
const struct clk_ops tegra_clk_frac_div_ops = {
.recalc_rate = clk_frac_div_recalc_rate,
.set_rate = clk_frac_div_set_rate,
.round_rate = clk_frac_div_round_rate,
+ .restore_context = clk_divider_restore_context,
};
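Editor's note: the .restore_context callbacks added throughout this series are invoked by the common clock framework's clk_restore_context(), normally from a syscore resume hook after clk_save_context() was called on suspend; the Tegra210 hunk further below does exactly that. A minimal sketch of the calling pattern, with hypothetical handler names:

#include <linux/clk.h>
#include <linux/syscore_ops.h>

static int my_clk_suspend(void)
{
	/* snapshot rates, parents and enable state of registered clocks */
	return clk_save_context();
}

static void my_clk_resume(void)
{
	/* invokes each clock's .restore_context op to reprogram hardware */
	clk_restore_context();
}

/* registered once with register_syscore_ops() during clock init */
static struct syscore_ops my_clk_syscore_ops = {
	.suspend = my_clk_suspend,
	.resume = my_clk_resume,
};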
struct clk *tegra_clk_register_divider(const char *name,
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index ea39caf3d762..745f9faa98d8 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -403,20 +403,16 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
}
timing->parent_index = 0xff;
- for (i = 0; i < ARRAY_SIZE(emc_parent_clk_names); i++) {
- if (!strcmp(emc_parent_clk_names[i],
- __clk_get_name(timing->parent))) {
- timing->parent_index = i;
- break;
- }
- }
- if (timing->parent_index == 0xff) {
+ i = match_string(emc_parent_clk_names, ARRAY_SIZE(emc_parent_clk_names),
+ __clk_get_name(timing->parent));
+ if (i < 0) {
pr_err("timing %pOF: %s is not a valid parent\n",
node, __clk_get_name(timing->parent));
clk_put(timing->parent);
return -EINVAL;
}
+ timing->parent_index = i;
return 0;
}
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index de466b4446da..c4faebd32760 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -236,9 +236,9 @@ enum clk_id {
tegra_clk_soc_therm,
tegra_clk_soc_therm_8,
tegra_clk_sor0,
- tegra_clk_sor0_lvds,
+ tegra_clk_sor0_out,
tegra_clk_sor1,
- tegra_clk_sor1_src,
+ tegra_clk_sor1_out,
tegra_clk_spdif,
tegra_clk_spdif_2x,
tegra_clk_spdif_in,
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 58437da25156..67620c7ecd9e 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -99,6 +100,23 @@ static void clk_periph_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
+static void clk_periph_restore_context(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+ int parent_id;
+
+ parent_id = clk_hw_get_parent_index(hw);
+ if (WARN_ON(parent_id < 0))
+ return;
+
+ if (!(periph->gate.flags & TEGRA_PERIPH_NO_DIV))
+ div_ops->restore_context(div_hw);
+
+ clk_periph_set_parent(hw, parent_id);
+}
+
const struct clk_ops tegra_clk_periph_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
@@ -108,6 +126,7 @@ const struct clk_ops tegra_clk_periph_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
+ .restore_context = clk_periph_restore_context,
};
static const struct clk_ops tegra_clk_periph_nodiv_ops = {
@@ -116,6 +135,7 @@ static const struct clk_ops tegra_clk_periph_nodiv_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
+ .restore_context = clk_periph_restore_context,
};
static const struct clk_ops tegra_clk_periph_no_gate_ops = {
@@ -124,6 +144,7 @@ static const struct clk_ops tegra_clk_periph_no_gate_ops = {
.recalc_rate = clk_periph_recalc_rate,
.round_rate = clk_periph_round_rate,
.set_rate = clk_periph_set_rate,
+ .restore_context = clk_periph_restore_context,
};
static struct clk *_tegra_clk_register_periph(const char *name,
diff --git a/drivers/clk/tegra/clk-pll-out.c b/drivers/clk/tegra/clk-pll-out.c
index 35f2bf00e1e6..d8bf89a81e6d 100644
--- a/drivers/clk/tegra/clk-pll-out.c
+++ b/drivers/clk/tegra/clk-pll-out.c
@@ -69,10 +69,19 @@ static void clk_pll_out_disable(struct clk_hw *hw)
spin_unlock_irqrestore(pll_out->lock, flags);
}
+static void tegra_clk_pll_out_restore_context(struct clk_hw *hw)
+{
+ if (!__clk_get_enable_count(hw->clk))
+ clk_pll_out_disable(hw);
+ else
+ clk_pll_out_enable(hw);
+}
+
const struct clk_ops tegra_clk_pll_out_ops = {
.is_enabled = clk_pll_out_is_enabled,
.enable = clk_pll_out_enable,
.disable = clk_pll_out_disable,
+ .restore_context = tegra_clk_pll_out_restore_context,
};
struct clk *tegra_clk_register_pll_out(const char *name,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 1583f5fc992f..531c2b3d814e 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1008,6 +1008,27 @@ static unsigned long clk_plle_recalc_rate(struct clk_hw *hw,
return rate;
}
+static void tegra_clk_pll_restore_context(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+ unsigned long rate = clk_hw_get_rate(hw);
+
+ if (clk_pll_is_enabled(hw))
+ return;
+
+ if (pll->params->set_defaults)
+ pll->params->set_defaults(pll);
+
+ clk_pll_set_rate(hw, rate, parent_rate);
+
+ if (!__clk_get_enable_count(hw->clk))
+ clk_pll_disable(hw);
+ else
+ clk_pll_enable(hw);
+}
+
const struct clk_ops tegra_clk_pll_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_enable,
@@ -1015,6 +1036,7 @@ const struct clk_ops tegra_clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_round_rate,
.set_rate = clk_pll_set_rate,
+ .restore_context = tegra_clk_pll_restore_context,
};
const struct clk_ops tegra_clk_plle_ops = {
@@ -1802,6 +1824,27 @@ out:
return ret;
}
+
+static void _clk_plle_tegra_init_parent(struct tegra_clk_pll *pll)
+{
+ u32 val, val_aux;
+
+ /* ensure parent is set to pll_ref */
+ val = pll_readl_base(pll);
+ val_aux = pll_readl(pll->params->aux_reg, pll);
+
+ if (val & PLL_BASE_ENABLE) {
+ if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
+ (val_aux & PLLE_AUX_PLLP_SEL))
+ WARN(1, "pll_e enabled with unsupported parent %s\n",
+ (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
+ "pll_re_vco");
+ } else {
+ val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
+ pll_writel(val_aux, pll->params->aux_reg, pll);
+ fence_udelay(1, pll->clk_base);
+ }
+}
#endif
static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
@@ -2214,27 +2257,12 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
{
struct tegra_clk_pll *pll;
struct clk *clk;
- u32 val, val_aux;
pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
- /* ensure parent is set to pll_re_vco */
-
- val = pll_readl_base(pll);
- val_aux = pll_readl(pll_params->aux_reg, pll);
-
- if (val & PLL_BASE_ENABLE) {
- if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
- (val_aux & PLLE_AUX_PLLP_SEL))
- WARN(1, "pll_e enabled with unsupported parent %s\n",
- (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
- "pll_re_vco");
- } else {
- val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
- pll_writel(val_aux, pll_params->aux_reg, pll);
- }
+ _clk_plle_tegra_init_parent(pll);
clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
&tegra_clk_plle_tegra114_ops);
@@ -2276,6 +2304,7 @@ static const struct clk_ops tegra_clk_pllss_ops = {
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_ramp_round_rate,
.set_rate = clk_pllxc_set_rate,
+ .restore_context = tegra_clk_pll_restore_context,
};
struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
@@ -2520,11 +2549,19 @@ out:
spin_unlock_irqrestore(pll->lock, flags);
}
+static void tegra_clk_plle_t210_restore_context(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+
+ _clk_plle_tegra_init_parent(pll);
+}
+
static const struct clk_ops tegra_clk_plle_tegra210_ops = {
.is_enabled = clk_plle_tegra210_is_enabled,
.enable = clk_plle_tegra210_enable,
.disable = clk_plle_tegra210_disable,
.recalc_rate = clk_pll_recalc_rate,
+ .restore_context = tegra_clk_plle_t210_restore_context,
};
struct clk *tegra_clk_register_plle_tegra210(const char *name,
@@ -2535,27 +2572,12 @@ struct clk *tegra_clk_register_plle_tegra210(const char *name,
{
struct tegra_clk_pll *pll;
struct clk *clk;
- u32 val, val_aux;
pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
- /* ensure parent is set to pll_re_vco */
-
- val = pll_readl_base(pll);
- val_aux = pll_readl(pll_params->aux_reg, pll);
-
- if (val & PLLE_BASE_ENABLE) {
- if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
- (val_aux & PLLE_AUX_PLLP_SEL))
- WARN(1, "pll_e enabled with unsupported parent %s\n",
- (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
- "pll_re_vco");
- } else {
- val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
- pll_writel(val_aux, pll_params->aux_reg, pll);
- }
+ _clk_plle_tegra_init_parent(pll);
clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
&tegra_clk_plle_tegra210_ops);
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
index a5cd3e31dbae..316912d3b1a4 100644
--- a/drivers/clk/tegra/clk-sdmmc-mux.c
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -194,6 +194,21 @@ static void clk_sdmmc_mux_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
+static void clk_sdmmc_mux_restore_context(struct clk_hw *hw)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+ unsigned long rate = clk_hw_get_rate(hw);
+ int parent_id;
+
+ parent_id = clk_hw_get_parent_index(hw);
+ if (WARN_ON(parent_id < 0))
+ return;
+
+ clk_sdmmc_mux_set_parent(hw, parent_id);
+ clk_sdmmc_mux_set_rate(hw, rate, parent_rate);
+}
+
static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
.get_parent = clk_sdmmc_mux_get_parent,
.set_parent = clk_sdmmc_mux_set_parent,
@@ -203,6 +218,7 @@ static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
.is_enabled = clk_sdmmc_mux_is_enabled,
.enable = clk_sdmmc_mux_enable,
.disable = clk_sdmmc_mux_disable,
+ .restore_context = clk_sdmmc_mux_restore_context,
};
struct clk *tegra_clk_register_sdmmc_mux_div(const char *name,
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 39ef31b46df5..6099c6e9acd4 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -28,6 +28,9 @@
#define super_state_to_src_shift(m, s) ((m->width * s))
#define super_state_to_src_mask(m) (((1 << m->width) - 1))
+#define CCLK_SRC_PLLP_OUT0 4
+#define CCLK_SRC_PLLP_OUT4 5
+
static u8 clk_super_get_parent(struct clk_hw *hw)
{
struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
@@ -97,12 +100,23 @@ static int clk_super_set_parent(struct clk_hw *hw, u8 index)
if (index == mux->div2_index)
index = mux->pllx_index;
}
+
+ /* enable PLLP branches to CPU before selecting PLLP source */
+ if ((mux->flags & TEGRA210_CPU_CLK) &&
+ (index == CCLK_SRC_PLLP_OUT0 || index == CCLK_SRC_PLLP_OUT4))
+ tegra_clk_set_pllp_out_cpu(true);
+
val &= ~((super_state_to_src_mask(mux)) << shift);
val |= (index & (super_state_to_src_mask(mux))) << shift;
writel_relaxed(val, mux->reg);
udelay(2);
+ /* disable PLLP branches to CPU if not used */
+ if ((mux->flags & TEGRA210_CPU_CLK) &&
+ index != CCLK_SRC_PLLP_OUT0 && index != CCLK_SRC_PLLP_OUT4)
+ tegra_clk_set_pllp_out_cpu(false);
+
out:
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
@@ -110,9 +124,21 @@ out:
return err;
}
+static void clk_super_mux_restore_context(struct clk_hw *hw)
+{
+ int parent_id;
+
+ parent_id = clk_hw_get_parent_index(hw);
+ if (WARN_ON(parent_id < 0))
+ return;
+
+ clk_super_set_parent(hw, parent_id);
+}
+
static const struct clk_ops tegra_clk_super_mux_ops = {
.get_parent = clk_super_get_parent,
.set_parent = clk_super_set_parent,
+ .restore_context = clk_super_mux_restore_context,
};
static long clk_super_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -148,12 +174,27 @@ static int clk_super_set_rate(struct clk_hw *hw, unsigned long rate,
return super->div_ops->set_rate(div_hw, rate, parent_rate);
}
+static void clk_super_restore_context(struct clk_hw *hw)
+{
+ struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
+ struct clk_hw *div_hw = &super->frac_div.hw;
+ int parent_id;
+
+ parent_id = clk_hw_get_parent_index(hw);
+ if (WARN_ON(parent_id < 0))
+ return;
+
+ super->div_ops->restore_context(div_hw);
+ clk_super_set_parent(hw, parent_id);
+}
+
const struct clk_ops tegra_clk_super_ops = {
.get_parent = clk_super_get_parent,
.set_parent = clk_super_set_parent,
.set_rate = clk_super_set_rate,
.round_rate = clk_super_round_rate,
.recalc_rate = clk_super_recalc_rate,
+ .restore_context = clk_super_restore_context,
};
struct clk *tegra_clk_register_super_mux(const char *name,
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index 8d91b2b191cf..7c6c8abfcde6 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -17,6 +17,10 @@
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_SHIFT 28
#define OSC_CTRL_PLL_REF_DIV_SHIFT 26
+#define OSC_CTRL_MASK (0x3f2 | \
+ (0xf << OSC_CTRL_OSC_FREQ_SHIFT))
+
+static u32 osc_ctrl_ctx;
int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
unsigned long *input_freqs, unsigned int num,
@@ -29,6 +33,7 @@ int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
unsigned osc_idx;
val = readl_relaxed(clk_base + OSC_CTRL);
+ osc_ctrl_ctx = val & OSC_CTRL_MASK;
osc_idx = val >> OSC_CTRL_OSC_FREQ_SHIFT;
if (osc_idx < num)
@@ -96,3 +101,13 @@ void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
*dt_clk = clk;
}
}
+
+void tegra_clk_osc_resume(void __iomem *clk_base)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + OSC_CTRL) & ~OSC_CTRL_MASK;
+ val |= osc_ctrl_ctx;
+ writel_relaxed(val, clk_base + OSC_CTRL);
+ fence_udelay(2, clk_base);
+}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 1ed85f120a1b..0d07c0ba49b6 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -262,7 +262,6 @@
static DEFINE_SPINLOCK(PLLP_OUTA_lock);
static DEFINE_SPINLOCK(PLLP_OUTB_lock);
static DEFINE_SPINLOCK(PLLP_OUTC_lock);
-static DEFINE_SPINLOCK(sor0_lock);
#define MUX_I2S_SPDIF(_id) \
static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
@@ -587,11 +586,6 @@ static u32 mux_pllp_pllre_clkm_idx[] = {
[0] = 0, [1] = 2, [2] = 3,
};
-static const char *mux_clkm_plldp_sor0lvds[] = {
- "clk_m", "pll_dp", "sor0_lvds",
-};
-#define mux_clkm_plldp_sor0lvds_idx NULL
-
static const char * const mux_dmic1[] = {
"pll_a_out0", "dmic1_sync_clk", "pll_p", "clk_m"
};
@@ -731,14 +725,12 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX8("hdmi_audio", mux_pllp3_pllc_clkm, CLK_SOURCE_HDMI_AUDIO, 176, TEGRA_PERIPH_NO_RESET, tegra_clk_hdmi_audio),
MUX8("clk72mhz", mux_pllp3_pllc_clkm, CLK_SOURCE_CLK72MHZ, 177, TEGRA_PERIPH_NO_RESET, tegra_clk_clk72Mhz),
MUX8("clk72mhz", mux_pllp_out3_pllp_pllc_clkm, CLK_SOURCE_CLK72MHZ, 177, TEGRA_PERIPH_NO_RESET, tegra_clk_clk72Mhz_8),
- MUX8_NOGATE_LOCK("sor0_lvds", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_SOR0, tegra_clk_sor0_lvds, &sor0_lock),
MUX_FLAGS("csite", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_CSITE, 73, TEGRA_PERIPH_ON_APB, tegra_clk_csite, CLK_IGNORE_UNUSED),
MUX_FLAGS("csite", mux_pllp_pllre_clkm, CLK_SOURCE_CSITE, 73, TEGRA_PERIPH_ON_APB, tegra_clk_csite_8, CLK_IGNORE_UNUSED),
NODIV("disp1", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, 0, tegra_clk_disp1, NULL),
NODIV("disp1", mux_pllp_plld_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, 0, tegra_clk_disp1_8, NULL),
NODIV("disp2", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, 0, tegra_clk_disp2, NULL),
NODIV("disp2", mux_pllp_plld_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, 0, tegra_clk_disp2_8, NULL),
- NODIV("sor0", mux_clkm_plldp_sor0lvds, CLK_SOURCE_SOR0, 14, 3, 182, 0, tegra_clk_sor0, &sor0_lock),
UART("uarta", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTA, 6, tegra_clk_uarta),
UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index cdfe7c9697e1..5760c978bef7 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -180,7 +180,7 @@ static void __init tegra_super_clk_init(void __iomem *clk_base,
gen_info->num_cclk_g_parents,
CLK_SET_RATE_PARENT,
clk_base + CCLKG_BURST_POLICY,
- 0, 4, 8, 0, NULL);
+ TEGRA210_CPU_CLK, 4, 8, 0, NULL);
} else {
clk = tegra_clk_register_super_mux("cclk_g",
gen_info->cclk_g_parents,
@@ -196,6 +196,11 @@ static void __init tegra_super_clk_init(void __iomem *clk_base,
dt_clk = tegra_lookup_dt_id(tegra_clk_cclk_lp, tegra_clks);
if (dt_clk) {
if (gen_info->gen == gen5) {
+ /*
+ * The TEGRA210_CPU_CLK flag is not needed for cclk_lp, as
+ * cluster switching is not currently supported on
+ * Tegra210 and cpu_lp is unused.
+ */
clk = tegra_clk_register_super_mux("cclk_lp",
gen_info->cclk_lp_parents,
gen_info->num_cclk_lp_parents,
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index e84b6d52cbbd..2ac2679d696d 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -631,6 +631,7 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra124_dfll_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
tegra_dfll_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dfll_suspend, tegra_dfll_resume)
};
static struct platform_driver tegra124_dfll_fcpu_driver = {
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 0224fdc4766f..b3110d5b5a6c 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -27,6 +27,7 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_SOR0 0x414
#define RST_DFLL_DVCO 0x2f4
#define DVFS_DFLL_RESET_SHIFT 0
@@ -91,6 +92,22 @@
/* Tegra CPU clock and reset control regs */
#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
+#define MASK(x) (BIT(x) - 1)
+
+#define MUX8_NOGATE_LOCK(_name, _parents, _offset, _clk_id, _lock) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset, \
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+ 0, TEGRA_PERIPH_NO_GATE, _clk_id,\
+ _parents##_idx, 0, _lock)
+
+#define NODIV(_name, _parents, _offset, \
+ _mux_shift, _mux_mask, _clk_num, \
+ _gate_flags, _clk_id, _lock) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ _mux_shift, _mux_mask, 0, 0, 0, 0, 0,\
+ _clk_num, (_gate_flags) | TEGRA_PERIPH_NO_DIV,\
+ _clk_id, _parents##_idx, 0, _lock)
+
#ifdef CONFIG_PM_SLEEP
static struct cpu_clk_suspend_context {
u32 clk_csite_src;
@@ -110,6 +127,7 @@ static DEFINE_SPINLOCK(pll_e_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
static DEFINE_SPINLOCK(emc_lock);
+static DEFINE_SPINLOCK(sor0_lock);
/* possible OSC frequencies in Hz */
static unsigned long tegra124_input_freq[] = {
@@ -829,7 +847,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_adx1] = { .dt_id = TEGRA124_CLK_ADX1, .present = true },
[tegra_clk_dpaux] = { .dt_id = TEGRA124_CLK_DPAUX, .present = true },
[tegra_clk_sor0] = { .dt_id = TEGRA124_CLK_SOR0, .present = true },
- [tegra_clk_sor0_lvds] = { .dt_id = TEGRA124_CLK_SOR0_LVDS, .present = true },
+ [tegra_clk_sor0_out] = { .dt_id = TEGRA124_CLK_SOR0_OUT, .present = true },
[tegra_clk_gpu] = { .dt_id = TEGRA124_CLK_GPU, .present = true },
[tegra_clk_amx1] = { .dt_id = TEGRA124_CLK_AMX1, .present = true },
[tegra_clk_uartb] = { .dt_id = TEGRA124_CLK_UARTB, .present = true },
@@ -987,12 +1005,33 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "hda2hdmi", .dt_id = TEGRA124_CLK_HDA2HDMI },
};
+static const char * const sor0_parents[] = {
+ "pll_p_out0", "pll_m_out0", "pll_d_out0", "pll_a_out0", "pll_c_out0",
+ "pll_d2_out0", "clk_m",
+};
+
+static const char * const sor0_out_parents[] = {
+ "clk_m", "sor0_pad_clkout",
+};
+
+static struct tegra_periph_init_data tegra124_periph[] = {
+ TEGRA_INIT_DATA_TABLE("sor0", NULL, NULL, sor0_parents,
+ CLK_SOURCE_SOR0, 29, 0x7, 0, 0, 0, 0,
+ 0, 182, 0, tegra_clk_sor0, NULL, 0,
+ &sor0_lock),
+ TEGRA_INIT_DATA_TABLE("sor0_out", NULL, NULL, sor0_out_parents,
+ CLK_SOURCE_SOR0, 14, 0x1, 0, 0, 0, 0,
+ 0, 0, TEGRA_PERIPH_NO_GATE, tegra_clk_sor0_out,
+ NULL, 0, &sor0_lock),
+};
+
static struct clk **clks;
static __init void tegra124_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
{
struct clk *clk;
+ unsigned int i;
/* xusb_ss_div2 */
clk = clk_register_fixed_factor(NULL, "xusb_ss_div2", "xusb_ss_src", 0,
@@ -1033,6 +1072,20 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
clk_register_clkdev(clk, "cml1", NULL);
clks[TEGRA124_CLK_CML1] = clk;
+ for (i = 0; i < ARRAY_SIZE(tegra124_periph); i++) {
+ struct tegra_periph_init_data *init = &tegra124_periph[i];
+ struct clk **clkp;
+
+ clkp = tegra_lookup_dt_id(init->clk_id, tegra124_clks);
+ if (!clkp) {
+ pr_warn("clock %u not found\n", init->clk_id);
+ continue;
+ }
+
+ clk = tegra_clk_register_periph_data(clk_base, init);
+ *clkp = clk;
+ }
+
tegra_periph_clk_init(clk_base, pmc_base, tegra124_clks, &pll_p_params);
}
diff --git a/drivers/clk/tegra/clk-tegra20-emc.c b/drivers/clk/tegra/clk-tegra20-emc.c
new file mode 100644
index 000000000000..03bf0009a33c
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra20-emc.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Based on drivers/clk/tegra/clk-emc.c
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#define pr_fmt(fmt) "tegra-emc-clk: " fmt
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/tegra.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK GENMASK(7, 0)
+#define CLK_SOURCE_EMC_2X_CLK_SRC_MASK GENMASK(31, 30)
+#define CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT 30
+
+#define MC_EMC_SAME_FREQ BIT(16)
+#define USE_PLLM_UD BIT(29)
+
+#define EMC_SRC_PLL_M 0
+#define EMC_SRC_PLL_C 1
+#define EMC_SRC_PLL_P 2
+#define EMC_SRC_CLK_M 3
+
+static const char * const emc_parent_clk_names[] = {
+ "pll_m", "pll_c", "pll_p", "clk_m",
+};
+
+struct tegra_clk_emc {
+ struct clk_hw hw;
+ void __iomem *reg;
+ bool mc_same_freq;
+ bool want_low_jitter;
+
+ tegra20_clk_emc_round_cb *round_cb;
+ void *cb_arg;
+};
+
+static inline struct tegra_clk_emc *to_tegra_clk_emc(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra_clk_emc, hw);
+}
+
+static unsigned long emc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ u32 val, div;
+
+ val = readl_relaxed(emc->reg);
+ div = val & CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
+
+ return DIV_ROUND_UP(parent_rate * 2, div + 2);
+}
+
+static u8 emc_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+
+ return readl_relaxed(emc->reg) >> CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
+}
+
+static int emc_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ u32 val, div;
+
+ val = readl_relaxed(emc->reg);
+ val &= ~CLK_SOURCE_EMC_2X_CLK_SRC_MASK;
+ val |= index << CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
+
+ div = val & CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
+
+ if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
+ val |= USE_PLLM_UD;
+ else
+ val &= ~USE_PLLM_UD;
+
+ if (emc->mc_same_freq)
+ val |= MC_EMC_SAME_FREQ;
+ else
+ val &= ~MC_EMC_SAME_FREQ;
+
+ writel_relaxed(val, emc->reg);
+
+ fence_udelay(1, emc->reg);
+
+ return 0;
+}
+
+static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ unsigned int index;
+ u32 val, div;
+
+ div = div_frac_get(rate, parent_rate, 8, 1, 0);
+
+ val = readl_relaxed(emc->reg);
+ val &= ~CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
+ val |= div;
+
+ index = val >> CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
+
+ if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
+ val |= USE_PLLM_UD;
+ else
+ val &= ~USE_PLLM_UD;
+
+ if (emc->mc_same_freq)
+ val |= MC_EMC_SAME_FREQ;
+ else
+ val &= ~MC_EMC_SAME_FREQ;
+
+ writel_relaxed(val, emc->reg);
+
+ fence_udelay(1, emc->reg);
+
+ return 0;
+}
+
+static int emc_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ u32 val, div;
+
+ div = div_frac_get(rate, parent_rate, 8, 1, 0);
+
+ val = readl_relaxed(emc->reg);
+
+ val &= ~CLK_SOURCE_EMC_2X_CLK_SRC_MASK;
+ val |= index << CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
+
+ val &= ~CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
+ val |= div;
+
+ if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
+ val |= USE_PLLM_UD;
+ else
+ val &= ~USE_PLLM_UD;
+
+ if (emc->mc_same_freq)
+ val |= MC_EMC_SAME_FREQ;
+ else
+ val &= ~MC_EMC_SAME_FREQ;
+
+ writel_relaxed(val, emc->reg);
+
+ fence_udelay(1, emc->reg);
+
+ return 0;
+}
+
+static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ struct clk_hw *parent_hw;
+ unsigned long divided_rate;
+ unsigned long parent_rate;
+ unsigned int i;
+ long emc_rate;
+ int div;
+
+ emc_rate = emc->round_cb(req->rate, req->min_rate, req->max_rate,
+ emc->cb_arg);
+ if (emc_rate < 0)
+ return emc_rate;
+
+ for (i = 0; i < ARRAY_SIZE(emc_parent_clk_names); i++) {
+ parent_hw = clk_hw_get_parent_by_index(hw, i);
+
+ if (req->best_parent_hw == parent_hw)
+ parent_rate = req->best_parent_rate;
+ else
+ parent_rate = clk_hw_get_rate(parent_hw);
+
+ if (emc_rate > parent_rate)
+ continue;
+
+ div = div_frac_get(emc_rate, parent_rate, 8, 1, 0);
+ divided_rate = DIV_ROUND_UP(parent_rate * 2, div + 2);
+
+ if (divided_rate != emc_rate)
+ continue;
+
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent_hw;
+ req->rate = emc_rate;
+ break;
+ }
+
+ if (i == ARRAY_SIZE(emc_parent_clk_names)) {
+ pr_err_once("can't find parent for rate %lu emc_rate %lu\n",
+ req->rate, emc_rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct clk_ops tegra_clk_emc_ops = {
+ .recalc_rate = emc_recalc_rate,
+ .get_parent = emc_get_parent,
+ .set_parent = emc_set_parent,
+ .set_rate = emc_set_rate,
+ .set_rate_and_parent = emc_set_rate_and_parent,
+ .determine_rate = emc_determine_rate,
+};
+
+void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg)
+{
+ struct clk *clk = __clk_lookup("emc");
+ struct tegra_clk_emc *emc;
+ struct clk_hw *hw;
+
+ if (clk) {
+ hw = __clk_get_hw(clk);
+ emc = to_tegra_clk_emc(hw);
+
+ emc->round_cb = round_cb;
+ emc->cb_arg = cb_arg;
+ }
+}
+
+bool tegra20_clk_emc_driver_available(struct clk_hw *emc_hw)
+{
+ return to_tegra_clk_emc(emc_hw)->round_cb != NULL;
+}
+
+struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter)
+{
+ struct tegra_clk_emc *emc;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ emc = kzalloc(sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return NULL;
+
+ /*
+ * EMC stands for External Memory Controller.
+ *
+ * We never want the EMC clock to be disabled, whether directly or by
+ * gating one of its parents, because the system hangs immediately in
+ * that case; hence the clock is marked as critical.
+ */
+ init.name = "emc";
+ init.ops = &tegra_clk_emc_ops;
+ init.flags = CLK_IS_CRITICAL;
+ init.parent_names = emc_parent_clk_names;
+ init.num_parents = ARRAY_SIZE(emc_parent_clk_names);
+
+ emc->reg = ioaddr;
+ emc->hw.init = &init;
+ emc->want_low_jitter = low_jitter;
+
+ clk = clk_register(NULL, &emc->hw);
+ if (IS_ERR(clk)) {
+ kfree(emc);
+ return NULL;
+ }
+
+ return clk;
+}
+
+int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same)
+{
+ struct tegra_clk_emc *emc;
+ struct clk_hw *hw;
+
+ if (!emc_clk)
+ return -EINVAL;
+
+ hw = __clk_get_hw(emc_clk);
+ emc = to_tegra_clk_emc(hw);
+ emc->mc_same_freq = same;
+
+ return 0;
+}
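Editor's note: the divisor handling in the new EMC clock above is a 2x fractional divider: the register field n (8 bits, one fractional bit) encodes a divisor of (n + 2) / 2, which emc_recalc_rate() undoes with DIV_ROUND_UP(parent_rate * 2, div + 2). A small worked example of that encoding (plain C, not part of the patch):

#include <stdio.h>

static unsigned long emc_rate(unsigned long parent, unsigned int div)
{
	/* same arithmetic as emc_recalc_rate(): rate = parent * 2 / (div + 2) */
	return (parent * 2 + div + 1) / (div + 2);
}

int main(void)
{
	printf("%lu\n", emc_rate(600000000UL, 0)); /* div=0 -> 600 MHz (1:1)   */
	printf("%lu\n", emc_rate(600000000UL, 1)); /* div=1 -> 400 MHz (1:1.5) */
	printf("%lu\n", emc_rate(600000000UL, 2)); /* div=2 -> 300 MHz (1:2)   */
	return 0;
}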
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index bcd871134f45..4d8222f5c638 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -130,8 +130,6 @@ static struct cpu_clk_suspend_context {
static void __iomem *clk_base;
static void __iomem *pmc_base;
-static DEFINE_SPINLOCK(emc_lock);
-
#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
_clk_num, _gate_flags, _clk_id) \
TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
@@ -760,7 +758,6 @@ static const char *pwm_parents[] = { "pll_p", "pll_c", "audio", "clk_m",
static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
static const char *mux_pllpdc_clkm[] = { "pll_p", "pll_d_out0", "pll_c",
"clk_m" };
-static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
static struct tegra_periph_init_data tegra_periph_clk_list[] = {
TEGRA_INIT_DATA_MUX("i2s1", i2s1_parents, CLK_SOURCE_I2S1, 11, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2S1),
@@ -787,41 +784,6 @@ static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
TEGRA_INIT_DATA_NODIV("disp2", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, 0, TEGRA20_CLK_DISP2),
};
-static void __init tegra20_emc_clk_init(void)
-{
- const u32 use_pllm_ud = BIT(29);
- struct clk *clk;
- u32 emc_reg;
-
- clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + CLK_SOURCE_EMC,
- 30, 2, 0, &emc_lock);
-
- clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
- &emc_lock);
- clks[TEGRA20_CLK_MC] = clk;
-
- /* un-divided pll_m_out0 is currently unsupported */
- emc_reg = readl_relaxed(clk_base + CLK_SOURCE_EMC);
- if (emc_reg & use_pllm_ud) {
- pr_err("%s: un-divided PllM_out0 used as clock source\n",
- __func__);
- return;
- }
-
- /*
- * Note that 'emc_mux' source and 'emc' rate shouldn't be changed at
- * the same time due to a HW bug, this won't happen because we're
- * defining 'emc_mux' and 'emc' as distinct clocks.
- */
- clk = tegra_clk_register_divider("emc", "emc_mux",
- clk_base + CLK_SOURCE_EMC, CLK_IS_CRITICAL,
- TEGRA_DIVIDER_INT, 0, 8, 1, &emc_lock);
- clks[TEGRA20_CLK_EMC] = clk;
-}
-
static void __init tegra20_periph_clk_init(void)
{
struct tegra_periph_init_data *data;
@@ -835,7 +797,13 @@ static void __init tegra20_periph_clk_init(void)
clks[TEGRA20_CLK_AC97] = clk;
/* emc */
- tegra20_emc_clk_init();
+ clk = tegra20_clk_register_emc(clk_base + CLK_SOURCE_EMC, false);
+
+ clks[TEGRA20_CLK_EMC] = clk;
+
+ clk = tegra_clk_register_mc("mc", "emc", clk_base + CLK_SOURCE_EMC,
+ NULL);
+ clks[TEGRA20_CLK_MC] = clk;
/* dsi */
clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
@@ -987,6 +955,7 @@ static void tegra20_cpu_clock_suspend(void)
static void tegra20_cpu_clock_resume(void)
{
unsigned int reg, policy;
+ u32 misc, base;
/* Is CPU complex already running on PLLX? */
reg = readl(clk_base + CCLK_BURST_POLICY);
@@ -1000,15 +969,21 @@ static void tegra20_cpu_clock_resume(void)
BUG();
if (reg != CCLK_BURST_POLICY_PLLX) {
- /* restore PLLX settings if CPU is on different PLL */
- writel(tegra20_cpu_clk_sctx.pllx_misc,
- clk_base + PLLX_MISC);
- writel(tegra20_cpu_clk_sctx.pllx_base,
- clk_base + PLLX_BASE);
-
- /* wait for PLL stabilization if PLLX was enabled */
- if (tegra20_cpu_clk_sctx.pllx_base & (1 << 30))
- udelay(300);
+ misc = readl_relaxed(clk_base + PLLX_MISC);
+ base = readl_relaxed(clk_base + PLLX_BASE);
+
+ if (misc != tegra20_cpu_clk_sctx.pllx_misc ||
+ base != tegra20_cpu_clk_sctx.pllx_base) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra20_cpu_clk_sctx.pllx_misc,
+ clk_base + PLLX_MISC);
+ writel(tegra20_cpu_clk_sctx.pllx_base,
+ clk_base + PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra20_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
}
/*
@@ -1115,6 +1090,8 @@ static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
if (IS_ERR(clk))
return clk;
+ hw = __clk_get_hw(clk);
+
/*
* Tegra20 CDEV1 and CDEV2 clocks are a bit special case, their parent
* clock is created by the pinctrl driver. It is possible for clk user
@@ -1124,13 +1101,16 @@ static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
*/
if (clkspec->args[0] == TEGRA20_CLK_CDEV1 ||
clkspec->args[0] == TEGRA20_CLK_CDEV2) {
- hw = __clk_get_hw(clk);
-
parent_hw = clk_hw_get_parent(hw);
if (!parent_hw)
return ERR_PTR(-EPROBE_DEFER);
}
+ if (clkspec->args[0] == TEGRA20_CLK_EMC) {
+ if (!tegra20_clk_emc_driver_available(hw))
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
return clk;
}
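Editor's note: the -EPROBE_DEFER above is lifted once the EMC memory driver registers its rate-rounding callback via tegra20_clk_set_emc_round_callback(), so consumers only ever see a fully functional EMC clock. A hedged sketch of the registration side, with the callback prototype inferred from the call site in clk-tegra20-emc.c (hypothetical driver code, not part of the patch):

#include <linux/clk/tegra.h>
#include <linux/minmax.h>
#include <linux/platform_device.h>

/* round the requested rate to one supported by the memory timing tables */
static long my_emc_round_rate(unsigned long rate, unsigned long min_rate,
			      unsigned long max_rate, void *arg)
{
	/* a real driver picks the nearest timing; clamp is just a placeholder */
	return clamp(rate, min_rate, max_rate);
}

static int my_emc_probe(struct platform_device *pdev)
{
	/* ... parse timings, map registers ... */
	tegra20_clk_set_emc_round_callback(my_emc_round_rate, pdev);
	return 0;
}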
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index df172d5772d7..762cd186f714 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -9,13 +9,13 @@
#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra210-car.h>
#include <dt-bindings/reset/tegra210-car.h>
-#include <linux/iopoll.h>
#include <linux/sizes.h>
#include <soc/tegra/pmc.h>
@@ -33,6 +33,7 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
#define CLK_SOURCE_SOR1 0x410
+#define CLK_SOURCE_SOR0 0x414
#define CLK_SOURCE_LA 0x1f8
#define CLK_SOURCE_SDMMC2 0x154
#define CLK_SOURCE_SDMMC4 0x164
@@ -220,11 +221,15 @@
#define CLK_M_DIVISOR_SHIFT 2
#define CLK_M_DIVISOR_MASK 0x3
+#define CLK_MASK_ARM 0x44
+#define MISC_CLK_ENB 0x48
+
#define RST_DFLL_DVCO 0x2f4
#define DVFS_DFLL_RESET_SHIFT 0
#define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8
#define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac
+#define CPU_SOFTRST_CTRL 0x380
#define LVL2_CLK_GATE_OVRA 0xf8
#define LVL2_CLK_GATE_OVRC 0x3a0
@@ -298,6 +303,7 @@ static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(pll_e_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
+static DEFINE_SPINLOCK(sor0_lock);
static DEFINE_SPINLOCK(sor1_lock);
static DEFINE_SPINLOCK(emc_lock);
static DEFINE_MUTEX(lvl2_ovr_lock);
@@ -2351,9 +2357,9 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_dpaux] = { .dt_id = TEGRA210_CLK_DPAUX, .present = true },
[tegra_clk_dpaux1] = { .dt_id = TEGRA210_CLK_DPAUX1, .present = true },
[tegra_clk_sor0] = { .dt_id = TEGRA210_CLK_SOR0, .present = true },
- [tegra_clk_sor0_lvds] = { .dt_id = TEGRA210_CLK_SOR0_LVDS, .present = true },
+ [tegra_clk_sor0_out] = { .dt_id = TEGRA210_CLK_SOR0_OUT, .present = true },
[tegra_clk_sor1] = { .dt_id = TEGRA210_CLK_SOR1, .present = true },
- [tegra_clk_sor1_src] = { .dt_id = TEGRA210_CLK_SOR1_SRC, .present = true },
+ [tegra_clk_sor1_out] = { .dt_id = TEGRA210_CLK_SOR1_OUT, .present = true },
[tegra_clk_gpu] = { .dt_id = TEGRA210_CLK_GPU, .present = true },
[tegra_clk_pll_g_ref] = { .dt_id = TEGRA210_CLK_PLL_G_REF, .present = true, },
[tegra_clk_uartb_8] = { .dt_id = TEGRA210_CLK_UARTB, .present = true },
@@ -2551,7 +2557,6 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "pll_c4_out2", .dt_id = TEGRA210_CLK_PLL_C4_OUT2 },
{ .con_id = "pll_c4_out3", .dt_id = TEGRA210_CLK_PLL_C4_OUT3 },
{ .con_id = "dpaux", .dt_id = TEGRA210_CLK_DPAUX },
- { .con_id = "sor0", .dt_id = TEGRA210_CLK_SOR0 },
};
static struct tegra_audio_clk_info tegra210_audio_plls[] = {
@@ -2825,6 +2830,7 @@ static int tegra210_enable_pllu(void)
struct tegra_clk_pll_freq_table *fentry;
struct tegra_clk_pll pllu;
u32 reg;
+ int ret;
for (fentry = pll_u_freq_table; fentry->input_rate; fentry++) {
if (fentry->input_rate == pll_ref_freq)
@@ -2841,7 +2847,7 @@ static int tegra210_enable_pllu(void)
reg = readl_relaxed(clk_base + pllu.params->ext_misc_reg[0]);
reg &= ~BIT(pllu.params->iddq_bit_idx);
writel_relaxed(reg, clk_base + pllu.params->ext_misc_reg[0]);
- udelay(5);
+ fence_udelay(5, clk_base);
reg = readl_relaxed(clk_base + PLLU_BASE);
reg &= ~GENMASK(20, 0);
@@ -2849,13 +2855,18 @@ static int tegra210_enable_pllu(void)
reg |= fentry->n << 8;
reg |= fentry->p << 16;
writel(reg, clk_base + PLLU_BASE);
- udelay(1);
+ fence_udelay(1, clk_base);
reg |= PLL_ENABLE;
writel(reg, clk_base + PLLU_BASE);
- readl_relaxed_poll_timeout_atomic(clk_base + PLLU_BASE, reg,
- reg & PLL_BASE_LOCK, 2, 1000);
- if (!(reg & PLL_BASE_LOCK)) {
+ /*
+ * During clock resume, the same PLLU init and enable sequence is
+ * executed, and readx_poll_timeout_atomic() can't be used there because
+ * it relies on ktime_get() while timekeeping has not yet been resumed.
+ * So use tegra210_wait_for_mask() to poll for PLL lock instead.
+ */
+ ret = tegra210_wait_for_mask(&pllu, PLLU_BASE, PLL_BASE_LOCK);
+ if (ret) {
pr_err("Timed out waiting for PLL_U to lock\n");
return -ETIMEDOUT;
}
@@ -2895,12 +2906,12 @@ static int tegra210_init_pllu(void)
reg = readl_relaxed(clk_base + XUSB_PLL_CFG0);
reg &= ~XUSB_PLL_CFG0_PLLU_LOCK_DLY_MASK;
writel_relaxed(reg, clk_base + XUSB_PLL_CFG0);
- udelay(1);
+ fence_udelay(1, clk_base);
reg = readl_relaxed(clk_base + PLLU_HW_PWRDN_CFG0);
reg |= PLLU_HW_PWRDN_CFG0_SEQ_ENABLE;
writel_relaxed(reg, clk_base + PLLU_HW_PWRDN_CFG0);
- udelay(1);
+ fence_udelay(1, clk_base);
reg = readl_relaxed(clk_base + PLLU_BASE);
reg &= ~PLLU_BASE_CLKENABLE_USB;
@@ -2915,6 +2926,39 @@ static int tegra210_init_pllu(void)
return 0;
}
+/*
+ * The SOR hardware blocks are driven by two clocks: a module clock that is
+ * used to access registers and a pixel clock that is sourced from the same
+ * pixel clock that also drives the head attached to the SOR. The module
+ * clock is typically called sorX (with X being the SOR instance) and the
+ * pixel clock is called sorX_out. The source for the SOR pixel clock is
+ * referred to as the "parent" clock.
+ *
+ * On Tegra186 and newer, clocks are provided by the BPMP. Unfortunately the
+ * BPMP implementation for the SOR clocks doesn't exactly match the above in
+ * some aspects. For example, the SOR module is really clocked by the pad or
+ * sor_safe clocks, but BPMP models the sorX clock as being sourced by the
+ * pixel clocks. Conversely the sorX_out clock is sourced by the sor_safe or
+ * pad clocks on BPMP.
+ *
+ * In order to allow the display driver to deal with all SoC generations in
+ * a unified way, implement the BPMP semantics in this driver.
+ */
+
+static const char * const sor0_parents[] = {
+ "pll_d_out0",
+};
+
+static const char * const sor0_out_parents[] = {
+ "sor_safe", "sor0_pad_clkout",
+};
+
+static const char * const sor1_parents[] = {
+ "pll_p", "pll_d_out0", "pll_d2_out0", "clk_m",
+};
+
+static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
+
static const char * const sor1_out_parents[] = {
/*
* Bit 0 of the mux selects sor1_pad_clkout, irrespective of bit 1, so
@@ -2923,20 +2967,31 @@ static const char * const sor1_out_parents[] = {
* these bits to 0b11. While not an invalid setting, code should
* always set the bits to 0b01 to select sor1_pad_clkout.
*/
- "sor_safe", "sor1_pad_clkout", "sor1", "sor1_pad_clkout",
+ "sor_safe", "sor1_pad_clkout", "sor1_out", "sor1_pad_clkout",
};
-static const char * const sor1_parents[] = {
- "pll_p", "pll_d_out0", "pll_d2_out0", "clk_m",
-};
-
-static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
-
static struct tegra_periph_init_data tegra210_periph[] = {
+ /*
+ * On Tegra210, the sor0 clock doesn't have a mux in bitfield 31:29;
+ * it is hardwired to the pll_d_out0 clock.
+ */
+ TEGRA_INIT_DATA_TABLE("sor0", NULL, NULL, sor0_parents,
+ CLK_SOURCE_SOR0, 29, 0x0, 0, 0, 0, 0,
+ 0, 182, 0, tegra_clk_sor0, NULL, 0,
+ &sor0_lock),
+ TEGRA_INIT_DATA_TABLE("sor0_out", NULL, NULL, sor0_out_parents,
+ CLK_SOURCE_SOR0, 14, 0x1, 0, 0, 0, 0,
+ 0, 0, TEGRA_PERIPH_NO_GATE, tegra_clk_sor0_out,
+ NULL, 0, &sor0_lock),
TEGRA_INIT_DATA_TABLE("sor1", NULL, NULL, sor1_parents,
CLK_SOURCE_SOR1, 29, 0x7, 0, 0, 8, 1,
- TEGRA_DIVIDER_ROUND_UP, 183, 0, tegra_clk_sor1,
- sor1_parents_idx, 0, &sor1_lock),
+ TEGRA_DIVIDER_ROUND_UP, 183, 0,
+ tegra_clk_sor1, sor1_parents_idx, 0,
+ &sor1_lock),
+ TEGRA_INIT_DATA_TABLE("sor1_out", NULL, NULL, sor1_out_parents,
+ CLK_SOURCE_SOR1, 14, 0x3, 0, 0, 0, 0,
+ 0, 0, TEGRA_PERIPH_NO_GATE,
+ tegra_clk_sor1_out, NULL, 0, &sor1_lock),
};
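Editor's note: with the sorX / sorX_out split described in the comment above, a display driver can treat all SoC generations uniformly: it uses the module clock for register access and reparents the sorX_out pixel clock between sor_safe and the pad clock as the link comes up. A hedged consumer-side sketch (hypothetical clock consumer names; the real logic lives in the display driver):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int sor_pixel_clock_setup(struct device *dev)
{
	struct clk *out  = devm_clk_get(dev, "out");      /* sor0_out pixel clock */
	struct clk *pad  = devm_clk_get(dev, "pad");      /* sor0_pad_clkout */
	struct clk *safe = devm_clk_get(dev, "sor_safe");

	if (IS_ERR(out) || IS_ERR(pad) || IS_ERR(safe))
		return -ENODEV;

	/* run from the safe clock while the pad clock is not yet stable */
	clk_set_parent(out, safe);
	/* ... power up and configure the SOR pad ... */
	/* then switch the pixel clock over to the pad clock output */
	return clk_set_parent(out, pad);
}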
static const char * const la_parents[] = {
@@ -2969,12 +3024,6 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
1, 17, 207);
clks[TEGRA210_CLK_DPAUX1] = clk;
- clk = clk_register_mux_table(NULL, "sor1_out", sor1_out_parents,
- ARRAY_SIZE(sor1_out_parents), 0,
- clk_base + CLK_SOURCE_SOR1, 14, 0x3,
- 0, NULL, &sor1_lock);
- clks[TEGRA210_CLK_SOR1_OUT] = clk;
-
/* pll_d_dsi_out */
clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC0, 21, 0, &pll_d_lock);
@@ -3287,6 +3336,77 @@ static void tegra210_disable_cpu_clock(u32 cpu)
}
#ifdef CONFIG_PM_SLEEP
+#define car_readl(_base, _off) readl_relaxed(clk_base + (_base) + ((_off) * 4))
+#define car_writel(_val, _base, _off) \
+ writel_relaxed(_val, clk_base + (_base) + ((_off) * 4))
+
+static u32 spare_reg_ctx, misc_clk_enb_ctx, clk_msk_arm_ctx;
+static u32 cpu_softrst_ctx[3];
+
+static int tegra210_clk_suspend(void)
+{
+ unsigned int i;
+
+ clk_save_context();
+
+ /*
+ * Save the bootloader configured clock registers SPARE_REG0,
+ * MISC_CLK_ENB, CLK_MASK_ARM, CPU_SOFTRST_CTRL.
+ */
+ spare_reg_ctx = readl_relaxed(clk_base + SPARE_REG0);
+ misc_clk_enb_ctx = readl_relaxed(clk_base + MISC_CLK_ENB);
+ clk_msk_arm_ctx = readl_relaxed(clk_base + CLK_MASK_ARM);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_softrst_ctx); i++)
+ cpu_softrst_ctx[i] = car_readl(CPU_SOFTRST_CTRL, i);
+
+ tegra_clk_periph_suspend();
+ return 0;
+}
+
+static void tegra210_clk_resume(void)
+{
+ unsigned int i;
+
+ tegra_clk_osc_resume(clk_base);
+
+ /*
+ * Restore the bootloader configured clock registers SPARE_REG0,
+ * MISC_CLK_ENB, CLK_MASK_ARM, CPU_SOFTRST_CTRL from saved context.
+ */
+ writel_relaxed(spare_reg_ctx, clk_base + SPARE_REG0);
+ writel_relaxed(misc_clk_enb_ctx, clk_base + MISC_CLK_ENB);
+ writel_relaxed(clk_msk_arm_ctx, clk_base + CLK_MASK_ARM);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_softrst_ctx); i++)
+ car_writel(cpu_softrst_ctx[i], CPU_SOFTRST_CTRL, i);
+
+ /*
+ * The Tegra clock programming sequence recommends that a peripheral
+ * clock be enabled before changing its clock source and divider, to
+ * ensure a glitchless frequency switch. So enable all peripheral
+ * clocks before restoring their sources and dividers.
+ */
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_L, clk_base + CLK_OUT_ENB_L);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_H, clk_base + CLK_OUT_ENB_H);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_U, clk_base + CLK_OUT_ENB_U);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_V, clk_base + CLK_OUT_ENB_V);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_W, clk_base + CLK_OUT_ENB_W);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_X, clk_base + CLK_OUT_ENB_X);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_Y, clk_base + CLK_OUT_ENB_Y);
+
+ /* wait for all writes to happen to have all the clocks enabled */
+ fence_udelay(2, clk_base);
+
+ /* restore PLLs and all peripheral clock rates */
+ tegra210_init_pllu();
+ clk_restore_context();
+
+ /* restore saved context of peripheral clocks and reset state */
+ tegra_clk_periph_resume();
+}
+
static void tegra210_cpu_clock_suspend(void)
{
/* switch coresite to clk_m, save off original source */
@@ -3302,6 +3422,13 @@ static void tegra210_cpu_clock_resume(void)
}
#endif
+static struct syscore_ops tegra_clk_syscore_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = tegra210_clk_suspend,
+ .resume = tegra210_clk_resume,
+#endif
+};
+
static struct tegra_cpu_car_ops tegra210_cpu_car_ops = {
.wait_for_reset = tegra210_wait_cpu_in_reset,
.disable_clock = tegra210_disable_cpu_clock,
@@ -3586,5 +3713,7 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra210_mbist_clk_init();
tegra_cpu_car_ops = &tegra210_cpu_car_ops;
+
+ register_syscore_ops(&tegra_clk_syscore_ops);
}
CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 7b4c6a488527..c8bc18e4d7e5 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -151,7 +151,6 @@ static unsigned long input_freq;
static DEFINE_SPINLOCK(cml_lock);
static DEFINE_SPINLOCK(pll_d_lock);
-static DEFINE_SPINLOCK(emc_lock);
#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
_clk_num, _gate_flags, _clk_id) \
@@ -808,7 +807,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true },
[tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
[tegra_clk_cec] = { .dt_id = TEGRA30_CLK_CEC, .present = true },
- [tegra_clk_emc] = { .dt_id = TEGRA30_CLK_EMC, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA30_CLK_EMC, .present = false },
};
static const char *pll_e_parents[] = { "pll_ref", "pll_p" };
@@ -995,7 +994,6 @@ static void __init tegra30_super_clk_init(void)
static const char *mux_pllacp_clkm[] = { "pll_a_out0", "unused", "pll_p",
"clk_m" };
static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
-static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
static const char *spdif_out_parents[] = { "pll_a_out0", "spdif_2x", "pll_p",
"clk_m" };
static const char *mux_pllmcpa[] = { "pll_m", "pll_c", "pll_p", "pll_a_out0" };
@@ -1044,14 +1042,12 @@ static void __init tegra30_periph_clk_init(void)
clks[TEGRA30_CLK_AFI] = clk;
/* emc */
- clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + CLK_SOURCE_EMC,
- 30, 2, 0, &emc_lock);
+ clk = tegra20_clk_register_emc(clk_base + CLK_SOURCE_EMC, true);
+
+ clks[TEGRA30_CLK_EMC] = clk;
- clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
- &emc_lock);
+ clk = tegra_clk_register_mc("mc", "emc", clk_base + CLK_SOURCE_EMC,
+ NULL);
clks[TEGRA30_CLK_MC] = clk;
/* cml0 */
@@ -1167,6 +1163,7 @@ static void tegra30_cpu_clock_suspend(void)
static void tegra30_cpu_clock_resume(void)
{
unsigned int reg, policy;
+ u32 misc, base;
/* Is CPU complex already running on PLLX? */
reg = readl(clk_base + CLK_RESET_CCLK_BURST);
@@ -1180,15 +1177,21 @@ static void tegra30_cpu_clock_resume(void)
BUG();
if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
- /* restore PLLX settings if CPU is on different PLL */
- writel(tegra30_cpu_clk_sctx.pllx_misc,
- clk_base + CLK_RESET_PLLX_MISC);
- writel(tegra30_cpu_clk_sctx.pllx_base,
- clk_base + CLK_RESET_PLLX_BASE);
-
- /* wait for PLL stabilization if PLLX was enabled */
- if (tegra30_cpu_clk_sctx.pllx_base & (1 << 30))
- udelay(300);
+ misc = readl_relaxed(clk_base + CLK_RESET_PLLX_MISC);
+ base = readl_relaxed(clk_base + CLK_RESET_PLLX_BASE);
+
+ if (misc != tegra30_cpu_clk_sctx.pllx_misc ||
+ base != tegra30_cpu_clk_sctx.pllx_base) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra30_cpu_clk_sctx.pllx_misc,
+ clk_base + CLK_RESET_PLLX_MISC);
+ writel(tegra30_cpu_clk_sctx.pllx_base,
+ clk_base + CLK_RESET_PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra30_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
}
/*
@@ -1302,6 +1305,26 @@ static struct tegra_audio_clk_info tegra30_audio_plls[] = {
{ "pll_a", &pll_a_params, tegra_clk_pll_a, "pll_p_out1" },
};
+static struct clk *tegra30_clk_src_onecell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+
+ clk = of_clk_src_onecell_get(clkspec, data);
+ if (IS_ERR(clk))
+ return clk;
+
+ hw = __clk_get_hw(clk);
+
+ if (clkspec->args[0] == TEGRA30_CLK_EMC) {
+ if (!tegra20_clk_emc_driver_available(hw))
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return clk;
+}
+
static void __init tegra30_clock_init(struct device_node *np)
{
struct device_node *node;
@@ -1345,7 +1368,7 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
- tegra_add_of_provider(np, of_clk_src_onecell_get);
+ tegra_add_of_provider(np, tegra30_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra30_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 573e3c967ae1..e6bd6d1ea012 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -16,56 +16,13 @@
#include "clk.h"
-#define CLK_OUT_ENB_L 0x010
-#define CLK_OUT_ENB_H 0x014
-#define CLK_OUT_ENB_U 0x018
-#define CLK_OUT_ENB_V 0x360
-#define CLK_OUT_ENB_W 0x364
-#define CLK_OUT_ENB_X 0x280
-#define CLK_OUT_ENB_Y 0x298
-#define CLK_OUT_ENB_SET_L 0x320
-#define CLK_OUT_ENB_CLR_L 0x324
-#define CLK_OUT_ENB_SET_H 0x328
-#define CLK_OUT_ENB_CLR_H 0x32c
-#define CLK_OUT_ENB_SET_U 0x330
-#define CLK_OUT_ENB_CLR_U 0x334
-#define CLK_OUT_ENB_SET_V 0x440
-#define CLK_OUT_ENB_CLR_V 0x444
-#define CLK_OUT_ENB_SET_W 0x448
-#define CLK_OUT_ENB_CLR_W 0x44c
-#define CLK_OUT_ENB_SET_X 0x284
-#define CLK_OUT_ENB_CLR_X 0x288
-#define CLK_OUT_ENB_SET_Y 0x29c
-#define CLK_OUT_ENB_CLR_Y 0x2a0
-
-#define RST_DEVICES_L 0x004
-#define RST_DEVICES_H 0x008
-#define RST_DEVICES_U 0x00C
-#define RST_DEVICES_V 0x358
-#define RST_DEVICES_W 0x35C
-#define RST_DEVICES_X 0x28C
-#define RST_DEVICES_Y 0x2a4
-#define RST_DEVICES_SET_L 0x300
-#define RST_DEVICES_CLR_L 0x304
-#define RST_DEVICES_SET_H 0x308
-#define RST_DEVICES_CLR_H 0x30c
-#define RST_DEVICES_SET_U 0x310
-#define RST_DEVICES_CLR_U 0x314
-#define RST_DEVICES_SET_V 0x430
-#define RST_DEVICES_CLR_V 0x434
-#define RST_DEVICES_SET_W 0x438
-#define RST_DEVICES_CLR_W 0x43c
-#define RST_DEVICES_SET_X 0x290
-#define RST_DEVICES_CLR_X 0x294
-#define RST_DEVICES_SET_Y 0x2a8
-#define RST_DEVICES_CLR_Y 0x2ac
-
/* Global data of Tegra CPU CAR ops */
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;
int *periph_clk_enb_refcnt;
static int periph_banks;
+static u32 *periph_state_ctx;
static struct clk **clks;
static int clk_num;
static struct clk_onecell_data clk_data;
@@ -199,6 +156,65 @@ const struct tegra_clk_periph_regs *get_reg_bank(int clkid)
}
}
+void tegra_clk_set_pllp_out_cpu(bool enable)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + CLK_OUT_ENB_Y);
+ if (enable)
+ val |= CLK_ENB_PLLP_OUT_CPU;
+ else
+ val &= ~CLK_ENB_PLLP_OUT_CPU;
+
+ writel_relaxed(val, clk_base + CLK_OUT_ENB_Y);
+}
+
+void tegra_clk_periph_suspend(void)
+{
+ unsigned int i, idx;
+
+ idx = 0;
+ for (i = 0; i < periph_banks; i++, idx++)
+ periph_state_ctx[idx] =
+ readl_relaxed(clk_base + periph_regs[i].enb_reg);
+
+ for (i = 0; i < periph_banks; i++, idx++)
+ periph_state_ctx[idx] =
+ readl_relaxed(clk_base + periph_regs[i].rst_reg);
+}
+
+void tegra_clk_periph_resume(void)
+{
+ unsigned int i, idx;
+
+ idx = 0;
+ for (i = 0; i < periph_banks; i++, idx++)
+ writel_relaxed(periph_state_ctx[idx],
+ clk_base + periph_regs[i].enb_reg);
+ /*
+ * All non-boot peripherals will be in reset state on resume.
+ * Wait for 5us of reset propagation delay before de-asserting
+ * reset on the peripherals according to the saved context.
+ */
+ fence_udelay(5, clk_base);
+
+ for (i = 0; i < periph_banks; i++, idx++)
+ writel_relaxed(periph_state_ctx[idx],
+ clk_base + periph_regs[i].rst_reg);
+
+ fence_udelay(2, clk_base);
+}
+
+static int tegra_clk_periph_ctx_init(int banks)
+{
+ periph_state_ctx = kcalloc(2 * banks, sizeof(*periph_state_ctx),
+ GFP_KERNEL);
+ if (!periph_state_ctx)
+ return -ENOMEM;
+
+ return 0;
+}
+
struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
{
clk_base = regs;
@@ -220,6 +236,14 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
clk_num = num;
+ if (IS_ENABLED(CONFIG_PM_SLEEP)) {
+ if (tegra_clk_periph_ctx_init(banks)) {
+ kfree(periph_clk_enb_refcnt);
+ kfree(clks);
+ return NULL;
+ }
+ }
+
return clks;
}
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 905bf1096558..416a6b09f6a3 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -10,6 +10,65 @@
#include <linux/clkdev.h>
#include <linux/delay.h>
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_X 0x280
+#define CLK_OUT_ENB_Y 0x298
+#define CLK_ENB_PLLP_OUT_CPU BIT(31)
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_SET_W 0x448
+#define CLK_OUT_ENB_CLR_W 0x44c
+#define CLK_OUT_ENB_SET_X 0x284
+#define CLK_OUT_ENB_CLR_X 0x288
+#define CLK_OUT_ENB_SET_Y 0x29c
+#define CLK_OUT_ENB_CLR_Y 0x2a0
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00C
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35C
+#define RST_DEVICES_X 0x28C
+#define RST_DEVICES_Y 0x2a4
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_SET_W 0x438
+#define RST_DEVICES_CLR_W 0x43c
+#define RST_DEVICES_SET_X 0x290
+#define RST_DEVICES_CLR_X 0x294
+#define RST_DEVICES_SET_Y 0x2a8
+#define RST_DEVICES_CLR_Y 0x2ac
+
+/*
+ * The Tegra CLK_OUT_ENB registers contain undefined, unused bits; an
+ * accidental write of 1 to any of them can cause a PSLVERR.
+ * The masks below define the valid bits of each CLK_OUT_ENB register and are
+ * used to turn ON only the valid clocks.
+ */
+#define TEGRA210_CLK_ENB_VLD_MSK_L 0xdcd7dff9
+#define TEGRA210_CLK_ENB_VLD_MSK_H 0x87d1f3e7
+#define TEGRA210_CLK_ENB_VLD_MSK_U 0xf3fed3fa
+#define TEGRA210_CLK_ENB_VLD_MSK_V 0xffc18cfb
+#define TEGRA210_CLK_ENB_VLD_MSK_W 0x793fb7ff
+#define TEGRA210_CLK_ENB_VLD_MSK_X 0x3fe66fff
+#define TEGRA210_CLK_ENB_VLD_MSK_Y 0xfc1fc7ff
+
/**
* struct tegra_clk_sync_source - external clock source from codec
*
@@ -669,6 +728,9 @@ struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
* Flags:
* TEGRA_DIVIDER_2 - LP cluster has additional divider. This flag indicates
* that this is LP cluster clock.
+ * TEGRA210_CPU_CLK - This flag identifies the CPU cluster for the gen5
+ * super mux parent using PLLP branches. To route the PLLP branches to the
+ * CPU, the additional PLLP_OUT_CPU bit must be set in the clock registers.
*/
struct tegra_clk_super_mux {
struct clk_hw hw;
@@ -685,6 +747,7 @@ struct tegra_clk_super_mux {
#define to_clk_super_mux(_hw) container_of(_hw, struct tegra_clk_super_mux, hw)
#define TEGRA_DIVIDER_2 BIT(0)
+#define TEGRA210_CPU_CLK BIT(1)
extern const struct clk_ops tegra_clk_super_ops;
struct clk *tegra_clk_register_super_mux(const char *name,
@@ -829,6 +892,10 @@ u16 tegra_pll_get_fixed_mdiv(struct clk_hw *hw, unsigned long input_rate);
int tegra_pll_p_div_to_hw(struct tegra_clk_pll *pll, u8 p_div);
int div_frac_get(unsigned long rate, unsigned parent_rate, u8 width,
u8 frac_width, u8 flags);
+void tegra_clk_osc_resume(void __iomem *clk_base);
+void tegra_clk_set_pllp_out_cpu(bool enable);
+void tegra_clk_periph_suspend(void);
+void tegra_clk_periph_resume(void);
/* Combined read fence with delay */
@@ -838,4 +905,7 @@ int div_frac_get(unsigned long rate, unsigned parent_rate, u8 width,
udelay(delay); \
} while (0)
+bool tegra20_clk_emc_driver_available(struct clk_hw *emc_hw);
+struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter);
+
#endif /* TEGRA_CLK_H */
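The TEGRA210_CLK_ENB_VLD_MSK_* values added above exist so that resume code never writes 1 to an undefined CLK_OUT_ENB bit. A minimal sketch of such a guarded write, using a hypothetical helper name that is not part of this patch:

/* Hypothetical helper (not in this patch): never let a stray 1 reach an
 * undefined CLK_OUT_ENB bit.
 */
static inline void tegra_clk_enb_write(u32 val, u32 valid_mask,
					void __iomem *reg)
{
	writel_relaxed(val & valid_mask, reg);
}

/* usage: tegra_clk_enb_write(~0u, TEGRA210_CLK_ENB_VLD_MSK_L,
 *			      clk_base + CLK_OUT_ENB_L);
 */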
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index fdfb90058504..bb2f2836dab2 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -194,15 +194,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
if (err)
return NULL;
} else {
- const char *base_name = "adpll";
- char *buf;
-
- buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
- strlen(postfix), GFP_KERNEL);
- if (!buf)
- return NULL;
- sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
- name = buf;
+ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
+ d->pa, postfix);
}
return name;
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index a360d3109555..e001b9bcb6bf 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -107,7 +107,7 @@ static const struct omap_clkctrl_reg_data am3_l4hs_clkctrl_regs[] __initconst =
};
static const struct omap_clkctrl_reg_data am3_pruss_ocp_clkctrl_regs[] __initconst = {
- { AM3_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk" },
+ { AM3_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "pruss_ocp_gclk" },
{ 0 },
};
@@ -217,7 +217,7 @@ static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst
};
static const struct omap_clkctrl_reg_data am3_gfx_l3_clkctrl_regs[] __initconst = {
- { AM3_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
+ { AM3_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "gfx_fck_div_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 2782d91838ac..af3e7805769e 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -73,7 +73,7 @@ static const struct omap_clkctrl_reg_data am4_mpu_clkctrl_regs[] __initconst = {
};
static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst = {
- { AM4_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
+ { AM4_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "gfx_fck_div_ck" },
{ 0 },
};
@@ -126,7 +126,7 @@ static const struct omap_clkctrl_reg_data am4_l3s_clkctrl_regs[] __initconst = {
};
static const struct omap_clkctrl_reg_data am4_pruss_ocp_clkctrl_regs[] __initconst = {
- { AM4_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk" },
+ { AM4_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "pruss_ocp_gclk" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index b10ed0429091..2b4dab632318 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -37,7 +37,7 @@ static const struct omap_clkctrl_reg_data omap4_mpuss_clkctrl_regs[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_tesla_clkctrl_regs[] __initconst = {
- { OMAP4_DSP_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m4x2_ck" },
+ { OMAP4_DSP_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_m4x2_ck" },
{ 0 },
};
@@ -219,7 +219,7 @@ static const struct omap_clkctrl_reg_data omap4_l3_2_clkctrl_regs[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_ducati_clkctrl_regs[] __initconst = {
- { OMAP4_IPU_CLKCTRL, NULL, CLKF_HW_SUP, "ducati_clk_mux_ck" },
+ { OMAP4_IPU_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "ducati_clk_mux_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index e675e27f1203..c9608e5d993a 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -31,7 +31,7 @@ static const struct omap_clkctrl_reg_data omap5_mpu_clkctrl_regs[] __initconst =
};
static const struct omap_clkctrl_reg_data omap5_dsp_clkctrl_regs[] __initconst = {
- { OMAP5_MMU_DSP_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h11x2_ck" },
+ { OMAP5_MMU_DSP_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_h11x2_ck" },
{ 0 },
};
@@ -145,7 +145,7 @@ static const struct omap_clkctrl_reg_data omap5_l3main2_clkctrl_regs[] __initcon
};
static const struct omap_clkctrl_reg_data omap5_ipu_clkctrl_regs[] __initconst = {
- { OMAP5_MMU_IPU_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h22x2_ck" },
+ { OMAP5_MMU_IPU_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_core_h22x2_ck" },
{ 0 },
};
@@ -286,6 +286,12 @@ static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst
{ 0 },
};
+static const struct omap_clkctrl_reg_data omap5_iva_clkctrl_regs[] __initconst = {
+ { OMAP5_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { OMAP5_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { 0 },
+};
+
static const char * const omap5_dss_dss_clk_parents[] __initconst = {
"dpll_per_h12x2_ck",
NULL,
@@ -502,6 +508,7 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
{ 0x4a008d20, omap5_l4cfg_clkctrl_regs },
{ 0x4a008e20, omap5_l3instr_clkctrl_regs },
{ 0x4a009020, omap5_l4per_clkctrl_regs },
+ { 0x4a009220, omap5_iva_clkctrl_regs },
{ 0x4a009420, omap5_dss_clkctrl_regs },
{ 0x4a009520, omap5_gpu_clkctrl_regs },
{ 0x4a009620, omap5_l3init_clkctrl_regs },
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 9dd6185a4b4e..5f46782cebeb 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -25,7 +25,7 @@ static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_dsp1_clkctrl_regs[] __initconst = {
- { DRA7_DSP1_MMU0_DSP1_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_dsp_m2_ck" },
+ { DRA7_DSP1_MMU0_DSP1_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_dsp_m2_ck" },
{ 0 },
};
@@ -41,7 +41,7 @@ static const struct omap_clkctrl_bit_data dra7_mmu_ipu1_bit_data[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_ipu1_clkctrl_regs[] __initconst = {
- { DRA7_IPU1_MMU_IPU1_CLKCTRL, dra7_mmu_ipu1_bit_data, CLKF_HW_SUP, "ipu1-clkctrl:0000:24" },
+ { DRA7_IPU1_MMU_IPU1_CLKCTRL, dra7_mmu_ipu1_bit_data, CLKF_HW_SUP | CLKF_NO_IDLEST, "ipu1-clkctrl:0000:24" },
{ 0 },
};
@@ -137,7 +137,7 @@ static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_dsp2_clkctrl_regs[] __initconst = {
- { DRA7_DSP2_MMU0_DSP2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_dsp_m2_ck" },
+ { DRA7_DSP2_MMU0_DSP2_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_dsp_m2_ck" },
{ 0 },
};
@@ -164,7 +164,7 @@ static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initcons
};
static const struct omap_clkctrl_reg_data dra7_ipu2_clkctrl_regs[] __initconst = {
- { DRA7_IPU2_MMU_IPU2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h22x2_ck" },
+ { DRA7_IPU2_MMU_IPU2_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_core_h22x2_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index b0c0690a5a12..17b9a761242f 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -24,7 +24,7 @@
#include <linux/timekeeping.h>
#include "clock.h"
-#define NO_IDLEST 0x1
+#define NO_IDLEST 0
#define OMAP4_MODULEMODE_MASK 0x3
@@ -34,6 +34,9 @@
#define OMAP4_IDLEST_MASK (0x3 << 16)
#define OMAP4_IDLEST_SHIFT 16
+#define OMAP4_STBYST_MASK BIT(18)
+#define OMAP4_STBYST_SHIFT 18
+
#define CLKCTRL_IDLEST_FUNCTIONAL 0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2
#define CLKCTRL_IDLEST_DISABLED 0x3
@@ -159,7 +162,7 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
- if (clk->flags & NO_IDLEST)
+ if (test_bit(NO_IDLEST, &clk->flags))
return 0;
/* Wait until module is enabled */
@@ -188,7 +191,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
- if (clk->flags & NO_IDLEST)
+ if (test_bit(NO_IDLEST, &clk->flags))
goto exit;
/* Wait until module is disabled */
@@ -381,7 +384,7 @@ _ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
div_data->max_div, div_flags,
- &div->width, &div->table)) {
+ div)) {
pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
node, offset, data->bit);
kfree(div);
@@ -597,7 +600,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
if (reg_data->flags & CLKF_HW_SUP)
hw->enable_bit = MODULEMODE_HWCTRL;
if (reg_data->flags & CLKF_NO_IDLEST)
- hw->flags |= NO_IDLEST;
+ set_bit(NO_IDLEST, &hw->flags);
if (reg_data->clkdm_name)
hw->clkdm_name = reg_data->clkdm_name;
@@ -623,7 +626,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
init.ops = &omap4_clkctrl_clk_ops;
hw->hw.init = &init;
- clk = ti_clk_register(NULL, &hw->hw, init.name);
+ clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
if (IS_ERR_OR_NULL(clk))
goto cleanup;
@@ -648,3 +651,33 @@ cleanup:
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
_ti_omap4_clkctrl_setup);
+
+/**
+ * ti_clk_is_in_standby - Check if a clkctrl clock is in standby
+ * @clk: clock to check standby status for
+ *
+ * Checks whether the provided clock is in standby mode. Returns true if the
+ * provided clock is a clkctrl type clock and it is in standby, false
+ * otherwise.
+ */
+bool ti_clk_is_in_standby(struct clk *clk)
+{
+ struct clk_hw *hw;
+ struct clk_hw_omap *hwclk;
+ u32 val;
+
+ hw = __clk_get_hw(clk);
+
+ if (!omap2_clk_is_hw_omap(hw))
+ return false;
+
+ hwclk = to_clk_hw_omap(hw);
+
+ val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);
+
+ if (val & OMAP4_STBYST_MASK)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);
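ti_clk_is_in_standby() only samples the STBYST bit, so a caller that needs the module to actually reach standby still has to poll. A rough consumer-side sketch, assuming a hypothetical wrapper and polling budget:

static int example_wait_for_standby(struct clk *clk)
{
	int retries = 100;	/* assumed budget: roughly 1 ms */

	while (!ti_clk_is_in_standby(clk)) {
		if (!retries--)
			return -ETIMEDOUT;
		udelay(10);
	}

	return 0;
}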
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index e4b8392ff63c..e6995c04001e 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -20,9 +20,11 @@ struct clk_omap_divider {
struct clk_hw hw;
struct clk_omap_reg reg;
u8 shift;
- u8 width;
u8 flags;
s8 latch;
+ u16 min;
+ u16 max;
+ u16 mask;
const struct clk_div_table *table;
u32 context;
};
@@ -220,8 +222,7 @@ void ti_clk_latch(struct clk_omap_reg *reg, s8 shift);
struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
- u8 flags, u8 *width,
- const struct clk_div_table **table);
+ u8 flags, struct clk_omap_divider *div);
int ti_clk_get_reg_addr(struct device_node *node, int index,
struct clk_omap_reg *reg);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 6cb863c13648..28080df92f72 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -26,30 +26,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define div_mask(d) ((1 << ((d)->width)) - 1)
-
-static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
-{
- unsigned int maxdiv = 0;
- const struct clk_div_table *clkt;
-
- for (clkt = table; clkt->div; clkt++)
- if (clkt->div > maxdiv)
- maxdiv = clkt->div;
- return maxdiv;
-}
-
-static unsigned int _get_maxdiv(struct clk_omap_divider *divider)
-{
- if (divider->flags & CLK_DIVIDER_ONE_BASED)
- return div_mask(divider);
- if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
- return 1 << div_mask(divider);
- if (divider->table)
- return _get_table_maxdiv(divider->table);
- return div_mask(divider) + 1;
-}
-
static unsigned int _get_table_div(const struct clk_div_table *table,
unsigned int val)
{
@@ -61,6 +37,34 @@ static unsigned int _get_table_div(const struct clk_div_table *table,
return 0;
}
+static void _setup_mask(struct clk_omap_divider *divider)
+{
+ u16 mask;
+ u32 max_val;
+ const struct clk_div_table *clkt;
+
+ if (divider->table) {
+ max_val = 0;
+
+ for (clkt = divider->table; clkt->div; clkt++)
+ if (clkt->val > max_val)
+ max_val = clkt->val;
+ } else {
+ max_val = divider->max;
+
+ if (!(divider->flags & CLK_DIVIDER_ONE_BASED) &&
+ !(divider->flags & CLK_DIVIDER_POWER_OF_TWO))
+ max_val--;
+ }
+
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ mask = fls(max_val) - 1;
+ else
+ mask = max_val;
+
+ divider->mask = (1 << fls(mask)) - 1;
+}
+
static unsigned int _get_div(struct clk_omap_divider *divider, unsigned int val)
{
if (divider->flags & CLK_DIVIDER_ONE_BASED)
@@ -101,7 +105,7 @@ static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
unsigned int div, val;
val = ti_clk_ll_ops->clk_readl(&divider->reg) >> divider->shift;
- val &= div_mask(divider);
+ val &= divider->mask;
div = _get_div(divider, val);
if (!div) {
@@ -180,7 +184,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (!rate)
rate = 1;
- maxdiv = _get_maxdiv(divider);
+ maxdiv = divider->max;
if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
parent_rate = *best_parent_rate;
@@ -219,7 +223,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
}
if (!bestdiv) {
- bestdiv = _get_maxdiv(divider);
+ bestdiv = divider->max;
*best_parent_rate =
clk_hw_round_rate(clk_hw_get_parent(hw), 1);
}
@@ -249,17 +253,16 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
divider = to_clk_omap_divider(hw);
div = DIV_ROUND_UP(parent_rate, rate);
- value = _get_val(divider, div);
- if (value > div_mask(divider))
- value = div_mask(divider);
+ if (div > divider->max)
+ div = divider->max;
+ if (div < divider->min)
+ div = divider->min;
- if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
- val = div_mask(divider) << (divider->shift + 16);
- } else {
- val = ti_clk_ll_ops->clk_readl(&divider->reg);
- val &= ~(div_mask(divider) << divider->shift);
- }
+ value = _get_val(divider, div);
+
+ val = ti_clk_ll_ops->clk_readl(&divider->reg);
+ val &= ~(divider->mask << divider->shift);
val |= value << divider->shift;
ti_clk_ll_ops->clk_writel(val, &divider->reg);
@@ -280,7 +283,7 @@ static int clk_divider_save_context(struct clk_hw *hw)
u32 val;
val = ti_clk_ll_ops->clk_readl(&divider->reg) >> divider->shift;
- divider->context = val & div_mask(divider);
+ divider->context = val & divider->mask;
return 0;
}
@@ -297,7 +300,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
u32 val;
val = ti_clk_ll_ops->clk_readl(&divider->reg);
- val &= ~(div_mask(divider) << divider->shift);
+ val &= ~(divider->mask << divider->shift);
val |= divider->context << divider->shift;
ti_clk_ll_ops->clk_writel(val, &divider->reg);
}
@@ -310,47 +313,26 @@ const struct clk_ops ti_clk_divider_ops = {
.restore_context = clk_divider_restore_context,
};
-static struct clk *_register_divider(struct device *dev, const char *name,
- const char *parent_name,
- unsigned long flags,
- struct clk_omap_reg *reg,
- u8 shift, u8 width, s8 latch,
- u8 clk_divider_flags,
- const struct clk_div_table *table)
+static struct clk *_register_divider(struct device_node *node,
+ u32 flags,
+ struct clk_omap_divider *div)
{
- struct clk_omap_divider *div;
struct clk *clk;
struct clk_init_data init;
+ const char *parent_name;
- if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
- if (width + shift > 16) {
- pr_warn("divider value exceeds LOWORD field\n");
- return ERR_PTR(-EINVAL);
- }
- }
-
- /* allocate the divider */
- div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div)
- return ERR_PTR(-ENOMEM);
+ parent_name = of_clk_get_parent_name(node, 0);
- init.name = name;
+ init.name = node->name;
init.ops = &ti_clk_divider_ops;
init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
- /* struct clk_divider assignments */
- memcpy(&div->reg, reg, sizeof(*reg));
- div->shift = shift;
- div->width = width;
- div->latch = latch;
- div->flags = clk_divider_flags;
div->hw.init = &init;
- div->table = table;
/* register the clock */
- clk = ti_clk_register(dev, &div->hw, name);
+ clk = ti_clk_register(NULL, &div->hw, node->name);
if (IS_ERR(clk))
kfree(div);
@@ -359,34 +341,17 @@ static struct clk *_register_divider(struct device *dev, const char *name,
}
int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
- u8 flags, u8 *width,
- const struct clk_div_table **table)
+ u8 flags, struct clk_omap_divider *divider)
{
int valid_div = 0;
- u32 val;
- int div;
int i;
struct clk_div_table *tmp;
+ u16 min_div = 0;
if (!div_table) {
- if (flags & CLKF_INDEX_STARTS_AT_ONE)
- val = 1;
- else
- val = 0;
-
- div = 1;
-
- while (div < max_div) {
- if (flags & CLKF_INDEX_POWER_OF_TWO)
- div <<= 1;
- else
- div++;
- val++;
- }
-
- *width = fls(val);
- *table = NULL;
-
+ divider->min = 1;
+ divider->max = max_div;
+ _setup_mask(divider);
return 0;
}
@@ -403,30 +368,32 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
num_dividers = i;
tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
- if (!tmp) {
- *table = ERR_PTR(-ENOMEM);
+ if (!tmp)
return -ENOMEM;
- }
valid_div = 0;
- *width = 0;
for (i = 0; i < num_dividers; i++)
if (div_table[i] > 0) {
tmp[valid_div].div = div_table[i];
tmp[valid_div].val = i;
valid_div++;
- *width = i;
+ if (div_table[i] > max_div)
+ max_div = div_table[i];
+ if (!min_div || div_table[i] < min_div)
+ min_div = div_table[i];
}
- *width = fls(*width);
- *table = tmp;
+ divider->min = min_div;
+ divider->max = max_div;
+ divider->table = tmp;
+ _setup_mask(divider);
return 0;
}
-static struct clk_div_table *
-__init ti_clk_get_div_table(struct device_node *node)
+static int __init ti_clk_get_div_table(struct device_node *node,
+ struct clk_omap_divider *div)
{
struct clk_div_table *table;
const __be32 *divspec;
@@ -438,7 +405,7 @@ __init ti_clk_get_div_table(struct device_node *node)
divspec = of_get_property(node, "ti,dividers", &num_div);
if (!divspec)
- return NULL;
+ return 0;
num_div /= 4;
@@ -453,13 +420,12 @@ __init ti_clk_get_div_table(struct device_node *node)
if (!valid_div) {
pr_err("no valid dividers for %pOFn table\n", node);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
table = kcalloc(valid_div + 1, sizeof(*table), GFP_KERNEL);
-
if (!table)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
valid_div = 0;
@@ -472,19 +438,20 @@ __init ti_clk_get_div_table(struct device_node *node)
}
}
- return table;
+ div->table = table;
+
+ return 0;
}
-static int _get_divider_width(struct device_node *node,
- const struct clk_div_table *table,
- u8 flags)
+static int _populate_divider_min_max(struct device_node *node,
+ struct clk_omap_divider *divider)
{
- u32 min_div;
- u32 max_div;
- u32 val = 0;
- u32 div;
+ u32 min_div = 0;
+ u32 max_div = 0;
+ u32 val;
+ const struct clk_div_table *clkt;
- if (!table) {
+ if (!divider->table) {
/* Clk divider table not provided, determine min/max divs */
if (of_property_read_u32(node, "ti,min-div", &min_div))
min_div = 1;
@@ -493,75 +460,62 @@ static int _get_divider_width(struct device_node *node,
pr_err("no max-div for %pOFn!\n", node);
return -EINVAL;
}
-
- /* Determine bit width for the field */
- if (flags & CLK_DIVIDER_ONE_BASED)
- val = 1;
-
- div = min_div;
-
- while (div < max_div) {
- if (flags & CLK_DIVIDER_POWER_OF_TWO)
- div <<= 1;
- else
- div++;
- val++;
- }
} else {
- div = 0;
- while (table[div].div) {
- val = table[div].val;
- div++;
+ for (clkt = divider->table; clkt->div; clkt++) {
+ val = clkt->div;
+ if (val > max_div)
+ max_div = val;
+ if (!min_div || val < min_div)
+ min_div = val;
}
}
- return fls(val);
+ divider->min = min_div;
+ divider->max = max_div;
+ _setup_mask(divider);
+
+ return 0;
}
static int __init ti_clk_divider_populate(struct device_node *node,
- struct clk_omap_reg *reg, const struct clk_div_table **table,
- u32 *flags, u8 *div_flags, u8 *width, u8 *shift, s8 *latch)
+ struct clk_omap_divider *div,
+ u32 *flags)
{
u32 val;
int ret;
- ret = ti_clk_get_reg_addr(node, 0, reg);
+ ret = ti_clk_get_reg_addr(node, 0, &div->reg);
if (ret)
return ret;
if (!of_property_read_u32(node, "ti,bit-shift", &val))
- *shift = val;
+ div->shift = val;
else
- *shift = 0;
+ div->shift = 0;
- if (latch) {
- if (!of_property_read_u32(node, "ti,latch-bit", &val))
- *latch = val;
- else
- *latch = -EINVAL;
- }
+ if (!of_property_read_u32(node, "ti,latch-bit", &val))
+ div->latch = val;
+ else
+ div->latch = -EINVAL;
*flags = 0;
- *div_flags = 0;
+ div->flags = 0;
if (of_property_read_bool(node, "ti,index-starts-at-one"))
- *div_flags |= CLK_DIVIDER_ONE_BASED;
+ div->flags |= CLK_DIVIDER_ONE_BASED;
if (of_property_read_bool(node, "ti,index-power-of-two"))
- *div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+ div->flags |= CLK_DIVIDER_POWER_OF_TWO;
if (of_property_read_bool(node, "ti,set-rate-parent"))
*flags |= CLK_SET_RATE_PARENT;
- *table = ti_clk_get_div_table(node);
-
- if (IS_ERR(*table))
- return PTR_ERR(*table);
-
- *width = _get_divider_width(node, *table, *div_flags);
+ ret = ti_clk_get_div_table(node, div);
+ if (ret)
+ return ret;
- return 0;
+ return _populate_divider_min_max(node, div);
}
/**
@@ -573,24 +527,17 @@ static int __init ti_clk_divider_populate(struct device_node *node,
static void __init of_ti_divider_clk_setup(struct device_node *node)
{
struct clk *clk;
- const char *parent_name;
- struct clk_omap_reg reg;
- u8 clk_divider_flags = 0;
- u8 width = 0;
- u8 shift = 0;
- s8 latch = -EINVAL;
- const struct clk_div_table *table = NULL;
u32 flags = 0;
+ struct clk_omap_divider *div;
- parent_name = of_clk_get_parent_name(node, 0);
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return;
- if (ti_clk_divider_populate(node, &reg, &table, &flags,
- &clk_divider_flags, &width, &shift, &latch))
+ if (ti_clk_divider_populate(node, div, &flags))
goto cleanup;
- clk = _register_divider(NULL, node->name, parent_name, flags, &reg,
- shift, width, latch, clk_divider_flags, table);
-
+ clk = _register_divider(node, flags, div);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
of_ti_clk_autoidle_setup(node);
@@ -598,22 +545,21 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
}
cleanup:
- kfree(table);
+ kfree(div->table);
+ kfree(div);
}
CLK_OF_DECLARE(divider_clk, "ti,divider-clock", of_ti_divider_clk_setup);
static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
{
struct clk_omap_divider *div;
- u32 val;
+ u32 tmp;
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return;
- if (ti_clk_divider_populate(node, &div->reg, &div->table, &val,
- &div->flags, &div->width, &div->shift,
- NULL) < 0)
+ if (ti_clk_divider_populate(node, div, &tmp))
goto cleanup;
if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index c6aaca73cf86..12380236d7ab 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -64,8 +64,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
for (p = data; p->name; p++)
clk_num = max(clk_num, p->idx + 1);
- hw_data = devm_kzalloc(dev,
- sizeof(*hw_data) + clk_num * sizeof(struct clk_hw *),
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, clk_num),
GFP_KERNEL);
if (!hw_data)
return -ENOMEM;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f35a53ce8988..5fdd76cb1768 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -528,6 +528,7 @@ config SH_TIMER_MTU2
config RENESAS_OSTM
bool "Renesas OSTM timer driver" if COMPILE_TEST
select CLKSRC_MMIO
+ select TIMER_OF
help
Enables the support for the Renesas OSTM.
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 9f09a59161e7..5b39d3701fa3 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -194,6 +194,10 @@ static int __init asm9260_timer_init(struct device_node *np)
}
clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clk!\n");
+ return PTR_ERR(clk);
+ }
ret = clk_prepare_enable(clk);
if (ret) {
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 37c39b901bb1..3d06ba66008c 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -6,14 +6,14 @@
* Copyright (C) 2017 Chris Brandt
*/
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
+#include "timer-of.h"
+
/*
* The OSTM contains independent channels.
* The first OSTM channel probed will be set up as a free running
@@ -24,12 +24,6 @@
* driven clock event.
*/
-struct ostm_device {
- void __iomem *base;
- unsigned long ticks_per_jiffy;
- struct clock_event_device ced;
-};
-
static void __iomem *system_clock; /* For sched_clock() */
/* OSTM REGISTERS */
@@ -47,41 +41,32 @@ static void __iomem *system_clock; /* For sched_clock() */
#define CTL_ONESHOT 0x02
#define CTL_FREERUN 0x02
-static struct ostm_device *ced_to_ostm(struct clock_event_device *ced)
-{
- return container_of(ced, struct ostm_device, ced);
-}
-
-static void ostm_timer_stop(struct ostm_device *ostm)
+static void ostm_timer_stop(struct timer_of *to)
{
- if (readb(ostm->base + OSTM_TE) & TE) {
- writeb(TT, ostm->base + OSTM_TT);
+ if (readb(timer_of_base(to) + OSTM_TE) & TE) {
+ writeb(TT, timer_of_base(to) + OSTM_TT);
/*
* Read back the register simply to confirm the write operation
* has completed since I/O writes can sometimes get queued by
* the bus architecture.
*/
- while (readb(ostm->base + OSTM_TE) & TE)
+ while (readb(timer_of_base(to) + OSTM_TE) & TE)
;
}
}
-static int __init ostm_init_clksrc(struct ostm_device *ostm, unsigned long rate)
+static int __init ostm_init_clksrc(struct timer_of *to)
{
- /*
- * irq not used (clock sources don't use interrupts)
- */
-
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(0, ostm->base + OSTM_CMP);
- writeb(CTL_FREERUN, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(0, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_FREERUN, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
- return clocksource_mmio_init(ostm->base + OSTM_CNT,
- "ostm", rate,
- 300, 32, clocksource_mmio_readl_up);
+ return clocksource_mmio_init(timer_of_base(to) + OSTM_CNT,
+ to->np->full_name, timer_of_rate(to), 300,
+ 32, clocksource_mmio_readl_up);
}
static u64 notrace ostm_read_sched_clock(void)
@@ -89,87 +74,75 @@ static u64 notrace ostm_read_sched_clock(void)
return readl(system_clock);
}
-static void __init ostm_init_sched_clock(struct ostm_device *ostm,
- unsigned long rate)
+static void __init ostm_init_sched_clock(struct timer_of *to)
{
- system_clock = ostm->base + OSTM_CNT;
- sched_clock_register(ostm_read_sched_clock, 32, rate);
+ system_clock = timer_of_base(to) + OSTM_CNT;
+ sched_clock_register(ostm_read_sched_clock, 32, timer_of_rate(to));
}
static int ostm_clock_event_next(unsigned long delta,
- struct clock_event_device *ced)
+ struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(delta, ostm->base + OSTM_CMP);
- writeb(CTL_ONESHOT, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(delta, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_ONESHOT, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
return 0;
}
static int ostm_shutdown(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
return 0;
}
static int ostm_set_periodic(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(ostm->ticks_per_jiffy - 1, ostm->base + OSTM_CMP);
- writeb(CTL_PERIODIC, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(timer_of_period(to) - 1, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_PERIODIC, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
return 0;
}
static int ostm_set_oneshot(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
return 0;
}
static irqreturn_t ostm_timer_interrupt(int irq, void *dev_id)
{
- struct ostm_device *ostm = dev_id;
+ struct clock_event_device *ced = dev_id;
- if (clockevent_state_oneshot(&ostm->ced))
- ostm_timer_stop(ostm);
+ if (clockevent_state_oneshot(ced))
+ ostm_timer_stop(to_timer_of(ced));
/* notify clockevent layer */
- if (ostm->ced.event_handler)
- ostm->ced.event_handler(&ostm->ced);
+ if (ced->event_handler)
+ ced->event_handler(ced);
return IRQ_HANDLED;
}
-static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
- unsigned long rate)
+static int __init ostm_init_clkevt(struct timer_of *to)
{
- struct clock_event_device *ced = &ostm->ced;
- int ret = -ENXIO;
-
- ret = request_irq(irq, ostm_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL,
- "ostm", ostm);
- if (ret) {
- pr_err("ostm: failed to request irq\n");
- return ret;
- }
+ struct clock_event_device *ced = &to->clkevt;
- ced->name = "ostm";
ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
ced->set_state_shutdown = ostm_shutdown;
ced->set_state_periodic = ostm_set_periodic;
@@ -178,79 +151,61 @@ static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
ced->shift = 32;
ced->rating = 300;
ced->cpumask = cpumask_of(0);
- clockevents_config_and_register(ced, rate, 0xf, 0xffffffff);
+ clockevents_config_and_register(ced, timer_of_rate(to), 0xf,
+ 0xffffffff);
return 0;
}
static int __init ostm_init(struct device_node *np)
{
- struct ostm_device *ostm;
- int ret = -EFAULT;
- struct clk *ostm_clk = NULL;
- int irq;
- unsigned long rate;
-
- ostm = kzalloc(sizeof(*ostm), GFP_KERNEL);
- if (!ostm)
- return -ENOMEM;
-
- ostm->base = of_iomap(np, 0);
- if (!ostm->base) {
- pr_err("ostm: failed to remap I/O memory\n");
- goto err;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq < 0) {
- pr_err("ostm: Failed to get irq\n");
- goto err;
- }
+ struct timer_of *to;
+ int ret;
- ostm_clk = of_clk_get(np, 0);
- if (IS_ERR(ostm_clk)) {
- pr_err("ostm: Failed to get clock\n");
- ostm_clk = NULL;
- goto err;
- }
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
- ret = clk_prepare_enable(ostm_clk);
- if (ret) {
- pr_err("ostm: Failed to enable clock\n");
- goto err;
+ to->flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
+ if (system_clock) {
+ /*
+ * clock sources don't use interrupts, clock events do
+ */
+ to->flags |= TIMER_OF_IRQ;
+ to->of_irq.flags = IRQF_TIMER | IRQF_IRQPOLL;
+ to->of_irq.handler = ostm_timer_interrupt;
}
- rate = clk_get_rate(ostm_clk);
- ostm->ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+ ret = timer_of_init(np, to);
+ if (ret)
+ goto err_free;
/*
* First probed device will be used as system clocksource. Any
* additional devices will be used as clock events.
*/
if (!system_clock) {
- ret = ostm_init_clksrc(ostm, rate);
-
- if (!ret) {
- ostm_init_sched_clock(ostm, rate);
- pr_info("ostm: used for clocksource\n");
- }
+ ret = ostm_init_clksrc(to);
+ if (ret)
+ goto err_cleanup;
+ ostm_init_sched_clock(to);
+ pr_info("%pOF: used for clocksource\n", np);
} else {
- ret = ostm_init_clkevt(ostm, irq, rate);
+ ret = ostm_init_clkevt(to);
+ if (ret)
+ goto err_cleanup;
- if (!ret)
- pr_info("ostm: used for clock events\n");
- }
-
-err:
- if (ret) {
- clk_disable_unprepare(ostm_clk);
- iounmap(ostm->base);
- kfree(ostm);
- return ret;
+ pr_info("%pOF: used for clock events\n", np);
}
return 0;
+
+err_cleanup:
+ timer_of_cleanup(to);
+err_free:
+ kfree(to);
+ return ret;
}
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index ef773db080e9..9cde50cb3220 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -25,6 +25,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
struct sh_cmt_device;
/*
@@ -1052,7 +1056,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -1072,7 +1076,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -1109,7 +1113,10 @@ static void __exit sh_cmt_exit(void)
platform_driver_unregister(&sh_cmt_device_driver);
}
-early_platform_init("earlytimer", &sh_cmt_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
+#endif
+
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 62812f80b5cc..64526e50d471 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -23,6 +23,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
struct sh_mtu2_device;
struct sh_mtu2_channel {
@@ -448,7 +452,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -468,7 +472,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -517,7 +521,10 @@ static void __exit sh_mtu2_exit(void)
platform_driver_unregister(&sh_mtu2_device_driver);
}
-early_platform_init("earlytimer", &sh_mtu2_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_mtu2_device_driver);
+#endif
+
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 8c4f3753b36e..d49690d15536 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -24,6 +24,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
enum sh_tmu_model {
SH_TMU,
SH_TMU_SH3,
@@ -595,7 +599,7 @@ static int sh_tmu_probe(struct platform_device *pdev)
struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -615,7 +619,8 @@ static int sh_tmu_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -665,7 +670,10 @@ static void __exit sh_tmu_exit(void)
platform_driver_unregister(&sh_tmu_device_driver);
}
-early_platform_init("earlytimer", &sh_tmu_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
+#endif
+
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 11ff701ff4bb..572da477c6d3 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -57,8 +57,8 @@ static __init int timer_of_irq_init(struct device_node *np,
if (of_irq->name) {
of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
if (ret < 0) {
- pr_err("Failed to get interrupt %s for %s\n",
- of_irq->name, np->full_name);
+ pr_err("Failed to get interrupt %s for %pOF\n",
+ of_irq->name, np);
return ret;
}
} else {
@@ -192,7 +192,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
}
if (!to->clkevt.name)
- to->clkevt.name = np->name;
+ to->clkevt.name = np->full_name;
to->np = np;
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 470c7ef02ea4..4e54856ce2a5 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -3,9 +3,9 @@
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*
- * All RISC-V systems have a timer attached to every hart. These timers can be
- * read from the "time" and "timeh" CSRs, and can use the SBI to setup
- * events.
+ * All RISC-V systems have a timer attached to every hart. These timers can
+ * either be read from the "time" and "timeh" CSRs and use the SBI to set up
+ * events, or be accessed directly through MMIO registers.
*/
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -13,14 +13,29 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/sched_clock.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/smp.h>
#include <asm/sbi.h>
+u64 __iomem *riscv_time_cmp;
+u64 __iomem *riscv_time_val;
+
+static inline void mmio_set_timer(u64 val)
+{
+ void __iomem *r;
+
+ r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id());
+ writeq_relaxed(val, r);
+}
+
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
- csr_set(sie, SIE_STIE);
- sbi_set_timer(get_cycles64() + delta);
+ csr_set(CSR_IE, IE_TIE);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_set_timer(get_cycles64() + delta);
+ else
+ mmio_set_timer(get_cycles64() + delta);
return 0;
}
@@ -61,13 +76,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
ce->cpumask = cpumask_of(cpu);
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
- csr_set(sie, SIE_STIE);
+ csr_set(CSR_IE, IE_TIE);
return 0;
}
static int riscv_timer_dying_cpu(unsigned int cpu)
{
- csr_clear(sie, SIE_STIE);
+ csr_clear(CSR_IE, IE_TIE);
return 0;
}
@@ -76,7 +91,7 @@ void riscv_timer_interrupt(void)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
- csr_clear(sie, SIE_STIE);
+ csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
}
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 00b113f4b958..17e67a84777d 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -562,11 +562,10 @@ static const struct iio_chan_spec quad8_channels[] = {
};
static int quad8_signal_read(struct counter_device *counter,
- struct counter_signal *signal, struct counter_signal_read_value *val)
+ struct counter_signal *signal, enum counter_signal_value *val)
{
const struct quad8_iio *const priv = counter->priv;
unsigned int state;
- enum counter_signal_level level;
/* Only Index signal levels can be read */
if (signal->id < 16)
@@ -575,22 +574,19 @@ static int quad8_signal_read(struct counter_device *counter,
state = inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
& BIT(signal->id - 16);
- level = (state) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
-
- counter_signal_read_value_set(val, COUNTER_SIGNAL_LEVEL, &level);
+ *val = (state) ? COUNTER_SIGNAL_HIGH : COUNTER_SIGNAL_LOW;
return 0;
}
static int quad8_count_read(struct counter_device *counter,
- struct counter_count *count, struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
const struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
- unsigned long position;
int i;
flags = inb(base_offset + 1);
@@ -598,36 +594,27 @@ static int quad8_count_read(struct counter_device *counter,
carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
- position = (unsigned long)(borrow ^ carry) << 24;
+ *val = (unsigned long)(borrow ^ carry) << 24;
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
base_offset + 1);
for (i = 0; i < 3; i++)
- position |= (unsigned long)inb(base_offset) << (8 * i);
-
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
+ *val |= (unsigned long)inb(base_offset) << (8 * i);
return 0;
}
static int quad8_count_write(struct counter_device *counter,
- struct counter_count *count, struct counter_count_write_value *val)
+ struct counter_count *count, unsigned long val)
{
const struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
- int err;
- unsigned long position;
int i;
- err = counter_count_write_value_get(&position, COUNTER_COUNT_POSITION,
- val);
- if (err)
- return err;
-
/* Only 24-bit values are supported */
- if (position > 0xFFFFFF)
+ if (val > 0xFFFFFF)
return -EINVAL;
/* Reset Byte Pointer */
@@ -635,7 +622,7 @@ static int quad8_count_write(struct counter_device *counter,
/* Counter can only be set via Preset Register */
for (i = 0; i < 3; i++)
- outb(position >> (8 * i), base_offset);
+ outb(val >> (8 * i), base_offset);
/* Transfer Preset Register to Counter */
outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
@@ -644,9 +631,9 @@ static int quad8_count_write(struct counter_device *counter,
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register back to original value */
- position = priv->preset[count->id];
+ val = priv->preset[count->id];
for (i = 0; i < 3; i++)
- outb(position >> (8 * i), base_offset);
+ outb(val >> (8 * i), base_offset);
/* Reset Borrow, Carry, Compare, and Sign flags */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 2967d0a9ff91..c80fa76bb531 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -49,6 +49,17 @@ config STM32_LPTIMER_CNT
To compile this driver as a module, choose M here: the
module will be called stm32-lptimer-cnt.
+config TI_EQEP
+ tristate "TI eQEP counter driver"
+ depends on (SOC_AM33XX || COMPILE_TEST)
+ select REGMAP_MMIO
+ help
+ Select this option to enable the Texas Instruments Enhanced Quadrature
+ Encoder Pulse (eQEP) counter driver.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ti-eqep.
+
config FTM_QUADDEC
tristate "Flex Timer Module Quadrature decoder driver"
depends on HAS_IOMEM && OF
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index 40d35522937d..55142d1f4c43 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_COUNTER) += counter.o
obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
obj-$(CONFIG_STM32_TIMER_CNT) += stm32-timer-cnt.o
obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
+obj-$(CONFIG_TI_EQEP) += ti-eqep.o
obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c
index 106bc7180cd8..6a683d086008 100644
--- a/drivers/counter/counter.c
+++ b/drivers/counter/counter.c
@@ -220,86 +220,6 @@ ssize_t counter_device_enum_available_read(struct counter_device *counter,
}
EXPORT_SYMBOL_GPL(counter_device_enum_available_read);
-static const char *const counter_signal_level_str[] = {
- [COUNTER_SIGNAL_LEVEL_LOW] = "low",
- [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
-};
-
-/**
- * counter_signal_read_value_set - set counter_signal_read_value data
- * @val: counter_signal_read_value structure to set
- * @type: property Signal data represents
- * @data: Signal data
- *
- * This function sets an opaque counter_signal_read_value structure with the
- * provided Signal data.
- */
-void counter_signal_read_value_set(struct counter_signal_read_value *const val,
- const enum counter_signal_value_type type,
- void *const data)
-{
- if (type == COUNTER_SIGNAL_LEVEL)
- val->len = sprintf(val->buf, "%s\n",
- counter_signal_level_str[*(enum counter_signal_level *)data]);
- else
- val->len = 0;
-}
-EXPORT_SYMBOL_GPL(counter_signal_read_value_set);
-
-/**
- * counter_count_read_value_set - set counter_count_read_value data
- * @val: counter_count_read_value structure to set
- * @type: property Count data represents
- * @data: Count data
- *
- * This function sets an opaque counter_count_read_value structure with the
- * provided Count data.
- */
-void counter_count_read_value_set(struct counter_count_read_value *const val,
- const enum counter_count_value_type type,
- void *const data)
-{
- switch (type) {
- case COUNTER_COUNT_POSITION:
- val->len = sprintf(val->buf, "%lu\n", *(unsigned long *)data);
- break;
- default:
- val->len = 0;
- }
-}
-EXPORT_SYMBOL_GPL(counter_count_read_value_set);
-
-/**
- * counter_count_write_value_get - get counter_count_write_value data
- * @data: Count data
- * @type: property Count data represents
- * @val: counter_count_write_value structure containing data
- *
- * This function extracts Count data from the provided opaque
- * counter_count_write_value structure and stores it at the address provided by
- * @data.
- *
- * RETURNS:
- * 0 on success, negative error number on failure.
- */
-int counter_count_write_value_get(void *const data,
- const enum counter_count_value_type type,
- const struct counter_count_write_value *const val)
-{
- int err;
-
- switch (type) {
- case COUNTER_COUNT_POSITION:
- err = kstrtoul(val->buf, 0, data);
- if (err)
- return err;
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(counter_count_write_value_get);
-
struct counter_attr_parm {
struct counter_device_attr_group *group;
const char *prefix;
@@ -369,6 +289,11 @@ struct counter_signal_unit {
struct counter_signal *signal;
};
+static const char *const counter_signal_value_str[] = {
+ [COUNTER_SIGNAL_LOW] = "low",
+ [COUNTER_SIGNAL_HIGH] = "high"
+};
+
static ssize_t counter_signal_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -377,13 +302,13 @@ static ssize_t counter_signal_show(struct device *dev,
const struct counter_signal_unit *const component = devattr->component;
struct counter_signal *const signal = component->signal;
int err;
- struct counter_signal_read_value val = { .buf = buf };
+ enum counter_signal_value val;
err = counter->ops->signal_read(counter, signal, &val);
if (err)
return err;
- return val.len;
+ return sprintf(buf, "%s\n", counter_signal_value_str[val]);
}
struct counter_name_unit {
@@ -788,13 +713,13 @@ static ssize_t counter_count_show(struct device *dev,
const struct counter_count_unit *const component = devattr->component;
struct counter_count *const count = component->count;
int err;
- struct counter_count_read_value val = { .buf = buf };
+ unsigned long val;
err = counter->ops->count_read(counter, count, &val);
if (err)
return err;
- return val.len;
+ return sprintf(buf, "%lu\n", val);
}
static ssize_t counter_count_store(struct device *dev,
@@ -806,9 +731,13 @@ static ssize_t counter_count_store(struct device *dev,
const struct counter_count_unit *const component = devattr->component;
struct counter_count *const count = component->count;
int err;
- struct counter_count_write_value val = { .buf = buf };
+ unsigned long val;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
- err = counter->ops->count_write(counter, count, &val);
+ err = counter->ops->count_write(counter, count, val);
if (err)
return err;
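
For orientation, a minimal hedged sketch of what a driver's callbacks look like after this interface change (all foo_* identifiers, FOO_* registers and the ceiling value are hypothetical, not part of this patch): count_read()/count_write() now exchange a plain unsigned long and signal_read() fills an enum counter_signal_value, with all sysfs string handling done by the core as in the hunks above.

#include <linux/bits.h>
#include <linux/counter.h>
#include <linux/regmap.h>

#define FOO_CNT		0x00		/* hypothetical count register */
#define FOO_STS		0x04		/* hypothetical signal status register */
#define FOO_CNT_MAX	0x00ffffff	/* hypothetical hardware ceiling */

struct foo_priv {
	struct regmap *regmap;
};

static int foo_count_read(struct counter_device *counter,
			  struct counter_count *count, unsigned long *val)
{
	struct foo_priv *priv = counter->priv;
	u32 cnt;

	regmap_read(priv->regmap, FOO_CNT, &cnt);
	*val = cnt;

	return 0;
}

static int foo_count_write(struct counter_device *counter,
			   struct counter_count *count, unsigned long val)
{
	struct foo_priv *priv = counter->priv;

	if (val > FOO_CNT_MAX)
		return -EINVAL;

	return regmap_write(priv->regmap, FOO_CNT, val);
}

static int foo_signal_read(struct counter_device *counter,
			   struct counter_signal *signal,
			   enum counter_signal_value *val)
{
	struct foo_priv *priv = counter->priv;
	u32 sts;

	regmap_read(priv->regmap, FOO_STS, &sts);
	*val = (sts & BIT(signal->id)) ? COUNTER_SIGNAL_HIGH
				       : COUNTER_SIGNAL_LOW;

	return 0;
}

The ftm-quaddec, stm32 and ti-eqep conversions below follow exactly this pattern.
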
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 4046aa9f9234..c2b3fdfd8b77 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -178,31 +178,25 @@ static const enum counter_count_function ftm_quaddec_count_functions[] = {
static int ftm_quaddec_count_read(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_read_value *val)
+ unsigned long *val)
{
struct ftm_quaddec *const ftm = counter->priv;
uint32_t cntval;
ftm_read(ftm, FTM_CNT, &cntval);
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cntval);
+ *val = cntval;
return 0;
}
static int ftm_quaddec_count_write(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_write_value *val)
+ const unsigned long val)
{
struct ftm_quaddec *const ftm = counter->priv;
- u32 cnt;
- int err;
- err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
- if (err)
- return err;
-
- if (cnt != 0) {
+ if (val != 0) {
dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
return -EINVAL;
}
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index bbc930a5962c..8e276eb655f5 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -347,7 +347,7 @@ static const struct iio_chan_spec stm32_lptim_cnt_channels = {
};
/**
- * stm32_lptim_cnt_function - enumerates stm32 LPTimer counter & encoder modes
+ * enum stm32_lptim_cnt_function - enumerates LPTimer counter & encoder modes
* @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges
* @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature)
*/
@@ -377,8 +377,7 @@ static enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
};
static int stm32_lptim_cnt_read(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
struct stm32_lptim_cnt *const priv = counter->priv;
u32 cnt;
@@ -388,7 +387,7 @@ static int stm32_lptim_cnt_read(struct counter_device *counter,
if (ret)
return ret;
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+ *val = cnt;
return 0;
}
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 644ba18a72ad..3eafccec3beb 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -28,7 +28,7 @@ struct stm32_timer_cnt {
};
/**
- * stm32_count_function - enumerates stm32 timer counter encoder modes
+ * enum stm32_count_function - enumerates stm32 timer counter encoder modes
* @STM32_COUNT_SLAVE_MODE_DISABLED: counts on internal clock when CEN=1
* @STM32_COUNT_ENCODER_MODE_1: counts TI1FP1 edges, depending on TI2FP2 level
* @STM32_COUNT_ENCODER_MODE_2: counts TI2FP2 edges, depending on TI1FP1 level
@@ -48,34 +48,27 @@ static enum counter_count_function stm32_count_functions[] = {
};
static int stm32_count_read(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 cnt;
regmap_read(priv->regmap, TIM_CNT, &cnt);
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+ *val = cnt;
return 0;
}
static int stm32_count_write(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_write_value *val)
+ const unsigned long val)
{
struct stm32_timer_cnt *const priv = counter->priv;
- u32 cnt;
- int err;
-
- err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
- if (err)
- return err;
- if (cnt > priv->ceiling)
+ if (val > priv->ceiling)
return -EINVAL;
- return regmap_write(priv->regmap, TIM_CNT, cnt);
+ return regmap_write(priv->regmap, TIM_CNT, val);
}
static int stm32_count_function_get(struct counter_device *counter,
@@ -219,8 +212,8 @@ static ssize_t stm32_count_enable_write(struct counter_device *counter,
if (enable) {
regmap_read(priv->regmap, TIM_CR1, &cr1);
- if (!(cr1 & TIM_CR1_CEN))
- clk_enable(priv->clk);
+ if (!(cr1 & TIM_CR1_CEN))
+ clk_enable(priv->clk);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
new file mode 100644
index 000000000000..1ff07faef27f
--- /dev/null
+++ b/drivers/counter/ti-eqep.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 David Lechner <david@lechnology.com>
+ *
+ * Counter driver for Texas Instruments Enhanced Quadrature Encoder Pulse (eQEP)
+ */
+
+#include <linux/bitops.h>
+#include <linux/counter.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+/* 32-bit registers */
+#define QPOSCNT 0x0
+#define QPOSINIT 0x4
+#define QPOSMAX 0x8
+#define QPOSCMP 0xc
+#define QPOSILAT 0x10
+#define QPOSSLAT 0x14
+#define QPOSLAT 0x18
+#define QUTMR 0x1c
+#define QUPRD 0x20
+
+/* 16-bit registers */
+#define QWDTMR 0x0 /* 0x24 */
+#define QWDPRD 0x2 /* 0x26 */
+#define QDECCTL 0x4 /* 0x28 */
+#define QEPCTL 0x6 /* 0x2a */
+#define QCAPCTL 0x8 /* 0x2c */
+#define QPOSCTL 0xa /* 0x2e */
+#define QEINT 0xc /* 0x30 */
+#define QFLG 0xe /* 0x32 */
+#define QCLR 0x10 /* 0x34 */
+#define QFRC 0x12 /* 0x36 */
+#define QEPSTS 0x14 /* 0x38 */
+#define QCTMR 0x16 /* 0x3a */
+#define QCPRD 0x18 /* 0x3c */
+#define QCTMRLAT 0x1a /* 0x3e */
+#define QCPRDLAT 0x1c /* 0x40 */
+
+#define QDECCTL_QSRC_SHIFT 14
+#define QDECCTL_QSRC GENMASK(15, 14)
+#define QDECCTL_SOEN BIT(13)
+#define QDECCTL_SPSEL BIT(12)
+#define QDECCTL_XCR BIT(11)
+#define QDECCTL_SWAP BIT(10)
+#define QDECCTL_IGATE BIT(9)
+#define QDECCTL_QAP BIT(8)
+#define QDECCTL_QBP BIT(7)
+#define QDECCTL_QIP BIT(6)
+#define QDECCTL_QSP BIT(5)
+
+#define QEPCTL_FREE_SOFT GENMASK(15, 14)
+#define QEPCTL_PCRM GENMASK(13, 12)
+#define QEPCTL_SEI GENMASK(11, 10)
+#define QEPCTL_IEI GENMASK(9, 8)
+#define QEPCTL_SWI BIT(7)
+#define QEPCTL_SEL BIT(6)
+#define QEPCTL_IEL GENMASK(5, 4)
+#define QEPCTL_PHEN BIT(3)
+#define QEPCTL_QCLM BIT(2)
+#define QEPCTL_UTE BIT(1)
+#define QEPCTL_WDE BIT(0)
+
+/* EQEP Inputs */
+enum {
+ TI_EQEP_SIGNAL_QEPA, /* QEPA/XCLK */
+ TI_EQEP_SIGNAL_QEPB, /* QEPB/XDIR */
+};
+
+/* Position Counter Input Modes */
+enum {
+ TI_EQEP_COUNT_FUNC_QUAD_COUNT,
+ TI_EQEP_COUNT_FUNC_DIR_COUNT,
+ TI_EQEP_COUNT_FUNC_UP_COUNT,
+ TI_EQEP_COUNT_FUNC_DOWN_COUNT,
+};
+
+enum {
+ TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES,
+ TI_EQEP_SYNAPSE_ACTION_RISING_EDGE,
+ TI_EQEP_SYNAPSE_ACTION_NONE,
+};
+
+struct ti_eqep_cnt {
+ struct counter_device counter;
+ struct regmap *regmap32;
+ struct regmap *regmap16;
+};
+
+static int ti_eqep_count_read(struct counter_device *counter,
+ struct counter_count *count, unsigned long *val)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 cnt;
+
+ regmap_read(priv->regmap32, QPOSCNT, &cnt);
+ *val = cnt;
+
+ return 0;
+}
+
+static int ti_eqep_count_write(struct counter_device *counter,
+ struct counter_count *count, unsigned long val)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 max;
+
+ regmap_read(priv->regmap32, QPOSMAX, &max);
+ if (val > max)
+ return -EINVAL;
+
+ return regmap_write(priv->regmap32, QPOSCNT, val);
+}
+
+static int ti_eqep_function_get(struct counter_device *counter,
+ struct counter_count *count, size_t *function)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qdecctl;
+
+ regmap_read(priv->regmap16, QDECCTL, &qdecctl);
+ *function = (qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT;
+
+ return 0;
+}
+
+static int ti_eqep_function_set(struct counter_device *counter,
+ struct counter_count *count, size_t function)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+
+ return regmap_write_bits(priv->regmap16, QDECCTL, QDECCTL_QSRC,
+ function << QDECCTL_QSRC_SHIFT);
+}
+
+static int ti_eqep_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse, size_t *action)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ size_t function;
+ u32 qdecctl;
+ int err;
+
+ err = ti_eqep_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ switch (function) {
+ case TI_EQEP_COUNT_FUNC_QUAD_COUNT:
+ /* In quadrature mode, the rising and falling edge of both
+ * QEPA and QEPB trigger QCLK.
+ */
+ *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case TI_EQEP_COUNT_FUNC_DIR_COUNT:
+ /* In direction-count mode only rising edge of QEPA is counted
+ * and QEPB gives direction.
+ */
+ switch (synapse->signal->id) {
+ case TI_EQEP_SIGNAL_QEPA:
+ *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ default:
+ *action = TI_EQEP_SYNAPSE_ACTION_NONE;
+ break;
+ }
+ break;
+ case TI_EQEP_COUNT_FUNC_UP_COUNT:
+ case TI_EQEP_COUNT_FUNC_DOWN_COUNT:
+ /* In up/down-count modes only QEPA is counted and QEPB is not
+ * used.
+ */
+ switch (synapse->signal->id) {
+ case TI_EQEP_SIGNAL_QEPA:
+ err = regmap_read(priv->regmap16, QDECCTL, &qdecctl);
+ if (err)
+ return err;
+
+ if (qdecctl & QDECCTL_XCR)
+ *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ else
+ *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ default:
+ *action = TI_EQEP_SYNAPSE_ACTION_NONE;
+ break;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct counter_ops ti_eqep_counter_ops = {
+ .count_read = ti_eqep_count_read,
+ .count_write = ti_eqep_count_write,
+ .function_get = ti_eqep_function_get,
+ .function_set = ti_eqep_function_set,
+ .action_get = ti_eqep_action_get,
+};
+
+static ssize_t ti_eqep_position_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qposmax;
+
+ regmap_read(priv->regmap32, QPOSMAX, &qposmax);
+
+ return sprintf(buf, "%u\n", qposmax);
+}
+
+static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ u32 res;
+
+ err = kstrtouint(buf, 0, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write(priv->regmap32, QPOSMAX, res);
+
+ return len;
+}
+
+static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qposinit;
+
+ regmap_read(priv->regmap32, QPOSINIT, &qposinit);
+
+ return sprintf(buf, "%u\n", qposinit);
+}
+
+static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ u32 res;
+
+ err = kstrtouint(buf, 0, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write(priv->regmap32, QPOSINIT, res);
+
+ return len;
+}
+
+static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qepctl;
+
+ regmap_read(priv->regmap16, QEPCTL, &qepctl);
+
+ return sprintf(buf, "%u\n", !!(qepctl & QEPCTL_PHEN));
+}
+
+static ssize_t ti_eqep_position_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ bool res;
+
+ err = kstrtobool(buf, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, res ? -1 : 0);
+
+ return len;
+}
+
+static struct counter_count_ext ti_eqep_position_ext[] = {
+ {
+ .name = "ceiling",
+ .read = ti_eqep_position_ceiling_read,
+ .write = ti_eqep_position_ceiling_write,
+ },
+ {
+ .name = "floor",
+ .read = ti_eqep_position_floor_read,
+ .write = ti_eqep_position_floor_write,
+ },
+ {
+ .name = "enable",
+ .read = ti_eqep_position_enable_read,
+ .write = ti_eqep_position_enable_write,
+ },
+};
+
+static struct counter_signal ti_eqep_signals[] = {
+ [TI_EQEP_SIGNAL_QEPA] = {
+ .id = TI_EQEP_SIGNAL_QEPA,
+ .name = "QEPA"
+ },
+ [TI_EQEP_SIGNAL_QEPB] = {
+ .id = TI_EQEP_SIGNAL_QEPB,
+ .name = "QEPB"
+ },
+};
+
+static const enum counter_count_function ti_eqep_position_functions[] = {
+ [TI_EQEP_COUNT_FUNC_QUAD_COUNT] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+ [TI_EQEP_COUNT_FUNC_DIR_COUNT] = COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
+ [TI_EQEP_COUNT_FUNC_UP_COUNT] = COUNTER_COUNT_FUNCTION_INCREASE,
+ [TI_EQEP_COUNT_FUNC_DOWN_COUNT] = COUNTER_COUNT_FUNCTION_DECREASE,
+};
+
+static const enum counter_synapse_action ti_eqep_position_synapse_actions[] = {
+ [TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ [TI_EQEP_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ [TI_EQEP_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static struct counter_synapse ti_eqep_position_synapses[] = {
+ {
+ .actions_list = ti_eqep_position_synapse_actions,
+ .num_actions = ARRAY_SIZE(ti_eqep_position_synapse_actions),
+ .signal = &ti_eqep_signals[TI_EQEP_SIGNAL_QEPA],
+ },
+ {
+ .actions_list = ti_eqep_position_synapse_actions,
+ .num_actions = ARRAY_SIZE(ti_eqep_position_synapse_actions),
+ .signal = &ti_eqep_signals[TI_EQEP_SIGNAL_QEPB],
+ },
+};
+
+static struct counter_count ti_eqep_counts[] = {
+ {
+ .id = 0,
+ .name = "QPOSCNT",
+ .functions_list = ti_eqep_position_functions,
+ .num_functions = ARRAY_SIZE(ti_eqep_position_functions),
+ .synapses = ti_eqep_position_synapses,
+ .num_synapses = ARRAY_SIZE(ti_eqep_position_synapses),
+ .ext = ti_eqep_position_ext,
+ .num_ext = ARRAY_SIZE(ti_eqep_position_ext),
+ },
+};
+
+static const struct regmap_config ti_eqep_regmap32_config = {
+ .name = "32-bit",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x24,
+};
+
+static const struct regmap_config ti_eqep_regmap16_config = {
+ .name = "16-bit",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = 0x1e,
+};
+
+static int ti_eqep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_eqep_cnt *priv;
+ void __iomem *base;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap32 = devm_regmap_init_mmio(dev, base,
+ &ti_eqep_regmap32_config);
+ if (IS_ERR(priv->regmap32))
+ return PTR_ERR(priv->regmap32);
+
+ priv->regmap16 = devm_regmap_init_mmio(dev, base + 0x24,
+ &ti_eqep_regmap16_config);
+ if (IS_ERR(priv->regmap16))
+ return PTR_ERR(priv->regmap16);
+
+ priv->counter.name = dev_name(dev);
+ priv->counter.parent = dev;
+ priv->counter.ops = &ti_eqep_counter_ops;
+ priv->counter.counts = ti_eqep_counts;
+ priv->counter.num_counts = ARRAY_SIZE(ti_eqep_counts);
+ priv->counter.signals = ti_eqep_signals;
+ priv->counter.num_signals = ARRAY_SIZE(ti_eqep_signals);
+ priv->counter.priv = priv;
+
+ platform_set_drvdata(pdev, priv);
+
+ /*
+ * Need to make sure power is turned on. On AM33xx, this comes from the
+ * parent PWMSS bus driver. On AM17xx, this comes from the PSC power
+ * domain.
+ */
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ err = counter_register(&priv->counter);
+ if (err < 0) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ti_eqep_remove(struct platform_device *pdev)
+{
+ struct ti_eqep_cnt *priv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ counter_unregister(&priv->counter);
+	pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_eqep_of_match[] = {
+ { .compatible = "ti,am3352-eqep", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ti_eqep_of_match);
+
+static struct platform_driver ti_eqep_driver = {
+ .probe = ti_eqep_probe,
+ .remove = ti_eqep_remove,
+ .driver = {
+ .name = "ti-eqep-cnt",
+ .of_match_table = ti_eqep_of_match,
+ },
+};
+module_platform_driver(ti_eqep_driver);
+
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("TI eQEP counter driver");
+MODULE_LICENSE("GPL v2");
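
One hedged note on the new driver above: the eQEP block mixes 32-bit registers (QPOSCNT through QUPRD, 0x00-0x20) with 16-bit registers that start at absolute offset 0x24, which is why probe creates two MMIO regmaps and why the 16-bit defines restart at 0x0 with their absolute offsets kept as comments. The fragment below (qepctl and the surrounding function are assumed, not taken from the patch) only illustrates how a relative define resolves against the second regmap:

/* Illustrative sketch only: QEPCTL is 0x06 into the 16-bit block that
 * regmap16 maps at base + 0x24, i.e. absolute offset 0x2a.
 */
u32 qepctl;

regmap_read(priv->regmap16, QEPCTL, &qepctl);
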
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 35b4f700f054..58151ca56695 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -48,9 +48,9 @@ config PPC_PASEMI_CPUFREQ
PWRficient processors.
config POWERNV_CPUFREQ
- tristate "CPU frequency scaling for IBM POWERNV platform"
- depends on PPC_POWERNV
- default y
- help
+ tristate "CPU frequency scaling for IBM POWERNV platform"
+ depends on PPC_POWERNV
+ default y
+ help
This adds support for CPU frequency switching on IBM POWERNV
platform
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index dfa6457deaf6..a6528388952e 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -4,17 +4,17 @@
#
config X86_INTEL_PSTATE
- bool "Intel P state control"
- depends on X86
- select ACPI_PROCESSOR if ACPI
- select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
- help
- This driver provides a P state for Intel core processors.
+ bool "Intel P state control"
+ depends on X86
+ select ACPI_PROCESSOR if ACPI
+ select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
+ help
+ This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
- the scaling driver and governor for Sandy bridge processors.
+ the scaling driver and governor for Sandy bridge processors.
When this driver is enabled it will become the preferred
- scaling driver for Sandy bridge processors.
+ scaling driver for Sandy bridge processors.
If in doubt, say N.
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 88727b7c0d59..c0aeedd66f02 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -16,7 +16,7 @@ config CPU_IDLE
if CPU_IDLE
config CPU_IDLE_MULTIPLE_DRIVERS
- bool
+ bool
config CPU_IDLE_GOV_LADDER
bool "Ladder governor (for periodic timer tick)"
@@ -63,13 +63,13 @@ source "drivers/cpuidle/Kconfig.powerpc"
endmenu
config HALTPOLL_CPUIDLE
- tristate "Halt poll cpuidle driver"
- depends on X86 && KVM_GUEST
- default y
- help
- This option enables halt poll cpuidle driver, which allows to poll
- before halting in the guest (more efficient than polling in the
- host via halt_poll_ns for some scenarios).
+ tristate "Halt poll cpuidle driver"
+ depends on X86 && KVM_GUEST
+ default y
+ help
+	  This option enables the halt poll cpuidle driver, which allows polling
+	  before halting in the guest (more efficient than polling in the
+	  host via halt_poll_ns for some scenarios).
endif
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index d8530475493c..a224d33dda7f 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -3,15 +3,15 @@
# ARM CPU Idle drivers
#
config ARM_CPUIDLE
- bool "Generic ARM/ARM64 CPU idle Driver"
- select DT_IDLE_STATES
+ bool "Generic ARM/ARM64 CPU idle Driver"
+ select DT_IDLE_STATES
select CPU_IDLE_MULTIPLE_DRIVERS
- help
- Select this to enable generic cpuidle driver for ARM.
- It provides a generic idle driver whose idle states are configured
- at run-time through DT nodes. The CPUidle suspend backend is
- initialized by calling the CPU operations init idle hook
- provided by architecture code.
+ help
+ Select this to enable generic cpuidle driver for ARM.
+ It provides a generic idle driver whose idle states are configured
+ at run-time through DT nodes. The CPUidle suspend backend is
+ initialized by calling the CPU operations init idle hook
+ provided by architecture code.
config ARM_PSCI_CPUIDLE
bool "PSCI CPU idle Driver"
@@ -65,21 +65,21 @@ config ARM_U8500_CPUIDLE
bool "Cpu Idle Driver for the ST-E u8500 processors"
depends on ARCH_U8500 && !ARM64
help
- Select this to enable cpuidle for ST-E u8500 processors
+ Select this to enable cpuidle for ST-E u8500 processors.
config ARM_AT91_CPUIDLE
bool "Cpu Idle Driver for the AT91 processors"
default y
depends on ARCH_AT91 && !ARM64
help
- Select this to enable cpuidle for AT91 processors
+ Select this to enable cpuidle for AT91 processors.
config ARM_EXYNOS_CPUIDLE
bool "Cpu Idle Driver for the Exynos processors"
depends on ARCH_EXYNOS && !ARM64
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
help
- Select this to enable cpuidle for Exynos processors
+ Select this to enable cpuidle for Exynos processors.
config ARM_MVEBU_V7_CPUIDLE
bool "CPU Idle Driver for mvebu v7 family processors"
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 569dbac443bd..0005be5ea2b4 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -572,7 +572,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
return -EINVAL;
for (i = 0; i < drv->state_count; i++)
- if (drv->states[i].disabled)
+ if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
per_cpu(cpuidle_devices, dev->cpu) = dev;
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 9f1ace9c53da..f7e83613ae94 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -53,7 +53,6 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
state->target_residency_ns = 0;
state->power_usage = -1;
state->enter = poll_idle;
- state->disabled = false;
state->flags = CPUIDLE_FLAG_POLLING;
}
EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
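
As a hedged aside on the two hunks above: with the per-state 'disabled' field gone, a driver that knows an idle state cannot be used on the running platform tags it with CPUIDLE_FLAG_UNUSABLE before registration, and __cpuidle_register_device() then latches that into CPUIDLE_STATE_DISABLED_BY_DRIVER. A minimal sketch, assuming a hypothetical platform check and the usual drv/state_count driver fields:

/* Illustrative sketch only. */
for (i = 0; i < drv->state_count; i++) {
	if (!foo_platform_supports_state(i))	/* hypothetical check */
		drv->states[i].flags |= CPUIDLE_FLAG_UNUSABLE;
}
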
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43ed1b621718..91eb768d4221 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -289,6 +289,7 @@ config CRYPTO_DEV_TALITOS
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_HASH
+ select CRYPTO_LIB_DES
select HW_RANDOM
depends on FSL_SOC
help
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index dc1eb97d57f7..62b04e19067c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -179,14 +179,14 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
- if (!qp_ctx->c_in_pool) {
+ if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
goto err_free_req_list;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
- if (!qp_ctx->c_out_pool) {
+ if (IS_ERR(qp_ctx->c_out_pool)) {
dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
}
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index abc7a7f64d64..ef0e482ee04f 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -68,7 +68,7 @@ static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
static const struct file_operations adf_ctl_ops = {
.owner = THIS_MODULE,
.unlocked_ioctl = adf_ctl_ioctl,
- .compat_ioctl = adf_ctl_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
struct adf_ctl_drv_info {
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index eccdda1f7b71..46e46047a1f7 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -309,7 +309,7 @@ static ssize_t resource_show(struct device *dev,
return sprintf(buf, "%#llx\n", dev_dax_resource(dev_dax));
}
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -322,6 +322,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev_to_node(dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -329,8 +336,8 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
return 0;
- if (a == &dev_attr_resource.attr)
- return 0400;
+ if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
+ return 0;
return a->mode;
}
@@ -339,6 +346,7 @@ static struct attribute *dev_dax_attributes[] = {
&dev_attr_size.attr,
&dev_attr_target_node.attr,
&dev_attr_resource.attr,
+ &dev_attr_numa_node.attr,
NULL,
};
@@ -373,6 +381,11 @@ static void dev_dax_release(struct device *dev)
kfree(dev_dax);
}
+static const struct device_type dev_dax_type = {
+ .release = dev_dax_release,
+ .groups = dax_attribute_groups,
+};
+
static void unregister_dev_dax(void *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
@@ -430,8 +443,7 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
else
dev->class = dax_class;
dev->parent = parent;
- dev->groups = dax_attribute_groups;
- dev->release = dev_dax_release;
+ dev->type = &dev_dax_type;
dev_set_name(dev, "dax%d.%d", dax_region->id, id);
rc = device_add(dev);
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 6eb6dfdf19bf..2bedf8414fff 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -25,20 +25,20 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return ERR_CAST(ndns);
- nsio = to_nd_namespace_io(&ndns->dev);
/* parse the 'pfn' info block via ->rw_bytes */
- rc = devm_nsio_enable(dev, nsio);
+ rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
if (rc)
return ERR_PTR(rc);
rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
if (rc)
return ERR_PTR(rc);
- devm_nsio_disable(dev, nsio);
+ devm_namespace_disable(dev, ndns);
/* reserve the metadata area, device-dax will reserve the data */
pfn_sb = nd_pfn->pfn_sb;
offset = le64_to_cpu(pfn_sb->dataoff);
+ nsio = to_nd_namespace_io(&ndns->dev);
if (!devm_request_mem_region(dev, nsio->res.start, offset,
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve metadata\n");
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index f840e61e5a27..425149e8bab0 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -921,7 +921,9 @@ int devfreq_suspend_device(struct devfreq *devfreq)
}
if (devfreq->suspend_freq) {
+ mutex_lock(&devfreq->lock);
ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
+ mutex_unlock(&devfreq->lock);
if (ret)
return ret;
}
@@ -949,7 +951,9 @@ int devfreq_resume_device(struct devfreq *devfreq)
return 0;
if (devfreq->resume_freq) {
+ mutex_lock(&devfreq->lock);
ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
+ mutex_unlock(&devfreq->lock);
if (ret)
return ret;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 433d91d710e4..ce41cd9b758a 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
@@ -334,7 +334,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
if (IS_ERR(name))
return PTR_ERR(name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (!list_empty(&dmabuf->attachments)) {
ret = -EBUSY;
kfree(name);
@@ -344,7 +344,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
dmabuf->name = name;
out_unlock:
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return ret;
}
@@ -403,10 +403,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
}
static const struct file_operations dma_buf_fops = {
@@ -415,9 +415,7 @@ static const struct file_operations dma_buf_fops = {
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
.unlocked_ioctl = dma_buf_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = dma_buf_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.show_fdinfo = dma_buf_show_fdinfo,
};
@@ -525,6 +523,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return ERR_PTR(-EINVAL);
}
+ if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
+ exp_info->ops->dynamic_mapping))
+ return ERR_PTR(-EINVAL);
+
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
@@ -645,10 +647,11 @@ void dma_buf_put(struct dma_buf *dmabuf)
EXPORT_SYMBOL_GPL(dma_buf_put);
/**
- * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
* calls attach() of dma_buf_ops to allow device-specific attach functionality
- * @dmabuf: [in] buffer to attach device to.
- * @dev: [in] device to be attached.
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ * @dynamic_mapping: [in] calling convention for map/unmap
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@@ -662,8 +665,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* accessible to @dev, and cannot be moved to a more suitable place. This is
* indicated with the error code -EBUSY.
*/
-struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev)
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ bool dynamic_mapping)
{
struct dma_buf_attachment *attach;
int ret;
@@ -677,24 +681,68 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
attach->dev = dev;
attach->dmabuf = dmabuf;
-
- mutex_lock(&dmabuf->lock);
+ attach->dynamic_mapping = dynamic_mapping;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
+ dma_resv_lock(dmabuf->resv, NULL);
list_add(&attach->node, &dmabuf->attachments);
+ dma_resv_unlock(dmabuf->resv);
- mutex_unlock(&dmabuf->lock);
+ /* When either the importer or the exporter can't handle dynamic
+ * mappings we cache the mapping here to avoid issues with the
+ * reservation object lock.
+ */
+ if (dma_buf_attachment_is_dynamic(attach) !=
+ dma_buf_is_dynamic(dmabuf)) {
+ struct sg_table *sgt;
+
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
+ sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ if (!sgt)
+ sgt = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_unlock;
+ }
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ attach->sgt = sgt;
+ attach->dir = DMA_BIDIRECTIONAL;
+ }
return attach;
err_attach:
kfree(attach);
- mutex_unlock(&dmabuf->lock);
return ERR_PTR(ret);
+
+err_unlock:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+
+/**
+ * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
+ * mapping.
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ return dma_buf_dynamic_attach(dmabuf, dev, false);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
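
To make the calling convention introduced above concrete, here is a hedged sketch of a dynamic importer (only the dma-buf/dma-resv calls are taken from the patch; dmabuf, dev and the surrounding error handling are assumed context): the importer opts in by passing true to dma_buf_dynamic_attach() and must hold the buffer's reservation object lock around map/unmap, which is exactly what the dma_resv_assert_held() checks added to dma_buf_map_attachment() and dma_buf_unmap_attachment() enforce.

/* Illustrative sketch only. */
struct dma_buf_attachment *attach;
struct sg_table *sgt;

attach = dma_buf_dynamic_attach(dmabuf, dev, true);
if (IS_ERR(attach))
	return PTR_ERR(attach);

dma_resv_lock(dmabuf->resv, NULL);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (!IS_ERR(sgt)) {
	/* ... program the device with sgt ... */
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
}
dma_resv_unlock(dmabuf->resv);

dma_buf_detach(dmabuf, attach);
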
@@ -711,15 +759,22 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (WARN_ON(!dmabuf || !attach))
return;
- if (attach->sgt)
+ if (attach->sgt) {
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
- mutex_lock(&dmabuf->lock);
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ }
+
+ dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
+ dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
- mutex_unlock(&dmabuf->lock);
kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
@@ -749,6 +804,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt) {
/*
* Two mappings with different directions for the same
@@ -761,6 +819,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -793,9 +854,15 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt == sg_table)
return;
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
@@ -1171,13 +1238,10 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
- ret = mutex_lock_interruptible(&buf_obj->lock);
- if (ret) {
- seq_puts(s,
- "\tERROR locking buffer object: skipping\n");
- continue;
- }
+ ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
+ if (ret)
+ goto error_unlock;
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
@@ -1223,19 +1287,23 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
attach_count++;
}
+ dma_resv_unlock(buf_obj->resv);
seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
size += buf_obj->size;
- mutex_unlock(&buf_obj->lock);
}
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
mutex_unlock(&db_list.lock);
return 0;
+
+error_unlock:
+ mutex_unlock(&db_list.lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 2c136aee3e79..052a41e2451c 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -273,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_fence_free);
+static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+{
+ bool was_set;
+
+ lockdep_assert_held(fence->lock);
+
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return false;
+
+ if (!was_set && fence->ops->enable_signaling) {
+ trace_dma_fence_enable_signal(fence);
+
+ if (!fence->ops->enable_signaling(fence)) {
+ dma_fence_signal_locked(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
+
/**
* dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: the fence to enable
@@ -285,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
- fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- spin_lock_irqsave(fence->lock, flags);
-
- if (!fence->ops->enable_signaling(fence))
- dma_fence_signal_locked(fence);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return;
- spin_unlock_irqrestore(fence->lock, flags);
- }
+ spin_lock_irqsave(fence->lock, flags);
+ __dma_fence_enable_signaling(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -331,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
{
unsigned long flags;
int ret = 0;
- bool was_set;
if (WARN_ON(!fence || !func))
return -EINVAL;
@@ -343,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- ret = -ENOENT;
- else if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- ret = -ENOENT;
- }
- }
-
- if (!ret) {
+ if (__dma_fence_enable_signaling(fence)) {
cb->func = func;
list_add_tail(&cb->node, &fence->cb_list);
- } else
+ } else {
INIT_LIST_HEAD(&cb->node);
+ ret = -ENOENT;
+ }
+
spin_unlock_irqrestore(fence->lock, flags);
return ret;
@@ -461,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
struct default_wait_cb cb;
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- bool was_set;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return ret;
@@ -473,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!__dma_fence_enable_signaling(fence))
goto out;
- if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- goto out;
- }
- }
-
if (!timeout) {
ret = 0;
goto out;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 6713cfb1995c..348b3a9170fa 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -408,5 +408,5 @@ const struct file_operations sw_sync_debugfs_fops = {
.open = sw_sync_debugfs_open,
.release = sw_sync_debugfs_release,
.unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 25c5c071645b..76fb072c22dc 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -480,5 +480,5 @@ static const struct file_operations sync_file_fops = {
.release = sync_file_release,
.poll = sync_file_poll,
.unlocked_ioctl = sync_file_ioctl,
- .compat_ioctl = sync_file_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 7af874b69ffb..6fa1eba9d477 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -15,19 +15,19 @@ menuconfig DMADEVICES
be empty in some cases.
config DMADEVICES_DEBUG
- bool "DMA Engine debugging"
- depends on DMADEVICES != n
- help
- This is an option for use by developers; most people should
- say N here. This enables DMA engine core and driver debugging.
+ bool "DMA Engine debugging"
+ depends on DMADEVICES != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables DMA engine core and driver debugging.
config DMADEVICES_VDEBUG
- bool "DMA Engine verbose debugging"
- depends on DMADEVICES_DEBUG != n
- help
- This is an option for use by developers; most people should
- say N here. This enables deeper (more verbose) debugging of
- the DMA engine core and drivers.
+ bool "DMA Engine verbose debugging"
+ depends on DMADEVICES_DEBUG != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables deeper (more verbose) debugging of
+ the DMA engine core and drivers.
if DMADEVICES
@@ -215,28 +215,28 @@ config FSL_EDMA
This module can be found on Freescale Vybrid and LS-1 SoCs.
config FSL_QDMA
- tristate "NXP Layerscape qDMA engine support"
- depends on ARM || ARM64
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- select DMA_ENGINE_RAID
- select ASYNC_TX_ENABLE_CHANNEL_SWITCH
- help
- Support the NXP Layerscape qDMA engine with command queue and legacy mode.
- Channel virtualization is supported through enqueuing of DMA jobs to,
- or dequeuing DMA jobs from, different work queues.
- This module can be found on NXP Layerscape SoCs.
+ tristate "NXP Layerscape qDMA engine support"
+ depends on ARM || ARM64
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+ Channel virtualization is supported through enqueuing of DMA jobs to,
+ or dequeuing DMA jobs from, different work queues.
+ This module can be found on NXP Layerscape SoCs.
	  The qdma driver only works on SoCs with a DPAA hardware block.
config FSL_RAID
- tristate "Freescale RAID engine Support"
- depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
- select DMA_ENGINE
- select DMA_ENGINE_RAID
- ---help---
- Enable support for Freescale RAID Engine. RAID Engine is
- available on some QorIQ SoCs (like P5020/P5040). It has
- the capability to offload memcpy, xor and pq computation
+ tristate "Freescale RAID engine Support"
+ depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ ---help---
+ Enable support for Freescale RAID Engine. RAID Engine is
+ available on some QorIQ SoCs (like P5020/P5040). It has
+ the capability to offload memcpy, xor and pq computation
for raid5/6.
config IMG_MDC_DMA
@@ -342,6 +342,26 @@ config MCF_EDMA
minimal intervention from a host processor.
This module can be found on Freescale ColdFire mcf5441x SoCs.
+config MILBEAUT_HDMAC
+ tristate "Milbeaut AHB DMA support"
+ depends on ARCH_MILBEAUT || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Say yes here to support the Socionext Milbeaut
+ HDMAC device.
+
+config MILBEAUT_XDMAC
+ tristate "Milbeaut AXI DMA support"
+ depends on ARCH_MILBEAUT || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Say yes here to support the Socionext Milbeaut
+ XDMAC device.
+
config MMP_PDMA
bool "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
@@ -635,6 +655,10 @@ config XILINX_DMA
destination address.
AXI DMA engine provides high-bandwidth one dimensional direct
memory access between memory and AXI4-Stream target peripherals.
+	  AXI MCDMA engine provides high-bandwidth direct memory access
+	  between memory and AXI4-Stream target peripherals. It provides
+	  a scatter-gather interface with independent configuration support
+	  for multiple channels.
config XILINX_ZYNQMP_DMA
tristate "Xilinx ZynqMP DMA Engine"
@@ -665,10 +689,14 @@ source "drivers/dma/dw-edma/Kconfig"
source "drivers/dma/hsu/Kconfig"
+source "drivers/dma/sf-pdma/Kconfig"
+
source "drivers/dma/sh/Kconfig"
source "drivers/dma/ti/Kconfig"
+source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
+
# clients
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index f5ce8665e944..42d7e2fc64fa 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -45,6 +45,8 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
+obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
@@ -60,6 +62,7 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_SF_PDMA) += sf-pdma/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
@@ -75,6 +78,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-y += mediatek/
obj-y += qcom/
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b58ac720d9a1..f71c9f77d405 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1957,21 +1957,16 @@ static int atmel_xdmac_resume(struct device *dev)
static int at_xdmac_probe(struct platform_device *pdev)
{
- struct resource *res;
struct at_xdmac *atxdmac;
int irq, size, nr_channels, i, ret;
void __iomem *base;
u32 reg;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index cafb1cc065bb..fa626acdc9b9 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -858,13 +858,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzdma->soc_data = soc_data;
platform_set_drvdata(pdev, jzdma);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O memory\n");
- return -EINVAL;
- }
-
- jzdma->chn_base = devm_ioremap_resource(dev, res);
+ jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jzdma->chn_base))
return PTR_ERR(jzdma->chn_base);
@@ -987,6 +981,7 @@ static int jz4780_dma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
+ clk_disable_unprepare(jzdma->clk);
free_irq(jzdma->irq, jzdma);
for (i = 0; i < jzdma->soc_data->nb_channels; i++)
@@ -1019,11 +1014,18 @@ static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
+static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
+ .nb_channels = 8,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
+ { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index c90c798e5ec3..0585d749d935 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev)
data->chip = chip;
- chip->clk = devm_clk_get(chip->dev, "hclk");
+ chip->clk = devm_clk_get_optional(chip->dev, "hclk");
if (IS_ERR(chip->clk))
return PTR_ERR(chip->clk);
err = clk_prepare_enable(chip->clk);
diff --git a/drivers/dma/fsl-dpaa2-qdma/Kconfig b/drivers/dma/fsl-dpaa2-qdma/Kconfig
new file mode 100644
index 000000000000..258ed6be934d
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/Kconfig
@@ -0,0 +1,9 @@
+menuconfig FSL_DPAA2_QDMA
+ tristate "NXP DPAA2 QDMA"
+ depends on ARM64
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ NXP Data Path Acceleration Architecture 2 QDMA driver,
+ using the NXP MC bus driver.
diff --git a/drivers/dma/fsl-dpaa2-qdma/Makefile b/drivers/dma/fsl-dpaa2-qdma/Makefile
new file mode 100644
index 000000000000..c1d0226f2bd7
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the NXP DPAA2 qDMA controllers
+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma.o dpdmai.o
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
new file mode 100644
index 000000000000..c70a7965f140
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmapool.h>
+#include <linux/of_irq.h>
+#include <linux/iommu.h>
+#include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "../virt-dma.h"
+#include "dpdmai.h"
+#include "dpaa2-qdma.h"
+
+static bool smmu_disable = true;
+
+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
+}
+
+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct dpaa2_qdma_comp, vdesc);
+}
+
+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+ struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
+
+ dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
+ sizeof(struct dpaa2_fd),
+ sizeof(struct dpaa2_fd), 0);
+ if (!dpaa2_chan->fd_pool)
+ goto err;
+
+ dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
+ sizeof(struct dpaa2_fl_entry),
+ sizeof(struct dpaa2_fl_entry), 0);
+ if (!dpaa2_chan->fl_pool)
+ goto err_fd;
+
+ dpaa2_chan->sdd_pool =
+ dma_pool_create("sdd_pool", dev,
+ sizeof(struct dpaa2_qdma_sd_d),
+ sizeof(struct dpaa2_qdma_sd_d), 0);
+ if (!dpaa2_chan->sdd_pool)
+ goto err_fl;
+
+ return dpaa2_qdma->desc_allocated++;
+err_fl:
+ dma_pool_destroy(dpaa2_chan->fl_pool);
+err_fd:
+ dma_pool_destroy(dpaa2_chan->fd_pool);
+err:
+ return -ENOMEM;
+}
+
+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+ unsigned long flags;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
+ spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
+
+ dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
+ dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);
+
+ dma_pool_destroy(dpaa2_chan->fd_pool);
+ dma_pool_destroy(dpaa2_chan->fl_pool);
+ dma_pool_destroy(dpaa2_chan->sdd_pool);
+ dpaa2_qdma->desc_allocated--;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct dpaa2_qdma_comp *
+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
+{
+ struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
+ struct device *dev = &qdma_priv->dpdmai_dev->dev;
+ struct dpaa2_qdma_comp *comp_temp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ if (list_empty(&dpaa2_chan->comp_free)) {
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
+ if (!comp_temp)
+ goto err;
+ comp_temp->fd_virt_addr =
+ dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
+ &comp_temp->fd_bus_addr);
+ if (!comp_temp->fd_virt_addr)
+ goto err_comp;
+
+ comp_temp->fl_virt_addr =
+ dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
+ &comp_temp->fl_bus_addr);
+ if (!comp_temp->fl_virt_addr)
+ goto err_fd_virt;
+
+ comp_temp->desc_virt_addr =
+ dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
+ &comp_temp->desc_bus_addr);
+ if (!comp_temp->desc_virt_addr)
+ goto err_fl_virt;
+
+ comp_temp->qchan = dpaa2_chan;
+ return comp_temp;
+ }
+
+ comp_temp = list_first_entry(&dpaa2_chan->comp_free,
+ struct dpaa2_qdma_comp, list);
+ list_del(&comp_temp->list);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+ comp_temp->qchan = dpaa2_chan;
+
+ return comp_temp;
+
+err_fl_virt:
+ dma_pool_free(dpaa2_chan->fl_pool,
+ comp_temp->fl_virt_addr,
+ comp_temp->fl_bus_addr);
+err_fd_virt:
+ dma_pool_free(dpaa2_chan->fd_pool,
+ comp_temp->fd_virt_addr,
+ comp_temp->fd_bus_addr);
+err_comp:
+ kfree(comp_temp);
+err:
+ dev_err(dev, "Failed to request descriptor\n");
+ return NULL;
+}
+
+static void
+dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
+{
+ struct dpaa2_fd *fd;
+
+ fd = dpaa2_comp->fd_virt_addr;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+
+ /* fd populated */
+ dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
+
+ /*
+	 * Bypass memory translation, frame list format, short length disable;
+	 * we need to disable BMT if fsl-mc uses an IOVA address.
+ */
+ if (smmu_disable)
+ dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
+ dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
+
+ dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
+}
+
+/* first frame list for descriptor buffer */
+static void
+dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
+ struct dpaa2_qdma_comp *dpaa2_comp,
+ bool wrt_changed)
+{
+ struct dpaa2_qdma_sd_d *sdd;
+
+ sdd = dpaa2_comp->desc_virt_addr;
+ memset(sdd, 0, 2 * (sizeof(*sdd)));
+
+ /* source descriptor CMD */
+ sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
+ sdd++;
+
+ /* dest descriptor CMD */
+ if (wrt_changed)
+ sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
+ else
+ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
+
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ /* first frame list to source descriptor */
+ dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
+ dpaa2_fl_set_len(f_list, 0x20);
+ dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
+
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+/* source and destination frame list */
+static void
+dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
+ dma_addr_t dst, dma_addr_t src,
+ size_t len, uint8_t fmt)
+{
+ /* source frame list to source buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ dpaa2_fl_set_addr(f_list, src);
+ dpaa2_fl_set_len(f_list, len);
+
+ /* single buffer frame or scatter gather frame */
+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+
+ f_list++;
+
+ /* destination frame list to destination buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ dpaa2_fl_set_addr(f_list, dst);
+ dpaa2_fl_set_len(f_list, len);
+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+ /* single buffer frame or scatter gather frame */
+ dpaa2_fl_set_final(f_list, QDMA_FL_F);
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+static struct dma_async_tx_descriptor
+*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, ulong flags)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_fl_entry *f_list;
+ bool wrt_changed;
+
+ dpaa2_qdma = dpaa2_chan->qdma;
+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+ if (!dpaa2_comp)
+ return NULL;
+
+ wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;
+
+ /* populate Frame descriptor */
+ dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
+
+ f_list = dpaa2_comp->fl_virt_addr;
+
+	/* first frame list for descriptor buffer (long format) */
+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
+
+ f_list++;
+
+ dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
+
+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
+static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct virt_dma_desc *vdesc;
+ struct dpaa2_fd *fd;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ spin_lock(&dpaa2_chan->vchan.lock);
+ if (vchan_issue_pending(&dpaa2_chan->vchan)) {
+ vdesc = vchan_next_desc(&dpaa2_chan->vchan);
+ if (!vdesc)
+ goto err_enqueue;
+ dpaa2_comp = to_fsl_qdma_comp(vdesc);
+
+ fd = dpaa2_comp->fd_virt_addr;
+
+ list_del(&vdesc->node);
+ list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
+
+ err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
+ if (err) {
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list,
+ &dpaa2_chan->comp_free);
+ }
+ }
+err_enqueue:
+ spin_unlock(&dpaa2_chan->vchan.lock);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+}
+
+static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_qdma_priv *priv;
+ u8 prio_def = DPDMAI_PRIO_NUM;
+ int err = -EINVAL;
+ int i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpqdma_id = ls_dev->obj_desc.id;
+
+ /* Get the handle for the DPDMAI this interface is associated with */
+ err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpdmai_open() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "Opened dpdmai object successfully\n");
+
+ err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpdmai_attr);
+ if (err) {
+ dev_err(dev, "dpdmai_get_attributes() failed\n");
+ goto exit;
+ }
+
+ if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+ err = -EINVAL;
+ dev_err(dev, "DPDMAI major version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+ priv->dpdmai_attr.version.minor,
+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+ goto exit;
+ }
+
+ if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+ err = -EINVAL;
+ dev_err(dev, "DPDMAI minor version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+ priv->dpdmai_attr.version.minor,
+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+ goto exit;
+ }
+
+ priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
+ ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
+ if (!ppriv) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ priv->ppriv = ppriv;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ i, &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpdmai_get_rx_queue() failed\n");
+ goto exit;
+ }
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+
+ err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ i, &priv->tx_fqid[i]);
+ if (err) {
+ dev_err(dev, "dpdmai_get_tx_queue() failed\n");
+ goto exit;
+ }
+ ppriv->req_fqid = priv->tx_fqid[i];
+ ppriv->prio = i;
+ ppriv->priv = priv;
+ ppriv++;
+ }
+
+ return 0;
+exit:
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ return err;
+}
+
+static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
+ struct dpaa2_qdma_priv_per_prio, nctx);
+ struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
+ struct dpaa2_qdma_priv *priv = ppriv->priv;
+ u32 n_chans = priv->dpaa2_qdma->n_chans;
+ struct dpaa2_qdma_chan *qchan;
+ const struct dpaa2_fd *fd_eq;
+ const struct dpaa2_fd *fd;
+ struct dpaa2_dq *dq;
+ int is_last = 0;
+ int found;
+ u8 status;
+ int err;
+ int i;
+
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err);
+
+ while (!is_last) {
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ } while (!is_last && !dq);
+ if (!dq) {
+ dev_err(priv->dev, "FQID returned no valid frames!\n");
+ continue;
+ }
+
+ /* obtain FD and process the error */
+ fd = dpaa2_dq_fd(dq);
+
+ status = dpaa2_fd_get_ctrl(fd) & 0xff;
+ if (status)
+ dev_err(priv->dev, "FD error occurred\n");
+ found = 0;
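+ /*
+ * Match the dequeued FD back to its in-flight completion by
+ * comparing frame addresses across all channels.
+ */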
+ for (i = 0; i < n_chans; i++) {
+ qchan = &priv->dpaa2_qdma->chans[i];
+ spin_lock(&qchan->queue_lock);
+ if (list_empty(&qchan->comp_used)) {
+ spin_unlock(&qchan->queue_lock);
+ continue;
+ }
+ list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
+ &qchan->comp_used, list) {
+ fd_eq = dpaa2_comp->fd_virt_addr;
+
+ if (le64_to_cpu(fd_eq->simple.addr) ==
+ le64_to_cpu(fd->simple.addr)) {
+ spin_lock(&qchan->vchan.lock);
+ vchan_cookie_complete(&dpaa2_comp->vdesc);
+ spin_unlock(&qchan->vchan.lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&qchan->queue_lock);
+ if (found)
+ break;
+ }
+ }
+
+ dpaa2_io_service_rearm(NULL, ctx);
+}
+
+static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = priv->dev;
+ int err = -EINVAL;
+ int i, num;
+
+ num = priv->num_pairs;
+ ppriv = priv->ppriv;
+ for (i = 0; i < num; i++) {
+ ppriv->nctx.is_cdan = 0;
+ ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
+ ppriv->nctx.id = ppriv->rsp_fqid;
+ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
+ err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
+ if (err) {
+ dev_err(dev, "Notification register failed\n");
+ goto err_service;
+ }
+
+ ppriv->store =
+ dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
+ if (!ppriv->store) {
+ err = -ENOMEM;
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ goto err_store;
+ }
+
+ ppriv++;
+ }
+ return 0;
+
+err_store:
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+err_service:
+ ppriv--;
+ while (ppriv >= priv->ppriv) {
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv--;
+ }
+ return err;
+}
+
+static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv++;
+ }
+}
+
+static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ struct device *dev = priv->dev;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+ ppriv++;
+ }
+}
+
+static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
+{
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev;
+ int i, num;
+ int err;
+
+ ls_dev = to_fsl_mc_device(dev);
+ num = priv->num_pairs;
+ ppriv = priv->ppriv;
+ for (i = 0; i < num; i++) {
+ rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
+ DPDMAI_QUEUE_OPT_DEST;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+ rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ rx_queue_cfg.dest_cfg.priority = ppriv->prio;
+ err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ rx_queue_cfg.dest_cfg.priority,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpdmai_set_rx_queue() failed\n");
+ return err;
+ }
+
+ ppriv++;
+ }
+
+ return 0;
+}
+
+static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev;
+ int err = 0;
+ int i;
+
+ ls_dev = to_fsl_mc_device(dev);
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv->nctx.qman64 = 0;
+ ppriv->nctx.dpio_id = 0;
+ ppriv++;
+ }
+
+ err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err)
+ dev_err(dev, "dpdmai_reset() failed\n");
+
+ return err;
+}
+
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head)
+{
+ struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
+ unsigned long flags;
+
+ list_for_each_entry_safe(comp_tmp, _comp_tmp,
+ head, list) {
+ spin_lock_irqsave(&qchan->queue_lock, flags);
+ list_del(&comp_tmp->list);
+ spin_unlock_irqrestore(&qchan->queue_lock, flags);
+ dma_pool_free(qchan->fd_pool,
+ comp_tmp->fd_virt_addr,
+ comp_tmp->fd_bus_addr);
+ dma_pool_free(qchan->fl_pool,
+ comp_tmp->fl_virt_addr,
+ comp_tmp->fl_bus_addr);
+ dma_pool_free(qchan->sdd_pool,
+ comp_tmp->desc_virt_addr,
+ comp_tmp->desc_bus_addr);
+ kfree(comp_tmp);
+ }
+}
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+ struct dpaa2_qdma_chan *qchan;
+ int num, i;
+
+ num = dpaa2_qdma->n_chans;
+ for (i = 0; i < num; i++) {
+ qchan = &dpaa2_qdma->chans[i];
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
+ dma_pool_destroy(qchan->fd_pool);
+ dma_pool_destroy(qchan->fl_pool);
+ dma_pool_destroy(qchan->sdd_pool);
+ }
+}
+
+static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_qdma_chan *qchan;
+ unsigned long flags;
+
+ dpaa2_comp = to_fsl_qdma_comp(vdesc);
+ qchan = dpaa2_comp->qchan;
+ spin_lock_irqsave(&qchan->queue_lock, flags);
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
+ spin_unlock_irqrestore(&qchan->queue_lock, flags);
+}
+
+static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+ struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
+ struct dpaa2_qdma_chan *dpaa2_chan;
+ int num = priv->num_pairs;
+ int i;
+
+ INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
+ for (i = 0; i < dpaa2_qdma->n_chans; i++) {
+ dpaa2_chan = &dpaa2_qdma->chans[i];
+ dpaa2_chan->qdma = dpaa2_qdma;
+ dpaa2_chan->fqid = priv->tx_fqid[i % num];
+ dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
+ vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
+ spin_lock_init(&dpaa2_chan->queue_lock);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_used);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_free);
+ }
+ return 0;
+}
+
+static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
+{
+ struct device *dev = &dpdmai_dev->dev;
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv *priv;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(dev, priv);
+ priv->dpdmai_dev = dpdmai_dev;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (priv->iommu_domain)
+ smmu_disable = false;
+
+ /* obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_mcportal;
+ }
+
+ /* DPDMAI initialization */
+ err = dpaa2_qdma_setup(dpdmai_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
+ goto err_dpdmai_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_qdma_dpio_setup(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPDMAI binding to DPIO */
+ err = dpaa2_dpdmai_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPDMAI enable */
+ err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpdmai_enable() faile\n");
+ goto err_enable;
+ }
+
+ dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
+ if (!dpaa2_qdma) {
+ err = -ENOMEM;
+ goto err_eng;
+ }
+
+ priv->dpaa2_qdma = dpaa2_qdma;
+ dpaa2_qdma->priv = priv;
+
+ dpaa2_qdma->desc_allocated = 0;
+ dpaa2_qdma->n_chans = NUM_CH;
+
+ dpaa2_dpdmai_init_channels(dpaa2_qdma);
+
+ if (soc_device_match(soc_fixup_tuning))
+ dpaa2_qdma->qdma_wrtype_fixup = true;
+ else
+ dpaa2_qdma->qdma_wrtype_fixup = false;
+
+ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
+
+ dpaa2_qdma->dma_dev.dev = dev;
+ dpaa2_qdma->dma_dev.device_alloc_chan_resources =
+ dpaa2_qdma_alloc_chan_resources;
+ dpaa2_qdma->dma_dev.device_free_chan_resources =
+ dpaa2_qdma_free_chan_resources;
+ dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
+ dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
+ dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
+
+ err = dma_async_device_register(&dpaa2_qdma->dma_dev);
+ if (err) {
+ dev_err(dev, "Can't register NXP QDMA engine.\n");
+ goto err_dpaa2_qdma;
+ }
+
+ return 0;
+
+err_dpaa2_qdma:
+ kfree(dpaa2_qdma);
+err_eng:
+ dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_enable:
+ dpaa2_dpdmai_dpio_unbind(priv);
+err_bind:
+ dpaa2_dpmai_store_free(priv);
+ dpaa2_dpdmai_dpio_free(priv);
+err_dpio_setup:
+ kfree(priv->ppriv);
+ dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_dpdmai_setup:
+ fsl_mc_portal_free(priv->mc_io);
+err_mcportal:
+ kfree(priv);
+ dev_set_drvdata(dev, NULL);
+ return err;
+}
+
+static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
+{
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv *priv;
+ struct device *dev;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+ dpaa2_qdma = priv->dpaa2_qdma;
+
+ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ dpaa2_dpdmai_dpio_unbind(priv);
+ dpaa2_dpmai_store_free(priv);
+ dpaa2_dpdmai_dpio_free(priv);
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ fsl_mc_portal_free(priv->mc_io);
+ dev_set_drvdata(dev, NULL);
+ dpaa2_dpdmai_free_channels(dpaa2_qdma);
+
+ dma_async_device_unregister(&dpaa2_qdma->dma_dev);
+ kfree(priv);
+ kfree(dpaa2_qdma);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpdmai",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_qdma_driver = {
+ .driver = {
+ .name = "dpaa2-qdma",
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_qdma_probe,
+ .remove = dpaa2_qdma_remove,
+ .match_id_table = dpaa2_qdma_id_table
+};
+
+static int __init dpaa2_qdma_driver_init(void)
+{
+ return fsl_mc_driver_register(&(dpaa2_qdma_driver));
+}
+late_initcall(dpaa2_qdma_driver_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+ fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_ALIAS("platform:fsl-dpaa2-qdma");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
new file mode 100644
index 000000000000..7d571849c569
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __DPAA2_QDMA_H
+#define __DPAA2_QDMA_H
+
+#define DPAA2_QDMA_STORE_SIZE 16
+#define NUM_CH 8
+
+struct dpaa2_qdma_sd_d {
+ u32 rsv:32;
+ union {
+ struct {
+ u32 ssd:12; /* source stride distance */
+ u32 sss:12; /* source stride size */
+ u32 rsv1:8;
+ } sdf;
+ struct {
+ u32 dsd:12; /* Destination stride distance */
+ u32 dss:12; /* Destination stride size */
+ u32 rsv2:8;
+ } ddf;
+ } df;
+ u32 rbpcmd; /* Route-by-port command */
+ u32 cmd;
+} __attribute__((__packed__));
+
+/* Source descriptor command read transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
+/* Destination descriptor command write transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
+#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
+
+#define QMAN_FD_FMT_ENABLE BIT(0) /* frame list table enable */
+#define QMAN_FD_BMT_ENABLE BIT(15) /* bypass memory translation */
+#define QMAN_FD_BMT_DISABLE (0) /* bypass memory translation */
+#define QMAN_FD_SL_DISABLE (0) /* short length disabled */
+#define QMAN_FD_SL_ENABLE BIT(14) /* short length enabled */
+
+#define QDMA_FINAL_BIT_DISABLE (0) /* final bit disable */
+#define QDMA_FINAL_BIT_ENABLE BIT(31) /* final bit enable */
+
+#define QDMA_FD_SHORT_FORMAT BIT(11) /* short format */
+#define QDMA_FD_LONG_FORMAT (0) /* long format */
+#define QDMA_SER_DISABLE (8) /* no notification */
+#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
+#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
+#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
+#define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */
+
+#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
+#define QMAN_FD_VA_DISABLE (0) /* Address used is a real address */
+/* Flow Context: 49-bit physical address */
+#define QMAN_FD_CBMT_ENABLE BIT(15)
+#define QMAN_FD_CBMT_DISABLE (0) /* Flow Context: 64-bit virtual address */
+#define QMAN_FD_SC_DISABLE (0) /* stashing control */
+
+#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
+#define QDMA_FL_FMT_SGE (0x2) /* Scatter gather frame */
+#define QDMA_FL_BMT_ENABLE BIT(15) /* enable bypass memory translation */
+#define QDMA_FL_BMT_DISABLE (0x0) /* enable bypass memory translation */
+#define QDMA_FL_SL_LONG (0x0) /* long length */
+#define QDMA_FL_SL_SHORT (0x1) /* short length */
+#define QDMA_FL_F (0x1) /* last frame list bit */
+
+/* Description of frame list table structure */
+struct dpaa2_qdma_chan {
+ struct dpaa2_qdma_engine *qdma;
+ struct virt_dma_chan vchan;
+ struct virt_dma_desc vdesc;
+ enum dma_status status;
+ u32 fqid;
+
+ /* spinlock used by dpaa2 qdma driver */
+ spinlock_t queue_lock;
+ struct dma_pool *fd_pool;
+ struct dma_pool *fl_pool;
+ struct dma_pool *sdd_pool;
+
+ struct list_head comp_used;
+ struct list_head comp_free;
+
+};
+
+struct dpaa2_qdma_comp {
+ dma_addr_t fd_bus_addr;
+ dma_addr_t fl_bus_addr;
+ dma_addr_t desc_bus_addr;
+ struct dpaa2_fd *fd_virt_addr;
+ struct dpaa2_fl_entry *fl_virt_addr;
+ struct dpaa2_qdma_sd_d *desc_virt_addr;
+ struct dpaa2_qdma_chan *qchan;
+ struct virt_dma_desc vdesc;
+ struct list_head list;
+};
+
+struct dpaa2_qdma_engine {
+ struct dma_device dma_dev;
+ u32 n_chans;
+ struct dpaa2_qdma_chan chans[NUM_CH];
+ int qdma_wrtype_fixup;
+ int desc_allocated;
+
+ struct dpaa2_qdma_priv *priv;
+};
+
+/*
+ * dpaa2_qdma_priv - driver private data
+ */
+struct dpaa2_qdma_priv {
+ int dpqdma_id;
+
+ struct iommu_domain *iommu_domain;
+ struct dpdmai_attr dpdmai_attr;
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct fsl_mc_device *dpdmai_dev;
+ u8 num_pairs;
+
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+
+ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
+ u32 tx_fqid[DPDMAI_PRIO_NUM];
+};
+
+struct dpaa2_qdma_priv_per_prio {
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+
+ struct dpaa2_io_store *store;
+ struct dpaa2_io_notification_ctx nctx;
+
+ struct dpaa2_qdma_priv *priv;
+};
+
+static struct soc_device_attribute soc_fixup_tuning[] = {
+ { .family = "QorIQ LX2160A"},
+ { },
+};
+
+/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
+ sizeof(struct dpaa2_fl_entry) * 3 + \
+ sizeof(struct dpaa2_qdma_sd_d) * 2)
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma);
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head);
+#endif /* __DPAA2_QDMA_H */
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
new file mode 100644
index 000000000000..f8d22115154a
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/fsl/mc.h>
+#include "dpdmai.h"
+
+struct dpdmai_rsp_get_attributes {
+ __le32 id;
+ u8 num_of_priorities;
+ u8 pad0[3];
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpdmai_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+};
+
+struct dpdmai_rsp_get_tx_queue {
+ __le64 pad;
+ __le32 fqid;
+};
+
+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPDMAI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\
+ MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\
+} while (0)
+
+static inline u64 mc_enc(int lsoffset, int width, u64 val)
+{
+ return (val & MAKE_UMASK64(width)) << lsoffset;
+}
+
+/**
+ * dpdmai_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: DPDMAI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmai_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ int dpdmai_id, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ __le64 *cmd_dpdmai_id;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+ cmd_flags, 0);
+
+ cmd_dpdmai_id = cmd.params;
+ *cmd_dpdmai_id = cpu_to_le32(dpdmai_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_open);
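+
+/*
+ * Typical call flow (illustrative sketch only; 'mc_io' and 'dpdmai_id' are
+ * assumed to be provided by the caller, as in the dpaa2-qdma driver):
+ *
+ *	u16 token;
+ *	struct dpdmai_attr attr;
+ *	int err;
+ *
+ *	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
+ *	if (err)
+ *		return err;
+ *	err = dpdmai_get_attributes(mc_io, 0, token, &attr);
+ *	if (!err)
+ *		err = dpdmai_enable(mc_io, 0, token);
+ *	if (err)
+ *		dpdmai_close(mc_io, 0, token);
+ */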
+
+/**
+ * dpdmai_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_close);
+
+/**
+ * dpdmai_create() - Create the DPDMAI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @token: Returned token; use in subsequent API calls
+ *
+ * Create the DPDMAI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent calls to
+ * this specific object. For objects that are created using the
+ * DPL file, call dpdmai_open() function to get an authentication
+ * token first.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ const struct dpdmai_cfg *cfg, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
+ cmd_flags, 0);
+ DPDMAI_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_enable);
+
+/**
+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_disable);
+
+/**
+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_reset);
+
+/**
+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpdmai_attr *attr)
+{
+ struct dpdmai_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->version.major = le16_to_cpu(rsp_params->major);
+ attr->version.minor = le16_to_cpu(rsp_params->minor);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_attributes);
+
+/**
+ * dpdmai_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, const struct dpdmai_rx_queue_cfg *cfg)
+{
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = priority;
+ cmd_params->dest_type = cfg->dest_cfg.dest_type;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue);
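+
+/*
+ * Illustrative sketch only: route FQDAN notifications for Rx queue 'prio' to a
+ * DPIO, the way dpaa2_dpdmai_bind() in dpaa2-qdma.c configures it ('nctx' is an
+ * already registered struct dpaa2_io_notification_ctx):
+ *
+ *	struct dpdmai_rx_queue_cfg cfg = {
+ *		.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
+ *		.user_ctx = nctx->qman64,
+ *		.dest_cfg = {
+ *			.dest_type = DPDMAI_DEST_DPIO,
+ *			.dest_id = nctx->dpio_id,
+ *			.priority = prio,
+ *		},
+ *	};
+ *
+ *	err = dpdmai_set_rx_queue(mc_io, 0, token, prio, &cfg);
+ */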
+
+/**
+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, struct dpdmai_rx_queue_attr *attr)
+{
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->queue = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = cmd_params->dest_type;
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue);
+
+/**
+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @fqid: Returned Tx queue FQID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, u8 priority, u32 *fqid)
+{
+ struct dpdmai_rsp_get_tx_queue *rsp_params;
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->queue = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+
+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
+ *fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_tx_queue);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
new file mode 100644
index 000000000000..6d785093da8e
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __FSL_DPDMAI_H
+#define __FSL_DPDMAI_H
+
+/* DPDMAI Version */
+#define DPDMAI_VER_MAJOR 2
+#define DPDMAI_VER_MINOR 2
+
+#define DPDMAI_CMD_BASE_VERSION 0
+#define DPDMAI_CMD_ID_OFFSET 4
+
+#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \
+ DPDMAI_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
+#define DPDMAI_CMDID_OPEN DPDMAI_CMDID_FORMAT(0x80E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMDID_FORMAT(0x90E)
+
+#define DPDMAI_CMDID_ENABLE DPDMAI_CMDID_FORMAT(0x002)
+#define DPDMAI_CMDID_DISABLE DPDMAI_CMDID_FORMAT(0x003)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMDID_FORMAT(0x004)
+#define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005)
+#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006)
+
+#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010)
+#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011)
+#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012)
+#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013)
+#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014)
+#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015)
+#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016)
+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017)
+
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2)
+
+#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
+#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
+
+#define MAKE_UMASK64(_width) \
+ ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : (u64)-1))
+
+/* Data Path DMA Interface API
+ * Contains initialization APIs and runtime control APIs for DPDMAI
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPDMAI object
+ */
+#define DPDMAI_PRIO_NUM 2
+
+/* DPDMAI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPDMAI_QUEUE_OPT_USER_CTX 0x1
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPDMAI_QUEUE_OPT_DEST 0x2
+
+/**
+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
+ * configured with values 1-8; the entry following the last valid entry
+ * should be configured with 0
+ */
+struct dpdmai_cfg {
+ u8 priorities[DPDMAI_PRIO_NUM];
+};
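+
+/*
+ * Illustrative only: a configuration requesting two priorities
+ *
+ *	struct dpdmai_cfg cfg = { .priorities = { 1, 2 } };
+ */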
+
+/**
+ * struct dpdmai_attr - Structure representing DPDMAI attributes
+ * @id: DPDMAI object ID
+ * @version: DPDMAI version
+ * @num_of_priorities: number of priorities
+ */
+struct dpdmai_attr {
+ int id;
+ /**
+ * struct version - DPDMAI version
+ * @major: DPDMAI major version
+ * @minor: DPDMAI minor version
+ */
+ struct {
+ u16 major;
+ u16 minor;
+ } version;
+ u8 num_of_priorities;
+};
+
+/**
+ * enum dpdmai_dest - DPDMAI destination types
+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpdmai_dest {
+ DPDMAI_DEST_NONE = 0,
+ DPDMAI_DEST_DPIO = 1,
+ DPDMAI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
+ */
+struct dpdmai_dest_cfg {
+ enum dpdmai_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpdmai_rx_queue_cfg {
+ struct dpdmai_dest_cfg dest_cfg;
+ u32 options;
+ u64 user_ctx;
+
+};
+
+/**
+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpdmai_rx_queue_attr {
+ struct dpdmai_dest_cfg dest_cfg;
+ u64 user_ctx;
+ u32 fqid;
+};
+
+int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ int dpdmai_id, u16 *token);
+int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ const struct dpdmai_cfg *cfg, u16 *token);
+int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpdmai_attr *attr);
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, struct dpdmai_rx_queue_attr *attr);
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, u8 priority, u32 *fqid);
+
+#endif /* __FSL_DPDMAI_H */
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 06664fbd2d91..89792083d62c 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev)
return ret;
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+ if (fsl_qdma->irq_base < 0)
+ return fsl_qdma->irq_base;
+
fsl_qdma->feature = of_property_read_bool(np, "big-endian");
INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index a3f942a6a946..db0e274126fb 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -173,7 +173,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
&iop_chan->chain, chain_node) {
zero_sum_result |=
iop_desc_get_zero_result(grp_iter);
- pr_debug("\titer%d result: %d\n",
+ pr_debug("\titer%d result: %d\n",
grp_iter->idx, zero_sum_result);
slot_cnt -= slots_per_op;
if (slot_cnt == 0)
@@ -1359,9 +1359,11 @@ static int iop_adma_probe(struct platform_device *pdev)
iop_adma_device_clear_err_status(iop_chan);
for (i = 0; i < 3; i++) {
- irq_handler_t handler[] = { iop_adma_eot_handler,
- iop_adma_eoc_handler,
- iop_adma_err_handler };
+ static const irq_handler_t handler[] = {
+ iop_adma_eot_handler,
+ iop_adma_eoc_handler,
+ iop_adma_err_handler
+ };
int irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = -ENXIO;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 4b36c8810517..adecea51814f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -835,13 +835,8 @@ static int k3_dma_probe(struct platform_device *op)
const struct k3dma_soc_data *soc_data;
struct k3_dma_dev *d;
const struct of_device_id *of_id;
- struct resource *iores;
int i, ret, irq = 0;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
@@ -850,7 +845,7 @@ static int k3_dma_probe(struct platform_device *op)
if (!soc_data)
return -EINVAL;
- d->base = devm_ioremap_resource(&op->dev, iores);
+ d->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(d->base))
return PTR_ERR(d->base);
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 723b11c190b3..6bf838e63be1 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -819,15 +819,7 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&cqdma->pc[i]->queue);
spin_lock_init(&cqdma->pc[i]->lock);
refcount_set(&cqdma->pc[i]->refcnt, 0);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_err(&pdev->dev, "No mem resource for %s\n",
- dev_name(&pdev->dev));
- return -EINVAL;
- }
-
- cqdma->pc[i]->base = devm_ioremap_resource(&pdev->dev, res);
+ cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(cqdma->pc[i]->base))
return PTR_ERR(cqdma->pc[i]->base);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 1a2028e1c29e..4c58da742143 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"request_irq failed with err %d\n", err);
- goto err_unregister;
+ goto err_free;
}
platform_set_drvdata(pdev, hsdma);
@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
+err_free:
+ of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index f40051d6aecb..c20e6bd4e298 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -475,7 +475,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mtk_uart_apdmadev *mtkd;
int bit_mask = 32, rc;
- struct resource *res;
struct mtk_chan *c;
unsigned int i;
@@ -532,13 +531,7 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
goto err_no_dma;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- rc = -ENODEV;
- goto err_no_dma;
- }
-
- c->base = devm_ioremap_resource(&pdev->dev, res);
+ c->base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(c->base)) {
rc = PTR_ERR(c->base);
goto err_no_dma;
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
new file mode 100644
index 000000000000..8853d442430b
--- /dev/null
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Linaro Ltd.
+// Copyright (C) 2019 Socionext Inc.
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "virt-dma.h"
+
+#define MLB_HDMAC_DMACR 0x0 /* global */
+#define MLB_HDMAC_DE BIT(31)
+#define MLB_HDMAC_DS BIT(30)
+#define MLB_HDMAC_PR BIT(28)
+#define MLB_HDMAC_DH GENMASK(27, 24)
+
+#define MLB_HDMAC_CH_STRIDE 0x10
+
+#define MLB_HDMAC_DMACA 0x0 /* channel */
+#define MLB_HDMAC_EB BIT(31)
+#define MLB_HDMAC_PB BIT(30)
+#define MLB_HDMAC_ST BIT(29)
+#define MLB_HDMAC_IS GENMASK(28, 24)
+#define MLB_HDMAC_BT GENMASK(23, 20)
+#define MLB_HDMAC_BC GENMASK(19, 16)
+#define MLB_HDMAC_TC GENMASK(15, 0)
+#define MLB_HDMAC_DMACB 0x4
+#define MLB_HDMAC_TT GENMASK(31, 30)
+#define MLB_HDMAC_MS GENMASK(29, 28)
+#define MLB_HDMAC_TW GENMASK(27, 26)
+#define MLB_HDMAC_FS BIT(25)
+#define MLB_HDMAC_FD BIT(24)
+#define MLB_HDMAC_RC BIT(23)
+#define MLB_HDMAC_RS BIT(22)
+#define MLB_HDMAC_RD BIT(21)
+#define MLB_HDMAC_EI BIT(20)
+#define MLB_HDMAC_CI BIT(19)
+#define HDMAC_PAUSE 0x7
+#define MLB_HDMAC_SS GENMASK(18, 16)
+#define MLB_HDMAC_SP GENMASK(15, 12)
+#define MLB_HDMAC_DP GENMASK(11, 8)
+#define MLB_HDMAC_DMACSA 0x8
+#define MLB_HDMAC_DMACDA 0xc
+
+#define MLB_HDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+struct milbeaut_hdmac_desc {
+ struct virt_dma_desc vd;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+ unsigned int sg_cur;
+ enum dma_transfer_direction dir;
+};
+
+struct milbeaut_hdmac_chan {
+ struct virt_dma_chan vc;
+ struct milbeaut_hdmac_device *mdev;
+ struct milbeaut_hdmac_desc *md;
+ void __iomem *reg_ch_base;
+ unsigned int slave_id;
+ struct dma_slave_config cfg;
+};
+
+struct milbeaut_hdmac_device {
+ struct dma_device ddev;
+ struct clk *clk;
+ void __iomem *reg_base;
+ struct milbeaut_hdmac_chan channels[0];
+};
+
+static struct milbeaut_hdmac_chan *
+to_milbeaut_hdmac_chan(struct virt_dma_chan *vc)
+{
+ return container_of(vc, struct milbeaut_hdmac_chan, vc);
+}
+
+static struct milbeaut_hdmac_desc *
+to_milbeaut_hdmac_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct milbeaut_hdmac_desc, vd);
+}
+
+/* mc->vc.lock must be held by caller */
+static struct milbeaut_hdmac_desc *
+milbeaut_hdmac_next_desc(struct milbeaut_hdmac_chan *mc)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&mc->vc);
+ if (!vd) {
+ mc->md = NULL;
+ return NULL;
+ }
+
+ list_del(&vd->node);
+
+ mc->md = to_milbeaut_hdmac_desc(vd);
+
+ return mc->md;
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_chan_start(struct milbeaut_hdmac_chan *mc,
+ struct milbeaut_hdmac_desc *md)
+{
+ struct scatterlist *sg;
+ u32 cb, ca, src_addr, dest_addr, len;
+ u32 width, burst;
+
+ sg = &md->sgl[md->sg_cur];
+ len = sg_dma_len(sg);
+
+ cb = MLB_HDMAC_CI | MLB_HDMAC_EI;
+ if (md->dir == DMA_MEM_TO_DEV) {
+ cb |= MLB_HDMAC_FD;
+ width = mc->cfg.dst_addr_width;
+ burst = mc->cfg.dst_maxburst;
+ src_addr = sg_dma_address(sg);
+ dest_addr = mc->cfg.dst_addr;
+ } else {
+ cb |= MLB_HDMAC_FS;
+ width = mc->cfg.src_addr_width;
+ burst = mc->cfg.src_maxburst;
+ src_addr = mc->cfg.src_addr;
+ dest_addr = sg_dma_address(sg);
+ }
+ cb |= FIELD_PREP(MLB_HDMAC_TW, (width >> 1));
+ cb |= FIELD_PREP(MLB_HDMAC_MS, 2);
+
+ writel_relaxed(MLB_HDMAC_DE, mc->mdev->reg_base + MLB_HDMAC_DMACR);
+ writel_relaxed(src_addr, mc->reg_ch_base + MLB_HDMAC_DMACSA);
+ writel_relaxed(dest_addr, mc->reg_ch_base + MLB_HDMAC_DMACDA);
+ writel_relaxed(cb, mc->reg_ch_base + MLB_HDMAC_DMACB);
+
+ ca = FIELD_PREP(MLB_HDMAC_IS, mc->slave_id);
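+ /* hardware-specific burst type encoding for 16/8/4-beat bursts */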
+ if (burst == 16)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xf);
+ else if (burst == 8)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xd);
+ else if (burst == 4)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xb);
+ burst *= width;
+ ca |= FIELD_PREP(MLB_HDMAC_TC, (len / burst - 1));
+ writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ ca |= MLB_HDMAC_EB;
+ writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_hdmac_start(struct milbeaut_hdmac_chan *mc)
+{
+ struct milbeaut_hdmac_desc *md;
+
+ md = milbeaut_hdmac_next_desc(mc);
+ if (md)
+ milbeaut_chan_start(mc, md);
+}
+
+static irqreturn_t milbeaut_hdmac_interrupt(int irq, void *dev_id)
+{
+ struct milbeaut_hdmac_chan *mc = dev_id;
+ struct milbeaut_hdmac_desc *md;
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+
+ /* Ack and Disable irqs */
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACB);
+ val &= ~(FIELD_PREP(MLB_HDMAC_SS, HDMAC_PAUSE));
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
+ val &= ~MLB_HDMAC_EI;
+ val &= ~MLB_HDMAC_CI;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
+
+ md = mc->md;
+ if (!md)
+ goto out;
+
+ md->sg_cur++;
+
+ if (md->sg_cur >= md->sg_len) {
+ vchan_cookie_complete(&md->vd);
+ md = milbeaut_hdmac_next_desc(mc);
+ if (!md)
+ goto out;
+ }
+
+ milbeaut_chan_start(mc, md);
+
+out:
+ spin_unlock(&mc->vc.lock);
+ return IRQ_HANDLED;
+}
+
+static void milbeaut_hdmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static int
+milbeaut_hdmac_chan_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+
+ spin_lock(&mc->vc.lock);
+ mc->cfg = *cfg;
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_chan_pause(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val |= MLB_HDMAC_PB;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_chan_resume(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val &= ~MLB_HDMAC_PB;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_desc *md;
+ int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ md = kzalloc(sizeof(*md), GFP_NOWAIT);
+ if (!md)
+ return NULL;
+
+ md->sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
+ if (!md->sgl) {
+ kfree(md);
+ return NULL;
+ }
+
+ for (i = 0; i < sg_len; i++)
+ md->sgl[i] = sgl[i];
+
+ md->sg_len = sg_len;
+ md->dir = direction;
+
+ return vchan_tx_prep(vc, &md->vd, flags);
+}
+
+static int milbeaut_hdmac_terminate_all(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ unsigned long flags;
+ u32 val;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val &= ~MLB_HDMAC_EB; /* disable the channel */
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+
+ if (mc->md) {
+ vchan_terminate_vdesc(&mc->md->vd);
+ mc->md = NULL;
+ }
+
+ vchan_get_all_descriptors(vc, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+
+ return 0;
+}
+
+static void milbeaut_hdmac_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(to_virt_chan(chan));
+}
+
+static enum dma_status milbeaut_hdmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct virt_dma_chan *vc;
+ struct virt_dma_desc *vd;
+ struct milbeaut_hdmac_chan *mc;
+ struct milbeaut_hdmac_desc *md = NULL;
+ enum dma_status stat;
+ unsigned long flags;
+ int i;
+
+ stat = dma_cookie_status(chan, cookie, txstate);
+ /* Return immediately if we do not need to compute the residue. */
+ if (stat == DMA_COMPLETE || !txstate)
+ return stat;
+
+ vc = to_virt_chan(chan);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ mc = to_milbeaut_hdmac_chan(vc);
+
+ /* residue from the in-flight chunk */
+ if (mc->md && mc->md->vd.tx.cookie == cookie) {
+ struct scatterlist *sg;
+ u32 done;
+
+ md = mc->md;
+ sg = &md->sgl[md->sg_cur];
+
+ if (md->dir == DMA_DEV_TO_MEM)
+ done = readl_relaxed(mc->reg_ch_base
+ + MLB_HDMAC_DMACDA);
+ else
+ done = readl_relaxed(mc->reg_ch_base
+ + MLB_HDMAC_DMACSA);
+ done -= sg_dma_address(sg);
+
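+ /* the loop below adds back the full length of this and any queued chunks */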
+ txstate->residue = -done;
+ }
+
+ if (!md) {
+ vd = vchan_find_desc(vc, cookie);
+ if (vd)
+ md = to_milbeaut_hdmac_desc(vd);
+ }
+
+ if (md) {
+ /* residue from the queued chunks */
+ for (i = md->sg_cur; i < md->sg_len; i++)
+ txstate->residue += sg_dma_len(&md->sgl[i]);
+ }
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return stat;
+}
+
+static void milbeaut_hdmac_issue_pending(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (vchan_issue_pending(vc) && !mc->md)
+ milbeaut_hdmac_start(mc);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void milbeaut_hdmac_desc_free(struct virt_dma_desc *vd)
+{
+ struct milbeaut_hdmac_desc *md = to_milbeaut_hdmac_desc(vd);
+
+ kfree(md->sgl);
+ kfree(md);
+}
+
+static struct dma_chan *
+milbeaut_hdmac_xlate(struct of_phandle_args *dma_spec, struct of_dma *of_dma)
+{
+ struct milbeaut_hdmac_device *mdev = of_dma->of_dma_data;
+ struct milbeaut_hdmac_chan *mc;
+ struct virt_dma_chan *vc;
+ struct dma_chan *chan;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&mdev->ddev);
+ if (!chan)
+ return NULL;
+
+ vc = to_virt_chan(chan);
+ mc = to_milbeaut_hdmac_chan(vc);
+ mc->slave_id = dma_spec->args[0];
+
+ return chan;
+}
+
+static int milbeaut_hdmac_chan_init(struct platform_device *pdev,
+ struct milbeaut_hdmac_device *mdev,
+ int chan_id)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_hdmac_chan *mc = &mdev->channels[chan_id];
+ char *irq_name;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, chan_id);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-hdmac-%d",
+ chan_id);
+ if (!irq_name)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, milbeaut_hdmac_interrupt,
+ IRQF_SHARED, irq_name, mc);
+ if (ret)
+ return ret;
+
+ mc->mdev = mdev;
+ mc->reg_ch_base = mdev->reg_base + MLB_HDMAC_CH_STRIDE * (chan_id + 1);
+ mc->vc.desc_free = milbeaut_hdmac_desc_free;
+ vchan_init(&mc->vc, &mdev->ddev);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_hdmac_device *mdev;
+ struct dma_device *ddev;
+ int nr_chans, ret, i;
+
+ nr_chans = platform_irq_count(pdev);
+ if (nr_chans < 0)
+ return nr_chans;
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
+ GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdev->reg_base))
+ return PTR_ERR(mdev->reg_base);
+
+ mdev->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mdev->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(mdev->clk);
+ }
+
+ ret = clk_prepare_enable(mdev->clk);
+ if (ret)
+ return ret;
+
+ ddev = &mdev->ddev;
+ ddev->dev = dev;
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ ddev->src_addr_widths = MLB_HDMAC_BUSWIDTHS;
+ ddev->dst_addr_widths = MLB_HDMAC_BUSWIDTHS;
+ ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ ddev->device_free_chan_resources = milbeaut_hdmac_free_chan_resources;
+ ddev->device_config = milbeaut_hdmac_chan_config;
+ ddev->device_pause = milbeaut_hdmac_chan_pause;
+ ddev->device_resume = milbeaut_hdmac_chan_resume;
+ ddev->device_prep_slave_sg = milbeaut_hdmac_prep_slave_sg;
+ ddev->device_terminate_all = milbeaut_hdmac_terminate_all;
+ ddev->device_synchronize = milbeaut_hdmac_synchronize;
+ ddev->device_tx_status = milbeaut_hdmac_tx_status;
+ ddev->device_issue_pending = milbeaut_hdmac_issue_pending;
+ INIT_LIST_HEAD(&ddev->channels);
+
+ for (i = 0; i < nr_chans; i++) {
+ ret = milbeaut_hdmac_chan_init(pdev, mdev, i);
+ if (ret)
+ goto disable_clk;
+ }
+
+ ret = dma_async_device_register(ddev);
+ if (ret)
+ goto disable_clk;
+
+ ret = of_dma_controller_register(dev->of_node,
+ milbeaut_hdmac_xlate, mdev);
+ if (ret)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, mdev);
+
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(ddev);
+disable_clk:
+ clk_disable_unprepare(mdev->clk);
+
+ return ret;
+}
+
+static int milbeaut_hdmac_remove(struct platform_device *pdev)
+{
+ struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
+ struct dma_chan *chan;
+ int ret;
+
+ /*
+ * Before reaching here, almost all descriptors have been freed by the
+ * ->device_free_chan_resources() hook. However, each channel might
+ * still be holding one descriptor that was in flight at that moment.
+ * Terminate it to make sure this hardware is no longer running. Then,
+ * free the channel resources once again to avoid memory leak.
+ */
+ list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
+ ret = dmaengine_terminate_sync(chan);
+ if (ret)
+ return ret;
+ milbeaut_hdmac_free_chan_resources(chan);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&mdev->ddev);
+ clk_disable_unprepare(mdev->clk);
+
+ return 0;
+}
+
+static const struct of_device_id milbeaut_hdmac_match[] = {
+ { .compatible = "socionext,milbeaut-m10v-hdmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
+
+static struct platform_driver milbeaut_hdmac_driver = {
+ .probe = milbeaut_hdmac_probe,
+ .remove = milbeaut_hdmac_remove,
+ .driver = {
+ .name = "milbeaut-m10v-hdmac",
+ .of_match_table = milbeaut_hdmac_match,
+ },
+};
+module_platform_driver(milbeaut_hdmac_driver);
+
+MODULE_DESCRIPTION("Milbeaut HDMAC DmaEngine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
new file mode 100644
index 000000000000..ab3d2f395378
--- /dev/null
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Linaro Ltd.
+// Copyright (C) 2019 Socionext Inc.
+
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "virt-dma.h"
+
+/* global register */
+#define M10V_XDACS 0x00
+
+/* channel local register */
+#define M10V_XDTBC 0x10
+#define M10V_XDSSA 0x14
+#define M10V_XDDSA 0x18
+#define M10V_XDSAC 0x1C
+#define M10V_XDDAC 0x20
+#define M10V_XDDCC 0x24
+#define M10V_XDDES 0x28
+#define M10V_XDDPC 0x2C
+#define M10V_XDDSD 0x30
+
+#define M10V_XDACS_XE BIT(28)
+
+#define M10V_DEFBS 0x3
+#define M10V_DEFBL 0xf
+
+#define M10V_XDSAC_SBS GENMASK(17, 16)
+#define M10V_XDSAC_SBL GENMASK(11, 8)
+
+#define M10V_XDDAC_DBS GENMASK(17, 16)
+#define M10V_XDDAC_DBL GENMASK(11, 8)
+
+#define M10V_XDDES_CE BIT(28)
+#define M10V_XDDES_SE BIT(24)
+#define M10V_XDDES_SA BIT(15)
+#define M10V_XDDES_TF GENMASK(23, 20)
+#define M10V_XDDES_EI BIT(1)
+#define M10V_XDDES_TI BIT(0)
+
+#define M10V_XDDSD_IS_MASK GENMASK(3, 0)
+#define M10V_XDDSD_IS_NORMAL 0x8
+
+#define MLB_XDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct milbeaut_xdmac_desc {
+ struct virt_dma_desc vd;
+ size_t len;
+ dma_addr_t src;
+ dma_addr_t dst;
+};
+
+struct milbeaut_xdmac_chan {
+ struct virt_dma_chan vc;
+ struct milbeaut_xdmac_desc *md;
+ void __iomem *reg_ch_base;
+};
+
+struct milbeaut_xdmac_device {
+ struct dma_device ddev;
+ void __iomem *reg_base;
+ struct milbeaut_xdmac_chan channels[];
+};
+
+static struct milbeaut_xdmac_chan *
+to_milbeaut_xdmac_chan(struct virt_dma_chan *vc)
+{
+ return container_of(vc, struct milbeaut_xdmac_chan, vc);
+}
+
+static struct milbeaut_xdmac_desc *
+to_milbeaut_xdmac_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct milbeaut_xdmac_desc, vd);
+}
+
+/* mc->vc.lock must be held by caller */
+static struct milbeaut_xdmac_desc *
+milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan *mc)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&mc->vc);
+ if (!vd) {
+ mc->md = NULL;
+ return NULL;
+ }
+
+ list_del(&vd->node);
+
+ mc->md = to_milbeaut_xdmac_desc(vd);
+
+ return mc->md;
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_chan_start(struct milbeaut_xdmac_chan *mc,
+ struct milbeaut_xdmac_desc *md)
+{
+ u32 val;
+
+ /* Setup the channel */
+ val = md->len - 1;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDTBC);
+
+ val = md->src;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDSSA);
+
+ val = md->dst;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDSA);
+
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDSAC);
+ val &= ~(M10V_XDSAC_SBS | M10V_XDSAC_SBL);
+ val |= FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) |
+ FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDSAC);
+
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDDAC);
+ val &= ~(M10V_XDDAC_DBS | M10V_XDDAC_DBL);
+ val |= FIELD_PREP(M10V_XDDAC_DBS, M10V_DEFBS) |
+ FIELD_PREP(M10V_XDDAC_DBL, M10V_DEFBL);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDAC);
+
+ /* Start the channel */
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDDES);
+ val &= ~(M10V_XDDES_CE | M10V_XDDES_SE | M10V_XDDES_TF |
+ M10V_XDDES_EI | M10V_XDDES_TI);
+ val |= FIELD_PREP(M10V_XDDES_CE, 1) | FIELD_PREP(M10V_XDDES_SE, 1) |
+ FIELD_PREP(M10V_XDDES_TF, 1) | FIELD_PREP(M10V_XDDES_EI, 1) |
+ FIELD_PREP(M10V_XDDES_TI, 1);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDES);
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
+{
+ struct milbeaut_xdmac_desc *md;
+
+ md = milbeaut_xdmac_next_desc(mc);
+ if (md)
+ milbeaut_chan_start(mc, md);
+}
+
+static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
+{
+ struct milbeaut_xdmac_chan *mc = dev_id;
+ struct milbeaut_xdmac_desc *md;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&mc->vc.lock, flags);
+
+ /* Ack and Stop */
+ val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDSD);
+
+ md = mc->md;
+ if (!md)
+ goto out;
+
+ vchan_cookie_complete(&md->vd);
+
+ milbeaut_xdmac_start(mc);
+out:
+ spin_unlock_irqrestore(&mc->vc.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static struct dma_async_tx_descriptor *
+milbeaut_xdmac_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_desc *md;
+
+ md = kzalloc(sizeof(*md), GFP_NOWAIT);
+ if (!md)
+ return NULL;
+
+ md->len = len;
+ md->src = src;
+ md->dst = dst;
+
+ return vchan_tx_prep(vc, &md->vd, flags);
+}
+
+static int milbeaut_xdmac_terminate_all(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
+ unsigned long flags;
+ u32 val;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ /* Halt the channel */
+ val = readl(mc->reg_ch_base + M10V_XDDES);
+ val &= ~M10V_XDDES_CE;
+ val |= FIELD_PREP(M10V_XDDES_CE, 0);
+ writel(val, mc->reg_ch_base + M10V_XDDES);
+
+ if (mc->md) {
+ vchan_terminate_vdesc(&mc->md->vd);
+ mc->md = NULL;
+ }
+
+ vchan_get_all_descriptors(vc, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+
+ return 0;
+}
+
+static void milbeaut_xdmac_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(to_virt_chan(chan));
+}
+
+static void milbeaut_xdmac_issue_pending(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (vchan_issue_pending(vc) && !mc->md)
+ milbeaut_xdmac_start(mc);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_milbeaut_xdmac_desc(vd));
+}
+
+static int milbeaut_xdmac_chan_init(struct platform_device *pdev,
+ struct milbeaut_xdmac_device *mdev,
+ int chan_id)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_xdmac_chan *mc = &mdev->channels[chan_id];
+ char *irq_name;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, chan_id);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-xdmac-%d",
+ chan_id);
+ if (!irq_name)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, milbeaut_xdmac_interrupt,
+ IRQF_SHARED, irq_name, mc);
+ if (ret)
+ return ret;
+
+ mc->reg_ch_base = mdev->reg_base + chan_id * 0x30;
+
+ mc->vc.desc_free = milbeaut_xdmac_desc_free;
+ vchan_init(&mc->vc, &mdev->ddev);
+
+ return 0;
+}
+
+static void enable_xdmac(struct milbeaut_xdmac_device *mdev)
+{
+ unsigned int val;
+
+ val = readl(mdev->reg_base + M10V_XDACS);
+ val |= M10V_XDACS_XE;
+ writel(val, mdev->reg_base + M10V_XDACS);
+}
+
+static void disable_xdmac(struct milbeaut_xdmac_device *mdev)
+{
+ unsigned int val;
+
+ val = readl(mdev->reg_base + M10V_XDACS);
+ val &= ~M10V_XDACS_XE;
+ writel(val, mdev->reg_base + M10V_XDACS);
+}
+
+static int milbeaut_xdmac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_xdmac_device *mdev;
+ struct dma_device *ddev;
+ int nr_chans, ret, i;
+
+ nr_chans = platform_irq_count(pdev);
+ if (nr_chans < 0)
+ return nr_chans;
+
+ mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
+ GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdev->reg_base))
+ return PTR_ERR(mdev->reg_base);
+
+ ddev = &mdev->ddev;
+ ddev->dev = dev;
+ dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+ ddev->src_addr_widths = MLB_XDMAC_BUSWIDTHS;
+ ddev->dst_addr_widths = MLB_XDMAC_BUSWIDTHS;
+ ddev->device_free_chan_resources = milbeaut_xdmac_free_chan_resources;
+ ddev->device_prep_dma_memcpy = milbeaut_xdmac_prep_memcpy;
+ ddev->device_terminate_all = milbeaut_xdmac_terminate_all;
+ ddev->device_synchronize = milbeaut_xdmac_synchronize;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = milbeaut_xdmac_issue_pending;
+ INIT_LIST_HEAD(&ddev->channels);
+
+ for (i = 0; i < nr_chans; i++) {
+ ret = milbeaut_xdmac_chan_init(pdev, mdev, i);
+ if (ret)
+ return ret;
+ }
+
+ enable_xdmac(mdev);
+
+ ret = dma_async_device_register(ddev);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register(dev->of_node,
+ of_dma_simple_xlate, mdev);
+ if (ret)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, mdev);
+
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(ddev);
+ return ret;
+}
+
+static int milbeaut_xdmac_remove(struct platform_device *pdev)
+{
+ struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
+ struct dma_chan *chan;
+ int ret;
+
+ /*
+ * Before reaching here, almost all descriptors have been freed by the
+ * ->device_free_chan_resources() hook. However, each channel might
+ * still be holding one descriptor that was in flight at that moment.
+ * Terminate it to make sure this hardware is no longer running. Then,
+ * free the channel resources once again to avoid a memory leak.
+ */
+ list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
+ ret = dmaengine_terminate_sync(chan);
+ if (ret)
+ return ret;
+ milbeaut_xdmac_free_chan_resources(chan);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&mdev->ddev);
+
+ disable_xdmac(mdev);
+
+ return 0;
+}
+
+static const struct of_device_id milbeaut_xdmac_match[] = {
+ { .compatible = "socionext,milbeaut-m10v-xdmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
+
+static struct platform_driver milbeaut_xdmac_driver = {
+ .probe = milbeaut_xdmac_probe,
+ .remove = milbeaut_xdmac_remove,
+ .driver = {
+ .name = "milbeaut-m10v-xdmac",
+ .of_match_table = milbeaut_xdmac_match,
+ },
+};
+module_platform_driver(milbeaut_xdmac_driver);
+
+MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 7fe494fc50d4..ad06f260e907 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -945,6 +945,8 @@ static int mmp_pdma_remove(struct platform_device *op)
struct mmp_pdma_phy *phy;
int i, irq = 0, irq_num = 0;
+ if (op->dev.of_node)
+ of_dma_controller_free(op->dev.of_node);
for (i = 0; i < pdev->dma_channels; i++) {
if (platform_get_irq(op, i) > 0)
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e7d1e12bf464..10117f271b12 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -544,6 +544,9 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static int mmp_tdma_remove(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
return 0;
}
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 90bbcef99ef8..023f951189a7 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1045,18 +1045,13 @@ static int owl_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct owl_dma *od;
- struct resource *res;
int ret, i, nr_channels, nr_requests;
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- od->base = devm_ioremap_resource(&pdev->dev, res);
+ od->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(od->base))
return PTR_ERR(od->base);
diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
new file mode 100644
index 000000000000..f8ffa02e279f
--- /dev/null
+++ b/drivers/dma/sf-pdma/Kconfig
@@ -0,0 +1,6 @@
+config SF_PDMA
+ tristate "Sifive PDMA controller driver"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the SiFive Platform DMA (PDMA) controller found on
+ SiFive FU540 SoCs.
diff --git a/drivers/dma/sf-pdma/Makefile b/drivers/dma/sf-pdma/Makefile
new file mode 100644
index 000000000000..764552ab8d0a
--- /dev/null
+++ b/drivers/dma/sf-pdma/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SF_PDMA) += sf-pdma.o
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
new file mode 100644
index 000000000000..465256fe8b1f
--- /dev/null
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SiFive FU540 Platform DMA driver
+ * Copyright (C) 2019 SiFive
+ *
+ * Based partially on:
+ * - drivers/dma/fsl-edma.c
+ * - drivers/dma/dw-edma/
+ * - drivers/dma/pxa-dma.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 12 "Platform DMA Engine (PDMA)" of
+ * SiFive FU540-C000 v1.0
+ * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+
+#include "sf-pdma.h"
+
+#ifndef readq
+static inline unsigned long long readq(void __iomem *addr)
+{
+ return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(unsigned long long v, void __iomem *addr)
+{
+ writel(lower_32_bits(v), addr);
+ writel(upper_32_bits(v), addr + 4);
+}
+#endif
+
+static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
+{
+ return container_of(dchan, struct sf_pdma_chan, vchan.chan);
+}
+
+static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct sf_pdma_desc, vdesc);
+}
+
+static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
+{
+ struct sf_pdma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->desc && !chan->desc->in_use) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return chan->desc;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->chan = chan;
+
+ return desc;
+}
+
+static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
+ u64 dst, u64 src, u64 size)
+{
+ desc->xfer_type = PDMA_FULL_SPEED;
+ desc->xfer_size = size;
+ desc->dst_addr = dst;
+ desc->src_addr = src;
+}
+
+static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+
+ writel(PDMA_CLEAR_CTRL, regs->ctrl);
+}
+
+static struct dma_async_tx_descriptor *
+sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ struct sf_pdma_desc *desc;
+
+ if (chan && (!len || !dest || !src)) {
+ dev_err(chan->pdma->dma_dev.dev,
+ "Please check dma len, dest, src!\n");
+ return NULL;
+ }
+
+ desc = sf_pdma_alloc_desc(chan);
+ if (!desc)
+ return NULL;
+
+ desc->in_use = true;
+ desc->dirn = DMA_MEM_TO_MEM;
+ desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ chan->desc = desc;
+ sf_pdma_fill_desc(desc, dest, src, len);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return desc->async_tx;
+}
+
+static int sf_pdma_slave_config(struct dma_chan *dchan,
+ struct dma_slave_config *cfg)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+
+ memcpy(&chan->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ struct pdma_regs *regs = &chan->regs;
+
+ dma_cookie_init(dchan);
+ writel(PDMA_CLAIM_MASK, regs->ctrl);
+
+ return 0;
+}
+
+static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+
+ writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
+}
+
+static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ sf_pdma_disable_request(chan);
+ kfree(chan->desc);
+ chan->desc = NULL;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+ sf_pdma_disclaim_chan(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct virt_dma_desc *vd = NULL;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+ u64 residue = 0;
+ struct sf_pdma_desc *desc;
+ struct dma_async_tx_descriptor *tx;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ tx = &chan->desc->vdesc.tx;
+ if (cookie == tx->chan->completed_cookie)
+ goto out;
+
+ if (cookie == tx->cookie) {
+ residue = readq(regs->residue);
+ } else {
+ vd = vchan_find_desc(&chan->vchan, cookie);
+ if (!vd)
+ goto out;
+
+ desc = to_sf_pdma_desc(vd);
+ residue = desc->xfer_size;
+ }
+
+out:
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ return residue;
+}
+
+static enum dma_status
+sf_pdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ enum dma_status status;
+
+ status = dma_cookie_status(dchan, cookie, txstate);
+
+ if (txstate && status != DMA_ERROR)
+ dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));
+
+ return status;
+}
+
+static int sf_pdma_terminate_all(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ sf_pdma_disable_request(chan);
+ kfree(chan->desc);
+ chan->desc = NULL;
+ chan->xfer_err = false;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return 0;
+}
+
+static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+ u32 v;
+
+ v = PDMA_CLAIM_MASK |
+ PDMA_ENABLE_DONE_INT_MASK |
+ PDMA_ENABLE_ERR_INT_MASK |
+ PDMA_RUN_MASK;
+
+ writel(v, regs->ctrl);
+}
+
+static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
+{
+ struct sf_pdma_desc *desc = chan->desc;
+ struct pdma_regs *regs = &chan->regs;
+
+ if (!desc) {
+ dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
+ return;
+ }
+
+ writel(desc->xfer_type, regs->xfer_type);
+ writeq(desc->xfer_size, regs->xfer_size);
+ writeq(desc->dst_addr, regs->dst_addr);
+ writeq(desc->src_addr, regs->src_addr);
+
+ chan->desc = desc;
+ chan->status = DMA_IN_PROGRESS;
+ sf_pdma_enable_request(chan);
+}
+
+static void sf_pdma_issue_pending(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&chan->vchan) && chan->desc)
+ sf_pdma_xfer_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct sf_pdma_desc *desc;
+
+ desc = to_sf_pdma_desc(vdesc);
+ desc->in_use = false;
+}
+
+static void sf_pdma_donebh_tasklet(unsigned long arg)
+{
+ struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+ struct sf_pdma_desc *desc = chan->desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->xfer_err) {
+ chan->retries = MAX_RETRY;
+ chan->status = DMA_COMPLETE;
+ chan->xfer_err = false;
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+}
+
+static void sf_pdma_errbh_tasklet(unsigned long arg)
+{
+ struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+ struct sf_pdma_desc *desc = chan->desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->retries <= 0) {
+ /* fail to recover */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+ } else {
+ /* retry */
+ chan->retries--;
+ chan->xfer_err = true;
+ chan->status = DMA_ERROR;
+
+ sf_pdma_enable_request(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+}
+
+static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
+{
+ struct sf_pdma_chan *chan = dev_id;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+ u64 residue;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
+ residue = readq(regs->residue);
+
+ if (!residue) {
+ list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ } else {
+ /* submit the next transaction if possible */
+ struct sf_pdma_desc *desc = chan->desc;
+
+ desc->src_addr += desc->xfer_size - residue;
+ desc->dst_addr += desc->xfer_size - residue;
+ desc->xfer_size = residue;
+
+ sf_pdma_xfer_desc(chan);
+ }
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ tasklet_hi_schedule(&chan->done_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
+{
+ struct sf_pdma_chan *chan = dev_id;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ tasklet_schedule(&chan->err_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * sf_pdma_irq_init() - Init PDMA IRQ Handlers
+ * @pdev: pointer to the platform_device
+ * @pdma: pointer to the PDMA engine; must not be NULL
+ *
+ * Initialize the DONE and ERROR interrupt handlers for all channels. The
+ * caller must make sure the pointers passed in are non-NULL. This function
+ * should be called only once during device probe.
+ *
+ * Context: Any context.
+ *
+ * Return:
+ * * 0 - OK to init all IRQ handlers
+ * * -EINVAL - Fail to request IRQ
+ */
+static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
+{
+ int irq, r, i;
+ struct sf_pdma_chan *chan;
+
+ for (i = 0; i < pdma->n_chans; i++) {
+ chan = &pdma->chans[i];
+
+ irq = platform_get_irq(pdev, i * 2);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
+ return -EINVAL;
+ }
+
+ r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
+ dev_name(&pdev->dev), (void *)chan);
+ if (r) {
+ dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
+ return -EINVAL;
+ }
+
+ chan->txirq = irq;
+
+ irq = platform_get_irq(pdev, (i * 2) + 1);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
+ return -EINVAL;
+ }
+
+ r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
+ dev_name(&pdev->dev), (void *)chan);
+ if (r) {
+ dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
+ return -EINVAL;
+ }
+
+ chan->errirq = irq;
+ }
+
+ return 0;
+}
+
+/**
+ * sf_pdma_setup_chans() - Init settings of each channel
+ * @pdma: pointer to the PDMA engine; must not be NULL
+ *
+ * Initialize all per-channel data structures and register bases. The caller
+ * must make sure the pointer passed in is non-NULL. This function should be
+ * called only once during device probe.
+ *
+ * Context: Any context.
+ *
+ * Return: none
+ */
+static void sf_pdma_setup_chans(struct sf_pdma *pdma)
+{
+ int i;
+ struct sf_pdma_chan *chan;
+
+ INIT_LIST_HEAD(&pdma->dma_dev.channels);
+
+ for (i = 0; i < pdma->n_chans; i++) {
+ chan = &pdma->chans[i];
+
+ chan->regs.ctrl =
+ SF_PDMA_REG_BASE(i) + PDMA_CTRL;
+ chan->regs.xfer_type =
+ SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
+ chan->regs.xfer_size =
+ SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
+ chan->regs.dst_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
+ chan->regs.src_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
+ chan->regs.act_type =
+ SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
+ chan->regs.residue =
+ SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
+ chan->regs.cur_dst_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
+ chan->regs.cur_src_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;
+
+ chan->pdma = pdma;
+ chan->pm_state = RUNNING;
+ chan->slave_id = i;
+ chan->xfer_err = false;
+ spin_lock_init(&chan->lock);
+
+ chan->vchan.desc_free = sf_pdma_free_desc;
+ vchan_init(&chan->vchan, &pdma->dma_dev);
+
+ writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);
+
+ tasklet_init(&chan->done_tasklet,
+ sf_pdma_donebh_tasklet, (unsigned long)chan);
+ tasklet_init(&chan->err_tasklet,
+ sf_pdma_errbh_tasklet, (unsigned long)chan);
+ }
+}
+
+static int sf_pdma_probe(struct platform_device *pdev)
+{
+ struct sf_pdma *pdma;
+ struct sf_pdma_chan *chan;
+ struct resource *res;
+ int len, chans;
+ int ret;
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
+ DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
+ DMA_SLAVE_BUSWIDTH_64_BYTES;
+
+ chans = PDMA_NR_CH;
+ len = sizeof(*pdma) + sizeof(*chan) * chans;
+ pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!pdma)
+ return -ENOMEM;
+
+ pdma->n_chans = chans;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdma->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdma->membase))
+ goto ERR_MEMBASE;
+
+ ret = sf_pdma_irq_init(pdev, pdma);
+ if (ret)
+ goto ERR_INITIRQ;
+
+ sf_pdma_setup_chans(pdma);
+
+ pdma->dma_dev.dev = &pdev->dev;
+
+ /* Setup capability */
+ dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
+ pdma->dma_dev.copy_align = 2;
+ pdma->dma_dev.src_addr_widths = widths;
+ pdma->dma_dev.dst_addr_widths = widths;
+ pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
+ pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ pdma->dma_dev.descriptor_reuse = true;
+
+ /* Setup DMA APIs */
+ pdma->dma_dev.device_alloc_chan_resources =
+ sf_pdma_alloc_chan_resources;
+ pdma->dma_dev.device_free_chan_resources =
+ sf_pdma_free_chan_resources;
+ pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
+ pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
+ pdma->dma_dev.device_config = sf_pdma_slave_config;
+ pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
+ pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;
+
+ platform_set_drvdata(pdev, pdma);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ dev_warn(&pdev->dev,
+ "Failed to set DMA mask. Fall back to default.\n");
+
+ ret = dma_async_device_register(&pdma->dma_dev);
+ if (ret)
+ goto ERR_REG_DMADEVICE;
+
+ return 0;
+
+ERR_MEMBASE:
+ return PTR_ERR(pdma->membase);
+
+ERR_INITIRQ:
+ return ret;
+
+ERR_REG_DMADEVICE:
+ dev_err(&pdev->dev,
+ "Can't register SiFive Platform DMA. (%d)\n", ret);
+ return ret;
+}
+
+static int sf_pdma_remove(struct platform_device *pdev)
+{
+ struct sf_pdma *pdma = platform_get_drvdata(pdev);
+ struct sf_pdma_chan *ch;
+ int i;
+
+ for (i = 0; i < PDMA_NR_CH; i++) {
+ ch = &pdma->chans[i];
+
+ devm_free_irq(&pdev->dev, ch->txirq, ch);
+ devm_free_irq(&pdev->dev, ch->errirq, ch);
+ list_del(&ch->vchan.chan.device_node);
+ tasklet_kill(&ch->vchan.task);
+ tasklet_kill(&ch->done_tasklet);
+ tasklet_kill(&ch->err_tasklet);
+ }
+
+ dma_async_device_unregister(&pdma->dma_dev);
+
+ return 0;
+}
+
+static const struct of_device_id sf_pdma_dt_ids[] = {
+ { .compatible = "sifive,fu540-c000-pdma" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
+
+static struct platform_driver sf_pdma_driver = {
+ .probe = sf_pdma_probe,
+ .remove = sf_pdma_remove,
+ .driver = {
+ .name = "sf-pdma",
+ .of_match_table = of_match_ptr(sf_pdma_dt_ids),
+ },
+};
+
+static int __init sf_pdma_init(void)
+{
+ return platform_driver_register(&sf_pdma_driver);
+}
+
+static void __exit sf_pdma_exit(void)
+{
+ platform_driver_unregister(&sf_pdma_driver);
+}
+
+/* do early init */
+subsys_initcall(sf_pdma_init);
+module_exit(sf_pdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SiFive Platform DMA driver");
+MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
new file mode 100644
index 000000000000..0c20167b097d
--- /dev/null
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SiFive FU540 Platform DMA driver
+ * Copyright (C) 2019 SiFive
+ *
+ * Based partially on:
+ * - drivers/dma/fsl-edma.c
+ * - drivers/dma/dw-edma/
+ * - drivers/dma/pxa-dma.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 12 "Platform DMA Engine (PDMA)" of
+ * SiFive FU540-C000 v1.0
+ * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+#ifndef _SF_PDMA_H
+#define _SF_PDMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+#define PDMA_NR_CH 4
+
+#if (PDMA_NR_CH != 4)
+#error "Please define PDMA_NR_CH to 4"
+#endif
+
+#define PDMA_BASE_ADDR 0x3000000
+#define PDMA_CHAN_OFFSET 0x1000
+
+/* Register Offset */
+#define PDMA_CTRL 0x000
+#define PDMA_XFER_TYPE 0x004
+#define PDMA_XFER_SIZE 0x008
+#define PDMA_DST_ADDR 0x010
+#define PDMA_SRC_ADDR 0x018
+#define PDMA_ACT_TYPE 0x104 /* Read-only */
+#define PDMA_REMAINING_BYTE 0x108 /* Read-only */
+#define PDMA_CUR_DST_ADDR 0x110 /* Read-only */
+#define PDMA_CUR_SRC_ADDR 0x118 /* Read-only */
+
+/* CTRL */
+#define PDMA_CLEAR_CTRL 0x0
+#define PDMA_CLAIM_MASK GENMASK(0, 0)
+#define PDMA_RUN_MASK GENMASK(1, 1)
+#define PDMA_ENABLE_DONE_INT_MASK GENMASK(14, 14)
+#define PDMA_ENABLE_ERR_INT_MASK GENMASK(15, 15)
+#define PDMA_DONE_STATUS_MASK GENMASK(30, 30)
+#define PDMA_ERR_STATUS_MASK GENMASK(31, 31)
+
+/* Transfer Type */
+#define PDMA_FULL_SPEED 0xFF000008
+
+/* Error Recovery */
+#define MAX_RETRY 1
+
+#define SF_PDMA_REG_BASE(ch) (pdma->membase + (PDMA_CHAN_OFFSET * (ch)))
+
+struct pdma_regs {
+ /* read-write regs */
+ void __iomem *ctrl; /* 4 bytes */
+
+ void __iomem *xfer_type; /* 4 bytes */
+ void __iomem *xfer_size; /* 8 bytes */
+ void __iomem *dst_addr; /* 8 bytes */
+ void __iomem *src_addr; /* 8 bytes */
+
+ /* read-only */
+ void __iomem *act_type; /* 4 bytes */
+ void __iomem *residue; /* 8 bytes */
+ void __iomem *cur_dst_addr; /* 8 bytes */
+ void __iomem *cur_src_addr; /* 8 bytes */
+};
+
+struct sf_pdma_desc {
+ u32 xfer_type;
+ u64 xfer_size;
+ u64 dst_addr;
+ u64 src_addr;
+ struct virt_dma_desc vdesc;
+ struct sf_pdma_chan *chan;
+ bool in_use;
+ enum dma_transfer_direction dirn;
+ struct dma_async_tx_descriptor *async_tx;
+};
+
+enum sf_pdma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
+
+struct sf_pdma_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ enum sf_pdma_pm_state pm_state;
+ u32 slave_id;
+ struct sf_pdma *pdma;
+ struct sf_pdma_desc *desc;
+ struct dma_slave_config cfg;
+ u32 attr;
+ dma_addr_t dma_dev_addr;
+ u32 dma_dev_size;
+ struct tasklet_struct done_tasklet;
+ struct tasklet_struct err_tasklet;
+ struct pdma_regs regs;
+ spinlock_t lock; /* protect chan data */
+ bool xfer_err;
+ int txirq;
+ int errirq;
+ int retries;
+};
+
+struct sf_pdma {
+ struct dma_device dma_dev;
+ void __iomem *membase;
+ void __iomem *mappedbase;
+ u32 n_chans;
+ struct sf_pdma_chan chans[PDMA_NR_CH];
+};
+
+#endif /* _SF_PDMA_H */
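As a quick cross-check of the register map above, a small sketch of how one channel's registers are located inside the mapped region. It mirrors what sf_pdma_setup_chans() does in the driver; the helper name and the numeric example are illustrative, not part of the header.

/*
 * Illustrative only: with PDMA_CHAN_OFFSET = 0x1000, channel 2's control
 * register sits at membase + 0x2000 + PDMA_CTRL and its remaining-byte
 * counter at membase + 0x2000 + PDMA_REMAINING_BYTE.
 */
static inline void __iomem *example_pdma_chan_reg(void __iomem *membase,
						  unsigned int ch,
						  unsigned long offset)
{
	return membase + ch * PDMA_CHAN_OFFSET + offset;
}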
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 3993ab65c62c..f06016d38a05 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -203,19 +203,27 @@ struct rcar_dmac {
unsigned int n_channels;
struct rcar_dmac_chan *channels;
- unsigned int channels_mask;
+ u32 channels_mask;
DECLARE_BITMAP(modules, 256);
};
#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
+/*
+ * struct rcar_dmac_of_data - This driver's OF data
+ * @chan_offset_base: DMAC channels base offset
+ * @chan_offset_stride: DMAC channels offset stride
+ */
+struct rcar_dmac_of_data {
+ u32 chan_offset_base;
+ u32 chan_offset_stride;
+};
+
/* -----------------------------------------------------------------------------
* Registers
*/
-#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
-
#define RCAR_DMAISTA 0x0020
#define RCAR_DMASEC 0x0030
#define RCAR_DMAOR 0x0060
@@ -1726,6 +1734,7 @@ static const struct dev_pm_ops rcar_dmac_pm = {
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
struct rcar_dmac_chan *rchan,
+ const struct rcar_dmac_of_data *data,
unsigned int index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
@@ -1735,7 +1744,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
int ret;
rchan->index = index;
- rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
+ rchan->iomem = dmac->iomem + data->chan_offset_base +
+ data->chan_offset_stride * index;
rchan->mid_rid = -EINVAL;
spin_lock_init(&rchan->lock);
@@ -1800,7 +1810,15 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
return -EINVAL;
}
+ /*
+ * If the driver is unable to read the dma-channel-mask property,
+ * it assumes that all channels can be used.
+ */
dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+ of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);
+
+ /* Clear any mask bits that refer to non-existent channels */
+ dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);
return 0;
}
@@ -1813,10 +1831,14 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
struct dma_device *engine;
struct rcar_dmac *dmac;
- struct resource *mem;
+ const struct rcar_dmac_of_data *data;
unsigned int i;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
return -ENOMEM;
@@ -1848,8 +1870,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);
@@ -1901,7 +1922,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
if (!(dmac->channels_mask & BIT(i)))
continue;
- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
if (ret < 0)
goto error;
}
@@ -1948,8 +1969,16 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
rcar_dmac_stop_all_chan(dmac);
}
+static const struct rcar_dmac_of_data rcar_dmac_data = {
+ .chan_offset_base = 0x8000,
+ .chan_offset_stride = 0x80,
+};
+
static const struct of_device_id rcar_dmac_of_ids[] = {
- { .compatible = "renesas,rcar-dmac", },
+ {
+ .compatible = "renesas,rcar-dmac",
+ .data = &rcar_dmac_data,
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
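Because the channel offsets now come from OF match data, supporting a future DMAC variant with a different register layout only needs one more data entry and compatible string. A hedged sketch follows; the second compatible and its offsets are invented purely for illustration and do not describe any real SoC.

/* Hypothetical example: the values below are not from any real device. */
static const struct rcar_dmac_of_data example_dmac_data = {
	.chan_offset_base	= 0x0,		/* channels start at the block base */
	.chan_offset_stride	= 0x1000,	/* one 4 KiB window per channel */
};

static const struct of_device_id example_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", .data = &rcar_dmac_data },
	{ .compatible = "vendor,example-dmac", .data = &example_dmac_data },
	{ /* Sentinel */ }
};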
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 8546ad034720..9a31a315dbef 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -99,6 +99,7 @@
/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0)
+#define SPRD_DMA_WRAP_ADDR_MASK GENMASK(27, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET 4
/* SPRD_DMA_CHN_INTC register definition */
@@ -118,6 +119,8 @@
#define SPRD_DMA_SWT_MODE_OFFSET 26
#define SPRD_DMA_REQ_MODE_OFFSET 24
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
+#define SPRD_DMA_WRAP_SEL_DEST BIT(23)
+#define SPRD_DMA_WRAP_EN BIT(22)
#define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20
#define SPRD_DMA_LLIST_END BIT(19)
@@ -804,6 +807,8 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
+ temp |= schan->linklist.wrap_addr ?
+ SPRD_DMA_WRAP_EN | SPRD_DMA_WRAP_SEL_DEST : 0;
temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
hw->frg_len = temp;
@@ -831,6 +836,12 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
hw->llist_ptr = lower_32_bits(llist_ptr);
hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
SPRD_DMA_LLIST_HIGH_MASK;
+
+ if (schan->linklist.wrap_addr) {
+ hw->wrap_ptr |= schan->linklist.wrap_addr &
+ SPRD_DMA_WRAP_ADDR_MASK;
+ hw->wrap_to |= dst & SPRD_DMA_WRAP_ADDR_MASK;
+ }
} else {
hw->llist_ptr = 0;
hw->src_blk_step = 0;
@@ -939,9 +950,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
schan->linklist.phy_addr = ll_cfg->phy_addr;
schan->linklist.virt_addr = ll_cfg->virt_addr;
+ schan->linklist.wrap_addr = ll_cfg->wrap_addr;
} else {
schan->linklist.phy_addr = 0;
schan->linklist.virt_addr = 0;
+ schan->linklist.wrap_addr = 0;
}
/*
@@ -1080,7 +1093,6 @@ static int sprd_dma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct sprd_dma_dev *sdev;
struct sprd_dma_chn *dma_chn;
- struct resource *res;
u32 chn_count;
int ret, i;
@@ -1126,8 +1138,7 @@ static int sprd_dma_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
+ sdev->glb_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->glb_base))
return PTR_ERR(sdev->glb_base);
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ba7c4f07fcd6..756a3c951dc7 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -260,6 +260,13 @@ struct edma_cc {
*/
unsigned long *slot_inuse;
+ /*
+ * For tracking channels reserved for use by the DSP.
+ * If a bit is cleared, the channel is allocated to the DSP
+ * and Linux must not touch it.
+ */
+ unsigned long *channels_mask;
+
struct dma_device dma_slave;
struct dma_device *dma_memcpy;
struct edma_chan *slave_chans;
@@ -716,6 +723,12 @@ static int edma_alloc_channel(struct edma_chan *echan,
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ if (!test_bit(echan->ch_num, ecc->channels_mask)) {
+ dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n",
+ echan->ch_num);
+ return -EINVAL;
+ }
+
/* ensure access through shadow region 0 */
edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
EDMA_CHANNEL_BIT(channel));
@@ -2249,10 +2262,8 @@ static int edma_probe(struct platform_device *pdev)
{
struct edma_soc_info *info = pdev->dev.platform_data;
s8 (*queue_priority_mapping)[2];
- int i, off;
- const s16 (*rsv_slots)[2];
- const s16 (*xbar_chans)[2];
- int irq;
+ const s16 (*reserved)[2];
+ int i, irq;
char *irq_name;
struct resource *mem;
struct device_node *node = pdev->dev.of_node;
@@ -2331,15 +2342,32 @@ static int edma_probe(struct platform_device *pdev)
if (!ecc->slot_inuse)
return -ENOMEM;
+ ecc->channels_mask = devm_kcalloc(dev,
+ BITS_TO_LONGS(ecc->num_channels),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!ecc->channels_mask)
+ return -ENOMEM;
+
+ /* Mark all channels available initially */
+ bitmap_fill(ecc->channels_mask, ecc->num_channels);
+
ecc->default_queue = info->default_queue;
if (info->rsv) {
/* Set the reserved slots in inuse list */
- rsv_slots = info->rsv->rsv_slots;
- if (rsv_slots) {
- for (i = 0; rsv_slots[i][0] != -1; i++)
- bitmap_set(ecc->slot_inuse, rsv_slots[i][0],
- rsv_slots[i][1]);
+ reserved = info->rsv->rsv_slots;
+ if (reserved) {
+ for (i = 0; reserved[i][0] != -1; i++)
+ bitmap_set(ecc->slot_inuse, reserved[i][0],
+ reserved[i][1]);
+ }
+
+ /* Clear channels not usable for Linux */
+ reserved = info->rsv->rsv_chans;
+ if (reserved) {
+ for (i = 0; reserved[i][0] != -1; i++)
+ bitmap_clear(ecc->channels_mask, reserved[i][0],
+ reserved[i][1]);
}
}
@@ -2349,14 +2377,6 @@ static int edma_probe(struct platform_device *pdev)
edma_write_slot(ecc, i, &dummy_paramset);
}
- /* Clear the xbar mapped channels in unused list */
- xbar_chans = info->xbar_chans;
- if (xbar_chans) {
- for (i = 0; xbar_chans[i][1] != -1; i++) {
- off = xbar_chans[i][1];
- }
- }
-
irq = platform_get_irq_byname(pdev, "edma3_ccint");
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 0);
@@ -2399,12 +2419,15 @@ static int edma_probe(struct platform_device *pdev)
if (!ecc->legacy_mode) {
int lowest_priority = 0;
+ unsigned int array_max;
struct of_phandle_args tc_args;
ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
sizeof(*ecc->tc_list), GFP_KERNEL);
- if (!ecc->tc_list)
- return -ENOMEM;
+ if (!ecc->tc_list) {
+ ret = -ENOMEM;
+ goto err_reg1;
+ }
for (i = 0;; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
@@ -2420,6 +2443,18 @@ static int edma_probe(struct platform_device *pdev)
info->default_queue = i;
}
}
+
+ /* See if we have optional dma-channel-mask array */
+ array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
+ ret = of_property_read_variable_u32_array(node,
+ "dma-channel-mask",
+ (u32 *)ecc->channels_mask,
+ 1, array_max);
+ if (ret > 0 && ret != array_max)
+ dev_warn(dev, "dma-channel-mask is not complete.\n");
+ else if (ret == -EOVERFLOW || ret == -ENODATA)
+ dev_warn(dev,
+ "dma-channel-mask is out of range or empty\n");
}
/* Event queue priority mapping */
@@ -2437,6 +2472,10 @@ static int edma_probe(struct platform_device *pdev)
edma_dma_init(ecc, legacy_mode);
for (i = 0; i < ecc->num_channels; i++) {
+ /* Do not touch reserved channels */
+ if (!test_bit(i, ecc->channels_mask))
+ continue;
+
/* Assign all channels to the default queue */
edma_assign_channel_eventq(&ecc->slave_chans[i],
info->default_queue);
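To make the dma-channel-mask semantics above concrete: a cleared bit means the channel is reserved (for example for the DSP) and Linux must skip it. A hedged sketch, assuming a little-endian system and using the names from the patch; the devicetree values are invented for illustration.

#include <linux/bitops.h>

/*
 * Illustrative only: with
 *     dma-channel-mask = <0xffffffcf 0xffffffff>;
 * bits 4 and 5 of the first cell are cleared, so channels 4 and 5 are
 * reserved and must not be allocated by Linux.
 */
static bool example_edma_channel_usable(const unsigned long *channels_mask,
					unsigned int channel)
{
	return test_bit(channel, channels_mask);
}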
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index fde54687856b..21b8f1131d55 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -382,7 +382,6 @@ static int uniphier_mdmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_mdmac_device *mdev;
struct dma_device *ddev;
- struct resource *res;
int nr_chans, ret, i;
nr_chans = platform_irq_count(pdev);
@@ -398,8 +397,7 @@ static int uniphier_mdmac_probe(struct platform_device *pdev)
if (!mdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->reg_base = devm_ioremap_resource(dev, res);
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->reg_base))
return PTR_ERR(mdev->reg_base);
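Several hunks in this series (owl-dma, sprd-dma, rcar-dmac and uniphier-mdmac above) replace the platform_get_resource()/devm_ioremap_resource() pair with devm_platform_ioremap_resource(). A sketch of the pattern being replaced, assuming only that the helper is shorthand for these two calls and likewise returns an ERR_PTR() on failure:

#include <linux/io.h>
#include <linux/platform_device.h>

/* Open-coded equivalent of devm_platform_ioremap_resource(pdev, index). */
static void __iomem *example_ioremap_resource(struct platform_device *pdev,
					      unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}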
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5d56f1e4d332..a9c5d5cc9f2b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -25,6 +25,12 @@
* The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
* Access (DMA) between a memory-mapped source address and a memory-mapped
* destination address.
+ *
+ * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
+ * Xilinx IP that provides high-bandwidth direct memory access between
+ * memory and AXI4-Stream target peripherals. It provides a scatter-gather
+ * (SG) interface and supports independent configuration of each channel.
+ *
*/
#include <linux/bitops.h>
@@ -173,18 +179,6 @@
#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
-/* Multi-Channel DMA Descriptor offsets*/
-#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
-#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
-
-/* Multi-Channel DMA Masks/Shifts */
-#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
-#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
-#define XILINX_DMA_BD_STRIDE_SHIFT 0
-#define XILINX_DMA_BD_VSIZE_SHIFT 19
-
/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20
@@ -194,6 +188,31 @@
#define xilinx_prep_dma_addr_t(addr) \
((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
+
+/* AXI MCDMA Specific Registers/Offsets */
+#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
+#define XILINX_MCDMA_CHEN_OFFSET 0x0008
+#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
+#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
+#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
+#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
+
+/* AXI MCDMA Specific Masks/Shifts */
+#define XILINX_MCDMA_COALESCE_SHIFT 16
+#define XILINX_MCDMA_COALESCE_MAX 24
+#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
+#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
+#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
+#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
+#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
+#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
+#define XILINX_MCDMA_BD_EOP BIT(30)
+#define XILINX_MCDMA_BD_SOP BIT(31)
+
/**
* struct xilinx_vdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
@@ -221,8 +240,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @mcdma_control: Control field for mcdma @0x10
- * @vsize_stride: Vsize and Stride field for mcdma @0x14
+ * @reserved1: Reserved @0x10
+ * @reserved2: Reserved @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,14 +251,38 @@ struct xilinx_axidma_desc_hw {
u32 next_desc_msb;
u32 buf_addr;
u32 buf_addr_msb;
- u32 mcdma_control;
- u32 vsize_stride;
+ u32 reserved1;
+ u32 reserved2;
u32 control;
u32 status;
u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
+ * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @rsvd: Reserved field @0x10
+ * @control: Control Information field @0x14
+ * @status: Status field @0x18
+ * @sideband_status: Status of sideband signals @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_aximcdma_desc_hw {
+ u32 next_desc;
+ u32 next_desc_msb;
+ u32 buf_addr;
+ u32 buf_addr_msb;
+ u32 rsvd;
+ u32 control;
+ u32 status;
+ u32 sideband_status;
+ u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
* struct xilinx_cdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
* @next_desc_msb: Next Descriptor Pointer MSB @0x04
@@ -286,6 +329,18 @@ struct xilinx_axidma_tx_segment {
} __aligned(64);
/**
+ * struct xilinx_aximcdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_aximcdma_tx_segment {
+ struct xilinx_aximcdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
* struct xilinx_cdma_tx_segment - Descriptor segment
* @hw: Hardware descriptor
* @node: Node in the descriptor segments list
@@ -303,12 +358,16 @@ struct xilinx_cdma_tx_segment {
* @segments: TX segments list
* @node: Node in the channel descriptors list
* @cyclic: Check for cyclic transfers.
+ * @err: Whether the descriptor has an error.
+ * @residue: Residue of the completed descriptor
*/
struct xilinx_dma_tx_descriptor {
struct dma_async_tx_descriptor async_tx;
struct list_head segments;
struct list_head node;
bool cyclic;
+ bool err;
+ u32 residue;
};
/**
@@ -339,8 +398,8 @@ struct xilinx_dma_tx_descriptor {
* @desc_pendingcount: Descriptor pending count
* @ext_addr: Indicates 64 bit addressing is supported by dma channel
* @desc_submitcount: Descriptor h/w submitted count
- * @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
+ * @seg_mv: Statically allocated segments base for MCDMA
* @seg_p: Physical allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
* @cyclic_seg_p: Physical allocated segments base for cyclic dma
@@ -376,8 +435,8 @@ struct xilinx_dma_chan {
u32 desc_pendingcount;
bool ext_addr;
u32 desc_submitcount;
- u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
+ struct xilinx_aximcdma_tx_segment *seg_mv;
dma_addr_t seg_p;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
dma_addr_t cyclic_seg_p;
@@ -393,12 +452,14 @@ struct xilinx_dma_chan {
* @XDMA_TYPE_AXIDMA: Axi dma ip.
* @XDMA_TYPE_CDMA: Axi cdma ip.
* @XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
*
*/
enum xdma_ip_type {
XDMA_TYPE_AXIDMA = 0,
XDMA_TYPE_CDMA,
XDMA_TYPE_VDMA,
+ XDMA_TYPE_AXIMCDMA
};
struct xilinx_dma_config {
@@ -406,6 +467,7 @@ struct xilinx_dma_config {
int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
struct clk **tx_clk, struct clk **txs_clk,
struct clk **rx_clk, struct clk **rxs_clk);
+ irqreturn_t (*irq_handler)(int irq, void *data);
};
/**
@@ -414,7 +476,6 @@ struct xilinx_dma_config {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
- * @mcdma: Specifies whether Multi-Channel is present or not
* @flush_on_fsync: Flush on frame sync
* @ext_addr: Indicates 64 bit addressing is supported by dma device
* @pdev: Platform device structure pointer
@@ -427,13 +488,13 @@ struct xilinx_dma_config {
* @nr_channels: Number of channels DMA device supports
* @chan_id: DMA channel identifier
* @max_buffer_len: Max buffer length
+ * @s2mm_index: S2MM channel index
*/
struct xilinx_dma_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- bool mcdma;
u32 flush_on_fsync;
bool ext_addr;
struct platform_device *pdev;
@@ -446,6 +507,7 @@ struct xilinx_dma_device {
u32 nr_channels;
u32 chan_id;
u32 max_buffer_len;
+ u32 s2mm_index;
};
/* Macros */
@@ -546,6 +608,18 @@ static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
}
}
+static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
+ struct xilinx_aximcdma_desc_hw *hw,
+ dma_addr_t buf_addr, size_t sg_used)
+{
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(buf_addr + sg_used);
+ hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
+ } else {
+ hw->buf_addr = buf_addr + sg_used;
+ }
+}
+
/* -----------------------------------------------------------------------------
* Descriptors and segments alloc and free
*/
@@ -613,6 +687,33 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
}
spin_unlock_irqrestore(&chan->lock, flags);
+ if (!segment)
+ dev_dbg(chan->dev, "Could not find free tx segment\n");
+
+ return segment;
+}
+
+/**
+ * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_aximcdma_tx_segment *
+xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_aximcdma_tx_segment *segment = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
return segment;
}
@@ -627,6 +728,17 @@ static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
hw->next_desc_msb = next_desc_msb;
}
+static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
/**
* xilinx_dma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
@@ -641,6 +753,20 @@ static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
}
/**
+ * xilinx_mcdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_aximcdma_tx_segment *
+ segment)
+{
+ xilinx_mcdma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
+}
+
+/**
* xilinx_cdma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
* @segment: DMA transaction segment
@@ -694,6 +820,7 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
struct xilinx_vdma_tx_segment *segment, *next;
struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
+ struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
if (!desc)
return;
@@ -709,12 +836,18 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
list_del(&cdma_segment->node);
xilinx_cdma_free_tx_segment(chan, cdma_segment);
}
- } else {
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
list_for_each_entry_safe(axidma_segment, axidma_next,
&desc->segments, node) {
list_del(&axidma_segment->node);
xilinx_dma_free_tx_segment(chan, axidma_segment);
}
+ } else {
+ list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
+ &desc->segments, node) {
+ list_del(&aximcdma_segment->node);
+ xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
+ }
}
kfree(desc);
@@ -783,10 +916,61 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
chan->cyclic_seg_v, chan->cyclic_seg_p);
}
- if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BD */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
+ XILINX_DMA_NUM_DESCS, chan->seg_mv,
+ chan->seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
+ chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
}
+
+}
+
+/**
+ * xilinx_dma_get_residue - Compute residue for a given descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ *
+ * Return: The number of residue bytes for the descriptor.
+ */
+static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_cdma_tx_segment *cdma_seg;
+ struct xilinx_axidma_tx_segment *axidma_seg;
+ struct xilinx_cdma_desc_hw *cdma_hw;
+ struct xilinx_axidma_desc_hw *axidma_hw;
+ struct list_head *entry;
+ u32 residue = 0;
+
+ list_for_each(entry, &desc->segments) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ cdma_seg = list_entry(entry,
+ struct xilinx_cdma_tx_segment,
+ node);
+ cdma_hw = &cdma_seg->hw;
+ residue += (cdma_hw->control - cdma_hw->status) &
+ chan->xdev->max_buffer_len;
+ } else {
+ axidma_seg = list_entry(entry,
+ struct xilinx_axidma_tx_segment,
+ node);
+ axidma_hw = &axidma_seg->hw;
+ residue += (axidma_hw->control - axidma_hw->status) &
+ chan->xdev->max_buffer_len;
+ }
+ }
+
+ return residue;
}
/**
@@ -823,7 +1007,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- struct dmaengine_desc_callback cb;
+ struct dmaengine_result result;
if (desc->cyclic) {
xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -833,14 +1017,22 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Remove from the list of running transactions */
list_del(&desc->node);
- /* Run the link descriptor callback function */
- dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (dmaengine_desc_callback_valid(&cb)) {
- spin_unlock_irqrestore(&chan->lock, flags);
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock_irqsave(&chan->lock, flags);
+ if (unlikely(desc->err)) {
+ if (chan->direction == DMA_DEV_TO_MEM)
+ result.result = DMA_TRANS_READ_FAILED;
+ else
+ result.result = DMA_TRANS_WRITE_FAILED;
+ } else {
+ result.result = DMA_TRANS_NOERROR;
}
+ result.residue = desc->residue;
+
+ /* Run the link descriptor callback function */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
+ spin_lock_irqsave(&chan->lock, flags);
+
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
@@ -922,6 +1114,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
list_add_tail(&chan->seg_v[i].node,
&chan->free_seg_list);
}
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ /* Allocate the buffer descriptors. */
+ chan->seg_mv = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->seg_mv) *
+ XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_mv) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_mv[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_mv[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_mv[i].phys = chan->seg_p +
+ sizeof(*chan->seg_v) * i;
+ list_add_tail(&chan->seg_mv[i].node,
+ &chan->free_seg_list);
+ }
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
chan->dev,
@@ -937,7 +1153,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
}
if (!chan->desc_pool &&
- (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
+ ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
+ chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
dev_err(chan->dev,
"unable to allocate channel %d descriptor pool\n",
chan->id);
@@ -1003,8 +1220,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
enum dma_status ret;
unsigned long flags;
u32 residue = 0;
@@ -1013,23 +1228,20 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
if (ret == DMA_COMPLETE || !txstate)
return ret;
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
- desc = list_last_entry(&chan->active_list,
- struct xilinx_dma_tx_descriptor, node);
- if (chan->has_sg) {
- list_for_each_entry(segment, &desc->segments, node) {
- hw = &segment->hw;
- residue += (hw->control - hw->status) &
- chan->xdev->max_buffer_len;
- }
- }
- spin_unlock_irqrestore(&chan->lock, flags);
+ desc = list_last_entry(&chan->active_list,
+ struct xilinx_dma_tx_descriptor, node);
+ /*
+ * VDMA and simple mode do not support residue reporting, so the
+ * residue field will always be 0.
+ */
+ if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+ residue = xilinx_dma_get_residue(chan, desc);
- chan->residue = residue;
- dma_set_residue(txstate, chan->residue);
- }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dma_set_residue(txstate, residue);
return ret;
}
@@ -1301,53 +1513,23 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
}
- if (chan->has_sg && !chan->xdev->mcdma)
+ if (chan->has_sg)
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
- if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_CDESC(chan->tdest),
- head_desc->async_tx.phys);
- }
- }
- }
-
xilinx_dma_start(chan);
if (chan->err)
return;
/* Start the transfer */
- if (chan->has_sg && !chan->xdev->mcdma) {
+ if (chan->has_sg) {
if (chan->cyclic)
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
chan->cyclic_seg_v->phys);
else
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
- } else if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_TDESC(chan->tdest),
- tail_segment->phys);
- }
- }
} else {
struct xilinx_axidma_tx_segment *segment;
struct xilinx_axidma_desc_hw *hw;
@@ -1371,6 +1553,76 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
}
/**
+ * xilinx_mcdma_start_transfer - Starts MCDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_axidma_tx_segment *tail_segment;
+ u32 reg;
+
+ /*
+	 * The lock is already held by the calling functions, so we don't
+	 * need to take it here again.
+ */
+
+ if (chan->err)
+ return;
+
+ if (!chan->idle)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+
+ if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
+ reg &= ~XILINX_MCDMA_COALESCE_MASK;
+ reg |= chan->desc_pendingcount <<
+ XILINX_MCDMA_COALESCE_SHIFT;
+ }
+
+ reg |= XILINX_MCDMA_IRQ_ALL_MASK;
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+ /* Program current descriptor */
+ xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
+ head_desc->async_tx.phys);
+
+ /* Program channel enable register */
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
+ reg |= BIT(chan->tdest);
+ dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
+
+ /* Start the fetch of BDs for the channel */
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+ reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
+ tail_segment->phys);
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+ chan->idle = false;
+}
+
+/**
* xilinx_dma_issue_pending - Issue pending transactions
* @dchan: DMA channel
*/
@@ -1399,6 +1651,13 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ if (chan->has_sg && chan->xdev->dma_config->dmatype !=
+ XDMA_TYPE_VDMA)
+ desc->residue = xilinx_dma_get_residue(chan, desc);
+ else
+ desc->residue = 0;
+ desc->err = chan->err;
+
list_del(&desc->node);
if (!desc->cyclic)
dma_cookie_complete(&desc->async_tx);
@@ -1433,6 +1692,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
chan->err = false;
chan->idle = true;
+ chan->desc_pendingcount = 0;
chan->desc_submitcount = 0;
return err;
@@ -1461,6 +1721,74 @@ static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
}
/**
+ * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx MCDMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dma_chan *chan = data;
+ u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
+
+ if (chan->direction == DMA_DEV_TO_MEM)
+ ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
+ else
+ ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
+
+	/* Read the channel ID that raised the interrupt */
+ chan_sermask = dma_ctrl_read(chan, ser_offset);
+ chan_id = ffs(chan_sermask);
+
+ if (!chan_id)
+ return IRQ_NONE;
+
+ if (chan->direction == DMA_DEV_TO_MEM)
+ chan_offset = chan->xdev->s2mm_index;
+
+ chan_offset = chan_offset + (chan_id - 1);
+ chan = chan->xdev->chan[chan_offset];
+ /* Read the status and ack the interrupts. */
+ status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
+ if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
+ status & XILINX_MCDMA_IRQ_ALL_MASK);
+
+ if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
+ dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
+ chan,
+ dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
+ dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
+ (chan->tdest)),
+ dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
+ (chan->tdest)));
+ chan->err = true;
+ }
+
+ if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
+ /*
+		 * The device takes too long to do the transfer when the user
+		 * requires responsiveness.
+ */
+ dev_dbg(chan->dev, "Inter-packet latency too long\n");
+ }
+
+ if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
+ spin_lock(&chan->lock);
+ xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
+ chan->start_transfer(chan);
+ spin_unlock(&chan->lock);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
* xilinx_dma_irq_handler - DMA Interrupt handler
* @irq: IRQ number
* @data: Pointer to the Xilinx DMA channel structure
@@ -1967,31 +2295,32 @@ error:
}
/**
- * xilinx_dma_prep_interleaved - prepare a descriptor for a
- * DMA_SLAVE transaction
+ * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
- * @xt: Interleaved template pointer
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
* @flags: transfer ack flags
+ * @context: APP words of the descriptor
*
* Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *
-xilinx_dma_prep_interleaved(struct dma_chan *dchan,
- struct dma_interleaved_template *xt,
- unsigned long flags)
+xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
-
- if (!is_slave_direction(xt->dir))
- return NULL;
-
- if (!xt->numf || !xt->sgl[0].size)
- return NULL;
+ struct xilinx_aximcdma_tx_segment *segment = NULL;
+ u32 *app_w = (u32 *)context;
+ struct scatterlist *sg;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
- if (xt->frame_size != 1)
+ if (!is_slave_direction(direction))
return NULL;
/* Allocate a transaction descriptor. */
@@ -1999,54 +2328,67 @@ xilinx_dma_prep_interleaved(struct dma_chan *dchan,
if (!desc)
return NULL;
- chan->direction = xt->dir;
dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- /* Get a free segment */
- segment = xilinx_axidma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
- hw = &segment->hw;
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ struct xilinx_aximcdma_desc_hw *hw;
- /* Fill in the descriptor */
- if (xt->dir != DMA_MEM_TO_DEV)
- hw->buf_addr = xt->dst_start;
- else
- hw->buf_addr = xt->src_start;
+ /* Get a free segment */
+ segment = xilinx_aximcdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
- hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
- hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
- XILINX_DMA_BD_VSIZE_MASK;
- hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
- XILINX_DMA_BD_STRIDE_MASK;
- hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+ chan->xdev->max_buffer_len);
+ hw = &segment->hw;
- /*
- * Insert the segment into the descriptor segments
- * list.
- */
- list_add_tail(&segment->node, &desc->segments);
+ /* Fill in the descriptor */
+ xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
+ sg_used);
+ hw->control = copy;
+ if (chan->direction == DMA_MEM_TO_DEV && app_w) {
+ memcpy(hw->app, app_w, sizeof(u32) *
+ XILINX_DMA_NUM_APP_WORDS);
+ }
+
+ sg_used += copy;
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
segment = list_first_entry(&desc->segments,
- struct xilinx_axidma_tx_segment, node);
+ struct xilinx_aximcdma_tx_segment, node);
desc->async_tx.phys = segment->phys;
/* For the last DMA_MEM_TO_DEV transfer, set EOP */
- if (xt->dir == DMA_MEM_TO_DEV) {
- segment->hw.control |= XILINX_DMA_BD_SOP;
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_MCDMA_BD_SOP;
segment = list_last_entry(&desc->segments,
- struct xilinx_axidma_tx_segment,
+ struct xilinx_aximcdma_tx_segment,
node);
- segment->hw.control |= XILINX_DMA_BD_EOP;
+ segment->hw.control |= XILINX_MCDMA_BD_EOP;
}
return &desc->async_tx;
error:
xilinx_dma_free_tx_descriptor(chan, desc);
+
return NULL;
}
@@ -2194,7 +2536,9 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
+ err);
return err;
}
@@ -2259,14 +2603,18 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
+ err);
return err;
}
*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
if (IS_ERR(*dev_clk)) {
err = PTR_ERR(*dev_clk);
- dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
+ err);
return err;
}
@@ -2299,7 +2647,9 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
+ err);
return err;
}
@@ -2321,7 +2671,8 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
err = clk_prepare_enable(*axi_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
+ err);
return err;
}
@@ -2454,6 +2805,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
"xlnx,axi-dma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
chan->id = chan_id;
+ xdev->s2mm_index = xdev->nr_channels;
chan->tdest = chan_id - xdev->nr_channels;
chan->has_vflip = of_property_read_bool(node,
"xlnx,enable-vert-flip");
@@ -2463,7 +2815,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
XILINX_VDMA_ENABLE_VERTICAL_FLIP;
}
- chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+ chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
+ else
+ chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
chan->config.park = 1;
@@ -2478,9 +2834,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
}
/* Request the interrupt */
- chan->irq = irq_of_parse_and_map(node, 0);
- err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
- "xilinx-dma-controller", chan);
+ chan->irq = irq_of_parse_and_map(node, chan->tdest);
+ err = request_irq(chan->irq, xdev->dma_config->irq_handler,
+ IRQF_SHARED, "xilinx-dma-controller", chan);
if (err) {
dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
return err;
@@ -2489,6 +2845,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
chan->start_transfer = xilinx_dma_start_transfer;
chan->stop_transfer = xilinx_dma_stop_transfer;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ chan->start_transfer = xilinx_mcdma_start_transfer;
+ chan->stop_transfer = xilinx_dma_stop_transfer;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->start_transfer = xilinx_cdma_start_transfer;
chan->stop_transfer = xilinx_cdma_stop_transfer;
@@ -2545,7 +2904,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
int ret, i, nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
- if ((ret < 0) && xdev->mcdma)
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
dev_warn(xdev->dev, "missing dma-channels property\n");
for (i = 0; i < nr_channels; i++)
@@ -2578,22 +2937,31 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
static const struct xilinx_dma_config axidma_config = {
.dmatype = XDMA_TYPE_AXIDMA,
.clk_init = axidma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
+static const struct xilinx_dma_config aximcdma_config = {
+ .dmatype = XDMA_TYPE_AXIMCDMA,
+ .clk_init = axidma_clk_init,
+ .irq_handler = xilinx_mcdma_irq_handler,
+};
static const struct xilinx_dma_config axicdma_config = {
.dmatype = XDMA_TYPE_CDMA,
.clk_init = axicdma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
static const struct xilinx_dma_config axivdma_config = {
.dmatype = XDMA_TYPE_VDMA,
.clk_init = axivdma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
static const struct of_device_id xilinx_dma_of_ids[] = {
{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+ { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
@@ -2612,7 +2980,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
- struct resource *io;
u32 num_frames, addr_width, len_width;
int i, err;
@@ -2638,16 +3005,15 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return err;
/* Request and map I/O memory */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+ xdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->regs))
return PTR_ERR(xdev->regs);
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
+ xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
if (!of_property_read_u32(node, "xlnx,sg-length-width",
&len_width)) {
if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
@@ -2712,14 +3078,17 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
- xdev->common.device_prep_interleaved_dma =
- xilinx_dma_prep_interleaved;
- /* Residue calculation is supported by only AXI DMA */
+		/* Residue calculation is supported only by AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+		/* Residue calculation is supported only by AXI DMA and CDMA */
+ xdev->common.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
} else {
xdev->common.device_prep_interleaved_dma =
xilinx_vdma_dma_prep_interleaved;
@@ -2755,6 +3124,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+ dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
else
dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
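The residue bookkeeping introduced above reduces to a simple per-segment computation. A minimal sketch, assuming the driver's control/status/max_buffer_len fields; the helper name is illustrative and not part of the patch:

/*
 * Illustrative helper (not in the driver): the residue of one hardware
 * segment is the programmed length minus the completed length, masked
 * to the engine's maximum transfer length.
 * E.g. control = 4096, status = 1024, max_buffer_len = 0x3ffffff
 * leaves 3072 bytes outstanding.
 */
static inline u32 segment_residue(u32 control, u32 status, u32 max_buffer_len)
{
	return (control - status) & max_buffer_len;
}

xilinx_dma_get_residue() sums this quantity over a descriptor's segments, and xilinx_dma_tx_status() reports the total for the last active descriptor via dma_set_residue().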
diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c
index 9f4436f7c914..5fe2e8b9a7b8 100644
--- a/drivers/dma/zx_dma.c
+++ b/drivers/dma/zx_dma.c
@@ -754,18 +754,13 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
static int zx_dma_probe(struct platform_device *op)
{
struct zx_dma_dev *d;
- struct resource *iores;
int i, ret = 0;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
- d->base = devm_ioremap_resource(&op->dev, iores);
+ d->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(d->base))
return PTR_ERR(d->base);
@@ -894,7 +889,6 @@ static int zx_dma_remove(struct platform_device *op)
list_del(&c->vc.chan.device_node);
}
clk_disable_unprepare(d->clk);
- dmam_pool_destroy(d->pool);
return 0;
}
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 415afaf479e7..a7f216191493 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -322,6 +322,25 @@ static void axp288_put_role_sw(void *data)
usb_role_switch_put(info->role_sw);
}
+static int axp288_extcon_find_role_sw(struct axp288_extcon_info *info)
+{
+ const struct software_node *swnode;
+ struct fwnode_handle *fwnode;
+
+ if (!x86_match_cpu(cherry_trail_cpu_ids))
+ return 0;
+
+ swnode = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!swnode)
+ return -EPROBE_DEFER;
+
+ fwnode = software_node_fwnode(swnode);
+ info->role_sw = usb_role_switch_find_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return info->role_sw ? 0 : -EPROBE_DEFER;
+}
+
static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
@@ -343,9 +362,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- info->role_sw = usb_role_switch_get(dev);
- if (IS_ERR(info->role_sw))
- return PTR_ERR(info->role_sw);
+ ret = axp288_extcon_find_role_sw(info);
+ if (ret)
+ return ret;
+
if (info->role_sw) {
ret = devm_add_action_or_reset(dev, axp288_put_role_sw, info);
if (ret)
@@ -440,26 +460,14 @@ static struct platform_driver axp288_extcon_driver = {
},
};
-static struct device_connection axp288_extcon_role_sw_conn = {
- .endpoint[0] = "axp288_extcon",
- .endpoint[1] = "intel_xhci_usb_sw-role-switch",
- .id = "usb-role-switch",
-};
-
static int __init axp288_extcon_init(void)
{
- if (x86_match_cpu(cherry_trail_cpu_ids))
- device_connection_add(&axp288_extcon_role_sw_conn);
-
return platform_driver_register(&axp288_extcon_driver);
}
module_init(axp288_extcon_init);
static void __exit axp288_extcon_exit(void)
{
- if (x86_match_cpu(cherry_trail_cpu_ids))
- device_connection_remove(&axp288_extcon_role_sw_conn);
-
platform_driver_unregister(&axp288_extcon_driver);
}
module_exit(axp288_extcon_exit);
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 9d32150e68db..771f6f4cf92e 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -338,6 +338,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct cht_wc_extcon_data *ext;
unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK);
+ int pwrsrc_sts, id;
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -387,8 +388,19 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
goto disable_sw_control;
}
- /* Route D+ and D- to PMIC for initial charger detection */
- cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
+ ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_STS, &pwrsrc_sts);
+ if (ret) {
+ dev_err(ext->dev, "Error reading pwrsrc status: %d\n", ret);
+ goto disable_sw_control;
+ }
+
+ /*
+	 * If no USB host or device is connected, route D+ and D- to the PMIC
+	 * for initial charger detection
+ */
+ id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
+ if (id != INTEL_USB_ID_GND)
+ cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
/* Get initial state */
cht_wc_extcon_pwrsrc_event(ext);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index dc43847ad2b0..bcf65aaca5d2 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -65,6 +65,10 @@ struct sm5502_muic_info {
/* Default value of SM5502 register to bring up MUIC device. */
static struct reg_data sm5502_reg_data[] = {
{
+ .reg = SM5502_REG_RESET,
+ .val = SM5502_REG_RESET_MASK,
+ .invert = true,
+ }, {
.reg = SM5502_REG_CONTROL,
.val = SM5502_REG_CONTROL_MASK_INT_MASK,
.invert = false,
@@ -272,7 +276,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
/* Return cable type of attached or detached accessories */
static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
{
- unsigned int cable_type = -1, adc, dev_type1;
+ unsigned int cable_type, adc, dev_type1;
int ret;
/* Read ADC value according to external cable or button */
diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
index 9dbb634d213b..ce1f1ec310c4 100644
--- a/drivers/extcon/extcon-sm5502.h
+++ b/drivers/extcon/extcon-sm5502.h
@@ -237,6 +237,8 @@ enum sm5502_reg {
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define SM5502_REG_RESET_MASK (0x1)
+
/* SM5502 Interrupts */
enum sm5502_irq {
/* INT1 */
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 1da7ba18d399..6e291d8f3a27 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1646,14 +1646,6 @@ static long fw_device_op_ioctl(struct file *file,
return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
-#ifdef CONFIG_COMPAT
-static long fw_device_op_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
-}
-#endif
-
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
struct client *client = file->private_data;
@@ -1694,7 +1686,8 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
if (ret < 0)
goto fail;
- ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+ ret = vm_map_pages_zero(vma, client->buffer.pages,
+ client->buffer.page_count);
if (ret < 0)
goto fail;
@@ -1795,7 +1788,5 @@ const struct file_operations fw_device_ops = {
.mmap = fw_device_op_mmap,
.release = fw_device_op_release,
.poll = fw_device_op_poll,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = fw_device_op_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index df8a56a979b9..185b0b78b3d6 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -91,13 +91,6 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
}
EXPORT_SYMBOL(fw_iso_buffer_init);
-int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
- struct vm_area_struct *vma)
-{
- return vm_map_pages_zero(vma, buffer->pages,
- buffer->page_count);
-}
-
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0f0bed3a4bbb..4b0e4ee655a1 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -158,8 +158,6 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction);
-int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
- struct vm_area_struct *vma);
/* -topology */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 522f3addb5bd..33269316f111 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1752,7 +1752,7 @@ static u32 update_bus_time(struct fw_ohci *ohci)
if (unlikely(!ohci->bus_time_running)) {
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
- ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
+ ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
(cycle_time_seconds & 0x40);
ohci->bus_time_running = true;
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 1e21fc3e9851..2045566d622f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -35,6 +35,7 @@ static struct dmi_memdev_info {
const char *bank;
u64 size; /* bytes */
u16 handle;
+	u8 type;		/* DDR2, DDR3, DDR4, etc. */
} *dmi_memdev;
static int dmi_memdev_nr;
@@ -391,7 +392,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
u64 bytes;
u16 size;
- if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
+ if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x13)
return;
if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -400,6 +401,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
dmi_memdev[nr].handle = get_unaligned(&dm->handle);
dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
+ dmi_memdev[nr].type = d[0x12];
size = get_unaligned((u16 *)&d[0xC]);
if (size == 0)
@@ -1128,3 +1130,40 @@ u64 dmi_memdev_size(u16 handle)
return ~0ull;
}
EXPORT_SYMBOL_GPL(dmi_memdev_size);
+
+/**
+ * dmi_memdev_type - get the memory type
+ * @handle: DMI structure handle
+ *
+ * Return the DMI memory type of the module in the slot associated with the
+ * given DMI handle, or 0x0 if no such DMI handle exists.
+ */
+u8 dmi_memdev_type(u16 handle)
+{
+ int n;
+
+ if (dmi_memdev) {
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle)
+ return dmi_memdev[n].type;
+ }
+ }
+ return 0x0; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_type);
+
+/**
+ * dmi_memdev_handle - get the DMI handle of a memory slot
+ * @slot: slot number
+ *
+ * Return the DMI handle associated with a given memory slot, or %0xFFFF
+ * if there is no such slot.
+ */
+u16 dmi_memdev_handle(int slot)
+{
+ if (dmi_memdev && slot >= 0 && slot < dmi_memdev_nr)
+ return dmi_memdev[slot].handle;
+
+ return 0xffff; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_handle);
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 0e206c9e0d7a..5ccf39986a14 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -53,7 +53,8 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
for (i = 0; i < dev_header->prop_count; i++) {
int remaining = dev_header->len - (ptr - (void *)dev_header);
- u32 key_len, val_len;
+ u32 key_len, val_len, entry_len;
+ const u8 *entry_data;
char *key;
if (sizeof(key_len) > remaining)
@@ -85,17 +86,14 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
ucs2_as_utf8(key, ptr + sizeof(key_len),
key_len - sizeof(key_len));
- entry[i].name = key;
- entry[i].length = val_len - sizeof(val_len);
- entry[i].is_array = !!entry[i].length;
- entry[i].type = DEV_PROP_U8;
- entry[i].pointer.u8_data = ptr + key_len + sizeof(val_len);
-
+ entry_data = ptr + key_len + sizeof(val_len);
+ entry_len = val_len - sizeof(val_len);
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
if (dump_properties) {
- dev_info(dev, "property: %s\n", entry[i].name);
+ dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
- 16, 1, entry[i].pointer.u8_data,
- entry[i].length, true);
+ 16, 1, entry_data, entry_len, true);
}
ptr += key_len + val_len;
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 215061c581e1..bee8729525ec 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -614,3 +614,8 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
addr, val);
}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
+{
+ return -ENODEV;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 91d5ad7cf58b..e1cd933ea9ae 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -62,32 +62,72 @@ static DEFINE_MUTEX(qcom_scm_lock);
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
-*/
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res)
+static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, u32 type)
+{
+ u64 cmd;
+ struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+ cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
+ ARM_SMCCC_OWNER_SIP, fn_id);
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2], x5,
+ quirk.state.a6, 0, res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ cmd = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, bool atomic)
+{
+ int retry_count = 0;
+
+ if (atomic) {
+ __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
+ return;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ __qcom_scm_call_do(desc, res, fn_id, x5,
+ ARM_SMCCC_STD_CALL);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int retry_count = 0, i;
+ int i;
u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
- u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+ u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
- struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
@@ -117,46 +157,56 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
x5 = args_phys;
}
- do {
- mutex_lock(&qcom_scm_lock);
-
- cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
- qcom_smccc_convention,
- ARM_SMCCC_OWNER_SIP, fn_id);
-
- quirk.state.a6 = 0;
-
- do {
- arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
- desc->args[1], desc->args[2], x5,
- quirk.state.a6, 0, res, &quirk);
-
- if (res->a0 == QCOM_SCM_INTERRUPTED)
- cmd = res->a0;
-
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-
- mutex_unlock(&qcom_scm_lock);
-
- if (res->a0 == QCOM_SCM_V2_EBUSY) {
- if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
- break;
- msleep(QCOM_SCM_EBUSY_WAIT_MS);
- }
- } while (res->a0 == QCOM_SCM_V2_EBUSY);
+ qcom_scm_call_do(desc, res, fn_id, x5, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
- if (res->a0 < 0)
+ if ((long)res->a0 < 0)
return qcom_scm_remap_error(res->a0);
return 0;
}
/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ might_sleep();
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
+}
+
+/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
@@ -502,3 +552,16 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res);
}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
+ desc.args[1] = en;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
+ QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
+}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 4802ab170fe5..a729e05c21b8 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -345,6 +345,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+ return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
return __qcom_scm_io_readl(__scm->dev, addr, val);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 99506bd873c0..baee744dbcfe 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -91,10 +91,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare);
#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
+#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_CONFIG_ERRATA1 0x3
+#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare);
+extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
+ bool enable);
#define QCOM_MEM_PROT_ASSIGN_ID 0x16
extern int __qcom_scm_assign_mem(struct device *dev,
phys_addr_t mem_region, size_t mem_sz,
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index bb008c019920..f8533338b018 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -20,7 +20,6 @@
#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
-#define RSU_FW_VERSION_MASK GENMASK_ULL(15, 0)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
@@ -109,9 +108,12 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
{
struct stratix10_rsu_priv *priv = client->priv;
- if (data->status != BIT(SVC_STATUS_RSU_OK))
- dev_err(client->dev, "RSU returned status is %i\n",
- data->status);
+ if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support notify\n");
+ else if (data->status == BIT(SVC_STATUS_RSU_ERROR))
+ dev_err(client->dev, "Failure, returned status is %lu\n",
+ BIT(data->status));
+
complete(&priv->completion);
}
@@ -133,9 +135,11 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_RSU_OK))
priv->retry_counter = *counter;
+ else if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
- dev_err(client->dev, "Failed to get retry counter %i\n",
- data->status);
+ dev_err(client->dev, "Failed to get retry counter %lu\n",
+ BIT(data->status));
complete(&priv->completion);
}
@@ -333,15 +337,10 @@ static ssize_t notify_store(struct device *dev,
return ret;
}
- /* only 19.3 or late version FW supports retry counter feature */
- if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY,
- 0, rsu_retry_callback);
- if (ret) {
- dev_err(dev,
- "Error, getting RSU retry %i\n", ret);
- return ret;
- }
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ return ret;
}
return count;
@@ -413,15 +412,10 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
- /* only 19.3 or late version FW supports retry counter feature */
- if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
- rsu_retry_callback);
- if (ret) {
- dev_err(dev,
- "Error, getting RSU retry %i\n", ret);
- stratix10_svc_free_channel(priv->chan);
- }
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
}
return ret;
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index b485321189e1..c6c31402848d 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -493,8 +493,24 @@ static int svc_normal_to_secure_thread(void *data)
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
break;
default:
- pr_warn("it shouldn't happen\n");
+ pr_warn("Secure firmware doesn't support...\n");
+
+ /*
+ * be compatible with older version firmware which
+ * doesn't support RSU notify or retry
+ */
+ if ((pdata->command == COMMAND_RSU_RETRY) ||
+ (pdata->command == COMMAND_RSU_NOTIFY)) {
+ cbdata->status =
+ BIT(SVC_STATUS_RSU_NO_SUPPORT);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(
+ pdata->chan->scl, cbdata);
+ }
break;
+
}
};
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 73c779e920ed..72380e1d31c7 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
config FPGA_DFL_FME
tristate "FPGA DFL FME Driver"
- depends on FPGA_DFL
+ depends on FPGA_DFL && HWMON
help
The FPGA Management Engine (FME) is a feature device implemented
under Device Feature List (DFL) framework. Select this option to
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 4d78e182878f..7c930e6b314d 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -14,6 +14,8 @@
* Henry Mitchel <henry.mitchel@intel.com>
*/
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -181,6 +183,381 @@ static const struct dfl_feature_ops fme_hdr_ops = {
.ioctl = fme_hdr_ioctl,
};
+#define FME_THERM_THRESHOLD 0x8
+#define TEMP_THRESHOLD1 GENMASK_ULL(6, 0)
+#define TEMP_THRESHOLD1_EN BIT_ULL(7)
+#define TEMP_THRESHOLD2 GENMASK_ULL(14, 8)
+#define TEMP_THRESHOLD2_EN BIT_ULL(15)
+#define TRIP_THRESHOLD GENMASK_ULL(30, 24)
+#define TEMP_THRESHOLD1_STATUS BIT_ULL(32) /* threshold1 reached */
+#define TEMP_THRESHOLD2_STATUS BIT_ULL(33) /* threshold2 reached */
+/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
+#define TEMP_THRESHOLD1_POLICY BIT_ULL(44)
+
+#define FME_THERM_RDSENSOR_FMT1 0x10
+#define FPGA_TEMPERATURE GENMASK_ULL(6, 0)
+
+#define FME_THERM_CAP 0x20
+#define THERM_NO_THROTTLE BIT_ULL(0)
+
+#define MD_PRE_DEG
+
+static bool fme_thermal_throttle_support(void __iomem *base)
+{
+ u64 v = readq(base + FME_THERM_CAP);
+
+ return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
+}
+
+static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct dfl_feature *feature = drvdata;
+
+	/* Temperature is always supported; check the hardware cap for the others */
+ if (attr == hwmon_temp_input)
+ return 0444;
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
+}
+
+static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
+ *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
+ break;
+ case hwmon_temp_max:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
+ break;
+ case hwmon_temp_crit:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
+ break;
+ case hwmon_temp_emergency:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
+ break;
+ case hwmon_temp_max_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_temp_crit_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops thermal_hwmon_ops = {
+ .is_visible = thermal_hwmon_attrs_visible,
+ .read = thermal_hwmon_read,
+};
+
+static const struct hwmon_channel_info *thermal_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info thermal_hwmon_chip_info = {
+ .ops = &thermal_hwmon_ops,
+ .info = thermal_hwmon_info,
+};
+
+static ssize_t temp1_max_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
+}
+
+static DEVICE_ATTR_RO(temp1_max_policy);
+
+static struct attribute *thermal_extra_attrs[] = {
+ &dev_attr_temp1_max_policy.attr,
+ NULL,
+};
+
+static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
+}
+
+static const struct attribute_group thermal_extra_group = {
+ .attrs = thermal_extra_attrs,
+ .is_visible = thermal_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(thermal_extra);
+
+static int fme_thermal_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ /*
+	 * Create a hwmon device to allow userspace to monitor the temperature
+	 * and other threshold information.
+ *
+ * temp1_input -> FPGA device temperature
+ * temp1_max -> hardware threshold 1 -> 50% or 90% throttling
+ * temp1_crit -> hardware threshold 2 -> 100% throttling
+ * temp1_emergency -> hardware trip_threshold to shutdown FPGA
+ * temp1_max_alarm -> hardware threshold 1 alarm
+ * temp1_crit_alarm -> hardware threshold 2 alarm
+ *
+	 * Also create device-specific sysfs interfaces, e.g. temp1_max_policy,
+	 * which reports the actual hardware throttling action (50% vs 90%).
+ *
+	 * If the hardware doesn't support automatic throttling based on these
+	 * thresholds, then none of the sysfs interfaces above is visible except
+	 * temp1_input for the temperature.
+ */
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_thermal", feature,
+ &thermal_hwmon_chip_info,
+ thermal_extra_groups);
+ if (IS_ERR(hwmon)) {
+		dev_err(&pdev->dev, "Failed to register thermal hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_THERMAL_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+};
+
+#define FME_PWR_STATUS 0x8
+#define FME_LATENCY_TOLERANCE BIT_ULL(18)
+#define PWR_CONSUMED GENMASK_ULL(17, 0)
+
+#define FME_PWR_THRESHOLD 0x10
+#define PWR_THRESHOLD1 GENMASK_ULL(6, 0) /* in Watts */
+#define PWR_THRESHOLD2 GENMASK_ULL(14, 8) /* in Watts */
+#define PWR_THRESHOLD_MAX 0x7f /* in Watts */
+#define PWR_THRESHOLD1_STATUS BIT_ULL(16)
+#define PWR_THRESHOLD2_STATUS BIT_ULL(17)
+
+#define FME_PWR_XEON_LIMIT 0x18
+#define XEON_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define XEON_PWR_EN BIT_ULL(15)
+#define FME_PWR_FPGA_LIMIT 0x20
+#define FPGA_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define FPGA_PWR_EN BIT_ULL(15)
+
+static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_power_input:
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+ *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
+ break;
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
+ break;
+ case hwmon_power_max_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_power_crit_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ int ret = 0;
+ u64 v;
+
+ val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);
+
+ mutex_lock(&pdata->lock);
+
+ switch (attr) {
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD1;
+ v |= FIELD_PREP(PWR_THRESHOLD1, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD2;
+ v |= FIELD_PREP(PWR_THRESHOLD2, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static umode_t power_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_max_alarm:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_max:
+ case hwmon_power_crit:
+ return 0644;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops power_hwmon_ops = {
+ .is_visible = power_hwmon_attrs_visible,
+ .read = power_hwmon_read,
+ .write = power_hwmon_write,
+};
+
+static const struct hwmon_channel_info *power_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MAX_ALARM |
+ HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info power_hwmon_chip_info = {
+ .ops = &power_hwmon_ops,
+ .info = power_hwmon_info,
+};
+
+static ssize_t power1_xeon_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 xeon_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
+
+ if (FIELD_GET(XEON_PWR_EN, v))
+ xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", xeon_limit * 100000);
+}
+
+static ssize_t power1_fpga_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 fpga_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
+
+ if (FIELD_GET(FPGA_PWR_EN, v))
+ fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", fpga_limit * 100000);
+}
+
+static ssize_t power1_ltr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
+}
+
+static DEVICE_ATTR_RO(power1_xeon_limit);
+static DEVICE_ATTR_RO(power1_fpga_limit);
+static DEVICE_ATTR_RO(power1_ltr);
+
+static struct attribute *power_extra_attrs[] = {
+ &dev_attr_power1_xeon_limit.attr,
+ &dev_attr_power1_fpga_limit.attr,
+ &dev_attr_power1_ltr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(power_extra);
+
+static int fme_power_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_power", feature,
+ &power_hwmon_chip_info,
+ power_extra_groups);
+ if (IS_ERR(hwmon)) {
+		dev_err(&pdev->dev, "Failed to register power hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_POWER_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+};
+
static struct dfl_feature_driver fme_feature_drvs[] = {
{
.id_table = fme_hdr_id_table,
@@ -195,6 +572,14 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
.ops = &fme_global_err_ops,
},
{
+ .id_table = fme_thermal_mgmt_id_table,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .id_table = fme_power_mgmt_id_table,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
.ops = NULL,
},
};
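A note on the scaling in the hwmon read callbacks above: the hwmon sysfs ABI reports temperatures in millidegrees Celsius and power in microwatts, hence the multiplications by 1000 and 1000000. An illustrative example with hypothetical register values:

/*
 * A raw FPGA_TEMPERATURE field of 85 (degrees Celsius) is exposed as
 * 85000 through temp1_input; a raw PWR_CONSUMED field of 60 (watts)
 * is exposed as 60000000 through power1_input.
 */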
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 31ef38e38537..ee7765049607 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -578,10 +578,8 @@ static int zynq_fpga_probe(struct platform_device *pdev)
init_completion(&priv->dma_done);
priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(dev, "No IRQ available\n");
+ if (priv->irq < 0)
return priv->irq;
- }
priv->clk = devm_clk_get(dev, "ref_clk");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index c612db7a914a..92ce6d85802c 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -53,6 +53,14 @@ config FSI_MASTER_AST_CF
lines driven by the internal ColdFire coprocessor. This requires
the corresponding machine specific ColdFire firmware to be available.
+config FSI_MASTER_ASPEED
+ tristate "FSI ASPEED master"
+ help
+	 This option enables an FSI master that is present behind an OPB bridge
+ in the AST2600.
+
+ Enable it for your BMC kernel in an OpenPower or IBM Power system.
+
config FSI_SCOM
tristate "SCOM FSI client device driver"
---help---
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index e4a2ff043c32..da218a1ad8e1 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_FSI) += fsi-core.o
obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
+obj-$(CONFIG_FSI_MASTER_ASPEED) += fsi-master-aspeed.o
obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 1f76740f33b6..8244da8a7241 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -544,6 +544,31 @@ static int fsi_slave_scan(struct fsi_slave *slave)
return 0;
}
+static unsigned long aligned_access_size(size_t offset, size_t count)
+{
+ unsigned long offset_unit, count_unit;
+
+ /* Criteria:
+ *
+ * 1. Access size must be less than or equal to the maximum access
+ * width or the highest power-of-two factor of offset
+ * 2. Access size must be less than or equal to the amount specified by
+ * count
+ *
+ * The access width is optimal if we can calculate 1 to be strictly
+ * equal while still satisfying 2.
+ */
+
+ /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
+ offset_unit = BIT(__builtin_ctzl(offset | 4));
+
+ /* Find 2 by the top bit of count */
+ count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
+
+ /* Constrain the maximum access width to the minimum of both criteria */
+ return BIT(__builtin_ctzl(offset_unit | count_unit));
+}
+
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -559,8 +584,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += read_len) {
- read_len = min_t(size_t, count, 4);
- read_len -= off & 0x3;
+ read_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_read(slave, off, buf + total_len, read_len);
if (rc)
@@ -587,8 +611,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += write_len) {
- write_len = min_t(size_t, count, 4);
- write_len -= off & 0x3;
+ write_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_write(slave, off, buf + total_len, write_len);
if (rc)
@@ -1241,6 +1264,19 @@ static ssize_t master_break_store(struct device *dev,
static DEVICE_ATTR(break, 0200, NULL, master_break_store);
+static struct attribute *master_attrs[] = {
+ &dev_attr_break.attr,
+ &dev_attr_rescan.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(master);
+
+static struct class fsi_master_class = {
+ .name = "fsi-master",
+ .dev_groups = master_groups,
+};
+
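With the break and rescan attributes attached to the class through dev_groups, the driver core creates and removes the sysfs files as part of device registration, which is why the explicit device_create_file() calls further down can be dropped. A minimal sketch of the pattern, with hypothetical names:

/* Illustrative sketch only; names are hypothetical, not part of this patch. */
#include <linux/device.h>

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "ok\n");
}
static DEVICE_ATTR_RO(status);

static struct attribute *example_attrs[] = {
	&dev_attr_status.attr,
	NULL
};
ATTRIBUTE_GROUPS(example);

static struct class example_class = {
	.name		= "example",
	.dev_groups	= example_groups,
};

/*
 * Any device registered with dev->class = &example_class gets the "status"
 * file created and removed by the driver core, so no explicit
 * device_create_file()/device_remove_file() calls are needed.
 */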
int fsi_master_register(struct fsi_master *master)
{
int rc;
@@ -1249,6 +1285,7 @@ int fsi_master_register(struct fsi_master *master)
mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
dev_set_name(&master->dev, "fsi%d", master->idx);
+ master->dev.class = &fsi_master_class;
rc = device_register(&master->dev);
if (rc) {
@@ -1256,20 +1293,6 @@ int fsi_master_register(struct fsi_master *master)
return rc;
}
- rc = device_create_file(&master->dev, &dev_attr_rescan);
- if (rc) {
- device_del(&master->dev);
- ida_simple_remove(&master_ida, master->idx);
- return rc;
- }
-
- rc = device_create_file(&master->dev, &dev_attr_break);
- if (rc) {
- device_del(&master->dev);
- ida_simple_remove(&master_ida, master->idx);
- return rc;
- }
-
np = dev_of_node(&master->dev);
if (!of_property_read_bool(np, "no-scan-on-init")) {
mutex_lock(&master->scan_lock);
@@ -1350,8 +1373,15 @@ static int __init fsi_init(void)
rc = bus_register(&fsi_bus_type);
if (rc)
goto fail_bus;
+
+ rc = class_register(&fsi_master_class);
+ if (rc)
+ goto fail_class;
+
return 0;
+ fail_class:
+ bus_unregister(&fsi_bus_type);
fail_bus:
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
return rc;
@@ -1360,6 +1390,7 @@ postcore_initcall(fsi_init);
static void fsi_exit(void)
{
+ class_unregister(&fsi_master_class);
bus_unregister(&fsi_bus_type);
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
ida_destroy(&fsi_minor_ida);
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
new file mode 100644
index 000000000000..f49742b310c2
--- /dev/null
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (C) IBM Corporation 2018
+// FSI master driver for AST2600
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fsi.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+
+#include "fsi-master.h"
+
+struct fsi_master_aspeed {
+ struct fsi_master master;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+#define to_fsi_master_aspeed(m) \
+ container_of(m, struct fsi_master_aspeed, master)
+
+/* Control register (size 0x400) */
+static const u32 ctrl_base = 0x80000000;
+
+static const u32 fsi_base = 0xa0000000;
+
+#define OPB_FSI_VER 0x00
+#define OPB_TRIGGER 0x04
+#define OPB_CTRL_BASE 0x08
+#define OPB_FSI_BASE 0x0c
+#define OPB_CLK_SYNC 0x3c
+#define OPB_IRQ_CLEAR 0x40
+#define OPB_IRQ_MASK 0x44
+#define OPB_IRQ_STATUS 0x48
+
+#define OPB0_SELECT 0x10
+#define OPB0_RW 0x14
+#define OPB0_XFER_SIZE 0x18
+#define OPB0_FSI_ADDR 0x1c
+#define OPB0_FSI_DATA_W 0x20
+#define OPB0_STATUS 0x80
+#define OPB0_FSI_DATA_R 0x84
+
+#define OPB0_WRITE_ORDER1 0x4c
+#define OPB0_WRITE_ORDER2 0x50
+#define OPB1_WRITE_ORDER1 0x54
+#define OPB1_WRITE_ORDER2 0x58
+#define OPB0_READ_ORDER1 0x5c
+#define OPB1_READ_ORDER2 0x60
+
+#define OPB_RETRY_COUNTER 0x64
+
+/* OPBn_STATUS */
+#define STATUS_HALFWORD_ACK BIT(0)
+#define STATUS_FULLWORD_ACK BIT(1)
+#define STATUS_ERR_ACK BIT(2)
+#define STATUS_RETRY BIT(3)
+#define STATUS_TIMEOUT BIT(4)
+
+/* OPB_IRQ_MASK */
+#define OPB1_XFER_ACK_EN BIT(17)
+#define OPB0_XFER_ACK_EN BIT(16)
+
+/* OPB_RW */
+#define CMD_READ BIT(0)
+#define CMD_WRITE 0
+
+/* OPBx_XFER_SIZE */
+#define XFER_FULLWORD (BIT(1) | BIT(0))
+#define XFER_HALFWORD (BIT(0))
+#define XFER_BYTE (0)
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_aspeed.h>
+
+#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in ms */
+
+#define DEFAULT_DIVISOR 14
+#define OPB_POLL_TIMEOUT 10000
+
+static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
+ u32 val, u32 transfer_size)
+{
+ void __iomem *base = aspeed->base;
+ u32 reg, status;
+ int ret;
+
+ writel(CMD_WRITE, base + OPB0_RW);
+ writel(transfer_size, base + OPB0_XFER_SIZE);
+ writel(addr, base + OPB0_FSI_ADDR);
+ writel(val, base + OPB0_FSI_DATA_W);
+ writel(0x1, base + OPB_IRQ_CLEAR);
+ writel(0x1, base + OPB_TRIGGER);
+
+ ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
+ (reg & OPB0_XFER_ACK_EN) != 0,
+ 0, OPB_POLL_TIMEOUT);
+
+ status = readl(base + OPB0_STATUS);
+
+ trace_fsi_master_aspeed_opb_write(addr, val, transfer_size, status, reg);
+
+ /* Return error when poll timed out */
+ if (ret)
+ return ret;
+
+ /* Command failed, master will reset */
+ if (status & STATUS_ERR_ACK)
+ return -EIO;
+
+ return 0;
+}
+
+static int opb_writeb(struct fsi_master_aspeed *aspeed, u32 addr, u8 val)
+{
+ return __opb_write(aspeed, addr, val, XFER_BYTE);
+}
+
+static int opb_writew(struct fsi_master_aspeed *aspeed, u32 addr, __be16 val)
+{
+ return __opb_write(aspeed, addr, (__force u16)val, XFER_HALFWORD);
+}
+
+static int opb_writel(struct fsi_master_aspeed *aspeed, u32 addr, __be32 val)
+{
+ return __opb_write(aspeed, addr, (__force u32)val, XFER_FULLWORD);
+}
+
+static int __opb_read(struct fsi_master_aspeed *aspeed, uint32_t addr,
+ u32 transfer_size, void *out)
+{
+ void __iomem *base = aspeed->base;
+ u32 result, reg;
+ int status, ret;
+
+ writel(CMD_READ, base + OPB0_RW);
+ writel(transfer_size, base + OPB0_XFER_SIZE);
+ writel(addr, base + OPB0_FSI_ADDR);
+ writel(0x1, base + OPB_IRQ_CLEAR);
+ writel(0x1, base + OPB_TRIGGER);
+
+ ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
+ (reg & OPB0_XFER_ACK_EN) != 0,
+ 0, OPB_POLL_TIMEOUT);
+
+ status = readl(base + OPB0_STATUS);
+
+ result = readl(base + OPB0_FSI_DATA_R);
+
+ trace_fsi_master_aspeed_opb_read(addr, transfer_size, result,
+ readl(base + OPB0_STATUS),
+ reg);
+
+ /* Return error when poll timed out */
+ if (ret)
+ return ret;
+
+ /* Command failed, master will reset */
+ if (status & STATUS_ERR_ACK)
+ return -EIO;
+
+ if (out) {
+ switch (transfer_size) {
+ case XFER_BYTE:
+ *(u8 *)out = result;
+ break;
+ case XFER_HALFWORD:
+ *(u16 *)out = result;
+ break;
+ case XFER_FULLWORD:
+ *(u32 *)out = result;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ }
+
+ return 0;
+}
+
+static int opb_readl(struct fsi_master_aspeed *aspeed, uint32_t addr, __be32 *out)
+{
+ return __opb_read(aspeed, addr, XFER_FULLWORD, out);
+}
+
+static int opb_readw(struct fsi_master_aspeed *aspeed, uint32_t addr, __be16 *out)
+{
+ return __opb_read(aspeed, addr, XFER_HALFWORD, (void *)out);
+}
+
+static int opb_readb(struct fsi_master_aspeed *aspeed, uint32_t addr, u8 *out)
+{
+ return __opb_read(aspeed, addr, XFER_BYTE, (void *)out);
+}
+
+static int check_errors(struct fsi_master_aspeed *aspeed, int err)
+{
+ int ret;
+
+ if (trace_fsi_master_aspeed_opb_error_enabled()) {
+ __be32 mresp0, mstap0, mesrb0;
+
+ opb_readl(aspeed, ctrl_base + FSI_MRESP0, &mresp0);
+ opb_readl(aspeed, ctrl_base + FSI_MSTAP0, &mstap0);
+ opb_readl(aspeed, ctrl_base + FSI_MESRB0, &mesrb0);
+
+ trace_fsi_master_aspeed_opb_error(
+ be32_to_cpu(mresp0),
+ be32_to_cpu(mstap0),
+ be32_to_cpu(mesrb0));
+ }
+
+ if (err == -EIO) {
+ /* Check MAEB (0x70) ? */
+
+ /* Then clear errors in master */
+ ret = opb_writel(aspeed, ctrl_base + FSI_MRESP0,
+ cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
+ if (ret) {
+ /* TODO: log? return different code? */
+ return ret;
+ }
+ /* TODO: confirm that 0x70 was okay */
+ }
+
+ /* This will pass through timeout errors */
+ return err;
+}
+
+static int aspeed_master_read(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, void *val, size_t size)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int ret;
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += link * FSI_HUB_LINK_SIZE;
+
+ switch (size) {
+ case 1:
+ ret = opb_readb(aspeed, fsi_base + addr, val);
+ break;
+ case 2:
+ ret = opb_readw(aspeed, fsi_base + addr, val);
+ break;
+ case 4:
+ ret = opb_readl(aspeed, fsi_base + addr, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = check_errors(aspeed, ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_master_write(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int ret;
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += link * FSI_HUB_LINK_SIZE;
+
+ switch (size) {
+ case 1:
+ ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val);
+ break;
+ case 2:
+ ret = opb_writew(aspeed, fsi_base + addr, *(__be16 *)val);
+ break;
+ case 4:
+ ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = check_errors(aspeed, ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_master_link_enable(struct fsi_master *master, int link)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int idx, bit, ret;
+ __be32 reg, result;
+
+ idx = link / 32;
+ bit = link % 32;
+
+ reg = cpu_to_be32(0x80000000 >> bit);
+
+ ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
+ if (ret)
+ return ret;
+
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ ret = opb_readl(aspeed, ctrl_base + FSI_MENP0 + (4 * idx), &result);
+ if (ret)
+ return ret;
+
+ if (result != reg) {
+ dev_err(aspeed->dev, "%s failed: %08x\n", __func__, result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id)
+{
+ uint32_t addr;
+ __be32 cmd;
+
+ addr = 0x4;
+ cmd = cpu_to_be32(0xecc00000);
+
+ return aspeed_master_write(master, link, id, addr, &cmd, 4);
+}
+
+static int aspeed_master_break(struct fsi_master *master, int link)
+{
+ uint32_t addr;
+ __be32 cmd;
+
+ addr = 0x0;
+ cmd = cpu_to_be32(0xc0de0000);
+
+ return aspeed_master_write(master, link, 0, addr, &cmd, 4);
+}
+
+static void aspeed_master_release(struct device *dev)
+{
+ struct fsi_master_aspeed *aspeed =
+ to_fsi_master_aspeed(dev_to_fsi_master(dev));
+
+ kfree(aspeed);
+}
+
+/* mmode encoders */
+static inline u32 fsi_mmode_crs0(u32 x)
+{
+ return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
+}
+
+static inline u32 fsi_mmode_crs1(u32 x)
+{
+ return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
+}
+
+static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
+{
+ __be32 reg;
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ /* Initialize the MFSI (hub master) engine */
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
+ opb_writel(aspeed, ctrl_base + FSI_MECTRL, reg);
+
+ reg = cpu_to_be32(FSI_MMODE_ECRC | FSI_MMODE_EPC | FSI_MMODE_RELA
+ | fsi_mmode_crs0(DEFAULT_DIVISOR)
+ | fsi_mmode_crs1(DEFAULT_DIVISOR)
+ | FSI_MMODE_P8_TO_LSB);
+ opb_writel(aspeed, ctrl_base + FSI_MMODE, reg);
+
+ reg = cpu_to_be32(0xffff0000);
+ opb_writel(aspeed, ctrl_base + FSI_MDLYR, reg);
+
+ reg = cpu_to_be32(~0);
+ opb_writel(aspeed, ctrl_base + FSI_MSENP0, reg);
+
+ /* Leave enabled long enough for master logic to set up */
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ opb_writel(aspeed, ctrl_base + FSI_MCENP0, reg);
+
+ opb_readl(aspeed, ctrl_base + FSI_MAEB, NULL);
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ opb_readl(aspeed, ctrl_base + FSI_MLEVP0, NULL);
+
+ /* Reset the master bridge */
+ reg = cpu_to_be32(FSI_MRESB_RST_GEN);
+ opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
+
+ reg = cpu_to_be32(FSI_MRESB_RST_ERR);
+ opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
+
+ return 0;
+}
+
+static int fsi_master_aspeed_probe(struct platform_device *pdev)
+{
+ struct fsi_master_aspeed *aspeed;
+ struct resource *res;
+ int rc, links, reg;
+ __be32 raw;
+
+ aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
+ if (!aspeed)
+ return -ENOMEM;
+
+ aspeed->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aspeed->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aspeed->base))
+ return PTR_ERR(aspeed->base);
+
+ aspeed->clk = devm_clk_get(aspeed->dev, NULL);
+ if (IS_ERR(aspeed->clk)) {
+ dev_err(aspeed->dev, "couldn't get clock\n");
+ return PTR_ERR(aspeed->clk);
+ }
+ rc = clk_prepare_enable(aspeed->clk);
+ if (rc) {
+ dev_err(aspeed->dev, "couldn't enable clock\n");
+ return rc;
+ }
+
+ writel(0x1, aspeed->base + OPB_CLK_SYNC);
+ writel(OPB1_XFER_ACK_EN | OPB0_XFER_ACK_EN,
+ aspeed->base + OPB_IRQ_MASK);
+
+ /* TODO: determine an appropriate value */
+ writel(0x10, aspeed->base + OPB_RETRY_COUNTER);
+
+ writel(ctrl_base, aspeed->base + OPB_CTRL_BASE);
+ writel(fsi_base, aspeed->base + OPB_FSI_BASE);
+
+ /* Set read data order */
+ writel(0x00030b1b, aspeed->base + OPB0_READ_ORDER1);
+
+ /* Set write data order */
+ writel(0x0011101b, aspeed->base + OPB0_WRITE_ORDER1);
+ writel(0x0c330f3f, aspeed->base + OPB0_WRITE_ORDER2);
+
+ /*
+ * Select OPB0 for all operations.
+ * Will need to be reworked when enabling DMA or anything that uses
+ * OPB1.
+ */
+ writel(0x1, aspeed->base + OPB0_SELECT);
+
+ rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to read hub version\n");
+ return rc;
+ }
+
+ reg = be32_to_cpu(raw);
+ links = (reg >> 8) & 0xff;
+ dev_info(&pdev->dev, "hub version %08x (%d links)\n", reg, links);
+
+ aspeed->master.dev.parent = &pdev->dev;
+ aspeed->master.dev.release = aspeed_master_release;
+ aspeed->master.dev.of_node = of_node_get(dev_of_node(&pdev->dev));
+
+ aspeed->master.n_links = links;
+ aspeed->master.read = aspeed_master_read;
+ aspeed->master.write = aspeed_master_write;
+ aspeed->master.send_break = aspeed_master_break;
+ aspeed->master.term = aspeed_master_term;
+ aspeed->master.link_enable = aspeed_master_link_enable;
+
+ dev_set_drvdata(&pdev->dev, aspeed);
+
+ aspeed_master_init(aspeed);
+
+ rc = fsi_master_register(&aspeed->master);
+ if (rc)
+ goto err_release;
+
+ /* At this point, fsi_master_register performs the device_initialize(),
+ * and holds the sole reference on master.dev. This means the device
+ * will be freed (via ->release) during any subsequent call to
+ * fsi_master_unregister. We add our own reference to it here, so we
+ * can perform cleanup (in _remove()) without it being freed before
+ * we're ready.
+ */
+ get_device(&aspeed->master.dev);
+ return 0;
+
+err_release:
+ clk_disable_unprepare(aspeed->clk);
+ return rc;
+}
+
+static int fsi_master_aspeed_remove(struct platform_device *pdev)
+{
+ struct fsi_master_aspeed *aspeed = platform_get_drvdata(pdev);
+
+ fsi_master_unregister(&aspeed->master);
+ clk_disable_unprepare(aspeed->clk);
+
+ return 0;
+}
+
+static const struct of_device_id fsi_master_aspeed_match[] = {
+ { .compatible = "aspeed,ast2600-fsi-master" },
+ { },
+};
+
+static struct platform_driver fsi_master_aspeed_driver = {
+ .driver = {
+ .name = "fsi-master-aspeed",
+ .of_match_table = fsi_master_aspeed_match,
+ },
+ .probe = fsi_master_aspeed_probe,
+ .remove = fsi_master_aspeed_remove,
+};
+
+module_platform_driver(fsi_master_aspeed_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index f158b1a88286..def35cf92571 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -13,53 +13,7 @@
#include "fsi-master.h"
-/* Control Registers */
-#define FSI_MMODE 0x0 /* R/W: mode */
-#define FSI_MDLYR 0x4 /* R/W: delay */
-#define FSI_MCRSP 0x8 /* R/W: clock rate */
-#define FSI_MENP0 0x10 /* R/W: enable */
-#define FSI_MLEVP0 0x18 /* R: plug detect */
-#define FSI_MSENP0 0x18 /* S: Set enable */
-#define FSI_MCENP0 0x20 /* C: Clear enable */
-#define FSI_MAEB 0x70 /* R: Error address */
-#define FSI_MVER 0x74 /* R: master version/type */
-#define FSI_MRESP0 0xd0 /* W: Port reset */
-#define FSI_MESRB0 0x1d0 /* R: Master error status */
-#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
-#define FSI_MECTRL 0x2e0 /* W: Error control */
-
-/* MMODE: Mode control */
-#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
-#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
-#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
-#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
- /* MSB=1, LSB=0 is 0.8 ms */
- /* MSB=0, LSB=1 is 0.9 ms */
-#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
-#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
-#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
-#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
-
-/* MRESB: Reset brindge */
-#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
-#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
-
-/* MRESB: Reset port */
-#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
-#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
-#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
-#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
-#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
-
-/* MECTRL: Error control */
-#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
- /* master 0 in error */
-#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
-
#define FSI_ENGID_HUB_MASTER 0x1c
-#define FSI_HUB_LINK_OFFSET 0x80000
-#define FSI_HUB_LINK_SIZE 0x80000
-#define FSI_HUB_MASTER_MAX_LINKS 8
#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in mS */
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index c7174237e864..6e8d4d4d5149 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -12,6 +12,71 @@
#include <linux/device.h>
#include <linux/mutex.h>
+/*
+ * Master registers
+ *
+ * These are used by hardware masters, such as those in the FSP2 and AST2600,
+ * and by the hub master in POWER processors.
+ */
+
+/* Control Registers */
+#define FSI_MMODE 0x0 /* R/W: mode */
+#define FSI_MDLYR 0x4 /* R/W: delay */
+#define FSI_MCRSP 0x8 /* R/W: clock rate */
+#define FSI_MENP0 0x10 /* R/W: enable */
+#define FSI_MLEVP0 0x18 /* R: plug detect */
+#define FSI_MSENP0 0x18 /* S: Set enable */
+#define FSI_MCENP0 0x20 /* C: Clear enable */
+#define FSI_MAEB 0x70 /* R: Error address */
+#define FSI_MVER 0x74 /* R: master version/type */
+#define FSI_MSTAP0 0xd0 /* R: Port status */
+#define FSI_MRESP0 0xd0 /* W: Port reset */
+#define FSI_MESRB0 0x1d0 /* R: Master error status */
+#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
+#define FSI_MSCSB0 0x1d4 /* R: Master sub command stack */
+#define FSI_MATRB0 0x1d8 /* R: Master address trace */
+#define FSI_MDTRB0 0x1dc /* R: Master data trace */
+#define FSI_MECTRL 0x2e0 /* W: Error control */
+
+/* MMODE: Mode control */
+#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
+#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
+#define FSI_MMODE_RELA 0x20000000 /* Enable relative address commands */
+#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
+#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
+ /* MSB=1, LSB=0 is 0.8 ms */
+ /* MSB=0, LSB=1 is 0.9 ms */
+#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
+#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
+#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
+#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
+
+/* MRESB: Reset bridge */
+#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
+#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
+
+/* MRESP: Reset port */
+#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
+#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
+#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
+#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
+#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
+
+/* MECTRL: Error control */
+#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
+ /* master 0 in error */
+#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
+
+#define FSI_HUB_LINK_OFFSET 0x80000
+#define FSI_HUB_LINK_SIZE 0x80000
+#define FSI_HUB_MASTER_MAX_LINKS 8
+
+/*
+ * Protocol definitions
+ *
+ * These are used by low level masters that bit-bang out the protocol
+ */
+
/* Various protocol delays */
#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
#define FSI_SEND_DELAY_CLOCKS 16 /* Number clocks for send delay */
@@ -47,6 +112,12 @@
/* fsi-master definition and flags */
#define FSI_MASTER_FLAG_SWCLOCK 0x1
+/*
+ * Structures and function prototypes
+ *
+ * These are common to all masters
+ */
+
struct fsi_master {
struct device dev;
int idx;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 92d0ff63b3ea..8adffd42f8cb 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -120,6 +120,14 @@ config GPIO_ASPEED
help
Say Y here to support Aspeed AST2400 and AST2500 GPIO controllers.
+config GPIO_ASPEED_SGPIO
+ bool "Aspeed SGPIO support"
+ depends on (ARCH_ASPEED || COMPILE_TEST) && OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to support Aspeed AST2500 SGPIO functionality.
+
config GPIO_ATH79
tristate "Atheros AR71XX/AR724X/AR913X GPIO support"
default y if ATH79
@@ -147,6 +155,15 @@ config GPIO_BCM_KONA
help
Turn on GPIO support for Broadcom "Kona" chips.
+config GPIO_BCM_XGS_IPROC
+ tristate "BRCM XGS iProc GPIO support"
+ depends on OF_GPIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ default ARCH_BCM_IPROC
+ help
+ Say yes here to enable GPIO support for Broadcom XGS iProc SoCs.
+
config GPIO_BRCMSTB
tristate "BRCMSTB GPIO support"
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
@@ -435,6 +452,15 @@ config GPIO_RCAR
help
Say yes here to support GPIO on Renesas R-Car SoCs.
+config GPIO_RDA
+ bool "RDA Micro GPIO controller support"
+ depends on ARCH_RDA || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to support the RDA Micro GPIO controller.
+
config GPIO_REG
bool
help
@@ -531,6 +557,7 @@ config GPIO_TEGRA186
depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
depends on OF_GPIO
select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support GPIO pins on NVIDIA Tegra186 SoCs.
@@ -1320,7 +1347,7 @@ config GPIO_BT8XX
The card needs to be physically altered for using it as a
GPIO card. For more information on how to build a GPIO card
from a BT8xx TV card, see the documentation file at
- Documentation/driver-api/bt8xxgpio.rst
+ Documentation/driver-api/gpio/bt8xxgpio.rst
If unsure, say N.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d2fd19c15bae..34eb8b2b12dd 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_GPIO_AMD_FCH) += gpio-amd-fch.o
obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
+obj-$(CONFIG_GPIO_ASPEED_SGPIO) += gpio-aspeed-sgpio.o
obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
+obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o
obj-$(CONFIG_GPIO_BD70528) += gpio-bd70528.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
@@ -115,6 +117,7 @@ obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
+obj-$(CONFIG_GPIO_RDA) += gpio-rda.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_GPIO_REG) += gpio-reg.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 9c048f10c9ad..76f8c7ff18ff 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -80,6 +80,10 @@ Work items:
- Look over and identify any remaining easily converted drivers and
dry-code conversions to MMIO GPIO for maintainers to test
+- Expand the MMIO GPIO library or write a new library of regmap-based I/O
+  helpers for GPIO drivers that simply use offsets
+  0..n in some register to drive GPIO lines
+
- Expand the MMIO GPIO or write a new library for port-mapped I/O
helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use
this with dry-coding and sending to maintainers to test
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index a44fa8af5b0d..400c09b905f8 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -59,7 +59,10 @@ static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
const unsigned port = offset / 8;
const unsigned mask = BIT(offset % 8);
- return !!(dio48egpio->io_state[port] & mask);
+ if (dio48egpio->io_state[port] & mask)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
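This change, and the long series of GPIO driver changes that follow, convert .get_direction() callbacks from returning a bare 0/1 to the named GPIO_LINE_DIRECTION_OUT/GPIO_LINE_DIRECTION_IN constants from <linux/gpio/driver.h>. A minimal sketch of the new convention; the driver name and register layout are hypothetical:

/* Illustrative sketch only; names and register layout are hypothetical. */
#include <linux/bits.h>
#include <linux/gpio/driver.h>

struct example_gpio {
	struct gpio_chip chip;
	unsigned long dir_in_mask;	/* hypothetical: bit set => line is an input */
};

static int example_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct example_gpio *eg = gpiochip_get_data(chip);

	if (eg->dir_in_mask & BIT(offset))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}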
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index ff53887bdaa8..c50329ab493a 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -53,7 +53,7 @@ struct idi_48_gpio {
static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -65,7 +65,7 @@ static int idi_48_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
unsigned i;
- const unsigned register_offset[6] = { 0, 1, 2, 4, 5, 6 };
+ static const unsigned int register_offset[6] = { 0, 1, 2, 4, 5, 6 };
unsigned base_offset;
unsigned mask;
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 8d2f51cd9d91..5752d9dab148 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -51,9 +51,9 @@ struct idio_16_gpio {
static int idio_16_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
if (offset > 15)
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int idio_16_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 83a2286d93f6..173e06758e6c 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -77,7 +77,10 @@ static int mmio_74xx_get_direction(struct gpio_chip *gc, unsigned offset)
{
struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc);
- return !(priv->flags & MMIO_74XX_DIR_OUT);
+ if (priv->flags & MMIO_74XX_DIR_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int mmio_74xx_dir_in(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index 181df1581df5..4e44ba4d7423 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -92,7 +92,7 @@ static int amd_fch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
ret = (readl_relaxed(ptr) & AMD_FCH_GPIO_FLAG_DIRECTION);
spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
+ return ret ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
static void amd_fch_gpio_set(struct gpio_chip *gc,
diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 7e99860ca447..7e99860ca447 100644
--- a/drivers/gpio/sgpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 09e53c5f3b0a..f1037b61f763 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -487,10 +487,10 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
u32 val;
if (!have_input(gpio, offset))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
if (!have_output(gpio, offset))
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
spin_lock_irqsave(&gpio->lock, flags);
@@ -498,8 +498,7 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
spin_unlock_irqrestore(&gpio->lock, flags);
- return !val;
-
+ return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index f1a5ea9b3de2..53fae02c40ad 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -226,7 +226,6 @@ static int ath79_gpio_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct ath79_gpio_ctrl *ctrl;
struct gpio_irq_chip *girq;
- struct resource *res;
u32 ath79_gpio_count;
bool oe_inverted;
int err;
@@ -256,12 +255,9 @@ static int ath79_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- ctrl->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!ctrl->base)
- return -ENOMEM;
+ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
raw_spin_lock_init(&ctrl->lock);
err = bgpio_init(&ctrl->gc, dev, 4,
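Here, and in several later probes in this series (gpio-em, gpio-htc-egpio), an open-coded platform_get_resource() plus devm_ioremap_nocache() sequence is replaced by devm_platform_ioremap_resource(), and IRQ lookup moves to platform_get_irq(), which already prints an error on failure. A minimal sketch of the resulting probe shape, with hypothetical names:

/* Illustrative sketch only; names are hypothetical. */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int irq;

	/* Looks up IORESOURCE_MEM 0, requests the region and ioremaps it */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* platform_get_irq() logs on failure, so no local dev_err() is needed */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* ... program registers at 'base', request 'irq' ... */
	return 0;
}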
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 9fa6d3a967d2..4122683eb1f9 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -127,7 +127,7 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
u32 val;
val = readl(reg_base + GPIO_CONTROL(gpio)) & GPIO_GPCTR0_IOTR_MASK;
- return !!val;
+ return val ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
@@ -144,7 +144,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
/* this function only applies to output pin */
- if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
goto out;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -170,7 +170,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
reg_base = kona_gpio->reg_base;
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
reg_offset = GPIO_IN_STATUS(bank_id);
else
reg_offset = GPIO_OUT_STATUS(bank_id);
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
index 4ba4d4a67881..45b3da8da336 100644
--- a/drivers/gpio/gpio-bd70528.c
+++ b/drivers/gpio/gpio-bd70528.c
@@ -54,8 +54,10 @@ static int bd70528_get_direction(struct gpio_chip *chip, unsigned int offset)
dev_err(bdgpio->chip.dev, "Could not read gpio direction\n");
return ret;
}
+ if (val & BD70528_GPIO_OUT_EN_MASK)
+ return GPIO_LINE_DIRECTION_OUT;
- return !(val & BD70528_GPIO_OUT_EN_MASK);
+ return GPIO_LINE_DIRECTION_IN;
}
static int bd70528_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -166,9 +168,9 @@ static int bd70528_gpio_get(struct gpio_chip *chip, unsigned int offset)
* locking would make no sense.
*/
ret = bd70528_get_direction(chip, offset);
- if (ret == 0)
+ if (ret == GPIO_LINE_DIRECTION_OUT)
ret = bd70528_gpio_get_o(bdgpio, offset);
- else if (ret == 1)
+ else if (ret == GPIO_LINE_DIRECTION_IN)
ret = bd70528_gpio_get_i(bdgpio, offset);
else
dev_err(bdgpio->chip.dev, "failed to read GPIO direction\n");
@@ -230,3 +232,4 @@ module_platform_driver(bd70528_gpio);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 voltage regulator driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-gpio");
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 5224a946e8ab..c0abc9c6851b 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -37,8 +37,10 @@ static int bd9571mwv_gpio_get_direction(struct gpio_chip *chip,
ret = regmap_read(gpio->bd->regmap, BD9571MWV_GPIO_DIR, &val);
if (ret < 0)
return ret;
+ if (val & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
- return val & BIT(offset);
+ return GPIO_LINE_DIRECTION_OUT;
}
static int bd9571mwv_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 8a33c2fc174d..26b40c8b8a12 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -200,9 +200,9 @@ static int dln2_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
struct dln2_gpio *dln2 = gpiochip_get_data(chip);
if (test_bit(offset, dln2->output_enabled))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -214,7 +214,7 @@ static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
if (dir < 0)
return dir;
- if (dir == 1)
+ if (dir == GPIO_LINE_DIRECTION_IN)
return dln2_gpio_pin_get_in_val(dln2, offset);
return dln2_gpio_pin_get_out_val(dln2, offset);
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 620f25b7efb4..17a243c528ad 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -269,13 +269,12 @@ static void em_gio_irq_domain_remove(void *data)
static int em_gio_probe(struct platform_device *pdev)
{
struct em_gio_priv *p;
- struct resource *io[2], *irq[2];
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
struct device *dev = &pdev->dev;
const char *name = dev_name(dev);
unsigned int ngpios;
- int ret;
+ int irq[2], ret;
p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
if (!p)
@@ -285,25 +284,21 @@ static int em_gio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
spin_lock_init(&p->sense_lock);
- io[0] = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io[1] = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- irq[0] = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- irq[1] = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ irq[0] = platform_get_irq(pdev, 0);
+ if (irq[0] < 0)
+ return irq[0];
- if (!io[0] || !io[1] || !irq[0] || !irq[1]) {
- dev_err(dev, "missing IRQ or IOMEM\n");
- return -EINVAL;
- }
+ irq[1] = platform_get_irq(pdev, 1);
+ if (irq[1] < 0)
+ return irq[1];
- p->base0 = devm_ioremap_nocache(dev, io[0]->start,
- resource_size(io[0]));
- if (!p->base0)
- return -ENOMEM;
+ p->base0 = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->base0))
+ return PTR_ERR(p->base0);
- p->base1 = devm_ioremap_nocache(dev, io[1]->start,
- resource_size(io[1]));
- if (!p->base1)
- return -ENOMEM;
+ p->base1 = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(p->base1))
+ return PTR_ERR(p->base1);
if (of_property_read_u32(dev->of_node, "ngpios", &ngpios)) {
dev_err(dev, "Missing ngpios OF property\n");
@@ -326,7 +321,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->ngpio = ngpios;
irq_chip = &p->irq_chip;
- irq_chip->name = name;
+ irq_chip->name = "gpio-em";
irq_chip->irq_mask = em_gio_irq_disable;
irq_chip->irq_unmask = em_gio_irq_enable;
irq_chip->irq_set_type = em_gio_irq_set_type;
@@ -346,14 +341,12 @@ static int em_gio_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (devm_request_irq(dev, irq[0]->start,
- em_gio_irq_handler, 0, name, p)) {
+ if (devm_request_irq(dev, irq[0], em_gio_irq_handler, 0, name, p)) {
dev_err(dev, "failed to request low IRQ\n");
return -ENOENT;
}
- if (devm_request_irq(dev, irq[1]->start,
- em_gio_irq_handler, 0, name, p)) {
+ if (devm_request_irq(dev, irq[1], em_gio_irq_handler, 0, name, p)) {
dev_err(dev, "failed to request high IRQ\n");
return -ENOENT;
}
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index fae327d5b06e..da1ef0b1c291 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -77,7 +77,10 @@ static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
unsigned int bit = (offset + exar_gpio->first_pin) % 8;
- return !!(exar_get(chip, addr) & BIT(bit));
+ if (exar_get(chip, addr) & BIT(bit))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index fdc639f856f1..cadd02993539 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -250,7 +250,10 @@ static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
superio_exit(sio->addr);
- return !(dir & 1 << offset);
+ if (dir & 1 << offset)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 78a1db24e931..c22d6f94129c 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -52,7 +52,10 @@ static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
const unsigned int port = offset / 8;
const unsigned int mask = BIT(offset % 8);
- return !!(gpiommgpio->io_state[port] & mask);
+ if (gpiommgpio->io_state[port] & mask)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index 6eb56f7ab9c9..a40bd56673fe 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -220,7 +220,10 @@ static int egpio_get_direction(struct gpio_chip *chip, unsigned offset)
egpio = gpiochip_get_data(chip);
- return !test_bit(offset, &egpio->is_out);
+ if (test_bit(offset, &egpio->is_out))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static void egpio_write_cache(struct egpio_info *ei)
@@ -265,7 +268,6 @@ static int __init egpio_probe(struct platform_device *pdev)
struct gpio_chip *chip;
unsigned int irq, irq_end;
int i;
- int ret;
/* Initialize ei data structure. */
ei = devm_kzalloc(&pdev->dev, sizeof(*ei), GFP_KERNEL);
@@ -275,28 +277,24 @@ static int __init egpio_probe(struct platform_device *pdev)
spin_lock_init(&ei->lock);
/* Find chained irq */
- ret = -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res)
ei->chained_irq = res->start;
/* Map egpio chip into virtual address space. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto fail;
- ei->base_addr = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!ei->base_addr)
- goto fail;
- pr_debug("EGPIO phys=%08x virt=%p\n", (u32)res->start, ei->base_addr);
+ ei->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ei->base_addr))
+ return PTR_ERR(ei->base_addr);
if ((pdata->bus_width != 16) && (pdata->bus_width != 32))
- goto fail;
+ return -EINVAL;
+
ei->bus_shift = fls(pdata->bus_width - 1) - 3;
pr_debug("bus_shift = %d\n", ei->bus_shift);
if ((pdata->reg_width != 8) && (pdata->reg_width != 16))
- goto fail;
+ return -EINVAL;
+
ei->reg_shift = fls(pdata->reg_width - 1);
pr_debug("reg_shift = %d\n", ei->reg_shift);
@@ -308,10 +306,9 @@ static int __init egpio_probe(struct platform_device *pdev)
ei->chip = devm_kcalloc(&pdev->dev,
ei->nchips, sizeof(struct egpio_chip),
GFP_KERNEL);
- if (!ei->chip) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!ei->chip)
+ return -ENOMEM;
+
for (i = 0; i < ei->nchips; i++) {
ei->chip[i].reg_start = pdata->chip[i].reg_start;
ei->chip[i].cached_values = pdata->chip[i].initial_values;
@@ -321,10 +318,9 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->label = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"htc-egpio-%d",
i);
- if (!chip->label) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!chip->label)
+ return -ENOMEM;
+
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
@@ -366,10 +362,6 @@ static int __init egpio_probe(struct platform_device *pdev)
}
return 0;
-
-fail:
- printk(KERN_ERR "EGPIO failed to setup\n");
- return ret;
}
#ifdef CONFIG_PM
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 90bf7742f9b0..2f086d0aa1f4 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -159,7 +159,10 @@ static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
static int ichx_gpio_get_direction(struct gpio_chip *gpio, unsigned nr)
{
- return ichx_read_bit(GPIO_IO_SEL, nr);
+ if (ichx_read_bit(GPIO_IO_SEL, nr))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index ef51638f3f75..4ea15f08e0f4 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -104,7 +104,10 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
struct kempld_gpio_data *gpio = gpiochip_get_data(chip);
struct kempld_device_data *pld = gpio->pld;
- return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
+ if (kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int kempld_gpio_pincount(struct kempld_device_data *pld)
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 801995dd9b26..70fad87ff2db 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -33,7 +33,7 @@ static int lp873x_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
/* This device is output only */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int lp873x_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index a121c8f10610..e1244520cf7d 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -57,7 +57,10 @@ static int lp87565_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !(val & BIT(offset));
+ if (val & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int lp87565_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index e9e47c0d5be7..490ce7bae25e 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -164,6 +164,12 @@ static int lp_irq_type(struct irq_data *d, unsigned type)
value |= TRIG_SEL_BIT | INT_INV_BIT;
outl(value, reg);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
spin_unlock_irqrestore(&lg->lock, flags);
return 0;
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index 7086f8b5388f..8f38303fcbc4 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -34,7 +34,10 @@ static int madera_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !!(val & MADERA_GP1_DIR_MASK);
+ if (val & MADERA_GP1_DIR_MASK)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int madera_gpio_direction_in(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index 4b4b2ceb82fc..0696d5a21431 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -94,7 +94,7 @@ DECLARE_CRC8_TABLE(max3191x_crc8);
static int max3191x_get_direction(struct gpio_chip *gpio, unsigned int offset)
{
- return 1; /* always in */
+ return GPIO_LINE_DIRECTION_IN; /* always in */
}
static int max3191x_direction_input(struct gpio_chip *gpio, unsigned int offset)
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 642c6321c22a..313bd02dd893 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -18,109 +18,115 @@ struct max77620_gpio {
struct gpio_chip gpio_chip;
struct regmap *rmap;
struct device *dev;
+ struct mutex buslock; /* irq_bus_lock */
+ unsigned int irq_type[8];
+ bool irq_enabled[8];
};
-static const struct regmap_irq max77620_gpio_irqs[] = {
- [0] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE0,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 0,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [1] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE1,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 1,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [2] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE2,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 2,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [3] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE3,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 3,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [4] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE4,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 4,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [5] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE5,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 5,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [6] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE6,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 6,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [7] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE7,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 7,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
-};
+static irqreturn_t max77620_gpio_irqhandler(int irq, void *data)
+{
+ struct max77620_gpio *gpio = data;
+ unsigned int value, offset;
+ unsigned long pending;
+ int err;
+
+ err = regmap_read(gpio->rmap, MAX77620_REG_IRQ_LVL2_GPIO, &value);
+ if (err < 0) {
+ dev_err(gpio->dev, "REG_IRQ_LVL2_GPIO read failed: %d\n", err);
+ return IRQ_NONE;
+ }
+
+ pending = value;
+
+ for_each_set_bit(offset, &pending, 8) {
+ unsigned int virq;
+
+ virq = irq_find_mapping(gpio->gpio_chip.irq.domain, offset);
+ handle_nested_irq(virq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void max77620_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+
+ gpio->irq_enabled[data->hwirq] = false;
+}
-static const struct regmap_irq_chip max77620_gpio_irq_chip = {
- .name = "max77620-gpio",
- .irqs = max77620_gpio_irqs,
- .num_irqs = ARRAY_SIZE(max77620_gpio_irqs),
- .num_regs = 1,
- .num_type_reg = 8,
- .irq_reg_stride = 1,
- .type_reg_stride = 1,
- .status_base = MAX77620_REG_IRQ_LVL2_GPIO,
- .type_base = MAX77620_REG_GPIO0,
+static void max77620_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+
+ gpio->irq_enabled[data->hwirq] = true;
+}
+
+static int max77620_gpio_set_irq_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int irq_type;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ irq_type = MAX77620_CNFG_GPIO_INT_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_type = MAX77620_CNFG_GPIO_INT_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_type = MAX77620_CNFG_GPIO_INT_RISING |
+ MAX77620_CNFG_GPIO_INT_FALLING;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ gpio->irq_type[data->hwirq] = irq_type;
+
+ return 0;
+}
+
+static void max77620_gpio_bus_lock(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+
+ mutex_lock(&gpio->buslock);
+}
+
+static void max77620_gpio_bus_sync_unlock(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int value, offset = data->hwirq;
+ int err;
+
+ value = gpio->irq_enabled[offset] ? gpio->irq_type[offset] : 0;
+
+ err = regmap_update_bits(gpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_INT_MASK, value);
+ if (err < 0)
+ dev_err(chip->parent, "failed to update interrupt mask: %d\n",
+ err);
+
+ mutex_unlock(&gpio->buslock);
+}
+
+static struct irq_chip max77620_gpio_irqchip = {
+ .name = "max77620-gpio",
+ .irq_mask = max77620_gpio_irq_mask,
+ .irq_unmask = max77620_gpio_irq_unmask,
+ .irq_set_type = max77620_gpio_set_irq_type,
+ .irq_bus_lock = max77620_gpio_bus_lock,
+ .irq_bus_sync_unlock = max77620_gpio_bus_sync_unlock,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
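The replacement irqchip defers all register access to the bus-lock callbacks because the MAX77620 sits on I2C: irq_mask/irq_unmask/irq_set_type only record the requested state, and the actual regmap write happens in irq_bus_sync_unlock(), which the IRQ core calls from a sleepable context. A rough sketch of that pattern; the names and register offsets below are hypothetical:

/* Illustrative sketch only; names and register offsets are hypothetical. */
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

struct example_chip {
	struct regmap *regmap;
	struct mutex buslock;
	unsigned int type[8];
	bool enabled[8];
};

static void example_irq_mask(struct irq_data *d)
{
	struct example_chip *ec = irq_data_get_irq_chip_data(d);

	ec->enabled[d->hwirq] = false;		/* record only, no bus I/O here */
}

static void example_irq_bus_lock(struct irq_data *d)
{
	struct example_chip *ec = irq_data_get_irq_chip_data(d);

	mutex_lock(&ec->buslock);
}

static void example_irq_bus_sync_unlock(struct irq_data *d)
{
	struct example_chip *ec = irq_data_get_irq_chip_data(d);

	/* Hypothetical layout: one config register per line at 0x36 + hwirq */
	regmap_update_bits(ec->regmap, 0x36 + d->hwirq, 0x0c,
			   ec->enabled[d->hwirq] ? ec->type[d->hwirq] : 0);
	mutex_unlock(&ec->buslock);
}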
static int max77620_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
@@ -254,14 +260,6 @@ static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
return -ENOTSUPP;
}
-static int max77620_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
-{
- struct max77620_gpio *mgpio = gpiochip_get_data(gc);
- struct max77620_chip *chip = dev_get_drvdata(mgpio->dev->parent);
-
- return regmap_irq_get_virq(chip->gpio_irq_data, offset);
-}
-
static int max77620_gpio_probe(struct platform_device *pdev)
{
struct max77620_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -287,7 +285,6 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
mgpio->gpio_chip.set = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
- mgpio->gpio_chip.to_irq = max77620_gpio_to_irq;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
mgpio->gpio_chip.base = -1;
@@ -303,15 +300,21 @@ static int max77620_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_regmap_add_irq_chip(&pdev->dev, chip->rmap, gpio_irq,
- IRQF_ONESHOT, -1,
- &max77620_gpio_irq_chip,
- &chip->gpio_irq_data);
+ mutex_init(&mgpio->buslock);
+
+ gpiochip_irqchip_add_nested(&mgpio->gpio_chip, &max77620_gpio_irqchip,
+ 0, handle_edge_irq, IRQ_TYPE_NONE);
+
+ ret = request_threaded_irq(gpio_irq, NULL, max77620_gpio_irqhandler,
+ IRQF_ONESHOT, "max77620-gpio", mgpio);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add gpio irq_chip %d\n", ret);
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
return ret;
}
+ gpiochip_set_nested_irqchip(&mgpio->gpio_chip, &max77620_gpio_irqchip,
+ gpio_irq);
+
return 0;
}
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index 70fdb42a8e88..1e21c661d79d 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -211,3 +211,4 @@ MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 3302125e5265..48918a016cd8 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -162,7 +162,10 @@ static int mrfld_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- return !(readl(gpdr) & BIT(offset % 32));
+ if (readl(gpdr) & BIT(offset % 32))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int mrfld_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -362,8 +365,9 @@ static void mrfld_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
-static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
+static int mrfld_irq_init_hw(struct gpio_chip *chip)
{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned int base;
@@ -375,6 +379,8 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
reg = gpio_reg(&priv->chip, base, GFER);
writel(0, reg);
}
+
+ return 0;
}
static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -393,14 +399,36 @@ static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
return name;
}
-static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int mrfld_gpio_add_pin_ranges(struct gpio_chip *chip)
{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
const struct mrfld_gpio_pinrange *range;
const char *pinctrl_dev_name;
+ unsigned int i;
+ int retval;
+
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name(priv);
+ for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
+ range = &mrfld_gpio_ranges[i];
+ retval = gpiochip_add_pin_range(&priv->chip, pinctrl_dev_name,
+ range->gpio_base,
+ range->pin_base,
+ range->npins);
+ if (retval) {
+ dev_err(priv->dev, "failed to add GPIO pin range\n");
+ return retval;
+ }
+ }
+
+ return 0;
+}
+
+static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct gpio_irq_chip *girq;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
- unsigned int i;
int retval;
retval = pcim_enable_device(pdev);
@@ -441,42 +469,31 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
priv->chip.base = gpio_base;
priv->chip.ngpio = MRFLD_NGPIO;
priv->chip.can_sleep = false;
+ priv->chip.add_pin_ranges = mrfld_gpio_add_pin_ranges;
raw_spin_lock_init(&priv->lock);
- pci_set_drvdata(pdev, priv);
+ girq = &priv->chip.irq;
+ girq->chip = &mrfld_irqchip;
+ girq->init_hw = mrfld_irq_init_hw;
+ girq->parent_handler = mrfld_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = pdev->irq;
+ girq->first = irq_base;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
return retval;
}
- pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name(priv);
- for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
- range = &mrfld_gpio_ranges[i];
- retval = gpiochip_add_pin_range(&priv->chip,
- pinctrl_dev_name,
- range->gpio_base,
- range->pin_base,
- range->npins);
- if (retval) {
- dev_err(&pdev->dev, "failed to add GPIO pin range\n");
- return retval;
- }
- }
-
- retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
- handle_bad_irq, IRQ_TYPE_NONE);
- if (retval) {
- dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
- return retval;
- }
-
- mrfld_irq_init_hw(priv);
-
- gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
- mrfld_irq_handler);
-
+ pci_set_drvdata(pdev, priv);
return 0;
}
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 6f904c874678..f729e3e9e983 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -370,15 +370,23 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
/* Return 0 if output, 1 if input */
- if (gc->bgpio_dir_unreadable)
- return !(gc->bgpio_dir & bgpio_line2mask(gc, gpio));
- if (gc->reg_dir_out)
- return !(gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio));
+ if (gc->bgpio_dir_unreadable) {
+ if (gc->bgpio_dir & bgpio_line2mask(gc, gpio))
+ return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
+ if (gc->reg_dir_out) {
+ if (gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio))
+ return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
if (gc->reg_dir_in)
- return !!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio));
+ if (!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio)))
+ return GPIO_LINE_DIRECTION_OUT;
- /* This should not happen */
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
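
The conversion above sets the pattern repeated across the rest of this series: get_direction() callbacks return the named GPIO_LINE_DIRECTION_* constants instead of bare 0/1. Their values (IN is 1, OUT is 0) keep the old numeric convention, so this is purely a readability change. An illustrative callback, with a hypothetical direction register accessor:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

/* example_dir_reg() is a made-up helper returning the controller's
 * direction register; assume a set bit means the line is an output.
 */
static int example_get_direction(struct gpio_chip *gc, unsigned int offset)
{
	if (readl(example_dir_reg(gc)) & BIT(offset))
		return GPIO_LINE_DIRECTION_OUT;

	return GPIO_LINE_DIRECTION_IN;
}
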
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 213aedc97dc2..56d647a30e3e 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -34,14 +34,9 @@
#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
-enum {
- GPIO_MOCKUP_DIR_IN = 0,
- GPIO_MOCKUP_DIR_OUT = 1,
-};
-
/*
* struct gpio_pin_status - structure describing a GPIO status
- * @dir: Configures direction of gpio as "in" or "out", 0=in, 1=out
+ * @dir: Configures direction of gpio as "in" or "out"
* @value: Configures status of the gpio as 0(low) or 1(high)
*/
struct gpio_mockup_line_status {
@@ -146,13 +141,68 @@ static void gpio_mockup_set_multiple(struct gpio_chip *gc,
mutex_unlock(&chip->lock);
}
+static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
+ unsigned int offset, int value)
+{
+ struct gpio_desc *desc;
+ struct gpio_chip *gc;
+ struct irq_sim *sim;
+ int curr, irq, irq_type;
+
+ gc = &chip->gc;
+ desc = &gc->gpiodev->descs[offset];
+ sim = &chip->irqsim;
+
+ mutex_lock(&chip->lock);
+
+ if (test_bit(FLAG_REQUESTED, &desc->flags) &&
+ !test_bit(FLAG_IS_OUT, &desc->flags)) {
+ curr = __gpio_mockup_get(chip, offset);
+ if (curr == value)
+ goto out;
+
+ irq = irq_sim_irqnum(sim, offset);
+ irq_type = irq_get_trigger_type(irq);
+
+ if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
+ (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
+ irq_sim_fire(sim, offset);
+ }
+
+ /* Change the value unless we're actively driving the line. */
+ if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
+ !test_bit(FLAG_IS_OUT, &desc->flags))
+ __gpio_mockup_set(chip, offset, value);
+
+out:
+ chip->lines[offset].pull = value;
+ mutex_unlock(&chip->lock);
+ return 0;
+}
+
+static int gpio_mockup_set_config(struct gpio_chip *gc,
+ unsigned int offset, unsigned long config)
+{
+ struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ return gpio_mockup_apply_pull(chip, offset, 1);
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return gpio_mockup_apply_pull(chip, offset, 0);
+ default:
+ break;
+ }
+ return -ENOTSUPP;
+}
+
static int gpio_mockup_dirout(struct gpio_chip *gc,
unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- chip->lines[offset].dir = GPIO_MOCKUP_DIR_OUT;
+ chip->lines[offset].dir = GPIO_LINE_DIRECTION_OUT;
__gpio_mockup_set(chip, offset, value);
mutex_unlock(&chip->lock);
@@ -164,7 +214,7 @@ static int gpio_mockup_dirin(struct gpio_chip *gc, unsigned int offset)
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- chip->lines[offset].dir = GPIO_MOCKUP_DIR_IN;
+ chip->lines[offset].dir = GPIO_LINE_DIRECTION_IN;
mutex_unlock(&chip->lock);
return 0;
@@ -226,12 +276,8 @@ static ssize_t gpio_mockup_debugfs_write(struct file *file,
size_t size, loff_t *ppos)
{
struct gpio_mockup_dbgfs_private *priv;
- int rv, val, curr, irq, irq_type;
- struct gpio_mockup_chip *chip;
+ int rv, val;
struct seq_file *sfile;
- struct gpio_desc *desc;
- struct gpio_chip *gc;
- struct irq_sim *sim;
if (*ppos != 0)
return -EINVAL;
@@ -244,35 +290,9 @@ static ssize_t gpio_mockup_debugfs_write(struct file *file,
sfile = file->private_data;
priv = sfile->private;
- chip = priv->chip;
- gc = &chip->gc;
- desc = &gc->gpiodev->descs[priv->offset];
- sim = &chip->irqsim;
-
- mutex_lock(&chip->lock);
-
- if (test_bit(FLAG_REQUESTED, &desc->flags) &&
- !test_bit(FLAG_IS_OUT, &desc->flags)) {
- curr = __gpio_mockup_get(chip, priv->offset);
- if (curr == val)
- goto out;
-
- irq = irq_sim_irqnum(sim, priv->offset);
- irq_type = irq_get_trigger_type(irq);
-
- if ((val == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
- (val == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
- irq_sim_fire(sim, priv->offset);
- }
-
- /* Change the value unless we're actively driving the line. */
- if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
- !test_bit(FLAG_IS_OUT, &desc->flags))
- __gpio_mockup_set(chip, priv->offset, val);
-
-out:
- chip->lines[priv->offset].pull = val;
- mutex_unlock(&chip->lock);
+ rv = gpio_mockup_apply_pull(priv->chip, priv->offset, val);
+ if (rv)
+ return rv;
return size;
}
@@ -418,6 +438,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
+ gc->set_config = gpio_mockup_set_config;
gc->to_irq = gpio_mockup_to_irq;
gc->free = gpio_mockup_free;
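
gpio_mockup_set_config() above is one instance of the generic set_config() hook: the unsigned long it receives is a packed pinconf value, unpacked with pinconf_to_config_param() and pinconf_to_config_argument(). A sketch under that assumption; example_apply_pull() and example_set_debounce() are hypothetical helpers, not part of the patch:

#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinconf-generic.h>

static int example_set_config(struct gpio_chip *gc, unsigned int offset,
			      unsigned long config)
{
	enum pin_config_param param = pinconf_to_config_param(config);
	u32 arg = pinconf_to_config_argument(config);

	switch (param) {
	case PIN_CONFIG_BIAS_PULL_UP:
		return example_apply_pull(gc, offset, 1);
	case PIN_CONFIG_BIAS_PULL_DOWN:
		return example_apply_pull(gc, offset, 0);
	case PIN_CONFIG_INPUT_DEBOUNCE:
		/* argument is the debounce time in microseconds */
		return example_set_debounce(gc, offset, arg);
	default:
		return -ENOTSUPP;
	}
}
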
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 3fd729994a38..8299909318f4 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -78,9 +78,9 @@ static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
/* All lines are hard wired to be either input or output, not both. */
if (chip->desc->in_mask & BIT(offset))
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
else if (chip->desc->out_mask & BIT(offset))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
else
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 16a47de29c94..f1e164cecff8 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
+#include <linux/interrupt.h>
#define MPC8XXX_GPIO_PINS 32
@@ -127,20 +128,19 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return -ENXIO;
}
-static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
+static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = data;
struct gpio_chip *gc = &mpc8xxx_gc->gc;
- unsigned int mask;
+ unsigned long mask;
+ int i;
mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
& gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
- if (mask)
- generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
- 32 - ffs(mask)));
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
+ for_each_set_bit(i, &mask, 32)
+ generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, 31 - i));
+
+ return IRQ_HANDLED;
}
static void mpc8xxx_irq_unmask(struct irq_data *d)
@@ -377,7 +377,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* It's assumed that only a single type of gpio controller is available
* on the current machine, so overwriting global data is fine.
*/
- mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+ if (devtype->irq_set_type)
+ mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
if (devtype->gpio_dir_out)
gc->direction_output = devtype->gpio_dir_out;
@@ -386,6 +387,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
gc->to_irq = mpc8xxx_gpio_to_irq;
+ if (of_device_is_compatible(np, "fsl,qoriq-gpio"))
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+
ret = gpiochip_add_data(gc, mpc8xxx_gc);
if (ret) {
pr_err("%pOF: GPIO chip registration failed with status %d\n",
@@ -409,8 +413,16 @@ static int mpc8xxx_probe(struct platform_device *pdev)
if (devtype->gpio_dir_in_init)
devtype->gpio_dir_in_init(gc);
- irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
- mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
+ ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
+ mpc8xxx_gpio_irq_cascade,
+ IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
+ mpc8xxx_gc);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to devm_request_irq(%d), ret = %d\n",
+ np->full_name, mpc8xxx_gc->irqn, ret);
+ goto err;
+ }
+
return 0;
err:
iounmap(mpc8xxx_gc->regs);
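
The reworked cascade handler above demultiplexes every pending bit instead of only the highest one, and since it is now requested with devm_request_irq() it returns an irqreturn_t. A condensed sketch of the demux loop; struct example_gpio and read_status()/read_mask() are hypothetical, the MPC8xxx-specific 31 - i bit reversal is left out, and the IRQ_NONE early return is an addition for the shared-handler case rather than something the patch does:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

struct example_gpio {
	struct irq_domain *domain;
};

static irqreturn_t example_gpio_cascade(int irq, void *data)
{
	struct example_gpio *gc = data;
	unsigned long pending = read_status(gc) & read_mask(gc);
	int bit;

	if (!pending)
		return IRQ_NONE;	/* let other IRQF_SHARED handlers run */

	/* Hand each pending hardware bit to its mapped child interrupt. */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(irq_linear_revmap(gc->domain, bit));

	return IRQ_HANDLED;
}
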
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 6c0687694341..993bbeb3c006 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -384,7 +384,10 @@ static int mvebu_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &u);
- return !!(u & BIT(pin));
+ if (u & BIT(pin))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned int pin)
@@ -773,23 +776,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
- struct resource *res;
u32 set;
if (!of_device_is_compatible(mvchip->chip.of_node,
"marvell,armada-370-gpio"))
return 0;
- /*
- * There are only two sets of PWM configuration registers for
- * all the GPIO lines on those SoCs which this driver reserves
- * for the first two GPIO chips. So if the resource is missing
- * we can't treat it as an error.
- */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
- if (!res)
- return 0;
-
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
@@ -812,7 +804,13 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
- mvpwm->membase = devm_ioremap_resource(dev, res);
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+ * for the first two GPIO chips. So if the resource is missing
+ * we can't treat it as an error.
+ */
+ mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
if (IS_ERR(mvpwm->membase))
return PTR_ERR(mvpwm->membase);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 7907a8755866..c77d474185f3 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -411,6 +411,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
+ int irq_count;
int irq_base;
int err;
@@ -426,9 +427,15 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (IS_ERR(port->base))
return PTR_ERR(port->base);
- port->irq_high = platform_get_irq(pdev, 1);
- if (port->irq_high < 0)
- port->irq_high = 0;
+ irq_count = platform_irq_count(pdev);
+ if (irq_count < 0)
+ return irq_count;
+
+ if (irq_count > 1) {
+ port->irq_high = platform_get_irq(pdev, 1);
+ if (port->irq_high < 0)
+ port->irq_high = 0;
+ }
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 5e5437a2c607..c4a314c68555 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -248,7 +248,10 @@ static int mxs_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
u32 dir;
dir = readl(port->base + PINCTRL_DOE(port));
- return !(dir & mask);
+ if (dir & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static const struct platform_device_id mxs_gpio_ids[] = {
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d0f27084a942..3bd8adaeed9e 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -805,8 +805,10 @@ static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank = gpiochip_get_data(chip);
- return !!(readl_relaxed(bank->base + bank->regs->direction) &
- BIT(offset));
+ if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index de5d1383f28d..82122c3c688a 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -449,7 +449,10 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
if (ret < 0)
return ret;
- return !!(reg_val & bit);
+ if (reg_val & bit)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 5aa136a6a3e0..df51dd08bdfe 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -61,9 +61,9 @@ static int idio_16_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
if (offset > 15)
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int idio_16_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 52f1647a46fd..44c1e4fc489f 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -104,15 +104,18 @@ static int idio_24_gpio_get_direction(struct gpio_chip *chip,
/* FET Outputs */
if (offset < 24)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
/* Isolated Inputs */
if (offset < 48)
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
/* TTL/CMOS I/O */
/* OUT MODE = 1 when TTL/CMOS Output Mode is set */
- return !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask);
+ if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int idio_24_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index f809a5a8e9eb..1331b2a94679 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -65,7 +65,7 @@ static int pisosr_gpio_get_direction(struct gpio_chip *chip,
unsigned offset)
{
/* This device always input */
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int pisosr_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 722ce5cf861e..5df7782e348f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -63,7 +63,10 @@ static int pl061_get_direction(struct gpio_chip *gc, unsigned offset)
{
struct pl061 *pl061 = gpiochip_get_data(gc);
- return !(readb(pl061->base + GPIODIR) & BIT(offset));
+ if (readb(pl061->base + GPIODIR) & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index b77ea16ffa03..bb100e0124e6 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -147,7 +147,10 @@ static int rpi_exp_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
get.gpio);
return ret ? ret : -EIO;
}
- return !get.direction;
+ if (get.direction)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int rpi_exp_gpio_get(struct gpio_chip *gc, unsigned int off)
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 187984d26f47..f800b250971c 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -279,7 +279,10 @@ static int gpio_rcar_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- return !(gpio_rcar_read(p, INOUTSEL) & BIT(offset));
+ if (gpio_rcar_read(p, INOUTSEL) & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -483,7 +486,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->ngpio = npins;
irq_chip = &p->irq_chip;
- irq_chip->name = name;
+ irq_chip->name = "gpio-rcar";
irq_chip->parent_device = dev;
irq_chip->irq_mask = gpio_rcar_irq_disable;
irq_chip->irq_unmask = gpio_rcar_irq_enable;
diff --git a/drivers/gpio/gpio-rda.c b/drivers/gpio/gpio-rda.c
new file mode 100644
index 000000000000..28dcbb58b76b
--- /dev/null
+++ b/drivers/gpio/gpio-rda.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RDA Micro GPIO driver
+ *
+ * Copyright (C) 2012 RDA Micro Inc.
+ * Copyright (C) 2019 Manivannan Sadhasivam
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define RDA_GPIO_OEN_VAL 0x00
+#define RDA_GPIO_OEN_SET_OUT 0x04
+#define RDA_GPIO_OEN_SET_IN 0x08
+#define RDA_GPIO_VAL 0x0c
+#define RDA_GPIO_SET 0x10
+#define RDA_GPIO_CLR 0x14
+#define RDA_GPIO_INT_CTRL_SET 0x18
+#define RDA_GPIO_INT_CTRL_CLR 0x1c
+#define RDA_GPIO_INT_CLR 0x20
+#define RDA_GPIO_INT_STATUS 0x24
+
+#define RDA_GPIO_IRQ_RISE_SHIFT 0
+#define RDA_GPIO_IRQ_FALL_SHIFT 8
+#define RDA_GPIO_DEBOUNCE_SHIFT 16
+#define RDA_GPIO_LEVEL_SHIFT 24
+
+#define RDA_GPIO_IRQ_MASK 0xff
+
+/* Each bank consists of 32 GPIOs */
+#define RDA_GPIO_BANK_NR 32
+
+struct rda_gpio {
+ struct gpio_chip chip;
+ void __iomem *base;
+ spinlock_t lock;
+ struct irq_chip irq_chip;
+ int irq;
+};
+
+static inline void rda_gpio_update(struct gpio_chip *chip, unsigned int offset,
+ u16 reg, int val)
+{
+ struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
+ void __iomem *base = rda_gpio->base;
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&rda_gpio->lock, flags);
+ tmp = readl_relaxed(base + reg);
+
+ if (val)
+ tmp |= BIT(offset);
+ else
+ tmp &= ~BIT(offset);
+
+ writel_relaxed(tmp, base + reg);
+ spin_unlock_irqrestore(&rda_gpio->lock, flags);
+}
+
+static void rda_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
+ void __iomem *base = rda_gpio->base;
+ u32 offset = irqd_to_hwirq(data);
+ u32 value;
+
+ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT;
+ value |= BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
+
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+}
+
+static void rda_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ u32 offset = irqd_to_hwirq(data);
+
+ rda_gpio_update(chip, offset, RDA_GPIO_INT_CLR, 1);
+}
+
+static int rda_gpio_set_irq(struct gpio_chip *chip, u32 offset,
+ unsigned int flow_type)
+{
+ struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
+ void __iomem *base = rda_gpio->base;
+ u32 value;
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ /* Set rising edge trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+
+ /* Switch to edge trigger interrupt */
+ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ /* Set falling edge trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+
+ /* Switch to edge trigger interrupt */
+ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ /* Set both edge trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT;
+ value |= BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+
+ /* Switch to edge trigger interrupt */
+ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ /* Set high level trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT;
+
+ /* Switch to level trigger interrupt */
+ value |= BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ /* Set low level trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
+
+ /* Switch to level trigger interrupt */
+ value |= BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rda_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ u32 offset = irqd_to_hwirq(data);
+ u32 trigger = irqd_get_trigger_type(data);
+
+ rda_gpio_set_irq(chip, offset, trigger);
+}
+
+static int rda_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ u32 offset = irqd_to_hwirq(data);
+ int ret;
+
+ ret = rda_gpio_set_irq(chip, offset, flow_type);
+ if (ret)
+ return ret;
+
+ if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ irq_set_handler_locked(data, handle_level_irq);
+ else if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ return 0;
+}
+
+static void rda_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
+ unsigned long status;
+ u32 n, girq;
+
+ chained_irq_enter(ic, desc);
+
+ status = readl_relaxed(rda_gpio->base + RDA_GPIO_INT_STATUS);
+ /* Only lower 8 bits are capable of generating interrupts */
+ status &= RDA_GPIO_IRQ_MASK;
+
+ for_each_set_bit(n, &status, RDA_GPIO_BANK_NR) {
+ girq = irq_find_mapping(chip->irq.domain, n);
+ generic_handle_irq(girq);
+ }
+
+ chained_irq_exit(ic, desc);
+}
+
+static int rda_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *girq;
+ struct rda_gpio *rda_gpio;
+ u32 ngpios;
+ int ret;
+
+ rda_gpio = devm_kzalloc(dev, sizeof(*rda_gpio), GFP_KERNEL);
+ if (!rda_gpio)
+ return -ENOMEM;
+
+ ret = device_property_read_u32(dev, "ngpios", &ngpios);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Not all ports have interrupt capability. For instance, on
+ * RDA8810PL, GPIOC doesn't support interrupts, so ports without
+ * an IRQ must be handled as well.
+ */
+ rda_gpio->irq = platform_get_irq(pdev, 0);
+
+ rda_gpio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rda_gpio->base))
+ return PTR_ERR(rda_gpio->base);
+
+ spin_lock_init(&rda_gpio->lock);
+
+ ret = bgpio_init(&rda_gpio->chip, dev, 4,
+ rda_gpio->base + RDA_GPIO_VAL,
+ rda_gpio->base + RDA_GPIO_SET,
+ rda_gpio->base + RDA_GPIO_CLR,
+ rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
+ rda_gpio->base + RDA_GPIO_OEN_SET_IN,
+ BGPIOF_READ_OUTPUT_REG_SET);
+ if (ret) {
+ dev_err(dev, "bgpio_init failed\n");
+ return ret;
+ }
+
+ rda_gpio->chip.label = dev_name(dev);
+ rda_gpio->chip.ngpio = ngpios;
+ rda_gpio->chip.base = -1;
+ rda_gpio->chip.parent = dev;
+ rda_gpio->chip.of_node = np;
+
+ if (rda_gpio->irq >= 0) {
+ rda_gpio->irq_chip.name = "rda-gpio";
+ rda_gpio->irq_chip.irq_ack = rda_gpio_irq_ack;
+ rda_gpio->irq_chip.irq_mask = rda_gpio_irq_mask;
+ rda_gpio->irq_chip.irq_unmask = rda_gpio_irq_unmask;
+ rda_gpio->irq_chip.irq_set_type = rda_gpio_irq_set_type;
+ rda_gpio->irq_chip.flags = IRQCHIP_SKIP_SET_WAKE;
+
+ girq = &rda_gpio->chip.irq;
+ girq->chip = &rda_gpio->irq_chip;
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->parent_handler = rda_gpio_irq_handler;
+ girq->parent_handler_data = rda_gpio;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = rda_gpio->irq;
+ }
+
+ platform_set_drvdata(pdev, rda_gpio);
+
+ return devm_gpiochip_add_data(dev, &rda_gpio->chip, rda_gpio);
+}
+
+static const struct of_device_id rda_gpio_of_match[] = {
+ { .compatible = "rda,8810pl-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rda_gpio_of_match);
+
+static struct platform_driver rda_gpio_driver = {
+ .probe = rda_gpio_probe,
+ .driver = {
+ .name = "rda-gpio",
+ .of_match_table = rda_gpio_of_match,
+ },
+};
+
+module_platform_driver_probe(rda_gpio_driver, rda_gpio_probe);
+
+MODULE_DESCRIPTION("RDA Micro GPIO driver");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index fdc7a9d5b382..d35169bde25a 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -26,7 +26,8 @@ static int gpio_reg_get_direction(struct gpio_chip *gc, unsigned offset)
{
struct gpio_reg *r = to_gpio_reg(gc);
- return r->direction & BIT(offset) ? 1 : 0;
+ return r->direction & BIT(offset) ? GPIO_LINE_DIRECTION_IN :
+ GPIO_LINE_DIRECTION_OUT;
}
static int gpio_reg_direction_output(struct gpio_chip *gc, unsigned offset,
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 46b7cf23fb0f..edff5e81489f 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -53,7 +53,10 @@ static int sa1100_get_direction(struct gpio_chip *chip, unsigned offset)
{
void __iomem *gpdr = sa1100_gpio_chip(chip)->membase + R_GPDR;
- return !(readl_relaxed(gpdr) & BIT(offset));
+ if (readl_relaxed(gpdr) & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int sa1100_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index 7d718557092e..b04c561f3280 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -119,7 +119,8 @@ static int sama5d2_piobu_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return (ret == PIOBU_IN) ? 1 : 0;
+ return (ret == PIOBU_IN) ? GPIO_LINE_DIRECTION_IN :
+ GPIO_LINE_DIRECTION_OUT;
}
/**
@@ -154,9 +155,9 @@ static int sama5d2_piobu_get(struct gpio_chip *chip, unsigned int pin)
/* if pin is input, read value from PDS else read from SOD */
int ret = sama5d2_piobu_get_direction(chip, pin);
- if (ret == 1)
+ if (ret == GPIO_LINE_DIRECTION_IN)
ret = sama5d2_piobu_read_value(chip, pin, PIOBU_PDS);
- else if (!ret)
+ else if (ret == GPIO_LINE_DIRECTION_OUT)
ret = sama5d2_piobu_read_value(chip, pin, PIOBU_SOD);
if (ret < 0)
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index fb143f28c386..c65f35b68202 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -127,7 +127,10 @@ static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned gpio_num)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
- return sch_gpio_reg_get(sch, gpio_num, GIO);
+ if (sch_gpio_reg_get(sch, gpio_num, GIO))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static const struct gpio_chip sch_gpio_chip = {
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index 8ecf336c9af4..da01e1cad7cb 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -228,7 +228,10 @@ static int sch311x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
data = inb(block->runtime_reg + block->config_regs[offset]);
spin_unlock(&block->lock);
- return !!(data & SCH311X_GPIO_CONF_DIR);
+ if (data & SCH311X_GPIO_CONF_DIR)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int sch311x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 006a7e6a75f2..311f66757b92 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -203,9 +203,9 @@ static int gpio_siox_direction_output(struct gpio_chip *chip,
static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
{
if (offset < 12)
- return 1; /* input */
+ return GPIO_LINE_DIRECTION_IN;
else
- return 0; /* output */
+ return GPIO_LINE_DIRECTION_OUT;
}
static int gpio_siox_probe(struct siox_device *sdevice)
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 994d542daf53..542706a852e6 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -84,7 +84,10 @@ static int stmpe_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !(ret & mask);
+ if (ret & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int stmpe_gpio_direction_output(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 75b1135b383a..6be0684cfa49 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -97,7 +97,10 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !(ret & BIT(pos));
+ if (ret & BIT(pos))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int tc3589x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 8a01d3694b28..6fdfe4c5303e 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -215,7 +215,10 @@ static int tegra_gpio_get_direction(struct gpio_chip *chip,
oe = tegra_gpio_readl(tgi, GPIO_OE(tgi, offset));
- return !(oe & pin_mask);
+ if (oe & pin_mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index a9058fda187e..55b43b7ce88d 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -15,6 +15,14 @@
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
+/* security registers */
+#define TEGRA186_GPIO_CTL_SCR 0x0c
+#define TEGRA186_GPIO_CTL_SCR_SEC_WEN BIT(28)
+#define TEGRA186_GPIO_CTL_SCR_SEC_REN BIT(27)
+
+#define TEGRA186_GPIO_INT_ROUTE_MAPPING(p, x) (0x14 + (p) * 0x20 + (x) * 4)
+
+/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
#define TEGRA186_GPIO_ENABLE_CONFIG_ENABLE BIT(0)
#define TEGRA186_GPIO_ENABLE_CONFIG_OUT BIT(1)
@@ -24,6 +32,7 @@
#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE (0x3 << 2)
#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_MASK (0x3 << 2)
#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL BIT(4)
+#define TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE BIT(5)
#define TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT BIT(6)
#define TEGRA186_GPIO_DEBOUNCE_CONTROL 0x04
@@ -44,15 +53,16 @@
struct tegra_gpio_port {
const char *name;
- unsigned int offset;
+ unsigned int bank;
+ unsigned int port;
unsigned int pins;
- unsigned int irq;
};
struct tegra_gpio_soc {
const struct tegra_gpio_port *ports;
unsigned int num_ports;
const char *name;
+ unsigned int instance;
};
struct tegra_gpio {
@@ -63,6 +73,7 @@ struct tegra_gpio {
const struct tegra_gpio_soc *soc;
+ void __iomem *secure;
void __iomem *base;
};
@@ -89,12 +100,15 @@ static void __iomem *tegra186_gpio_get_base(struct tegra_gpio *gpio,
unsigned int pin)
{
const struct tegra_gpio_port *port;
+ unsigned int offset;
port = tegra186_gpio_get_port(gpio, &pin);
if (!port)
return NULL;
- return gpio->base + port->offset + pin * 0x20;
+ offset = port->bank * 0x1000 + port->port * 0x200;
+
+ return gpio->base + offset + pin * 0x20;
}
static int tegra186_gpio_get_direction(struct gpio_chip *chip,
@@ -110,9 +124,9 @@ static int tegra186_gpio_get_direction(struct gpio_chip *chip,
value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
if (value & TEGRA186_GPIO_ENABLE_CONFIG_OUT)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int tegra186_gpio_direction_input(struct gpio_chip *chip,
@@ -204,6 +218,42 @@ static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
}
+static int tegra186_gpio_set_config(struct gpio_chip *chip,
+ unsigned int offset,
+ unsigned long config)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ u32 debounce, value;
+ void __iomem *base;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (base == NULL)
+ return -ENXIO;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+
+ /*
+ * The Tegra186 GPIO controller supports a maximum of 255 ms debounce
+ * time.
+ */
+ if (debounce > 255000)
+ return -EINVAL;
+
+ debounce = DIV_ROUND_UP(debounce, USEC_PER_MSEC);
+
+ value = TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(debounce);
+ writel(value, base + TEGRA186_GPIO_DEBOUNCE_CONTROL);
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec,
u32 *flags)
@@ -327,7 +377,7 @@ static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
else
irq_set_handler_locked(data, handle_edge_irq);
- return 0;
+ return irq_chip_set_type_parent(data, type);
}
static void tegra186_gpio_irq(struct irq_desc *desc)
@@ -342,12 +392,14 @@ static void tegra186_gpio_irq(struct irq_desc *desc)
for (i = 0; i < gpio->soc->num_ports; i++) {
const struct tegra_gpio_port *port = &gpio->soc->ports[i];
- void __iomem *base = gpio->base + port->offset;
unsigned int pin, irq;
unsigned long value;
+ void __iomem *base;
- /* skip ports that are not associated with this controller */
- if (parent != gpio->irq[port->irq])
+ base = gpio->base + port->bank * 0x1000 + port->port * 0x200;
+
+ /* skip ports that are not associated with this bank */
+ if (parent != gpio->irq[port->bank])
goto skip;
value = readl(base + TEGRA186_GPIO_INTERRUPT_STATUS(1));
@@ -367,47 +419,119 @@ skip:
chained_irq_exit(chip, desc);
}
-static int tegra186_gpio_irq_domain_xlate(struct irq_domain *domain,
- struct device_node *np,
- const u32 *spec, unsigned int size,
- unsigned long *hwirq,
- unsigned int *type)
+static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
{
struct tegra_gpio *gpio = gpiochip_get_data(domain->host_data);
unsigned int port, pin, i, offset = 0;
- if (size < 2)
+ if (WARN_ON(gpio->gpio.of_gpio_n_cells < 2))
+ return -EINVAL;
+
+ if (WARN_ON(fwspec->param_count < gpio->gpio.of_gpio_n_cells))
return -EINVAL;
- port = spec[0] / 8;
- pin = spec[0] % 8;
+ port = fwspec->param[0] / 8;
+ pin = fwspec->param[0] % 8;
- if (port >= gpio->soc->num_ports) {
- dev_err(gpio->gpio.parent, "invalid port number: %u\n", port);
+ if (port >= gpio->soc->num_ports)
return -EINVAL;
- }
for (i = 0; i < port; i++)
offset += gpio->soc->ports[i].pins;
- *type = spec[1] & IRQ_TYPE_SENSE_MASK;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
*hwirq = offset + pin;
return 0;
}
-static const struct irq_domain_ops tegra186_gpio_irq_domain_ops = {
- .map = gpiochip_irq_map,
- .unmap = gpiochip_irq_unmap,
- .xlate = tegra186_gpio_irq_domain_xlate,
+static void tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ struct irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+
+ fwspec->param_count = 3;
+ fwspec->param[0] = gpio->soc->instance;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = parent_type;
+}
+
+static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int hwirq,
+ unsigned int type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
+ *parent_hwirq = chip->irq.child_offset_to_irq(chip, hwirq);
+ *parent_type = type;
+
+ return 0;
+}
+
+static unsigned int tegra186_gpio_child_offset_to_irq(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int i;
+
+ for (i = 0; i < gpio->soc->num_ports; i++) {
+ if (offset < gpio->soc->ports[i].pins)
+ break;
+
+ offset -= gpio->soc->ports[i].pins;
+ }
+
+ return offset + i * 8;
+}
+
+static const struct of_device_id tegra186_pmc_of_match[] = {
+ { .compatible = "nvidia,tegra186-pmc" },
+ { .compatible = "nvidia,tegra194-pmc" },
+ { /* sentinel */ }
};
+static void tegra186_gpio_init_route_mapping(struct tegra_gpio *gpio)
+{
+ unsigned int i, j;
+ u32 value;
+
+ for (i = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+ unsigned int offset, p = port->port;
+ void __iomem *base;
+
+ base = gpio->secure + port->bank * 0x1000 + 0x800;
+
+ value = readl(base + TEGRA186_GPIO_CTL_SCR);
+
+ /*
+ * For controllers that haven't been locked down yet, make
+ * sure to program the default interrupt route mapping.
+ */
+ if ((value & TEGRA186_GPIO_CTL_SCR_SEC_REN) == 0 &&
+ (value & TEGRA186_GPIO_CTL_SCR_SEC_WEN) == 0) {
+ for (j = 0; j < 8; j++) {
+ offset = TEGRA186_GPIO_INT_ROUTE_MAPPING(p, j);
+
+ value = readl(base + offset);
+ value = BIT(port->pins) - 1;
+ writel(value, base + offset);
+ }
+ }
+ }
+}
+
static int tegra186_gpio_probe(struct platform_device *pdev)
{
unsigned int i, j, offset;
struct gpio_irq_chip *irq;
struct tegra_gpio *gpio;
- struct resource *res;
+ struct device_node *np;
char **names;
int err;
@@ -417,8 +541,11 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
- gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
+ if (IS_ERR(gpio->secure))
+ return PTR_ERR(gpio->secure);
+
+ gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
@@ -449,6 +576,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.direction_output = tegra186_gpio_direction_output;
gpio->gpio.get = tegra186_gpio_get,
gpio->gpio.set = tegra186_gpio_set;
+ gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.base = -1;
@@ -487,10 +615,15 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_mask = tegra186_irq_mask;
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
+ gpio->intc.irq_set_wake = irq_chip_set_wake_parent;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;
- irq->domain_ops = &tegra186_gpio_irq_domain_ops;
+ irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
+ irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq;
+ irq->populate_parent_fwspec = tegra186_gpio_populate_parent_fwspec;
+ irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq;
+ irq->child_irq_domain_ops.translate = tegra186_gpio_irq_domain_translate;
irq->handler = handle_simple_irq;
irq->default_type = IRQ_TYPE_NONE;
irq->parent_handler = tegra186_gpio_irq;
@@ -498,6 +631,17 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
irq->num_parents = gpio->num_irq;
irq->parents = gpio->irq;
+ np = of_find_matching_node(NULL, tegra186_pmc_of_match);
+ if (np) {
+ irq->parent_domain = irq_find_host(np);
+ of_node_put(np);
+
+ if (!irq->parent_domain)
+ return -EPROBE_DEFER;
+ }
+
+ tegra186_gpio_init_route_mapping(gpio);
+
irq->map = devm_kcalloc(&pdev->dev, gpio->gpio.ngpio,
sizeof(*irq->map), GFP_KERNEL);
if (!irq->map)
@@ -507,7 +651,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
const struct tegra_gpio_port *port = &gpio->soc->ports[i];
for (j = 0; j < port->pins; j++)
- irq->map[offset + j] = irq->parents[port->irq];
+ irq->map[offset + j] = irq->parents[port->bank];
offset += port->pins;
}
@@ -526,136 +670,140 @@ static int tegra186_gpio_remove(struct platform_device *pdev)
return 0;
}
-#define TEGRA186_MAIN_GPIO_PORT(port, base, count, controller) \
- [TEGRA186_MAIN_GPIO_PORT_##port] = { \
- .name = #port, \
- .offset = base, \
- .pins = count, \
- .irq = controller, \
+#define TEGRA186_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA186_MAIN_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
static const struct tegra_gpio_port tegra186_main_ports[] = {
- TEGRA186_MAIN_GPIO_PORT( A, 0x2000, 7, 2),
- TEGRA186_MAIN_GPIO_PORT( B, 0x3000, 7, 3),
- TEGRA186_MAIN_GPIO_PORT( C, 0x3200, 7, 3),
- TEGRA186_MAIN_GPIO_PORT( D, 0x3400, 6, 3),
- TEGRA186_MAIN_GPIO_PORT( E, 0x2200, 8, 2),
- TEGRA186_MAIN_GPIO_PORT( F, 0x2400, 6, 2),
- TEGRA186_MAIN_GPIO_PORT( G, 0x4200, 6, 4),
- TEGRA186_MAIN_GPIO_PORT( H, 0x1000, 7, 1),
- TEGRA186_MAIN_GPIO_PORT( I, 0x0800, 8, 0),
- TEGRA186_MAIN_GPIO_PORT( J, 0x5000, 8, 5),
- TEGRA186_MAIN_GPIO_PORT( K, 0x5200, 1, 5),
- TEGRA186_MAIN_GPIO_PORT( L, 0x1200, 8, 1),
- TEGRA186_MAIN_GPIO_PORT( M, 0x5600, 6, 5),
- TEGRA186_MAIN_GPIO_PORT( N, 0x0000, 7, 0),
- TEGRA186_MAIN_GPIO_PORT( O, 0x0200, 4, 0),
- TEGRA186_MAIN_GPIO_PORT( P, 0x4000, 7, 4),
- TEGRA186_MAIN_GPIO_PORT( Q, 0x0400, 6, 0),
- TEGRA186_MAIN_GPIO_PORT( R, 0x0a00, 6, 0),
- TEGRA186_MAIN_GPIO_PORT( T, 0x0600, 4, 0),
- TEGRA186_MAIN_GPIO_PORT( X, 0x1400, 8, 1),
- TEGRA186_MAIN_GPIO_PORT( Y, 0x1600, 7, 1),
- TEGRA186_MAIN_GPIO_PORT(BB, 0x2600, 2, 2),
- TEGRA186_MAIN_GPIO_PORT(CC, 0x5400, 4, 5),
+ TEGRA186_MAIN_GPIO_PORT( A, 2, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( B, 3, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( C, 3, 1, 7),
+ TEGRA186_MAIN_GPIO_PORT( D, 3, 2, 6),
+ TEGRA186_MAIN_GPIO_PORT( E, 2, 1, 8),
+ TEGRA186_MAIN_GPIO_PORT( F, 2, 2, 6),
+ TEGRA186_MAIN_GPIO_PORT( G, 4, 1, 6),
+ TEGRA186_MAIN_GPIO_PORT( H, 1, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( I, 0, 4, 8),
+ TEGRA186_MAIN_GPIO_PORT( J, 5, 0, 8),
+ TEGRA186_MAIN_GPIO_PORT( K, 5, 1, 1),
+ TEGRA186_MAIN_GPIO_PORT( L, 1, 1, 8),
+ TEGRA186_MAIN_GPIO_PORT( M, 5, 3, 6),
+ TEGRA186_MAIN_GPIO_PORT( N, 0, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( O, 0, 1, 4),
+ TEGRA186_MAIN_GPIO_PORT( P, 4, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( Q, 0, 2, 6),
+ TEGRA186_MAIN_GPIO_PORT( R, 0, 5, 6),
+ TEGRA186_MAIN_GPIO_PORT( T, 0, 3, 4),
+ TEGRA186_MAIN_GPIO_PORT( X, 1, 2, 8),
+ TEGRA186_MAIN_GPIO_PORT( Y, 1, 3, 7),
+ TEGRA186_MAIN_GPIO_PORT(BB, 2, 3, 2),
+ TEGRA186_MAIN_GPIO_PORT(CC, 5, 2, 4),
};
static const struct tegra_gpio_soc tegra186_main_soc = {
.num_ports = ARRAY_SIZE(tegra186_main_ports),
.ports = tegra186_main_ports,
.name = "tegra186-gpio",
+ .instance = 0,
};
-#define TEGRA186_AON_GPIO_PORT(port, base, count, controller) \
- [TEGRA186_AON_GPIO_PORT_##port] = { \
- .name = #port, \
- .offset = base, \
- .pins = count, \
- .irq = controller, \
+#define TEGRA186_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA186_AON_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
static const struct tegra_gpio_port tegra186_aon_ports[] = {
- TEGRA186_AON_GPIO_PORT( S, 0x0200, 5, 0),
- TEGRA186_AON_GPIO_PORT( U, 0x0400, 6, 0),
- TEGRA186_AON_GPIO_PORT( V, 0x0800, 8, 0),
- TEGRA186_AON_GPIO_PORT( W, 0x0a00, 8, 0),
- TEGRA186_AON_GPIO_PORT( Z, 0x0e00, 4, 0),
- TEGRA186_AON_GPIO_PORT(AA, 0x0c00, 8, 0),
- TEGRA186_AON_GPIO_PORT(EE, 0x0600, 3, 0),
- TEGRA186_AON_GPIO_PORT(FF, 0x0000, 5, 0),
+ TEGRA186_AON_GPIO_PORT( S, 0, 1, 5),
+ TEGRA186_AON_GPIO_PORT( U, 0, 2, 6),
+ TEGRA186_AON_GPIO_PORT( V, 0, 4, 8),
+ TEGRA186_AON_GPIO_PORT( W, 0, 5, 8),
+ TEGRA186_AON_GPIO_PORT( Z, 0, 7, 4),
+ TEGRA186_AON_GPIO_PORT(AA, 0, 6, 8),
+ TEGRA186_AON_GPIO_PORT(EE, 0, 3, 3),
+ TEGRA186_AON_GPIO_PORT(FF, 0, 0, 5),
};
static const struct tegra_gpio_soc tegra186_aon_soc = {
.num_ports = ARRAY_SIZE(tegra186_aon_ports),
.ports = tegra186_aon_ports,
.name = "tegra186-gpio-aon",
+ .instance = 1,
};
-#define TEGRA194_MAIN_GPIO_PORT(port, base, count, controller) \
- [TEGRA194_MAIN_GPIO_PORT_##port] = { \
- .name = #port, \
- .offset = base, \
- .pins = count, \
- .irq = controller, \
+#define TEGRA194_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA194_MAIN_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
static const struct tegra_gpio_port tegra194_main_ports[] = {
- TEGRA194_MAIN_GPIO_PORT( A, 0x1400, 8, 1),
- TEGRA194_MAIN_GPIO_PORT( B, 0x4e00, 2, 4),
- TEGRA194_MAIN_GPIO_PORT( C, 0x4600, 8, 4),
- TEGRA194_MAIN_GPIO_PORT( D, 0x4800, 4, 4),
- TEGRA194_MAIN_GPIO_PORT( E, 0x4a00, 8, 4),
- TEGRA194_MAIN_GPIO_PORT( F, 0x4c00, 6, 4),
- TEGRA194_MAIN_GPIO_PORT( G, 0x4000, 8, 4),
- TEGRA194_MAIN_GPIO_PORT( H, 0x4200, 8, 4),
- TEGRA194_MAIN_GPIO_PORT( I, 0x4400, 5, 4),
- TEGRA194_MAIN_GPIO_PORT( J, 0x5200, 6, 5),
- TEGRA194_MAIN_GPIO_PORT( K, 0x3000, 8, 3),
- TEGRA194_MAIN_GPIO_PORT( L, 0x3200, 4, 3),
- TEGRA194_MAIN_GPIO_PORT( M, 0x2600, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( N, 0x2800, 3, 2),
- TEGRA194_MAIN_GPIO_PORT( O, 0x5000, 6, 5),
- TEGRA194_MAIN_GPIO_PORT( P, 0x2a00, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( Q, 0x2c00, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( R, 0x2e00, 6, 2),
- TEGRA194_MAIN_GPIO_PORT( S, 0x3600, 8, 3),
- TEGRA194_MAIN_GPIO_PORT( T, 0x3800, 8, 3),
- TEGRA194_MAIN_GPIO_PORT( U, 0x3a00, 1, 3),
- TEGRA194_MAIN_GPIO_PORT( V, 0x1000, 8, 1),
- TEGRA194_MAIN_GPIO_PORT( W, 0x1200, 2, 1),
- TEGRA194_MAIN_GPIO_PORT( X, 0x2000, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( Y, 0x2200, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( Z, 0x2400, 8, 2),
- TEGRA194_MAIN_GPIO_PORT(FF, 0x3400, 2, 3),
- TEGRA194_MAIN_GPIO_PORT(GG, 0x0000, 2, 0)
+ TEGRA194_MAIN_GPIO_PORT( A, 1, 2, 8),
+ TEGRA194_MAIN_GPIO_PORT( B, 4, 7, 2),
+ TEGRA194_MAIN_GPIO_PORT( C, 4, 3, 8),
+ TEGRA194_MAIN_GPIO_PORT( D, 4, 4, 4),
+ TEGRA194_MAIN_GPIO_PORT( E, 4, 5, 8),
+ TEGRA194_MAIN_GPIO_PORT( F, 4, 6, 6),
+ TEGRA194_MAIN_GPIO_PORT( G, 4, 0, 8),
+ TEGRA194_MAIN_GPIO_PORT( H, 4, 1, 8),
+ TEGRA194_MAIN_GPIO_PORT( I, 4, 2, 5),
+ TEGRA194_MAIN_GPIO_PORT( J, 5, 1, 6),
+ TEGRA194_MAIN_GPIO_PORT( K, 3, 0, 8),
+ TEGRA194_MAIN_GPIO_PORT( L, 3, 1, 4),
+ TEGRA194_MAIN_GPIO_PORT( M, 2, 3, 8),
+ TEGRA194_MAIN_GPIO_PORT( N, 2, 4, 3),
+ TEGRA194_MAIN_GPIO_PORT( O, 5, 0, 6),
+ TEGRA194_MAIN_GPIO_PORT( P, 2, 5, 8),
+ TEGRA194_MAIN_GPIO_PORT( Q, 2, 6, 8),
+ TEGRA194_MAIN_GPIO_PORT( R, 2, 7, 6),
+ TEGRA194_MAIN_GPIO_PORT( S, 3, 3, 8),
+ TEGRA194_MAIN_GPIO_PORT( T, 3, 4, 8),
+ TEGRA194_MAIN_GPIO_PORT( U, 3, 5, 1),
+ TEGRA194_MAIN_GPIO_PORT( V, 1, 0, 8),
+ TEGRA194_MAIN_GPIO_PORT( W, 1, 1, 2),
+ TEGRA194_MAIN_GPIO_PORT( X, 2, 0, 8),
+ TEGRA194_MAIN_GPIO_PORT( Y, 2, 1, 8),
+ TEGRA194_MAIN_GPIO_PORT( Z, 2, 2, 8),
+ TEGRA194_MAIN_GPIO_PORT(FF, 3, 2, 2),
+ TEGRA194_MAIN_GPIO_PORT(GG, 0, 0, 2)
};
static const struct tegra_gpio_soc tegra194_main_soc = {
.num_ports = ARRAY_SIZE(tegra194_main_ports),
.ports = tegra194_main_ports,
.name = "tegra194-gpio",
+ .instance = 0,
};
-#define TEGRA194_AON_GPIO_PORT(port, base, count, controller) \
- [TEGRA194_AON_GPIO_PORT_##port] = { \
- .name = #port, \
- .offset = base, \
- .pins = count, \
- .irq = controller, \
+#define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA194_AON_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
static const struct tegra_gpio_port tegra194_aon_ports[] = {
- TEGRA194_AON_GPIO_PORT(AA, 0x0600, 8, 0),
- TEGRA194_AON_GPIO_PORT(BB, 0x0800, 4, 0),
- TEGRA194_AON_GPIO_PORT(CC, 0x0200, 8, 0),
- TEGRA194_AON_GPIO_PORT(DD, 0x0400, 3, 0),
- TEGRA194_AON_GPIO_PORT(EE, 0x0000, 7, 0)
+ TEGRA194_AON_GPIO_PORT(AA, 0, 3, 8),
+ TEGRA194_AON_GPIO_PORT(BB, 0, 4, 4),
+ TEGRA194_AON_GPIO_PORT(CC, 0, 1, 8),
+ TEGRA194_AON_GPIO_PORT(DD, 0, 2, 3),
+ TEGRA194_AON_GPIO_PORT(EE, 0, 0, 7)
};
static const struct tegra_gpio_soc tegra194_aon_soc = {
.num_ports = ARRAY_SIZE(tegra194_aon_ports),
.ports = tegra194_aon_ports,
.name = "tegra194-gpio-aon",
+ .instance = 1,
};
static const struct of_device_id tegra186_gpio_of_match[] = {
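
The Tegra186 rework above is the most involved change in this group: it drops the driver's private irq_domain_ops and instead describes the controller as a hierarchical child of the PMC wake domain, supplying child_to_parent_hwirq(), populate_parent_fwspec() and a translate() hook. A stripped-down sketch of the two chip-side callbacks gpiolib invokes when allocating a child interrupt; the identity hwirq mapping and the three-cell parent spec here are assumptions for illustration, not the Tegra encoding:

#include <linux/gpio/driver.h>
#include <linux/irqdomain.h>

static int example_child_to_parent_hwirq(struct gpio_chip *chip,
					 unsigned int child_hwirq,
					 unsigned int child_type,
					 unsigned int *parent_hwirq,
					 unsigned int *parent_type)
{
	/* Map the GPIO's hwirq onto the parent domain's numbering. */
	*parent_hwirq = child_hwirq;
	*parent_type = child_type;
	return 0;
}

static void example_populate_parent_fwspec(struct gpio_chip *chip,
					   struct irq_fwspec *fwspec,
					   unsigned int parent_hwirq,
					   unsigned int parent_type)
{
	/* Fill in whatever cells the parent interrupt controller expects. */
	fwspec->param_count = 3;
	fwspec->param[0] = 0;	/* e.g. a controller instance number */
	fwspec->param[1] = parent_hwirq;
	fwspec->param[2] = parent_type;
}
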
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index ddad5c7ea617..d08d86a22b1f 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -169,7 +169,10 @@ static int thunderx_gpio_get_direction(struct gpio_chip *chip, unsigned int line
bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
- return !(bit_cfg & GPIO_BIT_CFG_TX_OE);
+ if (bit_cfg & GPIO_BIT_CFG_TX_OE)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int thunderx_gpio_set_config(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index c8b34d787eed..99d5a84a9129 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -39,7 +39,7 @@ static int tpic2810_get_direction(struct gpio_chip *chip,
unsigned offset)
{
/* This device always output */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int tpic2810_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 2eea98ff4ea3..1e9d8262d0ff 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -21,7 +21,7 @@ static int tps65086_gpio_get_direction(struct gpio_chip *chip,
unsigned offset)
{
/* This device is output only */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int tps65086_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 3ad68bd78282..510d9ed9fd2a 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -32,9 +32,9 @@ static int tps65912_gpio_get_direction(struct gpio_chip *gc,
return ret;
if (val & GPIO_CFG_MASK)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
else
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int tps65912_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index aff6e504c666..f7f5f770e0fb 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -47,7 +47,6 @@ static int tps68470_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(val & BIT(offset));
}
-/* Return 0 if output, 1 if input */
static int tps68470_gpio_get_direction(struct gpio_chip *gc,
unsigned int offset)
{
@@ -57,7 +56,7 @@ static int tps68470_gpio_get_direction(struct gpio_chip *gc,
/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(regmap, TPS68470_GPIO_CTL_REG_A(offset), &val);
if (ret) {
@@ -67,7 +66,8 @@ static int tps68470_gpio_get_direction(struct gpio_chip *gc,
}
val &= TPS68470_GPIO_MODE_MASK;
- return val >= TPS68470_GPIO_MODE_OUT_CMOS ? 0 : 1;
+ return val >= TPS68470_GPIO_MODE_OUT_CMOS ? GPIO_LINE_DIRECTION_OUT :
+ GPIO_LINE_DIRECTION_IN;
}
static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index a3109bcaa0ac..5022e0ad0fae 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -101,7 +101,10 @@ static int tqmx86_gpio_direction_output(struct gpio_chip *chip,
static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- return !!(TQMX86_DIR_INPUT_MASK & BIT(offset));
+ if (TQMX86_DIR_INPUT_MASK & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static void tqmx86_gpio_irq_mask(struct irq_data *data)
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 1da8d0586329..d885032cf814 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -44,7 +44,10 @@ static int ts4900_gpio_get_direction(struct gpio_chip *chip,
regmap_read(priv->regmap, offset, &reg);
- return !(reg & TS4900_GPIO_OE);
+ if (reg & TS4900_GPIO_OE)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int ts4900_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index fbfb648d3502..de249726230e 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -165,10 +165,10 @@ static int twl4030_get_gpio_direction(int gpio)
if (ret < 0)
return ret;
- /* 1 = output, but gpiolib semantics are inverse so invert */
- ret = !(ret & d_msk);
+ if (ret & d_msk)
+ return GPIO_LINE_DIRECTION_OUT;
- return ret;
+ return GPIO_LINE_DIRECTION_IN;
}
static int twl4030_set_gpio_dataout(int gpio, int enable)
@@ -380,10 +380,10 @@ static int twl_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct gpio_twl4030_priv *priv = gpiochip_get_data(chip);
/*
- * Default 0 = output
+ * Default GPIO_LINE_DIRECTION_OUT
* LED GPIOs >= TWL4030_GPIO_MAX are always output
*/
- int ret = 0;
+ int ret = GPIO_LINE_DIRECTION_OUT;
mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX) {
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index c845b2ff1f43..648fb418d775 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -34,8 +34,7 @@ static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
static int twl6040gpo_get_direction(struct gpio_chip *chip, unsigned offset)
{
- /* This means "out" */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 93cdcc41e9fb..bd203e8fa58e 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -113,7 +113,10 @@ static int uniphier_gpio_offset_read(struct gpio_chip *chip,
static int uniphier_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DIR);
+ if (uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DIR))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int uniphier_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 444fe9e7f04a..8b481b3c1ebe 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -170,13 +170,16 @@ static int wcove_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
int ret, reg = to_reg(gpio, CTRL_OUT);
if (reg < 0)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(wg->regmap, reg, &val);
if (ret)
return ret;
- return !(val & CTLO_DIR_OUT);
+ if (val & CTLO_DIR_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index e0ef66b6a237..fe456bea81f6 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -56,7 +56,10 @@ static int ws16c48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
const unsigned port = offset / 8;
const unsigned mask = BIT(offset % 8);
- return !!(ws16c48gpio->io_state[port] & mask);
+ if (ws16c48gpio->io_state[port] & mask)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 2918363884de..532b0df8a1f2 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -80,7 +80,10 @@ static int xgene_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
bank_offset = GPIO_SET_DR_OFFSET + GPIO_BANK_OFFSET(offset);
bit_offset = GPIO_BIT_OFFSET(offset);
- return !!(ioread32(chip->base + bank_offset) & BIT(bit_offset));
+ if (ioread32(chip->base + bank_offset) & BIT(bit_offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int xgene_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
@@ -155,28 +158,16 @@ static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
static int xgene_gpio_probe(struct platform_device *pdev)
{
- struct resource *res;
struct xgene_gpio *gpio;
int err = 0;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio) {
- err = -ENOMEM;
- goto err;
- }
+ if (!gpio)
+ return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- goto err;
- }
-
- gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!gpio->base) {
- err = -ENOMEM;
- goto err;
- }
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
gpio->chip.ngpio = XGENE_MAX_GPIOS;
@@ -196,14 +187,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"failed to register gpiochip.\n");
- goto err;
+ return err;
}
dev_info(&pdev->dev, "X-Gene GPIO driver registered.\n");
return 0;
-err:
- dev_err(&pdev->dev, "X-Gene GPIO driver registration failed.\n");
- return err;
}
static const struct of_device_id xgene_gpio_of_match[] = {
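
The X-Gene cleanup above uses the same helper adopted elsewhere in this series: devm_platform_ioremap_resource() folds platform_get_resource() and devm_ioremap_resource() into one call and reports failure as an ERR_PTR rather than NULL. A minimal probe sketch showing the resulting error handling:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Looks up IORESOURCE_MEM index 0 and ioremaps it, device-managed. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... set up and register the GPIO chip using "base" ... */
	return 0;
}
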
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
new file mode 100644
index 000000000000..773e5c24309e
--- /dev/null
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Broadcom
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define IPROC_CCA_INT_F_GPIOINT BIT(0)
+#define IPROC_CCA_INT_STS 0x20
+#define IPROC_CCA_INT_MASK 0x24
+
+#define IPROC_GPIO_CCA_DIN 0x0
+#define IPROC_GPIO_CCA_DOUT 0x4
+#define IPROC_GPIO_CCA_OUT_EN 0x8
+#define IPROC_GPIO_CCA_INT_LEVEL 0x10
+#define IPROC_GPIO_CCA_INT_LEVEL_MASK 0x14
+#define IPROC_GPIO_CCA_INT_EVENT 0x18
+#define IPROC_GPIO_CCA_INT_EVENT_MASK 0x1C
+#define IPROC_GPIO_CCA_INT_EDGE 0x24
+
+struct iproc_gpio_chip {
+ struct irq_chip irqchip;
+ struct gpio_chip gc;
+ spinlock_t lock;
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *intr;
+};
+
+static inline struct iproc_gpio_chip *
+to_iproc_gpio(struct gpio_chip *gc)
+{
+ return container_of(gc, struct iproc_gpio_chip, gc);
+}
+
+static void iproc_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 irq = d->irq;
+ u32 irq_type, event_status = 0;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ irq_type = irq_get_trigger_type(irq);
+ if (irq_type & IRQ_TYPE_EDGE_BOTH) {
+ event_status |= BIT(pin);
+ writel_relaxed(event_status,
+ chip->base + IPROC_GPIO_CCA_INT_EVENT);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static void iproc_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 irq = d->irq;
+ u32 int_mask, irq_type, event_mask;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ irq_type = irq_get_trigger_type(irq);
+ event_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ int_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+
+ if (irq_type & IRQ_TYPE_EDGE_BOTH) {
+ event_mask |= 1 << pin;
+ writel_relaxed(event_mask,
+ chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ } else {
+ int_mask |= 1 << pin;
+ writel_relaxed(int_mask,
+ chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static void iproc_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 irq = d->irq;
+ u32 irq_type, int_mask, event_mask;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ irq_type = irq_get_trigger_type(irq);
+ event_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ int_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+
+ if (irq_type & IRQ_TYPE_EDGE_BOTH) {
+ event_mask &= ~BIT(pin);
+ writel_relaxed(event_mask,
+ chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ } else {
+ int_mask &= ~BIT(pin);
+ writel_relaxed(int_mask,
+ chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static int iproc_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 irq = d->irq;
+ u32 event_pol, int_pol;
+ int ret = 0;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ event_pol = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EDGE);
+ event_pol &= ~BIT(pin);
+ writel_relaxed(event_pol, chip->base + IPROC_GPIO_CCA_INT_EDGE);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ event_pol = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EDGE);
+ event_pol |= BIT(pin);
+ writel_relaxed(event_pol, chip->base + IPROC_GPIO_CCA_INT_EDGE);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ int_pol = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ int_pol &= ~BIT(pin);
+ writel_relaxed(int_pol, chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ int_pol = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ int_pol |= BIT(pin);
+ writel_relaxed(int_pol, chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ break;
+ default:
+ /* should not come here */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(irq_get_irq_data(irq), handle_level_irq);
+ else if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(irq_get_irq_data(irq), handle_edge_irq);
+
+out_unlock:
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return ret;
+}
+
+static irqreturn_t iproc_gpio_irq_handler(int irq, void *data)
+{
+ struct gpio_chip *gc = (struct gpio_chip *)data;
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int bit;
+ unsigned long int_bits = 0;
+ u32 int_status;
+
+ /* go through all the GPIOs and handle all interrupts */
+ int_status = readl_relaxed(chip->intr + IPROC_CCA_INT_STS);
+ if (int_status & IPROC_CCA_INT_F_GPIOINT) {
+ u32 event, level;
+
+ /* Get level and edge interrupts */
+ event =
+ readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ event &= readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT);
+ level = readl_relaxed(chip->base + IPROC_GPIO_CCA_DIN);
+ level ^= readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ level &=
+ readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+ int_bits = level | event;
+
+ for_each_set_bit(bit, &int_bits, gc->ngpio)
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, bit));
+ }
+
+ return int_bits ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int iproc_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ struct iproc_gpio_chip *chip;
+ u32 num_gpios;
+ int irq, ret;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = dev;
+ platform_set_drvdata(pdev, chip);
+ spin_lock_init(&chip->lock);
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ ret = bgpio_init(&chip->gc, dev, 4,
+ chip->base + IPROC_GPIO_CCA_DIN,
+ chip->base + IPROC_GPIO_CCA_DOUT,
+ NULL,
+ chip->base + IPROC_GPIO_CCA_OUT_EN,
+ NULL,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init GPIO chip\n");
+ return ret;
+ }
+
+ chip->gc.label = dev_name(dev);
+ if (!of_property_read_u32(dn, "ngpios", &num_gpios))
+ chip->gc.ngpio = num_gpios;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0) {
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqc;
+ u32 val;
+
+ irqc = &chip->irqchip;
+ irqc->name = dev_name(dev);
+ irqc->irq_ack = iproc_gpio_irq_ack;
+ irqc->irq_mask = iproc_gpio_irq_mask;
+ irqc->irq_unmask = iproc_gpio_irq_unmask;
+ irqc->irq_set_type = iproc_gpio_irq_set_type;
+
+ chip->intr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(chip->intr))
+ return PTR_ERR(chip->intr);
+
+ /* Enable GPIO interrupts for CCA GPIO */
+ val = readl_relaxed(chip->intr + IPROC_CCA_INT_MASK);
+ val |= IPROC_CCA_INT_F_GPIOINT;
+ writel_relaxed(val, chip->intr + IPROC_CCA_INT_MASK);
+
+ /*
+ * Directly request the irq here instead of passing
+ * a flow-handler to gpiochip_set_chained_irqchip,
+ * because the irq is shared.
+ */
+ ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
+ IRQF_SHARED, chip->gc.label, &chip->gc);
+ if (ret) {
+ dev_err(dev, "Fail to request IRQ%d: %d\n", irq, ret);
+ return ret;
+ }
+
+ girq = &chip->gc.irq;
+ girq->chip = irqc;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ }
+
+ ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ if (ret) {
+ dev_err(dev, "unable to add GPIO chip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iproc_gpio_remove(struct platform_device *pdev)
+{
+ struct iproc_gpio_chip *chip;
+
+ chip = platform_get_drvdata(pdev);
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->intr) {
+ u32 val;
+
+ val = readl_relaxed(chip->intr + IPROC_CCA_INT_MASK);
+ val &= ~IPROC_CCA_INT_F_GPIOINT;
+ writel_relaxed(val, chip->intr + IPROC_CCA_INT_MASK);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm_iproc_gpio_of_match[] = {
+ { .compatible = "brcm,iproc-gpio-cca" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_gpio_of_match);
+
+static struct platform_driver bcm_iproc_gpio_driver = {
+ .driver = {
+ .name = "iproc-xgs-gpio",
+ .of_match_table = bcm_iproc_gpio_of_match,
+ },
+ .probe = iproc_gpio_probe,
+ .remove = iproc_gpio_remove,
+};
+
+module_platform_driver(bcm_iproc_gpio_driver);
+
+MODULE_DESCRIPTION("XGS IPROC GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 05f1998c11a4..31b5072b2df0 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -83,7 +83,10 @@ static int xra1403_get_direction(struct gpio_chip *chip, unsigned int offset)
if (ret)
return ret;
- return !!(val & BIT(offset % 8));
+ if (val & BIT(offset % 8))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int xra1403_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 43d3fa5f511a..08d7c3b32038 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -72,7 +72,7 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
{
- return 1; /* input only */
+ return GPIO_LINE_DIRECTION_IN; /* input only */
}
static int xtensa_impwire_get_value(struct gpio_chip *gc, unsigned offset)
@@ -95,7 +95,7 @@ static void xtensa_impwire_set_value(struct gpio_chip *gc, unsigned offset,
static int xtensa_expstate_get_direction(struct gpio_chip *gc, unsigned offset)
{
- return 0; /* output only */
+ return GPIO_LINE_DIRECTION_OUT; /* output only */
}
static int xtensa_expstate_get_value(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index cd475ff4bcad..4c3f6370eab4 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -360,7 +360,7 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
*
* This function returns the direction of the specified GPIO.
*
- * Return: 0 for output, 1 for input
+ * Return: GPIO_LINE_DIRECTION_OUT or GPIO_LINE_DIRECTION_IN
*/
static int zynq_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
{
@@ -372,7 +372,10 @@ static int zynq_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
- return !(reg & BIT(bank_pin_num));
+ if (reg & BIT(bank_pin_num))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
/**
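All of the .get_direction() changes above apply the same convention: the callback now returns GPIO_LINE_DIRECTION_IN or GPIO_LINE_DIRECTION_OUT (or a negative errno) instead of a bare 1/0, removing the ambiguity about which value means input. A sketch of the resulting callback shape for an imaginary memory-mapped chip (the register name and bit meaning are assumptions for illustration only):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

#define FOO_DIR_REG	0x04	/* hypothetical direction register, bit set = input */

struct foo_gpio {
	struct gpio_chip gc;
	void __iomem *base;
};

static int foo_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
	struct foo_gpio *priv = gpiochip_get_data(gc);

	if (readl(priv->base + FOO_DIR_REG) & BIT(offset))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}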
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 59ccfd24627d..d30e57dc755c 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -194,6 +194,7 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
acpi_gpiochip_request_irq(acpi_gpio, event);
}
+/* Always returns AE_OK so that we keep looping over the resources */
static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
void *context)
{
@@ -230,19 +231,25 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event",
GPIO_ACTIVE_HIGH, GPIOD_IN);
if (IS_ERR(desc)) {
- dev_err(chip->parent, "Failed to request GPIO\n");
- return AE_ERROR;
+ dev_err(chip->parent,
+ "Failed to request GPIO for pin 0x%04X, err %ld\n",
+ pin, PTR_ERR(desc));
+ return AE_OK;
}
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
- dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
+ dev_err(chip->parent,
+ "Failed to lock GPIO pin 0x%04X as interrupt, err %d\n",
+ pin, ret);
goto fail_free_desc;
}
irq = gpiod_to_irq(desc);
if (irq < 0) {
- dev_err(chip->parent, "Failed to translate GPIO to IRQ\n");
+ dev_err(chip->parent,
+ "Failed to translate GPIO pin 0x%04X to IRQ, err %d\n",
+ pin, irq);
goto fail_unlock_irq;
}
@@ -287,7 +294,7 @@ fail_unlock_irq:
fail_free_desc:
gpiochip_free_own_desc(desc);
- return AE_ERROR;
+ return AE_OK;
}
/**
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 80ea49f570f4..dc27b1a88e93 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -27,7 +27,7 @@
* This is used by external users of of_gpio_count() from <linux/of_gpio.h>
*
* FIXME: get rid of those external users by converting them to GPIO
- * descriptors and let them all use gpiod_get_count()
+ * descriptors and let them all use gpiod_count()
*/
int of_gpio_get_count(struct device *dev, const char *con_id)
{
@@ -84,8 +84,9 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
/**
* of_gpio_need_valid_mask() - figure out if the OF GPIO driver needs
* to set the .valid_mask
- * @dev: the device for the GPIO provider
- * @return: true if the valid mask needs to be set
+ * @gc: the target gpio_chip
+ *
+ * Return: true if the valid mask needs to be set
*/
bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
{
@@ -134,18 +135,20 @@ static void of_gpio_flags_quirks(struct device_node *np,
(!(strcmp(propname, "enable-gpio") &&
strcmp(propname, "enable-gpios")) &&
of_device_is_compatible(np, "regulator-gpio")))) {
+ bool active_low = !of_property_read_bool(np,
+ "enable-active-high");
/*
* The regulator GPIO handles are specified such that the
* presence or absence of "enable-active-high" solely controls
* the polarity of the GPIO line. Any phandle flags must
* be actively ignored.
*/
- if (*flags & OF_GPIO_ACTIVE_LOW) {
+ if ((*flags & OF_GPIO_ACTIVE_LOW) && !active_low) {
pr_warn("%s GPIO handle specifies active low - ignored\n",
of_node_full_name(np));
*flags &= ~OF_GPIO_ACTIVE_LOW;
}
- if (!of_property_read_bool(np, "enable-active-high"))
+ if (active_low)
*flags |= OF_GPIO_ACTIVE_LOW;
}
/*
@@ -882,16 +885,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
ret = of_gpiochip_scan_gpios(chip);
- if (ret) {
+ if (ret)
of_node_put(chip->of_node);
- gpiochip_remove_pin_ranges(chip);
- }
return ret;
}
void of_gpiochip_remove(struct gpio_chip *chip)
{
- gpiochip_remove_pin_ranges(chip);
of_node_put(chip->of_node);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fb33ff6fc1a9..9913886ede90 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -390,6 +390,14 @@ static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip)
gpiochip->valid_mask = NULL;
}
+static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
+{
+ if (gc->add_pin_ranges)
+ return gc->add_pin_ranges(gc);
+
+ return 0;
+}
+
bool gpiochip_line_is_valid(const struct gpio_chip *gpiochip,
unsigned int offset)
{
@@ -422,9 +430,127 @@ struct linehandle_state {
(GPIOHANDLE_REQUEST_INPUT | \
GPIOHANDLE_REQUEST_OUTPUT | \
GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+ GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
+ GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
+ GPIOHANDLE_REQUEST_BIAS_DISABLE | \
GPIOHANDLE_REQUEST_OPEN_DRAIN | \
GPIOHANDLE_REQUEST_OPEN_SOURCE)
+static int linehandle_validate_flags(u32 flags)
+{
+ /* Return an error if an unknown flag is set */
+ if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
+ return -EINVAL;
+
+ /*
+ * Do not allow both INPUT & OUTPUT flags to be set as they are
+ * contradictory.
+ */
+ if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
+ (flags & GPIOHANDLE_REQUEST_OUTPUT))
+ return -EINVAL;
+
+ /*
+ * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
+ * the hardware actually supports enabling both at the same time the
+ * electrical result would be disastrous.
+ */
+ if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
+ (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
+ return -EINVAL;
+
+ /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
+ if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
+ ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
+ return -EINVAL;
+
+ /* Bias flags only allowed for input or output mode. */
+ if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
+ (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
+ ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
+ (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
+ (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
+ return -EINVAL;
+
+ /* Only one bias flag can be set. */
+ if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
+ (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
+ GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
+ ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
+ (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void linehandle_configure_flag(unsigned long *flagsp,
+ u32 bit, bool active)
+{
+ if (active)
+ set_bit(bit, flagsp);
+ else
+ clear_bit(bit, flagsp);
+}
+
+static long linehandle_set_config(struct linehandle_state *lh,
+ void __user *ip)
+{
+ struct gpiohandle_config gcnf;
+ struct gpio_desc *desc;
+ int i, ret;
+ u32 lflags;
+ unsigned long *flagsp;
+
+ if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
+ return -EFAULT;
+
+ lflags = gcnf.flags;
+ ret = linehandle_validate_flags(lflags);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lh->numdescs; i++) {
+ desc = lh->descs[i];
+ flagsp = &desc->flags;
+
+ linehandle_configure_flag(flagsp, FLAG_ACTIVE_LOW,
+ lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
+
+ linehandle_configure_flag(flagsp, FLAG_OPEN_DRAIN,
+ lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
+
+ linehandle_configure_flag(flagsp, FLAG_OPEN_SOURCE,
+ lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
+
+ linehandle_configure_flag(flagsp, FLAG_PULL_UP,
+ lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
+
+ linehandle_configure_flag(flagsp, FLAG_PULL_DOWN,
+ lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
+
+ linehandle_configure_flag(flagsp, FLAG_BIAS_DISABLE,
+ lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
+
+ /*
+ * Lines have to be requested explicitly for input
+ * or output, else the line will be treated "as is".
+ */
+ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ int val = !!gcnf.default_values[i];
+
+ ret = gpiod_direction_output(desc, val);
+ if (ret)
+ return ret;
+ } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
+ ret = gpiod_direction_input(desc);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
static long linehandle_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
@@ -475,6 +601,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
lh->descs,
NULL,
vals);
+ } else if (cmd == GPIOHANDLE_SET_CONFIG_IOCTL) {
+ return linehandle_set_config(lh, ip);
}
return -EINVAL;
}
@@ -526,32 +654,9 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
lflags = handlereq.flags;
- /* Return an error if an unknown flag is set */
- if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
- return -EINVAL;
-
- /*
- * Do not allow both INPUT & OUTPUT flags to be set as they are
- * contradictory.
- */
- if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
- (lflags & GPIOHANDLE_REQUEST_OUTPUT))
- return -EINVAL;
-
- /*
- * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
- * the hardware actually supports enabling both at the same time the
- * electrical result would be disastrous.
- */
- if ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
- (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
- return -EINVAL;
-
- /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
- if (!(lflags & GPIOHANDLE_REQUEST_OUTPUT) &&
- ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
- (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
- return -EINVAL;
+ ret = linehandle_validate_flags(lflags);
+ if (ret)
+ return ret;
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
@@ -593,6 +698,12 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE)
+ set_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)
+ set_bit(FLAG_PULL_DOWN, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)
+ set_bit(FLAG_PULL_UP, &desc->flags);
ret = gpiod_set_transitory(desc, false);
if (ret < 0)
@@ -895,6 +1006,32 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
return -EFAULT;
+ offset = eventreq.lineoffset;
+ lflags = eventreq.handleflags;
+ eflags = eventreq.eventflags;
+
+ if (offset >= gdev->ngpio)
+ return -EINVAL;
+
+ /* Return an error if an unknown flag is set */
+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
+ return -EINVAL;
+
+ /* This is just wrong: we don't look for events on output lines */
+ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
+ return -EINVAL;
+
+ /* Only one bias flag can be set. */
+ if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
+ (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
+ GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
+ ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
+ (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
+ return -EINVAL;
+
le = kzalloc(sizeof(*le), GFP_KERNEL);
if (!le)
return -ENOMEM;
@@ -912,30 +1049,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
}
- offset = eventreq.lineoffset;
- lflags = eventreq.handleflags;
- eflags = eventreq.eventflags;
-
- if (offset >= gdev->ngpio) {
- ret = -EINVAL;
- goto out_free_label;
- }
-
- /* Return an error if a unknown flag is set */
- if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
- (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
- ret = -EINVAL;
- goto out_free_label;
- }
-
- /* This is just wrong: we don't look for events on output lines */
- if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
- (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
- (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
- ret = -EINVAL;
- goto out_free_label;
- }
-
desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
@@ -945,6 +1058,12 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE)
+ set_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)
+ set_bit(FLAG_PULL_DOWN, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)
+ set_bit(FLAG_PULL_UP, &desc->flags);
ret = gpiod_direction_input(desc);
if (ret)
@@ -1098,6 +1217,12 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
GPIOLINE_FLAG_IS_OUT);
+ if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_BIAS_DISABLE;
+ if (test_bit(FLAG_PULL_DOWN, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
+ if (test_bit(FLAG_PULL_UP, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
@@ -1403,15 +1528,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
}
}
+ ret = gpiochip_add_pin_ranges(chip);
+ if (ret)
+ goto err_remove_of_chip;
+
acpi_gpiochip_add(chip);
machine_gpiochip_add(chip);
- ret = gpiochip_irqchip_init_hw(chip);
+ ret = gpiochip_irqchip_init_valid_mask(chip);
if (ret)
goto err_remove_acpi_chip;
- ret = gpiochip_irqchip_init_valid_mask(chip);
+ ret = gpiochip_irqchip_init_hw(chip);
if (ret)
goto err_remove_acpi_chip;
@@ -1444,6 +1573,7 @@ err_remove_of_chip:
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
err_free_gpiochip_mask:
+ gpiochip_remove_pin_ranges(chip);
gpiochip_free_valid_mask(chip);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
@@ -1499,8 +1629,8 @@ void gpiochip_remove(struct gpio_chip *chip)
gdev->chip = NULL;
gpiochip_irqchip_remove(chip);
acpi_gpiochip_remove(chip);
- gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
+ gpiochip_remove_pin_ranges(chip);
gpiochip_free_valid_mask(chip);
/*
* We accept no more calls into the driver from this point, so
@@ -1539,7 +1669,7 @@ static void devm_gpio_chip_release(struct device *dev, void *res)
}
/**
- * devm_gpiochip_add_data() - Resource manager gpiochip_add_data()
+ * devm_gpiochip_add_data() - Resource managed gpiochip_add_data()
* @dev: pointer to the device that gpio_chip belongs to.
* @chip: the chip to register, with chip->base initialized
* @data: driver-private data associated with this chip
@@ -2790,6 +2920,9 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
clear_bit(FLAG_REQUESTED, &desc->flags);
clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ clear_bit(FLAG_PULL_UP, &desc->flags);
+ clear_bit(FLAG_PULL_DOWN, &desc->flags);
+ clear_bit(FLAG_BIAS_DISABLE, &desc->flags);
clear_bit(FLAG_IS_HOGGED, &desc->flags);
ret = true;
}
@@ -2916,6 +3049,7 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
unsigned arg;
switch (mode) {
+ case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_UP:
arg = 1;
@@ -2929,6 +3063,26 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
}
+static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
+{
+ int bias = 0;
+ int ret = 0;
+
+ if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
+ bias = PIN_CONFIG_BIAS_DISABLE;
+ else if (test_bit(FLAG_PULL_UP, &desc->flags))
+ bias = PIN_CONFIG_BIAS_PULL_UP;
+ else if (test_bit(FLAG_PULL_DOWN, &desc->flags))
+ bias = PIN_CONFIG_BIAS_PULL_DOWN;
+
+ if (bias) {
+ ret = gpio_set_config(chip, gpio_chip_hwgpio(desc), bias);
+ if (ret != -ENOTSUPP)
+ return ret;
+ }
+ return 0;
+}
+
/**
* gpiod_direction_input - set the GPIO direction to input
* @desc: GPIO to set to input
@@ -2973,15 +3127,10 @@ int gpiod_direction_input(struct gpio_desc *desc)
__func__);
return -EIO;
}
- if (ret == 0)
+ if (ret == 0) {
clear_bit(FLAG_IS_OUT, &desc->flags);
-
- if (test_bit(FLAG_PULL_UP, &desc->flags))
- gpio_set_config(chip, gpio_chip_hwgpio(desc),
- PIN_CONFIG_BIAS_PULL_UP);
- else if (test_bit(FLAG_PULL_DOWN, &desc->flags))
- gpio_set_config(chip, gpio_chip_hwgpio(desc),
- PIN_CONFIG_BIAS_PULL_DOWN);
+ ret = gpio_set_bias(chip, desc);
+ }
trace_gpio_direction(desc_to_gpio(desc), 1, ret);
@@ -3111,6 +3260,9 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
}
set_output_value:
+ ret = gpio_set_bias(gc, desc);
+ if (ret)
+ return ret;
return gpiod_direction_output_raw_commit(desc, value);
set_output_flag:
@@ -4742,9 +4894,9 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
pr_info("GPIO line %d (%s) hogged as %s%s\n",
desc_to_gpio(desc), name,
- (dflags&GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
- (dflags&GPIOD_FLAGS_BIT_DIR_OUT) ?
- (dflags&GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low":"");
+ (dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
+ (dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
+ (dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : "");
return 0;
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index b8b10a409c7b..ca9bc1e4803c 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -110,6 +110,7 @@ struct gpio_desc {
#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
+#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
/* Connection label */
const char *label;
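Taken together, the gpiolib changes above expose line bias to userspace: the new GPIOHANDLE_REQUEST_BIAS_* flags, the FLAG_BIAS_DISABLE descriptor bit, and the GPIOHANDLE_SET_CONFIG_IOCTL call that reconfigures an already requested handle. A rough userspace sketch of how a consumer might use them, assuming a uapi <linux/gpio.h> that includes this series (error handling trimmed; chip_fd and line are placeholders supplied by the caller):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

/* Request one line as a pulled-up input, then drop the pull later. */
int request_pulled_up_input(int chip_fd, unsigned int line)
{
	struct gpiohandle_request req;
	struct gpiohandle_config cfg;

	memset(&req, 0, sizeof(req));
	req.lineoffsets[0] = line;
	req.lines = 1;
	req.flags = GPIOHANDLE_REQUEST_INPUT | GPIOHANDLE_REQUEST_BIAS_PULL_UP;
	if (ioctl(chip_fd, GPIO_GET_LINEHANDLE_IOCTL, &req) < 0)
		return -1;

	/* Later: disable the bias without releasing the handle */
	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = GPIOHANDLE_REQUEST_INPUT | GPIOHANDLE_REQUEST_BIAS_DISABLE;
	if (ioctl(req.fd, GPIOHANDLE_SET_CONFIG_IOCTL, &cfg) < 0)
		return -1;

	return req.fd;
}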
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e67c194c2aca..1168351267fd 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -93,6 +93,20 @@ config DRM_KMS_FB_HELPER
help
FBDEV helpers for KMS drivers.
+config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+ bool "Enable refcount backtrace history in the DP MST helpers"
+ select STACKDEPOT
+ depends on DRM_KMS_HELPER
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
+ Enables debug tracing for topology refs in DRM's DP MST helpers. A
+ history of each topology reference/dereference will be printed to the
+ kernel log once a port or branch device's topology refcount reaches 0.
+
+ This has the potential to use a lot of memory and print some very
+ large kernel messages. If in doubt, say "N".
+
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
@@ -165,13 +179,26 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_TTM_DMA_PAGE_POOL
+ bool
+ depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
+ default y
+ help
+ Choose this if you need the TTM dma page pool
+
config DRM_VRAM_HELPER
tristate
depends on DRM
- select DRM_TTM
help
Helpers for VRAM memory management
+config DRM_TTM_HELPER
+ tristate
+ depends on DRM
+ select DRM_TTM
+ help
+ Helpers for ttm-based gem objects
+
config DRM_GEM_CMA_HELPER
bool
depends on DRM
@@ -226,9 +253,9 @@ config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
select FW_LOADER
- select DRM_KMS_HELPER
+ select DRM_KMS_HELPER
select DRM_SCHED
- select DRM_TTM
+ select DRM_TTM
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
@@ -257,6 +284,7 @@ config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
select DRM_KMS_HELPER
+ select CRC32
default n
help
Virtual Kernel Mode-Setting (VKMS) is used for testing or for
@@ -397,7 +425,7 @@ config DRM_R128
config DRM_I810
tristate "Intel I810"
- # !PREEMPT because of missing ioctl locking
+ # !PREEMPTION because of missing ioctl locking
depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 82ff826b33cc..9f1c7c486f88 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,10 +33,12 @@ drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_vram_helper-y := drm_gem_vram_helper.o \
- drm_vram_helper_common.o \
- drm_vram_mm_helper.o
+ drm_vram_helper_common.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
+drm_ttm_helper-y := drm_gem_ttm_helper.o
+obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
+
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 00962a659009..ca0e435559d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,8 +53,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
- amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+ amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
+ amdgpu_umc.o smu_v11_0_i2c.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
@@ -67,7 +68,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
- arct_reg_init.o navi12_reg_init.o
+ arct_reg_init.o navi12_reg_init.o mxgpu_nv.o
# add DF block
amdgpu-y += \
@@ -83,7 +84,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_1.o
+ umc_v6_1.o umc_v6_0.o
# add IH block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bd37df5dd6d0..0c229a92a24b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -106,6 +107,8 @@ struct amdgpu_mgpu_info
uint32_t num_apu;
};
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
+
/*
* Modules parameters.
*/
@@ -122,6 +125,7 @@ extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
+extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
@@ -135,6 +139,7 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
@@ -146,11 +151,7 @@ extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
-extern int amdgpu_ngg;
-extern int amdgpu_prim_buf_per_se;
-extern int amdgpu_pos_buf_per_se;
-extern int amdgpu_cntl_sb_buf_per_se;
-extern int amdgpu_param_buf_per_se;
+extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
@@ -167,6 +168,12 @@ extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_noretry;
+extern int amdgpu_force_asic_type;
+#ifdef CONFIG_HSA_AMD
+extern int sched_policy;
+#else
+static const int sched_policy = KFD_SCHED_POLICY_HWS;
+#endif
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -283,6 +290,9 @@ struct amdgpu_ip_block_version {
const struct amd_ip_funcs *funcs;
};
+#define HW_REV(_Major, _Minor, _Rev) \
+ ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
+
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
@@ -425,7 +435,6 @@ struct amdgpu_fpriv {
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
@@ -477,7 +486,6 @@ struct amdgpu_cs_parser {
uint64_t bytes_moved_vis_threshold;
uint64_t bytes_moved;
uint64_t bytes_moved_vis;
- struct amdgpu_bo_list_entry *evictable;
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
@@ -624,6 +632,11 @@ struct amdgpu_fw_vram_usage {
u64 size;
struct amdgpu_bo *reserved_bo;
void *va;
+
+ /* Offset on the top of VRAM, used as c2p write buffer.
+ */
+ u64 mem_train_fb_loc;
+ bool mem_train_support;
};
/*
@@ -644,71 +657,14 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
- u32 ref_and_mask_sdma2;
- u32 ref_and_mask_sdma3;
- u32 ref_and_mask_sdma4;
- u32 ref_and_mask_sdma5;
- u32 ref_and_mask_sdma6;
- u32 ref_and_mask_sdma7;
-};
-
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
};
-struct amdgpu_nbio_funcs {
- const struct nbio_hdp_flush_reg *hdp_flush_reg;
- u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
- u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
- u32 (*get_rev_id)(struct amdgpu_device *adev);
- void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
- u32 (*get_memsize)(struct amdgpu_device *adev);
- void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index, int doorbell_size);
- void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
- int doorbell_index, int instance);
- void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*ih_doorbell_range)(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*ih_control)(struct amdgpu_device *adev);
- void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
- void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
struct amdgpu_df_funcs {
void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
void (*enable_broadcast_mode)(struct amdgpu_device *adev,
bool enable);
u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
@@ -813,6 +769,7 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
+ struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -921,6 +878,12 @@ struct amdgpu_device {
u32 cg_flags;
u32 pg_flags;
+ /* nbio */
+ struct amdgpu_nbio nbio;
+
+ /* mmhub */
+ struct amdgpu_mmhub mmhub;
+
/* gfx */
struct amdgpu_gfx gfx;
@@ -974,9 +937,7 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
- const struct amdgpu_nbio_funcs *nbio_funcs;
const struct amdgpu_df_funcs *df_funcs;
- const struct amdgpu_mmhub_funcs *mmhub_funcs;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -1006,11 +967,11 @@ struct amdgpu_device {
struct mutex lock_reset;
struct amdgpu_doorbell_index doorbell_index;
+ struct mutex notifier_lock;
+
int asic_reset_res;
struct work_struct xgmi_reset_work;
- bool in_baco_reset;
-
long gfx_timeout;
long sdma_timeout;
long video_timeout;
@@ -1018,6 +979,9 @@ struct amdgpu_device {
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
+
+ /* device pstate */
+ int pstate;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1032,6 +996,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 07eb29885372..d3da9dde4ee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -63,45 +63,10 @@ void amdgpu_amdkfd_fini(void)
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
- const struct kfd2kgd_calls *kfd2kgd;
-
- switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
- break;
-#endif
- case CHIP_CARRIZO:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
- break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
- break;
- case CHIP_ARCTURUS:
- kfd2kgd = amdgpu_amdkfd_arcturus_get_functions();
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
- break;
- default:
- dev_info(adev->dev, "kfd not supported on this ASIC\n");
- return;
- }
+ bool vf = amdgpu_sriov_vf(adev);
adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->pdev, adev->asic_type, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -165,14 +130,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
- /* remove the KIQ bit as well */
- if (adev->gfx.kiq.ring.sched.ready)
- clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
- adev->gfx.kiq.ring.me - 1,
- adev->gfx.kiq.ring.pipe,
- adev->gfx.kiq.ring.queue),
- gpu_resources.queue_bitmap);
-
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
* nbits is not compile time constant
*/
@@ -202,7 +159,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
+ kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
}
}
@@ -709,38 +666,14 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
return 0;
}
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
-{
- return NULL;
-}
-
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g)
+ unsigned int asic_type, bool vf)
{
return NULL;
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index e519df3fd2b6..069d5d230810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -57,7 +57,7 @@ struct kgd_mem {
unsigned int mapped_to_gpu_memory;
uint64_t va;
- uint32_t mapping_flags;
+ uint32_t alloc_flags;
atomic_t invalid;
struct amdkfd_process_info *process_info;
@@ -137,12 +137,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void);
-
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
@@ -179,10 +173,17 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
+/* Read user wptr from a specified user address space with page fault
+ * disabled. The memory must be pinned and mapped to the hardware when
+ * this is called in hqd_load functions, so it should never fault in
+ * the first place. This resolves a circular lock dependency involving
+ * four locks, including the DQM lock and mmap_sem.
+ */
#define read_user_wptr(mmptr, wptr, dst) \
({ \
bool valid = false; \
if ((mmptr) && (wptr)) { \
+ pagefault_disable(); \
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
} else if (current->mm == NULL) { \
@@ -190,6 +191,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
valid = !get_user((dst), (wptr)); \
unuse_mm(mmptr); \
} \
+ pagefault_enable(); \
} \
valid; \
})
@@ -240,8 +242,9 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g);
+ unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index c79aaebeeaf0..b6713e0ed1b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -19,10 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#undef pr_fmt
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
@@ -69,11 +65,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[8] = {
+ uint32_t sdma_engine_reg_base[8] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
@@ -91,111 +87,82 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA7, 0,
mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
-static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
- u32 instance, u32 offset)
-{
- switch (instance) {
- case 0:
- return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
- case 1:
- return (adev->reg_offset[SDMA1_HWIP][0][1] + offset);
- case 2:
- return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
- case 3:
- return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
- case 4:
- return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
- case 5:
- return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
- case 6:
- return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
- case 7:
- return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
- default:
- break;
- }
- return 0;
-}
-
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev,
- m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -205,7 +172,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -215,15 +183,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -235,14 +203,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -255,40 +223,42 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
+const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -304,20 +274,11 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index d10f483f5e27..61cd707158e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -19,18 +19,9 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#undef pr_fmt
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
@@ -42,6 +33,7 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
+#include "gfxhub_v2_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -50,63 +42,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-#if 0
-static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
-#endif
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -139,37 +74,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions()
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -250,11 +154,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
ATC_VMID0_PASID_MAPPING__VALID_MASK;
pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
- /*
- * need to do this twice, once for gfx and once for mmhub
- * for ATC add 16 to VMID for mmhub, for IH different registers.
- * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
- */
pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
@@ -306,11 +205,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
/* On gfx10, mmSDMA1_xxx registers are defined NOT based
@@ -322,12 +221,12 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
@@ -488,72 +387,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -563,28 +457,26 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
- pr_debug("sdma base addr %x\n", sdma_base_addr);
-
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -618,14 +510,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -746,59 +638,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -830,6 +715,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid;
+ uint16_t queried_pasid;
+ bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
if (amdgpu_emu_mode == 0 && ring->sched.ready)
@@ -838,13 +725,13 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
- if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, 0);
- break;
- }
+
+ ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, 0);
+ break;
}
}
@@ -914,7 +801,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
@@ -922,18 +808,31 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
return;
}
- /* TODO: take advantage of per-process address space size. For
- * now, all processes share the same address space size, like
- * on GFX8 and older.
- */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+ /* SDMA is on gfxhub as well for Navi1* series */
+ gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
+
+const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .get_tile_config = amdgpu_amdkfd_get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .get_hive_id = amdgpu_amdkfd_get_hive_id,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5f459bf5f622..6e6f0a99ec06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -86,65 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -170,37 +109,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -303,14 +211,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -413,60 +322,52 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdma_rlc_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdma_rlc_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -524,13 +425,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -645,32 +546,34 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
@@ -758,24 +661,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
@@ -855,3 +750,28 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+
+const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 6d2f61449606..bfbddedb2380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,9 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -44,62 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -125,38 +66,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -260,13 +169,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -398,59 +309,51 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdmax_rlcx_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -517,13 +420,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -641,54 +544,48 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -798,3 +695,28 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
RREG32(mmVM_INVALIDATE_RESPONSE);
return 0;
}
+
+const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index e262f2ac07a3..47c853ef1051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,17 +19,10 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
@@ -50,9 +43,6 @@
#include "gmc_v9_0.h"
-#define V9_PIPE_PER_MEC (4)
-#define V9_QUEUES_PER_PIPE_MEC (8)
-
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
@@ -226,22 +216,21 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
};
- uint32_t retval;
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
-
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
@@ -388,71 +377,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -462,7 +447,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -472,15 +458,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -514,14 +500,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -584,59 +570,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
@@ -671,6 +650,8 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid, i;
+ uint16_t queried_pasid;
+ bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
uint32_t flush_type = 0;
@@ -686,14 +667,14 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
- if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- i, flush_type);
- break;
- }
+
+ ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ i, flush_type);
+ break;
}
}
@@ -777,15 +758,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
-void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid)
-{
- /* No longer needed on GFXv9. The scratch base address is
- * passed to the shader by the CP. It's the user mode driver's
- * responsibility.
- */
-}
-
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
@@ -811,7 +783,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
-static const struct kfd2kgd_calls kfd2kgd = {
+const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -827,19 +799,11 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 26d8879bff9d..d9e9ad22b2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -55,14 +55,10 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid);
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base);
-void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6d021ecc8d59..7d35b5b66229 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -19,9 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
@@ -33,11 +30,6 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
-/* Special VM and GART address alignment needed for VI pre-Fiji due to
- * a HW bug.
- */
-#define VI_BO_SIZE_ALIGN (0x8000)
-
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -349,13 +341,46 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
- ret = amdgpu_vm_update_directories(adev, vm);
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
return ret;
return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+{
+ struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ uint32_t mapping_flags;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (bo_adev == adev)
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else {
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+ break;
+ default:
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+
+ return amdgpu_gem_va_map_flags(adev, mapping_flags);
+}
+
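As an illustration (not part of the patch): with the new get_pte_flags() above, a coherent VRAM allocation mapped on the owning Arcturus device ends up with AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_CC (plus WRITEABLE/EXECUTABLE when requested), the same BO mapped from a peer GPU falls back to AMDGPU_VM_MTYPE_UC, and every other ASIC keeps the previous coherent-UC / non-coherent-NC split.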
/* add_bo_to_vm - Add a BO to a VM
*
 * Everything that needs to be done only once when a BO is first added
@@ -404,8 +429,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
}
bo_va_entry->va = va;
- bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
- mem->mapping_flags);
+ bo_va_entry->pte_flags = get_pte_flags(adev, mem);
bo_va_entry->kgd_dev = (void *)adev;
list_add(&bo_va_entry->bo_list, list_bo_va);
@@ -481,8 +505,7 @@ static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
*
* Returns 0 for success, negative errno for errors.
*/
-static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
- uint64_t user_addr)
+static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
@@ -586,7 +609,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else {
@@ -659,7 +682,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else
@@ -1079,10 +1102,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
- int byte_align;
u32 domain, alloc_domain;
u64 alloc_flags;
- uint32_t mapping_flags;
int ret;
/*
@@ -1135,25 +1156,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if ((*mem)->aql_queue)
size = size >> 1;
- /* Workaround for TLB bug on older VI chips */
- byte_align = (adev->family == AMDGPU_FAMILY_VI &&
- adev->asic_type != CHIP_FIJI &&
- adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11 &&
- adev->asic_type != CHIP_POLARIS12 &&
- adev->asic_type != CHIP_VEGAM) ?
- VI_BO_SIZE_ALIGN : 1;
-
- mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (flags & ALLOC_MEM_FLAGS_WRITABLE)
- mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
- mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- if (flags & ALLOC_MEM_FLAGS_COHERENT)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- (*mem)->mapping_flags = mapping_flags;
+ (*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
@@ -1168,7 +1171,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
memset(&bp, 0, sizeof(bp));
bp.size = size;
- bp.byte_align = byte_align;
+ bp.byte_align = 1;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
bp.type = bo_type;
@@ -1195,7 +1198,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- ret = init_user_pages(*mem, current->mm, user_addr);
+ ret = init_user_pages(*mem, user_addr);
if (ret)
goto allocate_init_user_pages_failed;
}
@@ -1626,9 +1629,10 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->mapping_flags =
- AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+ (*mem)->alloc_flags =
+ ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
+ ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
(*mem)->va = va;
@@ -1739,6 +1743,10 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
return ret;
}
+ /*
+ * FIXME: Cannot ignore the return code, must hold
+ * notifier_lock
+ */
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
/* Mark the BO as valid unless it was invalidated
@@ -1797,8 +1805,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
- true);
+ ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
@@ -1996,7 +2003,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save, true);
+ false, &duplicate_save);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1c9d40f97a9b..72232fccf61a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -2038,6 +2038,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
if (adev->is_atom_fw) {
amdgpu_atomfirmware_scratch_regs_init(adev);
amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+ if (ret) {
+ DRM_ERROR("Failed to get mem train fb location.\n");
+ return ret;
+ }
} else {
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index daf687428cdb..ff4eb96bdfb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -27,6 +27,7 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
+#include "soc15_hw_ip.h"
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
@@ -120,65 +121,14 @@ union vram_info {
struct atom_vram_info_header_v2_3 v23;
struct atom_vram_info_header_v2_4 v24;
};
-/*
- * Return vram width from integrated system info table, if available,
- * or 0 if not.
- */
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
- u16 data_offset, size;
- union igp_info *igp_info;
- union vram_info *vram_info;
- u32 mem_channel_number;
- u32 mem_channel_width;
- u8 frev, crev;
-
- if (adev->flags & AMD_IS_APU)
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- integratedsysteminfo);
- else
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- vram_info);
- /* get any igp specific overrides */
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- if (adev->flags & AMD_IS_APU) {
- igp_info = (union igp_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- return mem_channel_number * 64;
- default:
- return 0;
- }
- } else {
- vram_info = (union vram_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 3:
- mem_channel_number = vram_info->v23.vram_module[0].channel_num;
- mem_channel_width = vram_info->v23.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- case 4:
- mem_channel_number = vram_info->v24.vram_module[0].channel_num;
- mem_channel_width = vram_info->v24.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- default:
- return 0;
- }
- }
- }
-
- return 0;
-}
+union vram_module {
+ struct atom_vram_module_v9 v9;
+ struct atom_vram_module_v10 v10;
+};
-static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
- int atom_mem_type)
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+ int atom_mem_type)
{
int vram_type;
@@ -219,19 +169,25 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
return vram_type;
}
-/*
- * Return vram type from either integrated system info table
- * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
- */
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+
+
+int
+amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
+ int index, i = 0;
u16 data_offset, size;
union igp_info *igp_info;
union vram_info *vram_info;
+ union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
if (adev->flags & AMD_IS_APU)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -239,6 +195,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
else
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_info);
+
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) {
@@ -247,25 +204,67 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
default:
- return 0;
+ return -EINVAL;
}
} else {
vram_info = (union vram_info *)
(mode_info->atom_context->bios + data_offset);
+ module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
switch (crev) {
case 3:
- mem_type = vram_info->v23.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v23.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
case 4:
- mem_type = vram_info->v24.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v24.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v10.channel_num;
+ mem_channel_width = vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
default:
- return 0;
+ return -EINVAL;
}
}
+
}
return 0;
@@ -464,3 +463,138 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
}
return -EINVAL;
}
+
+/*
+ * Check if VBIOS supports GDDR6 training data save/restore
+ */
+static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
+{
+ uint16_t data_offset;
+ int index;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
+ NULL, NULL, &data_offset)) {
+ struct atom_firmware_info_v3_1 *firmware_info =
+ (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
+ data_offset);
+
+ DRM_DEBUG("atom firmware capability:0x%08x.\n",
+ le32_to_cpu(firmware_info->firmware_capability));
+
+ if (le32_to_cpu(firmware_info->firmware_capability) &
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
+ return true;
+ }
+
+ return false;
+}
+
+static int gddr6_mem_train_support(struct amdgpu_device *adev)
+{
+ int ret;
+ uint32_t major, minor, revision, hw_v;
+
+ if (gddr6_mem_train_vbios_support(adev)) {
+ amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
+ hw_v = HW_REV(major, minor, revision);
+ /*
+ * Treat revision 0 as a special case: on some Navi10 A0 parts the MP0 and
+ * MMHUB registers are missing, so the driver cannot discover the hwip
+ * information there. Since none of those functions will be initialized in
+ * that case, this should not cause any problems.
+ */
+ switch (hw_v) {
+ case HW_REV(11, 0, 0):
+ case HW_REV(11, 0, 5):
+ ret = 1;
+ break;
+ default:
+ DRM_ERROR("memory training vbios supports but psp hw(%08x)"
+ " doesn't support!\n", hw_v);
+ ret = -1;
+ break;
+ }
+ } else {
+ ret = 0;
+ hw_v = -1;
+ }
+
+
+ DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
+ return ret;
+}
+
+int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ unsigned char *bios = ctx->bios;
+ struct vram_reserve_block *reserved_block;
+ int index, block_number;
+ uint8_t frev, crev;
+ uint16_t data_offset, size;
+ uint32_t start_address_in_kb;
+ uint64_t offset;
+ int ret;
+
+ adev->fw_vram_usage.mem_train_support = false;
+
+ if (adev->asic_type != CHIP_NAVI10 &&
+ adev->asic_type != CHIP_NAVI14)
+ return 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = gddr6_mem_train_support(adev);
+ if (ret == -1)
+ return -EINVAL;
+ else if (ret == 0)
+ return 0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ vram_usagebyfirmware);
+ ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset);
+ if (ret == 0) {
+ DRM_ERROR("parse data header failed.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
+ " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
+ /* only support 2.1+ */
+ if (((uint16_t)frev << 8 | crev) < 0x0201) {
+ DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
+ return -EINVAL;
+ }
+
+ reserved_block = (struct vram_reserve_block *)
+ (bios + data_offset + sizeof(struct atom_common_table_header));
+ block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
+ / sizeof(struct vram_reserve_block);
+ reserved_block += (block_number > 0) ? block_number-1 : 0;
+ DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
+ block_number,
+ le32_to_cpu(reserved_block->start_address_in_kb),
+ le16_to_cpu(reserved_block->used_by_firmware_in_kb),
+ le16_to_cpu(reserved_block->used_by_driver_in_kb));
+ if (reserved_block->used_by_firmware_in_kb > 0) {
+ start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
+ offset = (uint64_t)start_address_in_kb * ONE_KiB;
+ if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) {
+ offset -= ONE_MiB;
+ }
+
+ offset &= ~(ONE_MiB - 1);
+ adev->fw_vram_usage.mem_train_fb_loc = offset;
+ adev->fw_vram_usage.mem_train_support = true;
+ DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
+ ret = 0;
+ } else {
+ DRM_ERROR("used_by_firmware_in_kb is 0!\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
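A small worked example of the alignment rule above (illustrative numbers only; ONE_KiB/ONE_MiB are the usual 2^10/2^20 byte constants used by the code):

	uint64_t start_address_in_kb = 0x3FF001;           /* example vbios value */
	uint64_t offset = start_address_in_kb * ONE_KiB;   /* 0xFFC00400 */

	/* The start lies within the first 4 KiB of a 1 MiB boundary, so back
	 * off one MiB before rounding down (presumably so the 1 MiB-aligned
	 * training buffer lands below the reserved block). */
	if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1))  /* 0x400 < 0x1001 */
		offset -= ONE_MiB;                          /* 0xFFB00400 */
	offset &= ~(ONE_MiB - 1);                           /* 0xFFB00000 */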
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 5ec6f92f353c..f871af5ea6f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -29,8 +29,9 @@
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3e35a8f2c5e5..a97fb759e2f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -613,17 +613,7 @@ static bool amdgpu_atpx_detect(void)
bool d3_supported = false;
struct pci_dev *parent_pdev;
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- vga_count++;
-
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
-
- parent_pdev = pci_upstream_bridge(pdev);
- d3_supported |= parent_pdev && parent_pdev->bridge_d3;
- amdgpu_atpx_get_quirks(pdev);
- }
-
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 649e68c4479b..d1495e1c9289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
int i, r;
start_jiffies = jiffies;
@@ -44,16 +44,14 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (r)
goto exit_do_move;
- dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
- if (fence)
- dma_fence_put(fence);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ece55c8fa673..a62cbc8199de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -217,11 +217,10 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -236,9 +235,8 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
int encoder_type)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
@@ -347,10 +345,9 @@ static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1022,8 +1019,12 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
*/
if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *list_amdgpu_connector;
- list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(list_connector,
+ &iter) {
if (connector == list_connector)
continue;
list_amdgpu_connector = to_amdgpu_connector(list_connector);
@@ -1040,6 +1041,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
}
}
@@ -1065,9 +1067,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1117,9 +1118,8 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1134,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1271,9 +1271,8 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
@@ -1292,10 +1291,9 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
bool found = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1501,6 +1499,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
struct drm_encoder *encoder;
@@ -1515,10 +1514,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
return;
/* see if we already added it */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->connector_id == connector_id) {
amdgpu_connector->devices |= supported_device;
+ drm_connector_list_iter_end(&iter);
return;
}
if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
@@ -1533,6 +1534,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
}
}
+ drm_connector_list_iter_end(&iter);
/* check if it's a dp bridge */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82823d9a8ba8..5ca905b4a0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
+#include "amdgpu_ras.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
@@ -449,75 +450,12 @@ retry:
return r;
}
-/* Last resort, try to evict something from the current working set */
-static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo *validated)
-{
- uint32_t domain = validated->allowed_domains;
- struct ttm_operation_ctx ctx = { true, false };
- int r;
-
- if (!p->evictable)
- return false;
-
- for (;&p->evictable->tv.head != &p->validated;
- p->evictable = list_prev_entry(p->evictable, tv.head)) {
-
- struct amdgpu_bo_list_entry *candidate = p->evictable;
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- bool update_bytes_moved_vis;
- uint32_t other;
-
- /* If we reached our current BO we can forget it */
- if (bo == validated)
- break;
-
- /* We can't move pinned BOs here */
- if (bo->pin_count)
- continue;
-
- other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
- /* Check if this BO is in one of the domains we need space for */
- if (!(other & domain))
- continue;
-
- /* Check if we can move this BO somewhere else */
- other = bo->allowed_domains & ~domain;
- if (!other)
- continue;
-
- /* Good we can try to move this BO somewhere else */
- update_bytes_moved_vis =
- !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_bo_placement_from_domain(bo, other);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- p->bytes_moved += ctx.bytes_moved;
- if (update_bytes_moved_vis)
- p->bytes_moved_vis += ctx.bytes_moved;
-
- if (unlikely(r))
- break;
-
- p->evictable = list_prev_entry(p->evictable, tv.head);
- list_move(&candidate->tv.head, &p->validated);
-
- return true;
- }
-
- return false;
-}
-
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_cs_parser *p = param;
int r;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
@@ -554,9 +492,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
lobj->user_pages);
}
- if (p->evictable == lobj)
- p->evictable = NULL;
-
r = amdgpu_cs_validate(p, bo);
if (r)
return r;
@@ -603,8 +538,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
e->tv.num_shared = 2;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
- if (p->bo_list->first_userptr != p->bo_list->num_entries)
- p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
@@ -646,7 +579,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates, false);
+ &duplicates);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
@@ -657,9 +590,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
p->bytes_moved_vis = 0;
- p->evictable = list_last_entry(&p->validated,
- struct amdgpu_bo_list_entry,
- tv.head);
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
amdgpu_cs_validate, p);
@@ -911,7 +841,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
if (r)
return r;
@@ -1287,11 +1217,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
- /* No memory allocation is allowed while holding the mn lock.
- * p->mn is hold until amdgpu_cs_submit is finished and fence is added
- * to BOs.
+ /* No memory allocation is allowed while holding the notifier lock.
+ * The lock is held until amdgpu_cs_submit is finished and fence is
+ * added to BOs.
*/
- amdgpu_mn_lock(p->mn);
+ mutex_lock(&p->adev->notifier_lock);
/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
* -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
@@ -1334,13 +1264,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
- amdgpu_mn_unlock(p->mn);
+ mutex_unlock(&p->adev->notifier_lock);
return 0;
error_abort:
drm_sched_job_cleanup(&job->base);
- amdgpu_mn_unlock(p->mn);
+ mutex_unlock(&p->adev->notifier_lock);
error_unlock:
amdgpu_job_free(job);
@@ -1355,6 +1285,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
bool reserved_buffers = false;
int i, r;
+ if (amdgpu_ras_intr_triggered())
+ return -EHWPOISON;
+
if (!adev->accel_working)
return -EBUSY;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 35a8d3c96fc9..08047bc4d588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 5652cc72ed3a..8e6726e0d035 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -859,6 +859,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
int r = 0, i;
+ /* Avoid accidentally unparking the sched thread during GPU reset */
+ mutex_lock(&adev->lock_reset);
+
/* hold on the scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -884,6 +887,8 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
kthread_unpark(ring->sched.thread);
}
+ mutex_unlock(&adev->lock_reset);
+
return 0;
}
@@ -1036,6 +1041,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
if (!fences)
return -ENOMEM;
+ /* Avoid accidentally unparking the sched thread during GPU reset */
+ mutex_lock(&adev->lock_reset);
+
/* stop the scheduler */
kthread_park(ring->sched.thread);
@@ -1075,10 +1083,11 @@ failure:
/* restart the scheduler */
kthread_unpark(ring->sched.thread);
+ mutex_unlock(&adev->lock_reset);
+
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (fences)
- kfree(fences);
+ kfree(fences);
return 0;
}
@@ -1090,8 +1099,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
- adev->ddev->primary->debugfs_root,
- (void *)adev, &fops_ib_preempt);
+ adev->ddev->primary->debugfs_root, adev,
+ &fops_ib_preempt);
if (!(adev->debugfs_preempt)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
return -EIO;
@@ -1103,8 +1112,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
{
- if (adev->debugfs_preempt)
- debugfs_remove(adev->debugfs_preempt);
+ debugfs_remove(adev->debugfs_preempt);
}
#else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7a6c837c0a85..c17505fba988 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -65,6 +65,8 @@
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
+#include <linux/suspend.h>
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -78,7 +80,7 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static const char *amdgpu_asic_name[] = {
+const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
"VERDE",
@@ -151,6 +153,36 @@ bool amdgpu_device_is_px(struct drm_device *dev)
return false;
}
+/**
+ * VRAM access helper functions.
+ *
+ * amdgpu_device_vram_access - read/write a buffer in vram
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size in bytes; @buf must hold at least @size bytes
+ * @write: true - write to vram, otherwise - read from vram
+ */
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write)
+{
+ uint64_t last;
+ unsigned long flags;
+
+ last = size - 4;
+ for (last += pos; pos <= last; pos += 4) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+ if (write)
+ WREG32_NO_KIQ(mmMM_DATA, *buf++);
+ else
+ *buf++ = RREG32_NO_KIQ(mmMM_DATA);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+}
+
/*
* MMIO register access helper functions.
*/
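A minimal usage sketch for the amdgpu_device_vram_access() helper added above (illustrative only, not part of the patch; adev and vram_offset are assumed to be a valid device pointer and a 4-byte-aligned VRAM offset):

	uint32_t buf[256];

	/* Read 1 KiB of VRAM through the MM_INDEX/MM_DATA window ... */
	amdgpu_device_vram_access(adev, vram_offset, buf, sizeof(buf), false);

	/* ... and write the (possibly modified) buffer back. */
	amdgpu_device_vram_access(adev, vram_offset, buf, sizeof(buf), true);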
@@ -1023,12 +1055,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_device_check_block_size(adev);
- ret = amdgpu_device_get_job_timeout_settings(adev);
- if (ret) {
- dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- return ret;
- }
-
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
return ret;
@@ -1469,6 +1495,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ goto parse_soc_bounding_box;
+
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
@@ -1496,7 +1525,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->gfx.config.num_packer_per_sc =
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
}
+
+parse_soc_bounding_box:
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ /*
+ * soc bounding box info is not integrated in the discovery table,
+ * we always need to parse it from gpu info firmware.
+ */
if (hdr->version_minor == 2) {
const struct gpu_info_firmware_v1_2 *gpu_info_fw =
(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
@@ -1613,6 +1648,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ amdgpu_discovery_get_gfx_info(adev);
+
amdgpu_amdkfd_device_probe(adev);
if (amdgpu_sriov_vf(adev)) {
@@ -1622,7 +1660,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1839,6 +1877,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
goto init_failed;
+ /*
+ * Retired pages will be loaded from eeprom and reserved here.
+ * This should be called after amdgpu_device_ip_hw_init_phase2, since
+ * for some ASICs the RAS EEPROM code relies on the SMU being fully
+ * functional for I2C communication, which is only true at this point.
+ * recovery_init may fail, but it can free all resources allocated by
+ * itself and its failure should not stop the amdgpu init process.
+ *
+ * Note: theoretically, this should be called before all vram allocations
+ * to protect retired pages from being abused.
+ */
+ amdgpu_ras_recovery_init(adev);
+
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
@@ -2006,6 +2057,7 @@ out:
*/
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
+ struct amdgpu_gpu_instance *gpu_instance;
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -2031,8 +2083,39 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
- /* set to low pstate by default */
- amdgpu_xgmi_set_pstate(adev, 0);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ mutex_lock(&mgpu_info.mutex);
+
+ /*
+ * Reset the device p-state to low, as it was booted at high p-state.
+ *
+ * This should be performed only after all devices from the same
+ * hive have been initialized.
+ *
+ * However, the number of devices in the hive is not known in
+ * advance, since it is counted one by one as the devices are
+ * initialized.
+ *
+ * So we wait until all XGMI interlinked devices are initialized.
+ * This may introduce some delay as those devices may come from
+ * different hives. But that should be OK.
+ */
+ if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
+ for (i = 0; i < mgpu_info.num_gpu; i++) {
+ gpu_instance = &(mgpu_info.gpu_ins[i]);
+ if (gpu_instance->adev->flags & AMD_IS_APU)
+ continue;
+
+ r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+ if (r) {
+ DRM_ERROR("pstate setting failed (%d).\n", r);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&mgpu_info.mutex);
+ }
return 0;
}
@@ -2220,6 +2303,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
/* displays are handled in phase1 */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
continue;
+ /* PSP lost connection when err_event_athub occurs */
+ if (amdgpu_ras_intr_triggered() &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+ adev->ip_blocks[i].status.hw = false;
+ continue;
+ }
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -2231,17 +2320,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
/* handle putting the SMC in the appropriate state */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
if (is_support_sw_smu(adev)) {
- /* todo */
+ r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
} else if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_mp1_state) {
r = adev->powerplay.pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
adev->mp1_state);
- if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
- }
+ }
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
}
}
@@ -2556,6 +2645,73 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
adev->asic_reset_res, adev->ddev->unique);
}
+static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+ char *input = amdgpu_lockup_timeout;
+ char *timeout_setting = NULL;
+ int index = 0;
+ long timeout;
+ int ret = 0;
+
+ /*
+ * By default the timeout for non-compute jobs is 10000 ms, and no
+ * timeout is enforced on compute jobs. In SR-IOV or passthrough
+ * mode, the compute job timeout also defaults to 10000 ms.
+ */
+ adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ else
+ adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
+
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * There is only one value specified and
+ * it should apply to all non-compute jobs.
+ */
+ if (index == 1) {
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ }
+ }
+
+ return ret;
+}
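As an illustration of the accepted format (inferred from the switch above; not spelled out elsewhere in this hunk): amdgpu.lockup_timeout=10000,60000,10000,10000 would set the gfx, compute, sdma and video timeouts in that order; a single value such as amdgpu.lockup_timeout=5000 applies to all non-compute jobs (and also to compute jobs under SR-IOV or passthrough); an entry of 0 keeps the default for that slot, and a negative entry selects MAX_SCHEDULE_TIMEOUT, i.e. no timeout.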
/**
* amdgpu_device_init - initialize the driver
@@ -2583,7 +2739,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ddev = ddev;
adev->pdev = pdev;
adev->flags = flags;
- adev->asic_type = flags & AMD_ASIC_MASK;
+
+ if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
+ adev->asic_type = amdgpu_force_asic_type;
+ else
+ adev->asic_type = flags & AMD_ASIC_MASK;
+
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
adev->usec_timeout *= 2;
@@ -2633,6 +2794,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
+ mutex_init(&adev->notifier_lock);
mutex_init(&adev->virt.dpm_mutex);
mutex_init(&adev->psp.mutex);
@@ -2726,6 +2888,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ r = amdgpu_device_get_job_timeout_settings(adev);
+ if (r) {
+ dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
+ return r;
+ }
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -2942,7 +3110,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
int r;
DRM_INFO("amdgpu: finishing device.\n");
+ flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -2960,7 +3130,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
}
adev->accel_working = false;
- cancel_delayed_work_sync(&adev->delayed_init_work);
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
@@ -3014,6 +3183,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
struct amdgpu_device *adev;
struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@@ -3036,9 +3206,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- }
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -3089,15 +3261,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- pci_save_state(dev->pdev);
if (suspend) {
+ pci_save_state(dev->pdev);
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
- } else {
- r = amdgpu_asic_reset(adev);
- if (r)
- DRM_ERROR("amdgpu asic reset failed\n");
}
return 0;
@@ -3117,6 +3285,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_device *adev = dev->dev_private;
struct drm_crtc *crtc;
int r = 0;
@@ -3187,9 +3356,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* turn on display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
drm_modeset_unlock_all(dev);
}
amdgpu_fbdev_set_suspend(adev, 0);
@@ -3635,11 +3808,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
break;
}
}
-
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
- amdgpu_ras_reserve_bad_pages(tmp_adev);
- }
}
}
@@ -3743,25 +3911,18 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
adev->mp1_state = PP_MP1_STATE_NONE;
break;
}
- /* Block kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_pre_reset(adev);
return true;
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
- /*unlock kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
}
-
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -3781,11 +3942,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+ /*
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+ if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
+
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
need_full_reset = job_signaled = false;
INIT_LIST_HEAD(&device_list);
- dev_info(adev->dev, "GPU reset begin!\n");
+ dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset");
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -3812,9 +3986,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
return 0;
}
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Build list of devices to reset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (!hive) {
+ /* unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_device_unlock_adev(adev);
return -ENODEV;
}
@@ -3830,17 +4011,22 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
- /*
- * Mark these ASICs to be reseted as untracked first
- * And add them back after reset completed
- */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
- amdgpu_unregister_gpu_instance(tmp_adev);
-
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (tmp_adev != adev) {
+ amdgpu_device_lock_adev(tmp_adev, false);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
+ }
+
+ /*
+ * Mark these ASICs to be reset as untracked first
+ * and add them back after the reset completes
+ */
+ amdgpu_unregister_gpu_instance(tmp_adev);
+
/* disable ras on ALL IPs */
- if (amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3850,10 +4036,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
+
+ if (in_ras_intr)
+ amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
}
+ if (in_ras_intr)
+ goto skip_sched_resume;
+
/*
* Must check guilty signal here since after this point all old
* HW fences are force signaled.
@@ -3864,9 +4056,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dma_fence_is_signaled(job->base.s_fence->parent))
job_signaled = true;
- if (!amdgpu_device_ip_need_full_reset(adev))
- device_list_handle = &device_list;
-
if (job_signaled) {
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -3888,7 +4077,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (tmp_adev == adev)
continue;
- amdgpu_device_lock_adev(tmp_adev, false);
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -3916,6 +4104,7 @@ skip_hw_reset:
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -3937,12 +4126,18 @@ skip_hw_reset:
if (r) {
/* bad news, how to tell it to userspace ? */
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
}
+ }
+skip_sched_resume:
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /* unlock kfd: SRIOV would do it separately */
+ if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_post_reset(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1481899f86c1..f95092741c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
- uint32_t *p = (uint32_t *)binary;
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - BINARY_MAX_SIZE;
- unsigned long flags;
-
- while (pos < vram_size) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
- *p++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- pos += 4;
- }
+ uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
return 0;
}
@@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
+ adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
if (!adev->discovery)
return -ENOMEM;
@@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
}
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor)
+ int *major, int *minor, int *revision)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
@@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
*major = ip->major;
if (minor)
*minor = ip->minor;
+ if (revision)
+ *revision = ip->revision;
return 0;
}
ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 85b8c4d4d576..ba78e15d9b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,11 +24,13 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#define DISCOVERY_TMR_SIZE (64 << 10)
+
int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor);
+ int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 82efc1e22e61..3cadb0b76f22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -370,11 +370,13 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_connector_list_iter iter;
uint32_t devices;
int i = 0;
+ drm_connector_list_iter_begin(dev, &iter);
DRM_INFO("AMDGPU Display Connectors\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector->name);
@@ -438,6 +440,7 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
}
i++;
}
+ drm_connector_list_iter_end(&iter);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 61f108ec2b5c..e2eec7b66334 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -34,27 +34,12 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
+#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
/**
- * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
- * implementation
- * @obj: GEM buffer object (BO)
- *
- * Returns:
- * A scatter/gather table for the pinned pages of the BO's memory.
- */
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int npages = bo->tbo.num_pages;
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
-}
-
-/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
* @obj: GEM BO
*
@@ -179,92 +164,126 @@ err_fences_put:
}
/**
- * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
+ *
+ * @dmabuf: DMA-buf where we attach to
+ * @attach: attachment to add
+ *
+ * Add the attachment as user to the exported DMA-buf.
+ */
+static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ int r;
+
+ if (attach->dev->driver == adev->dev->driver)
+ return 0;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ /*
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
+ */
+ r = __dma_resv_make_exclusive(bo->tbo.base.resv);
+ if (r)
+ return r;
+
+ bo->prime_shared_count++;
+ amdgpu_bo_unreserve(bo);
+ return 0;
+}
+
+/**
+ * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
+ *
+ * @dmabuf: DMA-buf where we remove the attachment from
+ * @attach: the attachment to remove
+ *
+ * Called when an attachment is removed from the DMA-buf.
+ */
+static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
+ bo->prime_shared_count--;
+}
+
+/**
+ * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
+ * @dir: DMA direction
*
* Makes sure that the shared DMA buffer can be accessed by the target device.
* For now, simply pins it to the GTT domain, where it should be accessible by
* all DMA devices.
*
* Returns:
- * 0 on success or a negative error code on failure.
+ * sg_table filled with the DMA addresses to use or ERR_PTR with a negative error
+ * code.
*/
-static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
{
+ struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct sg_table *sgt;
long r;
- r = drm_gem_map_attach(dma_buf, attach);
- if (r)
- return r;
-
- r = amdgpu_bo_reserve(bo, false);
- if (unlikely(r != 0))
- goto error_detach;
-
-
- if (attach->dev->driver != adev->dev->driver) {
- /*
- * We only create shared fences for internal use, but importers
- * of the dmabuf rely on exclusive fences for implicitly
- * tracking write hazards. As any of the current fences may
- * correspond to a write, we need to convert all existing
- * fences on the reservation object into a single exclusive
- * fence.
- */
- r = __dma_resv_make_exclusive(bo->tbo.base.resv);
- if (r)
- goto error_unreserve;
- }
-
- /* pin buffer into GTT */
r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
- goto error_unreserve;
+ return ERR_PTR(r);
- if (attach->dev->driver != adev->dev->driver)
- bo->prime_shared_count++;
+ sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
-error_unreserve:
- amdgpu_bo_unreserve(bo);
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
-error_detach:
- if (r)
- drm_gem_map_detach(dma_buf, attach);
- return r;
+ return sgt;
+
+error_free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
}
/**
- * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
* @attach: DMA-buf attachment
+ * @sgt: sg_table to unmap
+ * @dir: DMA direction
*
* This is called when a shared DMA buffer no longer needs to be accessible by
* another device. For now, simply unpins the buffer from GTT.
*/
-static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
- struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int ret = 0;
-
- ret = amdgpu_bo_reserve(bo, true);
- if (unlikely(ret != 0))
- goto error;
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
amdgpu_bo_unpin(bo);
- if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
- bo->prime_shared_count--;
- amdgpu_bo_unreserve(bo);
-
-error:
- drm_gem_map_detach(dma_buf, attach);
}
/**
@@ -308,10 +327,11 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .attach = amdgpu_dma_buf_map_attach,
- .detach = amdgpu_dma_buf_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .dynamic_mapping = true,
+ .attach = amdgpu_dma_buf_attach,
+ .detach = amdgpu_dma_buf_detach,
+ .map_dma_buf = amdgpu_dma_buf_map,
+ .unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
@@ -321,7 +341,6 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
/**
* amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
- * @dev: DRM device
* @gobj: GEM BO
* @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
*
@@ -350,31 +369,28 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
}
/**
- * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
- * implementation
+ * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
+ *
* @dev: DRM device
- * @attach: DMA-buf attachment
- * @sg: Scatter/gather table
+ * @dma_buf: DMA-buf
*
- * Imports shared DMA buffer memory exported by another device.
+ * Creates an empty SG BO for DMA-buf import.
*
* Returns:
* A new GEM BO of the given DRM device, representing the memory
* described by the given DMA-buf.
*/
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
+static struct drm_gem_object *
+amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
- struct dma_resv *resv = attach->dmabuf->resv;
+ struct dma_resv *resv = dma_buf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;
memset(&bp, 0, sizeof(bp));
- bp.size = attach->dmabuf->size;
+ bp.size = dma_buf->size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_CPU;
bp.flags = 0;
@@ -385,11 +401,9 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
goto error;
- bo->tbo.sg = sg;
- bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
- if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
@@ -405,15 +419,15 @@ error:
* @dev: DRM device
* @dma_buf: Shared DMA buffer
*
- * The main work is done by the &drm_gem_prime_import helper, which in turn
- * uses &amdgpu_gem_prime_import_sg_table.
+ * Import a dma_buf into the driver and potentially create a new GEM object.
*
* Returns:
* GEM BO representing the shared DMA buffer for the given device.
*/
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
+ struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
@@ -428,5 +442,17 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, dma_buf);
+ obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
+ if (IS_ERR(obj))
+ return obj;
+
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ if (IS_ERR(attach)) {
+ drm_gem_object_put(obj);
+ return ERR_CAST(attach);
+ }
+
+ get_dma_buf(dma_buf);
+ obj->import_attach = attach;
+ return obj;
}
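As a rough, hedged sketch of the importer-side counterpart to this dynamic
attachment path (the helper signatures are assumed from the dma-buf core of
this kernel generation; "dev", "dma_buf" and the DMA direction are
placeholders, not taken from this patch):

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(dma_buf, dev, true);	/* dynamic importer */
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* For dynamic buffers the core expects the reservation lock to be
	 * held around mapping/unmapping the attachment.
	 */
	dma_resv_lock(dma_buf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dma_buf->resv);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dma_buf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program the device with sgt->sgl, then tear down in reverse:
	 * dma_buf_unmap_attachment(), dma_buf_detach().
	 */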
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index 5012e6ab58f1..ec447a7b6b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -25,11 +25,6 @@
#include <drm/drm_gem.h>
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 5803fcbae22f..9cc270efee7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
+ !low ? &clk_freq : NULL,
+ true);
if (ret)
return 0;
return clk_freq * 100;
@@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
+ !low ? &clk_freq : NULL,
+ true);
if (ret)
return 0;
return clk_freq * 100;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 1c5c0fd76dbf..2cfb677272af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -298,12 +298,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-#define amdgpu_smu_get_current_power_state(adev) \
- ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
-
-#define amdgpu_smu_set_power_state(adev) \
- ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
-
#define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b19157b19fa0..0ffc9447b573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -43,6 +43,8 @@
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
+
/*
* KMS wrapper.
* - 3.0.0 - initial driver
@@ -82,13 +84,12 @@
* - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
* - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
+ * - 3.36.0 - Allow reading more status registers on si/cik
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 35
+#define KMS_DRIVER_MINOR 36
#define KMS_DRIVER_PATCHLEVEL 0
-#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
-
int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
@@ -101,7 +102,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH];
+char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -128,11 +129,7 @@ char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
/* OverDrive(bit 14) disabled by default*/
uint amdgpu_pp_feature_mask = 0xffffbfff;
-int amdgpu_ngg = 0;
-int amdgpu_prim_buf_per_se = 0;
-int amdgpu_pos_buf_per_se = 0;
-int amdgpu_cntl_sb_buf_per_se = 0;
-int amdgpu_param_buf_per_se = 0;
+uint amdgpu_force_long_training = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
@@ -146,12 +143,13 @@ int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry = 1;
+int amdgpu_force_asic_type = -1;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xfffffffb;
+uint amdgpu_ras_mask = 0xffffffff;
/**
* DOC: vramlimit (int)
@@ -244,16 +242,21 @@ module_param_named(msi, amdgpu_msi, int, 0444);
*
* The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is, one or
* multiple values can be specified. Zero and negative values are invalidated and will be adjusted
- * to default timeout.
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX. The second one is for Compute.
- * And the third and fourth ones are for SDMA and Video.
+ * to the default timeout.
+ *
+ * - With one value specified, the setting will apply to all non-compute jobs.
+ * - With multiple values specified, the first one will be for GFX.
+ * The second one is for Compute. The third and fourth ones are
+ * for SDMA and Video.
+ *
* By default (with no lockup_timeout settings), the timeout for all non-compute (GFX, SDMA and Video)
* jobs is 10000, and there is no timeout enforced on compute jobs.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs."
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
+ "for passthrough or sriov, 10000 for all jobs."
" 0: keep default value. negative: infinity timeout), "
- "format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
+ "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
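For example (an illustrative value, not taken from this patch), booting with
lockup_timeout=10000,60000,10000,10000 keeps 10 second timeouts for GFX, SDMA
and Video while letting compute jobs run for 60 seconds before they are treated
as hung; a single value such as lockup_timeout=5000 applies to all non-compute
jobs.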
/**
@@ -392,6 +395,14 @@ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
/**
+ * DOC: forcelongtraining (uint)
+ * Force long memory training in resume.
+ * The default is zero, which indicates short training in resume.
+ */
+MODULE_PARM_DESC(forcelongtraining, "force memory long training");
+module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
+
+/**
* DOC: pcie_gen_cap (uint)
* Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
* The default is 0 (automatic for each asic).
@@ -449,42 +460,6 @@ MODULE_PARM_DESC(virtual_display,
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
/**
- * DOC: ngg (int)
- * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
- */
-MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
-module_param_named(ngg, amdgpu_ngg, int, 0444);
-
-/**
- * DOC: prim_buf_per_se (int)
- * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
-module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
-
-/**
- * DOC: pos_buf_per_se (int)
- * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
-module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
-
-/**
- * DOC: cntl_sb_buf_per_se (int)
- * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
-module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
-
-/**
- * DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
- * The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
-module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
-
-/**
* DOC: job_hang_limit (int)
* Set how much time to allow a job to hang without dropping it. The default is 0.
*/
@@ -616,6 +591,16 @@ MODULE_PARM_DESC(noretry,
"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
module_param_named(noretry, amdgpu_noretry, int, 0644);
+/**
+ * DOC: force_asic_type (int)
+ * A non-negative value used to specify the asic type for all supported GPUs.
+ */
+MODULE_PARM_DESC(force_asic_type,
+ "A non-negative value used to specify the asic type for all supported GPUs");
+module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+
#ifdef CONFIG_HSA_AMD
/**
* DOC: sched_policy (int)
@@ -1023,6 +1008,7 @@ static const struct pci_device_id pciidlist[] = {
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
{0, 0, 0}
};
@@ -1085,7 +1071,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
#endif
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
if (ret)
return ret;
@@ -1128,7 +1114,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
+#ifdef MODULE
+ if (THIS_MODULE->state != MODULE_STATE_GOING)
+#endif
+ DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
drm_dev_put(dev);
pci_disable_device(pdev);
@@ -1141,6 +1130,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = dev->dev_private;
+ if (amdgpu_ras_intr_triggered())
+ return;
+
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -1175,8 +1167,13 @@ static int amdgpu_pmops_resume(struct device *dev)
static int amdgpu_pmops_freeze(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+ int r;
- return amdgpu_device_suspend(drm_dev, false, true);
+ r = amdgpu_device_suspend(drm_dev, false, true);
+ if (r)
+ return r;
+ return amdgpu_asic_reset(adev);
}
static int amdgpu_pmops_thaw(struct device *dev)
@@ -1348,66 +1345,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
-{
- char *input = amdgpu_lockup_timeout;
- char *timeout_setting = NULL;
- int index = 0;
- long timeout;
- int ret = 0;
-
- /*
- * By default timeout for non compute jobs is 10000.
- * And there is no timeout enforced on compute jobs.
- */
- adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
-
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
-
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
-
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
- }
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1)
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- }
-
- return ret;
-}
-
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -1446,8 +1383,6 @@ static struct drm_driver kms_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 571a6dfb473e..61fcf247a638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -37,12 +37,14 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ drm_connector_list_iter_begin(dev, &iter);
/* walk the list and link encoders to connectors */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -55,6 +57,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
@@ -62,8 +65,10 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
@@ -72,6 +77,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
amdgpu_connector->devices, encoder->encoder_type);
}
}
+ drm_connector_list_iter_end(&iter);
}
struct drm_connector *
@@ -79,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->active_device & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->active_device & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_connector *
@@ -95,15 +106,20 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->devices & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->devices & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 23085b352cf2..377fe20bce23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -462,18 +462,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
timeout = adev->gfx_timeout;
break;
case AMDGPU_RING_TYPE_COMPUTE:
- /*
- * For non-sriov case, no timeout enforce
- * on compute ring by default. Unless user
- * specifies a timeout for compute ring.
- *
- * For sriov case, always use the timeout
- * as gfx ring
- */
- if (!amdgpu_sriov_vf(ring->adev))
- timeout = adev->compute_timeout;
- else
- timeout = adev->gfx_timeout;
+ timeout = adev->compute_timeout;
break;
case AMDGPU_RING_TYPE_SDMA:
timeout = adev->sdma_timeout;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5e8bdded265f..19705e399905 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+ struct page *dummy_page = ttm_bo_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8ceb44925947..4277125a79ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -527,13 +527,41 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error;
}
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
+/**
+ * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: GEM UAPI flags
+ *
+ * Returns the GEM UAPI flags translated into the hardware PTE flags for this ASIC.
+ */
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
+{
+ uint64_t pte_flag = 0;
+
+ if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ pte_flag |= AMDGPU_PTE_EXECUTABLE;
+ if (flags & AMDGPU_VM_PAGE_READABLE)
+ pte_flag |= AMDGPU_PTE_READABLE;
+ if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+ pte_flag |= AMDGPU_PTE_WRITEABLE;
+ if (flags & AMDGPU_VM_PAGE_PRT)
+ pte_flag |= AMDGPU_PTE_PRT;
+
+ if (adev->gmc.gmc_funcs->map_mtype)
+ pte_flag |= amdgpu_gmc_map_mtype(adev,
+ flags & AMDGPU_VM_MTYPE_MASK);
+
+ return pte_flag;
+}
+
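As a small, hedged illustration of what this helper returns (the flag names are
the existing UAPI and PTE definitions; the call mirrors the AMDGPU_VA_OP_MAP
path in the ioctl below, it is not new driver logic):

	/* Translate a readable+writable request before mapping it. */
	uint64_t va_flags = amdgpu_gem_va_map_flags(adev,
						    AMDGPU_VM_PAGE_READABLE |
						    AMDGPU_VM_PAGE_WRITEABLE);
	/* va_flags now holds AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE plus
	 * whatever bits the ASIC's map_mtype callback returns for the
	 * (default) MTYPE, and is passed on to amdgpu_vm_bo_map().
	 */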
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -613,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@@ -631,7 +659,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
@@ -646,7 +674,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 0b66d2e6b5d5..e0f025dd1b14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f9bef3154b99..e00b46180d2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
+#include "amdgpu_ras.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -231,12 +232,10 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe, me;
+ int i, queue, me;
for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
queue = i % adev->gfx.me.num_queue_per_pipe;
- pipe = (i / adev->gfx.me.num_queue_per_pipe)
- % adev->gfx.me.num_pipe_per_me;
me = (i / adev->gfx.me.num_queue_per_pipe)
/ adev->gfx.me.num_pipe_per_me;
@@ -320,8 +319,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
return r;
}
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq)
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_ring_fini(ring);
@@ -456,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
}
ring = &adev->gfx.kiq.ring;
- if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring)
- kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
@@ -569,3 +565,102 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "gfx_err_count",
+ .debugfs_name = "gfx_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ if (!adev->gfx.ras_if) {
+ adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gfx.ras_if)
+ return -ENOMEM;
+ adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
+ adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if->sub_block_index = 0;
+ strcpy(adev->gfx.ras_if->name, "gfx");
+ }
+ fs_info.head = ih_info.head = *adev->gfx.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ /* free gfx ras_if if ras is not supported */
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
+free:
+ kfree(adev->gfx.ras_if);
+ adev->gfx.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
+ adev->gfx.ras_if) {
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ /* TODO: a UE (uncorrectable error) will trigger an interrupt.
+ *
+ * When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood.
+ */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->gfx.funcs->query_ras_error_count)
+ adev->gfx.funcs->query_ras_error_count(adev, err_data);
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ DRM_ERROR("CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
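A hedged sketch of how a GFX IP block might wire this common handler into its
interrupt source (the gfx_v9_0_* names are placeholders for illustration, not
taken from this patch; the struct shape is the usual amdgpu_irq_src_funcs
pattern):

	static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
		.set = gfx_v9_0_set_cp_ecc_error_state,	/* IP-specific enable/disable */
		.process = amdgpu_gfx_cp_ecc_error_irq,	/* common dispatch above */
	};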
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6d19183b478b..0ae0a2715b0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -201,28 +201,6 @@ struct amdgpu_gfx_funcs {
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
};
-struct amdgpu_ngg_buf {
- struct amdgpu_bo *bo;
- uint64_t gpu_addr;
- uint32_t size;
- uint32_t bo_size;
-};
-
-enum {
- NGG_PRIM = 0,
- NGG_POS,
- NGG_CNTL,
- NGG_PARAM,
- NGG_BUF_MAX
-};
-
-struct amdgpu_ngg {
- struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
- uint32_t gds_reserve_addr;
- uint32_t gds_reserve_size;
- bool init;
-};
-
struct sq_work {
struct work_struct work;
unsigned ih_data;
@@ -247,7 +225,7 @@ struct amdgpu_me {
uint32_t num_me;
uint32_t num_pipe_per_me;
uint32_t num_queue_per_pipe;
- void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1];
+ void *mqd_backup[AMDGPU_MAX_GFX_RINGS];
/* These are the resources for which amdgpu takes ownership */
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
@@ -312,9 +290,6 @@ struct amdgpu_gfx {
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- /* NGG */
- struct amdgpu_ngg ngg;
-
/* gfx off */
bool gfx_off_state; /* true: enabled, false: disabled */
struct mutex gfx_off_mutex;
@@ -356,8 +331,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq);
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq);
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
@@ -385,5 +359,12 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
-
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 5790db61fa2c..a12f33c0f5df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -27,6 +27,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/**
* amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
@@ -305,3 +307,29 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
gmc->fault_hash[hash].idx = gmc->last_fault++;
return false;
}
+
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
+ r = adev->umc.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
+ r = adev->mmhub.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ return amdgpu_xgmi_ras_late_init(adev);
+}
+
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
+{
+ amdgpu_umc_ras_fini(adev);
+ amdgpu_mmhub_ras_fini(adev);
+ amdgpu_xgmi_ras_fini(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b6e1d98ef01e..b499a3de8bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -77,6 +77,7 @@ struct amdgpu_gmc_fault {
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
+ uint32_t vm_inv_eng0_sem;
uint32_t vm_inv_eng0_req;
uint32_t vm_inv_eng0_ack;
uint32_t vm_context0_cntl;
@@ -99,12 +100,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* set pte flags based per asic */
- uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
- uint32_t flags);
+ /* map mtype to hardware flags */
+ uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
+ /* get the pte flags to use for a BO VA mapping */
+ void (*get_vm_pte)(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags);
};
struct amdgpu_xgmi {
@@ -120,21 +124,52 @@ struct amdgpu_xgmi {
/* gpu list in the same hive */
struct list_head head;
bool supported;
+ struct ras_common_if *ras_if;
};
struct amdgpu_gmc {
+ /* FB's physical address in MMIO space (for CPU to
+ * map FB). This is different compared to the agp/
+ * gart/vram_start/end fields as the latter are from
+ * GPU's view and aper_base is from CPU's view.
+ */
resource_size_t aper_size;
resource_size_t aper_base;
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
+ /* AGP aperture start and end in MC address space
+ * The driver finds a hole in the MC address space
+ * to place AGP by setting the MC_VM_AGP_BOT/TOP registers.
+ * Under VMID0, logical address == MC address. The AGP
+ * aperture maps to physical bus or IOVA addresses.
+ * The AGP aperture is used to simulate FB in the ZFB case.
+ * It is also used for page tables in system
+ * memory (mainly for APUs).
+ */
u64 agp_size;
u64 agp_start;
u64 agp_end;
+ /* GART aperture start and end in MC address space
+ * The driver finds a hole in the MC address space
+ * to place GART by setting the VM_CONTEXT0_PAGE_TABLE_START/END_ADDR
+ * registers.
+ * Under VMID0, logical addresses inside the GART aperture are
+ * translated through the gpuvm GART page table to access
+ * paged system memory.
+ */
u64 gart_size;
u64 gart_start;
u64 gart_end;
+ /* Frame buffer aperture of this GPU device. Different from
+ * fb_start (see below), this only covers the local GPU device.
+ * The driver gets fb_start from MC_VM_FB_LOCATION_BASE (set by the vbios)
+ * and calculates vram_start of this local device by adding an
+ * offset inside the XGMI hive.
+ * Under VMID0, logical address == MC address.
+ */
u64 vram_start;
u64 vram_end;
/* FB region , it's same as local vram region in single GPU, in XGMI
@@ -153,6 +188,7 @@ struct amdgpu_gmc {
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
+ uint8_t vram_vendor;
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
@@ -177,15 +213,14 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
- struct ras_common_if *umc_ras_if;
- struct ras_common_if *mmhub_ras_if;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
+#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
/**
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -230,5 +265,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 53734da1c2df..6f9289735e31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if ((*id)->owner != vm->entity.fence_context ||
+ if ((*id)->owner != vm->direct.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
- if ((*id)->owner != vm->entity.fence_context)
+ if ((*id)->owner != vm->direct.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->entity.fence_context;
+ id->owner = vm->direct.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 2a3f5ec298db..30d540d23b77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -87,10 +87,13 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
struct drm_device *dev = adev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
mutex_lock(&mode_config->mutex);
- list_for_each_entry(connector, &mode_config->connector_list, head)
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
amdgpu_connector_hotplug(connector);
+ drm_connector_list_iter_end(&iter);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -153,6 +156,20 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
ret = amdgpu_ih_process(adev, &adev->irq.ih);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
+
+ /* For the hardware that cannot enable bif ring for both ras_controller_irq
+ * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
+ * registers to check whether the interrupt is triggered or not, and properly
+ * ack the interrupt if it is there.
+ */
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+
return ret;
}
@@ -228,10 +245,19 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.msi_enabled = false;
if (amdgpu_msi_ok(adev)) {
- int ret = pci_enable_msi(adev->pdev);
- if (!ret) {
+ int nvec = pci_msix_vec_count(adev->pdev);
+ unsigned int flags;
+
+ if (nvec <= 0) {
+ flags = PCI_IRQ_MSI;
+ } else {
+ flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ }
+ /* we only need one vector */
+ nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
+ if (nvec > 0) {
adev->irq.msi_enabled = true;
- dev_dbg(adev->dev, "amdgpu: using MSI.\n");
+ dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
}
}
@@ -254,7 +280,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
adev->irq.installed = true;
- r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
+ /* Use vector 0 for MSI-X */
+ r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
@@ -284,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
drm_irq_uninstall(adev->ddev);
adev->irq.installed = false;
if (adev->irq.msi_enabled)
- pci_disable_msi(adev->pdev);
+ pci_free_irq_vectors(adev->pdev);
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
}
@@ -369,7 +396,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
* amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector pointer
+ * @ih: interrupt ring instance
*
* Dispatches IRQ to IP blocks.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 96b2a31ccfed..4fb20e870e63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -248,6 +248,44 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
return fence;
}
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *s_entity = NULL;
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+ continue;
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list) {
+ while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_signal(&s_fence->scheduled);
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+ }
+ spin_unlock(&rq->lock);
+ }
+
+ /* Signal all jobs already scheduled to HW */
+ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+}
+
const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 51e62504c279..dc7ee9358dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -76,4 +76,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct dma_fence **fence);
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a73206784cba..b6db28a570c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -583,9 +583,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
+ vram_gtt.vram_cpu_accessible_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ vram_gtt.vram_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
@@ -598,15 +601,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
- mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ mem.cpu_accessible_vram.usable_heap_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
@@ -732,17 +738,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.vce_harvest_config = adev->vce.harvest_config;
dev_info.gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf;
-
- if (amdgpu_ngg) {
- dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
- dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
- dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
- dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
- dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
- dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
- dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
- dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
- }
dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
@@ -971,6 +966,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->delayed_init_work);
+
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_ERROR("RAS Intr triggered, device disabled!!");
+ return -EHWPOISON;
+ }
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..676c48c02d77
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "mmhub_err_count",
+ .debugfs_name = "mmhub_err_inject",
+ };
+
+ if (!adev->mmhub.ras_if) {
+ adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->mmhub.ras_if)
+ return -ENOMEM;
+ adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
+ adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if->sub_block_index = 0;
+ strcpy(adev->mmhub.ras_if->name, "mmhub");
+ }
+ ih_info.head = fs_info.head = *adev->mmhub.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) {
+ kfree(adev->mmhub.ras_if);
+ adev->mmhub.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
+ adev->mmhub.ras_if) {
+ struct ras_common_if *ras_if = adev->mmhub.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 2d75ecfa199b..1cd78940cf82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -23,9 +23,17 @@
struct amdgpu_mmhub_funcs {
void (*ras_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
};
+struct amdgpu_mmhub {
+ struct ras_common_if *ras_if;
+ const struct amdgpu_mmhub_funcs *funcs;
+};
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 31d4deb5d294..828b5167ff12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -51,438 +51,107 @@
#include "amdgpu_amdkfd.h"
/**
- * struct amdgpu_mn_node
+ * amdgpu_mn_invalidate_gfx - callback to notify about mm change
*
- * @it: interval node defining start-last of the affected address range
- * @bos: list of all BOs in the affected address range
- *
- * Manages all BOs which are affected of a certain range of address space.
- */
-struct amdgpu_mn_node {
- struct interval_tree_node it;
- struct list_head bos;
-};
-
-/**
- * amdgpu_mn_destroy - destroy the HMM mirror
- *
- * @work: previously sheduled work item
- *
- * Lazy destroys the notifier from a work item
- */
-static void amdgpu_mn_destroy(struct work_struct *work)
-{
- struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
- struct amdgpu_device *adev = amn->adev;
- struct amdgpu_mn_node *node, *next_node;
- struct amdgpu_bo *bo, *next_bo;
-
- mutex_lock(&adev->mn_lock);
- down_write(&amn->lock);
- hash_del(&amn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node,
- &amn->objects.rb_root, it.rb) {
- list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
- }
- kfree(node);
- }
- up_write(&amn->lock);
- mutex_unlock(&adev->mn_lock);
-
- hmm_mirror_unregister(&amn->mirror);
- kfree(amn);
-}
-
-/**
- * amdgpu_hmm_mirror_release - callback to notify about mm destruction
- *
- * @mirror: the HMM mirror (mm) this callback is about
- *
- * Shedule a work item to lazy destroy HMM mirror.
- */
-static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
-
- INIT_WORK(&amn->work, amdgpu_mn_destroy);
- schedule_work(&amn->work);
-}
-
-/**
- * amdgpu_mn_lock - take the write side lock for this notifier
- *
- * @mn: our notifier
- */
-void amdgpu_mn_lock(struct amdgpu_mn *mn)
-{
- if (mn)
- down_write(&mn->lock);
-}
-
-/**
- * amdgpu_mn_unlock - drop the write side lock for this notifier
- *
- * @mn: our notifier
- */
-void amdgpu_mn_unlock(struct amdgpu_mn *mn)
-{
- if (mn)
- up_write(&mn->lock);
-}
-
-/**
- * amdgpu_mn_read_lock - take the read side lock for this notifier
- *
- * @amn: our notifier
- */
-static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
-{
- if (blockable)
- down_read(&amn->lock);
- else if (!down_read_trylock(&amn->lock))
- return -EAGAIN;
-
- return 0;
-}
-
-/**
- * amdgpu_mn_read_unlock - drop the read side lock for this notifier
- *
- * @amn: our notifier
- */
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
-{
- up_read(&amn->lock);
-}
-
-/**
- * amdgpu_mn_invalidate_node - unmap all BOs of a node
- *
- * @node: the node with the BOs to unmap
- * @start: start of address range affected
- * @end: end of address range affected
+ * @mni: the range (mm) is about to update
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
*
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
-static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
- unsigned long start,
- unsigned long end)
+static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
- continue;
-
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- }
-}
-
-/**
- * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
- *
- * @mirror: the hmm_mirror (mm) is about to update
- * @update: the update start, end address
- *
- * Block for operations on BOs to finish and mark pages as accessed and
- * potentially dirty.
- */
-static int
-amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
- unsigned long start = update->start;
- unsigned long end = update->end;
- bool blockable = mmu_notifier_range_blockable(update);
- struct interval_tree_node *it;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- /* notification is exclusive, but interval is inclusive */
- end -= 1;
+ mutex_lock(&adev->notifier_lock);
- /* TODO we should be able to split locking for interval tree and
- * amdgpu_mn_invalidate_node
- */
- if (amdgpu_mn_read_lock(amn, blockable))
- return -EAGAIN;
+ mmu_interval_set_seq(mni, cur_seq);
- it = interval_tree_iter_first(&amn->objects, start, end);
- while (it) {
- struct amdgpu_mn_node *node;
-
- if (!blockable) {
- amdgpu_mn_read_unlock(amn);
- return -EAGAIN;
- }
-
- node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
-
- amdgpu_mn_invalidate_node(node, start, end);
- }
-
- amdgpu_mn_read_unlock(amn);
-
- return 0;
-}
-
-/**
- * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
- *
- * @mirror: the hmm_mirror (mm) is about to update
- * @update: the update start, end address
- *
- * We temporarily evict all BOs between start and end. This
- * necessitates evicting all user-mode queues of the process. The BOs
- * are restorted in amdgpu_mn_invalidate_range_end_hsa.
- */
-static int
-amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
- unsigned long start = update->start;
- unsigned long end = update->end;
- bool blockable = mmu_notifier_range_blockable(update);
- struct interval_tree_node *it;
-
- /* notification is exclusive, but interval is inclusive */
- end -= 1;
-
- if (amdgpu_mn_read_lock(amn, blockable))
- return -EAGAIN;
-
- it = interval_tree_iter_first(&amn->objects, start, end);
- while (it) {
- struct amdgpu_mn_node *node;
- struct amdgpu_bo *bo;
-
- if (!blockable) {
- amdgpu_mn_read_unlock(amn);
- return -EAGAIN;
- }
-
- node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
-
- list_for_each_entry(bo, &node->bos, mn_list) {
- struct kgd_mem *mem = bo->kfd_bo;
-
- if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
- start, end))
- amdgpu_amdkfd_evict_userptr(mem, amn->mm);
- }
- }
-
- amdgpu_mn_read_unlock(amn);
-
- return 0;
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ mutex_unlock(&adev->notifier_lock);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+ return true;
}
-/* Low bits of any reasonable mm pointer will be unused due to struct
- * alignment. Use these bits to make a unique key from the mm pointer
- * and notifier type.
- */
-#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
-
-static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
- [AMDGPU_MN_TYPE_GFX] = {
- .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
- .release = amdgpu_hmm_mirror_release
- },
- [AMDGPU_MN_TYPE_HSA] = {
- .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
- .release = amdgpu_hmm_mirror_release
- },
+static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
+ .invalidate = amdgpu_mn_invalidate_gfx,
};
/**
- * amdgpu_mn_get - create HMM mirror context
+ * amdgpu_mn_invalidate_hsa - callback to notify about mm change
*
- * @adev: amdgpu device pointer
- * @type: type of MMU notifier context
+ * @mni: the range (mm) is about to update
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
*
- * Creates a HMM mirror context for current->mm.
+ * We temporarily evict the BO attached to this range. This necessitates
+ * evicting all user-mode queues of the process.
*/
-struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type)
+static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct mm_struct *mm = current->mm;
- struct amdgpu_mn *amn;
- unsigned long key = AMDGPU_MN_KEY(mm, type);
- int r;
-
- mutex_lock(&adev->mn_lock);
- if (down_write_killable(&mm->mmap_sem)) {
- mutex_unlock(&adev->mn_lock);
- return ERR_PTR(-EINTR);
- }
-
- hash_for_each_possible(adev->mn_hash, amn, node, key)
- if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
- goto release_locks;
-
- amn = kzalloc(sizeof(*amn), GFP_KERNEL);
- if (!amn) {
- amn = ERR_PTR(-ENOMEM);
- goto release_locks;
- }
-
- amn->adev = adev;
- amn->mm = mm;
- init_rwsem(&amn->lock);
- amn->type = type;
- amn->objects = RB_ROOT_CACHED;
-
- amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
- r = hmm_mirror_register(&amn->mirror, mm);
- if (r)
- goto free_amn;
+ struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
+ if (!mmu_notifier_range_blockable(range))
+ return false;
-release_locks:
- up_write(&mm->mmap_sem);
- mutex_unlock(&adev->mn_lock);
+ mutex_lock(&adev->notifier_lock);
- return amn;
+ mmu_interval_set_seq(mni, cur_seq);
-free_amn:
- up_write(&mm->mmap_sem);
- mutex_unlock(&adev->mn_lock);
- kfree(amn);
+ amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
+ mutex_unlock(&adev->notifier_lock);
- return ERR_PTR(r);
+ return true;
}
+static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
+ .invalidate = amdgpu_mn_invalidate_hsa,
+};
+
/**
* amdgpu_mn_register - register a BO for notifier updates
*
* @bo: amdgpu buffer object
* @addr: userptr addr we should monitor
*
- * Registers an HMM mirror for the given BO at the specified address.
+ * Registers a mmu_notifier for the given BO at the specified address.
* Returns 0 on success, -ERRNO if anything goes wrong.
*/
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
- unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- enum amdgpu_mn_type type =
- bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
- struct amdgpu_mn *amn;
- struct amdgpu_mn_node *node = NULL, *new_node;
- struct list_head bos;
- struct interval_tree_node *it;
-
- amn = amdgpu_mn_get(adev, type);
- if (IS_ERR(amn))
- return PTR_ERR(amn);
-
- new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
- if (!new_node)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&bos);
-
- down_write(&amn->lock);
-
- while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
- kfree(node);
- node = container_of(it, struct amdgpu_mn_node, it);
- interval_tree_remove(&node->it, &amn->objects);
- addr = min(it->start, addr);
- end = max(it->last, end);
- list_splice(&node->bos, &bos);
- }
-
- if (!node)
- node = new_node;
- else
- kfree(new_node);
-
- bo->mn = amn;
-
- node->it.start = addr;
- node->it.last = end;
- INIT_LIST_HEAD(&node->bos);
- list_splice(&bos, &node->bos);
- list_add(&bo->mn_list, &node->bos);
-
- interval_tree_insert(&node->it, &amn->objects);
-
- up_write(&amn->lock);
-
- return 0;
+ if (bo->kfd_bo)
+ return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ addr, amdgpu_bo_size(bo),
+ &amdgpu_mn_hsa_ops);
+ return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_mn_gfx_ops);
}
/**
- * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
+ * amdgpu_mn_unregister - unregister a BO for notifier updates
*
* @bo: amdgpu buffer object
*
- * Remove any registration of HMM mirror updates from the buffer object.
+ * Remove any registration of mmu notifier updates from the buffer object.
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_mn *amn;
- struct list_head *head;
-
- mutex_lock(&adev->mn_lock);
-
- amn = bo->mn;
- if (amn == NULL) {
- mutex_unlock(&adev->mn_lock);
+ if (!bo->notifier.mm)
return;
- }
-
- down_write(&amn->lock);
-
- /* save the next list entry for later */
- head = bo->mn_list.next;
-
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
-
- if (list_empty(head)) {
- struct amdgpu_mn_node *node;
-
- node = container_of(head, struct amdgpu_mn_node, bos);
- interval_tree_remove(&node->it, &amn->objects);
- kfree(node);
- }
-
- up_write(&amn->lock);
- mutex_unlock(&adev->mn_lock);
-}
-
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
-void amdgpu_hmm_init_range(struct hmm_range *range)
-{
- if (range) {
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift = PAGE_SHIFT;
- }
+ mmu_interval_notifier_remove(&bo->notifier);
+ bo->notifier.mm = NULL;
}
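As a rough usage sketch (illustrative only, not part of the patch): a userptr path registers the BO once and unregisters it on teardown. The wrapper below and its call site are assumptions; only amdgpu_mn_register() and amdgpu_mn_unregister() come from the code above.

static int example_userptr_setup(struct amdgpu_bo *bo, unsigned long user_addr)
{
        int r;

        /* Register bo->notifier so CPU page-table invalidations covering
         * [user_addr, user_addr + amdgpu_bo_size(bo)) call back into
         * amdgpu_mn_invalidate_gfx() or amdgpu_mn_invalidate_hsa().
         */
        r = amdgpu_mn_register(bo, user_addr);
        if (r)
                return r;

        /* ... populate pages and submit work here ... */

        /* Drop the registration before the BO is finally freed. */
        amdgpu_mn_unregister(bo);
        return 0;
}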
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index b8ed68943625..a292238f75eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -30,63 +30,10 @@
#include <linux/workqueue.h>
#include <linux/interval_tree.h>
-enum amdgpu_mn_type {
- AMDGPU_MN_TYPE_GFX,
- AMDGPU_MN_TYPE_HSA,
-};
-
-/**
- * struct amdgpu_mn
- *
- * @adev: amdgpu device pointer
- * @mm: process address space
- * @type: type of MMU notifier
- * @work: destruction work item
- * @node: hash table node to find structure by adev and mn
- * @lock: rw semaphore protecting the notifier nodes
- * @objects: interval tree containing amdgpu_mn_nodes
- * @mirror: HMM mirror function support
- *
- * Data for each amdgpu device and process address space.
- */
-struct amdgpu_mn {
- /* constant after initialisation */
- struct amdgpu_device *adev;
- struct mm_struct *mm;
- enum amdgpu_mn_type type;
-
- /* only used on destruction */
- struct work_struct work;
-
- /* protected by adev->mn_lock */
- struct hlist_node node;
-
- /* objects protected by lock */
- struct rw_semaphore lock;
- struct rb_root_cached objects;
-
-#ifdef CONFIG_HMM_MIRROR
- /* HMM mirror */
- struct hmm_mirror mirror;
-#endif
-};
-
#if defined(CONFIG_HMM_MIRROR)
-void amdgpu_mn_lock(struct amdgpu_mn *mn);
-void amdgpu_mn_unlock(struct amdgpu_mn *mn);
-struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-void amdgpu_hmm_init_range(struct hmm_range *range);
#else
-static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
-static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
-static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type)
-{
- return NULL;
-}
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
new file mode 100644
index 000000000000..7d5c3a9de9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "pcie_bif_err_count",
+ .debugfs_name = "pcie_bif_err_inject",
+ };
+
+ if (!adev->nbio.ras_if) {
+ adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->nbio.ras_if)
+ return -ENOMEM;
+ adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if->sub_block_index = 0;
+ strcpy(adev->nbio.ras_if->name, "pcie_bif");
+ }
+ ih_info.head = fs_info.head = *adev->nbio.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
+ if (r)
+ goto late_fini;
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info);
+free:
+ kfree(adev->nbio.ras_if);
+ adev->nbio.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
+ adev->nbio.ras_if) {
+ struct ras_common_if *ras_if = adev->nbio.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
new file mode 100644
index 000000000000..919bd566ba3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_NBIO_H__
+#define __AMDGPU_NBIO_H__
+
+/*
+ * amdgpu nbio functions
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+ u32 ref_and_mask_sdma2;
+ u32 ref_and_mask_sdma3;
+ u32 ref_and_mask_sdma4;
+ u32 ref_and_mask_sdma5;
+ u32 ref_and_mask_sdma6;
+ u32 ref_and_mask_sdma7;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index, int doorbell_size);
+ void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_interrupt)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+ void (*remap_hdp_registers)(struct amdgpu_device *adev);
+ void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+ void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+ int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+ int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_nbio {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ struct amdgpu_irq_src ras_controller_irq;
+ struct amdgpu_irq_src ras_err_event_athub_irq;
+ struct ras_common_if *ras_if;
+ const struct amdgpu_nbio_funcs *funcs;
+};
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
+#endif
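For illustration, an ASIC-specific nbio backend fills struct amdgpu_nbio_funcs and can simply reuse the common helper from amdgpu_nbio.c as its RAS hook; the example_* names below are assumptions, only the structure fields and amdgpu_nbio_ras_late_init() are defined by this patch.

static const struct amdgpu_nbio_funcs example_nbio_funcs = {
        /* ... hdp flush, doorbell and clockgating callbacks for the ASIC ... */
        .ras_late_init = amdgpu_nbio_ras_late_init,
};

static void example_nbio_early_init(struct amdgpu_device *adev)
{
        /* Make the callbacks reachable through adev->nbio.funcs->... */
        adev->nbio.funcs = &example_nbio_funcs;
}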
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7289e1b4fb60..e3f16b49e970 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -343,6 +343,70 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
/**
+ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
+ *
+ * @adev: amdgpu device object
+ * @offset: offset of the BO
+ * @size: size of the BO
+ * @domain: where to place it
+ * @bo_ptr: used to initialize BOs in structures
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Creates a kernel BO at a specific offset in the address space of the domain.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ unsigned int i;
+ int r;
+
+ offset &= PAGE_MASK;
+ size = ALIGN(size, PAGE_SIZE);
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
+ NULL, cpu_addr);
+ if (r)
+ return r;
+
+ /*
+ * Remove the original mem node and create a new one at the requested
+ * position.
+ */
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo_ptr);
+
+ ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+
+ for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
+ (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
+ (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+ r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
+ &(*bo_ptr)->tbo.mem, &ctx);
+ if (r)
+ goto error;
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r)
+ goto error;
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+ return 0;
+
+error:
+ amdgpu_bo_unreserve(*bo_ptr);
+ amdgpu_bo_unref(bo_ptr);
+ return r;
+}
+
+/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
@@ -451,7 +515,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
{
struct ttm_operation_ctx ctx = {
.interruptible = (bp->type != ttm_bo_type_kernel),
- .no_wait_gpu = false,
+ .no_wait_gpu = bp->no_wait_gpu,
.resv = bp->resv,
.flags = bp->type != ttm_bo_type_kernel ?
TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
@@ -1059,7 +1123,10 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &bo->tbo);
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ return ttm_bo_mmap_obj(vma, &bo->tbo);
}
/**
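A minimal usage sketch for amdgpu_bo_create_kernel_at() (the offset and size below are placeholders, not values taken from the driver): pinning a buffer at a fixed VRAM location, e.g. for data the firmware expects at a known offset.

static int example_reserve_fixed_vram(struct amdgpu_device *adev,
                                      struct amdgpu_bo **bo)
{
        /* 64 KiB at VRAM offset 1 MiB; no CPU mapping requested. */
        return amdgpu_bo_create_kernel_at(adev, 0x100000, 0x10000,
                                          AMDGPU_GEM_DOMAIN_VRAM,
                                          bo, NULL);
}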
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 658f4c9779b7..36dec51d1ef1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -30,6 +30,9 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#ifdef CONFIG_MMU_NOTIFIER
+#include <linux/mmu_notifier.h>
+#endif
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS 3
@@ -41,6 +44,7 @@ struct amdgpu_bo_param {
u32 preferred_domain;
u64 flags;
enum ttm_bo_type type;
+ bool no_wait_gpu;
struct dma_resv *resv;
};
@@ -100,10 +104,12 @@ struct amdgpu_bo {
struct ttm_bo_kmap_obj dma_buf_vmap;
struct amdgpu_mn *mn;
- union {
- struct list_head mn_list;
- struct list_head shadow_list;
- };
+
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_interval_notifier notifier;
+#endif
+
+ struct list_head shadow_list;
struct kgd_mem *kfd_bo;
};
@@ -237,6 +243,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 03930313c263..f205f56e3358 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -161,7 +161,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
if (is_support_sw_smu(adev)) {
if (adev->smu.ppt_funcs->get_current_power_state)
- pm = amdgpu_smu_get_current_power_state(adev);
+ pm = smu_get_current_power_state(&adev->smu);
else
pm = adev->pm.dpm.user_state;
} else if (adev->powerplay.pp_funcs->get_current_power_state) {
@@ -805,8 +805,7 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
}
/**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
- * pp_dpm_pcie
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
*
* The amdgpu driver provides a sysfs API for adjusting what power levels
* are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
@@ -822,9 +821,15 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
*
* To manually adjust these states, first select manual using
* power_dpm_force_performance_level.
- * Secondly,Enter a new value for each level by inputing a string that
+ * Secondly, enter a new value for each level by inputting a string that
* contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
- * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6.
+ * E.g.,
+ *
+ * .. code-block:: bash
+ *
+ * echo "4 5 6" > pp_dpm_sclk
+ *
+ * will enable sclk levels 4, 5, and 6.
*
* NOTE: change to the dcefclk max dpm level is not supported now
*/
@@ -902,7 +907,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
@@ -949,7 +954,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
@@ -989,7 +994,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
@@ -1029,7 +1034,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
@@ -1069,7 +1074,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
@@ -1109,7 +1114,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
@@ -1301,7 +1306,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
if (is_support_sw_smu(adev))
- ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+ ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
if (!ret)
@@ -2010,7 +2015,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
uint32_t limit = 0;
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true);
+ smu_get_power_limit(&adev->smu, &limit, true, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2028,7 +2033,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
uint32_t limit = 0;
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false);
+ smu_get_power_limit(&adev->smu, &limit, false, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2196,9 +2201,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - fan1_input: fan speed in RPM
*
- * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
+ * - fan[1-\*]_target: Desired fan speed. Unit: revolution/min (RPM)
*
- * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
+ * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable 0: Disable
*
* hwmon interfaces for GPU clocks:
*
@@ -2825,6 +2830,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_dpm_sclk\n");
return ret;
}
+
+ /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_mclk.store = NULL;
+
+ dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_socclk.store = NULL;
+
+ dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_fclk.store = NULL;
+ }
+
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
if (ret) {
DRM_ERROR("failed to create device file pp_dpm_mclk\n");
@@ -3008,7 +3026,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
smu_handle_task(&adev->smu,
smu_dpm->dpm_level,
- AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ true);
} else {
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index a46090071034..2770cba56a6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -34,6 +34,8 @@
#include "psp_v11_0.h"
#include "psp_v12_0.h"
+#include "amdgpu_ras.h"
+
static void psp_set_funcs(struct amdgpu_device *adev);
static int psp_early_init(void *handle)
@@ -88,6 +90,17 @@ static int psp_sw_init(void *handle)
return ret;
}
+ ret = psp_mem_training_init(psp);
+ if (ret) {
+ DRM_ERROR("Failed to initialize memory training!\n");
+ return ret;
+ }
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
return 0;
}
@@ -95,6 +108,7 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ psp_mem_training_fini(&adev->psp);
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
@@ -151,10 +165,19 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
while (*((unsigned int *)psp->fence_buf) != index) {
if (--timeout == 0)
break;
+ /*
+ * Don't wait for the timeout when err_event_athub occurs, because
+ * the gpu reset thread has been triggered and the lock resource
+ * needs to be released for the psp resume sequence.
+ */
+ if (amdgpu_ras_intr_triggered())
+ break;
msleep(1);
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
/* In some cases, psp response status is not 0 even there is no
@@ -168,8 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
- DRM_WARN("psp command failed and response status is (0x%X)\n",
- psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
+ DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
if (!timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
@@ -253,7 +277,8 @@ static int psp_tmr_init(struct psp_context *psp)
/* For ASICs support RLC autoload, psp will parse the toc
* and calculate the total size of TMR needed */
- if (psp->toc_start_addr &&
+ if (!amdgpu_sriov_vf(psp->adev) &&
+ psp->toc_start_addr &&
psp->toc_bin_size &&
psp->fw_pri_buf) {
ret = psp_load_toc(psp, &tmr_size);
@@ -287,15 +312,9 @@ static int psp_tmr_load(struct psp_context *psp)
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
- if (ret)
- goto failed;
kfree(cmd);
- return 0;
-
-failed:
- kfree(cmd);
return ret;
}
@@ -548,7 +567,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
- if (!psp->adev->psp.ta_fw)
+ if (!psp->adev->psp.ta_fw ||
+ !psp->adev->psp.ta_xgmi_ucode_size ||
+ !psp->adev->psp.ta_xgmi_start_addr)
return -ENOENT;
if (!psp->xgmi_context.initialized) {
@@ -737,6 +758,12 @@ static int psp_ras_terminate(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
if (!psp->ras.ras_initialized)
return 0;
@@ -758,6 +785,18 @@ static int psp_ras_initialize(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_ras_ucode_size ||
+ !psp->adev->psp.ta_ras_start_addr) {
+ dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ return 0;
+ }
+
if (!psp->ras.ras_initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
@@ -772,6 +811,360 @@ static int psp_ras_initialize(struct psp_context *psp)
}
// ras end
+// HDCP start
+static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t hdcp_ta_mc,
+ uint64_t hdcp_mc_shared,
+ uint32_t hdcp_ta_size,
+ uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
+ lower_32_bits(hdcp_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
+ upper_32_bits(hdcp_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_hdcp_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for hdcp ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return ret;
+}
+
+static int psp_hdcp_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
+ psp->ta_hdcp_ucode_size);
+
+ psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->hdcp_context.hdcp_shared_mc_addr,
+ psp->ta_hdcp_ucode_size,
+ PSP_HDCP_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->hdcp_context.hdcp_initialized = 1;
+ psp->hdcp_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+static int psp_hdcp_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_hdcp_ucode_size ||
+ !psp->adev->psp.ta_hdcp_start_addr) {
+ dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ ret = psp_hdcp_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_hdcp_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t hdcp_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
+}
+
+static int psp_hdcp_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t hdcp_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_hdcp_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->hdcp_context.hdcp_initialized)
+ return 0;
+
+ ret = psp_hdcp_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->hdcp_context.hdcp_initialized = 0;
+
+ /* free hdcp shared memory */
+ amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return 0;
+}
+// HDCP end
+
+// DTM start
+static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t dtm_ta_mc,
+ uint64_t dtm_mc_shared,
+ uint32_t dtm_ta_size,
+ uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_dtm_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for dtm ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return ret;
+}
+
+static int psp_dtm_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+
+ psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->dtm_context.dtm_shared_mc_addr,
+ psp->ta_dtm_ucode_size,
+ PSP_DTM_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->dtm_context.dtm_initialized = 1;
+ psp->dtm_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_dtm_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_dtm_ucode_size ||
+ !psp->adev->psp.ta_dtm_start_addr) {
+ dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->dtm_context.dtm_initialized) {
+ ret = psp_dtm_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_dtm_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t dtm_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->dtm_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_dtm_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->dtm_context.dtm_initialized)
+ return 0;
+
+ ret = psp_hdcp_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->dtm_context.dtm_initialized = 0;
+
+ /* free dtm shared memory */
+ amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return 0;
+}
+// DTM end
+
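For illustration, a caller (for example the display HDCP code) drives the loaded TA through the invoke helper added above; the wrapper and the cmd_id value are placeholders, the real command ids come from the TA interface headers.

static int example_send_hdcp_command(struct psp_context *psp, uint32_t cmd_id)
{
        int ret;

        /* The request/response payload travels through the shared buffer
         * allocated in psp_hdcp_init_shared_buf(); cmd_id selects which
         * HDCP TA operation to run.
         */
        ret = psp_hdcp_invoke(psp, cmd_id);
        if (ret)
                DRM_ERROR("HDCP TA command 0x%x failed (%d)\n", cmd_id, ret);

        return ret;
}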
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -845,6 +1238,16 @@ static int psp_hw_start(struct psp_context *psp)
if (ret)
dev_err(psp->adev->dev,
"RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
}
return 0;
@@ -1064,7 +1467,10 @@ out:
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
/*skip ucode loading in SRIOV VF */
continue;
@@ -1073,10 +1479,6 @@ out:
ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
/* skip mec JT when autoload is enabled */
continue;
- /* Renoir only needs to load mec jump table one time */
- if (adev->asic_type == CHIP_RENOIR &&
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)
- continue;
psp_print_fw_hdr(psp, ucode);
@@ -1085,7 +1487,8 @@ out:
return ret;
 /* Start rlc autoload after psp received all the gfx firmware */
- if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+ if (psp->autoload_supported && ucode->ucode_id ==
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
ret = psp_rlc_autoload(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -1210,8 +1613,11 @@ static int psp_hw_fini(void *handle)
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
- if (psp->adev->psp.ta_fw)
+ if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_dtm_terminate(psp);
+ psp_hdcp_terminate(psp);
+ }
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
@@ -1253,6 +1659,16 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate ras ta\n");
return ret;
}
+ ret = psp_hdcp_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate hdcp ta\n");
+ return ret;
+ }
+ ret = psp_dtm_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate dtm ta\n");
+ return ret;
+ }
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
@@ -1272,6 +1688,12 @@ static int psp_resume(void *handle)
DRM_INFO("PSP is resuming...\n");
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
mutex_lock(&adev->firmware.mutex);
ret = psp_hw_start(psp);
@@ -1311,9 +1733,6 @@ int psp_rlc_autoload_start(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd;
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index bc0947f6bc8a..09c5474ebcc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -37,6 +37,9 @@
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
+#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
+#define PSP_DTM_SHARED_MEM_SIZE 0x4000
+#define PSP_SHARED_MEM_SIZE 0x4000
struct psp_context;
struct psp_xgmi_node_info;
@@ -46,6 +49,8 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_SYSDRV = 0x10000,
PSP_BL__LOAD_SOSDRV = 0x20000,
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
+ PSP_BL__DRAM_LONG_TRAIN = 0x100000,
+ PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
};
enum psp_ring_type
@@ -108,6 +113,9 @@ struct psp_funcs
struct ta_ras_trigger_error_input *info);
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
int (*rlc_autoload_start)(struct psp_context *psp);
+ int (*mem_training_init)(struct psp_context *psp);
+ void (*mem_training_fini)(struct psp_context *psp);
+ int (*mem_training)(struct psp_context *psp, uint32_t ops);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -142,6 +150,65 @@ struct psp_ras_context {
struct amdgpu_ras *ras;
};
+struct psp_hdcp_context {
+ bool hdcp_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *hdcp_shared_bo;
+ uint64_t hdcp_shared_mc_addr;
+ void *hdcp_shared_buf;
+};
+
+struct psp_dtm_context {
+ bool dtm_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *dtm_shared_bo;
+ uint64_t dtm_shared_mc_addr;
+ void *dtm_shared_buf;
+};
+
+#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
+#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
+#define GDDR6_MEM_TRAINING_OFFSET 0x8000
+
+enum psp_memory_training_init_flag {
+ PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
+ PSP_MEM_TRAIN_SUPPORT = 0x1,
+ PSP_MEM_TRAIN_INIT_FAILED = 0x2,
+ PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4,
+ PSP_MEM_TRAIN_INIT_SUCCESS = 0x8,
+};
+
+enum psp_memory_training_ops {
+ PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1,
+ PSP_MEM_TRAIN_SAVE = 0x2,
+ PSP_MEM_TRAIN_RESTORE = 0x4,
+ PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8,
+ PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG,
+ PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG,
+};
+
+struct psp_memory_training_context {
+ /*training data size*/
+ u64 train_data_size;
+ /*
+ * sys_cache
+ * cpu virtual address
+ * system memory buffer used to store the training data.
+ */
+ void *sys_cache;
+
+ /*vram offset of the p2c training data*/
+ u64 p2c_train_data_offset;
+ struct amdgpu_bo *p2c_bo;
+
+ /*vram offset of the c2p training data*/
+ u64 c2p_train_data_offset;
+ struct amdgpu_bo *c2p_bo;
+
+ enum psp_memory_training_init_flag init;
+ u32 training_cnt;
+};
+
struct psp_context
{
struct amdgpu_device *adev;
@@ -206,9 +273,21 @@ struct psp_context
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_ucode_size;
uint8_t *ta_ras_start_addr;
+
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_ucode_size;
+ uint8_t *ta_hdcp_start_addr;
+
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_ucode_size;
+ uint8_t *ta_dtm_start_addr;
+
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
+ struct psp_hdcp_context hdcp_context;
+ struct psp_dtm_context dtm_context;
struct mutex mutex;
+ struct psp_memory_training_context mem_train_ctx;
};
struct amdgpu_psp_funcs {
@@ -251,6 +330,12 @@ struct amdgpu_psp_funcs {
(psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
#define psp_rlc_autoload(psp) \
((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
+#define psp_mem_training_init(psp) \
+ ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
+#define psp_mem_training_fini(psp) \
+ ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0)
+#define psp_mem_training(psp, ops) \
+ ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
@@ -279,6 +364,8 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
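To show how the new mem_training hooks are consumed, a hedged sketch of an ASIC psp backend wiring them up; the example_* implementations are stand-ins, only the struct psp_funcs fields and the psp_mem_training*() macros come from this patch.

static int example_mem_training_init(struct psp_context *psp) { return 0; }
static void example_mem_training_fini(struct psp_context *psp) { }
static int example_mem_training(struct psp_context *psp, uint32_t ops) { return 0; }

static const struct psp_funcs example_psp_funcs = {
        /* ... bootloader, ring and TA callbacks ... */
        .mem_training_init = example_mem_training_init,
        .mem_training_fini = example_mem_training_fini,
        .mem_training = example_mem_training,
};

With these hooks set, psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT) in psp_sw_init() and psp_mem_training(psp, PSP_MEM_TRAIN_RESUME) in psp_resume() dispatch into the ASIC implementation; ASICs that leave the hooks NULL fall through to the no-op branch of the macros.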
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 016ea274b955..404483437bd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -25,10 +25,13 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
"none",
@@ -65,11 +68,16 @@ const char *ras_block_string[] = {
/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr);
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr);
+enum amdgpu_ras_retire_page_reservation {
+ AMDGPU_RAS_RETIRE_PAGE_RESERVED,
+ AMDGPU_RAS_RETIRE_PAGE_PENDING,
+ AMDGPU_RAS_RETIRE_PAGE_FAULT,
+};
+
+atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
+
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr);
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
@@ -189,6 +197,10 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return 0;
}
+
+static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
/**
* DOC: AMDGPU RAS debugfs control interface
*
@@ -208,31 +220,44 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* As their names indicate, inject operation will write the
* value to the address.
*
- * Second member: struct ras_debug_if::op.
+ * The second member: struct ras_debug_if::op.
* It has three kinds of operations.
- * 0: disable RAS on the block. Take ::head as its data.
- * 1: enable RAS on the block. Take ::head as its data.
- * 2: inject errors on the block. Take ::inject as its data.
+ *
+ * - 0: disable RAS on the block. Take ::head as its data.
+ * - 1: enable RAS on the block. Take ::head as its data.
+ * - 2: inject errors on the block. Take ::inject as its data.
*
* How to use the interface?
- * programs:
- * copy the struct ras_debug_if in your codes and initialize it.
- * write the struct to the control node.
*
- * bash:
- * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl
- * op: disable, enable, inject
- * disable: only block is needed
- * enable: block and error are needed
- * inject: error, address, value are needed
- * block: umc, smda, gfx, .........
- * see ras_block_string[] for details
- * error: ue, ce
- * ue: multi_uncorrectable
- * ce: single_correctable
- * sub_block: sub block index, pass 0 if there is no sub block
+ * Programs
+ *
+ * Copy the struct ras_debug_if in your codes and initialize it.
+ * Write the struct to the control node.
+ *
+ * Shells
+ *
+ * .. code-block:: bash
+ *
+ * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
+ *
+ * Parameters:
+ *
+ * op: disable, enable, inject
+ * disable: only block is needed
+ * enable: block and error are needed
+ * inject: error, address, value are needed
+ * block: umc, sdma, gfx, .........
+ * see ras_block_string[] for details
+ * error: ue, ce
+ * ue: multi_uncorrectable
+ * ce: single_correctable
+ * sub_block:
+ * sub block index, pass 0 if there is no sub block
+ *
+ * Here are some examples of bash commands:
+ *
+ * .. code-block:: bash
*
- * here are some examples for bash commands,
* echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
@@ -245,8 +270,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* For inject, please check corresponding err count at
* /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
*
- * NOTE: operation is only allowed on blocks which are supported.
- * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * .. note::
+ * Operations are only allowed on blocks which are supported.
+ * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * to see which blocks support RAS on a particular asic.
+ *
*/
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
@@ -276,6 +304,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
break;
}
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
+ amdgpu_ras_check_bad_page(adev, data.inject.address)) {
+ DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ data.inject.address);
+ break;
+ }
+
/* data.inject.address is offset instead of absolute gpu address */
ret = amdgpu_ras_error_inject(adev, &data.inject);
break;
@@ -290,6 +326,33 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
return size;
}
+/**
+ * DOC: AMDGPU RAS debugfs EEPROM table reset interface
+ *
+ * Some boards contain an EEPROM which is used to persistently store a list of
+ * bad pages which have experienced ECC errors in vram. This interface provides
+ * a way to reset the EEPROM, e.g., after testing error injection.
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo 1 > ../ras/ras_eeprom_reset
+ *
+ * will reset EEPROM table to 0 entries.
+ *
+ */
+static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ int ret;
+
+ ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
+
+ return ret == 1 ? size : -EIO;
+}
+
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.owner = THIS_MODULE,
.read = NULL,
@@ -297,6 +360,34 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_eeprom_write,
+ .llseek = default_llseek
+};
+
+/**
+ * DOC: AMDGPU RAS sysfs Error Count Interface
+ *
+ * It allows the user to read the error count for each IP block on the gpu through
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * It outputs multiple lines which report the uncorrected (ue) and corrected
+ * (ce) error counts.
+ *
+ * The format of one line is below,
+ *
+ * [ce|ue]: count
+ *
+ * Example:
+ *
+ * .. code-block:: bash
+ *
+ * ue: 0
+ * ce: 1
+ *
+ */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -475,15 +566,17 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
return 0;
- ret = psp_ras_enable_features(&adev->psp, &info, enable);
- if (ret) {
- DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
- enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
- if (ret == TA_RAS_STATUS__RESET_NEEDED)
- return -EAGAIN;
- return -EINVAL;
+ if (!amdgpu_ras_intr_triggered()) {
+ ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ if (ret) {
+ DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ ret);
+ if (ret == TA_RAS_STATUS__RESET_NEEDED)
+ return -EAGAIN;
+ return -EINVAL;
+ }
}
/* setup the obj */
@@ -615,8 +708,12 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
adev->gfx.funcs->query_ras_error_count(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub_funcs->query_ras_error_count)
- adev->mmhub_funcs->query_ras_error_count(adev, &err_data);
+ if (adev->mmhub.funcs->query_ras_error_count)
+ adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ if (adev->nbio.funcs->query_ras_error_count)
+ adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
default:
break;
@@ -628,12 +725,14 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
- if (err_data.ce_count)
+ if (err_data.ce_count) {
dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
obj->err_data.ce_count, ras_block_str(info->head.block));
- if (err_data.ue_count)
+ }
+ if (err_data.ue_count) {
dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
obj->err_data.ue_count, ras_block_str(info->head.block));
+ }
return 0;
}
@@ -664,6 +763,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
case AMDGPU_RAS_BLOCK__UMC:
case AMDGPU_RAS_BLOCK__MMHUB:
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
default:
@@ -723,18 +824,18 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
switch (flags) {
- case 0:
+ case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
return "R";
- case 1:
+ case AMDGPU_RAS_RETIRE_PAGE_PENDING:
return "P";
- case 2:
+ case AMDGPU_RAS_RETIRE_PAGE_FAULT:
default:
return "F";
};
}
-/*
- * DOC: ras sysfs gpu_vram_bad_pages interface
+/**
+ * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
*
* It allows user to read the bad pages of vram on the gpu through
* /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
@@ -746,14 +847,21 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*
* gpu pfn and gpu page size are printed in hex format.
* flags can be one of the following characters:
+ *
* R: reserved, this gpu page is reserved and not able to be used.
+ *
* P: pending for reserve, this gpu page is marked as bad and will be reserved
- * in next window of page_reserve.
+ * in the next window of page_reserve.
+ *
* F: unable to reserve. this gpu page can't be reserved for some reason.
*
- * examples:
- * 0x00000001 : 0x00001000 : R
- * 0x00000002 : 0x00001000 : P
+ * Examples:
+ *
+ * .. code-block:: bash
+ *
+ * 0x00000001 : 0x00001000 : R
+ * 0x00000002 : 0x00001000 : P
+ *
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
@@ -927,6 +1035,24 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
}
/* sysfs end */
+/**
+ * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
+ *
+ * Normally when there is an uncorrectable error, the driver will reset
+ * the GPU to recover. However, in the event of an unrecoverable error,
+ * the driver provides an interface to reboot the system automatically
+ * instead.
+ *
+ * The following file in debugfs provides that interface:
+ * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo true > .../ras/auto_reboot
+ *
+ */
/* debugfs begin */
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
@@ -934,8 +1060,21 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
con->dir = debugfs_create_dir("ras", minor->debugfs_root);
- con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_eeprom_ops);
+
+ /*
+ * After one uncorrectable error happens, GPU recovery is usually
+ * scheduled. But there is a known problem where GPU recovery fails to
+ * bring the GPU back, so the interface below gives the user a direct
+ * way to have the system reboot automatically when an
+ * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal GPU
+ * recovery routine is never called.
+ */
+ debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
+ &con->reboot);
}
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@@ -980,10 +1119,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
- debugfs_remove(con->ent);
- debugfs_remove(con->dir);
+ debugfs_remove_recursive(con->dir);
con->dir = NULL;
- con->ent = NULL;
}
/* debugfs end */
@@ -1188,15 +1325,15 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
for (; i < data->count; i++) {
(*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].bp,
+ .bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
- .flags = 0,
+ .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
if (data->last_reserved <= i)
- (*bps)[i].flags = 1;
- else if (data->bps[i].bo == NULL)
- (*bps)[i].flags = 2;
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (data->bps_bo[i] == NULL)
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
*count = data->count;
@@ -1214,105 +1351,46 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
atomic_set(&ras->in_recovery, 0);
}
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr)
-{
- /* no need to free it actually. */
- amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
- return 0;
-}
-
-/* reserve vram with size@offset */
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- struct amdgpu_bo *bo;
-
- if (bo_ptr)
- *bo_ptr = NULL;
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
-
- r = amdgpu_bo_create(adev, &bp, &bo);
- if (r)
- return -EINVAL;
-
- r = amdgpu_bo_reserve(bo, false);
- if (r)
- goto error_reserve;
-
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
-
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
-
- r = amdgpu_bo_pin_restricted(bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- offset,
- offset + size);
- if (r)
- goto error_pin;
-
- if (bo_ptr)
- *bo_ptr = bo;
-
- amdgpu_bo_unreserve(bo);
- return r;
-
-error_pin:
- amdgpu_bo_unreserve(bo);
-error_reserve:
- amdgpu_bo_unref(&bo);
- return r;
-}
-
/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
struct ras_err_handler_data *data, int pages)
{
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
- unsigned int align_space = ALIGN(new_space, 1024);
- void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
-
- if (!tmp)
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ struct amdgpu_bo **bps_bo =
+ kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);
+
+ if (!bps || !bps_bo) {
+ kfree(bps);
+ kfree(bps_bo);
return -ENOMEM;
+ }
if (data->bps) {
- memcpy(tmp, data->bps,
+ memcpy(bps, data->bps,
data->count * sizeof(*data->bps));
kfree(data->bps);
}
+ if (data->bps_bo) {
+ memcpy(bps_bo, data->bps_bo,
+ data->count * sizeof(*data->bps_bo));
+ kfree(data->bps_bo);
+ }
- data->bps = tmp;
+ data->bps = bps;
+ data->bps_bo = bps_bo;
data->space_left += align_space - old_space;
return 0;
}
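+/*
+ * Illustrative sizing example (editor's note, not part of the original patch):
+ * with data->count == 100, space_left == 0 and pages == 10, new_space is 110,
+ * align_space rounds up to 512, and space_left becomes 412.
+ */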
/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages)
+ struct eeprom_table_record *bps, int pages)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = pages;
int ret = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
@@ -1329,24 +1407,120 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- while (i--)
- data->bps[data->count++].bp = bps[i];
-
+ memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
+ data->count += pages;
data->space_left -= pages;
+
out:
mutex_unlock(&con->recovery_lock);
return ret;
}
+/*
+ * write error record array to eeprom; the function should be
+ * protected by recovery_lock
+ */
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_ras_eeprom_control *control;
+ int save_count;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ control = &con->eeprom_control;
+ data = con->eh_data;
+ save_count = data->count - control->num_recs;
+ /* only new entries are saved */
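+ /* e.g. data->count == 5 with control->num_recs == 3 writes only
+ * records 3 and 4 (illustrative note, not part of the original patch) */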
+ if (save_count > 0)
+ if (amdgpu_ras_eeprom_process_recods(control,
+ &data->bps[control->num_recs],
+ true,
+ save_count)) {
+ DRM_ERROR("Failed to save EEPROM table data!");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * read error record array from eeprom and reserve enough space for
+ * storing new bad pages
+ */
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras.ras->eeprom_control;
+ struct eeprom_table_record *bps = NULL;
+ int ret = 0;
+
+ /* no bad page record, skip eeprom access */
+ if (!control->num_recs)
+ return ret;
+
+ bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ if (amdgpu_ras_eeprom_process_recods(control, bps, false,
+ control->num_recs)) {
+ DRM_ERROR("Failed to load EEPROM table records!");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
+
+out:
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * check if an address belongs to a bad page
+ *
+ * Note: this check is only for umc block
+ */
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i;
+ bool ret = false;
+
+ if (!con || !con->eh_data)
+ return ret;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ addr >>= AMDGPU_GPU_PAGE_SHIFT;
+ for (i = 0; i < data->count; i++)
+ if (addr == data->bps[i].retired_page) {
+ ret = true;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&con->recovery_lock);
+ return ret;
+}
+
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
uint64_t bp;
- struct amdgpu_bo *bo;
- int i;
+ struct amdgpu_bo *bo = NULL;
+ int i, ret = 0;
if (!con || !con->eh_data)
return 0;
@@ -1357,18 +1531,29 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
goto out;
/* reserve vram at driver post stage. */
for (i = data->last_reserved; i < data->count; i++) {
- bp = data->bps[i].bp;
+ bp = data->bps[i].retired_page;
- if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
- PAGE_SIZE, &bo))
- DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+ /* There are two cases where a reserve error should be ignored:
+ * 1) a ras bad page has been allocated (used by someone);
+ * 2) a ras bad page has been reserved (duplicate error injection
+ * for one page);
+ */
+ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL))
+ DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i + 1;
+ bo = NULL;
}
+
+ /* continue to save bad pages to eeprom even if reserve_vram fails */
+ ret = amdgpu_ras_save_bad_pages(adev);
out:
mutex_unlock(&con->recovery_lock);
- return 0;
+ return ret;
}
/* called when the driver is unloaded */
@@ -1388,11 +1573,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
goto out;
for (i = data->last_reserved - 1; i >= 0; i--) {
- bo = data->bps[i].bo;
+ bo = data->bps_bo[i];
- amdgpu_ras_release_vram(adev, &bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i;
}
out:
@@ -1400,41 +1585,54 @@ out:
return 0;
}
-static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * write the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * read the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data **data = &con->eh_data;
+ struct ras_err_handler_data **data;
+ int ret;
- *data = kmalloc(sizeof(**data),
- GFP_KERNEL|__GFP_ZERO);
- if (!*data)
- return -ENOMEM;
+ if (con)
+ data = &con->eh_data;
+ else
+ return 0;
+
+ *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
+ if (!*data) {
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_init(&con->recovery_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
con->adev = adev;
- amdgpu_ras_load_bad_pages(adev);
- amdgpu_ras_reserve_bad_pages(adev);
+ ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
+ if (ret)
+ goto free;
+
+ if (con->eeprom_control.num_recs) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret)
+ goto free;
+ ret = amdgpu_ras_reserve_bad_pages(adev);
+ if (ret)
+ goto release;
+ }
return 0;
+
+release:
+ amdgpu_ras_release_bad_pages(adev);
+free:
+ kfree((*data)->bps);
+ kfree((*data)->bps_bo);
+ kfree(*data);
+ con->eh_data = NULL;
+out:
+ DRM_WARN("Failed to initialize ras recovery!\n");
+
+ return ret;
}
static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
@@ -1442,13 +1640,17 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data = con->eh_data;
+ /* recovery_init failed to init it, so there is nothing to clean up */
+ if (!data)
+ return 0;
+
cancel_work_sync(&con->recovery_work);
- amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_release_bad_pages(adev);
mutex_lock(&con->recovery_lock);
con->eh_data = NULL;
kfree(data->bps);
+ kfree(data->bps_bo);
kfree(data);
mutex_unlock(&con->recovery_lock);
@@ -1500,6 +1702,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int r;
if (con)
return 0;
@@ -1527,31 +1730,106 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* Might need get this flag from vbios. */
con->flags = RAS_DEFAULT_FLAGS;
- if (amdgpu_ras_recovery_init(adev))
- goto recovery_out;
+ if (adev->nbio.funcs->init_ras_controller_interrupt) {
+ r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+ if (r)
+ return r;
+ }
amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- /* ras init for each ras block */
- if (adev->umc.funcs->ras_init)
- adev->umc.funcs->ras_init(adev);
-
DRM_INFO("RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
fs_out:
- amdgpu_ras_recovery_fini(adev);
-recovery_out:
amdgpu_ras_set_context(adev, NULL);
kfree(con);
return -EINVAL;
}
+/* helper function to handle common stuff in ip late init phase */
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info)
+{
+ int r;
+
+ /* disable RAS feature per IP block if it is not supported */
+ if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
+ amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+ return 0;
+ }
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
+ if (r) {
+ if (r == -EAGAIN) {
+ /* request gpu reset. will run again */
+ amdgpu_ras_request_reset_on_boot(adev,
+ ras_block->block);
+ return 0;
+ } else if (adev->in_suspend || adev->in_gpu_reset) {
+ /* in resume phase, if enabling ras fails,
+ * clean up all ras fs nodes and disable ras */
+ goto cleanup;
+ } else
+ return r;
+ }
+
+ /* in resume phase, no need to create ras fs node */
+ if (adev->in_suspend || adev->in_gpu_reset)
+ return 0;
+
+ if (ih_info->cb) {
+ r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
+ if (r)
+ goto interrupt;
+ }
+
+ amdgpu_ras_debugfs_create(adev, fs_info);
+
+ r = amdgpu_ras_sysfs_create(adev, fs_info);
+ if (r)
+ goto sysfs;
+
+ return 0;
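+ /* error unwinding: execution falls through the labels below so that
+ * only the steps that already succeeded are undone (illustrative note,
+ * not part of the original patch)
+ */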
+cleanup:
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+ return r;
+}
+
+/* helper function to remove ras fs node and interrupt handler */
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info)
+{
+ if (!ras_block || !ih_info)
+ return;
+
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+}
+
/* do some init work after IP late init as a dependence;
* it runs in the resume/gpu reset/boot-up cases.
*/
@@ -1645,3 +1923,18 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
return 0;
}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+{
+ uint32_t hw_supported, supported;
+
+ amdgpu_ras_check_supported(adev, &hw_supported, &supported);
+ if (!hw_supported)
+ return;
+
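+ /* the cmpxchg lets only the first ATHUB interrupt schedule the reset;
+ * later interrupts see amdgpu_ras_in_intr already set (illustrative
+ * note, not part of the original patch)
+ */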
+ if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
+ DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+
+ amdgpu_ras_reset_gpu(adev, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6c76bb2a6843..f80fd3428c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -317,8 +317,6 @@ struct amdgpu_ras {
struct list_head head;
/* debugfs */
struct dentry *dir;
- /* debugfs ctrl */
- struct dentry *ent;
/* sysfs */
struct device_attribute features_attr;
struct bin_attribute badpages_attr;
@@ -334,7 +332,7 @@ struct amdgpu_ras {
struct mutex recovery_lock;
uint32_t flags;
-
+ bool reboot;
struct amdgpu_ras_eeprom_control eeprom_control;
};
@@ -347,15 +345,14 @@ struct ras_err_data {
unsigned long ue_count;
unsigned long ce_count;
unsigned long err_addr_cnt;
- uint64_t *err_addr;
+ struct eeprom_table_record *err_addr;
};
struct ras_err_handler_data {
- /* point to bad pages array */
- struct {
- unsigned long bp;
- struct amdgpu_bo *bo;
- } *bps;
+ /* point to bad page records array */
+ struct eeprom_table_record *bps;
+ /* point to reserved bo array */
+ struct amdgpu_bo **bps_bo;
/* the count of entries */
int count;
/* the space can place new entries */
@@ -365,7 +362,7 @@ struct ras_err_handler_data {
};
typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry);
struct ras_ih_data {
@@ -481,6 +478,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
return ras && (ras->supported & (1 << block));
}
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
unsigned int block);
@@ -492,7 +490,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages);
+ struct eeprom_table_record *bps, int pages);
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
@@ -501,6 +499,12 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ /* save bad pages to eeprom before gpu reset;
+ * i2c may be unstable during gpu reset
+ */
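+ /* in_task() is checked because reserving bad pages takes a mutex and
+ * allocates BOs, which may sleep (editor's assumption, not from the
+ * original patch) */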
+ if (in_task())
+ amdgpu_ras_reserve_bad_pages(adev);
+
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
schedule_work(&ras->recovery_work);
return 0;
@@ -566,6 +570,13 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
int amdgpu_ras_init(struct amdgpu_device *adev);
int amdgpu_ras_fini(struct amdgpu_device *adev);
int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info);
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info);
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
@@ -599,4 +610,14 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_dispatch_if *info);
+
+extern atomic_t amdgpu_ras_in_intr;
+
+static inline bool amdgpu_ras_intr_triggered(void)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 8a32b5c93778..7de16c0c2f20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -100,7 +100,101 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
return ret;
}
-static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control);
+
+
+static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
+{
+ int i;
+ uint32_t tbl_sum = 0;
+
+ /* Header checksum, skip checksum field in the calculation */
+ for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
+ tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
+
+ return tbl_sum;
+}
+
+static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
+ int num)
+{
+ int i, j;
+ uint32_t tbl_sum = 0;
+
+ /* Records checksum */
+ for (i = 0; i < num; i++) {
+ struct eeprom_table_record *record = &records[i];
+
+ for (j = 0; j < sizeof(*record); j++) {
+ tbl_sum += *(((unsigned char *)record) + j);
+ }
+ }
+
+ return tbl_sum;
+}
+
+static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
+}
+
+/* Checksum = 256 - ((sum of all table entries) mod 256) */
+static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num,
+ uint32_t old_hdr_byte_sum)
+{
+ /*
+ * This will update the table sum with new records.
+ *
+ * TODO: What happens when the EEPROM table wraps around
+ * and old records at the start get overwritten?
+ */
+
+ /* need to recalculate updated header byte sum */
+ control->tbl_byte_sum -= old_hdr_byte_sum;
+ control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
+
+ control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
+}
+
+/* table sum mod 256 + checksum must equal 256 */
+static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
+
+ if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
+ DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
+ return false;
+ }
+
+ return true;
+}
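+
+/*
+ * Worked example for __update_tbl_checksum()/__validate_tbl_checksum() above
+ * (editor's illustration, not part of the original patch): if the byte sum of
+ * the header plus all records is 456, then 456 % 256 == 200, the stored
+ * checksum is 256 - 200 == 56, and validation checks that 56 + 200 == 256.
+ */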
+
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
+{
+ unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ int ret = 0;
+
+ mutex_lock(&control->tbl_mutex);
+
+ hdr->header = EEPROM_TABLE_HDR_VAL;
+ hdr->version = EEPROM_TABLE_VER;
+ hdr->first_rec_offset = EEPROM_RECORD_START;
+ hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
+
+ control->tbl_byte_sum = 0;
+ __update_tbl_checksum(control, NULL, 0, 0);
+ control->next_addr = EEPROM_RECORD_START;
+
+ ret = __update_table_header(control, buff);
+
+ mutex_unlock(&control->tbl_mutex);
+
+ return ret;
+
+}
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
@@ -122,6 +216,10 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
break;
+ case CHIP_ARCTURUS:
+ ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
+ break;
+
default:
return 0;
}
@@ -143,25 +241,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
if (hdr->header == EEPROM_TABLE_HDR_VAL) {
control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
EEPROM_TABLE_RECORD_SIZE;
+ control->tbl_byte_sum = __calc_hdr_byte_sum(control);
+ control->next_addr = EEPROM_RECORD_START;
+
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->num_recs);
} else {
DRM_INFO("Creating new EEPROM table");
- hdr->header = EEPROM_TABLE_HDR_VAL;
- hdr->version = EEPROM_TABLE_VER;
- hdr->first_rec_offset = EEPROM_RECORD_START;
- hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
-
- adev->psp.ras.ras->eeprom_control.tbl_byte_sum =
- __calc_hdr_byte_sum(&adev->psp.ras.ras->eeprom_control);
- ret = __update_table_header(control, buff);
+ ret = amdgpu_ras_eeprom_reset_table(control);
}
- /* Start inserting records from here */
- adev->psp.ras.ras->eeprom_control.next_addr = EEPROM_RECORD_START;
-
return ret == 1 ? 0 : -EIO;
}
@@ -173,6 +264,9 @@ void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
case CHIP_VEGA20:
smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
break;
+ case CHIP_ARCTURUS:
+ smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
+ break;
default:
return;
@@ -226,8 +320,8 @@ static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *co
record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
i += 6;
- buff[i++] = record->mem_channel;
- buff[i++] = record->mcumc_id;
+ record->mem_channel = buff[i++];
+ record->mcumc_id = buff[i++];
memcpy(&tmp, buff + i, 6);
record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
@@ -266,87 +360,18 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address;
}
-
-static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
-{
- int i;
- uint32_t tbl_sum = 0;
-
- /* Header checksum, skip checksum field in the calculation */
- for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
- tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
-
- return tbl_sum;
-}
-
-static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
- int num)
-{
- int i, j;
- uint32_t tbl_sum = 0;
-
- /* Records checksum */
- for (i = 0; i < num; i++) {
- struct eeprom_table_record *record = &records[i];
-
- for (j = 0; j < sizeof(*record); j++) {
- tbl_sum += *(((unsigned char *)record) + j);
- }
- }
-
- return tbl_sum;
-}
-
-static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num)
-{
- return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
-}
-
-/* Checksum = 256 -((sum of all table entries) mod 256) */
-static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num,
- uint32_t old_hdr_byte_sum)
-{
- /*
- * This will update the table sum with new records.
- *
- * TODO: What happens when the EEPROM table is to be wrapped around
- * and old records from start will get overridden.
- */
-
- /* need to recalculate updated header byte sum */
- control->tbl_byte_sum -= old_hdr_byte_sum;
- control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
-
- control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
-}
-
-/* table sum mod 256 + checksum must equals 256 */
-static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num)
-{
- control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
-
- if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
- DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
- return false;
- }
-
- return true;
-}
-
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
bool write,
int num)
{
int i, ret = 0;
- struct i2c_msg *msgs;
- unsigned char *buffs;
+ struct i2c_msg *msgs, *msg;
+ unsigned char *buffs, *buff;
+ struct eeprom_table_record *record;
struct amdgpu_device *adev = to_amdgpu_device(control);
- if (adev->asic_type != CHIP_VEGA20)
+ if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
return 0;
buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
@@ -373,9 +398,9 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
* 256b
*/
for (i = 0; i < num; i++) {
- unsigned char *buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
- struct eeprom_table_record *record = &records[i];
- struct i2c_msg *msg = &msgs[i];
+ buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
+ msg = &msgs[i];
control->next_addr = __correct_eeprom_dest_address(control->next_addr);
@@ -415,8 +440,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
if (!write) {
for (i = 0; i < num; i++) {
- unsigned char *buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
- struct eeprom_table_record *record = &records[i];
+ buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
__decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 41f3fcb9a29b..622269957c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -79,6 +79,7 @@ struct eeprom_table_record {
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 5c13c503e61f..6010999d9020 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -83,3 +84,101 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
return csa_mc_addr;
}
+
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info)
+{
+ int r, i;
+ struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "sdma_err_count",
+ .debugfs_name = "sdma_err_inject",
+ };
+
+ if (!ih_info)
+ return -EINVAL;
+
+ if (!adev->sdma.ras_if) {
+ adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->sdma.ras_if)
+ return -ENOMEM;
+ adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
+ adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if->sub_block_index = 0;
+ strcpy(adev->sdma.ras_if->name, "sdma");
+ }
+ fs_info.head = ih_info->head = *adev->sdma.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
+ &fs_info, ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (r)
+ goto late_fini;
+ }
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
+free:
+ kfree(adev->sdma.ras_if);
+ adev->sdma.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
+ adev->sdma.ras_if) {
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ /* the cb member will not be used by
+ * amdgpu_ras_interrupt_remove_handler, init it only
+ * to cheat the check in ras_late_fini
+ */
+ .cb = amdgpu_sdma_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index a9ae0d8a0589..761ff8be6314 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -104,4 +104,13 @@ struct amdgpu_sdma_instance *
amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info);
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b66d29d5ffa2..b158230af8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 77674a7b9616..63e734a125fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(unsigned int, context)
__field(unsigned int, seqno)
__field(struct dma_fence *, fence)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
TRACE_EVENT(amdgpu_sched_run_job,
@@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__field(unsigned int, context)
__field(unsigned int, seqno)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -323,14 +323,15 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint64_t flags),
- TP_ARGS(pe, addr, count, incr, flags),
+ uint32_t incr, uint64_t flags, bool direct),
+ TP_ARGS(pe, addr, count, incr, flags, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
__field(u64, flags)
+ __field(bool, direct)
),
TP_fast_assign(
@@ -339,28 +340,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
__entry->count = count;
__entry->incr = incr;
__entry->flags = flags;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
- __entry->pe, __entry->addr, __entry->incr,
- __entry->flags, __entry->count)
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
+ "direct=%d", __entry->pe, __entry->addr, __entry->incr,
+ __entry->flags, __entry->count, __entry->direct)
);
TRACE_EVENT(amdgpu_vm_copy_ptes,
- TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
- TP_ARGS(pe, src, count),
+ TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct),
+ TP_ARGS(pe, src, count, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, src)
__field(u32, count)
+ __field(bool, direct)
),
TP_fast_assign(
__entry->pe = pe;
__entry->src = src;
__entry->count = count;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, src=%010Lx, count=%u",
- __entry->pe, __entry->src, __entry->count)
+ TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d",
+ __entry->pe, __entry->src, __entry->count,
+ __entry->direct)
);
TRACE_EVENT(amdgpu_vm_flush,
@@ -468,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __field(const char *,name)
+ __string(ring, sched_job->base.sched->name)
__field(uint64_t, id)
__field(struct dma_fence *, fence)
__field(uint64_t, ctx)
@@ -476,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
),
TP_fast_assign(
- __entry->name = sched_job->base.sched->name;
+ __assign_str(ring, sched_job->base.sched->name)
__entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __entry->name, __entry->id,
+ __get_str(ring), __entry->id,
__entry->fence, __entry->ctx,
__entry->seqno)
);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dff41d0a85fe..2616e2eafdeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -35,10 +35,12 @@
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
+#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
+#include <linux/dma-buf.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -54,6 +56,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
@@ -484,15 +487,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_device *adev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
- adev = amdgpu_ttm_adev(bo->bdev);
-
/* create space/pages for new_mem in GTT space */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -543,15 +543,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_device *adev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
- adev = amdgpu_ttm_adev(bo->bdev);
-
/* make space in GTT for old_mem buffer */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -763,6 +760,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
*/
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
+ struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
struct task_struct *usertask;
@@ -772,6 +770,20 @@ struct amdgpu_ttm_tt {
#endif
};
+#ifdef CONFIG_DRM_AMDGPU_USERPTR
+/* flags used internally by HMM, not related to CPU/GPU PTE flags */
+static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
+ (1 << 0), /* HMM_PFN_VALID */
+ (1 << 1), /* HMM_PFN_WRITE */
+ 0 /* HMM_PFN_DEVICE_PRIVATE */
+};
+
+static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
+ 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
+ 0, /* HMM_PFN_NONE */
+ 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
+};
+
/**
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
* memory and start HMM tracking CPU page table update
@@ -779,85 +791,89 @@ struct amdgpu_ttm_tt {
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
* once afterwards to stop HMM tracking
*/
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-
-#define MAX_RETRY_HMM_RANGE_FAULT 16
-
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
- struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct mm_struct *mm = gtt->usertask->mm;
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct hmm_range *range;
+ unsigned long timeout;
+ struct mm_struct *mm;
unsigned long i;
- uint64_t *pfns;
int r = 0;
- if (!mm) /* Happens during process shutdown */
- return -ESRCH;
-
- if (unlikely(!mirror)) {
- DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
- r = -EFAULT;
- goto out;
+ mm = bo->notifier.mm;
+ if (unlikely(!mm)) {
+ DRM_DEBUG_DRIVER("BO is not registered?\n");
+ return -EFAULT;
}
- vma = find_vma(mm, start);
- if (unlikely(!vma || start < vma->vm_start)) {
- r = -EFAULT;
- goto out;
- }
- if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
- vma->vm_file)) {
- r = -EPERM;
- goto out;
- }
+ /* Another get_user_pages is running at the same time?? */
+ if (WARN_ON(gtt->range))
+ return -EFAULT;
+
+ if (!mmget_not_zero(mm)) /* Happens during process shutdown */
+ return -ESRCH;
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (unlikely(!range)) {
r = -ENOMEM;
goto out;
}
+ range->notifier = &bo->notifier;
+ range->flags = hmm_range_flags;
+ range->values = hmm_range_values;
+ range->pfn_shift = PAGE_SHIFT;
+ range->start = bo->notifier.interval_tree.start;
+ range->end = bo->notifier.interval_tree.last + 1;
+ range->default_flags = hmm_range_flags[HMM_PFN_VALID];
+ if (!amdgpu_ttm_tt_is_readonly(ttm))
+ range->default_flags |= range->flags[HMM_PFN_WRITE];
- pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
- if (unlikely(!pfns)) {
+ range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
+ GFP_KERNEL);
+ if (unlikely(!range->pfns)) {
r = -ENOMEM;
goto out_free_ranges;
}
- amdgpu_hmm_init_range(range);
- range->default_flags = range->flags[HMM_PFN_VALID];
- range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ?
- 0 : range->flags[HMM_PFN_WRITE];
- range->pfn_flags_mask = 0;
- range->pfns = pfns;
- range->start = start;
- range->end = start + ttm->num_pages * PAGE_SIZE;
-
- hmm_range_register(range, mirror);
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, start);
+ if (unlikely(!vma || start < vma->vm_start)) {
+ r = -EFAULT;
+ goto out_unlock;
+ }
+ if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
+ vma->vm_file)) {
+ r = -EPERM;
+ goto out_unlock;
+ }
+ up_read(&mm->mmap_sem);
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- /*
- * Just wait for range to be valid, safe to ignore return value as we
- * will use the return value of hmm_range_fault() below under the
- * mmap_sem to ascertain the validity of the range.
- */
- hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
+retry:
+ range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
r = hmm_range_fault(range, 0);
up_read(&mm->mmap_sem);
-
- if (unlikely(r < 0))
+ if (unlikely(r <= 0)) {
+ /*
+ * FIXME: This timeout should encompass the retry from
+ * mmu_interval_read_retry() as well.
+ */
+ if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
+ goto retry;
goto out_free_pfns;
+ }
for (i = 0; i < ttm->num_pages; i++) {
- pages[i] = hmm_device_entry_to_page(range, pfns[i]);
+ /* FIXME: The pages cannot be touched outside the notifier_lock */
+ pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
if (unlikely(!pages[i])) {
pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
- i, pfns[i]);
+ i, range->pfns[i]);
r = -ENOMEM;
goto out_free_pfns;
@@ -865,15 +881,18 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
}
gtt->range = range;
+ mmput(mm);
return 0;
+out_unlock:
+ up_read(&mm->mmap_sem);
out_free_pfns:
- hmm_range_unregister(range);
- kvfree(pfns);
+ kvfree(range->pfns);
out_free_ranges:
kfree(range);
out:
+ mmput(mm);
return r;
}
@@ -898,15 +917,18 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
"No user pages to check\n");
if (gtt->range) {
- r = hmm_range_valid(gtt->range);
- hmm_range_unregister(gtt->range);
-
+ /*
+ * FIXME: Must always hold notifier_lock for this, and must
+ * not ignore the return code.
+ */
+ r = mmu_interval_read_retry(gtt->range->notifier,
+ gtt->range->notifier_seq);
kvfree(gtt->range->pfns);
kfree(gtt->range);
gtt->range = NULL;
}
- return r;
+ return !r;
}
#endif
@@ -987,10 +1009,18 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
sg_free_table(ttm->sg);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
- if (gtt->range &&
- ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
- gtt->range->pfns[0]))
- WARN_ONCE(1, "Missing get_user_page_done\n");
+ if (gtt->range) {
+ unsigned long i;
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (ttm->pages[i] !=
+ hmm_device_entry_to_page(gtt->range,
+ gtt->range->pfns[i]))
+ break;
+ }
+
+ WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
+ }
#endif
}
@@ -1217,16 +1247,14 @@ static struct ttm_backend_func amdgpu_backend_func = {
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_ttm_adev(bo->bdev);
-
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
+ gtt->gobj = &bo->base;
/* allocate space for the uninitialized page entries */
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
@@ -1247,7 +1275,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt && gtt->userptr) {
@@ -1260,7 +1287,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
return 0;
}
- if (slave && ttm->sg) {
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
@@ -1287,9 +1326,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
*/
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ struct amdgpu_device *adev;
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
@@ -1298,7 +1336,16 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
}
- if (slave)
+ if (ttm->sg && gtt->gobj->import_attach) {
+ struct dma_buf_attachment *attach;
+
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ return;
+ }
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return;
adev = amdgpu_ttm_adev(ttm->bdev);
@@ -1634,81 +1681,105 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
*/
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- u64 vram_size = adev->gmc.visible_vram_size;
- u64 offset = adev->fw_vram_usage.start_offset;
- u64 size = adev->fw_vram_usage.size;
- struct amdgpu_bo *bo;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = adev->fw_vram_usage.size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
+ uint64_t vram_size = adev->gmc.visible_vram_size;
+
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
+ if (adev->fw_vram_usage.size == 0 ||
+ adev->fw_vram_usage.size > vram_size)
+ return 0;
- r = amdgpu_bo_create(adev, &bp,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
+ return amdgpu_bo_create_kernel_at(adev,
+ adev->fw_vram_usage.start_offset,
+ adev->fw_vram_usage.size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+}
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
+/*
+ * Memory training reservation functions
+ */
- /* remove the original mem node and create a new one at the
- * request position
- */
- bo = adev->fw_vram_usage.reserved_bo;
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
+/**
+ * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free memory training reserved vram if it has been reserved.
+ */
+static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
- &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
+ ctx->c2p_bo = NULL;
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size));
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
+ amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
+ ctx->p2c_bo = NULL;
+
+ return 0;
+}
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+/**
+ * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from memory training.
+ */
+static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+
+ memset(ctx, 0, sizeof(*ctx));
+ if (!adev->fw_vram_usage.mem_train_support) {
+ DRM_DEBUG("memory training does not support!\n");
+ return 0;
}
- return r;
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
+ ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+ ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
+ ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->p2c_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->p2c_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
+ goto Err_out;
+ }
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->c2p_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ goto Err_out;
+ }
+
+ ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
+ return 0;
+
+Err_out:
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ return ret;
}
+
/**
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1731,6 +1802,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
+ adev->ddev->vma_offset_manager,
dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1771,6 +1843,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ /*
+ * The reserved vram for memory training must be pinned to the specified
+ * place on the VRAM, so reserve it early.
+ */
+ r = amdgpu_ttm_training_reserve_vram_init(adev);
+ if (r)
+ return r;
+
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
@@ -1781,6 +1861,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
NULL, &stolen_vga_buf);
if (r)
return r;
+
+ /*
+ * reserve one TMR (64K) memory at the top of VRAM which holds
+ * IP Discovery data and is protected by PSP.
+ */
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
+ DISCOVERY_TMR_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->discovery_memory,
+ NULL);
+ if (r)
+ return r;
+
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1856,7 +1950,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_debugfs_fini(adev);
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ /* return the IP Discovery TMR memory back to VRAM */
+ amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
+
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
adev->mman.aper_base_kaddr = NULL;
@@ -1952,10 +2050,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- num_dw = adev->mman.buffer_funcs->copy_num_dw;
- while (num_dw & 0x7)
- num_dw++;
-
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
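+ /* ALIGN() rounds num_dw up to the next multiple of 8, matching the
+ * removed open-coded padding loop (editor's note) */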
num_bytes = num_pages * 8;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
@@ -2015,11 +2110,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
- num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
-
- /* for IB padding */
- while (num_dw & 0x7)
- num_dw++;
+ num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3a6115ad0196..833fc4b68940 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -360,6 +360,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_RAVEN:
case CHIP_VEGA12:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -368,8 +369,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
- case CHIP_ARCTURUS:
- return AMDGPU_FW_LOAD_DIRECT;
default:
DRM_ERROR("Unknown firmware load type\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index b34f00d42049..410587b950f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -108,6 +108,12 @@ struct ta_firmware_header_v1_0 {
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_offset_bytes;
uint32_t ta_ras_size_bytes;
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_offset_bytes;
+ uint32_t ta_hdcp_size_bytes;
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_offset_bytes;
+ uint32_t ta_dtm_size_bytes;
};
/* version_major=1, version_minor=0 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
new file mode 100644
index 000000000000..d4fb9cf27e21
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras.h"
+
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "umc_err_count",
+ .debugfs_name = "umc_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ if (!adev->umc.ras_if) {
+ adev->umc.ras_if =
+ kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->umc.ras_if)
+ return -ENOMEM;
+ adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
+ adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if->sub_block_index = 0;
+ strcpy(adev->umc.ras_if->name, "umc");
+ }
+ ih_info.head = fs_info.head = *adev->umc.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ /* ras init of specific umc version */
+ if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
+ adev->umc.funcs->err_cnt_init(adev);
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
+free:
+ kfree(adev->umc.ras_if);
+ adev->umc.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+ adev->umc.ras_if) {
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return AMDGPU_RAS_SUCCESS;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_count)
+ adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ DRM_WARN("Failed to alloc memory for umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+ }
+
+ /* only uncorrectable error needs gpu reset */
+ if (err_data->ue_count) {
+ if (err_data->err_addr_cnt &&
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ err_data->err_addr_cnt))
+ DRM_WARN("Failed to add ras bad page!\n");
+
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+
+ kfree(err_data->err_addr);
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
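The new file above only defines the UMC RAS helpers; the hunk does not show how they are wired into an IP block. A rough sketch of the assumed call sites follows — the example_gmc_* wrapper names are hypothetical placeholders, and only the amdgpu_umc_* helpers come from the patch:

/* Hypothetical IP-block callbacks showing where the helpers above would
 * typically be invoked from; the example_gmc_* names are placeholders.
 */
static int example_gmc_ras_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* allocates ras_if, registers sysfs/debugfs entries and the ECC IRQ */
	return amdgpu_umc_ras_late_init(adev);
}

static int example_gmc_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* tears down the interrupt handler and frees ras_if */
	amdgpu_umc_ras_fini(adev);
	return 0;
}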
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 975afa04df09..3283032a78e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -54,7 +54,8 @@
adev->umc.funcs->disable_umc_index_mode(adev);
struct amdgpu_umc_funcs {
- void (*ras_init)(struct amdgpu_device *adev);
+ void (*err_cnt_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
@@ -62,6 +63,7 @@ struct amdgpu_umc_funcs {
void (*enable_umc_index_mode)(struct amdgpu_device *adev,
uint32_t umc_instance);
void (*disable_umc_index_mode)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
};
struct amdgpu_umc {
@@ -75,8 +77,17 @@ struct amdgpu_umc {
uint32_t channel_offs;
/* channel index table of interleaved memory */
const uint32_t *channel_idx_tbl;
+ struct ras_common_if *ras_if;
const struct amdgpu_umc_funcs *funcs;
};
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2c364b8695f..e324bfe6c58f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -39,6 +39,8 @@
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"
+#include "amdgpu_ras.h"
+
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -297,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
@@ -372,7 +375,13 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
- memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+ /* re-write 0 since err_event_athub will corrupt VCPU buffer */
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+ memset(adev->uvd.inst[j].saved_bo, 0, size);
+ } else {
+ memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 65044b1b3d4c..46b590af2fd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence);
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence);
/**
* amdgpu_vce_init - allocate memory, load vce firmware
@@ -211,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
@@ -428,9 +434,9 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
*
* Open up a stream for HW test
*/
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
@@ -508,8 +514,8 @@ err:
*
* Close up a stream for HW test or if userspace failed to do so
*/
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence)
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index e802f7d9db0a..d6d83a3ec803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -58,11 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
- struct dma_fence **fence);
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3199e4a5ff12..9d870444d7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
&adev->vcn.dpg_sram_gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5251352f5922..598c24505c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -130,7 +130,8 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
+ >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -341,7 +342,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}
-/**
+/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
*/
struct amdgpu_vm_pt_cursor {
@@ -482,6 +483,7 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
*
* @adev: amdgpu_device structure
* @vm: amdgpu_vm structure
+ * @start: optional cursor to start with
* @cursor: state to initialize
*
* Starts a deep first traversal of the PD/PT tree.
@@ -535,7 +537,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
amdgpu_vm_pt_ancestor(cursor);
}
-/**
+/*
* for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
*/
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
@@ -566,6 +568,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
list_add(&entry->tv.head, validated);
}
+/**
+ * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
+ *
+ * @bo: BO which was removed from the LRU
+ *
+ * Make sure the bulk_moveable flag is updated when a BO is removed from the
+ * LRU.
+ */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_bo *abo;
@@ -600,19 +610,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -624,7 +633,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
&vm->lru_bulk_move);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
vm->bulk_moveable = true;
}
@@ -693,6 +702,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
+ * @direct: use a direct update
*
* Root PD needs to be reserved when calling this.
*
@@ -701,7 +711,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_bo *bo)
+ struct amdgpu_bo *bo,
+ bool direct)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
@@ -760,6 +771,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
if (r)
@@ -813,10 +825,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requesting vm
+ * @level: the page table level
+ * @direct: use a direct update
* @bp: resulting BO allocation parameters
*/
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, struct amdgpu_bo_param *bp)
+ int level, bool direct,
+ struct amdgpu_bo_param *bp)
{
memset(bp, 0, sizeof(*bp));
@@ -831,6 +846,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
+ bp->no_wait_gpu = direct;
if (vm->root.base.bo)
bp->resv = vm->root.base.bo->tbo.base.resv;
}
@@ -841,6 +857,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
* @cursor: Which page table to allocate
+ * @direct: use a direct update
*
* Make sure a specific page table or directory is allocated.
*
@@ -850,7 +867,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor)
+ struct amdgpu_vm_pt_cursor *cursor,
+ bool direct)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo_param bp;
@@ -871,7 +889,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (entry->base.bo)
return 0;
- amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
+ amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
@@ -883,7 +901,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt);
+ r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
if (r)
goto error_free_pt;
@@ -1020,7 +1038,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -1034,10 +1053,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
bool vm_flush_needed = job->vm_needs_flush;
- bool pasid_mapping_needed = id->pasid != job->pasid ||
- !id->pasid_mapping ||
- !dma_fence_is_signaled(id->pasid_mapping);
struct dma_fence *fence = NULL;
+ bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
int r;
@@ -1047,6 +1064,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
pasid_mapping_needed = true;
}
+ mutex_lock(&id_mgr->lock);
+ if (id->pasid != job->pasid || !id->pasid_mapping ||
+ !dma_fence_is_signaled(id->pasid_mapping))
+ pasid_mapping_needed = true;
+ mutex_unlock(&id_mgr->lock);
+
gds_switch_needed &= !!ring->funcs->emit_gds_switch;
vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
@@ -1086,9 +1109,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
}
if (pasid_mapping_needed) {
+ mutex_lock(&id_mgr->lock);
id->pasid = job->pasid;
dma_fence_put(id->pasid_mapping);
id->pasid_mapping = dma_fence_get(fence);
+ mutex_unlock(&id_mgr->lock);
}
dma_fence_put(fence);
@@ -1172,10 +1197,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/*
+/**
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
- * @param: parameters for the update
+ * @params: parameters for the update
* @vm: requested vm
* @entry: entry to update
*
@@ -1199,7 +1224,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
-/*
+/**
* amdgpu_vm_invalidate_pds - mark all PDs as invalid
*
* @adev: amdgpu_device pointer
@@ -1218,19 +1243,20 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
amdgpu_vm_bo_relocated(&entry->base);
}
-/*
- * amdgpu_vm_update_directories - make sure that all directories are valid
+/**
+ * amdgpu_vm_update_pdes - make sure that all directories are valid
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @direct: submit directly to the paging queue
*
* Makes sure all directories are up to date.
*
* Returns:
* 0 for success, error for failure.
*/
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct)
{
struct amdgpu_vm_update_params params;
int r;
@@ -1241,6 +1267,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
if (r)
@@ -1268,7 +1295,7 @@ error:
return r;
}
-/**
+/*
* amdgpu_vm_update_flags - figure out flags for PTE updates
*
* Make sure to set the right flags for the PTEs at the desired level.
@@ -1391,7 +1418,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
+ /* make sure that the page tables covering the address range are
+ * actually allocated
+ */
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
+ params->direct);
if (r)
return r;
@@ -1463,7 +1494,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
} while (frag_start < entry_end);
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
- /* Free all child entries */
+ /* Free all child entries.
+ * Update the tables with the flags and addresses and free up subsequent
+ * tables in the case of huge pages or freed up areas.
+ * This is the maximum you can free, because all other page tables are not
+ * completely covered by the range and so potentially still in use.
+ */
while (cursor.pfn < frag_start) {
amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
@@ -1482,13 +1518,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
- * @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
+ * @direct: direct submission in a page fault
+ * @exclusive: fence we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
* @addr: addr to set the area to
+ * @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
@@ -1497,11 +1534,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct,
struct dma_fence *exclusive,
- dma_addr_t *pages_addr,
- struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
+ dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
@@ -1511,6 +1548,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */
@@ -1569,27 +1607,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
flags &= ~AMDGPU_PTE_WRITEABLE;
- flags &= ~AMDGPU_PTE_EXECUTABLE;
- flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-
- if (adev->asic_type >= CHIP_NAVI10) {
- flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
- } else {
- flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
- }
-
- if ((mapping->flags & AMDGPU_PTE_PRT) &&
- (adev->asic_type >= CHIP_VEGA10)) {
- flags |= AMDGPU_PTE_PRT;
- if (adev->asic_type >= CHIP_NAVI10) {
- flags |= AMDGPU_PTE_SNOOPED;
- flags |= AMDGPU_PTE_LOG;
- flags |= AMDGPU_PTE_SYSTEM;
- }
- flags &= ~AMDGPU_PTE_VALID;
- }
+ /* Apply ASIC specific mapping flags */
+ amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1633,7 +1652,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ max_entries = count *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1642,9 +1662,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
start, last, flags, addr,
- fence);
+ dma_addr, fence);
if (r)
return r;
@@ -1672,8 +1692,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* Returns:
* 0 for success, -EINVAL for failure.
*/
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
+int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1700,7 +1719,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = dma_resv_get_excl(bo->tbo.base.resv);
+ exclusive = bo->tbo.moving;
}
if (bo) {
@@ -1731,12 +1750,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
return r;
}
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- }
-
/* If the BO is not in its preferred location add it back to
* the evicted list so that it gets validated again on the
* next command submission.
@@ -1744,7 +1757,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
uint32_t mem_type = bo->tbo.mem.mem_type;
- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+ if (!(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);
@@ -1938,9 +1952,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
mapping->start, mapping->last,
- init_pte_value, 0, &f);
+ init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -2682,12 +2696,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
- /* create scheduler entity for page table updates */
- r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
+ /* create scheduler entities for page table updates */
+ r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
adev->vm_manager.vm_pte_num_rqs, NULL);
if (r)
return r;
+ r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
+ adev->vm_manager.vm_pte_num_rqs, NULL);
+ if (r)
+ goto error_free_direct;
+
vm->pte_support_ats = false;
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
@@ -2702,7 +2721,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2711,12 +2731,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
- amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
+ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
r = amdgpu_bo_create(adev, &bp, &root);
if (r)
- goto error_free_sched_entity;
+ goto error_free_delayed;
r = amdgpu_bo_reserve(root, true);
if (r)
@@ -2728,7 +2748,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
- r = amdgpu_vm_clear_bo(adev, vm, root);
+ r = amdgpu_vm_clear_bo(adev, vm, root, false);
if (r)
goto error_unreserve;
@@ -2759,8 +2779,11 @@ error_free_root:
amdgpu_bo_unref(&vm->root.base.bo);
vm->root.base.bo = NULL;
-error_free_sched_entity:
- drm_sched_entity_destroy(&vm->entity);
+error_free_delayed:
+ drm_sched_entity_destroy(&vm->delayed);
+
+error_free_direct:
+ drm_sched_entity_destroy(&vm->direct);
return r;
}
@@ -2801,6 +2824,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @pasid: pasid to use
*
* This only works on GFX VMs that don't have any BOs added and no
* page tables allocated yet.
@@ -2816,7 +2840,8 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
* Returns:
* 0 for success, -errno for errors.
*/
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned int pasid)
{
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
@@ -2848,7 +2873,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
*/
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
if (r)
goto free_idr;
}
@@ -2858,7 +2883,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2937,19 +2963,38 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
- int i, r;
+ int i;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ amdgpu_bo_reserve(root, true);
if (vm->pasid) {
unsigned long flags;
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ vm->pasid = 0;
+ }
+
+ list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
+ if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
+ amdgpu_vm_prt_fini(adev, vm);
+ prt_fini_needed = false;
+ }
+
+ list_del(&mapping->list);
+ amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- drm_sched_entity_destroy(&vm->entity);
+ amdgpu_vm_free_pts(adev, vm, NULL);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+ WARN_ON(vm->root.base.bo);
+
+ drm_sched_entity_destroy(&vm->direct);
+ drm_sched_entity_destroy(&vm->delayed);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2962,26 +3007,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
list_del(&mapping->list);
kfree(mapping);
}
- list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
- amdgpu_vm_prt_fini(adev, vm);
- prt_fini_needed = false;
- }
-
- list_del(&mapping->list);
- amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
- }
- root = amdgpu_bo_ref(vm->root.base.bo);
- r = amdgpu_bo_reserve(root, true);
- if (r) {
- dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
- } else {
- amdgpu_vm_free_pts(adev, vm, NULL);
- amdgpu_bo_unreserve(root);
- }
- amdgpu_bo_unref(&root);
- WARN_ON(vm->root.base.bo);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vmid_free_reserved(adev, vm, i);
@@ -3065,8 +3091,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
- /* current, we only have requirement to reserve vmid from gfxhub */
- r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
+ /* We only have requirement to reserve vmid from gfxhub */
+ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
+ AMDGPU_GFXHUB_0);
if (r)
return r;
break;
@@ -3109,13 +3136,88 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
*/
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
- if (!vm->task_info.pid) {
- vm->task_info.pid = current->pid;
- get_task_comm(vm->task_info.task_name, current);
+ if (vm->task_info.pid)
+ return;
- if (current->group_leader->mm == current->mm) {
- vm->task_info.tgid = current->group_leader->pid;
- get_task_comm(vm->task_info.process_name, current->group_leader);
- }
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm != current->mm)
+ return;
+
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+}
+
+/**
+ * amdgpu_vm_handle_fault - graceful handling of VM faults.
+ * @adev: amdgpu device pointer
+ * @pasid: PASID of the VM
+ * @addr: Address of the fault
+ *
+ * Try to gracefully handle a VM fault. Return true if the fault was handled and
+ * shouldn't be reported any more.
+ */
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr)
+{
+ struct amdgpu_bo *root;
+ uint64_t value, flags;
+ struct amdgpu_vm *vm;
+ long r;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ else
+ root = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+
+ if (!root)
+ return false;
+
+ r = amdgpu_bo_reserve(root, true);
+ if (r)
+ goto error_unref;
+
+ /* Double check that the VM still exists */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm && vm->root.base.bo != root)
+ vm = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (!vm)
+ goto error_unlock;
+
+ addr /= AMDGPU_GPU_PAGE_SIZE;
+ flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
+ AMDGPU_PTE_SYSTEM;
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+ /* Redirect the access to the dummy page */
+ value = adev->dummy_page_addr;
+ flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
+ AMDGPU_PTE_WRITEABLE;
+ } else {
+ /* Let the hw retry silently on the PTE */
+ value = 0;
}
+
+ r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
+ flags, value, NULL, NULL);
+ if (r)
+ goto error_unlock;
+
+ r = amdgpu_vm_update_pdes(adev, vm, true);
+
+error_unlock:
+ amdgpu_bo_unreserve(root);
+ if (r < 0)
+ DRM_ERROR("Can't handle page fault (%ld)\n", r);
+
+error_unref:
+ amdgpu_bo_unref(&root);
+
+ return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2eda3a8c330d..4dbbe1b6b413 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -99,6 +99,9 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+/* Reserve 4MB VRAM for page tables */
+#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20)
+
/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS 3
#define AMDGPU_GFXHUB_0 0
@@ -199,6 +202,11 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm *vm;
/**
+ * @direct: if changes should be made directly
+ */
+ bool direct;
+
+ /**
* @pages_addr:
*
* DMA addresses to use for mapping
@@ -254,8 +262,9 @@ struct amdgpu_vm {
struct amdgpu_vm_pt root;
struct dma_fence *last_update;
- /* Scheduler entity for page table updates */
- struct drm_sched_entity entity;
+ /* Scheduler entities for page table updates */
+ struct drm_sched_entity direct;
+ struct drm_sched_entity delayed;
unsigned int pasid;
/* dedicated to vm */
@@ -357,8 +366,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
@@ -404,6 +413,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info);
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 5222d165abfc..73fec7a0ced5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
{
int r;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
- if (unlikely(r))
- return r;
-
/* Wait for any BO move to be completed */
if (exclusive) {
r = dma_fence_wait(exclusive, true);
@@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
return r;
}
- return 0;
+ /* Don't wait for submissions during page fault */
+ if (p->direct)
+ return 0;
+
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
+ * as the root PD BO
+ */
+ return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
}
/**
@@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
pe += (unsigned long)amdgpu_bo_kptr(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 61fc584cbb1a..832db59f441e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (r)
return r;
+ p->num_dw_left = ndw;
+
+ /* Wait for moves to be completed */
r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
if (r)
return r;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
- owner, false);
- if (r)
- return r;
+ /* Don't wait for any submissions during page fault handling */
+ if (p->direct)
+ return 0;
- p->num_dw_left = ndw;
- return 0;
+ return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
+ owner, false);
}
/**
@@ -95,22 +97,23 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
{
struct amdgpu_bo *root = p->vm->root.base.bo;
struct amdgpu_ib *ib = p->job->ibs;
+ struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
struct dma_fence *f;
int r;
- ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+ entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+ ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > p->num_dw_left);
- r = amdgpu_job_submit(p->job, &p->vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error;
amdgpu_bo_fence(root, f, true);
- if (fence)
+ if (fence && !p->direct)
swap(*fence, f);
dma_fence_put(f);
return 0;
@@ -120,7 +123,6 @@ error:
return r;
}
-
/**
* amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
*
@@ -141,7 +143,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_copy_ptes(pe, src, count);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
@@ -168,7 +170,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
count, incr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3a9d8c15fe9f..82a3299e53c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,9 @@
*/
#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_atomfirmware.h"
+#include "atom.h"
struct amdgpu_vram_mgr {
struct drm_mm mm;
@@ -101,6 +104,39 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}
+static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ switch (adev->gmc.vram_vendor) {
+ case SAMSUNG:
+ return snprintf(buf, PAGE_SIZE, "samsung\n");
+ case INFINEON:
+ return snprintf(buf, PAGE_SIZE, "infineon\n");
+ case ELPIDA:
+ return snprintf(buf, PAGE_SIZE, "elpida\n");
+ case ETRON:
+ return snprintf(buf, PAGE_SIZE, "etron\n");
+ case NANYA:
+ return snprintf(buf, PAGE_SIZE, "nanya\n");
+ case HYNIX:
+ return snprintf(buf, PAGE_SIZE, "hynix\n");
+ case MOSEL:
+ return snprintf(buf, PAGE_SIZE, "mosel\n");
+ case WINBOND:
+ return snprintf(buf, PAGE_SIZE, "winbond\n");
+ case ESMT:
+ return snprintf(buf, PAGE_SIZE, "esmt\n");
+ case MICRON:
+ return snprintf(buf, PAGE_SIZE, "micron\n");
+ default:
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+ }
+}
+
static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
@@ -109,6 +145,8 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
amdgpu_mem_info_vis_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
+ amdgpu_mem_info_vram_vendor, NULL);
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
@@ -154,6 +192,11 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
return ret;
}
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
+ return ret;
+ }
return 0;
}
@@ -180,6 +223,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
return 0;
}
@@ -275,7 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
- uint64_t vis_usage = 0, mem_bytes;
+ uint64_t vis_usage = 0, mem_bytes, max_bytes;
unsigned i;
int r;
@@ -283,9 +327,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn)
lpfn = man->size;
+ max_bytes = adev->gmc.mc_vram_size;
+ if (tbo->type != ttm_bo_type_kernel)
+ max_bytes -= AMDGPU_VM_RESERVED_VRAM;
+
/* bail out quickly if there's likely not enough VRAM for this BO */
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
- if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) {
+ if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
atomic64_sub(mem_bytes, &mgr->usage);
mem->mm_node = NULL;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 65aae75f80fd..61d13d8b7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
+#include "amdgpu_ras.h"
#include "df/df_3_6_offset.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -273,22 +274,55 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_device *tmp_adev;
+ bool update_hive_pstate = true;
+ bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
if (!hive)
return 0;
- if (hive->pstate == pstate)
- return 0;
+ mutex_lock(&hive->hive_lock);
+
+ if (hive->pstate == pstate) {
+ adev->pstate = is_high_pstate ? pstate : adev->pstate;
+ goto out;
+ }
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
if (is_support_sw_smu_xgmi(adev))
ret = smu_set_xgmi_pstate(&adev->smu, pstate);
- if (ret)
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_xgmi_pstate)
+ ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+ pstate);
+
+ if (ret) {
dev_err(adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
+ goto out;
+ }
+
+ /* Update device pstate */
+ adev->pstate = pstate;
+
+ /*
+ * Update the hive pstate only if all devices of the hive
+ * are in the same pstate
+ */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ if (tmp_adev->pstate != adev->pstate) {
+ update_hive_pstate = false;
+ break;
+ }
+ }
+ if (update_hive_pstate || is_high_pstate)
+ hive->pstate = pstate;
+
+out:
+ mutex_unlock(&hive->hive_lock);
return ret;
}
@@ -363,6 +397,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit;
}
+ /* Set default device pstate */
+ adev->pstate = -1;
+
top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -437,3 +474,52 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
mutex_unlock(&hive->hive_lock);
}
}
+
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "xgmi_wafl_err_count",
+ .debugfs_name = "xgmi_wafl_err_inject",
+ };
+
+ if (!adev->gmc.xgmi.supported ||
+ adev->gmc.xgmi.num_physical_nodes == 0)
+ return 0;
+
+ if (!adev->gmc.xgmi.ras_if) {
+ adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gmc.xgmi.ras_if)
+ return -ENOMEM;
+ adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if->sub_block_index = 0;
+ strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
+ }
+ ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
+ kfree(adev->gmc.xgmi.ras_if);
+ adev->gmc.xgmi.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
+ adev->gmc.xgmi.ras_if) {
+ struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index fbcee31788c4..bbf504ff7051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -42,6 +42,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
index 4853899b1824..fda99c958c3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "arct_ip_offset.h"
int arct_reg_base_init(struct amdgpu_device *adev)
@@ -52,6 +51,8 @@ int arct_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index b81bb414fcb3..7a43993544c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -966,6 +966,25 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmGRBM_STATUS_SE2},
+ {mmGRBM_STATUS_SE3},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
+ {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
+ {mmCP_CPF_BUSY_STAT},
+ {mmCP_CPF_STALLED_STAT1},
+ {mmCP_CPF_STATUS},
+ {mmCP_CPC_BUSY_STAT},
+ {mmCP_CPC_STALLED_STAT1},
+ {mmCP_CPC_STATUS},
{mmGB_ADDR_CONFIG},
{mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
@@ -1270,15 +1289,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
}
/**
- * cik_asic_reset - soft reset GPU
+ * cik_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int cik_asic_reset(struct amdgpu_device *adev)
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -1294,7 +1313,45 @@ static int cik_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
cik_asic_reset_method(struct amdgpu_device *adev)
{
- return AMD_RESET_METHOD_LEGACY;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ /* disable baco reset until it works */
+ /* smu7_asic_get_baco_capability(adev, &baco_reset); */
+ baco_reset = false;
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * cik_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int cik_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ r = smu7_asic_baco_reset(adev);
+ else
+ r = cik_asic_pci_config_reset(adev);
+
+ return r;
}
static u32 cik_get_config_memsize(struct amdgpu_device *adev)
@@ -1384,7 +1441,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1419,12 +1475,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1434,14 +1485,17 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1465,15 +1519,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1486,26 +1548,45 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1520,15 +1601,16 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 54c625a2e570..9870bf27870e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,4 +31,7 @@ void cik_srbm_select(struct amdgpu_device *adev,
int cik_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
+int smu7_asic_baco_reset(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 645550e7caf5..40d2ac723dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -330,9 +330,11 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -368,6 +370,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
amdgpu_irq_get(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -382,9 +385,11 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -397,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1219,10 +1225,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1230,12 +1238,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1261,10 +1271,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1273,12 +1285,14 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1313,10 +1327,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1339,12 +1355,14 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1352,10 +1370,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
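The dce_v10_0 changes above, and the dce_v11_0/dce_v6_0/dce_v8_0 changes below, all convert bare connector_list walks to the locked drm_connector_list_iter API and restrict the "Couldn't read SADs" error message to a genuinely negative drm_edid_to_sad() return. A hedged sketch of the iteration pattern, assuming only a drm_device/drm_encoder pair (the _begin/_end helpers and the drm_for_each_connector_iter macro are the real drm_connector.h API; the lookup function is illustrative):

	#include <drm/drm_connector.h>
	#include <drm/drm_device.h>
	#include <drm/drm_encoder.h>

	static struct drm_connector *
	connector_for_encoder(struct drm_device *dev, struct drm_encoder *encoder)
	{
		struct drm_connector *connector, *found = NULL;
		struct drm_connector_list_iter iter;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			if (connector->encoder == encoder) {
				found = connector;
				break;
			}
		}
		/* Every _begin() needs a matching _end(); the pointer is only
		 * safe to use afterwards if the caller holds its own reference
		 * or, as in the hunks above, uses it immediately. */
		drm_connector_list_iter_end(&iter);

		return found;
	}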
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d9f470632b2c..898ef72d423c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -348,9 +348,11 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -385,6 +387,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -399,9 +402,11 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -413,6 +418,7 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1245,10 +1251,12 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1256,12 +1264,14 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1287,10 +1297,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1299,12 +1311,14 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1339,10 +1353,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1365,12 +1381,14 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1378,10 +1396,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 3eb2e7429269..db15a112becc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -281,9 +281,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -309,7 +311,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
-
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -324,9 +326,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -338,6 +342,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1124,20 +1129,24 @@ static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int interlace = 0;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1164,21 +1173,25 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u8 *sadb = NULL;
int sad_count;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1221,10 +1234,12 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1244,12 +1259,14 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1257,10 +1274,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 tmp = 0;
@@ -1632,6 +1649,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
int bpc = 8;
@@ -1639,12 +1657,14 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a16c5e9e610e..f06c9022c1fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -275,9 +275,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -303,6 +305,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -317,9 +320,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -331,6 +336,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1157,10 +1163,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp = 0, offset;
@@ -1169,12 +1177,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1214,10 +1224,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 offset, tmp;
u8 *sadb = NULL;
@@ -1228,12 +1240,14 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1263,11 +1277,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1292,12 +1308,14 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1305,10 +1323,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c9608ae8643b..e4f94863332c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -260,15 +260,14 @@ static struct drm_encoder *
dce_virtual_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
return encoder;
}
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
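The dce_virtual.c hunk above follows the drm_connector_for_each_possible_encoder() signature change: the macro no longer takes a caller-supplied index. A tiny illustrative sketch under that assumption (the counting helper is hypothetical, only the macro and types are the real DRM API):

	#include <drm/drm_connector.h>
	#include <drm/drm_encoder.h>

	static unsigned int count_possible_encoders(struct drm_connector *connector)
	{
		struct drm_encoder *encoder;
		unsigned int n = 0;

		/* Two arguments only; the slot bookkeeping the old third
		 * argument exposed is now internal to the macro. */
		drm_connector_for_each_possible_encoder(connector, encoder)
			n++;

		return n;
	}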
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index 844c03868248..d6221298b477 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -33,6 +33,10 @@ static void df_v1_7_sw_init(struct amdgpu_device *adev)
{
}
+static void df_v1_7_sw_fini(struct amdgpu_device *adev)
+{
+}
+
static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
@@ -111,6 +115,7 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v1_7_funcs = {
.sw_init = df_v1_7_sw_init,
+ .sw_fini = df_v1_7_sw_fini,
.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
.get_fb_channel_number = df_v1_7_get_fb_channel_number,
.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 5850c8e34caa..16fbd2bc8ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
unsigned long flags, address, data;
uint32_t ficadl_val, ficadh_val;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
@@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
@@ -220,6 +220,13 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
adev->df_perfmon_config_assign_mask[i] = 0;
}
+static void df_v3_6_sw_fini(struct amdgpu_device *adev)
+{
+
+ device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
+
+}
+
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
@@ -537,6 +544,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
+ .sw_fini = df_v3_6_sw_fini,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
.get_fb_channel_number = df_v3_6_get_fb_channel_number,
.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
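The df_v1_7 and df_v3_6 hunks add a .sw_fini hook to amdgpu_df_funcs so teardown mirrors setup; for df v3.6 that means removing the df_cntr_avail sysfs attribute presumably published during sw_init. A generic sketch of that create/remove pairing, assuming nothing amdgpu-specific (attribute name and value are placeholders, the device_create_file()/device_remove_file() calls are the real driver-core API):

	#include <linux/device.h>

	static ssize_t example_avail_show(struct device *dev,
					  struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 0);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(example_avail);

	static int example_sw_init(struct device *dev)
	{
		return device_create_file(dev, &dev_attr_example_avail);
	}

	static void example_sw_fini(struct device *dev)
	{
		/* undo exactly what sw_init published */
		device_remove_file(dev, &dev_attr_example_avail);
	}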
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 53090eae0082..ca5f0e7ea1ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -127,7 +127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};
static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
@@ -171,7 +171,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
};
static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
@@ -1469,7 +1469,7 @@ static int gfx_v10_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v10_0_pfp_fini(adev);
@@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
-static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
+static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
+ int r;
+
+ if (adev->in_gpu_reset) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (!r) {
+ adev->gfx.rlc.funcs->get_csb_buffer(adev,
+ adev->gfx.rlc.cs_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ }
+
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ if (r)
+ return r;
+ }
+
/* csib */
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+
+ return 0;
}
-static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
+static int gfx_v10_0_init_pg(struct amdgpu_device *adev)
{
int i;
+ int r;
- gfx_v10_0_init_csb(adev);
+ r = gfx_v10_0_init_csb(adev);
+ if (r)
+ return r;
for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
/* TODO: init power gating */
- return;
+ return 0;
}
void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
@@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
if (r)
return r;
- gfx_v10_0_init_pg(adev);
+
+ r = gfx_v10_0_init_pg(adev);
+ if (r)
+ return r;
/* enable RLC SRM */
gfx_v10_0_rlc_enable_srm(adev);
@@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- gfx_v10_0_init_pg(adev);
+ r = gfx_v10_0_init_pg(adev);
+ if (r)
+ return r;
+
adev->gfx.rlc.funcs->start(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
@@ -2400,7 +2431,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
@@ -2413,7 +2444,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- udelay(50);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
+
+ return 0;
}
static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
@@ -2470,7 +2511,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2540,7 +2581,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -2609,7 +2650,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2930,7 +2971,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3114,6 +3155,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.gfx_ring[0];
if (!adev->in_gpu_reset && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
@@ -3125,14 +3167,15 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
#endif
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else if (adev->in_gpu_reset) {
/* reset mqd with the backup copy */
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
+ adev->wb.wb[ring->wptr_offs] = 0;
amdgpu_ring_clear_ring(ring);
#ifdef BRING_UP_DEBUG
mutex_lock(&adev->srbm_mutex);
@@ -4384,7 +4427,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -4404,8 +4447,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -5331,15 +5374,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
- /* init asic gds info */
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- default:
- adev->gds.gds_size = 0x10000;
- adev->gds.gds_compute_max_wave_id = 0x4ff;
- break;
- }
+ unsigned total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines;
+ adev->gds.gds_size = 0x10000;
+ adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
}
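Among the gfx_v10_0 changes above, gfx_v10_0_cp_gfx_enable() swaps a fixed udelay(50) for a bounded poll of CP_STAT, and gfx_v10_0_init_csb()/init_pg() now propagate buffer-object errors instead of returning void. A compact sketch of the poll-with-timeout idiom, assuming it sits next to the hunks above so RREG32_SOC15, mmCP_STAT and adev->usec_timeout are already in scope (the wrapper function is illustrative):

	/* returns 0 once the CP reports idle, -ETIMEDOUT otherwise */
	static int wait_cp_idle(struct amdgpu_device *adev)
	{
		int i;

		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
				return 0;
			udelay(1);	/* poll in 1 us steps up to the timeout */
		}
		return -ETIMEDOUT;
	}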
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 87dd55e9d72b..ffbde9136372 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2103,7 +2103,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 97cf0b536873..faf2ffce5837 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -131,6 +131,18 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
+struct ras_gfx_subblock_reg {
+ const char *name;
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ uint32_t sec_count_mask;
+ uint32_t sec_count_shift;
+ uint32_t ded_count_mask;
+ uint32_t ded_count_shift;
+};
+
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -517,9 +529,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -582,9 +594,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
@@ -676,9 +688,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
@@ -691,6 +703,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -1342,7 +1356,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
/* TODO: Determine if MEC2 JT FW loading can be removed
for all GFX V9 asic and above */
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->asic_type != CHIP_ARCTURUS &&
+ adev->asic_type != CHIP_RENOIR) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
info->fw = adev->gfx.mec2_fw;
@@ -1974,190 +1989,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
- struct amdgpu_ngg_buf *ngg_buf,
- int size_se,
- int default_size_se)
-{
- int r;
-
- if (size_se < 0) {
- dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
- return -EINVAL;
- }
- size_se = size_se ? size_se : default_size_se;
-
- ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
- r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &ngg_buf->bo,
- &ngg_buf->gpu_addr,
- NULL);
- if (r) {
- dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
- return r;
- }
- ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
-
- return r;
-}
-
-static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < NGG_BUF_MAX; i++)
- amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
- &adev->gfx.ngg.buf[i].gpu_addr,
- NULL);
-
- memset(&adev->gfx.ngg.buf[0], 0,
- sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
-
- adev->gfx.ngg.init = false;
-
- return 0;
-}
-
-static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
-{
- int r;
-
- if (!amdgpu_ngg || adev->gfx.ngg.init == true)
- return 0;
-
- /* GDS reserve memory: 64 bytes alignment */
- adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
- adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
- adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
-
- /* Primitive Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
- amdgpu_prim_buf_per_se,
- 64 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Primitive Buffer\n");
- goto err;
- }
-
- /* Position Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
- amdgpu_pos_buf_per_se,
- 256 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Position Buffer\n");
- goto err;
- }
-
- /* Control Sideband */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
- amdgpu_cntl_sb_buf_per_se,
- 256);
- if (r) {
- dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
- goto err;
- }
-
- /* Parameter Cache, not created by default */
- if (amdgpu_param_buf_per_se <= 0)
- goto out;
-
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
- amdgpu_param_buf_per_se,
- 512 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Parameter Cache\n");
- goto err;
- }
-
-out:
- adev->gfx.ngg.init = true;
- return 0;
-err:
- gfx_v9_0_ngg_fini(adev);
- return r;
-}
-
-static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
- int r;
- u32 data, base;
-
- if (!amdgpu_ngg)
- return 0;
-
- /* Program buffer size */
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_POS].size >> 8);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
-
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
-
- /* Program buffer base address */
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
-
- /* Clear GDS reserved memory */
- r = amdgpu_ring_alloc(ring, 17);
- if (r) {
- DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
- ring->name, r);
- return r;
- }
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- (adev->gds.gds_size +
- adev->gfx.ngg.gds_reserve_size));
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
- amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
- PACKET3_DMA_DATA_DST_SEL(1) |
- PACKET3_DMA_DATA_SRC_SEL(2)));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
- adev->gfx.ngg.gds_reserve_size);
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
-
- amdgpu_ring_commit(ring);
-
- return 0;
-}
-
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
@@ -2325,10 +2156,6 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_ngg_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -2338,19 +2165,7 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
- adev->gfx.ras_if) {
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_gfx_ras_fini(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2358,11 +2173,10 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v9_0_mec_fini(adev);
- gfx_v9_0_ngg_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
@@ -2930,7 +2744,10 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
* And it's needed by gfxoff feature.
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
- gfx_v9_1_init_rlc_save_restore_list(adev);
+ if (adev->asic_type == CHIP_VEGA12 ||
+ (adev->asic_type == CHIP_RAVEN &&
+ adev->rev_id >= 8))
+ gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
@@ -3901,12 +3718,6 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
- r = gfx_v9_0_ngg_en(adev);
- if (r)
- return r;
- }
-
return r;
}
@@ -3948,8 +3759,10 @@ static int gfx_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
- /* disable KCQ to avoid CPC touch memory not valid anymore */
- gfx_v9_0_kcq_disable(adev);
+ /* DF freeze and kcq disable will fail */
+ if (!amdgpu_ras_intr_triggered())
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+ gfx_v9_0_kcq_disable(adev);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -4085,9 +3898,22 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+ uint32_t tmp, lsb, msb, i = 0;
+ do {
+ if (i != 0)
+ udelay(1);
+ tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
+ lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
+ msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
+ i++;
+ } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
+ clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ } else {
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ }
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
@@ -4202,6 +4028,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
@@ -4221,6 +4048,10 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
int i, r;
+ /* only support when RAS is enabled */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 7);
if (r) {
DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
@@ -4411,33 +4242,14 @@ static int gfx_v9_0_early_init(void *handle)
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry);
-
static int gfx_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .cb = gfx_v9_0_process_ras_data_cb,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__GFX,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "gfx",
- };
int r;
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
+ r = amdgpu_gfx_ras_late_init(adev);
+ if (r)
+ return r;
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
@@ -4448,72 +4260,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
- if (r)
- goto irq;
-
return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
}
static int gfx_v9_0_late_init(void *handle)
@@ -4578,16 +4325,14 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
{
amdgpu_gfx_rlc_enter_safe_mode(adev);
- if (is_support_sw_smu(adev) && !enable)
- smu_set_gfx_cgpg(&adev->smu, enable);
-
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
} else {
gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
- gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
amdgpu_gfx_rlc_exit_safe_mode(adev);
@@ -4856,8 +4601,6 @@ static int gfx_v9_0_set_powergating_state(void *handle,
gfx_v9_0_enable_cp_power_gating(adev, false);
/* update gfx cgpg state */
- if (is_support_sw_smu(adev) && enable)
- smu_set_gfx_cgpg(&adev->smu, enable);
gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
/* update mgcg state */
@@ -4988,7 +4731,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -5008,8 +4751,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -5741,313 +5484,446 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry)
-{
- /* TODO ue will trigger an interrupt. */
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->gfx.funcs->query_ras_error_count)
- adev->gfx.funcs->query_ras_error_count(adev, err_data);
- amdgpu_ras_reset_gpu(adev, 0);
- return AMDGPU_RAS_SUCCESS;
-}
-static const struct {
- const char *name;
- uint32_t ip;
- uint32_t inst;
- uint32_t seg;
- uint32_t reg_offset;
- uint32_t per_se_instance;
- int32_t num_instance;
- uint32_t sec_count_mask;
- uint32_t ded_count_mask;
-} gfx_ras_edc_regs[] = {
- { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1,
- REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
- { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1,
- REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) },
- { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 },
- { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 },
- { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) },
- { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 },
- { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) },
- { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) },
- { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 },
- { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 },
- { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 },
- { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) },
- { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 },
+static const struct ras_gfx_subblock_reg ras_subblock_regs[] = {
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
+ },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
+ },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
+ 0, 0
+ },
+ { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
+ 0, 0
+ },
+ { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
+ },
+ { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
+ },
+ { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
+ 0, 0
+ },
{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
- 0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
+ },
{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
+ },
{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
+ 0, 0
+ },
{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
- { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1,
- REG_FIELD_MASK(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), 0 },
- { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
- { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 },
- { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 },
- { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 },
- { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 },
- { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
- REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 },
- { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
- REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 },
- { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
- { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
- { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
- { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
- { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
- { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 },
- { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 },
- { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 },
- { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 },
- { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 },
- { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 },
- { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 },
- { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 },
- { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
+ },
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
+ },
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
+ },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
+ },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
+ },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
- 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
- 0 },
- { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 },
+ SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
+ 0, 0
+ },
{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
- 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
- 0 },
- { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 },
- { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72,
- REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 },
- { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
- { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
- { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 },
- { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 },
- { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 },
- { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
- { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
- { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
- { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
- { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 },
- { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) },
- { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) },
- { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) },
- { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) },
- { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) },
- { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) },
- { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
+ },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
+ },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ 0, 0
+ },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
+ },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
+ },
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
+ },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
+ },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
+ },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
+ },
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
+ },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
+ },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
+ },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
+ },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
+ },
{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
+ },
{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
+ },
{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
- { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
- { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
- { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
- { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
- { "SQC_INST_BANKA_UTCL1_MISS_FIFO",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
- 0 },
- { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_DIRTY_BIT_RAM",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 },
- { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
- { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
- { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
- { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
- { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
- { "SQC_INST_BANKB_UTCL1_MISS_FIFO",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
- 0 },
- { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_DIRTY_BIT_RAM",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 },
- { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
- { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
- { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
- { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
- { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
- { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 },
- { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 },
- { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 },
- { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 },
- { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 },
- { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
- { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
- { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
- { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 },
- { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 },
- { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 },
- { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 },
- { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 },
- { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
+ },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
+ },
+ { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ 0, 0
+ }
};
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
@@ -6096,14 +5972,217 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
+static const char *vml2_mems[] = {
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
+};
+
+static const char *vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_LOG_FIFO",
+};
+
+static const char *atc_l2_cache_2m_mems[] = {
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char *atc_l2_cache_4k_mems[] = {
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
+static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
+ struct ras_err_data *err_data)
+{
+ uint32_t i, data;
+ uint32_t sec_count, ded_count;
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+ for (i = 0; i < 16; i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < 7; i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_walker_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_walker_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_2m_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_4k_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = (data & 0x00018000L) >> 0xf;
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_4k_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+
+ return 0;
+}
+
+static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id, uint32_t value,
+ uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) {
+		if (ras_subblock_regs[i].reg_offset != reg->reg_offset ||
+ ras_subblock_regs[i].seg != reg->seg ||
+ ras_subblock_regs[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = (value &
+ ras_subblock_regs[i].sec_count_mask) >>
+ ras_subblock_regs[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ ras_subblock_regs[i].name,
+ se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ ras_subblock_regs[i].ded_count_mask) >>
+ ras_subblock_regs[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ ras_subblock_regs[i].name,
+ se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- uint32_t sec_count, ded_count;
- uint32_t i;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i, j, k;
uint32_t reg_value;
- uint32_t se_id, instance_id;
if (adev->asic_type != CHIP_VEGA20)
return -EINVAL;
@@ -6112,71 +6191,29 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count = 0;
mutex_lock(&adev->grbm_idx_mutex);
- for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) {
- for (instance_id = 0; instance_id < 256; instance_id++) {
- for (i = 0;
- i < sizeof(gfx_ras_edc_regs) / sizeof(gfx_ras_edc_regs[0]);
- i++) {
- if (se_id != 0 &&
- !gfx_ras_edc_regs[i].per_se_instance)
- continue;
- if (instance_id >= gfx_ras_edc_regs[i].num_instance)
- continue;
-
- gfx_v9_0_select_se_sh(adev, se_id, 0,
- instance_id);
-
- reg_value = RREG32(
- adev->reg_offset[gfx_ras_edc_regs[i].ip]
- [gfx_ras_edc_regs[i].inst]
- [gfx_ras_edc_regs[i].seg] +
- gfx_ras_edc_regs[i].reg_offset);
- sec_count = reg_value &
- gfx_ras_edc_regs[i].sec_count_mask;
- ded_count = reg_value &
- gfx_ras_edc_regs[i].ded_count_mask;
- if (sec_count) {
- DRM_INFO(
- "Instance[%d][%d]: SubBlock %s, SEC %d\n",
- se_id, instance_id,
- gfx_ras_edc_regs[i].name,
- sec_count);
- err_data->ce_count++;
- }
- if (ded_count) {
- DRM_INFO(
- "Instance[%d][%d]: SubBlock %s, DED %d\n",
- se_id, instance_id,
- gfx_ras_edc_regs[i].name,
- ded_count);
- err_data->ue_count++;
- }
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
+ for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
+ for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0, k);
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ if (reg_value)
+ __get_ras_error_count(&sec_ded_counter_registers[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
}
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
- if (!ras_if)
- return 0;
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
- ih_data.head = *ras_if;
+ gfx_v9_0_query_utc_edc_status(adev, err_data);
- DRM_ERROR("CP ECC ERROR IRQ\n");
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -6343,7 +6380,7 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
.set = gfx_v9_0_set_cp_ecc_error_state,
- .process = gfx_v9_0_cp_ecc_error_irq,
+ .process = amdgpu_gfx_cp_ecc_error_irq,
};
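
The new __get_ras_error_count() above sums SEC and DED errors by masking and shifting each sub-block's field out of a single counter register, using the ras_subblock_regs[] table. Below is a minimal, self-contained sketch of that extraction pattern; the table contents, bit layout and register value are invented for illustration and are not the driver's real data.

/*
 * Illustrative sketch of per-sub-block SEC/DED extraction.
 * All names, masks and values here are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct edc_field {
	const char *name;
	uint32_t sec_mask, sec_shift;
	uint32_t ded_mask, ded_shift;
};

static const struct edc_field fields[] = {
	/* hypothetical layout: SEC in bits 0-7, DED in bits 8-15 */
	{ "EXAMPLE_MEM", 0x000000ff, 0, 0x0000ff00, 8 },
};

int main(void)
{
	uint32_t reg_value = 0x00000302;	/* pretend read: SEC=2, DED=3 */
	uint32_t sec_count = 0, ded_count = 0;
	size_t i;

	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		sec_count += (reg_value & fields[i].sec_mask) >> fields[i].sec_shift;
		ded_count += (reg_value & fields[i].ded_mask) >> fields[i].ded_shift;
	}

	printf("%s: SEC %u, DED %u\n", fields[0].name,
	       (unsigned)sec_count, (unsigned)ded_count);
	return 0;
}
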
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6ce37ce77d14..e91bd7945777 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -178,6 +178,8 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
}
@@ -365,6 +367,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index db10640a3b2f..b70c7b483c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -46,21 +46,25 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* two-register stride from mmGCVM_CONTEXT0_* to mmGCVM_CONTEXT1_* */
+ int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
-
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -175,6 +179,8 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}
@@ -350,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
index 06807940748b..392b8cd94fc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
@@ -31,5 +31,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v2_0_init(struct amdgpu_device *adev);
u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
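
The new gfxhub_v2_0_setup_vm_pt_regs() computes the per-VMID page-table base registers from the distance between the CONTEXT0 and CONTEXT1 register pairs. A rough sketch of that stride-based addressing follows; the register offsets and the write helper are placeholders, not the driver's real symbols.

/*
 * Sketch of stride-based per-VMID register programming.
 * Offsets and reg_write() are stand-ins for the real MMIO accessors.
 */
#include <stdint.h>
#include <stdio.h>

#define CTX0_PT_BASE_LO 0x100	/* hypothetical register offsets */
#define CTX0_PT_BASE_HI 0x101
#define CTX1_PT_BASE_LO 0x102

static void reg_write(uint32_t reg, uint32_t val)
{
	/* stand-in for the real MMIO write helper */
	printf("write reg 0x%03x = 0x%08x\n", (unsigned)reg, (unsigned)val);
}

static void setup_vm_pt_regs(uint32_t vmid, uint64_t page_table_base)
{
	/* distance between two consecutive per-context register blocks */
	uint32_t stride = CTX1_PT_BASE_LO - CTX0_PT_BASE_LO;

	reg_write(CTX0_PT_BASE_LO + stride * vmid,
		  (uint32_t)(page_table_base & 0xffffffff));
	reg_write(CTX0_PT_BASE_HI + stride * vmid,
		  (uint32_t)(page_table_base >> 32));
}

int main(void)
{
	setup_vm_pt_regs(3, 0x0000123456789000ULL);
	return 0;
}
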
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5c7d5f73f54f..321f8a997be8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -235,6 +235,29 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
const unsigned eng = 17;
unsigned int i;
+ spin_lock(&adev->gmc.invalidate_lock);
+ /*
+	 * The gpuvm invalidate acknowledge state may be lost across a
+	 * power-gating off cycle; acquire a semaphore before the invalidation
+	 * and release it afterwards so we do not enter the power-gated state
+	 * while a flush is pending, as a workaround for the issue.
+ */
+
+	/* TODO: semaphore handling for GFXHUB still needs further debugging as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) {
+ for (i = 0; i < adev->usec_timeout; i++) {
+			/* a read return value of 1 means the semaphore is acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/*
@@ -254,6 +277,17 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
udelay(1);
}
+	/* TODO: semaphore handling for GFXHUB still needs further debugging as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1)
+ /*
+ * add semaphore release after invalidation,
+ * write with 0 means semaphore release
+ */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
+ spin_unlock(&adev->gmc.invalidate_lock);
+
if (i < adev->usec_timeout)
return;
@@ -278,7 +312,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int r;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
mutex_lock(&adev->mman.gtt_window_lock);
@@ -338,6 +372,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+ /*
+	 * The gpuvm invalidate acknowledge state may be lost across a
+	 * power-gating off cycle; acquire a semaphore before the invalidation
+	 * and release it afterwards so we do not enter the power-gated state
+	 * while a flush is pending, as a workaround for the issue.
+ */
+
+	/* TODO: semaphore handling for GFXHUB still needs further debugging as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/* a read return value of 1 means the semaphore is acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -348,6 +396,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+	/* TODO: semaphore handling for GFXHUB still needs further debugging as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ /*
+ * add semaphore release after invalidation,
+ * write with 0 means semaphore release
+ */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
@@ -396,43 +453,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 1 system
* 0 valid
*/
-static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
+{
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -459,12 +496,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
+ *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags |= AMDGPU_PTE_SNOOPED;
+ *flags |= AMDGPU_PTE_LOG;
+ *flags |= AMDGPU_PTE_SYSTEM;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+}
+
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v10_0_get_vm_pde
+ .map_mtype = gmc_v10_0_map_mtype,
+ .get_vm_pde = gmc_v10_0_get_vm_pde,
+ .get_vm_pte = gmc_v10_0_get_vm_pte
};
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -518,8 +575,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (!amdgpu_sriov_vf(adev))
- base = gfxhub_v2_0_get_fb_location(adev);
+ base = gfxhub_v2_0_get_fb_location(adev);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
@@ -539,24 +595,13 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
-
- if (!amdgpu_emu_mode)
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- else {
- /* hard code vram_width for emulation */
- chansize = 128;
- numchan = 1;
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* Could aper size report 0 ? */
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
adev->gmc.visible_vram_size = adev->gmc.aper_size;
@@ -635,7 +680,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v10_0_sw_init(void *handle)
{
- int r;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v2_0_init(adev);
@@ -643,7 +688,15 @@ static int gmc_v10_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (!amdgpu_emu_mode)
+ adev->gmc.vram_width = vram_width;
+ else
+ adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -793,7 +846,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* Flush HDP after it is initialized */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
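
The gmc_v10_0 hunks above bracket the invalidation request/ack handshake with a semaphore acquire before and a release after, on the MMHUBs, so the acknowledge state cannot be lost across a power-gating cycle. A condensed sketch of that ordering follows; the register indices, helpers and timeout are hypothetical, not the driver's real interfaces.

/*
 * Sketch of the acquire -> invalidate -> wait-for-ack -> release ordering.
 * The "register file" and helpers below are stubs for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

enum { SEM, REQ, ACK };			/* hypothetical register indices */

static uint32_t fake_regs[3];		/* stand-in register file */

static uint32_t reg_read(int reg)            { return fake_regs[reg]; }
static void reg_write(int reg, uint32_t val) { fake_regs[reg] = val; }
static void wait_us(unsigned us)             { (void)us; }

static bool flush_hub_with_sem(uint32_t req, uint32_t vmid_bit, unsigned timeout_us)
{
	unsigned i;

	/* a read returning 1 means the semaphore was acquired */
	for (i = 0; i < timeout_us && !(reg_read(SEM) & 0x1); i++)
		wait_us(1);

	reg_write(REQ, req);		/* kick the invalidation */

	/* wait for the per-VMID acknowledge bit */
	for (i = 0; i < timeout_us && !(reg_read(ACK) & vmid_bit); i++)
		wait_us(1);

	reg_write(SEM, 0);		/* writing 0 releases the semaphore */
	return i < timeout_us;
}

int main(void)
{
	fake_regs[SEM] = 1;		/* pretend the semaphore is free */
	fake_regs[ACK] = 1 << 3;	/* pretend VMID 3 already acked */
	return flush_hub_with_sem(0x1, 1 << 3, 100) ? 0 : 1;
}
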
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fb1765e92d1..b205039350b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -386,27 +386,20 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
@@ -1153,7 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
- .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
+ .get_vm_pte = gmc_v6_0_get_vm_pte,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0c3d9bc3a641..f08e5330642d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -463,27 +463,20 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -1343,8 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
- .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v7_0_get_vm_pde
+ .get_vm_pde = gmc_v7_0_get_vm_pde,
+ .get_vm_pte = gmc_v7_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ea764dd9245d..6d96d40fbcb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -686,29 +686,21 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -1711,8 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
- .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v8_0_get_vm_pde
+ .get_vm_pde = gmc_v8_0_get_vm_pde,
+ .get_vm_pte = gmc_v8_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f91337030dc0..3c355fb5d2b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -51,10 +51,12 @@
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
+#include "umc_v6_0.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
@@ -243,44 +245,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry)
-{
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc.funcs->query_ras_error_count)
- adev->umc.funcs->query_ras_error_count(adev, err_data);
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- if (adev->umc.funcs->query_ras_error_address)
- adev->umc.funcs->query_ras_error_address(adev, err_data);
-
- /* only uncorrectable error needs gpu reset */
- if (err_data->ue_count)
- amdgpu_ras_reset_gpu(adev, 0);
-
- return AMDGPU_RAS_SUCCESS;
-}
-
-static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -355,6 +319,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
/* If it's the first fault for this address, process it normally */
+ if (retry_fault && !in_interrupt() &&
+ amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -417,7 +385,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
.set = gmc_v9_0_ecc_interrupt_state,
- .process = gmc_v9_0_process_ecc_irq,
+ .process = amdgpu_umc_process_ecc_irq,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
@@ -491,6 +459,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
spin_lock(&adev->gmc.invalidate_lock);
+
+	/*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated state
+	 * and work around the issue.
+	 */
+
+	/* TODO: the semaphore still needs to be debugged before it can be used for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) {
+ for (j = 0; j < adev->usec_timeout; j++) {
+			/* a read return value of 1 means semaphore acquire */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (j >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/*
@@ -506,7 +497,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
break;
udelay(1);
}
+
+	/* TODO: the semaphore still needs to be debugged before it can be used for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1)
+ /*
+ * add semaphore release after invalidation,
+ * write with 0 means semaphore release
+ */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
spin_unlock(&adev->gmc.invalidate_lock);
+
if (j < adev->usec_timeout)
return;
@@ -521,6 +523,20 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+	/*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated state
+	 * and work around the issue.
+	 */
+
+	/* TODO: the semaphore still needs to be debugged before it can be used for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/* a read return value of 1 means semaphore acquire */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -531,6 +547,15 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+	/* TODO: the semaphore still needs to be debugged before it can be used for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ /*
+ * add semaphore release after invalidation,
+ * write with 0 means semaphore release
+ */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
@@ -584,44 +609,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
+static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
-
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
+ case AMDGPU_VM_MTYPE_RW:
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
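Illustrative sketch only, not part of the patch: after this change, gmc_v9_0_map_mtype() returns only the memory-type bits for the requested AMDGPU_VM_MTYPE_* value, while the generic R/W/X bits are handled in common code. The encodings below are invented for the demo; only the shape of the switch mirrors the patch.

/* standalone demo of a memory-type-only mapping helper */
#include <stdint.h>
#include <stdio.h>

enum demo_vm_mtype { DEMO_MTYPE_DEFAULT, DEMO_MTYPE_NC, DEMO_MTYPE_WC,
		     DEMO_MTYPE_RW, DEMO_MTYPE_CC, DEMO_MTYPE_UC };

static uint64_t demo_map_mtype(enum demo_vm_mtype t)
{
	switch (t) {
	case DEMO_MTYPE_WC: return 1ull << 57;	/* write-combined */
	case DEMO_MTYPE_RW: return 2ull << 57;	/* read-write coherent */
	case DEMO_MTYPE_CC: return 3ull << 57;	/* cache-coherent */
	case DEMO_MTYPE_UC: return 4ull << 57;	/* uncached */
	case DEMO_MTYPE_DEFAULT:
	case DEMO_MTYPE_NC:
	default:            return 0;		/* non-coherent default */
	}
}

int main(void)
{
	printf("WC mtype bits: 0x%llx\n",
	       (unsigned long long)demo_map_mtype(DEMO_MTYPE_WC));
	return 0;
}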
@@ -648,12 +654,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
+ *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ !(*flags & AMDGPU_PTE_SYSTEM) &&
+ mapping->bo_va->is_xgmi)
+ *flags |= AMDGPU_PTE_SNOOPED;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v9_0_get_vm_pde
+ .map_mtype = gmc_v9_0_map_mtype,
+ .get_vm_pde = gmc_v9_0_get_vm_pde,
+ .get_vm_pte = gmc_v9_0_get_vm_pte
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -664,6 +692,9 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ adev->umc.funcs = &umc_v6_0_funcs;
+ break;
case CHIP_VEGA20:
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
@@ -681,7 +712,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA20:
- adev->mmhub_funcs = &mmhub_v1_0_funcs;
+ adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
default:
break;
@@ -762,140 +793,10 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
- struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = NULL;
- struct ras_ih_if ih_info = {
- .cb = gmc_v9_0_process_ras_data_cb,
- };
- int r;
-
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
- ras_if = &adev->gmc.umc_ras_if;
- else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
- ras_if = &adev->gmc.mmhub_ras_if;
- else
- BUG();
-
- if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
- amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = *ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info->head = **ras_if;
-
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
- }
-
- amdgpu_ras_debugfs_create(adev, fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, fs_info);
- if (r)
- goto sysfs;
-resume:
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
- r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
- if (r)
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
-}
-
-static int gmc_v9_0_ecc_late_init(void *handle)
-{
- int r;
-
- struct ras_fs_if umc_fs_info = {
- .sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
- };
- struct ras_common_if umc_ras_block = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "umc",
- };
- struct ras_fs_if mmhub_fs_info = {
- .sysfs_name = "mmhub_err_count",
- .debugfs_name = "mmhub_err_inject",
- };
- struct ras_common_if mmhub_ras_block = {
- .block = AMDGPU_RAS_BLOCK__MMHUB,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "mmhub",
- };
-
- r = gmc_v9_0_ecc_ras_block_late_init(handle,
- &umc_fs_info, &umc_ras_block);
- if (r)
- return r;
-
- r = gmc_v9_0_ecc_ras_block_late_init(handle,
- &mmhub_fs_info, &mmhub_ras_block);
- return r;
-}
-
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool r;
+ int r;
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
@@ -929,7 +830,7 @@ static int gmc_v9_0_late_init(void *handle)
}
}
- r = gmc_v9_0_ecc_late_init(handle);
+ r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
@@ -970,33 +871,11 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
int r;
- if (amdgpu_sriov_vf(adev)) {
- /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
- * and DF related registers is not readable, seems hardcord is the
- * only way to set the correct vram_width
- */
- adev->gmc.vram_width = 2048;
- } else if (amdgpu_emu_mode != 1) {
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- }
-
- if (!adev->gmc.vram_width) {
- /* hbm memory channel size */
- if (adev->flags & AMD_IS_APU)
- chansize = 64;
- else
- chansize = 128;
-
- numchan = adev->df_funcs->get_hbm_channel_number(adev);
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
@@ -1108,7 +987,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v9_0_sw_init(void *handle)
{
- int r;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v1_0_init(adev);
@@ -1119,7 +998,32 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (amdgpu_sriov_vf(adev))
+		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
+		 * and the DF related registers are not readable; hardcoding seems to be
+		 * the only way to set the correct vram_width
+		 */
+ adev->gmc.vram_width = 2048;
+ else if (amdgpu_emu_mode != 1)
+ adev->gmc.vram_width = vram_width;
+
+ if (!adev->gmc.vram_width) {
+ int chansize, numchan;
+
+ /* hbm memory channel size */
+ if (adev->flags & AMD_IS_APU)
+ chansize = 64;
+ else
+ chansize = 128;
+
+ numchan = adev->df_funcs->get_hbm_channel_number(adev);
+ adev->gmc.vram_width = numchan * chansize;
+ }
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_RAVEN:
adev->num_vmhubs = 2;
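Illustrative sketch only, not part of the patch: when the VBIOS query above does not yield a usable vram_width, the fallback multiplies the HBM channel count by the per-channel width (64 bits on APUs, 128 otherwise). is_apu and channels are stand-in inputs for the demo.

/* standalone demo of the vram width fallback computation */
#include <stdio.h>

static int demo_vram_width(int reported_width, int is_apu, int channels)
{
	if (reported_width)
		return reported_width;		/* trust the VBIOS value */
	return channels * (is_apu ? 64 : 128);	/* fallback: channels * channel size */
}

int main(void)
{
	printf("fallback width: %d bits\n", demo_vram_width(0, 0, 16)); /* 2048 */
	return 0;
}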
@@ -1240,33 +1144,7 @@ static int gmc_v9_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
void *stolen_vga_buf;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
- adev->gmc.umc_ras_if) {
- struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /* remove fs first */
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /* remove the IH */
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
- adev->gmc.mmhub_ras_if) {
- struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;
-
- /* remove fs and disable ras feature */
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
+ amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1316,13 +1194,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*/
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
- int r, i;
- bool value;
- u32 tmp;
-
- amdgpu_device_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
+ int r;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1332,15 +1204,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- /* TODO for renoir */
- mmhub_v1_0_update_power_gating(adev, true);
- break;
- default:
- break;
- }
-
r = gfxhub_v1_0_gart_enable(adev);
if (r)
return r;
@@ -1352,6 +1215,49 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+ adev->gart.ready = true;
+ return 0;
+}
+
+static int gmc_v9_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool value;
+ int r, i;
+ u32 tmp;
+
+ /* The sequence of these two function calls matters.*/
+ gmc_v9_0_init_golden_registers(adev);
+
+ if (adev->mode_info.num_crtc) {
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ /* Lockout access through VGA aperture*/
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+
+ /* disable VGA render */
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ }
+ }
+
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ /* TODO for renoir */
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+ case CHIP_ARCTURUS:
+ WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+ break;
+ default:
+ break;
+ }
+
WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
@@ -1361,7 +1267,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
/* After HDP is initialized, flush HDP.*/
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -1377,28 +1283,8 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
for (i = 0; i < adev->num_vmhubs; ++i)
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
- (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
- adev->gart.ready = true;
- return 0;
-}
-
-static int gmc_v9_0_hw_init(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* The sequence of these two function calls matters.*/
- gmc_v9_0_init_golden_registers(adev);
-
- if (adev->mode_info.num_crtc) {
- /* Lockout access through VGA aperture*/
- WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-
- /* disable VGA render */
- WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- }
+ if (adev->umc.funcs && adev->umc.funcs->init_registers)
+ adev->umc.funcs->init_registers(adev);
r = gmc_v9_0_gart_enable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 04cd4b6f95d4..28105e4af507 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -206,6 +206,8 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}
@@ -418,6 +420,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
@@ -616,5 +620,6 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
}
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index b39bea6f54e9..a7cb185d639a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -31,20 +31,25 @@
#include "soc15_common.h"
-static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* two registers' distance between mmMMVM_CONTEXT0_* and mmMMVM_CONTEXT1_* */
+ int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
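Illustrative sketch only, not part of the patch: the new mmhub_v2_0_setup_vm_pt_regs() takes a vmid and derives the per-context register pair from the distance between the CONTEXT0 and CONTEXT1 base-address registers. The register numbers below are arbitrary demo values.

/* standalone demo of the base + stride * vmid register addressing */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CTX0_LO 0x100
#define DEMO_CTX1_LO 0x102	/* two registers further on */

static uint32_t demo_ctx_lo_reg(uint32_t vmid)
{
	uint32_t stride = DEMO_CTX1_LO - DEMO_CTX0_LO;

	return DEMO_CTX0_LO + stride * vmid;	/* LO32 register for this vmid */
}

int main(void)
{
	printf("vmid 5 LO32 reg: 0x%x\n", (unsigned)demo_ctx_lo_reg(5));
	return 0;
}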
@@ -161,6 +166,8 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}
@@ -341,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
index db16f3ece218..3ea4344f0315 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
@@ -31,5 +31,7 @@ void mmhub_v2_0_init(struct amdgpu_device *adev);
int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 9ed178fa241c..66efe2f7bd76 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -249,6 +249,8 @@ static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
@@ -502,6 +504,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev)
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
new file mode 100644
index 000000000000..0d8767eb7a70
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "nbio/nbio_2_3_offset.h"
+#include "nbio/nbio_2_3_sh_mask.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_1_0_sh_mask.h"
+#include "soc15.h"
+#include "navi10_ih.h"
+#include "soc15_common.h"
+#include "mxgpu_nv.h"
+#include "mxgpu_ai.h"
+
+static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
+{
+ WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
+}
+
+static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
+{
+ WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
+}
+
+/*
+ * this peek_msg may *only* be called from the IRQ routine, because in the
+ * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
+ * already been set to 1 by the host.
+ *
+ * if not called from the IRQ routine, peek_msg is not guaranteed to return
+ * the correct value, since it does not check that RCV_MSG_VALID has been
+ * set by the host before reading RCV_DW0.
+ */
+static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
+{
+ return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+}
+
+
+static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
+ enum idh_event event)
+{
+ u32 reg;
+
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ if (reg != event)
+ return -ENOENT;
+
+ xgpu_nv_mailbox_send_ack(adev);
+
+ return 0;
+}
+
+static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
+{
+ return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
+}
+
+static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
+{
+ int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
+ u8 reg;
+
+ do {
+ reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
+ if (reg & 2)
+ return 0;
+
+ mdelay(5);
+ timeout -= 5;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
+
+ return -ETIME;
+}
+
+static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+{
+ int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
+
+ do {
+ r = xgpu_nv_mailbox_rcv_msg(adev, event);
+ if (!r)
+ return 0;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+
+ return -ETIME;
+}
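Illustrative sketch only, not part of the patch: xgpu_nv_poll_ack() and xgpu_nv_poll_msg() above share the same shape, retrying a cheap check, sleeping a fixed step, and giving up once the millisecond budget is spent. check() and the sleep are stand-ins for the real mailbox read and msleep().

/* standalone demo of the poll-with-timeout pattern */
#include <stdio.h>

static int demo_poll(int (*check)(void), int step_ms, int budget_ms)
{
	int remaining = budget_ms;

	do {
		if (check())
			return 0;		/* event arrived */
		/* msleep(step_ms) in the real driver */
		remaining -= step_ms;
	} while (remaining > step_ms);

	return -1;				/* timed out */
}

static int demo_never(void) { return 0; }	/* event that never arrives */

int main(void)
{
	printf("poll result: %d\n", demo_poll(demo_never, 10, 100));
	return 0;
}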
+
+static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3)
+{
+ u32 reg;
+ int r;
+ uint8_t trn;
+
+	/* IMPORTANT:
+	 * clear TRN_MSG_VALID to clear host's RCV_MSG_ACK; once host's
+	 * RCV_MSG_ACK is cleared, hw automatically clears VF's TRN_MSG_ACK,
+	 * otherwise the xgpu_nv_poll_ack() below will return immediately
+	 */
+ do {
+ xgpu_nv_mailbox_set_valid(adev, false);
+ trn = xgpu_nv_peek_ack(adev);
+ if (trn) {
+ pr_err("trn=%x ACK should not assert! wait again !\n", trn);
+ msleep(1);
+ }
+ } while (trn);
+
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
+ reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
+ MSGBUF_DATA, req);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
+ reg);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
+ data1);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
+ data2);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
+ data3);
+
+ xgpu_nv_mailbox_set_valid(adev, true);
+
+ /* start to poll ack */
+ r = xgpu_nv_poll_ack(adev);
+ if (r)
+ pr_err("Doesn't get ack from pf, continue\n");
+
+ xgpu_nv_mailbox_set_valid(adev, false);
+}
+
+static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ int r;
+
+ xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ /* start to check msg if request is idh_req_gpu_init_access */
+ if (req == IDH_REQ_GPU_INIT_ACCESS ||
+ req == IDH_REQ_GPU_FINI_ACCESS ||
+ req == IDH_REQ_GPU_RESET_ACCESS) {
+ r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ if (r) {
+ pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
+ return r;
+ }
+ /* Retrieve checksum from mailbox2 */
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
+ adev->virt.fw_reserve.checksum_key =
+ RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ }
+ }
+
+ return 0;
+}
+
+static int xgpu_nv_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
+static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+
+ req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
+ return xgpu_nv_send_access_requests(adev, req);
+}
+
+static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+ int r = 0;
+
+ req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
+ r = xgpu_nv_send_access_requests(adev, req);
+
+ return r;
+}
+
+static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
+ int locked;
+
+	/* block amdgpu_gpu_recover till msg FLR COMPLETE is received,
+	 * otherwise the mailbox msg will be ruined/reset by
+	 * the VF FLR.
+	 *
+	 * we can unlock the lock_reset to allow "amdgpu_job_timedout"
+	 * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
+	 * which means the host side has finished this VF's FLR.
+	 */
+ locked = mutex_trylock(&adev->lock_reset);
+ if (locked)
+ adev->in_gpu_reset = 1;
+
+ do {
+ if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ goto flr_done;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+flr_done:
+ if (locked) {
+ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
+ }
+
+ /* Trigger recovery for world switch failure if no TDR */
+ if (amdgpu_device_should_recover_gpu(adev))
+ amdgpu_device_gpu_recover(adev, NULL);
+}
+
+static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.flr_work);
+ break;
+	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
+	 * ignore it here since the polling thread will handle it;
+	 * other msgs like flr complete are not handled here.
+	 */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_ack_irq,
+ .process = xgpu_nv_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_rcv_irq,
+ .process = xgpu_nv_mailbox_rcv_irq,
+};
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
+const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
+ .req_full_gpu = xgpu_nv_request_full_gpu_access,
+ .rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .reset_gpu = xgpu_nv_request_reset,
+ .wait_reset = NULL,
+ .trans_msg = xgpu_nv_mailbox_trans_msg,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
new file mode 100644
index 000000000000..99b15f6865cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MXGPU_NV_H__
+#define __MXGPU_NV_H__
+
+#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
+
+extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
+
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 9fe08408db58..9af73567e716 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
navi10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
@@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
- adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
ih->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
index a56c93620e78..88efaecf9f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi10_ip_offset.h"
int navi10_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
index cadc7603ca41..a786d159e5e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi12_ip_offset.h"
int navi12_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
index 3b5f0f65e096..4ea1e8fbb601 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi14_ip_offset.h"
int navi14_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index c05d78d4efc6..f3a3fe746222 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -27,11 +27,21 @@
#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
#define smnPCIE_CONFIG_CNTL 0x11180044
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+
+static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -56,10 +66,9 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
@@ -311,7 +320,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
- .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
@@ -331,4 +339,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
.detect_hw_virt = nbio_v2_3_detect_hw_virt,
+ .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
index 5ae52085f6b7..a43b60acf7f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6590143c3f75..635d9e1fc0a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
- .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 0743a6f016f3..6dc743b73218 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 74eecb768a82..d6cbf26074bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
- .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 508d549c5029..e7aefb252550 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 910fffced43b..0db458f9fafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -23,10 +23,12 @@
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
+#include "amdgpu_ras.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
@@ -266,7 +268,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -306,17 +308,208 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
- uint32_t def, data;
- def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
- data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+}
- if (def != data)
- WREG32_PCIE(smnPCIE_CI_CNTL, data);
+static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+
+static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+	/* The ras_controller_irq enablement should be done in the psp bl when it
+	 * tries to enable the ras feature. The driver only needs to set the correct
+	 * interrupt vector for the bare-metal and sriov use cases respectively
+	 */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+		/* set interrupt vector select bit to 0 to select
+		 * vector 1 for bare metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* By design, the ih cookie for ras_controller_irq should be written
+	 * to the BIF ring instead of the general iv ring. However, due to a known
+	 * bif ring hw bug, it has to be disabled. There is no chance the process
+	 * function will be invoked. Just leave it as a dummy one.
+	 */
+ return 0;
+}
+
+static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+	/* The ras_controller_irq enablement should be done in the psp bl when it
+	 * tries to enable the ras feature. The driver only needs to set the correct
+	 * interrupt vector for the bare-metal and sriov use cases respectively
+	 */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+		/* set interrupt vector select bit to 0 to select
+		 * vector 1 for bare metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* By design, the ih cookie for err_event_athub_irq should be written
+	 * to the BIF ring instead of the general iv ring. However, due to a known
+	 * bif ring hw bug, it has to be disabled. There is no chance the process
+	 * function will be invoked. Just leave it as a dummy one.
+	 */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
+ .set = nbio_v7_4_set_ras_controller_irq_state,
+ .process = nbio_v7_4_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_4_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_4_process_err_event_athub_irq,
+};
+
+static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_4_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_4_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ uint32_t global_sts, central_sts, int_eoi;
+ uint32_t corr, fatal, non_fatal;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+ corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
+ fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
+ non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
+ ParityErrNonFatal);
+
+ if (corr)
+ err_data->ce_count++;
+ if (fatal)
+ err_data->ue_count++;
+
+ if (corr || fatal || non_fatal) {
+ central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
+ /* clear error status register */
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+
+ if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
+ BIFL_RasContller_Intr_Recv)) {
+ /* clear interrupt status register */
+ WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
+ int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
+ int_eoi = REG_SET_FIELD(int_eoi,
+ IOHC_INTERRUPT_EOI, SMI_EOI, 1);
+ WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
+ }
+ }
+}
+
+static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
- .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
@@ -330,6 +523,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
+ .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
@@ -337,4 +531,10 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.init_registers = nbio_v7_4_init_registers,
.detect_hw_virt = nbio_v7_4_detect_hw_virt,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
+ .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+ .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+ .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
};
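Illustrative sketch only, not part of the patch: the nbio_v7_4_query_ras_error_count() hook added above reads one status word, picks the correctable/fatal/non-fatal parity fields out of it and bumps the matching error counters before clearing the status. The field masks below are invented for the demo.

/* standalone demo of classifying a RAS status word into ce/ue counters */
#include <stdint.h>
#include <stdio.h>

struct demo_err_data { unsigned ce_count, ue_count; };

#define DEMO_CORR_MASK   0x1u	/* correctable parity error */
#define DEMO_FATAL_MASK  0x2u	/* fatal (uncorrectable) error */

static void demo_query_ras_errors(uint32_t status, struct demo_err_data *d)
{
	if (status & DEMO_CORR_MASK)
		d->ce_count++;
	if (status & DEMO_FATAL_MASK)
		d->ue_count++;
	/* a real implementation would also write the status back to clear it */
}

int main(void)
{
	struct demo_err_data d = { 0, 0 };

	demo_query_ras_errors(DEMO_CORR_MASK | DEMO_FATAL_MASK, &d);
	printf("ce=%u ue=%u\n", d.ce_count, d.ue_count);
	return 0;
}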
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index c442865bac4f..b1ac82872752 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index de9b995b65b1..0ba66bef5746 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -40,12 +40,14 @@
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
@@ -53,6 +55,7 @@
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
+#include "mxgpu_nv.h"
static const struct amd_ip_funcs nv_common_ip_funcs;
@@ -63,8 +66,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -78,8 +81,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -119,7 +122,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 nv_get_xclk(struct amdgpu_device *adev)
@@ -154,8 +157,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev)
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
- /* TODO: will implement it when SMU header is available */
- return false;
+ u32 *dw_ptr;
+ u32 i, length_dw;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+
+ /* set rom index to 0 */
+ WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ /* read out the rom data */
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+
+ return true;
}
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
@@ -176,6 +198,7 @@ static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
@@ -279,7 +302,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -296,7 +319,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
- if (smu_baco_is_support(smu))
+ if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_MODE1;
@@ -368,8 +391,8 @@ static void nv_program_aspm(struct amdgpu_device *adev)
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version nv_common_ip_block =
@@ -423,9 +446,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
- adev->nbio_funcs = &nbio_v2_3_funcs;
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.ops = &xgpu_nv_virt_ops;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -435,7 +462,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -446,7 +473,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
if (adev->enable_mes)
@@ -458,7 +485,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -469,7 +496,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
break;
@@ -482,12 +509,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void nv_invalidate_hdp(struct amdgpu_device *adev,
@@ -532,6 +559,16 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+
+ /* TODO
+ * dummy implementation for the pcie_replay_count sysfs interface
+ */
+
+ return 0;
+}
+
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
@@ -579,12 +616,16 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.need_full_reset = &nv_need_full_reset,
.get_pcie_usage = &nv_get_pcie_usage,
.need_reset_on_init = &nv_need_reset_on_init,
+ .get_pcie_replay_count = &nv_get_pcie_replay_count,
};
static int nv_common_early_init(void *handle)
{
+#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@@ -667,16 +708,31 @@ static int nv_common_early_init(void *handle)
return -EINVAL;
}
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_setting(adev);
+ xgpu_nv_mailbox_set_irq_funcs(adev);
+ }
+
return 0;
}
static int nv_common_late_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_get_irq(adev);
+
return 0;
}
static int nv_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_add_irq_id(adev);
+
return 0;
}
@@ -694,7 +750,13 @@ static int nv_common_hw_init(void *handle)
/* enable aspm */
nv_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
+ /* remap HDP registers to a hole in mmio space,
+ * in order to expose those registers
+ * to process space
+ */
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);
@@ -856,9 +918,9 @@ static int nv_common_set_clockgating_state(void *handle,
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
nv_update_hdp_mem_power_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -886,7 +948,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_MGCG */
tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
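A note on nv_read_bios_from_rom() above: the VBIOS is fetched through the SMUIO indexed ROM pair, i.e. the index register is rewound to 0 once and ROM_DATA is then read dword by dword while the hardware advances the index. A minimal sketch of that access pattern, assuming a caller-supplied bios_buf/len_bytes pair (illustrative names; the register macros are the ones used in the hunk):

    u32 *dst = (u32 *)bios_buf;              /* destination buffer, assumed dword-aligned */
    u32 ndw = ALIGN(len_bytes, 4) / 4;       /* round the byte count up to whole dwords */
    u32 k;

    /* rewind the ROM index once; ROM_DATA reads auto-increment it */
    WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
    for (k = 0; k < ndw; k++)
        dst[k] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));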
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 5d95e614369a..b345e69ba246 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -40,6 +40,9 @@
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven_ta.bin");
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
@@ -48,7 +51,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *hdr;
-
+ const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -79,7 +82,45 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
adev->psp.asd_start_addr = (uint8_t *)hdr +
le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v10.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)
+ adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version =
+ le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size =
+ le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version =
+ le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size =
+ le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
+
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
out:
if (err) {
dev_err(adev->dev,
@@ -228,6 +269,7 @@ static int psp_v10_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
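The ta.bin image requested in psp_v10_0_init_microcode() above packs both the HDCP and DTM trusted applications into one file: the header gives the HDCP blob's offset from the start of the ucode array, and the DTM blob's offset is counted from the HDCP blob, which is why the two start addresses are chained rather than computed independently. A condensed sketch of that layout arithmetic (same field names as the hunk):

    /* HDCP TA sits at the ucode array offset inside the loaded ta.bin */
    adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
        le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

    /* DTM TA is placed behind it, at an offset relative to the HDCP blob */
    adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
        le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);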
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 10166104b8a3..ffeaa2f5588d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -57,6 +58,8 @@ MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
#define mmRLC_GPM_UCODE_DATA_NV10 0x5b62
#define mmSDMA0_UCODE_ADDR_NV10 0x5880
#define mmSDMA0_UCODE_DATA_NV10 0x5881
+/* memory training timeout define */
+#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
@@ -155,6 +158,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
switch (adev->asic_type) {
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -182,7 +186,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
- case CHIP_ARCTURUS:
break;
default:
BUG();
@@ -205,18 +208,26 @@ out:
return err;
}
+static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check tOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
@@ -233,7 +244,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
/* Copy PSP KDB binary to memory */
memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
- /* Provide the sys driver to bootloader */
+ /* Provide the PSP KDB to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
@@ -252,13 +263,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
@@ -296,13 +305,11 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
/* Wait for bootloader to signify that it is ready, having bit 31 of C2PMSG_35 set to 1 */
@@ -398,6 +405,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
return false;
}
+static int psp_v11_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Write the ring destroy command */
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+
+ /* there might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ return ret;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -407,6 +442,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
if (psp_v11_0_support_vmr_ring(psp)) {
+ ret = psp_v11_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -426,6 +467,14 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
0x80000000, 0x8000FFFF, false);
} else {
+ /* Wait for sOS ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
@@ -451,33 +500,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
return ret;
}
-static int psp_v11_0_ring_stop(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Write the ring destroy command*/
- if (psp_v11_0_support_vmr_ring(psp))
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
- else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
- GFX_CTRL_CMD_ID_DESTROY_RINGS);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) */
- if (psp_v11_0_support_vmr_ring(psp))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
- else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
-
- return ret;
-}
static int psp_v11_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type)
@@ -541,6 +563,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
@@ -889,6 +912,162 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
return psp_rlc_autoload_start(psp);
}
+static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
+{
+ int ret;
+ int i;
+ uint32_t data_32;
+ int max_wait;
+ struct amdgpu_device *adev = psp->adev;
+
+ data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg);
+
+ max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
+ for (i = 0; i < max_wait; i++) {
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret == 0)
+ break;
+ }
+ if (i < max_wait)
+ ret = 0;
+ else
+ ret = -ETIME;
+
+ DRM_DEBUG("training %s %s, cost %d @ %d ms\n",
+ (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
+ (ret == 0) ? "succeed" : "failed",
+ i, adev->usec_timeout/1000);
+ return ret;
+}
+
+static void psp_v11_0_memory_training_fini(struct psp_context *psp)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ kfree(ctx->sys_cache);
+ ctx->sys_cache = NULL;
+}
+
+static int psp_v11_0_memory_training_init(struct psp_context *psp)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
+ DRM_DEBUG("memory training is not supported!\n");
+ return 0;
+ }
+
+ ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
+ if (ctx->sys_cache == NULL) {
+ DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
+ ret = -ENOMEM;
+ goto Err_out;
+ }
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+ ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
+ return 0;
+
+Err_out:
+ psp_v11_0_memory_training_fini(psp);
+ return ret;
+}
+
+/*
+ * save and restore process
+ */
+static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
+{
+ int ret;
+ uint32_t p2c_header[4];
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+ uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+
+ if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
+ DRM_DEBUG("Memory training is not supported.\n");
+ return 0;
+ } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
+ DRM_ERROR("Memory training initialization failure.\n");
+ return -EINVAL;
+ }
+
+ if (psp_v11_0_is_sos_alive(psp)) {
+ DRM_DEBUG("SOS is alive, skip memory training.\n");
+ return 0;
+ }
+
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
+ pcache[0], pcache[1], pcache[2], pcache[3],
+ p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ DRM_DEBUG("Short training depends on restore.\n");
+ ops |= PSP_MEM_TRAIN_RESTORE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_RESTORE) &&
+ pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ pcache[3] == p2c_header[3])) {
+ DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_SAVE) &&
+ p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n");
+ ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ DRM_DEBUG("Memory training ops:%x.\n", ops);
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ return ret;
+ }
+ }
+
+ if (ops & PSP_MEM_TRAIN_SAVE) {
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
+ }
+
+ if (ops & PSP_MEM_TRAIN_RESTORE) {
+ amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
+ PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
+ if (ret) {
+ DRM_ERROR("send training msg failed.\n");
+ return ret;
+ }
+ }
+ ctx->training_cnt++;
+ return 0;
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -909,6 +1088,9 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ras_trigger_error = psp_v11_0_ras_trigger_error,
.ras_cure_posion = psp_v11_0_ras_cure_posion,
.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
+ .mem_training_init = psp_v11_0_memory_training_init,
+ .mem_training_fini = psp_v11_0_memory_training_fini,
+ .mem_training = psp_v11_0_memory_training,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
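psp_v11_0_memory_training() above first resolves the requested ops into a self-consistent set before it touches hardware: a short training needs restored data, a restore needs a valid system-memory copy, a save needs valid training data in VRAM, and when that is missing the driver escalates to a long training, which in turn forces a save. A condensed sketch of just that dependency walk, with hypothetical have_valid_cache/have_valid_vram booleans standing in for the MEM_TRAIN_SYSTEM_SIGNATURE checks:

    if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG)
        ops |= PSP_MEM_TRAIN_RESTORE;            /* short training runs on restored data */

    if ((ops & PSP_MEM_TRAIN_RESTORE) && !have_valid_cache)
        ops |= PSP_MEM_TRAIN_SAVE;               /* nothing valid to restore: save first */

    if ((ops & PSP_MEM_TRAIN_SAVE) && !have_valid_vram)
        ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;      /* nothing valid to save: retrain fully */

    if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
        ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;    /* long training supersedes the short one */
        ops |= PSP_MEM_TRAIN_SAVE;               /* and its result is saved afterwards */
    }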
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index c72e43f8e0be..8f553f6f92d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -378,6 +378,7 @@ static int psp_v12_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index d2c727f6a8bd..fdc00938327b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -454,6 +454,7 @@ static int psp_v3_1_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
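The single-line addition repeated across the psp_v10_0, psp_v11_0, psp_v12_0 and psp_v3_1 cmd_submit paths serves the same purpose in each: the ring frame is written by the CPU, so the HDP is flushed before the new write pointer is published, ensuring the PSP never fetches a stale frame. The resulting ordering, shown as a sketch:

    write_frame->fence_value = index;            /* last field of the frame filled in */
    amdgpu_asic_flush_hdp(adev, NULL);           /* make the frame visible to the PSP */
    /* only now advance the write pointer the PSP polls */
    psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;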
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4554e72c8378..4ef4d31f5231 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -747,13 +747,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
sdma_v4_0_wait_reg_mem(ring, 0, 1,
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
ref_and_mask, ref_and_mask, 10);
}
@@ -1691,102 +1691,17 @@ static int sdma_v4_0_early_init(void *handle)
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry);
static int sdma_v4_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->sdma.ras_if;
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- struct ras_fs_if fs_info = {
- .sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__SDMA,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "sdma",
- };
- int r, i;
-
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- r = 0;
- }
- goto feature;
- }
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- for (i = 0; i < adev->sdma.num_instances; i++) {
- r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
- if (r)
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
+ return amdgpu_sdma_ras_late_init(adev, &ih_info);
}
static int sdma_v4_0_sw_init(void *handle)
@@ -1858,21 +1773,7 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
- adev->sdma.ras_if) {
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /*remove fs first*/
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /*remove the IH*/
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_sdma_ras_fini(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -1892,7 +1793,7 @@ static int sdma_v4_0_hw_init(void *handle)
if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu) ||
- adev->asic_type == CHIP_RENOIR)
+ (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset))
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
if (!amdgpu_sriov_vf(adev))
@@ -2025,52 +1926,28 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry)
{
- uint32_t err_source;
int instance;
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood.
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ goto out;
+
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
if (instance < 0)
- return 0;
-
- switch (entry->src_id) {
- case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
- err_source = 0;
- break;
- case SDMA0_4_0__SRCID__SDMA_ECC:
- err_source = 1;
- break;
- default:
- return 0;
- }
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ goto out;
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
+out:
return AMDGPU_RAS_SUCCESS;
}
-static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -2418,7 +2295,7 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
.set = sdma_v4_0_set_ecc_irq_state,
- .process = sdma_v4_0_process_ecc_irq,
+ .process = amdgpu_sdma_process_ecc_irq,
};
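Most of the code removed from the sdma_v4_0 late_init/sw_fini paths above is the open-coded RAS bring-up (feature enable, IH handler, sysfs/debugfs nodes, teardown on failure) that each RAS-capable IP used to duplicate; it now lives behind the common amdgpu_sdma_ras_late_init()/amdgpu_sdma_ras_fini() helpers, so the IP block only supplies its interrupt callback. The caller side reduces to roughly this (matching the new hunk):

    struct ras_ih_if ih_info = {
        .cb = sdma_v4_0_process_ras_data_cb,     /* the only per-IP piece that remains */
    };

    return amdgpu_sdma_ras_late_init(adev, &ih_info);   /* common helper handles the rest */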
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 8493bfbbc148..f4ad2990f973 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
@@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index, 20);
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 493af42152f2..f2d70a47a3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -975,6 +975,17 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{GRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
{GB_ADDR_CONFIG},
{MC_ARB_RAMCFG},
{GB_TILE_MODE0},
@@ -1633,7 +1644,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
static void si_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1668,12 +1678,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1682,14 +1687,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -1706,15 +1714,23 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
}
for (i = 0; i < 10; i++) {
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -1726,25 +1742,44 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
mdelay(100);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
+
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -1757,15 +1792,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
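The si_pcie_gen3_enable() rework drops the manual capability-offset bookkeeping (pci_pcie_cap() plus pci_read_config_word() at pos + PCI_EXP_*) in favour of the pcie_capability_*() helpers, which locate the PCIe capability themselves, and it replaces the magic LNKCTL2 bit masks with the named PCI_EXP_LNKCTL2_* constants. A reduced sketch of the new style for one register, assuming pdev is a struct pci_dev * (illustrative variable; the constants and helpers are the ones used above):

    u16 lnkctl2;

    if (!pci_is_pcie(pdev))                       /* the helpers need a PCIe device */
        return;

    pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &lnkctl2);
    lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;              /* clear the target link speed field */
    lnkctl2 |= PCI_EXP_LNKCTL2_TLS_8_0GT;         /* request gen3 */
    pcie_capability_write_word(pdev, PCI_EXP_LNKCTL2, lnkctl2);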
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 57bb5f9e08b2..88ae27a5a03d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
si_ih_disable_interrupts(adev);
- WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4ccfcdf8f16a..8e1640bc07af 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -58,6 +58,9 @@
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@@ -91,8 +94,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -106,8 +109,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -121,8 +124,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u64 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* read low 32 bit */
@@ -142,8 +145,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* write low 32 bit */
@@ -262,7 +265,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
@@ -336,6 +339,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
@@ -461,7 +465,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -475,42 +479,66 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
+ *cap = smu_baco_is_support(smu);
+ return 0;
+ } else {
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+ }
}
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
+ /* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
+ dev_info(adev->dev, "GPU BACO reset\n");
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
- dev_info(adev->dev, "GPU BACO reset\n");
+ if (smu_baco_reset(smu))
+ return -EIO;
+ } else {
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- adev->in_baco_reset = 1;
+ if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+ }
+
+ /* re-enable doorbell interrupt after BACO exit */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
}
static int soc15_mode2_reset(struct amdgpu_device *adev)
{
+ if (is_support_sw_smu(adev))
+ return smu_mode2_reset(&adev->smu);
if (!adev->powerplay.pp_funcs ||
!adev->powerplay.pp_funcs->asic_reset_mode_2)
return -ENOENT;
@@ -525,6 +553,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
return AMD_RESET_METHOD_MODE2;
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -626,8 +655,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -641,7 +670,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
int soc15_set_ip_blocks(struct amdgpu_device *adev)
@@ -667,13 +696,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
- if (adev->flags & AMD_IS_APU)
- adev->nbio_funcs = &nbio_v7_0_funcs;
- else if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS)
- adev->nbio_funcs = &nbio_v7_4_funcs;
- else
- adev->nbio_funcs = &nbio_v6_1_funcs;
+ if (adev->flags & AMD_IS_APU) {
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ } else if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS) {
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ } else {
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ }
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->df_funcs = &df_v3_6_funcs;
@@ -681,7 +714,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->df_funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
@@ -750,13 +783,26 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ }
+
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+
+ if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
break;
case CHIP_RENOIR:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
@@ -785,7 +831,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
@@ -1099,7 +1145,9 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
} else if (adev->pdev->device == 0x15d8) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1142,7 +1190,9 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
}
break;
case CHIP_ARCTURUS:
@@ -1157,7 +1207,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_MC_MGCG |
- AMD_CG_SUPPORT_MC_LS;
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_IH_CG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x32;
break;
@@ -1203,11 +1254,15 @@ static int soc15_common_early_init(void *handle)
static int soc15_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r = 0;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- return 0;
+ if (adev->nbio.funcs->ras_late_init)
+ r = adev->nbio.funcs->ras_late_init(adev);
+
+ return r;
}
static int soc15_common_sw_init(void *handle)
@@ -1224,6 +1279,10 @@ static int soc15_common_sw_init(void *handle)
static int soc15_common_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_nbio_ras_fini(adev);
+ adev->df_funcs->sw_fini(adev);
return 0;
}
@@ -1236,12 +1295,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- adev->nbio_funcs->sdma_doorbell_range(adev, i,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell, ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range);
}
- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
adev->irq.ih.doorbell_index);
}
}
@@ -1255,13 +1314,13 @@ static int soc15_common_hw_init(void *handle)
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
/* remap HDP registers to a hole in mmio space,
* in order to expose those registers
* to process space
*/
- if (adev->nbio_funcs->remap_hdp_registers)
- adev->nbio_funcs->remap_hdp_registers(adev);
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
@@ -1284,6 +1343,14 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
+ if (adev->nbio.ras_if &&
+ amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ if (adev->nbio.funcs->init_ras_controller_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
+
return 0;
}
@@ -1424,9 +1491,9 @@ static int soc15_common_set_clockgating_state(void *handle,
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1441,9 +1508,9 @@ static int soc15_common_set_clockgating_state(void *handle,
break;
case CHIP_RAVEN:
case CHIP_RENOIR:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1472,7 +1539,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
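The reworked soc15_asic_get_baco_capability()/soc15_asic_baco_reset() above branch on is_support_sw_smu(): ASICs driven by the new SW SMU stack ask the smu_context directly, everything else keeps going through the legacy powerplay pp_funcs table, and RAS-enabled boards additionally gate the doorbell interrupt off around the reset so NBIF does not get stuck. The reset dispatch, condensed as a sketch:

    if (is_support_sw_smu(adev)) {
        if (smu_baco_reset(&adev->smu))                  /* SW SMU path */
            return -EIO;
    } else {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
            return -ENOENT;
        if (pp_funcs->set_asic_baco_state(pp_handle, 1) ||  /* enter BACO */
            pp_funcs->set_asic_baco_state(pp_handle, 0))    /* exit BACO */
            return -EIO;
    }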
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index a3dde0c31f57..57af489a5de3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -28,8 +28,8 @@
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
-#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4
-#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1
+#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6
+#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3
extern const struct amd_ip_funcs soc15_common_ip_funcs;
@@ -67,6 +67,8 @@ struct soc15_allowed_register_entry {
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);
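The new SOC15_REG_FIELD() macro expands one (register, field) pair into the matching _MASK and __SHIFT tokens, so a single argument can feed helpers that take an explicit mask/shift pair. A hedged usage sketch with a hypothetical helper and register field (the macro expansion itself is exactly as defined above):

    /* hypothetical helper taking an explicit mask/shift pair */
    static u32 field_of(u32 val, u32 mask, u32 shift)
    {
        return (val & mask) >> shift;
    }

    /* SOC15_REG_FIELD(GRBM_STATUS, GUI_ACTIVE) expands to
     * GRBM_STATUS__GUI_ACTIVE_MASK, GRBM_STATUS__GUI_ACTIVE__SHIFT,
     * i.e. both arguments come from one token
     */
    busy = field_of(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
                    SOC15_REG_FIELD(GRBM_STATUS, GUI_ACTIVE));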
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
new file mode 100644
index 000000000000..0d6b50528d76
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_0.h"
+#include "amdgpu.h"
+
+static void umc_v6_0_init_registers(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++)
+ WREG32((i*0x100000 + 0x5010c + j*0x2000)/4, 0x1002);
+}
+
+const struct amdgpu_umc_funcs umc_v6_0_funcs = {
+ .init_registers = umc_v6_0_init_registers,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
new file mode 100644
index 000000000000..109f1a57a46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_0_H__
+#define __UMC_V6_0_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+extern const struct amdgpu_umc_funcs umc_v6_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 8502e736f721..47c4b96b14d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -75,6 +75,17 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
RSMU_UMC_INDEX_MODE_EN, 0);
}
+static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
+{
+ uint32_t rsmu_umc_index;
+
+ rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ return REG_GET_FIELD(rsmu_umc_index,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_INSTANCE);
+}
+
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -165,7 +176,8 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
uint32_t umc_reg_offset, uint32_t channel_index)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr;
+ uint64_t mc_umc_status, err_addr, retired_page;
+ struct eeprom_table_record *err_rec;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -177,6 +189,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
/* calculate error address if ue/ce error is detected */
@@ -191,12 +204,24 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
err_addr &= ~((0x1ULL << lsb) - 1);
/* translate umc channel address to soc pa, 3 parts are included */
- err_data->err_addr[err_data->err_addr_cnt] =
- ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- err_data->err_addr_cnt++;
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save UE error information currently, CE is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);
+
+ err_data->err_addr_cnt++;
+ }
}
/* clear umc status */
@@ -209,7 +234,7 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
}
-static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev,
+static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset, uint32_t channel_index)
{
@@ -239,15 +264,16 @@ static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev,
WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
}
-static void umc_v6_1_ras_init(struct amdgpu_device *adev)
+static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
void *ras_error_status = NULL;
- amdgpu_umc_for_each_channel(umc_v6_1_ras_init_per_channel);
+ amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
}
const struct amdgpu_umc_funcs umc_v6_1_funcs = {
- .ras_init = umc_v6_1_ras_init,
+ .err_cnt_init = umc_v6_1_err_cnt_init,
+ .ras_late_init = amdgpu_umc_ras_late_init,
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
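
The umc_v6_1 hunk above stops writing the translated address straight into err_addr[] and instead fills an eeprom_table_record, and only for uncorrectable (UE) errors, so correctable errors are counted but never retired. Below is a minimal, self-contained sketch of that record composition; the address-composition macros, page shift, and record layout are invented placeholders for illustration and do not match the real amdgpu definitions.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Invented layout: interleave an 8KB-aligned block, the channel, and the
 * in-block offset into one SoC physical address. The real macros differ. */
#define ADDR_OF_8KB_BLOCK(addr)    (((addr) & ~0x1fffULL) << 5)
#define ADDR_OF_256B_BLOCK(ch)     ((uint64_t)(ch) << 8)
#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
#define GPU_PAGE_SHIFT             12   /* assumed 4KB GPU pages */

struct retired_page_record {
	uint64_t address;       /* raw UMC error address */
	uint64_t retired_page;  /* page frame number to retire */
	uint64_t ts;            /* timestamp, seconds */
	uint32_t mem_channel;   /* reporting channel */
};

/* Fill *rec only for uncorrectable errors, mirroring the UECC check above. */
static bool make_retired_page_record(uint64_t err_addr, uint32_t channel_index,
				     bool is_ue, struct retired_page_record *rec)
{
	uint64_t soc_pa;

	if (!is_ue)
		return false;   /* correctable errors are only counted, not retired */

	soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
		 ADDR_OF_256B_BLOCK(channel_index) |
		 OFFSET_IN_256B_BLOCK(err_addr);

	rec->address = err_addr;
	rec->retired_page = soc_pa >> GPU_PAGE_SHIFT;
	rec->ts = (uint64_t)time(NULL);
	rec->mem_channel = channel_index;
	return true;
}

int main(void)
{
	struct retired_page_record rec;

	if (make_retired_page_record(0x123456789ULL, 3, true, &rec))
		printf("retire page frame 0x%llx on channel %u\n",
		       (unsigned long long)rec.retired_page, rec.mem_channel);
	return 0;
}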
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 93b3500e522b..b4f84a820a44 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -202,7 +202,6 @@ static int vcn_v1_0_hw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = true;
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 36ad0c0e8efb..38f787a560cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -244,33 +244,24 @@ static int vcn_v2_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, 0);
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.inst->ring_jpeg;
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
done:
if (!r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 395c2259f979..93edf9193a7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
@@ -255,32 +256,24 @@ static int vcn_v2_5_hw_init(void *handle)
continue;
ring = &adev->vcn.inst[j].ring_dec;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, j);
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
- ring->sched.ready = false;
- continue;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.inst[j].ring_jpeg;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
done:
if (!r)
@@ -300,7 +293,7 @@ static int vcn_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int i;
+ int i, j;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -312,8 +305,8 @@ static int vcn_v2_5_hw_fini(void *handle)
ring->sched.ready = false;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst[i].ring_enc[i];
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ ring = &adev->vcn.inst[i].ring_enc[j];
ring->sched.ready = false;
}
@@ -423,7 +416,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
* @adev: amdgpu_device pointer
- * @sw: enable SW clock gating
*
* Disable clock gating for VCN block
*/
@@ -542,7 +534,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
* @adev: amdgpu_device pointer
- * @sw: enable SW clock gating
*
* Enable clock gating for VCN block
*/
@@ -716,6 +707,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -946,6 +940,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 9eae3536ddad..5cb7e231de5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -226,7 +226,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
vega10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
ih = &adev->irq.ih;
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -675,10 +675,49 @@ static int vega10_ih_soft_reset(void *handle)
return 0;
}
+static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ /**
+ * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
+ * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE fields.
+ */
+ if (adev->asic_type > CHIP_VEGA10) {
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+ }
+
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+ }
+}
+
static int vega10_ih_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega10_ih_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
return 0;
+
}
static int vega10_ih_set_powergating_state(void *handle,
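
The vega10_ih hunks above toggle several clock soft-override fields with one read-modify-write of IH_CLK_CTRL and skip the write-back when nothing changed. A small sketch of that pattern follows, assuming invented field masks and a fake register accessor instead of the real RREG32_SOC15/WREG32_SOC15 helpers.

#include <stdint.h>
#include <stdio.h>

/* Invented soft-override field masks standing in for the IH_CLK_CTRL fields. */
#define FIELD_A (1u << 0)
#define FIELD_B (1u << 1)
#define FIELD_C (1u << 2)

static uint32_t fake_reg = FIELD_A | FIELD_C;   /* stands in for mmIH_CLK_CTRL */

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

static void update_clockgating(int enable)
{
	uint32_t def, data;
	uint32_t field_val = enable ? 0 : 1;   /* 0 clears the soft override */

	def = data = reg_read();

	if (field_val)
		data |= FIELD_A | FIELD_B | FIELD_C;
	else
		data &= ~(FIELD_A | FIELD_B | FIELD_C);

	if (def != data)                        /* avoid a redundant register write */
		reg_write(data);
}

int main(void)
{
	update_clockgating(1);
	printf("reg after enable: 0x%x\n", fake_reg);
	return 0;
}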
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index bd0580334f83..6b52a539d51b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
int vega10_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 587e33f5dcce..556f854e3551 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega20_ip_offset.h"
int vega20_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 5f8c8786cac5..78e5cdc0c058 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -689,16 +689,50 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
+
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+}
+
+int smu7_asic_baco_reset(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+
+ dev_info(adev->dev, "GPU BACO reset\n");
+
+ return 0;
+}
+
/**
- * vi_asic_reset - soft reset GPU
+ * vi_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int vi_asic_reset(struct amdgpu_device *adev)
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -714,7 +748,47 @@ static int vi_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
- return AMD_RESET_METHOD_LEGACY;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_TOPAZ:
+ smu7_asic_get_baco_capability(adev, &baco_reset);
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * vi_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int vi_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ r = smu7_asic_baco_reset(adev);
+ else
+ r = vi_asic_pci_config_reset(adev);
+
+ return r;
}
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
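
With the vi.c hunks above, the reset path is chosen at runtime: if the powerplay layer reports BACO capability, the GPU is cycled through BACO enter/exit, otherwise the legacy PCI config reset is used. Below is a hedged sketch of that capability-gated dispatch; the pp_funcs structure and stub callbacks are assumptions for illustration, not the real amd_pm_funcs interface.

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct pp_funcs {
	int (*get_baco_capability)(bool *cap);
	int (*set_baco_state)(int state);   /* 1 = enter, 0 = exit */
};

static int baco_reset(const struct pp_funcs *pp)
{
	if (!pp || !pp->set_baco_state)
		return -ENOENT;
	if (pp->set_baco_state(1))   /* enter BACO */
		return -EIO;
	if (pp->set_baco_state(0))   /* exit BACO */
		return -EIO;
	return 0;
}

static int pci_config_reset(void)
{
	puts("falling back to PCI config reset");
	return 0;
}

static int asic_reset(const struct pp_funcs *pp)
{
	bool baco = false;

	if (pp && pp->get_baco_capability)
		pp->get_baco_capability(&baco);

	return baco ? baco_reset(pp) : pci_config_reset();
}

/* Stub callbacks so the sketch runs standalone. */
static int stub_cap(bool *cap)   { *cap = true; return 0; }
static int stub_state(int state) { printf("BACO %s\n", state ? "enter" : "exit"); return 0; }

int main(void)
{
	const struct pp_funcs pp = { stub_cap, stub_state };

	return asic_reset(&pp);
}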
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 8de0772f986c..40d4174913a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -31,4 +31,7 @@ void vi_srbm_select(struct amdgpu_device *adev,
int vi_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
+int smu7_asic_baco_reset(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 177d1e5329a5..9f59ba93cfe0 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
- unsigned int vmid, pasid;
+ unsigned int vmid;
+ uint16_t pasid;
+ bool ret;
/* This workaround is due to HW/FW limitation on Hawaii that
* VMID and PASID are not written into ih_ring_entry
@@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
*tmp_ihre = *ihre;
vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+ ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
tmp_ihre->ring_id |= pasid << 16;
- return (pasid != 0) &&
+ return ret && (pasid != 0) &&
vmid >= dev->vm_info.first_vmid_kfd &&
vmid <= dev->vm_info.last_vmid_kfd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 901fe3590165..d3400da6ab64 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x7a5d0000, 0x807c817c,
0x807aff7a, 0x00000080,
0xbf0a717c, 0xbf85fff8,
- 0xbf820141, 0xbef4037e,
+ 0xbf820142, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
@@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x725d0000, 0xe0304080,
0x725d0100, 0xe0304100,
0x725d0200, 0xe0304180,
- 0x725d0300, 0xbf820031,
+ 0x725d0300, 0xbf820032,
0xbef603ff, 0x01000000,
0xbef20378, 0x8078ff78,
0x00000400, 0xbefc0384,
@@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x725d0000, 0xe0304100,
0x725d0100, 0xe0304200,
0x725d0200, 0xe0304300,
- 0x725d0300, 0xb9782a05,
- 0x80788178, 0x907c9973,
- 0x877c817c, 0xbf06817c,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb9721e06, 0x8f728a72,
- 0x80787278, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef603ff,
- 0x01000000, 0xbefc03ff,
- 0x0000006c, 0x80f89078,
- 0xf429003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc847c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0x80f8a078,
- 0xf42d003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc887c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0x80f8c078,
- 0xf431003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0xbe883108,
- 0xbe8a310a, 0xbe8c310c,
- 0xbe8e310e, 0xbf06807c,
- 0xbf84fff0, 0xb9782a05,
- 0x80788178, 0x907c9973,
- 0x877c817c, 0xbf06817c,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb9721e06, 0x8f728a72,
- 0x80787278, 0x8078ff78,
- 0x00000200, 0xbef603ff,
- 0x01000000, 0xf4211bfa,
+ 0x725d0300, 0xbf8c3f70,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
+ 0xbef603ff, 0x01000000,
+ 0xbefc03ff, 0x0000006c,
+ 0x80f89078, 0xf429003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc847c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0x80f8a078, 0xf42d003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc887c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0x80f8c078, 0xf431003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0xbe883108, 0xbe8a310a,
+ 0xbe8c310c, 0xbe8e310e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0xbef603ff, 0x01000000,
+ 0xf4211bfa, 0xf0000000,
+ 0x80788478, 0xf4211b3a,
0xf0000000, 0x80788478,
- 0xf4211b3a, 0xf0000000,
- 0x80788478, 0xf4211b7a,
+ 0xf4211b7a, 0xf0000000,
+ 0x80788478, 0xf4211eba,
0xf0000000, 0x80788478,
- 0xf4211eba, 0xf0000000,
- 0x80788478, 0xf4211efa,
+ 0xf4211efa, 0xf0000000,
+ 0x80788478, 0xf4211c3a,
0xf0000000, 0x80788478,
- 0xf4211c3a, 0xf0000000,
- 0x80788478, 0xf4211c7a,
+ 0xf4211c7a, 0xf0000000,
+ 0x80788478, 0xf4211e7a,
0xf0000000, 0x80788478,
- 0xf4211e7a, 0xf0000000,
- 0x80788478, 0xf4211cfa,
+ 0xf4211cfa, 0xf0000000,
+ 0x80788478, 0xf4211bba,
0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef814, 0xf4211bba,
- 0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef815,
- 0xbef2036d, 0x876dff72,
- 0x0000ffff, 0xbefc036f,
- 0xbefe037a, 0xbeff037b,
- 0x876f71ff, 0x000003ff,
- 0xb9ef4803, 0xb9f9f816,
- 0x876f71ff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f3f801, 0x876fff72,
- 0xfc000000, 0x906f9a6f,
- 0x8f6f906f, 0xbef30380,
+ 0xb9eef815, 0xbef2036d,
+ 0x876dff72, 0x0000ffff,
+ 0xbefc036f, 0xbefe037a,
+ 0xbeff037b, 0x876f71ff,
+ 0x000003ff, 0xb9ef4803,
+ 0xb9f9f816, 0x876f71ff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0x876fff72, 0xfc000000,
+ 0x906f9a6f, 0x8f6f906f,
+ 0xbef30380, 0x88736f73,
+ 0x876fff72, 0x02000000,
+ 0x906f996f, 0x8f6f8f6f,
0x88736f73, 0x876fff72,
- 0x02000000, 0x906f996f,
- 0x8f6f8f6f, 0x88736f73,
- 0x876fff72, 0x01000000,
- 0x906f986f, 0x8f6f996f,
- 0x88736f73, 0x876fff70,
- 0x00800000, 0x906f976f,
- 0xb9f3f807, 0x87fe7e7e,
- 0x87ea6a6a, 0xb9f0f802,
- 0xbf8a0000, 0xbe80226c,
- 0xbf810000, 0xbf9f0000,
+ 0x01000000, 0x906f986f,
+ 0x8f6f996f, 0x88736f73,
+ 0x876fff70, 0x00800000,
+ 0x906f976f, 0xb9f3f807,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9f0f802, 0xbf8a0000,
+ 0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbf820001, 0xbf8202c4,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index cdaa523ce6be..4433bda2ce25 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -758,6 +758,7 @@ L_RESTORE_V0:
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
/* restore SGPRs */
//will be 2+8+16*6
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1d3cd5c50d5f..1544007af34a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -49,7 +49,7 @@ static const char kfd_dev_name[] = "kfd";
static const struct file_operations kfd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = kfd_ioctl,
- .compat_ioctl = kfd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = kfd_open,
.mmap = kfd_mmap,
};
@@ -282,7 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
+ pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
@@ -332,7 +332,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid %d\n",
+ pr_debug("Destroying queue id %d for pasid 0x%x\n",
args->queue_id,
p->pasid);
@@ -378,7 +378,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid %d\n",
+ pr_debug("Updating queue id %d for pasid 0x%x\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -855,7 +855,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
args->num_of_nodes = 0;
@@ -913,7 +913,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
uint32_t nodes = 0;
int ret;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
if (args->num_of_nodes == 0) {
/* Return number of nodes, so that user space can allocate
@@ -1128,7 +1128,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
mutex_unlock(&p->mutex);
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
- pdd->qpd.vmid != 0)
+ pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
dev->kgd, args->va_addr, pdd->qpd.vmid);
@@ -1801,7 +1801,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
} else
goto err_i1;
- dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
if (IS_ERR(process)) {
@@ -1856,7 +1856,8 @@ err_i1:
kfree(kdata);
if (retcode)
- dev_dbg(kfd_device, "ret = %d\n", retcode);
+ dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
+ nr, arg, retcode);
return retcode;
}
@@ -1877,7 +1878,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("Process %d mapping mmio page\n"
+ pr_debug("pasid 0x%x mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 66387caf966e..de9f68d5c312 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
+#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
@@ -670,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
break;
+ case CHIP_RENOIR:
+ pcache_info = renoir_cache_info;
+ num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
break;
@@ -703,7 +710,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info,
cu_info,
mem_available,
- cu_info->cu_bitmap[i][j],
+ cu_info->cu_bitmap[i % 4][j + i / 4],
ct,
cu_processor_id,
k);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index a3441b0e385b..d59f2cd056c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
+ uint16_t queried_pasid;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
@@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
*/
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
- (dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
- (dev->kgd, vmid) == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
- vmid, p->pasid);
- break;
- }
+ status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
+ (dev->kgd, vmid, &queried_pasid);
+
+ if (status && queried_pasid == p->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
+ vmid, p->pasid);
+ break;
}
}
if (vmid > last_vmid_to_scan) {
- pr_err("Didn't find vmid for pasid %d\n", p->pasid);
+ pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 9d4af961c5d1..9bfa50633654 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
if (pmgr->pasid != 0) {
- pr_debug("H/W debugger is already active using pasid %d\n",
+ pr_debug("H/W debugger is already active using pasid 0x%x\n",
pmgr->pasid);
return -EBUSY;
}
@@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
- pr_debug("H/W debugger is not registered by calling pasid %d\n",
+ pr_debug("H/W debugger is not registered by calling pasid 0x%x\n",
p->pasid);
return -EINVAL;
}
@@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
wac_info->process->pasid);
return -EINVAL;
}
@@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
adw_info->process->pasid);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0dc1084b5e82..4fa8834ce7cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -39,6 +39,41 @@
*/
static atomic_t kfd_locked = ATOMIC_INIT(0);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
+#endif
+extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
+extern const struct kfd2kgd_calls arcturus_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
+
+static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
+#ifdef KFD_SUPPORT_IOMMU_V2
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_KAVERI] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
+ [CHIP_RAVEN] = &gfx_v9_kfd2kgd,
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_HAWAII] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_TONGA] = &gfx_v8_kfd2kgd,
+ [CHIP_FIJI] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGAM] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGA10] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA12] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA20] = &gfx_v9_kfd2kgd,
+ [CHIP_RENOIR] = &gfx_v9_kfd2kgd,
+ [CHIP_ARCTURUS] = &arcturus_kfd2kgd,
+ [CHIP_NAVI10] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI12] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI14] = &gfx_v10_kfd2kgd,
+};
+
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
@@ -351,6 +386,24 @@ static const struct kfd_device_info arcturus_device_info = {
.num_sdma_queues_per_engine = 8,
};
+static const struct kfd_device_info renoir_device_info = {
+ .asic_family = CHIP_RENOIR,
+ .asic_name = "renoir",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info navi10_device_info = {
.asic_family = CHIP_NAVI10,
.asic_name = "navi10",
@@ -369,133 +422,64 @@ static const struct kfd_device_info navi10_device_info = {
.num_sdma_queues_per_engine = 8,
};
-struct kfd_deviceid {
- unsigned short did;
- const struct kfd_device_info *device_info;
+static const struct kfd_device_info navi12_device_info = {
+ .asic_family = CHIP_NAVI12,
+ .asic_name = "navi12",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+};
+
+static const struct kfd_device_info navi14_device_info = {
+ .asic_family = CHIP_NAVI14,
+ .asic_name = "navi14",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
};
-static const struct kfd_deviceid supported_devices[] = {
+/* For each entry, [0] is regular and [1] is the virtualisation device. */
+static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
- { 0x1304, &kaveri_device_info }, /* Kaveri */
- { 0x1305, &kaveri_device_info }, /* Kaveri */
- { 0x1306, &kaveri_device_info }, /* Kaveri */
- { 0x1307, &kaveri_device_info }, /* Kaveri */
- { 0x1309, &kaveri_device_info }, /* Kaveri */
- { 0x130A, &kaveri_device_info }, /* Kaveri */
- { 0x130B, &kaveri_device_info }, /* Kaveri */
- { 0x130C, &kaveri_device_info }, /* Kaveri */
- { 0x130D, &kaveri_device_info }, /* Kaveri */
- { 0x130E, &kaveri_device_info }, /* Kaveri */
- { 0x130F, &kaveri_device_info }, /* Kaveri */
- { 0x1310, &kaveri_device_info }, /* Kaveri */
- { 0x1311, &kaveri_device_info }, /* Kaveri */
- { 0x1312, &kaveri_device_info }, /* Kaveri */
- { 0x1313, &kaveri_device_info }, /* Kaveri */
- { 0x1315, &kaveri_device_info }, /* Kaveri */
- { 0x1316, &kaveri_device_info }, /* Kaveri */
- { 0x1317, &kaveri_device_info }, /* Kaveri */
- { 0x1318, &kaveri_device_info }, /* Kaveri */
- { 0x131B, &kaveri_device_info }, /* Kaveri */
- { 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info }, /* Kaveri */
- { 0x9870, &carrizo_device_info }, /* Carrizo */
- { 0x9874, &carrizo_device_info }, /* Carrizo */
- { 0x9875, &carrizo_device_info }, /* Carrizo */
- { 0x9876, &carrizo_device_info }, /* Carrizo */
- { 0x9877, &carrizo_device_info }, /* Carrizo */
- { 0x15DD, &raven_device_info }, /* Raven */
- { 0x15D8, &raven_device_info }, /* Raven */
+ [CHIP_KAVERI] = {&kaveri_device_info, NULL},
+ [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
+ [CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
- { 0x67A0, &hawaii_device_info }, /* Hawaii */
- { 0x67A1, &hawaii_device_info }, /* Hawaii */
- { 0x67A2, &hawaii_device_info }, /* Hawaii */
- { 0x67A8, &hawaii_device_info }, /* Hawaii */
- { 0x67A9, &hawaii_device_info }, /* Hawaii */
- { 0x67AA, &hawaii_device_info }, /* Hawaii */
- { 0x67B0, &hawaii_device_info }, /* Hawaii */
- { 0x67B1, &hawaii_device_info }, /* Hawaii */
- { 0x67B8, &hawaii_device_info }, /* Hawaii */
- { 0x67B9, &hawaii_device_info }, /* Hawaii */
- { 0x67BA, &hawaii_device_info }, /* Hawaii */
- { 0x67BE, &hawaii_device_info }, /* Hawaii */
- { 0x6920, &tonga_device_info }, /* Tonga */
- { 0x6921, &tonga_device_info }, /* Tonga */
- { 0x6928, &tonga_device_info }, /* Tonga */
- { 0x6929, &tonga_device_info }, /* Tonga */
- { 0x692B, &tonga_device_info }, /* Tonga */
- { 0x6938, &tonga_device_info }, /* Tonga */
- { 0x6939, &tonga_device_info }, /* Tonga */
- { 0x7300, &fiji_device_info }, /* Fiji */
- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
- { 0x67C4, &polaris10_device_info }, /* Polaris10 */
- { 0x67C7, &polaris10_device_info }, /* Polaris10 */
- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
- { 0x67DF, &polaris10_device_info }, /* Polaris10 */
- { 0x6FDF, &polaris10_device_info }, /* Polaris10 */
- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
- { 0x67E3, &polaris11_device_info }, /* Polaris11 */
- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
- { 0x67EF, &polaris11_device_info }, /* Polaris11 */
- { 0x67FF, &polaris11_device_info }, /* Polaris11 */
- { 0x6980, &polaris12_device_info }, /* Polaris12 */
- { 0x6981, &polaris12_device_info }, /* Polaris12 */
- { 0x6985, &polaris12_device_info }, /* Polaris12 */
- { 0x6986, &polaris12_device_info }, /* Polaris12 */
- { 0x6987, &polaris12_device_info }, /* Polaris12 */
- { 0x6995, &polaris12_device_info }, /* Polaris12 */
- { 0x6997, &polaris12_device_info }, /* Polaris12 */
- { 0x699F, &polaris12_device_info }, /* Polaris12 */
- { 0x694C, &vegam_device_info }, /* VegaM */
- { 0x694E, &vegam_device_info }, /* VegaM */
- { 0x694F, &vegam_device_info }, /* VegaM */
- { 0x6860, &vega10_device_info }, /* Vega10 */
- { 0x6861, &vega10_device_info }, /* Vega10 */
- { 0x6862, &vega10_device_info }, /* Vega10 */
- { 0x6863, &vega10_device_info }, /* Vega10 */
- { 0x6864, &vega10_device_info }, /* Vega10 */
- { 0x6867, &vega10_device_info }, /* Vega10 */
- { 0x6868, &vega10_device_info }, /* Vega10 */
- { 0x6869, &vega10_device_info }, /* Vega10 */
- { 0x686A, &vega10_device_info }, /* Vega10 */
- { 0x686B, &vega10_device_info }, /* Vega10 */
- { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
- { 0x686D, &vega10_device_info }, /* Vega10 */
- { 0x686E, &vega10_device_info }, /* Vega10 */
- { 0x686F, &vega10_device_info }, /* Vega10 */
- { 0x687F, &vega10_device_info }, /* Vega10 */
- { 0x69A0, &vega12_device_info }, /* Vega12 */
- { 0x69A1, &vega12_device_info }, /* Vega12 */
- { 0x69A2, &vega12_device_info }, /* Vega12 */
- { 0x69A3, &vega12_device_info }, /* Vega12 */
- { 0x69AF, &vega12_device_info }, /* Vega12 */
- { 0x66a0, &vega20_device_info }, /* Vega20 */
- { 0x66a1, &vega20_device_info }, /* Vega20 */
- { 0x66a2, &vega20_device_info }, /* Vega20 */
- { 0x66a3, &vega20_device_info }, /* Vega20 */
- { 0x66a4, &vega20_device_info }, /* Vega20 */
- { 0x66a7, &vega20_device_info }, /* Vega20 */
- { 0x66af, &vega20_device_info }, /* Vega20 */
- { 0x738C, &arcturus_device_info }, /* Arcturus */
- { 0x7388, &arcturus_device_info }, /* Arcturus */
- { 0x738E, &arcturus_device_info }, /* Arcturus */
- { 0x7390, &arcturus_device_info }, /* Arcturus vf */
- { 0x7310, &navi10_device_info }, /* Navi10 */
- { 0x7312, &navi10_device_info }, /* Navi10 */
- { 0x7318, &navi10_device_info }, /* Navi10 */
- { 0x731a, &navi10_device_info }, /* Navi10 */
- { 0x731f, &navi10_device_info }, /* Navi10 */
+ [CHIP_HAWAII] = {&hawaii_device_info, NULL},
+ [CHIP_TONGA] = {&tonga_device_info, NULL},
+ [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
+ [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
+ [CHIP_POLARIS11] = {&polaris11_device_info, NULL},
+ [CHIP_POLARIS12] = {&polaris12_device_info, NULL},
+ [CHIP_VEGAM] = {&vegam_device_info, NULL},
+ [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
+ [CHIP_VEGA12] = {&vega12_device_info, NULL},
+ [CHIP_VEGA20] = {&vega20_device_info, NULL},
+ [CHIP_RENOIR] = {&renoir_device_info, NULL},
+ [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
+ [CHIP_NAVI10] = {&navi10_device_info, NULL},
+ [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
+ [CHIP_NAVI14] = {&navi14_device_info, NULL},
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -504,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-static const struct kfd_device_info *lookup_device_info(unsigned short did)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
- size_t i;
+ struct kfd_dev *kfd;
+ const struct kfd_device_info *device_info;
+ const struct kfd2kgd_calls *f2g;
- for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
- if (supported_devices[i].did == did) {
- WARN_ON(!supported_devices[i].device_info);
- return supported_devices[i].device_info;
- }
+ if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
+ || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
+ dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
+ return NULL; /* asic_type out of range */
}
- dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
- did);
+ device_info = kfd_supported_devices[asic_type][vf];
+ f2g = kfd2kgd_funcs[asic_type];
- return NULL;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
- struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
-{
- struct kfd_dev *kfd;
- const struct kfd_device_info *device_info =
- lookup_device_info(pdev->device);
-
- if (!device_info) {
- dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ if (!device_info || !f2g) {
+ dev_err(kfd_device, "%s %s not supported in kfd\n",
+ amdgpu_asic_name[asic_type], vf ? "VF" : "");
return NULL;
}
@@ -593,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
unsigned int size;
+ kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
@@ -751,9 +730,6 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
return 0;
kgd2kfd_suspend(kfd);
- /* hold dqm->lock to prevent further execution*/
- dqm_lock(kfd->dqm);
-
kfd_signal_reset_event(kfd);
return 0;
}
@@ -771,8 +747,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
if (!kfd->init_complete)
return 0;
- dqm_unlock(kfd->dqm);
-
ret = kfd_resume(kfd);
if (ret)
return ret;
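
The kfd_device.c hunks above drop the long PCI device-ID list: kgd2kfd_probe() now indexes static per-ASIC tables by asic_type, so the only runtime check left is a bounds test on the index before dereferencing. A small self-contained sketch of that pattern is below; the enum values, table contents, and names are placeholders, not the real kfd tables.

#include <stdio.h>

enum asic_type { CHIP_EXAMPLE_A, CHIP_EXAMPLE_B };

struct device_info { const char *name; };

static const struct device_info example_a    = { "example-a" };
static const struct device_info example_b    = { "example-b" };
static const struct device_info example_b_vf = { "example-b-vf" };

/* [0] is the bare-metal entry, [1] the virtualisation (VF) entry. */
static const struct device_info *supported[][2] = {
	[CHIP_EXAMPLE_A] = { &example_a, NULL },
	[CHIP_EXAMPLE_B] = { &example_b, &example_b_vf },
};

static const struct device_info *lookup(unsigned int asic_type, int vf)
{
	if (asic_type >= sizeof(supported) / sizeof(supported[0]))
		return NULL;            /* asic_type out of range */
	return supported[asic_type][vf ? 1 : 0];
}

int main(void)
{
	const struct device_info *info = lookup(CHIP_EXAMPLE_B, 1);

	printf("%s\n", info ? info->name : "unsupported");
	return 0;
}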
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d985e31fcc1e..984c2f2b24b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit, allocated_vmid;
+ int allocated_vmid = -1, i;
- if (dqm->vmid_bitmap == 0)
- return -ENOMEM;
+ for (i = dqm->dev->vm_info.first_vmid_kfd;
+ i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
+ if (!dqm->vmid_pasid[i]) {
+ allocated_vmid = i;
+ break;
+ }
+ }
+
+ if (allocated_vmid < 0) {
+ pr_err("no more vmid to allocate\n");
+ return -ENOSPC;
+ }
+
+ pr_debug("vmid allocated: %d\n", allocated_vmid);
+
+ dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
- bit = ffs(dqm->vmid_bitmap) - 1;
- dqm->vmid_bitmap &= ~(1 << bit);
+ set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
- allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
- pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
program_sh_mem_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
@@ -220,8 +230,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd));
- dqm->dev->kfd2kgd->set_scratch_backing_va(
- dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+ if (dqm->dev->kfd2kgd->set_scratch_backing_va)
+ dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
+ qpd->sh_hidden_private_base, qpd->vmid);
return 0;
}
@@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
-
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->device_info->asic_family == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
@@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+ dqm->vmid_pasid[qpd->vmid] = 0;
- dqm->vmid_bitmap |= (1 << bit);
qpd->vmid = 0;
q->properties.vmid = 0;
}
@@ -331,6 +340,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (q->properties.is_active) {
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
+ goto add_queue_to_list;
+ }
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
@@ -342,6 +355,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
goto out_free_mqd;
}
+add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
@@ -449,6 +463,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
+ return 0;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -524,6 +543,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
+
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Update non-HWS queue while stopped\n");
+ goto out_unlock;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -579,7 +604,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -593,6 +618,11 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
+ dqm->queue_count--;
+
+ if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
+ continue;
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -601,7 +631,6 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count--;
}
out:
@@ -621,7 +650,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -667,7 +696,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -702,6 +731,11 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
+ dqm->queue_count++;
+
+ if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
+ continue;
+
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, mm);
if (retval && !ret)
@@ -709,7 +743,6 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count++;
}
qpd->evicted = 0;
out:
@@ -739,7 +772,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -879,7 +912,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[pipe] |= 1 << queue;
}
- dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
+ memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
+
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -902,12 +936,20 @@ static void uninitialize(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
init_interrupts(dqm);
- return pm_init(&dqm->packets, dqm);
+
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ return pm_init(&dqm->packets, dqm);
+ dqm->sched_running = true;
+
+ return 0;
}
static int stop_nocpsch(struct device_queue_manager *dqm)
{
- pm_uninit(&dqm->packets);
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ pm_uninit(&dqm->packets);
+ dqm->sched_running = false;
+
return 0;
}
@@ -1058,6 +1100,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
/* clear hang status when driver try to start the hw scheduler */
dqm->is_hws_hang = false;
+ dqm->sched_running = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1073,6 +1116,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
{
dqm_lock(dqm);
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ dqm->sched_running = false;
dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
@@ -1259,9 +1303,10 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
{
int retval;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
return 0;
-
if (dqm->active_runlist)
return 0;
@@ -1283,6 +1328,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->is_hws_hang)
return -EIO;
if (!dqm->active_runlist)
@@ -1676,7 +1723,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- dev->device_info->num_sdma_engines *
+ (dev->device_info->num_sdma_engines +
+ dev->device_info->num_xgmi_sdma_engines) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
@@ -1786,10 +1834,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
device_queue_manager_init_v9(&dqm->asic_ops);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
break;
default:
@@ -1883,6 +1934,12 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
int pipe, queue;
int r = 0;
+ if (!dqm->sched_running) {
+ seq_printf(m, " Device is stopped\n");
+
+ return 0;
+ }
+
r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
&dump, &n_regs);
@@ -1917,7 +1974,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
+ get_num_xgmi_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
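
The device_queue_manager hunks above replace the vmid_bitmap with a per-VMID PASID table, so allocation becomes a linear scan for a free slot and the interrupt path can translate a VMID back to a PASID with a plain table lookup instead of a register read. The sketch below illustrates that scheme in userspace C; the VMID range, table size, and function names are illustrative assumptions, not the kfd structures themselves.

#include <stdint.h>
#include <stdio.h>

#define VMID_NUM        16
#define FIRST_VMID_KFD  8      /* assumed KFD VMID range */
#define LAST_VMID_KFD   15

static uint16_t vmid_pasid[VMID_NUM];   /* 0 means the VMID is free */

/* Returns an allocated VMID, or -1 if none is available. */
static int allocate_vmid(uint16_t pasid)
{
	int i;

	for (i = FIRST_VMID_KFD; i <= LAST_VMID_KFD; i++) {
		if (!vmid_pasid[i]) {
			vmid_pasid[i] = pasid;
			return i;
		}
	}
	return -1;
}

static void deallocate_vmid(int vmid)
{
	vmid_pasid[vmid] = 0;
}

/* The interrupt path can now do a table lookup instead of reading the
 * ATC VMID/PASID mapping registers. */
static uint16_t vmid_to_pasid(int vmid)
{
	return vmid_pasid[vmid];
}

int main(void)
{
	int vmid = allocate_vmid(0x8001);

	printf("vmid %d -> pasid 0x%x\n", vmid, vmid_to_pasid(vmid));
	deallocate_vmid(vmid);
	return 0;
}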
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 90db2c9275f6..a8c37e6da027 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -32,6 +32,8 @@
#include "kfd_mqd_manager.h"
+#define VMID_NUM 16
+
struct device_process_node {
struct qcm_process_device *qpd;
struct list_head list;
@@ -185,7 +187,8 @@ struct device_queue_manager {
unsigned int *allocated_queues;
uint64_t sdma_bitmap;
uint64_t xgmi_sdma_bitmap;
- unsigned int vmid_bitmap;
+ /* the pasid mapping for each kfd vmid */
+ uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
struct kfd_mem_obj *pipeline_mem;
uint64_t fence_gpu_addr;
@@ -198,6 +201,7 @@ struct device_queue_manager {
bool is_hws_hang;
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
+ bool sched_running;
};
void device_queue_manager_init_cik(
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d674d4b3340f..908081c85de1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -852,8 +852,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGSEGV to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -861,13 +861,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGTERM to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "HSA Process (PID %d) got unhandled exception",
- p->lead_thread->pid);
+ "Process %d (pasid 0x%x) got unhandled exception",
+ p->lead_thread->pid, p->pasid);
}
}
}
@@ -936,7 +936,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
/* Workaround on Raven to not kill the process when memory is freed
* before IOMMU is able to finish processing all the excessive PPRs
*/
- if (dev->device_info->asic_family != CHIP_RAVEN) {
+ if (dev->device_info->asic_family != CHIP_RAVEN &&
+ dev->device_info->asic_family != CHIP_RENOIR) {
mutex_lock(&p->event_mutex);
/* Lookup events by type and signal them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 9dc4bff8085e..bb77b8890e77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process)
/*Iterating over all devices*/
while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
- if (!dev) {
- id++; /* Skip non GPU devices */
+ if (!dev || kfd_devcgroup_check_permission(dev)) {
+ /* Skip non-GPU devices and devices to which the
+ * current process has no access. Access can be
+ * limited by placing the process in a specific
+ * cgroup hierarchy
+ */
+ id++;
continue;
}
@@ -405,8 +410,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kfd_init_apertures_v9(pdd, id);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 3ef67d2e0d9f..e05d75ecda21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
memcpy(patched_ihre, ih_ring_entry,
dev->device_info->ih_ring_entry_size);
- pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
- dev->kgd, vmid);
+ pasid = dev->dqm->vmid_pasid[vmid];
/* Patch the pasid field */
patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index c56ac47cd318..bc47f6a44456 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
}
kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+ if (unlikely(!kfd->ih_wq)) {
+ kfifo_free(&kfd->ih_fifo);
+ dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5f35df23fb18..193e2835bd4d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -160,7 +160,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
if (!p)
return;
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
mutex_lock(kfd_get_dbgmgr_mutex());
@@ -194,7 +194,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
struct kfd_dev *dev;
dev_warn_ratelimited(kfd_device,
- "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
+ "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
@@ -235,7 +235,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
p->lead_thread);
if (err < 0) {
- pr_err("Unexpected pasid %d binding failure\n",
+ pr_err("Unexpected pasid 0x%x binding failure\n",
p->pasid);
mutex_unlock(&p->mutex);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8b4564f71a7a..11d244891393 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -330,10 +330,13 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
kernel_queue_init_v9(&kq->ops_asic_specific);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kernel_queue_init_v10(&kq->ops_asic_specific);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 986ff52d5750..f4b7f7e6c40e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -82,7 +82,7 @@ static void kfd_exit(void)
kfd_chardev_exit();
}
-int kgd2kfd_init()
+int kgd2kfd_init(void)
{
return kfd_init();
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 9cd3eb2d90bd..4a236b2c2354 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -69,35 +69,13 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
struct queue_properties *q)
{
- int retval;
- struct kfd_mem_obj *mqd_mem_obj = NULL;
+ struct kfd_mem_obj *mqd_mem_obj;
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
- */
- if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if (!mqd_mem_obj)
- return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
- ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE),
- &(mqd_mem_obj->gtt_mem),
- &(mqd_mem_obj->gpu_addr),
- (void *)&(mqd_mem_obj->cpu_ptr), true);
- } else {
- retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
- &mqd_mem_obj);
- }
-
- if (retval) {
- kfree(mqd_mem_obj);
+ if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
+ &mqd_mem_obj))
return NULL;
- }
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
@@ -250,14 +228,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- struct kfd_dev *kfd = mm->dev;
-
- if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
- kfree(mqd_mem_obj);
- } else {
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
- }
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 2c8624c5b42c..83ef4b3dd2fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -239,10 +239,13 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
pm->pmf = &kfd_v9_pm_funcs;
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pm->pmf = &kfd_v10_pm_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index c89326125d71..060a9e8b301e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -36,6 +36,10 @@
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
+#include <linux/device_cgroup.h>
+#include <drm/drm_file.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
#include "amd_shared.h"
@@ -179,10 +183,6 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11)
-#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \
- (chip) <= CHIP_NAVI10) || \
- (chip) == CHIP_HAWAII)
#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
@@ -230,6 +230,7 @@ struct kfd_dev {
const struct kfd_device_info *device_info;
struct pci_dev *pdev;
+ struct drm_device *ddev;
unsigned int id; /* topology stub index */
@@ -687,7 +688,7 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- unsigned int pasid;
+ uint16_t pasid;
unsigned int doorbell_index;
/*
@@ -1040,6 +1041,21 @@ bool kfd_is_locked(void);
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);
+/* Cgroup Support */
+/* Check with device cgroup if @kfd device is accessible */
+static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
+{
+#if defined(CONFIG_CGROUP_DEVICE)
+ struct drm_device *ddev = kfd->ddev;
+
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+ ddev->render->index,
+ DEVCG_ACC_WRITE | DEVCG_ACC_READ);
+#else
+ return 0;
+#endif
+}
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 40e3fc0c6942..10f9af5784f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -416,7 +416,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
per_device_list) {
- pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
@@ -687,6 +687,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_dev *dev)
{
unsigned int i;
+ int range_start = dev->shared_resources.non_cp_doorbells_start;
+ int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!KFD_IS_SOC15(dev->device_info->asic_family))
return 0;
@@ -698,14 +700,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
return -ENOMEM;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
+ range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
+
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
- if (i >= dev->shared_resources.non_cp_doorbells_start
- && i <= dev->shared_resources.non_cp_doorbells_end) {
+ if (i >= range_start && i <= range_end) {
set_bit(i, qpd->doorbell_bitmap);
set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
- pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
- i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
}
}
@@ -1020,7 +1024,7 @@ static void evict_process_worker(struct work_struct *work)
*/
flush_delayed_work(&p->restore_work);
- pr_debug("Started evicting pasid %d\n", p->pasid);
+ pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = kfd_process_evict_queues(p);
if (!ret) {
dma_fence_signal(p->ef);
@@ -1029,9 +1033,9 @@ static void evict_process_worker(struct work_struct *work)
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- pr_debug("Finished evicting pasid %d\n", p->pasid);
+ pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
- pr_err("Failed to evict queues of pasid %d\n", p->pasid);
+ pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
@@ -1046,7 +1050,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
- pr_debug("Started restoring pasid %d\n", p->pasid);
+ pr_debug("Started restoring pasid 0x%x\n", p->pasid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -1062,7 +1066,7 @@ static void restore_process_worker(struct work_struct *work)
ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
- pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
+ pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
@@ -1072,9 +1076,9 @@ static void restore_process_worker(struct work_struct *work)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid %d\n", p->pasid);
+ pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
else
- pr_err("Failed to restore queues of pasid %d\n", p->pasid);
+ pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
@@ -1088,7 +1092,7 @@ void kfd_suspend_all_processes(void)
cancel_delayed_work_sync(&p->restore_work);
if (kfd_process_evict_queues(p))
- pr_err("Failed to suspend process %d\n", p->pasid);
+ pr_err("Failed to suspend process 0x%x\n", p->pasid);
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
@@ -1171,7 +1175,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID %d:\n",
+ seq_printf(m, "Process %d PASID 0x%x:\n",
p->lead_thread->tgid, p->pasid);
mutex_lock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7e6c3ee82f5b..2659d226c056 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid 0x%x\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -298,7 +298,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
@@ -377,7 +377,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
- pr_err("Pasid %d destroy queue %d failed, ret %d\n",
+ pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pqm->process->pasid,
pqn->q->properties.queue_id, retval);
if (retval != -ETIME)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 7551761f2aa9..69bd0628fdc6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
iolink = container_of(attr, struct kfd_iolink_properties, attr);
+ if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
@@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
mem = container_of(attr, struct kfd_mem_properties, attr);
+ if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
sysfs_show_32bit_prop(buffer, "flags", mem->flags);
@@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
cache = container_of(attr, struct kfd_cache_properties, attr);
+ if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "processor_id_low",
cache->processor_id_low);
sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
@@ -414,6 +420,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_32bit_val(buffer, dev->gpu_id);
}
@@ -421,11 +429,15 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev = container_of(attr, struct kfd_topology_device,
attr_name);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_str_val(buffer, dev->node_props.name);
}
dev = container_of(attr, struct kfd_topology_device,
attr_props);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
@@ -1098,6 +1110,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
+ struct kfd_mem_properties *mem;
+ struct kfd_cache_properties *cache;
+ struct kfd_iolink_properties *iolink;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -1111,6 +1126,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
+
+ list_for_each_entry(mem, &dev->mem_props, list)
+ mem->gpu = dev->gpu;
+ list_for_each_entry(cache, &dev->cache_props, list)
+ cache->gpu = dev->gpu;
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ iolink->gpu = dev->gpu;
break;
}
}
@@ -1317,8 +1339,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index d4718d58d0f2..15843e0fc756 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -102,6 +102,7 @@ struct kfd_mem_properties {
uint32_t flags;
uint32_t width;
uint32_t mem_clk_max;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -123,6 +124,7 @@ struct kfd_cache_properties {
uint32_t cache_latency;
uint32_t cache_type;
uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -141,6 +143,7 @@ struct kfd_iolink_properties {
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
uint32_t flags;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 71991a28a775..313183b80032 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -23,16 +23,16 @@ config DRM_AMD_DC_DCN2_0
depends on DRM_AMD_DC && X86
depends on DRM_AMD_DC_DCN1_0
help
- Choose this option if you want to have
- Navi support for display engine
+ Choose this option if you want to have
+ Navi support for display engine
config DRM_AMD_DC_DCN2_1
- bool "DCN 2.1 family"
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN2_0
- help
- Choose this option if you want to have
- Renoir support for display engine
+ bool "DCN 2.1 family"
+ depends on DRM_AMD_DC && X86
+ depends on DRM_AMD_DC_DCN2_0
+ help
+ Choose this option if you want to have
+ Renoir support for display engine
config DRM_AMD_DC_DSC_SUPPORT
bool "DSC support"
@@ -41,8 +41,16 @@ config DRM_AMD_DC_DSC_SUPPORT
depends on DRM_AMD_DC_DCN1_0
depends on DRM_AMD_DC_DCN2_0
help
- Choose this option if you want to have
- Dynamic Stream Compression support
+ Choose this option if you want to have
+ Dynamic Stream Compression support
+
+config DRM_AMD_DC_HDCP
+ bool "Enable HDCP support in DC"
+ depends on DRM_AMD_DC
+ help
+	  Choose this option if you want to
+	  support HDCP authentication
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 496cee000f10..36b3d6a5d04d 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -34,12 +34,19 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
+ifdef CONFIG_DRM_AMD_DC_HDCP
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
+endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DAL_LIBS += modules/hdcp
+endif
+
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 94911871eb9b..9a3b7bf8ab0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
+ifdef CONFIG_DRM_AMD_DC_HDCP
+AMDGPUDM += amdgpu_dm_hdcp.o
+endif
+
ifneq ($(CONFIG_DEBUG_FS),)
AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4139f129eafb..7aac9568d3be 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -37,6 +37,9 @@
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#endif
#include "amdgpu_pm.h"
#include "amd_shared.h"
@@ -67,6 +70,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -143,6 +147,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
+static void amdgpu_dm_set_psr_caps(struct dc_link *link);
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+
+
/*
* dm_vblank_get_counter
*
@@ -263,6 +273,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -407,6 +424,13 @@ static void dm_vupdate_high_irq(void *interrupt_params)
}
}
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
static void dm_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -646,11 +670,18 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct dc_callback_init init_params;
+#endif
+
adev->dm.ddev = adev->ddev;
adev->dm.adev = adev;
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&init_params, 0, sizeof(init_params));
+#endif
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
@@ -697,6 +728,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
init_data.flags.multi_mon_pp_mclk_switch = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+ init_data.flags.disable_fractional_pwm = true;
+
init_data.flags.power_down_display_on_boot = true;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
@@ -713,6 +747,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ dc_hardware_init(adev->dm.dc);
+
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -723,6 +759,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -764,6 +812,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc)
+ dc_deinit_callbacks(adev->dm.dc);
+#endif
+
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
@@ -897,27 +955,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int ret = 0;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
- aconnector, aconnector->base.base.id);
+ aconnector,
+ aconnector->base.base.id);
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0) {
DRM_ERROR("DM_MST: Failed to start MST\n");
- ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
- return ret;
- }
+ aconnector->dc_link->type =
+ dc_connection_single;
+ break;
}
+ }
}
+ drm_connector_list_iter_end(&iter);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
@@ -940,6 +1000,11 @@ static int dm_late_init(void *handle)
params.backlight_lut_array_size = 16;
params.backlight_lut_array = linear_lut;
+	/* Min backlight level after ABM reduction; don't allow below 1%
+ * 0xFFFF x 0.01 = 0x28F
+ */
+ params.min_abm_backlight = 0x28F;
+
/* todo will enable for navi10 */
if (adev->asic_type <= CHIP_RAVEN) {
ret = dmcu_load_iram(dmcu, params);
@@ -955,14 +1020,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_port)
@@ -973,15 +1037,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
- ret = drm_dp_mst_topology_mgr_resume(mgr);
+ ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
drm_dp_mst_topology_mgr_set_mst(mgr, false);
need_hotplug = true;
}
}
}
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
@@ -989,7 +1052,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
/**
* dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Initialize the &struct amdgpu_display_manager device. This involves calling
* the initializers of each DM component, then populating the struct with them.
@@ -1019,7 +1082,7 @@ static int dm_hw_init(void *handle)
/**
* dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Teardown components within &struct amdgpu_display_manager that require
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
@@ -1163,6 +1226,7 @@ static int dm_resume(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state;
@@ -1185,17 +1249,18 @@ static int dm_resume(void *handle)
/* program HPD filter */
dc_resume(dm->dc);
- /* On resume we need to rewrite the MSTM control bits to enamble MST*/
- s3_handle_mst(ddev, false);
-
/*
* early enable HPD Rx IRQ, should be done before set mode as short
* pulse interrupts are used for MST
*/
amdgpu_dm_irq_resume_early(adev);
+	/* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
/* Do detection*/
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
/*
@@ -1223,6 +1288,7 @@ static int dm_resume(void *handle)
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
}
+ drm_connector_list_iter_end(&iter);
/* Force mode set in atomic commit */
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
@@ -1438,6 +1504,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+#endif
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1452,6 +1523,9 @@ static void handle_hpd_irq(void *param)
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct amdgpu_device *adev = dev->dev_private;
+#endif
/*
* In case of failure or MST no need to update connector status or notify the OS
@@ -1459,6 +1533,10 @@ static void handle_hpd_irq(void *param)
*/
mutex_lock(&aconnector->hpd_lock);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+#endif
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -1577,6 +1655,12 @@ static void handle_hpd_rx_irq(void *param)
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ union hpd_irq_data hpd_irq_data;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+#endif
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -1586,7 +1670,12 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
+#else
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
+#endif
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_sink(dc_link, &new_connection_type))
@@ -1621,6 +1710,10 @@ static void handle_hpd_rx_irq(void *param)
drm_kms_helper_hotplug_event(dev);
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
dm_handle_hpd_rx_irq(aconnector);
@@ -2334,6 +2427,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
+ if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+ amdgpu_dm_set_psr_caps(link);
}
@@ -3311,8 +3406,12 @@ static void fill_stream_properties_from_drm_display_mode(
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
- memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+ memset(&hv_frame, 0, sizeof(hv_frame));
+ memset(&avi_frame, 0, sizeof(avi_frame));
timing_out->h_border_left = 0;
timing_out->h_border_right = 0;
@@ -3322,6 +3421,9 @@ static void fill_stream_properties_from_drm_display_mode(
if (drm_mode_is_420_only(info, mode_in)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -3346,6 +3448,13 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
}
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
timing_out->h_sync_width =
@@ -3566,6 +3675,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->dm_stream_context = aconnector;
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
+
list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
/* Search for preferred mode */
if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
@@ -3621,8 +3733,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_link_get_link_cap(aconnector->dc_link));
if (dsc_caps.is_dsc_supported)
- if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
&dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg))
@@ -3639,6 +3752,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream, sink);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
+ if (stream->link->psr_feature_enabled) {
+ struct dc *core_dc = stream->link->ctx->dc;
+
+ if (dc_is_dmcu_initialized(core_dc)) {
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ stream->psr_version = dmcu->dmcu_version.psr_version;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+ }
+ }
finish:
dc_sink_release(sink);
@@ -4114,8 +4239,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
result = MODE_OK;
else
DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
- mode->vdisplay,
mode->hdisplay,
+ mode->vdisplay,
mode->clock,
dc_result);
@@ -4494,7 +4619,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
tv.num_shared = 1;
list_add(&tv.head, &list);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
@@ -4837,7 +4962,13 @@ static int to_drm_connector_type(enum signal_type st)
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+ struct drm_encoder *encoder;
+
+ /* There is only one encoder per connector */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
}
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
@@ -5082,6 +5213,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property(
&aconnector->base);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ drm_connector_attach_content_protection_property(&aconnector->base, false);
+#endif
}
}
@@ -5324,6 +5459,53 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
return false;
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static bool is_content_protection_different(struct drm_connector_state *state,
+ const struct drm_connector_state *old_state,
+ const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	/* CP is being re-enabled, ignore this */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ return false;
+ }
+
+ /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+	/* Check if something is connected or enabled; otherwise we would start
+	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
+	 */
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
+ aconnector->dc_sink != NULL)
+ return true;
+
+ if (old_state->content_protection == state->content_protection)
+ return false;
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ return true;
+
+ return false;
+}
+
+static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
+ else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);
+
+}
+#endif
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
@@ -5665,6 +5847,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
+ bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -5710,6 +5893,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane = dm_new_plane_state->dc_state;
+ if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
+ swizzle = false;
+
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -5864,6 +6050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
+ bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
bundle->stream_update.src = acrtc_state->stream->src;
bundle->stream_update.dst = acrtc_state->stream->dst;
@@ -5899,14 +6086,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
-
mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+
dc_commit_updates_for_stream(dm->dc,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
dc_state);
+
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->psr_version &&
+ !acrtc_state->stream->link->psr_feature_enabled)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_feature_enabled &&
+ !acrtc_state->stream->link->psr_allow_active &&
+ swizzle) {
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ }
+
mutex_unlock(&dm->dc_lock);
}
@@ -6215,10 +6417,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
crtc->hwmode = new_crtc_state->mode;
} else if (modereset_required(new_crtc_state)) {
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
-
/* i.e. reset mode */
- if (dm_old_crtc_state->stream)
+ if (dm_old_crtc_state->stream) {
+ if (dm_old_crtc_state->stream->link->psr_allow_active)
+ amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
+
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ }
}
} /* for_each_crtc_in_state() */
@@ -6248,6 +6453,30 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ new_crtc_state = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ continue;
+ }
+
+ if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
+ }
+#endif
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -6287,9 +6516,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
+ stream_update.stream = dm_new_crtc_state->stream;
if (scaling_changed) {
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
- dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+ dm_new_con_state, dm_new_crtc_state->stream);
stream_update.src = dm_new_crtc_state->stream->src;
stream_update.dst = dm_new_crtc_state->stream->dst;
@@ -7158,7 +7388,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
-
+ stream_update.stream = new_dm_crtc_state->stream;
/*
* TODO: DC modifies the surface during this call so we need
* to lock here - find a way to do this without locking.
@@ -7569,3 +7799,92 @@ update:
freesync_capable);
}
+static void amdgpu_dm_set_psr_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return;
+ if (link->type == dc_connection_none)
+ return;
+ if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+ dpcd_data, sizeof(dpcd_data))) {
+ link->psr_feature_enabled = dpcd_data[0] ? true:false;
+ DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+ }
+}
+
+/*
+ * amdgpu_dm_link_setup_psr() - configure psr link
+ * @stream: stream state
+ *
+ * Return: true if successful
+ */
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+{
+ struct dc_link *link = NULL;
+ struct psr_config psr_config = {0};
+ struct psr_context psr_context = {0};
+ struct dc *dc = NULL;
+ bool ret = false;
+
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+ dc = link->ctx->dc;
+
+ psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+
+ if (psr_config.psr_version > 0) {
+ psr_config.psr_exit_link_training_required = 0x1;
+ psr_config.psr_frame_capture_indication_req = 0;
+ psr_config.psr_rfb_setup_time = 0x37;
+ psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
+ psr_config.allow_smu_optimizations = 0x0;
+
+ ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+
+ }
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
+
+ return ret;
+}
+
+/*
+ * amdgpu_dm_psr_enable() - enable psr f/w
+ * @stream: stream state
+ *
+ * Return: true if successful
+ */
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+{
+ struct dc_link *link = stream->link;
+ struct dc_static_screen_events triggers = {0};
+
+ DRM_DEBUG_DRIVER("Enabling psr...\n");
+
+ triggers.cursor_update = true;
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ dc_stream_set_static_screen_events(link->ctx->dc,
+ &stream, 1,
+ &triggers);
+
+ return dc_link_set_psr_allow_active(link, true, false);
+}
+
+/*
+ * amdgpu_dm_psr_disable() - disable psr f/w
+ * @stream: stream state
+ *
+ * Return: true if successful
+ */
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+{
+
+ DRM_DEBUG_DRIVER("Disabling psr...\n");
+
+ return dc_link_set_psr_allow_active(stream->link, false, true);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index c8c525a2b505..77c5166e6b08 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -108,6 +108,12 @@ struct amdgpu_dm_backlight_caps {
* @display_indexes_num: Max number of display streams supported
* @irq_handler_list_table_lock: Synchronizes access to IRQ tables
* @backlight_dev: Backlight control device
+ * @backlight_link: Link on which to control backlight
+ * @backlight_caps: Capabilities of the backlight device
+ * @freesync_module: Module handling freesync calculations
+ * @fw_dmcu: Reference to DMCU firmware
+ * @dmcu_fw_version: Version of the DMCU firmware
+ * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
*/
@@ -128,7 +134,7 @@ struct amdgpu_display_manager {
u16 display_indexes_num;
/**
- * @atomic_obj
+ * @atomic_obj:
*
* In combination with &dm_atomic_state it helps manage
* global atomic state that doesn't map cleanly into existing
@@ -225,6 +231,9 @@ struct amdgpu_display_manager {
struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct hdcp_workqueue *hdcp_workqueue;
+#endif
struct drm_atomic_state *cached_state;
@@ -234,6 +243,8 @@ struct amdgpu_display_manager {
uint32_t dmcu_fw_version;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/**
+ * @soc_bounding_box:
+ *
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
@@ -287,6 +298,7 @@ struct amdgpu_dm_connector {
uint32_t debugfs_dpcd_address;
uint32_t debugfs_dpcd_size;
#endif
+ bool force_yuv420_output;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b43bb7f90e4e..2233d293a707 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
NULL);
+ dc_gamma_release(&gamma);
+
return res ? 0 : -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index a549c7c717dd..eaad9099bc0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
}
/* Configure dithering */
- if (!dm_need_crc_dither(source))
+ if (!dm_need_crc_dither(source)) {
dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
- else
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_DISABLE);
+ } else {
dc_stream_set_dither_option(stream_state,
DITHER_OPTION_DEFAULT);
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_AUTO);
+ }
unlock:
mutex_unlock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f3dfb2887ae0..bdb37e611015 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -942,6 +942,52 @@ static const struct {
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
+/*
+ * Force YUV420 output if available from the given mode
+ */
+static int force_yuv420_output_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ connector->force_yuv420_output = (bool)val;
+
+ return 0;
+}
+
+/*
+ * Check if YUV420 is forced when available from the given mode
+ */
+static int force_yuv420_output_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ *val = connector->force_yuv420_output;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
+ force_yuv420_output_set, "%llu\n");
+
+/*
+ * Read PSR state
+ */
+static int psr_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ uint32_t psr_state = 0;
+
+ dc_link_get_psr_state(link, &psr_state);
+
+ *val = psr_state;
+
+ return 0;
+}
+
+
+DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+
void connector_debugfs_init(struct amdgpu_dm_connector *connector)
{
int i;
@@ -955,6 +1001,12 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
dp_debugfs_entries[i].fops);
}
}
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+
+ debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
+ &force_yuv420_output_fops);
+
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
new file mode 100644
index 000000000000..77181ddf6c8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu_dm_hdcp.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "dm_helpers.h"
+#include <drm/drm_hdcp.h>
+
+static bool
+lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+
+ struct dc_link *link = handle;
+ struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
+ struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
+ struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size);
+}
+
+static bool
+lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
+}
+
+static void process_output(struct hdcp_workqueue *hdcp_work)
+{
+ struct mod_hdcp_output output = hdcp_work->output;
+
+ if (output.callback_stop)
+ cancel_delayed_work(&hdcp_work->callback_dwork);
+
+ if (output.callback_needed)
+ schedule_delayed_work(&hdcp_work->callback_dwork,
+ msecs_to_jiffies(output.callback_delay));
+
+ if (output.watchdog_timer_stop)
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ if (output.watchdog_timer_needed)
+ schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
+ msecs_to_jiffies(output.watchdog_timer_delay));
+
+}
+
+void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ mutex_lock(&hdcp_w->mutex);
+ hdcp_w->aconnector = aconnector;
+
+ mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+
+ schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+
+}
+
+void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+
+}
+
+void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ schedule_work(&hdcp_w->cpirq_work);
+}
+
+static void event_callback(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
+ callback_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+
+
+}
+static void event_property_update(struct work_struct *work)
+{
+
+ struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+ struct drm_device *dev = hdcp_work->aconnector->base.dev;
+ long ret;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&hdcp_work->mutex);
+
+
+ if (aconnector->base.state->commit) {
+ ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
+
+ if (ret == 0) {
+ DRM_ERROR("HDCP state unknown! Setting it to DESIRED");
+ hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
+ }
+
+ if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON)
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ else
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+
+
+ mutex_unlock(&hdcp_work->mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static void event_property_validate(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work =
+ container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
+ struct mod_hdcp_display_query query;
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+
+ mutex_lock(&hdcp_work->mutex);
+
+ query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+
+ if (query.encryption_status != hdcp_work->encryption_status) {
+ hdcp_work->encryption_status = query.encryption_status;
+ schedule_work(&hdcp_work->property_update_work);
+ }
+
+ schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_watchdog_timer(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work),
+ struct hdcp_workqueue,
+ watchdog_timer_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+
+}
+
+static void event_cpirq(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+
+}
+
+
+void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+{
+ int i = 0;
+
+ for (i = 0; i < hdcp_work->max_link; i++) {
+ cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ }
+
+ kfree(hdcp_work);
+
+}
+
+static void update_config(void *handle, struct cp_psp_stream_config *config)
+{
+ struct hdcp_workqueue *hdcp_work = handle;
+ struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
+ int link_index = aconnector->dc_link->link_index;
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ memset(display, 0, sizeof(*display));
+ memset(link, 0, sizeof(*link));
+
+ display->index = aconnector->base.index;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+
+ if (aconnector->dc_sink != NULL)
+ link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
+
+ display->controller = CONTROLLER_ID_D0 + config->otg_inst;
+ display->dig_fe = config->stream_enc_inst;
+ link->dig_be = config->link_enc_inst;
+ link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
+ link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ link->adjust.hdcp2.disable = 1;
+}
+
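+/*
+ * Allocate one hdcp_workqueue entry per link and wire up the work items, the
+ * PSP handle and the DDC read/write helpers for each of them.
+ */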
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ struct hdcp_workqueue *hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
+ int i = 0;
+
+ if (hdcp_work == NULL)
+ goto fail_alloc_context;
+
+ hdcp_work->max_link = max_caps;
+
+ for (i = 0; i < max_caps; i++) {
+
+ mutex_init(&hdcp_work[i].mutex);
+
+ INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
+ INIT_WORK(&hdcp_work[i].property_update_work, event_property_update);
+ INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback);
+ INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
+ INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
+
+ hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
+ hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ }
+
+ cp_psp->funcs.update_stream_config = update_config;
+ cp_psp->handle = hdcp_work;
+
+ return hdcp_work;
+
+fail_alloc_context:
+ kfree(hdcp_work);
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
new file mode 100644
index 000000000000..d3ba505d0696
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_
+#define AMDGPU_DM_AMDGPU_DM_HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp.h"
+#include "dc.h"
+#include "dm_cp_psp.h"
+
+struct mod_hdcp;
+struct mod_hdcp_link;
+struct mod_hdcp_display;
+struct cp_psp;
+
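+/*
+ * Per-link HDCP context: hdcp_create_workqueue() allocates one of these for
+ * each DC link; the work items serialize HDCP events under the mutex.
+ */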
+struct hdcp_workqueue {
+ struct work_struct cpirq_work;
+ struct work_struct property_update_work;
+ struct delayed_work callback_dwork;
+ struct delayed_work watchdog_timer_dwork;
+ struct delayed_work property_validate_dwork;
+ struct amdgpu_dm_connector *aconnector;
+ struct mutex mutex;
+
+ struct mod_hdcp hdcp;
+ struct mod_hdcp_output output;
+ struct mod_hdcp_display display;
+ struct mod_hdcp_link link;
+
+ enum mod_hdcp_encryption_status encryption_status;
+ uint8_t max_link;
+};
+
+void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector);
+void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index);
+void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_destroy(struct hdcp_workqueue *work);
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+
+#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ee1dc75f5ddc..11e5784aa62a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -97,11 +97,10 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
(struct edid *) edid->raw_edid);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
- if (sad_count <= 0) {
- DRM_INFO("SADs count is: %d, don't need to read it\n",
- sad_count);
+ if (sad_count < 0)
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return result;
- }
edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
for (i = 0; i < edid_caps->audio_mode_count; ++i) {
@@ -282,7 +281,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream)
{
@@ -293,19 +292,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
if (!aconnector || !aconnector->mst_port)
- return false;
+ return ACT_FAILED;
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
- return false;
+ return ACT_FAILED;
ret = drm_dp_check_act_status(mst_mgr);
if (ret)
- return false;
+ return ACT_FAILED;
- return true;
+ return ACT_SUCCESS;
}
bool dm_helpers_dp_mst_send_payload_allocation(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index fa5d503d379c..64445c4cc4c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -732,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
@@ -751,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
true);
}
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -765,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
@@ -779,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
false);
}
}
+ drm_connector_list_iter_end(&iter);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 16218a202b59..2bf8534c18fb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,7 +36,9 @@
#include "dc_link_ddc.h"
#include "i2caux_interface.h"
-
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
/* #define TRACE_DPCD */
#ifdef TRACE_DPCD
@@ -113,6 +115,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
result = -EIO;
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
result = -EBUSY;
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
@@ -123,31 +126,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
return result;
}
-static enum drm_connector_status
-dm_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_dm_connector *master = aconnector->mst_port;
-
- enum drm_connector_status status =
- drm_dp_mst_detect_port(
- connector,
- &master->mst_mgr,
- aconnector->port);
-
- return status;
-}
-
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
- if (amdgpu_dm_connector->edid) {
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
- }
+ kfree(amdgpu_dm_connector->edid);
+ amdgpu_dm_connector->edid = NULL;
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
@@ -163,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+ amdgpu_dm_connector->debugfs_dpcd_address = 0;
+ amdgpu_dm_connector->debugfs_dpcd_size = 0;
+#endif
+
return drm_dp_mst_connector_late_register(connector, port);
}
@@ -177,7 +169,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
}
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
- .detect = dm_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dm_dp_mst_connector_destroy,
.reset = amdgpu_dm_connector_funcs_reset,
@@ -245,17 +236,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+dm_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
+}
+
+static int
+dm_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *master = aconnector->mst_port;
- return &amdgpu_dm_connector->mst_encoder->base;
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ aconnector->port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
.get_modes = dm_dp_mst_get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
- .best_encoder = dm_mst_best_encoder,
+ .atomic_best_encoder = dm_mst_atomic_best_encoder,
+ .detect_ctx = dm_dp_mst_detect,
};
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -416,7 +419,11 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
- aconnector->base.name, dm->adev->dev);
+ &aconnector->base);
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ return;
+
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index f4cfa0caeba8..55a520a63712 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -345,7 +345,7 @@ bool dm_pp_get_clock_levels_by_type(
/* Error in pplib. Provide default values. */
return true;
}
- } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) {
if (smu_get_clock_by_type(&adev->smu,
dc_to_pp_clock_type(clk_type),
&pp_clks)) {
@@ -365,7 +365,7 @@ bool dm_pp_get_clock_levels_by_type(
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
- } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) {
if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
validation_clks.engine_max_clock = 72000;
@@ -506,8 +506,8 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
- else if (adev->smu.funcs &&
- adev->smu.funcs->display_clock_voltage_request)
+ else if (adev->smu.ppt_funcs &&
+ adev->smu.ppt_funcs->display_clock_voltage_request)
ret = smu_display_clock_voltage_request(&adev->smu,
&pp_clock_request);
if (ret)
@@ -527,7 +527,7 @@ bool dm_pp_get_static_clocks(
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
- else if (adev->smu.funcs)
+ else if (adev->smu.ppt_funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
if (ret)
return false;
@@ -589,10 +589,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else if (adev->smu.funcs &&
- adev->smu.funcs->set_watermarks_for_clock_ranges)
+ else
smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges);
+ &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -604,7 +603,7 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
pp_funcs->notify_smu_enable_pwe(pp_handle);
- else if (adev->smu.funcs)
+ else if (adev->smu.ppt_funcs)
smu_notify_smu_enable_pwe(&adev->smu);
}
@@ -665,7 +664,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
wm_with_clock_ranges.wm_dmif_clocks_ranges;
@@ -708,15 +706,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
- if (!smu->funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
-
- /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
- * 1: fail
- */
- if (smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges))
- return PP_SMU_RESULT_UNSUPPORTED;
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
return PP_SMU_RESULT_OK;
}
@@ -727,10 +717,10 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */
+ /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */
if (smu_set_azalia_d3_pme(smu))
return PP_SMU_RESULT_FAIL;
@@ -743,10 +733,10 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */
+ /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */
if (smu_set_display_count(smu, count))
return PP_SMU_RESULT_FAIL;
@@ -759,10 +749,10 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */
+ /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */
if (smu_set_deep_sleep_dcefclk(smu, mhz))
return PP_SMU_RESULT_FAIL;
@@ -777,13 +767,13 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -799,13 +789,13 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_mem_clock;
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -835,7 +825,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
switch (clock_id) {
@@ -853,7 +843,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
}
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -869,13 +859,13 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->funcs->get_max_sustainable_clocks_by_dc)
+ if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
+ if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
@@ -894,13 +884,97 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
if (!smu->ppt_funcs->get_uclk_dpm_states)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
+ if (!smu_get_uclk_dpm_states(smu,
clock_values_in_khz, num_states))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
}
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
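+/* Renoir (DCN 2.1) hooks: DPM clock table query and watermark programming for DC */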
+enum pp_smu_status pp_rn_get_dpm_clock_table(
+ struct pp_smu *pp, struct dpm_clocks *clock_table)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu->ppt_funcs->get_dpm_clock_table)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu_get_dpm_clock_table(smu, clock_table))
+ return PP_SMU_RESULT_OK;
+
+ return PP_SMU_RESULT_FAIL;
+}
+
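+/*
+ * Translate the DC watermark range sets into the SOC15 watermark structure and
+ * hand them to the SMU.
+ */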
+enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
+ wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
+ wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_mhz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_mhz;
+ }
+
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+
+ return PP_SMU_RESULT_OK;
+}
+#endif
+
void dm_pp_get_funcs(
struct dc_context *ctx,
struct pp_smu_funcs *funcs)
@@ -945,6 +1019,15 @@ void dm_pp_get_funcs(
funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
break;
#endif
+
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+ case DCN_VERSION_2_1:
+ funcs->ctx.ver = PP_SMU_VER_RN;
+ funcs->rn_funcs.pp_smu.dm = ctx;
+ funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
+ funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
+ break;
+#endif
default:
DRM_ERROR("smu version is not supported !\n");
break;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 627982cb15d2..a160512a2f04 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -48,6 +48,10 @@ DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DC_LIBS += hdcp
+endif
+
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 221e0f56389f..823843cd2613 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info(
/* Sort voltage table from low to high*/
if (result == BP_RESULT_OK) {
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j].max_supported_clk <
info->disp_clk_voltage[j-1].max_supported_clk) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index dff65c0fe82f..7873abea4112 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info(
struct atom_common_table_header *header;
struct atom_data_revision revision;
-
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j-1].max_supported_clk
) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index c43797bea413..8828dd9c3783 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -65,6 +65,31 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
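+/*
+ * Exit any optimized power state before clock programming: run the hw sequencer
+ * hook and temporarily disable PSR on the eDP link, caching its previous state.
+ */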
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (dc->hwss.exit_optimized_pwr_state)
+ dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
+
+ if (edp_link) {
+ clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+ dc_link_set_psr_allow_active(edp_link, false, false);
+ }
+}
+
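+/* Restore the cached PSR state on the eDP link and re-enter the optimized power state */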
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link)
+ dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false);
+
+ if (dc->hwss.optimize_pwr_state)
+ dc->hwss.optimize_pwr_state(dc, dc->current_state);
+}
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index c5c8c4901eed..26db1c5d4e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
/* Calculate the current DFS clock, in kHz.*/
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz) / target_div;
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -239,7 +239,7 @@ int dce_set_clock(
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr_dce->dentist_vco_freq_khz / 64);
+ clk_mgr_dce->base.dentist_vco_freq_khz / 64);
/* Prepare to program display clock*/
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
@@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
int i;
if (bp->integrated_info)
- clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
- clk_mgr_dce->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0)
- clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) {
+ clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->base.dentist_vco_freq_khz == 0)
+ clk_mgr_dce->base.dentist_vco_freq_khz = 3600000;
}
/*update the maximum display clock for each power state*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 7c746ef1e32e..a6c46e903ff9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr_dce->dentist_vco_freq_khz / 62);
+ clk_mgr_dce->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
@@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr->dentist_vco_freq_khz / 62);
+ clk_mgr->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 47f529ce280a..3fab9296918a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -139,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
ASSERT(clk_mgr->pp_smu);
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
pp_smu = &clk_mgr->pp_smu->rv_funcs;
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
@@ -266,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_
clk_mgr->base.dprefclk_khz = 600000;
if (bp->integrated_info)
- clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (bp->fw_info_valid && clk_mgr->dentist_vco_freq_khz == 0) {
- clk_mgr->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) {
+ clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 3e8ac303bd52..25d7b7c6681c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -104,84 +104,39 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
{
int i;
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz;
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
-
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ /* Loop index will match dpp->inst if resource exists,
+ * and we want to avoid dependency on dpp object
+ */
+ dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
clk_mgr->dccg->funcs->update_dpp_dto(
- clk_mgr->dccg, dpp_inst, dppclk_khz, false);
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
}
}
-static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
{
int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / khz;
-
- uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
-
- REG_UPDATE(DENTIST_DISPCLK_CNTL,
- DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
-}
-
-static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
-{
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / khz;
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
+ uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
+// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
}
-static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- struct pp_smu_funcs_nv *pp_smu = NULL;
- bool going_up = clk_mgr->base.clks.dispclk_khz < khz;
-
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->nv_funcs;
-
- clk_mgr->base.clks.dispclk_khz = khz;
-
- if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
-
- update_display_clk(clk_mgr, khz);
-
- if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
-}
-
-static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- struct pp_smu_funcs_nv *pp_smu = NULL;
- bool going_up = clk_mgr->base.clks.dppclk_khz < khz;
-
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->nv_funcs;
-
- clk_mgr->base.clks.dppclk_khz = khz;
- clk_mgr->dccg->ref_dppclk = khz;
-
- if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-
- update_global_dpp_clk(clk_mgr, khz);
-
- if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-}
void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
@@ -192,11 +147,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc *dc = clk_mgr_base->ctx->dc;
struct pp_smu_funcs_nv *pp_smu = NULL;
int display_count;
+ bool update_dppclk = false;
bool update_dispclk = false;
bool enter_display_off = false;
+ bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
- int i;
if (dc->work_arounds.skip_clock_update)
return;
@@ -251,12 +207,10 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-
clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
- clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
@@ -264,50 +218,40 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000);
}
- if (dc->config.forced_clocks == false) {
- // First update display clock
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz))
- request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz);
-
- // Updating DPP clock requires some more logic
- if (!safe_to_lower) {
- // For pre-programming, we need to make sure any DPP clock that will go up has to go up
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
- // First raise the global reference if needed
- if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz)
- request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
- // Then raise any dividers that need raising
- for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
+ update_dppclk = true;
+ }
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
- dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ update_dispclk = true;
+ }
- clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true);
- }
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ if (dpp_clock_lowered) {
+ // if clock is being lowered, increase DTO before lowering refclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dentist(clk_mgr);
} else {
- // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs
-
- if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz)
- request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
-
- for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
-
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
-
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
- dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
-
- clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false);
- }
+ // if clock is being raised, increase refclk before lowering DTO
+ if (update_dppclk || update_dispclk)
+ dcn20_update_clocks_update_dentist(clk_mgr);
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
}
+
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
@@ -320,6 +264,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
@@ -357,14 +303,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
}
- /* Both fclk and dppclk ref are run on the same scemi clock so we
- * need to keep the same value for both
+ /* Both fclk and ref_dppclk run on the same scemi clock.
+ * So take the higher value since the DPP DTO is typically programmed
+ * such that max dppclk is 1:1 with ref_dppclk.
*/
if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
+ // Both fclk and ref_dppclk run on the same scemi clock.
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
}
@@ -409,12 +359,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
}
}
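+/*
+ * Backs the new are_clock_states_equal hook: true only when every tracked clock
+ * and the p-state support flag match.
+ */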
+static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->socclk_khz != b->socclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->phyclk_khz != b->phyclk_khz)
+ return false;
+ else if (a->dramclk_khz != b->dramclk_khz)
+ return false;
+ else if (a->p_state_change_support != b->p_state_change_support)
+ return false;
+
+ return true;
+}
+
static struct clk_mgr_funcs dcn2_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn2_update_clocks,
.init_clocks = dcn2_init_clocks,
.enable_pme_wa = dcn2_enable_pme_wa,
.get_clock = dcn2_get_clock,
+ .are_clock_states_equal = dcn2_are_clock_states_equal,
};
@@ -442,7 +416,7 @@ void dcn20_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn2_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->dentist_vco_freq_khz = 3850000;
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
} else {
/* DFS Slice 2 should be used for DPREFCLK */
@@ -466,15 +440,15 @@ void dcn20_clk_mgr_construct(
pll_req = dc_fixpt_mul_int(pll_req, 100000);
/* integer part is now VCO frequency in kHz */
- clk_mgr->dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
+ clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
/* in case we don't get a value from the register, use default */
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3850000;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
/* Calculate the DPREFCLK in kHz.*/
clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz) / target_div;
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
}
//Integrated_info table does not exist on dGPU projects so should not be referenced
//anywhere in code for dGPUs.
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index ac31a9787305..c9fd824f3c23 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
#endif //__DCN20_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 787f94d815f4..790a2d211bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -52,6 +52,45 @@
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
+int rn_get_active_display_cnt_wa(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+ bool hdmi_present = false;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ hdmi_present = true;
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ const struct dc_link *link = dc->links[i];
+
+ /*
+ * Only notify active stream or virtual stream.
+ * Need to notify virtual stream to work around
+ * headless case. HPD does not fire when system is in
+ * S0i2.
+ */
+ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ display_count++;
+ }
+
+ /* WA for hang on HDMI after the display is turned off and back on */
+ if (display_count == 0 && hdmi_present)
+ display_count = 1;
+
+ return display_count;
+}
+
void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
- bool enter_display_off = false;
bool dpp_clock_lowered = false;
- struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+ struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- if (display_count == 0)
- enter_display_off = true;
+ if (dc->work_arounds.skip_clock_update)
+ return;
- if (enter_display_off == safe_to_lower) {
- rn_vbios_smu_set_display_count(clk_mgr, display_count);
+ /*
+ * If it is safe to lower and we are already in the low-power state, there is
+ * nothing to do; if it is not safe to lower, always go to the higher-power
+ * (mission mode) state.
+ */
+ if (safe_to_lower) {
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+
+ display_count = rn_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ }
+ } else {
+ /* check that we're not already in D0 */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
+ }
}
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
@@ -113,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
- if (update_dppclk)
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
@@ -319,7 +378,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
- s->dprefclk_khz = sb.dprefclk;
+ s->dprefclk_khz = sb.dprefclk * 1000;
}
void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -329,12 +388,96 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
rn_vbios_smu_enable_pme_wa(clk_mgr);
}
+void rn_init_clocks(struct clk_mgr *clk_mgr)
+{
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+}
+
+void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+{
+ int i, num_valid_sets;
+
+ num_valid_sets = 0;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ /* skip empty entries, the smu array has no holes */
+ if (!bw_params->wm_table.entries[i].valid)
+ continue;
+
+ ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
+ ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
+ /* We will not select WM based on dcfclk, so leave it as unconstrained */
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ /* fclk will be used to select WM */
+
+ if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
+ if (i == 0)
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+ else {
+ /* add 1 to make it non-overlapping with next lvl */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+ }
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+
+ } else {
+ /* unconstrained for memory retraining */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* Modify previous watermark range to cover up to max */
+ ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ }
+ num_valid_sets++;
+ }
+
+ ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
+ ranges->num_reader_wm_sets = num_valid_sets;
+
+ /* modify the min and max to make sure we cover the whole range */
+ ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* This is for writeback only; it does not matter currently as there is no writeback support */
+ ranges->num_writer_wm_sets = 1;
+ ranges->writer_wm_sets[0].wm_inst = WM_A;
+ ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+}
+
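+/* Build watermark ranges from bw_params and notify the SMU via the PP lib hook */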
+static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
+ struct pp_smu_wm_range_sets ranges = {0};
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
+
+ if (!debug->disable_pplib_wm_range) {
+ build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
+ pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ }
+}
+
static struct clk_mgr_funcs dcn21_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rn_update_clocks,
- .init_clocks = dcn2_init_clocks,
+ .init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
- /* .dump_clk_registers = rn_dump_clk_registers */
+ /* .dump_clk_registers = rn_dump_clk_registers, */
+ .notify_wm_ranges = rn_notify_wm_ranges
};
struct clk_bw_params rn_bw_params = {
@@ -405,80 +548,50 @@ struct clk_bw_params rn_bw_params = {
}
};
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
- int i, num_valid_sets;
-
- num_valid_sets = 0;
-
- for (i = 0; i < WM_SET_COUNT; i++) {
- /* skip empty entries, the smu array has no holes*/
- if (!bw_params->wm_table.entries[i].valid)
- continue;
-
- ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
- ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
- /* We will not select WM based on dcfclk, so leave it as unconstrained */
- ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- /* fclk wil be used to select WM*/
-
- if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
- if (i == 0)
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
- else {
- /* add 1 to make it non-overlapping with next lvl */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
- }
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
-
- } else {
- /* unconstrained for memory retraining */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ int i;
- /* Modify previous watermark range to cover up to max */
- ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- }
- num_valid_sets++;
+ for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
+ if (clock_table->DcfClocks[i].Vol == voltage)
+ return clock_table->DcfClocks[i].Freq;
}
- ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
- ranges->num_reader_wm_sets = num_valid_sets;
-
- /* modify the min and max to make sure we cover the whole range*/
- ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
-
- /* This is for writeback only, does not matter currently as no writeback support*/
- ranges->num_writer_wm_sets = 1;
- ranges->writer_wm_sets[0].wm_inst = WM_A;
- ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
-
+ ASSERT(0);
+ return 0;
}
-void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
{
- int i;
+ int i, j = -1;
ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
- for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
- if (clock_table->FClocks[i].Freq == 0)
+ /* Find lowest DPM, FCLK is filled in reverse order */
+
+ for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
+ if (clock_table->FClocks[i].Freq != 0) {
+ j = i;
break;
+ }
+ }
+
+ if (j == -1) {
+ /* clock table is all 0s, just use our own hardcode */
+ ASSERT(0);
+ return;
+ }
+
+ bw_params->clk_table.num_entries = j + 1;
- bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq;
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq;
- bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq;
- bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol;
+ for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+ bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
+ bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
}
- bw_params->clk_table.num_entries = i;
bw_params->vram_type = asic_id->vram_type;
bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
@@ -486,7 +599,7 @@ void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct d
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
- if (clock_table->FClocks[i].Freq == 0) {
+ if (i >= bw_params->clk_table.num_entries) {
bw_params->wm_table.entries[i].valid = false;
continue;
}
@@ -534,57 +647,42 @@ void rn_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
clk_mgr->base.dprefclk_khz = 600000;
} else {
struct clk_log_info log_info = {0};
/* TODO: Check we get what we expect during bringup */
- clk_mgr->dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
+ clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
/* in case we don't get a value from the register, use default */
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
- clk_mgr->base.dprefclk_khz = s.dprefclk;
-
- if (clk_mgr->base.dprefclk_khz != 600000) {
- clk_mgr->base.dprefclk_khz = 600000;
- ASSERT(1); //TODO: Renoir follow up.
- }
+ /* Convert dprefclk units from MHz to KHz */
+ /* Value already divided by 10, some resolution lost */
+ clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
/* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dprefclk_khz == 0)
+ if (clk_mgr->base.dprefclk_khz == 0) {
+ ASSERT(clk_mgr->base.dprefclk_khz == 600000);
clk_mgr->base.dprefclk_khz = 600000;
+ }
}
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = &rn_bw_params;
- if (pp_smu) {
+ if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
- clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
+ rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
}
- /*
- * Notify SMU which set of WM should be selected for different ranges of fclk
- * On Renoir there is a maximumum of 4 DF pstates supported, could be less
- * depending on DDR speed and fused maximum fclk.
- */
- if (!debug->disable_pplib_wm_range) {
- struct pp_smu_wm_range_sets ranges = {0};
-
- build_watermark_ranges(clk_mgr->base.bw_params, &ranges);
-
- /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
- if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
- pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
+ /* enable powerfeatures when displaycount goes to 0 */
+ rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
}
-
- /* enable powerfeatures when displaycount goes to 0 */
- if (!debug->disable_48mhz_pwrdwn)
- rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr);
}
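For reference, a minimal standalone sketch of the reverse-order scan that the new rn_clk_mgr_helper_populate_bw_params() performs: find the last populated FCLK level (the SMU table is assumed to be filled highest-first, per the in-code comment), then copy it back-to-front so entry 0 ends up as the lowest clock. The struct and array below are simplified stand-ins, not the driver's dpm_clocks/clk_bw_params types.

#include <stdio.h>

#define NUM_FCLK_LEVELS 4

struct dpm_level { unsigned int fclk_mhz; unsigned int voltage; };

int main(void)
{
        /* Example SMU table: populated levels first, trailing entries zeroed. */
        struct dpm_level fclocks[NUM_FCLK_LEVELS] = { {1600, 4}, {1200, 3}, {0, 0}, {0, 0} };
        struct dpm_level clk_table[NUM_FCLK_LEVELS];
        int i, j = -1;

        /* Scan from the top down for the last populated level. */
        for (i = NUM_FCLK_LEVELS - 1; i >= 0; i--) {
                if (fclocks[i].fclk_mhz != 0) {
                        j = i;
                        break;
                }
        }
        if (j == -1)
                return 1;       /* all zeros: the driver asserts and keeps its hardcoded table */

        /* Copy back-to-front so entry 0 ends up as the lowest clock. */
        for (i = 0; i <= j; i++)
                clk_table[i] = fclocks[j - i];

        for (i = 0; i <= j; i++)
                printf("entry %d: %u MHz, voltage %u\n", i,
                       clk_table[i].fclk_mhz, clk_table[i].voltage);
        return 0;
}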
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index aadec06fde10..e4322fa5475b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -26,11 +26,13 @@
#ifndef __RN_CLK_MGR_H__
#define __RN_CLK_MGR_H__
+#include "clk_mgr.h"
+#include "dm_pp_smu.h"
+
struct rn_clk_registers {
uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */
};
-
void rn_clk_mgr_construct(struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 50984c1811bb..cb7c0e8b7e1b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -33,7 +33,7 @@
#include "mp/mp_12_0_0_sh_mask.h"
#define REG(reg_name) \
- (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
@@ -84,16 +84,12 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
int actual_dispclk_set_mhz = -1;
struct dc *core_dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- uint32_t clk = requested_dispclk_khz / 1000;
-
- if (clk <= 100)
- clk = 101;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
- clk);
+ requested_dispclk_khz / 1000);
if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
@@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
{
int actual_dcfclk_set_mhz = -1;
- if (clk_mgr->smu_ver < 0xFFFFFFFF)
+ if (clk_mgr->smu_ver < 0x370c00)
return actual_dcfclk_set_mhz;
actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(
@@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int
{
int actual_min_ds_dcfclk_mhz = -1;
- if (clk_mgr->smu_ver < 0xFFFFFFFF)
+ if (clk_mgr->smu_ver < 0x370c00)
return actual_min_ds_dcfclk_mhz;
actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(
@@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_
{
int actual_dppclk_set_mhz = -1;
- uint32_t clk = requested_dpp_khz / 1000;
-
- if (clk <= 100)
- clk = 101;
-
actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
- clk);
+ requested_dpp_khz / 1000);
return actual_dppclk_set_mhz * 1000;
}
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count)
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state)
{
+ int disp_count;
+
+ if (state == DCN_PWR_STATE_LOW_POWER)
+ disp_count = 0;
+ else
+ disp_count = 1;
+
rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDisplayCount,
- display_count);
+ clk_mgr,
+ VBIOSSMC_MSG_SetDisplayCount,
+ disp_count);
}
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr)
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
- 0);
+ enable);
}
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index da3a49487c6d..ccc01879c9d4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count);
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr);
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, int display_count);
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4b8819c27fcd..32f31bf91915 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -194,7 +194,7 @@ static bool create_links(
}
}
- if (!should_destory_link) {
+ if (dc->config.force_enum_edp || !should_destory_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
@@ -411,6 +411,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
return false;
}
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option)
+{
+ /* OPP FMT dyn expansion updates*/
+ int i = 0;
+ struct pipe_ctx *pipe_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx->stream_res.opp->dyn_expansion = option;
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ stream->signal);
+ }
+ }
+}
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
@@ -765,8 +786,13 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
- dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+#endif
}
current_ctx = dc->current_state;
@@ -789,9 +815,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (false == construct(dc, init_params))
goto construct_fail;
- /*TODO: separate HW and SW initialization*/
- dc->hwss.init_hw(dc);
-
full_pipe_count = dc->res_pool->pipe_count;
if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
full_pipe_count--;
@@ -824,9 +847,24 @@ alloc_fail:
return NULL;
}
+void dc_hardware_init(struct dc *dc)
+{
+ dc->hwss.init_hw(dc);
+}
+
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ dc->ctx->cp_psp = init_params->cp_psp;
+#endif
+}
+
+void dc_deinit_callbacks(struct dc *dc)
+{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
+#endif
}
void dc_destroy(struct dc **dc)
@@ -905,15 +943,11 @@ static void program_timing_sync(
/* set first pipe with plane as master */
for (j = 0; j < group_size; j++) {
- struct pipe_ctx *temp;
-
if (pipe_set[j]->plane_state) {
if (j == 0)
break;
- temp = pipe_set[0];
- pipe_set[0] = pipe_set[j];
- pipe_set[j] = temp;
+ swap(pipe_set[0], pipe_set[j]);
break;
}
}
@@ -970,40 +1004,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
struct dc_crtc_timing *crtc_timing)
{
struct timing_generator *tg;
+ struct stream_encoder *se = NULL;
+
+ struct dc_crtc_timing hw_crtc_timing = {0};
+
struct dc_link *link = sink->link;
- unsigned int enc_inst, tg_inst;
+ unsigned int i, enc_inst, tg_inst = 0;
+
+ // Seamless port only supports single DP and EDP so far
+ if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
+ sink->sink_signal != SIGNAL_TYPE_EDP)
+ return false;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- /* Instance should be within the range of the pool */
- if (enc_inst >= dc->res_pool->pipe_count)
+ if (enc_inst == ENGINE_ID_UNKNOWN)
return false;
- if (enc_inst >= dc->res_pool->stream_enc_count)
- return false;
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ if (dc->res_pool->stream_enc[i]->id == enc_inst) {
+
+ se = dc->res_pool->stream_enc[i];
+
+ tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[i]);
+ break;
+ }
+ }
- tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg(
- dc->res_pool->stream_enc[enc_inst]);
+ // tg_inst not found
+ if (i == dc->res_pool->stream_enc_count)
+ return false;
if (tg_inst >= dc->res_pool->timing_generator_count)
return false;
tg = dc->res_pool->timing_generators[tg_inst];
- if (!tg->funcs->is_matching_timing)
+ if (!tg->funcs->get_hw_timing)
+ return false;
+
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ return false;
+
+ if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ return false;
+
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
return false;
- if (!tg->funcs->is_matching_timing(tg, crtc_timing))
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ return false;
+
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ return false;
+
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ return false;
+
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ return false;
+
+ if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ return false;
+
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
+ return false;
+
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ return false;
+
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ return false;
+
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
+ return false;
+
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
if (dc_is_dp_signal(link->connector_signal)) {
@@ -1016,6 +1097,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
+ if (!se->funcs->dp_get_pixel_format)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format(
+ se,
+ &hw_crtc_timing.pixel_encoding,
+ &hw_crtc_timing.display_color_depth))
+ return false;
+
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ return false;
+
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ return false;
}
return true;
@@ -1077,15 +1172,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->mode_changed)
- continue;
+ if (dc->hwss.apply_ctx_for_surface)
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context); /* use new pipe config in new context */
- }
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1104,16 +1204,21 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
/*
* enable stereo
@@ -1140,15 +1245,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (!dc->optimize_seamless_boot)
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
-
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
- memset(&context->commit_hints, 0, sizeof(context->commit_hints));
-
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1496,20 +1595,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
union surface_update_flags *update_flags = &u->surface->update_flags;
- update_flags->raw = 0; // Reset all flags
-
if (u->flip_addr)
update_flags->bits.addr_update = 1;
- if (!is_surface_in_context(context, u->surface)) {
- update_flags->bits.new_plane = 1;
+ if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ update_flags->raw = 0xFFFFFFFF;
return UPDATE_TYPE_FULL;
}
- if (u->surface->force_full_update) {
- update_flags->bits.full_update = 1;
- return UPDATE_TYPE_FULL;
- }
+ update_flags->raw = 0; // Reset all flags
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
@@ -1567,40 +1661,43 @@ static enum surface_update_type check_update_surfaces_for_stream(
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
if (stream_status == NULL || stream_status->plane_count != surface_count)
- return UPDATE_TYPE_FULL;
+ overall_type = UPDATE_TYPE_FULL;
/* some stream updates require passive update */
if (stream_update) {
- if ((stream_update->src.height != 0) &&
- (stream_update->src.width != 0))
- return UPDATE_TYPE_FULL;
+ union stream_update_flags *su_flags = &stream_update->stream->update_flags;
- if ((stream_update->dst.height != 0) &&
- (stream_update->dst.width != 0))
- return UPDATE_TYPE_FULL;
+ if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0))
+ su_flags->bits.scaling = 1;
if (stream_update->out_transfer_func)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.abm_level = 1;
if (stream_update->dpms_off)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.dpms_off = 1;
+
+ if (stream_update->gamut_remap)
+ su_flags->bits.gamut_remap = 1;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (stream_update->wb_update)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.wb_update = 1;
#endif
+ if (su_flags->raw != 0)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update->output_csc_transform || stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
}
for (i = 0 ; i < surface_count; i++) {
enum surface_update_type type =
det_surface_update(dc, &updates[i]);
- if (type == UPDATE_TYPE_FULL)
- return type;
-
elevate_update_type(&overall_type, type);
}
@@ -1622,16 +1719,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
int i;
enum surface_update_type type;
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL)
+ if (type == UPDATE_TYPE_FULL) {
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
- if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
- dc->optimized_required = true;
+ if (type == UPDATE_TYPE_FAST) {
+ // If there's an available clock comparator, we use that.
+ if (dc->clk_mgr->funcs->are_clock_states_equal) {
+ if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
+ dc->optimized_required = true;
+ // Else we fallback to mem compare.
+ } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
+ dc->optimized_required = true;
+ }
+ }
return type;
}
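The fast-update path earlier in this hunk prefers the new are_clock_states_equal() callback and only falls back to comparing the leading members of struct dc_clocks with memcmp() up to offsetof(..., prev_p_state_change_support); the dc.h hunk further down reorders the struct so every field that should not trigger re-optimization sits after that marker. A small standalone illustration of the offsetof trick, using a made-up struct rather than dc_clocks:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Fields above the marker take part in the comparison; fields below do not. */
struct clocks {
        int dispclk_khz;
        int dppclk_khz;
        bool p_state_change_support;
        /* --- not compared when deciding whether re-optimization is needed --- */
        bool prev_p_state_change_support;
        int bw_dispclk_khz;
};

static bool clocks_equal(const struct clocks *a, const struct clocks *b)
{
        return memcmp(a, b, offsetof(struct clocks, prev_p_state_change_support)) == 0;
}

int main(void)
{
        struct clocks cur = { 600000, 400000, true, false, 600000 };
        struct clocks new = { 600000, 400000, true, true, 650000 };

        /* Only the trailing fields differ, so no re-optimization is required. */
        printf("optimized_required = %s\n", clocks_equal(&cur, &new) ? "false" : "true");
        return 0;
}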
@@ -1872,6 +1982,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
struct dc_state *context)
{
int j;
+ bool should_program_abm;
// Stream updates
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -1952,14 +2063,21 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
- if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
- // if otg funcs defined check if blanked before programming
- if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = true;
+
+ // if otg funcs defined check if blanked before programming
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked)
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = false;
+
+ if (should_program_abm) {
+ if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ } else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
- } else
- pipe_ctx->stream_res.abm->funcs->set_abm_level(
- pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+ }
}
}
}
@@ -2004,7 +2122,13 @@ static void commit_planes_for_stream(struct dc *dc,
* In case of turning off screen, no need to program front end a second time.
* just return after program blank.
*/
- dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
+
return;
}
@@ -2064,10 +2188,15 @@ static void commit_planes_for_stream(struct dc *dc,
stream_status =
stream_get_status(context, pipe_ctx->stream);
- dc->hwss.apply_ctx_for_surface(
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ca20b150afcc..12ba6fdf89b7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -79,7 +79,6 @@ static void destruct(struct dc_link *link)
int i;
if (link->hpd_gpio != NULL) {
- dal_gpio_close(link->hpd_gpio);
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -520,7 +519,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
-static void read_edp_current_link_settings_on_detect(struct dc_link *link)
+static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = { {0} };
uint8_t link_bw_set;
@@ -555,17 +554,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
&link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
- /* If standard link rates are not being used,
- * Read DPCD 00115h to find the link rate set used
- */
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
- link->cur_link_settings.link_rate_set = link_rate_set;
- link->cur_link_settings.use_link_rate_set = true;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the edp link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // edp_supported_link_rates_count = 0 for DP
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ // Link Rate not found. Seamless boot may not work.
+ ASSERT(false);
}
} else {
link->cur_link_settings.link_rate = link_bw_set;
@@ -680,7 +685,7 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
-bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_alt_mode(struct dc_link *link)
{
/**
@@ -738,7 +743,8 @@ bool wait_for_alt_mode(struct dc_link *link)
* This does not create remote sinks but will trigger DM
* to start MST detection if a branch is detected.
*/
-bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+static bool dc_link_detect_helper(struct dc_link *link,
+ enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct display_sink_capability sink_caps = { 0 };
@@ -753,6 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
struct dpcd_caps prev_dpcd_caps;
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
+ bool perform_dp_seamless_boot = false;
+
DC_LOGGER_INIT(link->ctx->logger);
if (dc_is_virtual_signal(link->connector_signal))
@@ -809,15 +817,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
case SIGNAL_TYPE_EDP: {
- read_edp_current_link_settings_on_detect(link);
+ read_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
- sink_caps.transaction_type =
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
}
case SIGNAL_TYPE_DISPLAY_PORT: {
+
/* wa HPD high coming too early*/
if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
@@ -865,12 +873,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* empty which leads to allocate_mst_payload() has "0"
* pbn_per_slot value leading to exception on dc_fixpt_div()
*/
- link->verified_link_cap = link->reported_link_cap;
+ dp_verify_mst_link_cap(link);
+
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
}
+ // For seamless boot, skip verify link cap: read the UEFI settings and set them as verified.
+ if (reason == DETECT_REASON_BOOT &&
+ dc_ctx->dc->config.power_down_display_on_boot == false &&
+ link->link_status.link_active == true)
+ perform_dp_seamless_boot = true;
+
+ if (perform_dp_seamless_boot) {
+ read_current_link_settings_on_detect(link);
+ link->verified_link_cap = link->reported_link_cap;
+ }
+
break;
}
@@ -955,10 +975,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* two link trainings
*/
- /* deal with non-mst cases */
- dp_verify_link_cap_with_retries(link,
- &link->reported_link_cap,
- LINK_TRAINING_MAX_VERIFY_RETRY);
+ // verify link cap for SST non-seamless boot
+ if (!perform_dp_seamless_boot)
+ dp_verify_link_cap_with_retries(link,
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
@@ -1047,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
dc_sink_release(prev_sink);
return true;
+
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ const struct dc *dc = link->dc;
+ bool ret;
+
+ /* get out of low power state */
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
+ ret = dc_link_detect_helper(link, reason);
+
+ /* Go back to power optimized state */
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+
+ return ret;
}
bool dc_link_get_hpd_state(struct dc_link *dc_link)
@@ -1492,7 +1530,7 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- if (!apply_seamless_boot_optimization)
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
@@ -2169,8 +2207,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
dp_set_fec_ready(link, false);
}
#endif
- } else
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ } else {
+ if (signal != SIGNAL_TYPE_VIRTUAL)
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
+ }
if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
/* MST disable link only when no stream use the link */
@@ -2217,7 +2257,7 @@ static bool dp_active_dongle_validate_timing(
break;
}
- if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
return true;
@@ -2381,17 +2421,206 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
return true;
}
-bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
{
struct dc *core_dc = link->ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
- dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+
+
+ if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
+
+ link->psr_allow_active = allow_active;
return true;
}
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_feature_enabled)
+ dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+ return true;
+}
+
+static inline enum physical_phy_id
+transmitter_to_phy_id(enum transmitter transmitter_value)
+{
+ switch (transmitter_value) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYLD_0;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYLD_1;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYLD_2;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYLD_3;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYLD_4;
+ case TRANSMITTER_UNIPHY_F:
+ return PHYLD_5;
+ case TRANSMITTER_NUTMEG_CRT:
+ return PHYLD_6;
+ case TRANSMITTER_TRAVIS_CRT:
+ return PHYLD_7;
+ case TRANSMITTER_TRAVIS_LCD:
+ return PHYLD_8;
+ case TRANSMITTER_UNIPHY_G:
+ return PHYLD_9;
+ case TRANSMITTER_COUNT:
+ return PHYLD_COUNT;
+ case TRANSMITTER_UNKNOWN:
+ return PHYLD_UNKNOWN;
+ default:
+ WARN_ONCE(1, "Unknown transmitter value %d\n",
+ transmitter_value);
+ return PHYLD_UNKNOWN;
+ }
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *core_dc;
+ struct dmcu *dmcu;
+ int i;
+ /* updateSinkPsrDpcdConfig*/
+ union dpcd_psr_configuration psr_configuration;
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ core_dc = link->ctx->dc;
+ dmcu = core_dc->res_pool->dmcu;
+
+ if (!dmcu)
+ return false;
+
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2*/
+ if (psr_config->psr_version == 0x2) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /*For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu -1 for all controller id values,
+ * therefore +1 here
+ */
+ psr_context->controllerId =
+ core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /*PhyId is associated with the transmitter id*/
+ psr_context->smuPhyId =
+ transmitter_to_phy_id(link->link_enc->transmitter);
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+ timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->timing_generator_count;
+
+ psr_context->rfb_update_auto_en = true;
+
+ /* 2 frames before enter PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame
+ * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /*skip power down the single pipe since it blocks the cstate*/
+ if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+
+ /* psr_feature_enabled == 0 indicates setup_psr did not succeed, but this
+ * should not happen since firmware should be running at this point
+ */
+ if (link->psr_feature_enabled == 0)
+ ASSERT(0);
+
+ return true;
+
+}
+
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
return &link->link_status;
@@ -2510,7 +2739,7 @@ static void update_mst_stream_alloc_table(
/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
* because stream_encoder is not exposed to dm
*/
-static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
@@ -2521,6 +2750,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
uint8_t i;
+ enum act_return_status ret;
DC_LOGGER_INIT(link->ctx->logger);
/* enable_link_dp_mst already check link->enabled_stream_count
@@ -2568,14 +2798,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
&link->mst_stream_alloc_table);
/* send down message */
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
stream);
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
+ if (ret != ACT_LINK_LOST) {
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
@@ -2667,6 +2899,24 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
+{
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+ if (cp_psp && cp_psp->funcs.update_stream_config) {
+ struct cp_psp_stream_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
+ config.dpms_off = dpms_off;
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
+ }
+}
+#endif
void core_link_enable_stream(
struct dc_state *state,
@@ -2677,6 +2927,10 @@ void core_link_enable_stream(
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
@@ -2727,6 +2981,9 @@ void core_link_enable_stream(
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2734,6 +2991,9 @@ void core_link_enable_stream(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
apply_edp_fast_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2786,13 +3046,16 @@ void core_link_enable_stream(
#endif
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- allocate_mst_payload(pipe_ctx);
+ dc_link_allocate_mst_payload(pipe_ctx);
core_dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
@@ -2810,6 +3073,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, true);
+#endif
+
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 51991bf26a93..7f904d55c1bc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -508,7 +508,7 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size)
{
- bool ret;
+ bool ret = false;
uint32_t payload_size =
dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -527,34 +527,32 @@ bool dal_ddc_service_query_ddc_data(
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
- struct aux_payload write_payload = {
- .i2c_over_aux = true,
- .write = true,
- .mot = true,
- .address = address,
- .length = write_size,
- .data = write_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- struct aux_payload read_payload = {
- .i2c_over_aux = true,
- .write = false,
- .mot = false,
- .address = address,
- .length = read_size,
- .data = read_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- ret = dc_link_aux_transfer_with_retries(ddc, &write_payload);
+ struct aux_payload payload;
+ bool read_available = true;
+
+ payload.i2c_over_aux = true;
+ payload.address = address;
+ payload.reply = NULL;
+ payload.defer_delay = get_defer_delay(ddc);
+
+ if (write_size != 0) {
+ payload.write = true;
+ payload.mot = false;
+ payload.length = write_size;
+ payload.data = write_buf;
+
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ read_available = ret;
+ }
- if (!ret)
- return false;
+ if (read_size != 0 && read_available) {
+ payload.write = false;
+ payload.mot = false;
+ payload.length = read_size;
+ payload.data = read_buf;
- ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ }
} else {
struct i2c_payloads *payloads =
dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
@@ -585,6 +583,41 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload)
+{
+ uint8_t retrieved = 0;
+ bool ret = 0;
+
+ if (!ddc)
+ return false;
+
+ if (!payload)
+ return false;
+
+ do {
+ struct aux_payload current_payload;
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+ payload->length ? true : false;
+
+ current_payload.address = payload->address;
+ current_payload.data = &payload->data[retrieved];
+ current_payload.defer_delay = payload->defer_delay;
+ current_payload.i2c_over_aux = payload->i2c_over_aux;
+ current_payload.length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
+ current_payload.mot = !is_end_of_payload;
+ current_payload.reply = payload->reply;
+ current_payload.write = payload->write;
+
+ ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
+
+ retrieved += current_payload.length;
+ } while (retrieved < payload->length && ret == true);
+
+ return ret;
+}
+
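The new dal_ddc_submit_aux_command() above splits a long I2C-over-AUX request into DEFAULT_AUX_MAX_DATA_SIZE chunks and keeps MOT (middle-of-transaction) set on every chunk except the last. A standalone sketch of that chunking loop follows; transfer_chunk() is a hypothetical stand-in for dc_link_aux_transfer_with_retries():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_AUX_MAX_DATA_SIZE 16

/* Hypothetical stand-in for dc_link_aux_transfer_with_retries(): just log the chunk. */
static bool transfer_chunk(uint32_t offset, uint32_t length, bool mot)
{
        printf("chunk at %2u, %2u bytes, MOT=%d\n",
               (unsigned int)offset, (unsigned int)length, mot);
        return true;
}

static bool submit_aux_command(uint32_t total_length)
{
        uint32_t retrieved = 0;
        bool ret = true;

        do {
                bool is_last = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) > total_length;
                uint32_t len = is_last ? total_length - retrieved
                                       : DEFAULT_AUX_MAX_DATA_SIZE;

                /* MOT stays set until the final chunk closes the transaction. */
                ret = transfer_chunk(retrieved, len, !is_last);
                retrieved += len;
        } while (retrieved < total_length && ret);

        return ret;
}

int main(void)
{
        submit_aux_command(40); /* transfers 16 + 16 + 8 bytes */
        return 0;
}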
/* dc_link_aux_transfer_raw() - Attempt to transfer
* the given aux payload. This function does not perform
* retries or handle error states. The reply is returned
@@ -613,6 +646,20 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
return dce_aux_transfer_with_retries(ddc, payload);
}
+
+enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout)
+{
+ enum dc_status status = DC_OK;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+
+ if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL)
+ return DC_ERROR_UNEXPECTED;
+ if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout))
+ status = DC_ERROR_UNEXPECTED;
+ return status;
+}
+
/*test only function*/
void dal_ddc_service_set_ddc_pin(
struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index f5742719b5d9..0f59b68aa4c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1409,6 +1409,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (link->link_enc->funcs->get_max_link_cap)
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
max_link_cap.lane_count =
@@ -1653,11 +1656,14 @@ bool dp_verify_link_cap_with_retries(
for (i = 0; i < attempts; i++) {
int fail_count = 0;
- enum dc_connection_type type;
+ enum dc_connection_type type = dc_connection_none;
memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
- if (!dc_link_detect_sink(link, &type)) {
+ if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
+ link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+ link->verified_link_cap.link_rate = LINK_RATE_LOW;
+ link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
} else if (dp_verify_link_cap(link,
&link->reported_link_cap,
@@ -1670,6 +1676,19 @@ bool dp_verify_link_cap_with_retries(
return success;
}
+bool dp_verify_mst_link_cap(
+ struct dc_link *link)
+{
+ struct dc_link_settings max_link_cap = {0};
+
+ max_link_cap = get_max_link_cap(link);
+ link->verified_link_cap = get_common_supported_link_settings(
+ link->reported_link_cap,
+ max_link_cap);
+
+ return true;
+}
+
static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
@@ -2057,11 +2076,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link)
return false;
}
-static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_enabled)
+ if (!link->psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
@@ -2100,8 +2119,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- dc_link_set_psr_enable(link, false, true);
- dc_link_set_psr_enable(link, true, true);
+ dc_link_set_psr_allow_active(link, false, true);
+ dc_link_set_psr_allow_active(link, true, true);
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -2364,6 +2383,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
enum dc_status result;
bool status = false;
+ struct pipe_ctx *pipe_ctx;
+ int i;
if (out_link_loss)
*out_link_loss = false;
@@ -2440,6 +2461,15 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
&link->cur_link_settings,
true, LINK_TRAINING_ATTEMPTS);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_link_allocate_mst_payload(pipe_ctx);
+ }
+ }
+
status = false;
if (out_link_loss)
*out_link_loss = true;
@@ -2545,6 +2575,7 @@ static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
union dp_downstream_port_present ds_port = { .byte = data };
+ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
/* decode converter info*/
if (!ds_port.fields.PORT_PRESENT) {
@@ -2691,6 +2722,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
* keep receiver powered all the time.*/
case DP_BRANCH_DEVICE_ID_0010FA:
case DP_BRANCH_DEVICE_ID_0080E1:
+ case DP_BRANCH_DEVICE_ID_00E04C:
link->wa_flags.dp_keep_receiver_powered = true;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 79438c4f1e20..a519dbc5ecb6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link,
if (pipes[i].stream != NULL &&
!pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
pipes[i].stream->link != NULL &&
- pipes[i].stream_res.stream_enc != NULL) {
+ pipes[i].stream_res.stream_enc != NULL &&
+ pipes[i].stream->link == link) {
udelay(100);
pipes[i].stream_res.stream_enc->funcs->dp_blank(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f25ac17f47fa..37698305a2dc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -951,7 +951,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
}
-static bool are_rect_integer_multiples(struct rect src, struct rect dest)
+static bool are_rects_integer_multiples(struct rect src, struct rect dest)
{
if (dest.width >= src.width && dest.width % src.width == 0 &&
dest.height >= src.height && dest.height % src.height == 0)
@@ -959,6 +959,38 @@ static bool are_rect_integer_multiples(struct rect src, struct rect dest)
return false;
}
+
+static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx->plane_state->scaling_quality.integer_scaling)
+ return;
+
+ //for Centered Mode
+ if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width &&
+ pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) {
+ // calculate maximum # of replication of src onto addressable
+ unsigned int integer_multiple = min(
+ pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width,
+ pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height);
+
+ //scale dst
+ pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width;
+ pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height;
+
+ //center dst onto addressable
+ pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
+ pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
+ }
+
+ //disable taps if src & dst are integer ratio
+ if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) {
+ pipe_ctx->plane_state->scaling_quality.v_taps = 1;
+ pipe_ctx->plane_state->scaling_quality.h_taps = 1;
+ pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
+ pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
+ }
+}
+
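calculate_integer_scaling() above picks the largest whole-number replication of the source that still fits the addressable timing and then centers the result. A standalone worked example with assumed numbers (1280x1024 content on a 1920x1080 timing; not taken from the patch):

#include <stdio.h>

struct rect { int x, y, width, height; };

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        /* Assumed: 1280x1024 content shown on a 1920x1080 addressable timing. */
        struct rect src = { 0, 0, 1280, 1024 };
        int h_addressable = 1920, v_addressable = 1080;

        int multiple = min_int(h_addressable / src.width,
                               v_addressable / src.height);     /* 1 */

        struct rect dst;
        dst.width  = multiple * src.width;                      /* 1280 */
        dst.height = multiple * src.height;                     /* 1024 */
        dst.x = (h_addressable - dst.width) / 2;                /* 320 */
        dst.y = (v_addressable - dst.height) / 2;               /* 28 */

        printf("dst: %dx%d at (%d,%d)\n", dst.width, dst.height, dst.x, dst.y);
        return 0;
}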
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -972,6 +1004,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
+ calculate_integer_scaling(pipe_ctx);
+
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
@@ -1002,13 +1036,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
- if (res &&
- plane_state->scaling_quality.integer_scaling &&
- are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport,
- pipe_ctx->plane_res.scl_data.recout)) {
- pipe_ctx->plane_res.scl_data.taps.v_taps = 1;
- pipe_ctx->plane_res.scl_data.taps.h_taps = 1;
- }
if (!res) {
/* Try 24 bpp linebuffer */
@@ -1635,7 +1662,8 @@ static int acquire_first_free_pipe(
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
- enum engine_id id)
+ enum engine_id id,
+ enum dce_version dc_version)
{
int i, available_audio_count;
@@ -1854,28 +1882,28 @@ static int acquire_resource_from_hw_enabled_state(
struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
- unsigned int inst, tg_inst;
+ unsigned int i, inst, tg_inst = 0;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return -1;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- /* Instance should be within the range of the pool */
- if (inst >= pool->pipe_count)
- return -1;
+ if (inst == ENGINE_ID_UNKNOWN)
+ return false;
- if (inst >= pool->stream_enc_count)
- return -1;
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i]->id == inst) {
+ tg_inst = pool->stream_enc[i]->funcs->dig_source_otg(
+ pool->stream_enc[i]);
+ break;
+ }
+ }
- tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]);
+ // tg_inst not found
+ if (i == pool->stream_enc_count)
+ return false;
if (tg_inst >= pool->timing_generator_count)
return false;
@@ -1971,7 +1999,7 @@ enum dc_status resource_map_pool_resources(
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
- &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
/*
* Audio assigned in order first come first get.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index bf1d7bb90e0f..bb09243758fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc,
if (dwb->funcs->is_enabled(dwb)) {
/* writeback pipe already enabled, only need to update */
- dc->hwss.update_writeback(dc, stream_status, wb_info);
+ dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
} else {
/* Enable writeback pipe from scratch*/
- dc->hwss.enable_writeback(dc, stream_status, wb_info);
+ dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index a82352a87808..0416a17b0897 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.48"
+#define DC_VER "3.2.56"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -111,19 +111,20 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
+ bool extended_aux_timeout_support;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool hw_3d_lut;
#endif
struct dc_plane_cap planes[MAX_PLANES];
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa {
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool no_connect_phy_config;
bool dedcn20_305_wa;
+#endif
bool skip_clock_update;
};
-#endif
struct dc_dcc_surface_param {
struct dc_size surface_size;
@@ -219,7 +220,9 @@ struct dc_config {
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
bool edp_not_connected;
+ bool force_enum_edp;
bool forced_clocks;
+ bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
};
@@ -227,6 +230,7 @@ enum visual_confirm {
VISUAL_CONFIRM_DISABLE = 0,
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
+ VISUAL_CONFIRM_MPCTREE = 4,
};
enum dcc_option {
@@ -245,6 +249,19 @@ enum wm_report_mode {
WM_REPORT_DEFAULT = 0,
WM_REPORT_OVERRIDE = 1,
};
+enum dtm_pstate{
+ dtm_level_p0 = 0,/*highest voltage*/
+ dtm_level_p1,
+ dtm_level_p2,
+ dtm_level_p3,
+ dtm_level_p4,/*when active_display_count = 0*/
+};
+
+enum dcn_pwr_state {
+ DCN_PWR_STATE_UNKNOWN = -1,
+ DCN_PWR_STATE_MISSION_MODE = 0,
+ DCN_PWR_STATE_LOW_POWER = 3,
+};
/*
* For any clocks that may differ per pipe
@@ -252,11 +269,7 @@ enum wm_report_mode {
*/
struct dc_clocks {
int dispclk_khz;
- int max_supported_dppclk_khz;
- int max_supported_dispclk_khz;
int dppclk_khz;
- int bw_dppclk_khz; /*a copy of dppclk_khz*/
- int bw_dispclk_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
@@ -264,12 +277,17 @@ struct dc_clocks {
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;
-
+ enum dcn_pwr_state pwr_state;
/*
* Elements below are not compared for the purposes of
* optimization required
*/
bool prev_p_state_change_support;
+ enum dtm_pstate dtm_level;
+ int max_supported_dppclk_khz;
+ int max_supported_dispclk_khz;
+ int bw_dppclk_khz; /*a copy of dppclk_khz*/
+ int bw_dispclk_khz;
};
struct dc_bw_validation_profile {
@@ -347,6 +365,7 @@ struct dc_debug_options {
bool disable_hubp_power_gate;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_dsc_power_gate;
+ int dsc_min_slice_height_override;
#endif
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
@@ -462,9 +481,7 @@ struct dc {
struct dc_config config;
struct dc_debug_options debug;
struct dc_bounding_box_overrides bb_overrides;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa work_arounds;
-#endif
struct dc_context *ctx;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config vm_pa_config;
@@ -553,10 +570,16 @@ struct dc_init_data {
};
struct dc_callback_init {
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#else
uint8_t reserved;
+#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
+void dc_hardware_init(struct dc *dc);
+
int dc_get_vmid_use_vector(struct dc *dc);
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);
@@ -565,6 +588,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
#endif
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params);
+void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
/*******************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 4ef97f65e55d..4f8f576d5fcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -49,7 +49,8 @@ enum aux_channel_operation_result {
AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
- AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON,
+ AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 6e42209f0e20..0ed2962add5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -30,6 +30,7 @@
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+#include "dc_types.h"
struct dc_dsc_bw_range {
uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */
@@ -39,13 +40,21 @@ struct dc_dsc_bw_range {
uint32_t stream_kbps; /* Uncompressed stream bandwidth */
};
+struct display_stream_compressor {
+ const struct dsc_funcs *funcs;
+#ifndef AMD_EDID_UTILITY
+ struct dc_context *ctx;
+ int inst;
+#endif
+};
bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
struct dsc_dec_dpcd_caps *dsc_sink_caps);
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_kbps,
const uint32_t max_kbps,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -53,8 +62,9 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range);
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
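
With the new signatures, DSC callers no longer pass the top-level struct dc: they hand in the DSC block assigned to the stream plus the per-ASIC minimum-slice-height override added to dc_debug_options earlier in this patch. A hedged usage sketch; the locals (pipe_ctx, dsc_sink_caps, target_bandwidth_kbps) are assumptions about the calling context, not taken from this diff:

	struct dc_dsc_config dsc_cfg = {0};

	if (!dc_dsc_compute_config(pipe_ctx->stream_res.dsc,          /* DSC instance for this pipe */
			dsc_sink_caps,                                 /* from dc_dsc_parse_dsc_dpcd() */
			dc->debug.dsc_min_slice_height_override,       /* 0 = no override */
			target_bandwidth_kbps,
			&stream->timing,
			&dsc_cfg))
		return false;   /* sink cannot decode this timing at the requested bandwidth */
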
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 0b8700a8a94a..e0856bb8511f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -26,6 +26,8 @@
#ifndef DC_HW_TYPES_H
#define DC_HW_TYPES_H
+#ifndef AMD_EDID_UTILITY
+
#include "os_types.h"
#include "fixed31_32.h"
#include "signal_types.h"
@@ -124,20 +126,6 @@ struct plane_size {
int chroma_pitch;
struct rect surface_size;
struct rect chroma_size;
-
- union {
- struct {
- struct rect surface_size;
- int surface_pitch;
- } grph;
-
- struct {
- struct rect luma_size;
- int luma_pitch;
- struct rect chroma_size;
- int chroma_pitch;
- } video;
- };
};
struct dc_plane_dcc_param {
@@ -148,21 +136,6 @@ struct dc_plane_dcc_param {
int meta_pitch_c;
bool independent_64b_blks_c;
-
- union {
- struct {
- int meta_pitch;
- bool independent_64b_blks;
- } grph;
-
- struct {
- int meta_pitch_l;
- bool independent_64b_blks_l;
-
- int meta_pitch_c;
- bool independent_64b_blks_c;
- } video;
- };
};
/*Displayable pixel format in fb*/
@@ -605,6 +578,11 @@ enum dc_quantization_range {
QUANTIZATION_RANGE_LIMITED
};
+enum dc_dynamic_expansion {
+ DYN_EXPANSION_AUTO,
+ DYN_EXPANSION_DISABLE
+};
+
/* XFM */
/* used in struct dc_plane_state */
@@ -616,6 +594,8 @@ struct scaling_taps {
bool integer_scaling;
};
+#endif /* AMD_EDID_UTILITY */
+
enum dc_timing_standard {
DC_TIMING_STANDARD_UNDEFINED,
DC_TIMING_STANDARD_DMT,
@@ -737,30 +717,6 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
-enum trigger_delay {
- TRIGGER_DELAY_NEXT_PIXEL = 0,
- TRIGGER_DELAY_NEXT_LINE,
-};
-
-enum crtc_event {
- CRTC_EVENT_VSYNC_RISING = 0,
- CRTC_EVENT_VSYNC_FALLING
-};
-
-struct crtc_trigger_info {
- bool enabled;
- struct dc_stream_state *event_source;
- enum crtc_event event;
- enum trigger_delay delay;
-};
-
-struct dc_crtc_timing_adjust {
- uint32_t v_total_min;
- uint32_t v_total_max;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
-};
-
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
@@ -804,6 +760,33 @@ struct dc_crtc_timing {
#endif
};
+#ifndef AMD_EDID_UTILITY
+
+enum trigger_delay {
+ TRIGGER_DELAY_NEXT_PIXEL = 0,
+ TRIGGER_DELAY_NEXT_LINE,
+};
+
+enum crtc_event {
+ CRTC_EVENT_VSYNC_RISING = 0,
+ CRTC_EVENT_VSYNC_FALLING
+};
+
+struct crtc_trigger_info {
+ bool enabled;
+ struct dc_stream_state *event_source;
+ enum crtc_event event;
+ enum trigger_delay delay;
+};
+
+struct dc_crtc_timing_adjust {
+ uint32_t v_total_min;
+ uint32_t v_total_max;
+ uint32_t v_total_mid;
+ uint32_t v_total_mid_frame_num;
+};
+
+
/* Passed on init */
enum vram_type {
VIDEO_MEMORY_TYPE_GDDR5 = 2,
@@ -874,5 +857,7 @@ struct tg_color {
uint16_t color_b_cb;
};
+#endif /* AMD_EDID_UTILITY */
+
#endif /* DC_HW_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 9ea75db3484e..f24fd19ed93d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -126,7 +126,8 @@ struct dc_link {
unsigned short chip_caps;
unsigned int dpcd_sink_count;
enum edp_revision edp_revision;
- bool psr_enabled;
+ bool psr_feature_enabled;
+ bool psr_allow_active;
/* MST record stream using this link */
struct link_flags {
@@ -158,6 +159,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
return dc->links[link_index];
}
+static inline struct dc_link *get_edp_link(const struct dc *dc)
+{
+ int i;
+
+ // report any eDP links, even unconnected DDI's
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
+ return dc->links[i];
+ }
+ return NULL;
+}
+
/* Set backlight level of an embedded panel (eDP, LVDS).
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
* and 16 bit fractional, where 1.0 is max backlight value.
@@ -170,7 +183,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
-bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
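
The rename above splits one flag into two: psr_feature_enabled records that PSR was successfully configured on the link, while psr_allow_active tracks whether PSR is currently permitted to engage, which is what dc_link_set_psr_allow_active() now toggles. A sketch of the intended call pattern; the allow_active policy variable is an assumption, not part of this patch:

	/* Sketch: gate PSR engagement on runtime policy. */
	if (link->psr_feature_enabled &&
	    link->psr_allow_active != allow_active)
		dc_link_set_psr_allow_active(link, allow_active, false /* don't wait */);
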
@@ -192,6 +205,7 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 0fa1c26bc20d..fdb6adc37857 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -113,6 +113,21 @@ struct periodic_interrupt_config {
int lines_offset;
};
+union stream_update_flags {
+ struct {
+ uint32_t scaling:1;
+ uint32_t out_tf:1;
+ uint32_t out_csc:1;
+ uint32_t abm_level:1;
+ uint32_t dpms_off:1;
+ uint32_t gamut_remap:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ uint32_t wb_update:1;
+#endif
+ } bits;
+
+ uint32_t raw;
+};
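
The union above gives each per-stream property its own dirty bit, while the raw view provides a cheap "anything pending?" test and a one-shot clear. A minimal sketch; program_stream_updates() is a hypothetical helper used only for illustration:

	stream->update_flags.bits.out_tf = 1;       /* output transfer function changed */
	stream->update_flags.bits.abm_level = 1;

	if (stream->update_flags.raw)               /* any flag set? */
		program_stream_updates(dc, stream);

	stream->update_flags.raw = 0;               /* clear all flags at once */
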
struct dc_stream_state {
// sink is deprecated, new code should not reference
@@ -214,9 +229,14 @@ struct dc_stream_state {
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool is_dsc_enabled;
#endif
+ union stream_update_flags update_flags;
};
+#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF
+
struct dc_stream_update {
+ struct dc_stream_state *stream;
+
struct rect src;
struct rect dst;
struct dc_transfer_func *out_transfer_func;
@@ -431,6 +451,9 @@ void dc_stream_set_static_screen_events(struct dc *dc,
int num_streams,
const struct dc_static_screen_events *events);
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option);
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option);
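
dc_stream_set_dyn_expansion() is the per-stream entry point for the dc_dynamic_expansion option introduced in dc_hw_types.h above; DYN_EXPANSION_DISABLE makes the OPP skip the bit-depth expansion it would otherwise program (see the opp1_set_dyn_expansion change later in this patch). A one-line usage sketch, with the caller context assumed:

	dc_stream_set_dyn_expansion(dc, stream, DYN_EXPANSION_DISABLE);
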
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index b273735b6a3e..d9be8fc3889f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -25,6 +25,11 @@
#ifndef DC_TYPES_H_
#define DC_TYPES_H_
+#ifndef AMD_EDID_UTILITY
+/* AMD EdidUtility only needs a portion
+ * of this file, including the rest only
+ * causes additional issues.
+ */
#include "os_types.h"
#include "fixed31_32.h"
#include "irq_types.h"
@@ -33,6 +38,10 @@
#include "dal_types.h"
#include "grph_object_defs.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
+
/* forward declarations */
struct dc_plane_state;
struct dc_stream_state;
@@ -100,6 +109,9 @@ struct dc_context {
uint32_t dc_sink_id_count;
uint32_t dc_stream_id_count;
uint64_t fbc_gpu_addr;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#endif
};
@@ -159,6 +171,12 @@ enum dc_edid_status {
EDID_THE_SAME,
};
+enum act_return_status {
+ ACT_SUCCESS,
+ ACT_LINK_LOST,
+ ACT_FAILED
+};
+
/* audio capability from EDID*/
struct dc_cea_audio_mode {
uint8_t format_code; /* ucData[0] [6:3]*/
@@ -739,6 +757,9 @@ struct dc_clock_config {
uint32_t current_clock_khz;/*current clock in use*/
};
+#endif /*AMD_EDID_UTILITY*/
+//AMD EDID UTILITY does not need any of the above structures
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* DSC DPCD capabilities */
union dsc_slice_caps1 {
@@ -810,4 +831,5 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
#endif
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 58bd131d5b48..b8a3fc505c9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
return true;
}
@@ -401,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm)
/* Enable the backlight output */
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+ /* Disable fractional pwm if configured */
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
+ abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
+
/* Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
@@ -489,9 +496,6 @@ void dce_abm_destroy(struct abm **abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
- if (abm_dce->base.dmcu_is_running == true)
- abm_dce->base.funcs->set_abm_immediate_disable(*abm);
-
kfree(abm_dce);
*abm = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index c3f9f4185ce8..e472608faf33 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -42,6 +42,10 @@
#include "reg_helper.h"
+#undef FN
+#define FN(reg_name, field_name) \
+ aux110->shift->field_name, aux110->mask->field_name
+
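
Redefining FN() here is what routes the generic register helpers in this file through the new per-variant tables: field shifts and masks are looked up in aux110->shift and aux110->mask at run time instead of being baked in at compile time, which is how a single dce_aux.c can serve the DCE10/DCE11/DCE12/DCN register layouts chosen in dce110_aux_engine_construct(). Roughly, summarizing the reg_helper plumbing rather than quoting this patch:

	/*
	 * With the FN() above, a call such as
	 *
	 *	REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
	 *
	 * hands aux110->shift->AUX_SW_DONE_ACK and aux110->mask->AUX_SW_DONE_ACK
	 * to the generic read-modify-write helper for that register.
	 */
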
#define FROM_AUX_ENGINE(ptr) \
container_of((ptr), struct aux_engine_dce110, base)
@@ -55,6 +59,14 @@ enum {
AUX_TIMED_OUT_RETRY_COUNTER = 2,
AUX_DEFER_RETRY_COUNTER = 6
};
+
+#define TIME_OUT_INCREMENT 1016
+#define TIME_OUT_MULTIPLIER_8 8
+#define TIME_OUT_MULTIPLIER_16 16
+#define TIME_OUT_MULTIPLIER_32 32
+#define TIME_OUT_MULTIPLIER_64 64
+#define MAX_TIMEOUT_LENGTH 127
+
static void release_engine(
struct dce_aux *engine)
{
@@ -198,7 +210,7 @@ static void submit_channel_request(
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
/* set the delay and the number of bytes to write */
@@ -327,7 +339,7 @@ static enum aux_channel_operation_result get_channel_status(
/* poll to make sure that SW_DONE is asserted */
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
@@ -414,20 +426,77 @@ void dce110_engine_destroy(struct dce_aux **engine)
*engine = NULL;
}
+
+static bool dce_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout_in_us)
+{
+ uint32_t multiplier = 0;
+ uint32_t length = 0;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+
+ /* 1-Update polling timeout period */
+ aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;
+
+ /* 2-Update aux timeout period length and multiplier */
+ if (timeout_in_us <= TIME_OUT_INCREMENT) {
+ multiplier = 0;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_8;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0)
+ length++;
+ } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
+ multiplier = 1;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_16;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0)
+ length++;
+ } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
+ multiplier = 2;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_32;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0)
+ length++;
+ } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) {
+ multiplier = 3;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_64;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0)
+ length++;
+ }
+
+ length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH;
+
+ REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier);
+
+ return true;
+}
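
The encoding above picks the smallest multiplier (x8/x16/x32/x64) whose range covers the requested timeout, rounds the tick count up, and clamps it to 127. A standalone worked example of just that arithmetic (hosted C with sample inputs chosen for illustration; only the macros are copied from the patch):

	#include <stdint.h>
	#include <stdio.h>

	#define TIME_OUT_INCREMENT      1016
	#define TIME_OUT_MULTIPLIER_8      8
	#define TIME_OUT_MULTIPLIER_16    16
	#define TIME_OUT_MULTIPLIER_32    32
	#define TIME_OUT_MULTIPLIER_64    64
	#define MAX_TIMEOUT_LENGTH       127

	static void encode_timeout(uint32_t timeout_in_us)
	{
		uint32_t mul, step, len;

		if (timeout_in_us <= TIME_OUT_INCREMENT) {
			mul = 0; step = TIME_OUT_MULTIPLIER_8;
		} else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
			mul = 1; step = TIME_OUT_MULTIPLIER_16;
		} else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
			mul = 2; step = TIME_OUT_MULTIPLIER_32;
		} else {
			mul = 3; step = TIME_OUT_MULTIPLIER_64;
		}

		len = timeout_in_us / step + (timeout_in_us % step ? 1 : 0);
		if (len > MAX_TIMEOUT_LENGTH)
			len = MAX_TIMEOUT_LENGTH;

		printf("%u us -> AUX_RX_TIMEOUT_LEN=%u, AUX_RX_TIMEOUT_LEN_MUL=%u\n",
		       timeout_in_us, len, mul);
	}

	int main(void)
	{
		encode_timeout(552);   /* 552 us  -> LEN=69,  MUL=0 */
		encode_timeout(3200);  /* 3.2 ms (e.g. an extended timeout) -> LEN=100, MUL=2 */
		return 0;
	}
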
+
+static struct dce_aux_funcs aux_functions = {
+ .configure_timeout = NULL,
+ .destroy = NULL,
+};
+
struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs)
+ const struct dce110_aux_registers *regs,
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable)
{
aux_engine110->base.ddc = NULL;
aux_engine110->base.ctx = ctx;
aux_engine110->base.delay = 0;
aux_engine110->base.max_defer_write_retry = 0;
aux_engine110->base.inst = inst;
- aux_engine110->timeout_period = timeout_period;
+ aux_engine110->polling_timeout_period = timeout_period;
aux_engine110->regs = regs;
+ aux_engine110->mask = mask;
+ aux_engine110->shift = shift;
+ aux_engine110->base.funcs = &aux_functions;
+ if (is_ext_aux_timeout_configurable)
+ aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout;
+
return &aux_engine110->base;
}
@@ -464,8 +533,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- if (!acquire(aux_engine, ddc_pin))
+ if (!acquire(aux_engine, ddc_pin)) {
+ *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE;
return -1;
+ }
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -475,7 +546,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
aux_req.action = i2caux_action_from_payload(payload);
aux_req.address = payload->address;
- aux_req.delay = payload->defer_delay * 10;
+ aux_req.delay = 0;
aux_req.length = payload->length;
aux_req.data = payload->data;
@@ -544,8 +615,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
- if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
+ } else {
+ if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
+ (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
+ if (payload->defer_delay > 0)
+ msleep(payload->defer_delay);
+ }
+ }
break;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
@@ -582,6 +660,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
default:
goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index ed7fec8fe253..b4b2c79a8073 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -29,6 +29,7 @@
#include "i2caux_interface.h"
#include "inc/hw/aux_engine.h"
+
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define AUX_COMMON_REG_LIST0(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
@@ -36,6 +37,7 @@
SRI(AUX_SW_DATA, DP_AUX, id), \
SRI(AUX_SW_CONTROL, DP_AUX, id), \
SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
SRI(AUX_SW_STATUS, DP_AUX, id)
#endif
@@ -55,6 +57,7 @@ struct dce110_aux_registers {
uint32_t AUX_SW_DATA;
uint32_t AUX_SW_CONTROL;
uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL1;
uint32_t AUX_SW_STATUS;
uint32_t AUXN_IMPCAL;
uint32_t AUXP_IMPCAL;
@@ -62,6 +65,156 @@ struct dce110_aux_registers {
uint32_t AUX_RESET_MASK;
};
+#define DCE_AUX_REG_FIELD_LIST(type)\
+ type AUX_EN;\
+ type AUX_RESET;\
+ type AUX_RESET_DONE;\
+ type AUX_REG_RW_CNTL_STATUS;\
+ type AUX_SW_USE_AUX_REG_REQ;\
+ type AUX_SW_DONE_USING_AUX_REG;\
+ type AUX_SW_AUTOINCREMENT_DISABLE;\
+ type AUX_SW_DATA_RW;\
+ type AUX_SW_INDEX;\
+ type AUX_SW_GO;\
+ type AUX_SW_DATA;\
+ type AUX_SW_REPLY_BYTE_COUNT;\
+ type AUX_SW_DONE;\
+ type AUX_SW_DONE_ACK;\
+ type AUXN_IMPCAL_ENABLE;\
+ type AUXP_IMPCAL_ENABLE;\
+ type AUXN_IMPCAL_OVERRIDE_ENABLE;\
+ type AUXP_IMPCAL_OVERRIDE_ENABLE;\
+ type AUX_RX_TIMEOUT_LEN;\
+ type AUX_RX_TIMEOUT_LEN_MUL;\
+ type AUXN_CALOUT_ERROR_AK;\
+ type AUXP_CALOUT_ERROR_AK;\
+ type AUX_SW_START_DELAY;\
+ type AUX_SW_WR_BYTES
+
+#define DCE10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE12_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* DCN10 MASK */
+#define DCN10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* for all other DCN */
+#define DCN_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
+
+#define AUX_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
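
AUX_SF() pastes the register and field names into single-field designated initializers, so each *_MASK_SH_LIST above expands into either a shift table or a mask table depending on the post_fix argument, matching the __SHIFT/_MASK symbols from the generated register headers. For example, as the resource files later in this patch do:

	/*
	 * AUX_SF(AUX_CONTROL, AUX_EN, __SHIFT)  ->  .AUX_EN = AUX_CONTROL__AUX_EN__SHIFT
	 * AUX_SF(AUX_CONTROL, AUX_EN, _MASK)    ->  .AUX_EN = AUX_CONTROL__AUX_EN_MASK
	 */
	static const struct dce110_aux_registers_shift aux_shift = {
		DCE_AUX_MASK_SH_LIST(__SHIFT)
	};

	static const struct dce110_aux_registers_mask aux_mask = {
		DCE_AUX_MASK_SH_LIST(_MASK)
	};
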
enum { /* This is the timeout as defined in DP 1.2a,
* 2.3.4 "Detailed uPacket TX AUX CH State Description".
*/
@@ -97,20 +250,34 @@ struct dce_aux {
uint32_t max_defer_write_retry;
bool acquire_reset;
+ struct dce_aux_funcs *funcs;
+};
+
+struct dce110_aux_registers_mask {
+ DCE_AUX_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce110_aux_registers_shift {
+ DCE_AUX_REG_FIELD_LIST(uint8_t);
};
+
struct aux_engine_dce110 {
struct dce_aux base;
const struct dce110_aux_registers *regs;
+ const struct dce110_aux_registers_mask *mask;
+ const struct dce110_aux_registers_shift *shift;
struct {
uint32_t aux_control;
uint32_t aux_arb_control;
uint32_t aux_sw_data;
uint32_t aux_sw_control;
uint32_t aux_interrupt_control;
+ uint32_t aux_dphy_rx_control1;
+ uint32_t aux_dphy_rx_control0;
uint32_t aux_sw_status;
} addr;
- uint32_t timeout_period;
+ uint32_t polling_timeout_period;
};
struct aux_engine_dce110_init_data {
@@ -120,12 +287,15 @@ struct aux_engine_dce110_init_data {
const struct dce110_aux_registers *regs;
};
-struct dce_aux *dce110_aux_engine_construct(
- struct aux_engine_dce110 *aux_engine110,
+struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs);
+ const struct dce110_aux_registers *regs,
+
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable);
void dce110_engine_destroy(struct dce_aux **engine);
@@ -139,4 +309,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
+
+struct dce_aux_funcs {
+ bool (*configure_timeout)
+ (struct ddc_service *ddc,
+ uint32_t timeout);
+ void (*destroy)
+ (struct aux_engine **ptr);
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 0b86cee4876f..ba995d3f2318 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -907,9 +907,6 @@ void dce_dmcu_destroy(struct dmcu **dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
- if (dmcu_dce->base.dmcu_state == DMCU_RUNNING)
- dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true);
-
kfree(dmcu_dce);
*dmcu = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index ac04d77058f0..32d145a0d6fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -679,6 +679,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 31b698bf9cfc..8aa937f496c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif(
}
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
@@ -636,11 +636,11 @@ static void dce_mi_free_dmif(
10, 3500);
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 76d54885374a..a5e122c721ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -506,6 +537,14 @@ static const struct dce_mem_input_mask mi_masks = {
.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
static struct mem_input *dce100_mem_input_create(
struct dc_context *ctx,
uint32_t inst)
@@ -571,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -611,7 +654,10 @@ struct dce_aux *dce100_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -997,6 +1043,8 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.disable_dp_clk_share = true;
+ dc->caps.extended_aux_timeout_support = false;
+
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 01a924bf477a..f0e837d14000 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -944,7 +944,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
/* notify audio driver for audio modes of monitor */
struct dc *core_dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
unsigned int i, num_audio = 1;
@@ -957,9 +956,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
- if (core_dc->res_pool->pp_smu)
- pp_smu = core_dc->res_pool->pp_smu;
-
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
@@ -984,7 +980,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
{
struct dc *dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
if (!pipe_ctx || !pipe_ctx->stream)
@@ -1001,9 +996,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->enabled = false;
- if (dc->res_pool->pp_smu)
- pp_smu = dc->res_pool->pp_smu;
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
@@ -1169,8 +1161,9 @@ static void build_audio_output(
}
}
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (state->clk_mgr &&
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
@@ -1418,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
- pipe_ctx->stream->link->psr_enabled = false;
+ pipe_ctx->stream->link->psr_feature_enabled = false;
return DC_OK;
}
@@ -1428,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
int i;
- enum connector_id connector_id;
- enum signal_type signal = SIGNAL_TYPE_NONE;
/* do not know BIOS back-front mapping, simply blank all. It will not
* hurt for non-DP
@@ -1440,15 +1431,12 @@ static void power_down_encoders(struct dc *dc)
}
for (i = 0; i < dc->link_count; i++) {
- connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
- if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
- (connector_id == CONNECTOR_ID_EDP)) {
+ enum signal_type signal = dc->links[i]->connector_signal;
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT))
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
- if (connector_id == CONNECTOR_ID_EDP)
- signal = SIGNAL_TYPE_EDP;
- }
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
@@ -1529,18 +1517,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context)
return NULL;
}
-static struct dc_link *get_edp_link(struct dc *dc)
-{
- int i;
-
- // report any eDP links, even unconnected DDI's
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
- return dc->links[i];
- }
- return NULL;
-}
-
static struct dc_link *get_edp_link_with_sink(
struct dc *dc,
struct dc_state *context)
@@ -1834,7 +1810,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;
/* PSR should not be enabled */
- if (pipe_ctx->stream->link->psr_enabled)
+ if (pipe_ctx->stream->link->psr_feature_enabled)
return false;
/* Nothing to compress */
@@ -2464,7 +2440,6 @@ static void dce110_program_front_end_for_pipe(
struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct mem_input *mi = pipe_ctx->plane_res.mi;
- struct pipe_ctx *old_pipe = NULL;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
@@ -2472,9 +2447,6 @@ static void dce110_program_front_end_for_pipe(
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
- if (dc->current_state)
- old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
-
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 89620adc81d8..83a4dbf6d76e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_110_REG_LIST(id),\
@@ -440,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -617,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -657,7 +700,10 @@ struct dce_aux *dce110_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1293,6 +1339,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.is_apu = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 21a657e79306..97dcc5d0862b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_DCE110_REG_LIST_DCE_BASE(id)\
@@ -417,6 +425,37 @@ static const struct dc_plane_cap plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -575,14 +614,18 @@ struct link_encoder *dce112_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -630,7 +673,10 @@ struct dce_aux *dce112_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1163,7 +1209,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 7c52f7f9196c..63543f6918ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE12_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE12_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_120_REG_LIST(id),\
@@ -356,6 +364,37 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define clk_src_regs(index, id)\
[index] = {\
CS_COMMON_REG_LIST_DCE_112(id),\
@@ -404,7 +443,10 @@ struct dce_aux *dce120_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -655,14 +697,18 @@ static struct link_encoder *dce120_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
@@ -1006,7 +1052,7 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.psp_setup_panel_mode = true;
-
+ dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
/*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 643ccb0ade00..3e8d4b49f279 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
@@ -431,6 +439,37 @@ static const struct dce_abm_mask abm_mask = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -491,7 +530,10 @@ struct dce_aux *dce80_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -669,14 +711,18 @@ struct link_encoder *dce80_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -895,6 +941,7 @@ static bool dce80_construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d8b2da18db39..997e9582edc7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
-static bool dpp_get_optimal_number_of_taps(
+bool dpp1_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
@@ -521,7 +521,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
.dpp_set_csc_default = dpp1_cm_set_output_csc_default,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index e2c613611ac9..1d4a7d640334 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1504,6 +1504,11 @@ void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
+bool dpp1_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 001db49e4bb2..14d1be6c66e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -841,6 +841,14 @@ void min_set_viewport(
REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
PRI_VIEWPORT_X_START_C, viewport_c->x,
PRI_VIEWPORT_Y_START_C, viewport_c->y);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
+ SEC_VIEWPORT_WIDTH_C, viewport_c->width,
+ SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
+ SEC_VIEWPORT_X_START_C, viewport_c->x,
+ SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
void hubp1_read_state_common(struct hubp *hubp)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index cb20d10288c0..ae70d9c0aa1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -47,6 +47,8 @@
SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
@@ -57,8 +59,12 @@
SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\
@@ -150,6 +156,8 @@
uint32_t DCSURF_SEC_VIEWPORT_START; \
uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_START_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
@@ -160,8 +168,12 @@
uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \
uint32_t DCSURF_SURFACE_INUSE; \
uint32_t DCSURF_SURFACE_INUSE_HIGH; \
uint32_t DCSURF_SURFACE_INUSE_C; \
@@ -279,6 +291,10 @@
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
@@ -289,8 +305,12 @@
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
@@ -469,6 +489,10 @@
type PRI_VIEWPORT_HEIGHT_C; \
type PRI_VIEWPORT_X_START_C; \
type PRI_VIEWPORT_Y_START_C; \
+ type SEC_VIEWPORT_WIDTH_C; \
+ type SEC_VIEWPORT_HEIGHT_C; \
+ type SEC_VIEWPORT_X_START_C; \
+ type SEC_VIEWPORT_Y_START_C; \
type PRIMARY_SURFACE_ADDRESS_HIGH;\
type PRIMARY_SURFACE_ADDRESS;\
type SECONDARY_SURFACE_ADDRESS_HIGH;\
@@ -479,8 +503,12 @@
type SECONDARY_META_SURFACE_ADDRESS;\
type PRIMARY_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_SURFACE_ADDRESS_C;\
+ type SECONDARY_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_SURFACE_ADDRESS_C;\
type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_META_SURFACE_ADDRESS_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_C;\
type SURFACE_INUSE_ADDRESS;\
type SURFACE_INUSE_ADDRESS_HIGH;\
type SURFACE_INUSE_ADDRESS_C;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 60123db7ba02..eb91432621ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -670,6 +670,10 @@ static void dcn10_bios_golden_init(struct dc *dc)
int i;
bool allow_self_fresh_force_enable = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
+ return;
+#endif
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_fresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
@@ -1300,6 +1304,10 @@ static void dcn10_init_hw(struct dc *dc)
}
dc->hwss.enable_power_gating_plane(dc->hwseq, true);
+
+ if (dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+
}
static void dcn10_reset_hw_ctx_wrap(
@@ -1452,15 +1460,15 @@ static void log_tf(struct dc_context *ctx,
DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
for (i = 0; i < hw_points_num; i++) {
- DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
- DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
- DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
+ DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
- DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
- DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
- DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
+ DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
}
@@ -2304,8 +2312,7 @@ void update_dchubp_dpp(
dc->res_pool->dccg->funcs->update_dpp_dto(
dc->res_pool->dccg,
dpp->inst,
- pipe_ctx->plane_res.bw.dppclk_khz,
- false);
+ pipe_ctx->plane_res.bw.dppclk_khz);
else
dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
dc->clk_mgr->clks.dispclk_khz / 2 :
@@ -2512,8 +2519,10 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
}
if (pipe_ctx->plane_state != NULL)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 8bf5f0f2301d..88fcc395adf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -113,6 +113,20 @@ struct dcn10_link_enc_registers {
uint32_t DIG_LANE_ENABLE;
/* UNIPHY */
uint32_t CHANNEL_XBAR_CNTL;
+ /* DPCS */
+ uint32_t RDPCSTX_PHY_CNTL3;
+ uint32_t RDPCSTX_PHY_CNTL4;
+ uint32_t RDPCSTX_PHY_CNTL5;
+ uint32_t RDPCSTX_PHY_CNTL6;
+ uint32_t RDPCSTX_PHY_CNTL7;
+ uint32_t RDPCSTX_PHY_CNTL8;
+ uint32_t RDPCSTX_PHY_CNTL9;
+ uint32_t RDPCSTX_PHY_CNTL10;
+ uint32_t RDPCSTX_PHY_CNTL11;
+ uint32_t RDPCSTX_PHY_CNTL12;
+ uint32_t RDPCSTX_PHY_CNTL13;
+ uint32_t RDPCSTX_PHY_CNTL14;
+ uint32_t RDPCSTX_PHY_CNTL15;
/* indirect registers */
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
@@ -250,6 +264,10 @@ struct dcn10_link_enc_registers {
type RDPCS_EXT_REFCLK_EN;\
type RDPCS_TX_FIFO_EN;\
type UNIPHY_LINK_ENABLE;\
+ type UNIPHY_CHANNEL0_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL1_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL2_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL3_XBAR_SOURCE;\
type UNIPHY_CHANNEL0_INVERT;\
type UNIPHY_CHANNEL1_INVERT;\
type UNIPHY_CHANNEL2_INVERT;\
@@ -337,16 +355,46 @@ struct dcn10_link_enc_registers {
type RDPCS_TX_FIFO_ERROR_MASK;\
type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\
type RDPCS_DPALT_4LANE_TOGGLE_MASK;\
+ type RDPCS_PHY_DPALT_DP4;\
type RDPCS_PHY_DPALT_DISABLE;\
type RDPCS_PHY_DPALT_DISABLE_ACK;\
type RDPCS_PHY_DP_MPLLB_V2I;\
type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\
+ type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\
+ type RDPCS_PHY_RX_VREF_CTRL;\
type RDPCS_PHY_DP_MPLLB_CP_INT;\
type RDPCS_PHY_DP_MPLLB_CP_PROP;\
type RDPCS_PHY_RX_REF_LD_VAL;\
type RDPCS_PHY_RX_VCO_LD_VAL;\
type DPCSTX_DEBUG_CONFIG; \
- type RDPCSTX_DEBUG_CONFIG
+ type RDPCSTX_DEBUG_CONFIG; \
+ type RDPCS_PHY_DP_TX0_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX0_EQ_PRE;\
+ type RDPCS_PHY_DP_TX0_EQ_POST;\
+ type RDPCS_PHY_DP_TX1_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX1_EQ_PRE;\
+ type RDPCS_PHY_DP_TX1_EQ_POST;\
+ type RDPCS_PHY_DP_TX2_EQ_MAIN;\
+ type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\
+ type RDPCS_PHY_DP_TX2_EQ_PRE;\
+ type RDPCS_PHY_DP_TX2_EQ_POST;\
+ type RDPCS_PHY_DP_TX3_EQ_MAIN;\
+ type RDPCS_PHY_DCO_RANGE;\
+ type RDPCS_PHY_DCO_FINETUNE;\
+ type RDPCS_PHY_DP_TX3_EQ_PRE;\
+ type RDPCS_PHY_DP_TX3_EQ_POST;\
+ type RDPCS_PHY_SUP_PRE_HP;\
+ type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\
+ type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\
+ type UNIPHYA_SOFT_RESET;\
+ type UNIPHYB_SOFT_RESET;\
+ type UNIPHYC_SOFT_RESET;\
+ type UNIPHYD_SOFT_RESET;\
+ type UNIPHYE_SOFT_RESET;\
+ type UNIPHYF_SOFT_RESET
#define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_LANE0EN;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index e9ebbbe256b4..0a9ad692f541 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding(
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
- REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1);
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
@@ -237,6 +240,9 @@ void opp1_set_dyn_expansion(
FMT_DYNAMIC_EXP_EN, 0,
FMT_DYNAMIC_EXP_MODE, 0);
+ if (opp->dyn_expansion == DYN_EXPANSION_DISABLE)
+ return;
+
/*00 - 10-bit -> 12-bit dynamic expansion*/
/*01 - 8-bit -> 12-bit dynamic expansion*/
if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 0f10adea000c..2c0ecfa5a643 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -116,6 +116,8 @@
type FMT_RAND_G_SEED; \
type FMT_RAND_B_SEED; \
type FMT_PIXEL_ENCODING; \
+ type FMT_SUBSAMPLING_MODE; \
+ type FMT_CBCR_BIT_REDUCTION_BYPASS; \
type FMT_CLAMP_DATA_EN; \
type FMT_CLAMP_COLOR_FORMAT; \
type FMT_DYNAMIC_EXP_EN; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e74a07d03fde..dabccbd49ad4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1230,59 +1230,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc)
return ret;
}
-bool optc1_is_matching_timing(struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing)
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing)
{
- struct dc_crtc_timing hw_crtc_timing = {0};
struct dcn_otg_state s = {0};
- if (tg == NULL || otg_timing == NULL)
+ if (tg == NULL || hw_crtc_timing == NULL)
return false;
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
- hw_crtc_timing.h_total = s.h_total + 1;
- hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
- hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start;
- hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
+ hw_crtc_timing->h_total = s.h_total + 1;
+ hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
+ hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start;
+ hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
- hw_crtc_timing.v_total = s.v_total + 1;
- hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
- hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start;
- hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
-
- if (otg_timing->h_total != hw_crtc_timing.h_total)
- return false;
-
- if (otg_timing->h_border_left != hw_crtc_timing.h_border_left)
- return false;
-
- if (otg_timing->h_addressable != hw_crtc_timing.h_addressable)
- return false;
-
- if (otg_timing->h_border_right != hw_crtc_timing.h_border_right)
- return false;
-
- if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch)
- return false;
-
- if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width)
- return false;
-
- if (otg_timing->v_total != hw_crtc_timing.v_total)
- return false;
-
- if (otg_timing->v_border_top != hw_crtc_timing.v_border_top)
- return false;
-
- if (otg_timing->v_addressable != hw_crtc_timing.v_addressable)
- return false;
-
- if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
- return false;
-
- if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width)
- return false;
+ hw_crtc_timing->v_total = s.v_total + 1;
+ hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
+ hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start;
+ hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
return true;
}
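
For reference, the new optc1_get_hw_timing() helper is pure register arithmetic: the OTG stores totals minus one, the addressable size is the total minus both blank intervals, and the front porch is the distance from blank start up to the total. Below is a minimal standalone sketch (not driver code; the register values are invented and roughly correspond to a 1080p/60 timing) that mirrors the horizontal half of that arithmetic:

/* Illustrative only: mirrors the h_* arithmetic in optc1_get_hw_timing()
 * with invented OTG register values (roughly a 1080p/60 timing). */
#include <stdio.h>

int main(void)
{
	unsigned int h_total_reg    = 2199;	/* OTG stores total minus one */
	unsigned int h_blank_start  = 2112;
	unsigned int h_blank_end    = 192;
	unsigned int h_sync_a_start = 2156;
	unsigned int h_sync_a_end   = 2200;

	unsigned int h_total       = h_total_reg + 1;
	unsigned int h_addressable = h_total_reg - ((h_total_reg - h_blank_start) + h_blank_end);
	unsigned int h_front_porch = h_total_reg + 1 - h_blank_start;
	unsigned int h_sync_width  = h_sync_a_end - h_sync_a_start;

	/* prints h_total=2200 h_addressable=1920 h_front_porch=88 h_sync_width=44 */
	printf("h_total=%u h_addressable=%u h_front_porch=%u h_sync_width=%u\n",
	       h_total, h_addressable, h_front_porch, h_sync_width);
	return 0;
}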
@@ -1486,7 +1452,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
- .is_matching_timing = optc1_is_matching_timing,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not need for FPGA */
.wait_for_state = optc1_wait_for_state,
@@ -1514,7 +1479,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.configure_crc = optc1_configure_crc,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc1_program_manual_trigger,
- .setup_manual_trigger = optc1_setup_manual_trigger
+ .setup_manual_trigger = optc1_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn10_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 83575599672e..c8d795b335ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -547,9 +547,8 @@ struct dcn_otg_state {
void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);
-bool optc1_is_matching_timing(
- struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing);
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing);
bool optc1_validate_timing(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 1599bb971111..15640aedd664 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -319,6 +319,14 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN10(id),\
@@ -471,6 +479,28 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
@@ -642,7 +672,10 @@ struct dce_aux *dcn10_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -751,14 +784,18 @@ struct link_encoder *dcn10_link_encoder_create(
{
struct dcn10_link_encoder *enc10 =
kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc10)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn10_link_encoder_construct(enc10,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -1308,6 +1345,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ dc->caps.extended_aux_timeout_support = false;
+
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
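
The resource change above stops indexing link_enc_regs[] by the raw transmitter enum and instead routes the lookup through map_transmitter_id_to_phy_instance(). A simplified sketch of that pattern follows; the enum values, the register table, and the base addresses are invented stand-ins, not the driver's definitions:

/* Sketch of mapping a transmitter id to a PHY instance and using the
 * mapped index (not the raw enum value) to pick a register block. */
#include <assert.h>
#include <stdio.h>

enum transmitter { UNIPHY_A, UNIPHY_B, UNIPHY_C, UNIPHY_D };

struct link_enc_regs { unsigned int base; };	/* placeholder register block */

static const struct link_enc_regs regs_by_phy[] = {
	{ 0x1000 }, { 0x2000 }, { 0x3000 }, { 0x4000 },
};

static int map_transmitter_to_phy(enum transmitter t)
{
	switch (t) {
	case UNIPHY_A: return 0;
	case UNIPHY_B: return 1;
	case UNIPHY_C: return 2;
	case UNIPHY_D: return 3;
	default:
		assert(0);	/* unknown transmitter: fall back to PHY 0 */
		return 0;
	}
}

int main(void)
{
	enum transmitter t = UNIPHY_C;
	const struct link_enc_regs *regs = &regs_by_phy[map_transmitter_to_phy(t)];

	printf("transmitter %d -> register base 0x%x\n", t, regs->base);
	return 0;
}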
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 9aa258f3550b..06e5bbb4545c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1553,6 +1553,66 @@ unsigned int enc1_dig_source_otg(
return tg_inst;
}
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth)
+{
+ uint32_t hw_encoding = 0;
+ uint32_t hw_depth = 0;
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enc == NULL ||
+ encoding == NULL ||
+ depth == NULL)
+ return false;
+
+ REG_GET_2(DP_PIXEL_FORMAT,
+ DP_PIXEL_ENCODING, &hw_encoding,
+ DP_COMPONENT_DEPTH, &hw_depth);
+
+ switch (hw_depth) {
+ case DP_COMPONENT_PIXEL_DEPTH_6BPC:
+ *depth = COLOR_DEPTH_666;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_8BPC:
+ *depth = COLOR_DEPTH_888;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_10BPC:
+ *depth = COLOR_DEPTH_101010;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_12BPC:
+ *depth = COLOR_DEPTH_121212;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_16BPC:
+ *depth = COLOR_DEPTH_161616;
+ break;
+ default:
+ *depth = COLOR_DEPTH_UNDEFINED;
+ break;
+ }
+
+ switch (hw_encoding) {
+ case DP_PIXEL_ENCODING_TYPE_RGB444:
+ *encoding = PIXEL_ENCODING_RGB;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR422:
+ *encoding = PIXEL_ENCODING_YCBCR422;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR444:
+ case DP_PIXEL_ENCODING_TYPE_Y_ONLY:
+ *encoding = PIXEL_ENCODING_YCBCR444;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR420:
+ *encoding = PIXEL_ENCODING_YCBCR420;
+ break;
+ default:
+ *encoding = PIXEL_ENCODING_UNDEFINED;
+ break;
+ }
+ return true;
+}
+
static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dp_set_stream_attribute =
enc1_stream_encoder_dp_set_stream_attribute,
@@ -1589,6 +1649,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
};
void dcn10_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index a512cbea00d1..c9cbc21d121e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -621,4 +621,9 @@ void get_audio_clock_info(
void enc1_reset_hdmi_stream_attribute(
struct stream_encoder *enc);
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 16476ed25536..1e1151356e60 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -44,16 +44,12 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg2_update_dpp_dto(struct dccg *dccg,
- int dpp_inst,
- int req_dppclk,
- bool reduce_divider_only)
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
- int current_phase, current_modulo;
ASSERT(req_dppclk <= ref_dppclk);
/* need to clamp to 8 bits */
@@ -65,28 +61,9 @@ void dccg2_update_dpp_dto(struct dccg *dccg,
if (req_dppclk > ref_dppclk)
req_dppclk = ref_dppclk;
}
-
- REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst],
- DPPCLK0_DTO_PHASE, &current_phase,
- DPPCLK0_DTO_MODULO, &current_modulo);
-
- if (reduce_divider_only) {
- // requested phase/modulo greater than current
- if (req_dppclk * current_modulo >= current_phase * ref_dppclk) {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
- } else {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, current_phase,
- DPPCLK0_DTO_MODULO, current_modulo);
- }
- } else {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
- }
-
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, req_dppclk,
+ DPPCLK0_DTO_MODULO, ref_dppclk);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
@@ -119,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
void dccg2_init(struct dccg *dccg)
{
- struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
- // Fallthrough intentional to program all available dpp_dto's
- switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
- case 6:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
- /* Fall through */
- case 5:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
- /* Fall through */
- case 4:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
- /* Fall through */
- case 3:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
- /* Fall through */
- case 2:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
- /* Fall through */
- case 1:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
- break;
- default:
- ASSERT(false);
- break;
- }
}
static const struct dccg_funcs dccg2_funcs = {
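
With the reduce_divider_only path removed, dccg2_update_dpp_dto() always programs PHASE = req_dppclk and MODULO = ref_dppclk, so the divider output is ref * phase / modulo, i.e. the requested DPP clock. A rough standalone sketch of that relationship is below; the clock numbers are hypothetical, and the right-shift loop is only an assumed stand-in for the driver's "clamp to 8 bits" step, which is not shown in full in this hunk:

/* Illustrative sketch: DTO output = ref_dppclk * PHASE / MODULO. */
#include <stdio.h>

int main(void)
{
	int ref_dppclk = 600000;	/* kHz, hypothetical DPP reference clock */
	int req_dppclk = 400000;	/* kHz, requested DPP clock */
	int phase = req_dppclk;
	int modulo = ref_dppclk;

	/* Assumed simplification: scale both values down until they fit in
	 * the 8-bit PHASE/MODULO register fields, preserving their ratio. */
	while (modulo > 0xff) {
		phase >>= 1;
		modulo >>= 1;
	}

	printf("PHASE=%d MODULO=%d -> dppclk ~= %lld kHz\n",
	       phase, modulo, (long long)ref_dppclk * phase / modulo);
	return 0;
}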
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 74a074a873cd..2205cb0204e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -97,7 +97,7 @@ struct dcn_dccg {
const struct dccg_mask *dccg_mask;
};
-void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool raise_divider_only);
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 2f5aade1e882..4d7e45892f08 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -376,13 +376,6 @@ bool dpp2_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
@@ -464,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 290b2854bd2c..5b03b737b1d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -30,16 +30,20 @@
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
-#define TF_REG_LIST_DCN20(id) \
- TF_REG_LIST_DCN(id), \
+#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \
SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON(id) \
SRI(CM_BLNDGAM_CONTROL, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \
@@ -66,9 +70,6 @@
SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \
@@ -147,7 +148,12 @@
SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
- SRI(CM_SHAPER_LUT_INDEX, CM, id), \
+ SRI(CM_SHAPER_LUT_INDEX, CM, id)
+
+#define TF_REG_LIST_DCN20(id) \
+ TF_REG_LIST_DCN(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON_UPDATED(id), \
SRI(CURSOR_CONTROL, CURSOR0_, id), \
SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
@@ -166,27 +172,41 @@
SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
-#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
- TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+
+#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -261,18 +281,9 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -341,9 +352,6 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \
TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \
@@ -356,7 +364,6 @@
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \
- TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \
TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \
TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \
@@ -521,9 +528,14 @@
TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \
TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
@@ -560,6 +572,7 @@
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
@@ -593,6 +606,7 @@
type OBUF_MEM_PWR_FORCE;\
type LUT_MEM_PWR_FORCE
+
struct dcn2_dpp_shift {
TF_REG_FIELD_LIST_DCN2_0(uint8_t);
};
@@ -691,11 +705,6 @@ void dpp2_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps);
-
bool dpp2_construct(struct dcn20_dpp *dpp2,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 1b419407af94..63eb377ed9c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -118,7 +118,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index cd8bc92ce3ba..880954ac0b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
struct scaling_taps num_taps)
{
uint32_t h_ratio_luma = 1;
- uint32_t h_ratio_chroma = 1;
uint32_t h_taps_luma = num_taps.h_taps;
uint32_t h_taps_chroma = num_taps.h_taps_c;
int32_t h_init_phase_luma = 0;
@@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
h_ratio_luma = -1;
else
h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;
- h_ratio_chroma = h_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma);
@@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
enum dwb_subsample_position subsample_position)
{
uint32_t v_ratio_luma = 1;
- uint32_t v_ratio_chroma = 1;
uint32_t v_taps_luma = num_taps.v_taps;
uint32_t v_taps_chroma = num_taps.v_taps_c;
int32_t v_init_phase_luma = 0;
@@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
v_ratio_luma = -1;
else
v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5;
- v_ratio_chroma = v_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index b83c022e2c6f..8b8438566101 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -186,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl
}
static void hubbub2_det_request_size(
+ unsigned int detile_buf_size,
unsigned int height,
unsigned int width,
unsigned int bpe,
bool *req128_horz_wc,
bool *req128_vert_wc)
{
- unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
-
unsigned int blk256_height = 0;
unsigned int blk256_width = 0;
unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
@@ -236,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
&segment_order_horz, &segment_order_vert))
return false;
- hubbub2_det_request_size(input->surface_size.height, input->surface_size.width,
+ hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
bpe, &req128_horz_wc, &req128_vert_wc);
if (!req128_horz_wc && !req128_vert_wc) {
@@ -588,7 +588,7 @@ static void hubbub2_program_watermarks(
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
}
static const struct hubbub_funcs hubbub2_funcs = {
@@ -600,7 +600,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
- .program_watermarks = hubbub2_program_watermarks
+ .program_watermarks = hubbub2_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
@@ -618,4 +619,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 626117d3b4e9..501532dd523a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -81,6 +81,7 @@ struct dcn20_hubbub {
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
struct dcn20_vmid vmid[16];
+ unsigned int detile_buf_size;
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 1212da12c414..921a36668ced 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -688,7 +688,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
return true;
}
-static bool dcn20_set_blend_lut(
+bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -710,7 +710,7 @@ static bool dcn20_set_blend_lut(
return result;
}
-static bool dcn20_set_shaper_3dlut(
+bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -999,72 +999,6 @@ void dcn20_enable_plane(
}
-static void dcn20_program_pipe(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- pipe_ctx->plane_state->update_flags.bits.full_update =
- context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update;
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn20_enable_plane(dc, pipe_ctx, context);
-
- update_dchubp_dpp(dc, pipe_ctx, context);
-
- set_hdr_multiplier(pipe_ctx);
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
- */
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
-}
-
-static void dcn20_program_all_pipe_in_tree(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) {
- bool blank = !is_pipe_tree_visible(pipe_ctx);
-
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
-
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
-
- if (dc->hwss.update_odm)
- dc->hwss.update_odm(dc, context, pipe_ctx);
- }
-
- if (pipe_ctx->plane_state != NULL)
- dcn20_program_pipe(dc, pipe_ctx, context);
-
- if (pipe_ctx->bottom_pipe != NULL) {
- ASSERT(pipe_ctx->bottom_pipe != pipe_ctx);
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
- } else if (pipe_ctx->next_odm_pipe != NULL) {
- ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx);
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context);
- }
-}
-
void dcn20_pipe_control_lock_global(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1124,114 +1058,456 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_apply_ctx_for_surface(
- struct dc *dc,
- const struct dc_stream_state *stream,
- int num_planes,
- struct dc_state *context)
+static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
- int i;
- struct timing_generator *tg;
- bool removed_pipe[6] = { false };
- bool interdependent_update = false;
- struct pipe_ctx *top_pipe_to_program =
- find_top_pipe_for_stream(dc, context, stream);
- struct pipe_ctx *prev_top_pipe_to_program =
- find_top_pipe_for_stream(dc, dc->current_state, stream);
- DC_LOGGER_INIT(dc->ctx->logger);
+ new_pipe->update_flags.raw = 0;
- if (!top_pipe_to_program)
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
- /* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == stream &&
- pipe_ctx->stream == old_pipe_ctx->stream)
- pipe_ctx->stream_res.gsl_group =
- old_pipe_ctx->stream_res.gsl_group;
+ /* Detect top pipe only changes */
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ /* Detect odm changes */
+ if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
+ || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
+ || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Detect global sync changes */
+ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
+ || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
+ || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
+ || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
+ new_pipe->update_flags.bits.global_sync = 1;
}
- tg = top_pipe_to_program->stream_res.tg;
+ /*
+ * Detect opp / tg change, only set on change, not on enable
+ * Assume mpcc inst = pipe index; if not, this code needs to be updated
+ * since mpcc is what is affected by these. In fact all of our sequence
+ * makes this assumption at the moment with how hubp reset is matched to
+ * same index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /* Detect mpcc blending changes, only dpp inst and bot matter here */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp
+ || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && new_pipe->bottom_pipe
+ && old_pipe->bottom_pipe->plane_res.mpcc_inst
+ != new_pipe->bottom_pipe->plane_res.mpcc_inst))
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
+ struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
+
+ /* Detect pipe interdependent updates */
+ if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
+ old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
+ old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
+ old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
+ old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
+ old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
+ old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
+ old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
+ old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
+ old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
+ old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
+ old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
+ old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
+ old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
+ old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
+ old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
+ old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
+ old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
+ old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
+ memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
+ memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+}
- interdependent_update = top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update;
+static void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- if (interdependent_update)
- lock_all_pipes(dc, context, true);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
+ if (pipe_ctx->update_flags.bits.dppclk)
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
- if (num_planes == 0) {
- /* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ /* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
+ * VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
+ * VTG has a 1:1 mapping with OTG. Each pipe HUBP will select which VTG
+ */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
+ if (pipe_ctx->update_flags.bits.hubp_interdependent)
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ struct dc_bias_and_scale bns_params = {0};
+
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ plane_state->color_space,
+ NULL);
+
+ if (dpp->funcs->dpp_program_bias_and_scale) {
+ //TODO :for CNVC set scale and bias registers if necessary
+ dcn10_build_prescale_params(&bns_params, plane_state);
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
}
- /* Disconnect unused mpcc */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dc->hwss.disable_plane(dc, old_pipe_ctx);
+ if (pipe_ctx->update_flags.bits.mpcc
+ || plane_state->update_flags.bits.global_alpha_change
+ || plane_state->update_flags.bits.per_pixel_alpha_change) {
+ /* Need mpcc to be idle if changing opp */
+ if (pipe_ctx->update_flags.bits.opp_changed) {
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+ int mpcc_inst;
+
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
+ continue;
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ }
}
+ dc->hwss.update_mpcc(dc, pipe_ctx);
+ }
- if ((!pipe_ctx->plane_state ||
- pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
- old_pipe_ctx->plane_state &&
- old_pipe_ctx->stream_res.tg == tg) {
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
- dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+
+ /* Any updates are handled in dc interface, just need to apply existing for plane enable */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
+ && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
- DC_LOG_DC("Reset mpcc for pipe %d\n",
- old_pipe_ctx->pipe_idx);
- }
+ /* Any updates are handled in dc interface, just need
+ * to apply existing for plane enable / opp change */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->stream->update_flags.bits.gamut_remap
+ || pipe_ctx->stream->update_flags.bits.out_csc) {
+ /* dpp/cm gamut remap*/
+ dc->hwss.program_gamut_remap(pipe_ctx);
+
+ /*call the dcn2 method which uses mpc csc*/
+ dc->hwss.program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+ hubp->opp_id);
}
- if (num_planes > 0)
- dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror,
+ 0);
+ hubp->power_gated = false;
+ }
- /* Program secondary blending tree and writeback pipes */
- if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
- dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
+ if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
- if (interdependent_update)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (pipe_ctx->update_flags.bits.enable)
+ hubp->funcs->set_blank(hubp, false);
+}
+
+
+static void dcn20_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ /* Only need to unblank on top pipe */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
+ && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+
+ if (pipe_ctx->update_flags.bits.global_sync) {
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ pipe_ctx->pipe_dlg_param.vready_offset,
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm)
+ dc->hwss.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable)
+ dcn20_enable_plane(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
+ set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish,
+ * so only do gamma programming when powering on; the internal memcmp avoids
+ * updating on slave planes
+ */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+ * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+}
+
+static bool does_pipe_need_lock(struct pipe_ctx *pipe)
+{
+ if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->update_flags.raw)
+ return true;
+ if (pipe->bottom_pipe)
+ return does_pipe_need_lock(pipe->bottom_pipe);
+
+ return false;
+}
+
+static void dcn20_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ int i;
+ bool pipe_locked[MAX_PIPES] = {false};
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
+ context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (!context->res_ctx.pipe_ctx[i].top_pipe &&
+ does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- /* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
- !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
- continue;
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
+ pipe_locked[i] = true;
+ }
- pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
- pipe_ctx->plane_res.hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
- if (interdependent_update)
- lock_all_pipes(dc, context, false);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
+ /*
+ * Program all updated pipes, order matters for mpcc setup. Start with
+ * top pipe and program all pipes that follow in order
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ dcn20_program_pipe(dc, pipe, context);
+ pipe = pipe->bottom_pipe;
+ }
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
+ && dc->hwss.program_all_writeback_pipes_in_tree)
+ dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+ }
+ }
+ /* Unlock all locked pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
- dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ if (pipe_locked[i]) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
* If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -1239,15 +1515,22 @@ static void dcn20_apply_ctx_for_surface(
* will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
* is unsupported on DCN.
*/
- i = 0;
- if (num_planes > 0 && top_pipe_to_program &&
- (prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) {
- while (i < TIMEOUT_FOR_PIPE_ENABLE_MS &&
- top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) {
- i += 1;
- msleep(1);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ msleep(1);
}
}
+
+ /* WA to apply WM setting */
+ if (dc->hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
}
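For readers working through the long hunk above, a hypothetical summary of the order in which dcn20_program_front_end_for_ctx walks its phases (the enum and its names are illustrative only, not part of the patch):

enum dcn20_front_end_phase {
	FE_DETECT_PIPE_CHANGES,    /* diff current vs. new state into per-pipe update_flags */
	FE_LOCK_CHANGED_TREES,     /* pipe_control_lock(..., true) on trees that need it    */
	FE_BLANK_AND_DISCONNECT,   /* blank_pixel_data() and plane_atomic_disconnect() for
	                              pipes being disabled or moved to another opp          */
	FE_PROGRAM_TREES_TOP_DOWN, /* dcn20_program_pipe() from each top pipe down the
	                              bottom_pipe chain, then the writeback pipes           */
	FE_UNLOCK_TREES,           /* pipe_control_lock(..., false)                         */
	FE_DISABLE_REMOVED_PLANES, /* disable_plane() for pipes flagged disable             */
	FE_WAIT_FOR_FLIP_PENDING,  /* poll hubp_is_flip_pending() on newly enabled pipes    */
};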
@@ -1319,8 +1602,12 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
if (pipe_ctx->prev_odm_pipe == NULL)
dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1337,7 +1624,8 @@ bool dcn20_update_bandwidth(
static void dcn20_enable_writeback(
struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info)
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
@@ -1354,7 +1642,7 @@ static void dcn20_enable_writeback(
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
- mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
@@ -1702,6 +1990,28 @@ static void dcn20_reset_hw_ctx_wrap(
}
}
+void dcn20_get_mpctree_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ const struct tg_color pipe_colors[6] = {
+ {MAX_TG_COLOR_VALUE, 0, 0}, // red
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
+ {0, MAX_TG_COLOR_VALUE, 0}, // blue
+ {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
+ {0, 0, MAX_TG_COLOR_VALUE}, // green
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
+ };
+
+ struct pipe_ctx *top_pipe = pipe_ctx;
+
+ while (top_pipe->top_pipe) {
+ top_pipe = top_pipe->top_pipe;
+ }
+
+ *color = pipe_colors[top_pipe->pipe_idx];
+}
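A minimal sketch of what the walk above implies (the helper name is hypothetical, not part of the patch): every pipe in an MPC blending tree resolves to its top pipe, so the whole tree is painted with a single visual-confirm color, selected by the top pipe's pipe_idx.

static int mpc_tree_owner_idx(const struct pipe_ctx *pipe_ctx)
{
	/* climb to the top of the MPC tree, exactly as the function above does */
	while (pipe_ctx->top_pipe)
		pipe_ctx = pipe_ctx->top_pipe;

	/* index into pipe_colors[]; identical for every pipe in the same tree */
	return pipe_ctx->pipe_idx;
}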
+
static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -1719,6 +2029,9 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
dcn10_get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
+ dcn20_get_mpctree_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
}
if (per_pixel_alpha)
@@ -1919,8 +2232,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
- if (link->dc->hwss.program_dmdata_engine)
- link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
+ if (link->dc->hwss.program_dmdata_engine)
+ link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ }
link->dc->hwss.update_info_frame(pipe_ctx);
@@ -2095,7 +2410,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc)
dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
- dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface;
+ dc->hwss.apply_ctx_for_surface = NULL;
+ dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 92ab3dd91814..3098f1049ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -96,4 +96,20 @@ void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
void dcn20_display_init(struct dc *dc);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_plane(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_get_mpctree_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 3736b5548a25..0c98a0bbbd14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -91,6 +91,13 @@ struct mpll_cfg {
uint32_t ref_range;
uint32_t ref_clk;
bool hdmimode_enable;
+ bool sup_pre_hp;
+ bool dp_tx0_vergdrv_byp;
+ bool dp_tx1_vergdrv_byp;
+ bool dp_tx2_vergdrv_byp;
+ bool dp_tx3_vergdrv_byp;
+
+
};
struct dpcssys_phy_seq_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 2137e2be2140..3b613fb93ef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -287,6 +287,10 @@ void optc2_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 2;
else
*num_of_src_opp = 1;
+
+ /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
+ if (*src_opp_id_1 == 0xf)
+ *num_of_src_opp = 1;
}
void optc2_set_dwb_source(struct timing_generator *optc,
@@ -456,7 +460,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
- .is_matching_timing = optc1_is_matching_timing
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn20_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 6b2f2f1a1c9c..bbd1c98564be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -581,11 +581,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};
static const struct dcn2_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN10
};
static const struct dcn2_dpp_mask tf_mask = {
- TF_REG_LIST_SH_MASK_DCN20(_MASK)
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN10
};
#define dwbc_regs_dcn2(id)\
@@ -732,6 +734,42 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define dsc_regsDCN20(id)\
[id] = {\
@@ -825,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
@@ -922,7 +960,10 @@ struct dce_aux *dcn20_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1042,14 +1083,18 @@ struct link_encoder *dcn20_link_encoder_create(
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc20)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn20_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -1159,6 +1204,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn20_hwseq_create,
};
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
@@ -1601,7 +1648,7 @@ static void swizzle_to_dml_params(
}
}
-static bool dcn20_split_stream_for_odm(
+bool dcn20_split_stream_for_odm(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
@@ -1622,7 +1669,6 @@ static bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.dsc = NULL;
#endif
if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
- ASSERT(!next_odm_pipe->next_odm_pipe);
next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
}
@@ -1679,7 +1725,7 @@ static bool dcn20_split_stream_for_odm(
return true;
}
-static void dcn20_split_stream_for_mpc(
+void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
@@ -1765,7 +1811,7 @@ int dcn20_populate_dml_pipes_from_context(
pipe_cnt = i;
continue;
}
- if (!resource_are_streams_timing_synchronizable(
+ if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream)) {
synchronized_vblank = false;
@@ -1897,7 +1943,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
case PIXEL_ENCODING_YCBCR420:
pipes[pipe_cnt].dout.output_format = dm_420;
- pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
+ pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
if (true) /* todo */
@@ -1911,6 +1957,11 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
+ pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
+#endif
+
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
@@ -2132,7 +2183,7 @@ void dcn20_set_mcif_arb_params(
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
int i;
@@ -2167,7 +2218,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
}
#endif
-static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
@@ -2207,7 +2258,8 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
*/
if (secondary_pipe == NULL) {
for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
- if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
preferred_pipe_idx = j;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
@@ -2243,29 +2295,11 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
}
-bool dcn20_fast_validate_bw(
+void dcn20_merge_pipes_for_validate(
struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *pipe_split_from,
- int *vlevel_out)
+ struct dc_state *context)
{
- bool out = false;
-
- int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
- bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
- bool force_split = false;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- bool failed_non_odm_dsc = false;
-#endif
- int split_threshold = dc->res_pool->pipe_count / 2;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
-
-
- ASSERT(pipes);
- if (!pipes)
- return false;
+ int i;
/* merge previously split odm pipes since mode support needs to make the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2320,51 +2354,19 @@ bool dcn20_fast_validate_bw(
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
+}
- if (dc->res_pool->funcs->populate_dml_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
- else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
-
- *pipe_cnt_out = pipe_cnt;
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- context->bw_ctx.dml.ip.odm_capable = 0;
-
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- context->bw_ctx.dml.ip.odm_capable = odm_capable;
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- /* 1 dsc per stream dsc validation */
- if (vlevel <= context->bw_ctx.dml.soc.num_states)
- if (!dcn20_validate_dsc(dc, context)) {
- failed_non_odm_dsc = true;
- vlevel = context->bw_ctx.dml.soc.num_states + 1;
- }
-#endif
-
- if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- if (vlevel > context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
- || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
- context->commit_hints.full_update_needed = true;
-
- /*initialize pipe_just_split_from to invalid idx*/
- for (i = 0; i < MAX_PIPES; i++)
- pipe_split_from[i] = -1;
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ bool *split)
+{
+ int i, pipe_idx, vlevel_split;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
- /* Single display only conditionals get set here */
+ /* Single display loop, exits if there is more than one display */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
bool exit_loop = false;
@@ -2391,38 +2393,107 @@ bool dcn20_fast_validate_bw(
if (exit_loop)
break;
}
-
- if (context->stream_count > split_threshold)
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (context->stream_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
- vlevel_unsplit = vlevel;
+ /* The avoid-split loop looks for the lowest voltage level that allows the most unsplit pipes possible */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+ break;
+ /* Impossible to not split this pipe */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ pipe_idx++;
+ }
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ }
+
+ /* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
- if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
- break;
+
+ if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+ split[i] = true;
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = true;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = true;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
+ }
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
pipe_idx++;
}
+ return vlevel;
+}
+
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out)
+{
+ bool out = false;
+ bool split[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx, vlevel;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ dcn20_merge_pipes_for_validate(dc, context);
+
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes);
+
+ *pipe_cnt_out = pipe_cnt;
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+
+ /* initialize pipe_split_from to invalid idx */
+ for (i = 0; i < MAX_PIPES; i++)
+ pipe_split_from[i] = -1;
+
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
- bool need_split = true;
- bool need_split3d;
if (!pipe->stream || pipe_split_from[i] >= 0)
continue;
pipe_idx++;
- if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- force_split = true;
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
- }
- if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
@@ -2440,40 +2511,26 @@ bool dcn20_fast_validate_bw(
if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
continue;
- need_split3d = ((pipe->stream->view_format ==
- VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- pipe->stream->view_format ==
- VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
- (pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
- pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_SIDE_BY_SIDE));
-
- if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
- need_split = false;
- vlevel = vlevel_unsplit;
- context->bw_ctx.dml.vba.maxMpcComb = 0;
- } else
- need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
-
/* We do not support mpo + odm at the moment */
if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
- if (need_split3d || need_split || force_split) {
+ if (split[i]) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
- ASSERT(hsplit_pipe || force_split);
- if (!hsplit_pipe)
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe) {
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2;
continue;
-
+ }
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe))
goto validate_fail;
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
} else
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
@@ -2487,7 +2544,7 @@ bool dcn20_fast_validate_bw(
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Actual dsc count per stream dsc validation*/
- if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
+ if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
@@ -2506,7 +2563,7 @@ validate_out:
return out;
}
-void dcn20_calculate_wm(
+static void dcn20_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
@@ -2527,7 +2584,7 @@ void dcn20_calculate_wm(
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipe_idx++;
@@ -2536,7 +2593,7 @@ void dcn20_calculate_wm(
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
}
@@ -2579,6 +2636,11 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2;
@@ -2590,6 +2652,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3;
@@ -2601,6 +2667,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
@@ -2610,6 +2680,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
}
void dcn20_calculate_dlg_params(
@@ -2629,7 +2703,7 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
- context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
@@ -2645,8 +2719,8 @@ void dcn20_calculate_dlg_params(
continue;
if (!visited[pipe_idx]) {
- display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src;
- display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest;
+ display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
+ display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;
dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
@@ -2806,7 +2880,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
ASSERT(false);
restore_dml_state:
- memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
return voltage_supported;
@@ -2892,6 +2965,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
@@ -2900,8 +2974,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
- ASSERT(pipe_count > 0);
-
for (i = 0; i < pipe_count; i++) {
struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
GFP_KERNEL);
@@ -2947,7 +3019,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
@@ -2962,7 +3034,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
return pp_smu;
}
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -2970,7 +3042,7 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
}
}
-static void cap_soc_clocks(
+void dcn20_cap_soc_clocks(
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks)
{
@@ -3037,10 +3109,10 @@ static void cap_soc_clocks(
}
}
-static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
+ struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
int i;
int num_calculated_states = 0;
int min_dcfclk = 0;
@@ -3048,12 +3120,18 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
if (num_states == 0)
return;
+ memset(calculated_states, 0, sizeof(calculated_states));
+
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
- else
- // Accounting for SOC/DCF relationship, we can go as high as
- // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506.
- min_dcfclk = 507;
+ else {
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ min_dcfclk = 310;
+ else
+ // Accounting for SOC/DCF relationship, we can go as high as
+ // 506Mhz in Vmin.
+ min_dcfclk = 506;
+ }
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
@@ -3093,7 +3171,7 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
bb->clock_limits[num_calculated_states].state = bb->num_states;
}
-static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
kernel_fpu_begin();
if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
@@ -3292,14 +3370,14 @@ static bool init_soc_bounding_box(struct dc *dc,
}
if (clock_limits_available && uclk_states_available && num_states)
- update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
+ dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
else if (clock_limits_available)
- cap_soc_clocks(loaded_bb, max_clocks);
+ dcn20_cap_soc_clocks(loaded_bb, max_clocks);
}
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
- patch_bounding_box(dc, loaded_bb);
+ dcn20_patch_bounding_box(dc, loaded_bb);
return true;
}
@@ -3345,6 +3423,7 @@ static bool construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.hw_3d_lut = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 44f95aa0d61e..fef473d68a4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -95,9 +95,12 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx);
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
-
+void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb);
+void dcn20_cap_soc_clocks(
+ struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table max_clocks);
+void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states);
struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst);
@@ -116,6 +119,31 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context);
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ bool *split);
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
+#endif
+void dcn20_split_stream_for_mpc(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe);
+bool dcn20_split_stream_for_odm(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe);
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
bool dcn20_fast_validate_bw(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 5ab9d6240498..4b3401616434 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -578,6 +578,10 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format =
+ enc1_stream_encoder_dp_get_pixel_format,
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index ff50ae71fe27..14113ccf498d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for DCN21.
-DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
+DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index d1266741763b..f546260c15b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
@@ -51,7 +52,7 @@
#ifdef NUM_VMID
#undef NUM_VMID
#endif
-#define NUM_VMID 1
+#define NUM_VMID 16
static uint32_t convert_and_clamp(
uint32_t wm_ns,
@@ -71,56 +72,76 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
//Poll until RIOMMU_ACTIVE = 1
- //TODO: Figure out interval us and retry count
- REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100);
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
- //Reflect the power status of DCHUBBUB
- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
- //Start rIOMMU prefetching
- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
- // Enable dynamic clock gating
- REG_UPDATE_4(DCHVM_CLK_CTRL,
- HVM_DISPCLK_R_GATE_DIS, 0,
- HVM_DISPCLK_G_GATE_DIS, 0,
- HVM_DCFCLK_R_GATE_DIS, 0,
- HVM_DCFCLK_G_GATE_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
- //Poll until HOSTVM_PREFETCH_DONE = 1
- //TODO: Figure out interval us and retry count
- REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ }
}
-static int hubbub21_init_dchub(struct hubbub *hubbub,
+int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
- FB_BASE, pa_config->system_aperture.fb_base);
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
- FB_TOP, pa_config->system_aperture.fb_top);
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
- FB_OFFSET, pa_config->system_aperture.fb_offset);
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
- AGP_BOT, pa_config->system_aperture.agp_bot);
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
- AGP_TOP, pa_config->system_aperture.agp_top);
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
- AGP_BASE, pa_config->system_aperture.agp_base);
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
+ }
dcn21_dchvm_init(hubbub);
return NUM_VMID;
}
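A back-of-the-envelope check of the encoding above, under the assumption (suggested by the shifts, not stated in the patch) that the system-aperture registers take 16 MB (1 << 24) units and the GART page-table bounds take 4 KB (1 << 12) pages; the sample addresses are hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fb_base  = 0x0000008000000000ULL;	/* hypothetical FB base, the 512 GB mark */
	uint64_t pt_start = 0x0000000000400000ULL;	/* hypothetical page-table start, 4 MB   */

	/* FB_BASE field as programmed above: address expressed in 16 MB units */
	printf("FB_BASE          = 0x%llx\n", (unsigned long long)(fb_base >> 24));	/* 0x8000 */
	/* page_table_start_addr field: address expressed in 4 KB pages */
	printf("page_table_start = 0x%llx\n", (unsigned long long)(pt_start >> 12));	/* 0x400  */

	return 0;
}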
-static void hubbub21_program_urgent_watermarks(
+void hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
+ hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
+ }
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
+ hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
+ }
+
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
@@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
+ hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
+ }
+
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
@@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
}
+
+ if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
+ hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
+ }
}
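Every watermark write in this function (and in the stutter and pstate variants further down) follows the same gate; a standalone sketch of the rule, with a hypothetical helper name: a watermark is raised immediately, but only lowered once the caller has declared it safe to do so.

#include <stdbool.h>
#include <stdint.h>

/* hypothetical helper, not part of the patch */
static bool wm_should_program(uint32_t cached_ns, uint32_t new_ns, bool safe_to_lower)
{
	/* raise unconditionally; lower only when safe_to_lower is set */
	return safe_to_lower || new_ns > cached_ns;
}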
-static void hubbub21_program_stutter_watermarks(
+void hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks(
}
}
-static void hubbub21_program_pstate_watermarks(
+void hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
+void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
+ prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+}
static const struct hubbub_funcs hubbub21_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub21_init_dchub,
- .init_vm_ctx = NULL,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
+ .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
void hubbub21_construct(struct dcn20_hubbub *hubbub,
@@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index 6ff3cdb89178..c4840dfb1fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -36,6 +36,10 @@
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DCHVM_CTRL0), \
SR(DCHVM_MEM_CTRL), \
@@ -44,16 +48,9 @@
SR(DCHVM_RIOMMU_STAT0)
#define HUBBUB_REG_LIST_DCN21()\
- HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
HUBBUB_SR_WATERMARK_REG_LIST(), \
- HUBBUB_HVM_REG_LIST(), \
- SR(DCHUBBUB_CRC_CTRL), \
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
- SR(DCN_VM_AGP_BASE)
+ HUBBUB_HVM_REG_LIST()
#define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \
HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \
@@ -102,7 +99,7 @@
HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh)
#define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\
- HUBBUB_MASK_SH_LIST_HVM(mask_sh),\
+ HUBBUB_MASK_SH_LIST_HVM(mask_sh), \
HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -114,11 +111,28 @@
HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh)
void dcn21_dchvm_init(struct hubbub *hubbub);
+int hubbub21_init_dchub(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
void hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
+void hubbub21_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
void hubbub21_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index a00af513aa2b..2f5a5867e674 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -22,6 +22,8 @@
* Authors: AMD
*
*/
+
+#include "dcn10/dcn10_hubp.h"
#include "dcn21_hubp.h"
#include "dm_services.h"
@@ -202,7 +204,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
- .hubp_program_surface_config = hubp2_program_surface_config,
+ .hubp_program_surface_config = hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp21_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
new file mode 100644
index 000000000000..b25215cadf85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dce/dce_hwseq.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "vmid.h"
+#include "reg_helper.h"
+#include "hw/clk_mgr.h"
+
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+/* Temporary: read settings from registers; in the future the values will come from kmd directly */
+static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config,
+ struct dce_hwseq *hws)
+{
+ uint32_t page_table_base_hi;
+ uint32_t page_table_base_lo;
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo);
+
+ config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo;
+
+}
+
+static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+{
+ struct dcn_hubbub_phys_addr_config config;
+
+ config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
+ config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
+ config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
+ config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
+ config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
+ config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
+ config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
+ config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
+ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+
+ mmhub_update_page_table_config(&config, hws);
+
+ return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
+}
+
+// Workaround for Renoir s0i3: if the register is already programmed, bypass golden init.
+
+static bool dcn21_s0i3_golden_init_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t value = 0;
+
+ value = REG_READ(MICROSECOND_TIME_BASE_DIV);
+
+ return value != 0x00120464;
+}
+
+void dcn21_exit_optimized_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
+}
+
+void dcn21_optimize_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ true);
+}
+
+void dcn21_hw_sequencer_construct(struct dc *dc)
+{
+ dcn20_hw_sequencer_construct(dc);
+ dc->hwss.init_sys_ctx = dcn21_init_sys_ctx;
+ dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa;
+ dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state;
+ dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
new file mode 100644
index 000000000000..be67b62e6fb1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN21_H__
+#define __DC_HWSS_DCN21_H__
+
+struct dc;
+
+void dcn21_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
new file mode 100644
index 000000000000..e8a504ca5890
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include <linux/delay.h>
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn21_link_encoder.h"
+#include "stream_encoder.h"
+
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
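+/* Reference MPLL settings per DP link rate: RBR, HBR, HBR2 and HBR3.
+ * update_cfg_data() below picks one of these based on the requested link
+ * rate when the driver programs the PHY directly (bypassing VBIOS tables).
+ */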
+static struct mpll_cfg dcn21_mpll_cfg_ref[] = {
+ // RBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 238,
+ .mpllb_fracn_en = 0,
+ .mpllb_fracn_quot = 0,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 44237,
+ .mpllb_ssc_stepsize = 59454,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 2,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 2,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ // HBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 1,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR2
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR3
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 304,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 49152,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 55296,
+ .mpllb_ssc_stepsize = 74318,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 1,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 16,
+ .hdmi_pixel_clk_div = 0,
+ },
+};
+
+
+static bool update_cfg_data(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings,
+ struct dpcssys_phy_seq_cfg *cfg)
+{
+ int i;
+
+ cfg->load_sram_fw = false;
+ cfg->use_calibration_setting = true;
+
+ //TODO: need to implement a proper lane mapping for Renoir.
+ for (i = 0; i < 4; i++)
+ cfg->lane_en[i] = true;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[0];
+ break;
+ case LINK_RATE_HIGH:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[1];
+ break;
+ case LINK_RATE_HIGH2:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[2];
+ break;
+ case LINK_RATE_HIGH3:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[3];
+ break;
+ default:
+ DC_LOG_ERROR("%s: No supported link rate found %X!\n",
+ __func__, link_settings->link_rate);
+ return false;
+ }
+
+ return true;
+}
+
+void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
+
+ if (!value && link_settings->lane_count > LANE_COUNT_TWO)
+ link_settings->lane_count = LANE_COUNT_TWO;
+}
+
+bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
+
+ // if value == 1 alt mode is disabled, otherwise it is enabled
+ return !value;
+}
+
+bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ int value;
+
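+	/* When the PHY is shared with USB-C, DP alt mode must be active:
+	 * bail out if it is disabled, otherwise clear the disable ack,
+	 * wait 40us and re-check before enabling the DP reference clock.
+	 */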
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+
+ if (value == 1) {
+ ASSERT(0);
+ return false;
+ }
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 0);
+
+ udelay(40);
+
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+ if (value == 1) {
+ ASSERT(0);
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ return false;
+ }
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1);
+
+ return true;
+}
+
+
+
+static void dcn21_link_encoder_release_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0);
+
+}
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10;
+ struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg;
+
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
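+	/* The normal path goes through the VBIOS command table; only program
+	 * the PHY registers directly when avoid_vbios_exec_table is set.
+	 */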
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ return;
+ }
+
+ if (!update_cfg_data(enc10, link_settings, cfg))
+ return;
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
+
+}
+
+void dcn21_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+}
+
+void dcn21_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ dcn10_link_encoder_disable_output(enc, signal);
+
+ if (dc_is_dp_signal(signal))
+ dcn21_link_encoder_release_phy(enc);
+}
+
+
+static const struct link_encoder_funcs dcn21_link_enc_funcs = {
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ .read_state = link_enc2_read_state,
+#endif
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn21_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn21_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+};
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc21->enc10;
+
+ enc10->base.funcs = &dcn21_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether driver poll the I2C data pin
+ * while doing the DP sink detect
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+	 * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI
+	 * and LVDS. The preferred DIG assignment is decided by board design.
+	 * For DCE 8.0 there are at most 6 UNIPHYs; we assume board design
+	 * and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
+	 * By this, adding DIGG should not hurt DCE 8.0.
+	 * This lets DCE 8.1 share DCE 8.0 code as much as possible.
+	 */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc10->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
new file mode 100644
index 000000000000..1d7a1a51f13d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN21_H__
+#define __DC_LINK_ENCODER__DCN21_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+struct dcn21_link_encoder {
+ struct dcn10_link_encoder enc10;
+ struct dpcssys_phy_seq_cfg phy_seq_cfg;
+};
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index de182185fe1f..459bd9a5caed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "dc.h"
@@ -42,11 +40,11 @@
#include "irq/dcn21/irq_service_dcn21.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn20/dcn20_optc.h"
-#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn20/dcn20_opp.h"
#include "dcn20/dcn20_dsc.h"
-#include "dcn20/dcn20_link_encoder.h"
+#include "dcn21/dcn21_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
@@ -84,8 +82,9 @@
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
- .gpuvm_enable = 0,
- .hostvm_enable = 0,
+ .odm_capable = 1,
+ .gpuvm_enable = 1,
+ .hostvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 2,
@@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 4,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
- .dscclk_mhz = 318.334,
+ .dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
/*Extra state, no dispclk ramping*/
@@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 5,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
- .dscclk_mhz = 318.334,
+ .dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
},
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -350,6 +349,30 @@ static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_6)
};
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN10_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN20_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN20(_MASK)
+};
+
#ifdef CONFIG_DRM_AMD_DC_DMUB
static const struct dcn21_dmcub_registers dmcub_regs = {
DMCUB_REG_LIST_DCN()
@@ -628,6 +651,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(4),
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};
@@ -636,6 +667,11 @@ static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
+static int dcn21_populate_dml_pipes_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -683,7 +719,10 @@ static struct dce_aux *dcn21_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -726,11 +765,12 @@ static const struct resource_caps res_cap_rn = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
- .num_audio = 6, // 6 audio endpoints. 4 audio streams
+ .num_audio = 4, // 4 audio endpoints. 4 audio streams
.num_stream_encoder = 5,
.num_pll = 5, // maybe 3 because the last two used for USB-c
.num_dwb = 1,
.num_ddc = 5,
+ .num_vmid = 1,
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 3,
#endif
@@ -796,15 +836,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
- .force_single_disp_pipe_split = true,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 5120,/*upto 5K*/
+ .max_downscale_src_width = 3840,
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = true,
- .disable_48mhz_pwrdwn = true,
+ .disable_48mhz_pwrdwn = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -939,7 +979,7 @@ static void destruct(struct dcn21_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.pp_smu != NULL)
- dcn20_pp_smu_destroy(&pool->base.pp_smu);
+ dcn21_pp_smu_destroy(&pool->base.pp_smu);
}
@@ -969,11 +1009,35 @@ static void calculate_wm_set_for_vlevel(
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
#endif
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
}
+static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+{
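+	/* Apply any debug bb_overrides (given in ns) to the SoC bounding box
+	 * (stored in us); the fields are doubles, so wrap in FPU context.
+	 */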
+ kernel_fpu_begin();
+ if (dc->bb_overrides.sr_exit_time_ns) {
+ bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
+ bb->sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.urgent_latency_ns) {
+ bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns) {
+ bb->dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ }
+ kernel_fpu_end();
+}
+
void dcn21_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -988,6 +1052,8 @@ void dcn21_calculate_wm(
ASSERT(bw_params);
+ patch_bounding_box(dc, &context->bw_ctx.dml.soc);
+
for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -1021,7 +1087,7 @@ void dcn21_calculate_wm(
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
&context->res_ctx, pipes);
else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
+ pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
&context->res_ctx, pipes);
}
@@ -1271,6 +1337,12 @@ struct display_stream_compressor *dcn21_dsc_create(
static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ /*
+	TODO: Fix this function to calculate correct values.
+ There are known issues with this function currently
+ that will need to be investigated. Use hardcoded known good values for now.
+
+
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
int i;
@@ -1278,7 +1350,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
- dcn2_1_soc.num_states = 0;
for (i = 0; i < clk_table->num_entries; i++) {
@@ -1286,10 +1357,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- /* This is probably wrong, TODO: find correct calculation */
dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
- dcn2_1_soc.num_states++;
}
+	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
+ dcn2_1_soc.num_states = i;
+ */
}
/* Temporary Place holder until we can get them from fuse */
@@ -1317,32 +1389,42 @@ static struct dpm_clocks dummy_clocks = {
};
-enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
+static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
struct dpm_clocks *clock_table)
{
*clock_table = dummy_clocks;
return PP_SMU_RESULT_OK;
}
-struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
- pp_smu->ctx.ver = PP_SMU_VER_RN;
+ if (!pp_smu)
+ return pp_smu;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) {
+ pp_smu->ctx.ver = PP_SMU_VER_RN;
+ pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
+ pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ } else {
- pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
- pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ dm_pp_get_funcs(ctx, pp_smu);
+
+ if (pp_smu->ctx.ver != PP_SMU_VER_RN)
+ pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
+ }
return pp_smu;
}
-void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -1400,6 +1482,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ hws->wa.DEGVIDCN21 = true;
}
return hws;
}
@@ -1418,10 +1501,152 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn21_hwseq_create,
};
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN10_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+ link_regs(2, C),
+ link_regs(3, D),
+ link_regs(4, E),
+};
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
+static struct link_encoder *dcn21_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn21_link_encoder *enc21 =
+ kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
+ int link_regs_id;
+
+ if (!enc21)
+ return NULL;
+
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
+ dcn21_link_encoder_construct(enc21,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[link_regs_id],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc21->enc10.base;
+}
+#define CTX ctx
+
+#define REG(reg_name) \
+ (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+	/* RV1 supports a max of 4 pipes */
+ value = value & 0xf;
+ return value;
+}
+
+static int dcn21_populate_dml_pipes_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
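+	/* Start from the DCN20 population, then force host/GPU VM on for every
+	 * active pipe to match the gpuvm/hostvm settings enabled in dcn2_1_ip.
+	 */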
+ uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes);
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipes[i].pipe.src.hostvm = 1;
+ pipes[i].pipe.src.gpuvm = 1;
+ }
+
+ return pipe_cnt;
+}
+
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
- .link_enc_create = dcn20_link_encoder_create,
+ .link_enc_create = dcn21_link_encoder_create,
.validate_bandwidth = dcn21_validate_bandwidth,
+ .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
@@ -1437,9 +1662,11 @@ static bool construct(
struct dc *dc,
struct dcn21_resource_pool *pool)
{
- int i;
+ int i, j;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ uint32_t num_pipes;
ctx->dc_bios->regs = &bios_regs;
@@ -1457,7 +1684,9 @@ static bool construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
- pool->base.pipe_count = 4;
+ /* max pipe num for ASIC before check pipe fuses */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
@@ -1467,6 +1696,7 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1516,6 +1746,26 @@ static bool construct(
goto create_fail;
}
+ pool->base.dmcu = dcn20_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
#ifdef CONFIG_DRM_AMD_DC_DMUB
pool->base.dmcub = dcn21_dmcub_create(ctx,
&dmcub_regs,
@@ -1530,6 +1780,14 @@ static bool construct(
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
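+	/* Each set bit in pipe_fuses marks a fused-off pipe; reduce the DML
+	 * DPP/OTG counts to the number of usable pipes before DML init.
+	 */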
+ num_pipes = dcn2_1_ip.max_num_dpp;
+
+ for (i = 0; i < dcn2_1_ip.max_num_dpp; i++)
+ if (pipe_fuses & 1 << i)
+ num_pipes--;
+ dcn2_1_ip.max_num_dpp = num_pipes;
+ dcn2_1_ip.max_num_otg = num_pipes;
+
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
init_data.ctx = dc->ctx;
@@ -1537,8 +1795,15 @@ static bool construct(
if (!pool->base.irqs)
goto create_fail;
+ j = 0;
/* mem input -> ipp -> dpp -> opp -> TG */
for (i = 0; i < pool->base.pipe_count; i++) {
+ /* if pipe is disabled, skip instance of HW pipe,
+ * i.e, skip ASIC register instance
+ */
+ if ((pipe_fuses & (1 << i)) != 0)
+ continue;
+
pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
@@ -1562,6 +1827,23 @@ static bool construct(
"DC: failed to create dpps!\n");
goto create_fail;
}
+
+ pool->base.opps[i] = dcn21_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.timing_generators[i] = dcn21_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ j++;
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
@@ -1582,27 +1864,9 @@ static bool construct(
pool->base.sw_i2cs[i] = NULL;
}
- for (i = 0; i < pool->base.res_cap->num_opp; i++) {
- pool->base.opps[i] = dcn21_opp_create(ctx, i);
- if (pool->base.opps[i] == NULL) {
- BREAK_TO_DEBUGGER();
- dm_error(
- "DC: failed to create output pixel processor!\n");
- goto create_fail;
- }
- }
-
- for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
- pool->base.timing_generators[i] = dcn21_timing_generator_create(
- ctx, i);
- if (pool->base.timing_generators[i] == NULL) {
- BREAK_TO_DEBUGGER();
- dm_error("DC: failed to create tg!\n");
- goto create_fail;
- }
- }
-
- pool->base.timing_generator_count = i;
+ pool->base.timing_generator_count = j;
+ pool->base.pipe_count = j;
+ pool->base.mpcc_count = j;
pool->base.mpc = dcn21_mpc_create(ctx);
if (pool->base.mpc == NULL) {
@@ -1645,7 +1909,7 @@ static bool construct(
&res_create_funcs : &res_create_maximus_funcs)))
goto create_fail;
- dcn20_hw_sequencer_construct(dc);
+ dcn21_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
new file mode 100644
index 000000000000..626d22d437f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DM_CP_PSP_IF__H
+#define DM_CP_PSP_IF__H
+
+struct dc_link;
+
+struct cp_psp_stream_config {
+ uint8_t otg_inst;
+ uint8_t link_enc_inst;
+ uint8_t stream_enc_inst;
+ void *dm_stream_ctx;
+ bool dpms_off;
+};
+
+struct cp_psp_funcs {
+ void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config);
+};
+
+struct cp_psp {
+ void *handle;
+ struct cp_psp_funcs funcs;
+};
+
+
+#endif /* DM_CP_PSP_IF__H */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index b6b4333737f2..94b75e942607 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
/*
* Polls for ACT (allocation change trigger) handled and
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index c03a441ee638..ef7df9ef6d7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -249,10 +249,8 @@ struct pp_smu_funcs_nv {
};
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
-
#define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
-#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4
+#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8
#define PP_SMU_NUM_FCLK_DPM_LEVELS 4
#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
@@ -288,7 +286,6 @@ struct pp_smu_funcs_rn {
enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp,
struct dpm_clocks *clock_table);
};
-#endif
struct pp_smu_funcs {
struct pp_smu ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 0fafd693ffb4..3c70dd577292 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -38,6 +38,7 @@
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_DSC_IMAGE_WIDTH 5184
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
@@ -2610,7 +2611,8 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
@@ -3901,6 +3903,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
@@ -3925,7 +3931,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ if (mode_lib->vba.ODMCapability == false ||
+ (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+ && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) {
locals->ODMCombineEnablePerState[i][k] = false;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 878bf4782ce6..2c7455e22a65 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -959,7 +959,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = 0; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index ed8bf5f723c9..1e6aeb1bd2bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = 0; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 3b6ed60dcd35..ba77957aefe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -65,6 +65,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN21_MAX_DSC_IMAGE_WIDTH 5184
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3379,6 +3380,8 @@ static unsigned int TruncToValidBPP(
return 30;
else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+ return 18;
else
return BPP_INVALID;
}
@@ -3936,6 +3939,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
@@ -3965,7 +3972,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ if (mode_lib->vba.ODMCapability == false ||
+ (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+ && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) {
locals->ODMCombineEnablePerState[i][k] = false;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index f4c1ef9046bf..cfacd6027467 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -269,7 +269,7 @@ struct writeback_st {
struct _vcs_dpi_display_output_params_st {
int dp_lanes;
- int output_bpp;
+ double output_bpp;
int dsc_enable;
int wb_enable;
int num_active_wb;
@@ -318,6 +318,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
+ unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 65cf4edddaff..7f9a5621922f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -375,6 +375,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
+ mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -432,8 +433,12 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
dst->odm_combine;
+ mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] =
+ dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) (dout->output_format);
+ mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
+ dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 91decac50557..1540ffbe3979 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -387,6 +387,7 @@ struct vba_vars_st {
/* vba mode support */
/*inputs*/
+ bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index ad8571f5a142..4c3e9cc30167 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -243,7 +243,7 @@ void dml1_extract_rq_regs(
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- /* FIXME: take the max between luma, chroma chunk size?
+ /* TODO: take the max between luma, chroma chunk size?
* okay for now, as we are setting chunk_bytes to 8kb anyways
*/
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
@@ -602,7 +602,7 @@ static void get_surf_rq_param(
unsigned int log2_dpte_group_length;
unsigned int func_meta_row_height, func_dpte_row_height;
- /* FIXME check if ppe apply for both luma and chroma in 422 case */
+ /* TODO check if ppe apply for both luma and chroma in 422 case */
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params(
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
- prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */
+ prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */
min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
@@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params(
dcc_en = e2e_pipe_param.pipe.src.dcc;
dual_plane = is_dual_plane(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format);
- mode_422 = 0; /* FIXME */
+ mode_422 = 0; /* TODO */
access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
bytes_per_element_l = get_bytes_per_element(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format,
@@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params(
cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
* (double) cur0_req_width;
cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
- hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */
+ hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */
if (vratio_pre_l <= 1.0) {
refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 5995bcdfed54..e60f760585e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -23,8 +23,7 @@
*/
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dc.h"
-#include "core_types.h"
+#include "dc_hw_types.h"
#include "dsc.h"
#include <drm/drm_dp_helper.h>
@@ -47,6 +46,59 @@ const struct dc_dsc_policy dsc_policy = {
/* This module's internal functions */
+static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
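+	/* For DSC timings, dsc_cfg.bits_per_pixel is in 1/16-bpp units and
+	 * pix_clk is in 100Hz units, hence the divide by 160 (rounded up).
+	 */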
+ if (timing->flags.DSC) {
+ kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
+ kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ return kbps;
+ }
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+		/* Only Y-Only uses a single channel (1/3 the bandwidth of RGB) */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+
+}
+
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
{
@@ -178,12 +230,11 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp
}
static void get_dsc_enc_caps(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
// This is a static HW query, so we can use any DSC
- struct display_stream_compressor *dsc = dc->res_pool->dscs[0];
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
if (dsc)
@@ -290,7 +341,7 @@ static void get_dsc_bandwidth_range(
struct dc_dsc_bw_range *range)
{
/* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+ range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing);
/* max dsc target bpp */
range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz);
@@ -512,6 +563,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ int min_slice_height_override,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -680,7 +732,10 @@ static bool setup_dsc_config(
// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
- slice_height = min(dsc_policy.min_sice_height, pic_height);
+ if (min_slice_height_override == 0)
+ slice_height = min(dsc_policy.min_sice_height, pic_height);
+ else
+ slice_height = min(min_slice_height_override, pic_height);
while (slice_height < pic_height && (pic_height % slice_height != 0 ||
(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
@@ -802,7 +857,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
* If DSC is not possible, leave '*range' untouched.
*/
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_bpp,
const uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -814,16 +870,14 @@ bool dc_dsc_compute_bandwidth_range(
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
- is_dsc_possible = setup_dsc_config(dsc_sink_caps,
- &dsc_enc_caps,
- 0,
- timing, &config);
+ is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
+ dsc_min_slice_height_override, &config);
if (is_dsc_possible)
get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range);
@@ -832,8 +886,9 @@ bool dc_dsc_compute_bandwidth_range(
}
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@@ -841,11 +896,11 @@ bool dc_dsc_compute_config(
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, dsc_cfg);
+ timing, dsc_min_slice_height_override, dsc_cfg);
return is_dsc_possible;
}
#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index ca51e83f8764..76c4b12d6824 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -177,7 +177,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
{
float bpp_group;
float initial_xmit_delay_factor;
- int source_bpp;
int padding_pixels;
int i;
@@ -217,8 +216,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->initial_xmit_delay++;
}
- source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5);
-
rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_det_thresh = 2 << (bpc - 8);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index f8f85490e77e..f67c18375bfd 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -321,8 +321,6 @@ void dal_gpio_destroy(
return;
}
- dal_gpio_close(*gpio);
-
switch ((*gpio)->id) {
case GPIO_ID_DDC_DATA:
kfree((*gpio)->hw_container.ddc);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index d03165e71dc6..92280cc05e2d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -169,7 +169,6 @@ void dal_gpio_destroy_generic_mux(
return;
}
- dal_gpio_close(*mux);
dal_gpio_destroy(mux);
kfree(*mux);
@@ -460,7 +459,6 @@ void dal_gpio_destroy_irq(
return;
}
- dal_gpio_close(*irq);
dal_gpio_destroy(irq);
kfree(*irq);
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
new file mode 100644
index 000000000000..4170b6eb9ec0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
@@ -0,0 +1,28 @@
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'hdcp' sub-component of DAL.
+#
+
+HDCP_MSG = hdcp_msg.o
+
+AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG)
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
new file mode 100644
index 000000000000..6f730b5bfe42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "include/hdcp_types.h"
+#include "include/i2caux_interface.h"
+#include "include/signal_types.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+#define HDCP14_KSV_SIZE 5
+#define HDCP14_MAX_KSV_FIFO_SIZE (127 * HDCP14_KSV_SIZE)
+
+static const bool hdcp_cmd_is_read[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = true,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = true,
+ [HDCP_MESSAGE_ID_READ_PJ] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = false,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = false,
+ [HDCP_MESSAGE_ID_WRITE_AN] = false,
+ [HDCP_MESSAGE_ID_READ_VH_X] = true,
+ [HDCP_MESSAGE_ID_READ_VH_0] = true,
+ [HDCP_MESSAGE_ID_READ_VH_1] = true,
+ [HDCP_MESSAGE_ID_READ_VH_2] = true,
+ [HDCP_MESSAGE_ID_READ_VH_3] = true,
+ [HDCP_MESSAGE_ID_READ_VH_4] = true,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = true,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = true,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true,
+ [HDCP_MESSAGE_ID_READ_BINFO] = true,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = true,
+ [HDCP_MESSAGE_ID_RX_CAPS] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = true,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
+};
+
+static const uint8_t hdcp_i2c_offsets[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
+};
+
+struct protection_properties {
+ bool supported;
+ bool (*process_transaction)(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info);
+};
+
+static const struct protection_properties non_supported_protection = {
+ .supported = false
+};
+
+static bool hdmi_14_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ uint8_t *buff = NULL;
+ bool result;
+ const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1 */
+ const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1 */
+ struct i2c_command i2c_command;
+ uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
+ struct i2c_payload i2c_payloads[] = {
+ { true, 0, 1, &offset },
+ /* actual hdcp payload, will be filled later, zeroed for now */
+ { 0 }
+ };
+
+ switch (message_info->link) {
+ case HDCP_LINK_SECONDARY:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_secondary;
+ break;
+ case HDCP_LINK_PRIMARY:
+ default:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_primary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_primary;
+ break;
+ }
+
+ if (hdcp_cmd_is_read[message_info->msg_id]) {
+ i2c_payloads[1].write = false;
+ i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads);
+ i2c_payloads[1].length = message_info->length;
+ i2c_payloads[1].data = message_info->data;
+ } else {
+ i2c_command.number_of_payloads = 1;
+ buff = kzalloc(message_info->length + 1, GFP_KERNEL);
+
+ if (!buff)
+ return false;
+
+ buff[0] = offset;
+ memmove(&buff[1], message_info->data, message_info->length);
+ i2c_payloads[0].length = message_info->length + 1;
+ i2c_payloads[0].data = buff;
+ }
+
+ i2c_command.payloads = i2c_payloads;
+ i2c_command.engine = I2C_COMMAND_ENGINE_HW; /* only the HW engine is used */
+ i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ result = dm_helpers_submit_i2c(
+ link->ctx,
+ link,
+ &i2c_command);
+ kfree(buff);
+
+ return result;
+}
+
+static const struct protection_properties hdmi_14_protection = {
+ .supported = true,
+ .process_transaction = hdmi_14_process_transaction
+};
+
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+ [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
+};
+
+static bool dpcd_access_helper(
+ struct dc_link *link,
+ uint32_t length,
+ uint8_t *data,
+ uint32_t dpcd_addr,
+ bool is_read)
+{
+ enum dc_status status;
+ uint32_t cur_length = 0;
+ uint32_t offset = 0;
+ uint32_t ksv_read_size = 0x6803b - 0x6802c;
+
+ /* KSV FIFO reads need to be handled in repeated chunks */
+ if (dpcd_addr == 0x6802c) {
+ if (length % HDCP14_KSV_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_KSV_SIZE);
+ }
+ if (length > HDCP14_MAX_KSV_FIFO_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_MAX_KSV_FIFO_SIZE);
+ }
+
+ DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n",
+ __func__,
+ length / HDCP14_KSV_SIZE);
+
+ while (length > 0) {
+ if (length > ksv_read_size) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ ksv_read_size);
+
+ data += ksv_read_size;
+ length -= ksv_read_size;
+ } else {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ length);
+
+ data += length;
+ length = 0;
+ }
+
+ if (status != DC_OK)
+ return false;
+ }
+ } else {
+ while (length > 0) {
+ if (length > DEFAULT_AUX_MAX_DATA_SIZE)
+ cur_length = DEFAULT_AUX_MAX_DATA_SIZE;
+ else
+ cur_length = length;
+
+ if (is_read) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ } else {
+ status = core_link_write_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ }
+
+ if (status != DC_OK)
+ return false;
+
+ length -= cur_length;
+ offset += cur_length;
+ }
+ }
+ return true;
+}
+
+static bool dp_11_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ return dpcd_access_helper(
+ link,
+ message_info->length,
+ message_info->data,
+ hdcp_dpcd_addrs[message_info->msg_id],
+ hdcp_cmd_is_read[message_info->msg_id]);
+}
+
+static const struct protection_properties dp_11_protection = {
+ .supported = true,
+ .process_transaction = dp_11_process_transaction
+};
+
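For reference, dp_11_process_transaction() above simply forwards a message to dpcd_access_helper() using the hdcp_dpcd_addrs/hdcp_cmd_is_read tables. Below is a hedged usage sketch, illustrative only (dp_11_protection is static to this file, so a real caller would reach it through a lookup by signal type): reading the 5-byte BKSV from a DisplayPort receiver.

	/* Hedged usage sketch (illustrative only, not part of this patch). */
	static bool example_read_bksv_dp(struct dc_link *link, uint8_t bksv[5])
	{
		struct hdcp_protection_message msg = {
			.version = HDCP_VERSION_14,
			.link = HDCP_LINK_PRIMARY,
			.msg_id = HDCP_MESSAGE_ID_READ_BKSV,
			.length = 5,
			.data = bksv,
		};

		return dp_11_protection.process_transaction(link, &msg);
	}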
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f189307750ab..a831079607cd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -52,7 +52,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#include "clock_source.h"
#include "audio.h"
#include "dm_pp_smu.h"
-
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
/************ link *****************/
struct link_init_data {
@@ -231,6 +233,7 @@ struct resource_pool {
struct dcn_fe_bandwidth {
int dppclk_khz;
+
};
struct stream_resource {
@@ -395,10 +398,6 @@ struct dc_state {
struct clk_mgr *clk_mgr;
- struct {
- bool full_update_needed : 1;
- } commit_hints;
-
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index b1fab251c09b..14716ba35662 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_channel_operation_result *operation_result);
@@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
+enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout);
+
void dal_ddc_service_write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 08a4df2c61a8..045138dbdccb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -28,6 +28,8 @@
#define LINK_TRAINING_ATTEMPTS 4
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /* us */
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /* us */
struct dc_link;
struct dc_stream_state;
@@ -43,6 +45,9 @@ bool dp_verify_link_cap_with_retries(
struct dc_link_settings *known_limit_link_setting,
int attempts);
+bool dp_verify_mst_link_cap(
+ struct dc_link *link);
+
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index e79cd4e92919..e77b3a76766d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -140,6 +140,9 @@ struct write_command_context {
struct aux_engine_funcs {
+ bool (*configure_timeout)(
+ struct ddc_service *ddc,
+ uint32_t timeout);
void (*destroy)(
struct aux_engine **ptr);
bool (*acquire_engine)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 76f9ad1b23df..4e18e77dcf42 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -47,7 +47,7 @@
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
/* Will these bw structures be ASIC specific? */
-#define MAX_NUM_DPM_LVL 4
+#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
@@ -180,13 +180,19 @@ struct clk_mgr_funcs {
struct dc_state *context,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+
+ bool (*are_clock_states_equal) (struct dc_clocks *a,
+ struct dc_clocks *b);
+ void (*notify_wm_ranges)(struct clk_mgr *clk_mgr);
};
struct clk_mgr {
struct dc_context *ctx;
struct clk_mgr_funcs *funcs;
struct dc_clocks clks;
+ bool psr_allow_active_cache;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes
+ int dentist_vco_freq_khz;
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
struct clk_bw_params *bw_params;
#endif
@@ -199,4 +205,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr);
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
#endif /* __DAL_CLK_MGR_H__ */
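The new clk_mgr_funcs hooks (are_clock_states_equal, notify_wm_ranges) let each ASIC-specific clock manager decide when two clock states match before reprogramming. A hedged sketch of a minimal comparator is below; the dc_clocks field names are assumptions for the example, and a real implementation may compare a different set of clocks.

	/* Hedged sketch (illustrative only) of an are_clock_states_equal() hook. */
	static bool example_are_clock_states_equal(struct dc_clocks *a,
						   struct dc_clocks *b)
	{
		return a->dispclk_khz == b->dispclk_khz &&
		       a->dppclk_khz == b->dppclk_khz &&
		       a->dcfclk_khz == b->dcfclk_khz &&
		       a->fclk_khz == b->fclk_khz;
	}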
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 7dd46eb96d67..a17a77192690 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -184,6 +184,21 @@ struct clk_mgr_registers {
uint32_t MP1_SMN_C2PMSG_91;
};
+enum clock_type {
+ clock_type_dispclk = 1,
+ clock_type_dcfclk,
+ clock_type_socclk,
+ clock_type_pixelclk,
+ clock_type_phyclk,
+ clock_type_dppclk,
+ clock_type_fclk,
+ clock_type_dcfdsclk,
+ clock_type_dscclk,
+ clock_type_uclk,
+ clock_type_dramclk,
+};
+
+
struct state_dependent_clocks {
int display_clk_khz;
int pixel_clk_khz;
@@ -210,8 +225,6 @@ struct clk_mgr_internal {
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
/*TODO: figure out which of the below fields should be here vs in asic specific portion */
- int dentist_vco_freq_khz;
-
/* Cache the status of DFS-bypass feature*/
bool dfs_bypass_enabled;
/* True if the DFS-bypass feature is enabled and active. */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index d8e744f366e5..05ee5295d2c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -38,8 +38,7 @@ struct dccg {
struct dccg_funcs {
void (*update_dpp_dto)(struct dccg *dccg,
int dpp_inst,
- int req_dppclk,
- bool reduce_divider_only);
+ int req_dppclk);
void (*get_dccg_ref_freq)(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index a6297219d7fc..c81a17aeaa25 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -147,6 +147,7 @@ struct hubbub_funcs {
bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub);
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
+ void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index 1ddb1c6fa149..c6ff3d78b435 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -28,7 +28,11 @@
#include "dc_dsc.h"
#include "dc_hw_types.h"
-#include "dc_dp_types.h"
+#include "dc_types.h"
+/* do not include any other headers
+ * or else it might break Edid Utility functionality.
+ */
+
/* Input parameters for configuring DSC from the outside of DSC */
struct dsc_config {
@@ -81,12 +85,6 @@ struct dsc_enc_caps {
uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */
};
-struct display_stream_compressor {
- const struct dsc_funcs *funcs;
- struct dc_context *ctx;
- int inst;
-};
-
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index abb4e4237fb6..b21909216fb6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -184,6 +184,10 @@ struct link_encoder_funcs {
bool (*fec_is_active)(struct link_encoder *enc);
#endif
bool (*is_in_alt_mode) (struct link_encoder *enc);
+
+ void (*get_max_link_cap)(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
enum signal_type (*get_dig_mode)(
struct link_encoder *enc);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index e8668388581b..67b610d6d91f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -43,6 +43,7 @@ struct dcn_watermarks {
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
uint32_t frac_urg_bw_nom;
uint32_t frac_urg_bw_flip;
+ int32_t urgent_latency_ns;
#endif
struct cstate_pstate_watermarks_st cstate_pstate;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 957e9047381a..18def2b6fafe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -208,6 +208,7 @@ struct output_pixel_processor {
struct mpc_tree mpc_tree_params;
bool mpcc_disconnect_pending[MAX_PIPES];
const struct opp_funcs *funcs;
+ uint32_t dyn_expansion;
};
enum fmt_stereo_action {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe9b7a10a1c3..6305e388612a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -214,6 +214,11 @@ struct stream_encoder_funcs {
unsigned int (*dig_source_otg)(
struct stream_encoder *enc);
+ bool (*dp_get_pixel_format)(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 6196cc32356e..27c73caf74ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -261,6 +261,8 @@ struct timing_generator_funcs {
void (*program_manual_trigger)(struct timing_generator *optc);
void (*setup_manual_trigger)(struct timing_generator *optc);
+ bool (*get_hw_timing)(struct timing_generator *optc,
+ struct dc_crtc_timing *hw_crtc_timing);
void (*set_vtg_params)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 3a938cd414ea..d39c1e11def5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -114,6 +114,9 @@ struct hw_sequencer_funcs {
int opp_id);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ void (*program_front_end_for_ctx)(
+ struct dc *dc,
+ struct dc_state *context);
void (*program_triplebuffer)(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -229,6 +232,13 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct dc_state *context);
+ void (*exit_optimized_pwr_state)(
+ const struct dc *dc,
+ struct dc_state *context);
+ void (*optimize_pwr_state)(
+ const struct dc *dc,
+ struct dc_state *context);
+
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool (*update_bandwidth)(
struct dc *dc,
@@ -321,10 +331,12 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*update_writeback)(struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void (*enable_writeback)(struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
#endif
@@ -337,6 +349,9 @@ struct hw_sequencer_funcs {
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ bool (*s0i3_golden_init_wa)(struct dc *dc);
+#endif
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 18961707db23..9ad49da50a17 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -31,6 +31,8 @@
#define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9
#define DP_BRANCH_DEVICE_ID_00001A 0x00001A
#define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1
+#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24
+#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h
new file mode 100644
index 000000000000..f31e6befc8d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h
@@ -0,0 +1,96 @@
+/*
+* Copyright 2019 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*
+* Authors: AMD
+*
+*/
+
+#ifndef __DC_HDCP_TYPES_H__
+#define __DC_HDCP_TYPES_H__
+
+enum hdcp_message_id {
+ HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ HDCP_MESSAGE_ID_READ_BKSV = 0,
+ /* HDMI is called Ri', DP is called R0' */
+ HDCP_MESSAGE_ID_READ_RI_R0,
+ HDCP_MESSAGE_ID_READ_PJ,
+ HDCP_MESSAGE_ID_WRITE_AKSV,
+ HDCP_MESSAGE_ID_WRITE_AINFO,
+ HDCP_MESSAGE_ID_WRITE_AN,
+ HDCP_MESSAGE_ID_READ_VH_X,
+ HDCP_MESSAGE_ID_READ_VH_0,
+ HDCP_MESSAGE_ID_READ_VH_1,
+ HDCP_MESSAGE_ID_READ_VH_2,
+ HDCP_MESSAGE_ID_READ_VH_3,
+ HDCP_MESSAGE_ID_READ_VH_4,
+ HDCP_MESSAGE_ID_READ_BCAPS,
+ HDCP_MESSAGE_ID_READ_BSTATUS,
+ HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ HDCP_MESSAGE_ID_READ_BINFO,
+
+ /* HDCP 2.2 */
+
+ HDCP_MESSAGE_ID_HDCP2VERSION,
+ HDCP_MESSAGE_ID_RX_CAPS,
+ HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ HDCP_MESSAGE_ID_READ_RXSTATUS,
+ HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+
+ HDCP_MESSAGE_ID_MAX
+};
+
+enum hdcp_version {
+ HDCP_Unknown = 0,
+ HDCP_VERSION_14,
+ HDCP_VERSION_22,
+};
+
+enum hdcp_link {
+ HDCP_LINK_PRIMARY,
+ HDCP_LINK_SECONDARY
+};
+
+struct hdcp_protection_message {
+ enum hdcp_version version;
+ /* relevant only for DVI */
+ enum hdcp_link link;
+ enum hdcp_message_id msg_id;
+ uint32_t length;
+ uint8_t max_retries;
+ uint8_t *data;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 2d8f14b69117..1de4805cb8c7 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -373,7 +373,42 @@ static struct fixed31_32 translate_from_linear_space(
return dc_fixpt_mul(args->arg, args->a1);
}
-static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+
+static struct fixed31_32 translate_from_linear_space_long(
+ struct translate_from_linear_space_args *args)
+{
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
+
+ if (dc_fixpt_lt(one, args->arg))
+ return one;
+
+ if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0)))
+ return dc_fixpt_sub(
+ args->a2,
+ dc_fixpt_mul(
+ dc_fixpt_add(
+ one,
+ args->a3),
+ dc_fixpt_pow(
+ dc_fixpt_neg(args->arg),
+ dc_fixpt_recip(args->gamma))));
+ else if (dc_fixpt_le(args->a0, args->arg))
+ return dc_fixpt_sub(
+ dc_fixpt_mul(
+ dc_fixpt_add(
+ one,
+ args->a3),
+ dc_fixpt_pow(
+ args->arg,
+ dc_fixpt_recip(args->gamma))),
+ args->a2);
+ else
+ return dc_fixpt_mul(
+ args->arg,
+ args->a1);
+}
+
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf)
{
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
@@ -384,9 +419,13 @@ static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
scratch_gamma_args.a3 = dc_fixpt_zero;
scratch_gamma_args.gamma = gamma;
+ if (use_eetf)
+ return translate_from_linear_space_long(&scratch_gamma_args);
+
return translate_from_linear_space(&scratch_gamma_args);
}
+
static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
@@ -920,11 +959,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (fs_params->max_display < 100) // cap at 100 at the top
max_display = dc_fixpt_from_int(100);
- if (fs_params->min_content < fs_params->min_display)
- use_eetf = true;
- else
- min_content = min_display;
-
+ /* only max is used; we don't adjust min luminance */
if (fs_params->max_content > fs_params->max_display)
use_eetf = true;
else
@@ -950,7 +985,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
output = dc_fixpt_zero;
else
- output = calculate_gamma22(scaledX);
+ output = calculate_gamma22(scaledX, use_eetf);
rgb->r = output;
rgb->g = output;
@@ -2173,5 +2208,3 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
rgb_degamma_alloc_fail:
return ret;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index ec70c9b12e1a..16e69bbc69aa 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
@@ -234,6 +234,10 @@ static void update_v_total_for_static_ramp(
current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
+ /* v_total cannot be less than nominal */
+ if (v_total < stream->timing.v_total)
+ v_total = stream->timing.v_total;
+
in_out_vrr->adjust.v_total_min = v_total;
in_out_vrr->adjust.v_total_max = v_total;
}
@@ -250,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
/* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.btr_active = true;
+ }
}
/* BTR set to "not active" so disengage */
@@ -323,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
+ (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
+ mid_point_frames_floor < 2)) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -339,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- in_out_vrr->max_duration_in_us) &&
+ max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -743,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
+ /* Rounded to the nearest Hz */
+ nominal_field_rate_in_uhz = 1000000ULL *
+ div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
+
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
@@ -788,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
in_out_vrr->supported = true;
}
@@ -803,6 +816,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
+
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
@@ -975,13 +989,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
unsigned int *inserted_frames,
unsigned int *inserted_duration_in_us)
{
- struct core_freesync *core_freesync = NULL;
-
if (mod_freesync == NULL)
return;
- core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
-
if (vrr->supported) {
*v_total_min = vrr->adjust.v_total_min;
*v_total_max = vrr->adjust.v_total_max;
@@ -996,14 +1006,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream)
{
unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned int total = stream->timing.h_total * stream->timing.v_total;
- /* Calculate nominal field rate for stream */
+ /* Calculate nominal field rate for stream in uHz (integer division) */
nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.h_total);
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.v_total);
+
+ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
return nominal_field_rate_in_uhz;
}
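The added rounding in mod_freesync_build_vrr_params() snaps the nominal field rate (in uHz) to the nearest whole Hz before the refresh range is derived. A worked example with assumed timing values:

	/*
	 * Worked example with assumed timings (1920x1080, h_total 2200,
	 * v_total 1125, pixel clock ~148.499 MHz):
	 *   raw rate = 148499000 * 1000000 / (2200 * 1125) = 59999595 uHz
	 *   rounded  = ((59999595 + 500000) / 1000000) * 1000000 = 60000000 uHz
	 * i.e. a slightly-off pixel clock still reports a nominal 60 Hz field
	 * rate, which keeps the derived min/max refresh range stable.
	 */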
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
new file mode 100644
index 000000000000..1c3c6d47973a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'hdcp' sub-module of DAL.
+#
+
+HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \
+ hdcp1_execution.o hdcp1_transition.o
+
+AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP))
+#$(info ************ DAL-HDCP_MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
new file mode 100644
index 000000000000..d7ac445dec6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+static void push_error_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_status status)
+{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
+ if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
+ trace->errors[trace->error_count].status = status;
+ trace->errors[trace->error_count].state_id = hdcp->state.id;
+ trace->error_count++;
+ HDCP_ERROR_TRACE(hdcp, status);
+ }
+
+ hdcp->connection.hdcp1_retry_count++;
+}
+
+static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
+{
+ int i, display_enabled = 0;
+
+ /* if all displays on the link are disabled, hdcp is not desired */
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->connection.displays[i].adjust.disable) {
+ display_enabled = 1;
+ break;
+ }
+ }
+
+ return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
+ display_enabled && !hdcp->connection.link.adjust.hdcp1.disable;
+}
+
+static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* initialize transition input */
+ memset(input, 0, sizeof(union mod_hdcp_transition_input));
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* update topology event if hdcp is not desired */
+ status = mod_hdcp_add_display_topology(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_execution(hdcp,
+ event_ctx, &input->hdcp1);
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->unexpected_event)
+ goto out;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (is_dp_hdcp(hdcp))
+ if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ else if (is_hdmi_dvi_sl_hdcp(hdcp))
+ if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ increment_stay_counter(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_hdcp1(hdcp)) {
+ if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN)
+ mod_hdcp_hdcp1_destroy_session(hdcp);
+
+ if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ }
+
+out:
+ /* stop callback and watchdog requests from previous authentication */
+ output->watchdog_timer_stop = 1;
+ output->callback_stop = 1;
+ return status;
+}
+
+static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (current_state(hdcp) != HDCP_UNINITIALIZED) {
+ HDCP_TOP_RESET_CONN_TRACE(hdcp);
+ set_state_id(hdcp, output, HDCP_UNINITIALIZED);
+ }
+ memset(&hdcp->connection, 0, sizeof(hdcp->connection));
+out:
+ return status;
+}
+
+/*
+ * Implementation of functions in mod_hdcp.h
+ */
+size_t mod_hdcp_get_memory_size(void)
+{
+ return sizeof(struct mod_hdcp);
+}
+
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config)
+{
+ struct mod_hdcp_output output;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ memset(&output, 0, sizeof(output));
+ hdcp->config = *config;
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, &output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_output output;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ memset(&output, 0, sizeof(output));
+ status = reset_connection(hdcp, &output);
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ else
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display_container = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* skip inactive display */
+ if (display->state != MOD_HDCP_DISPLAY_ACTIVE) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* check existing display container */
+ if (get_active_display_at_index(hdcp, display->index)) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* find an empty display container */
+ display_container = get_empty_display_container(hdcp);
+ if (!display_container) {
+ status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND;
+ goto out;
+ }
+
+ /* reset existing authentication status */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+
+ /* reset retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication */
+ if (current_state(hdcp) != HDCP_INITIALIZED)
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* stop current authentication */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* remove display */
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+
+ /* clear retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication for remaining displays */
+ if (get_active_display_count(hdcp) > 0)
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
+ output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ goto out;
+ }
+
+ /* populate query */
+ query->link = &hdcp->connection.link;
+ query->display = display;
+ query->trace = &hdcp->connection.trace;
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status);
+
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status exec_status, trans_status, reset_status, status;
+ struct mod_hdcp_event_context event_ctx;
+
+ HDCP_EVENT_TRACE(hdcp, event);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+ memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context));
+ event_ctx.event = event;
+
+ /* execute and transition */
+ exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input);
+ trans_status = transition(
+ hdcp, &event_ctx, &hdcp->auth.trans_input, output);
+ if (trans_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE;
+ push_error_status(hdcp, status);
+ } else {
+ status = exec_status;
+ push_error_status(hdcp, status);
+ }
+
+ /* reset authentication if needed */
+ if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
+ HDCP_FULL_DDC_TRACE(hdcp);
+ reset_status = reset_authentication(hdcp, output);
+ if (reset_status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, reset_status);
+ }
+ return status;
+}
+
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal)
+{
+ enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ mode = MOD_HDCP_MODE_DEFAULT;
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ mode = MOD_HDCP_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ mode = MOD_HDCP_MODE_DP_MST;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
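mod_hdcp_setup(), mod_hdcp_add_display() and mod_hdcp_process_event() form the driver-facing flow of this module. A hedged sketch of how a caller might drive it follows; status checks and the delayed-work scheduling implied by mod_hdcp_output are omitted, and this is not the DM integration from this series.

	/* Hedged sketch (illustrative only) of the driver-facing flow. */
	static void example_hdcp_flow(struct mod_hdcp *hdcp,
				      struct mod_hdcp_config *config,
				      struct mod_hdcp_link *link,
				      struct mod_hdcp_display *display)
	{
		struct mod_hdcp_output output;

		mod_hdcp_setup(hdcp, config);
		mod_hdcp_add_display(hdcp, link, display, &output);

		/* The caller normally schedules delayed work based on
		 * output.callback_needed / output.watchdog_timer_needed and
		 * calls back in with the matching event. */
		if (output.callback_needed)
			mod_hdcp_process_event(hdcp, MOD_HDCP_EVENT_CALLBACK,
					       &output);
	}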
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
new file mode 100644
index 000000000000..5664bc0b5bd0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef HDCP_H_
+#define HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp_log.h"
+
+#define BCAPS_READY_MASK 0x20
+#define BCAPS_REPEATER_MASK 0x40
+#define BSTATUS_DEVICE_COUNT_MASK 0x007F
+#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080
+#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800
+#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01
+#define BCAPS_REPEATER_MASK_DP 0x02
+#define BSTATUS_READY_MASK_DP 0x01
+#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02
+#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04
+#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08
+#define BINFO_DEVICE_COUNT_MASK_DP 0x007F
+#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080
+#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800
+
+#define RXSTATUS_MSG_SIZE_MASK 0x03FF
+#define RXSTATUS_READY_MASK 0x0400
+#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800
+#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0
+#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01
+#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02
+#define RXSTATUS_READY_MASK_DP 0x0001
+#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002
+#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004
+#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008
+#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010
+
+enum mod_hdcp_trans_input_result {
+ UNKNOWN = 0,
+ PASS,
+ FAIL
+};
+
+struct mod_hdcp_transition_input_hdcp1 {
+ uint8_t bksv_read;
+ uint8_t bksv_validation;
+ uint8_t add_topology;
+ uint8_t create_session;
+ uint8_t an_write;
+ uint8_t aksv_write;
+ uint8_t ainfo_write;
+ uint8_t bcaps_read;
+ uint8_t r0p_read;
+ uint8_t rx_validation;
+ uint8_t encryption;
+ uint8_t link_maintenance;
+ uint8_t ready_check;
+ uint8_t bstatus_read;
+ uint8_t max_cascade_check;
+ uint8_t max_devs_check;
+ uint8_t device_count_check;
+ uint8_t ksvlist_read;
+ uint8_t vp_read;
+ uint8_t ksvlist_vp_validation;
+
+ uint8_t hdcp_capable_dp;
+ uint8_t binfo_read_dp;
+ uint8_t r0p_available_dp;
+ uint8_t link_integiry_check;
+ uint8_t reauth_request_check;
+ uint8_t stream_encryption_dp;
+};
+
+union mod_hdcp_transition_input {
+ struct mod_hdcp_transition_input_hdcp1 hdcp1;
+};
+
+struct mod_hdcp_message_hdcp1 {
+ uint8_t an[8];
+ uint8_t aksv[5];
+ uint8_t ainfo;
+ uint8_t bksv[5];
+ uint16_t r0p;
+ uint8_t bcaps;
+ uint16_t bstatus;
+ uint8_t ksvlist[635];
+ uint16_t ksvlist_size;
+ uint8_t vp[20];
+
+ uint16_t binfo_dp;
+};
+
+union mod_hdcp_message {
+ struct mod_hdcp_message_hdcp1 hdcp1;
+};
+
+struct mod_hdcp_auth_counters {
+ uint8_t stream_management_retry_count;
+};
+
+/* contains values per connection */
+struct mod_hdcp_connection {
+ struct mod_hdcp_link link;
+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
+ uint8_t is_repeater;
+ uint8_t is_km_stored;
+ struct mod_hdcp_trace trace;
+ uint8_t hdcp1_retry_count;
+};
+
+/* contains values per authentication cycle */
+struct mod_hdcp_authentication {
+ uint32_t id;
+ union mod_hdcp_message msg;
+ union mod_hdcp_transition_input trans_input;
+ struct mod_hdcp_auth_counters count;
+};
+
+/* contains values per state change */
+struct mod_hdcp_state {
+ uint8_t id;
+ uint32_t stay_count;
+};
+
+/* per event in a state */
+struct mod_hdcp_event_context {
+ enum mod_hdcp_event event;
+ uint8_t rx_id_list_ready;
+ uint8_t unexpected_event;
+};
+
+struct mod_hdcp {
+ /* per link */
+ struct mod_hdcp_config config;
+ /* per connection */
+ struct mod_hdcp_connection connection;
+ /* per authentication attempt */
+ struct mod_hdcp_authentication auth;
+ /* per state in an authentication */
+ struct mod_hdcp_state state;
+ /* reserved memory buffer */
+ uint8_t buf[2025];
+};
+
+enum mod_hdcp_initial_state_id {
+ HDCP_UNINITIALIZED = 0x0,
+ HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED,
+ HDCP_INITIALIZED,
+ HDCP_CP_NOT_DESIRED,
+ HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED
+};
+
+enum mod_hdcp_hdcp1_state_id {
+ HDCP1_STATE_START = HDCP_INITIAL_STATE_END,
+ H1_A0_WAIT_FOR_ACTIVE_RX,
+ H1_A1_EXCHANGE_KSVS,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER,
+ H1_A45_AUTHENTICATED,
+ H1_A8_WAIT_FOR_READY,
+ H1_A9_READ_KSV_LIST,
+ HDCP1_STATE_END = H1_A9_READ_KSV_LIST
+};
+
+enum mod_hdcp_hdcp1_dp_state_id {
+ HDCP1_DP_STATE_START = HDCP1_STATE_END,
+ D1_A0_DETERMINE_RX_HDCP_CAPABLE,
+ D1_A1_EXCHANGE_KSVS,
+ D1_A23_WAIT_FOR_R0_PRIME,
+ D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER,
+ D1_A4_AUTHENTICATED,
+ D1_A6_WAIT_FOR_READY,
+ D1_A7_READ_KSV_LIST,
+ HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST,
+};
+
+/* hdcp1 executions and transitions */
+typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp);
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str);
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+
+/* log functions */
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size);
+/* TODO: add adjustment log */
+
+/* psp functions */
+enum mod_hdcp_status mod_hdcp_add_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_remove_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status);
+/* ddc functions */
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
+
+/* hdcp version helpers */
+static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
+ hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT);
+}
+
+/* hdcp state helpers */
+static inline uint8_t current_state(struct mod_hdcp *hdcp)
+{
+ return hdcp->state.id;
+}
+
+static inline void set_state_id(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output, uint8_t id)
+{
+ memset(&hdcp->state, 0, sizeof(hdcp->state));
+ hdcp->state.id = id;
+ /* callback timer should be reset per state */
+ output->callback_stop = 1;
+ output->watchdog_timer_stop = 1;
+ HDCP_NEXT_STATE_TRACE(hdcp, id, output);
+}
+
+static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_STATE_START &&
+ current_state(hdcp) <= HDCP1_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_DP_STATE_START &&
+ current_state(hdcp) <= HDCP1_DP_STATE_END);
+}
+
+static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
+{
+ return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
+}
+
+static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_CP_NOT_DESIRED;
+}
+
+static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_INITIALIZED;
+}
+
+/* transition operation helpers */
+static inline void increment_stay_counter(struct mod_hdcp *hdcp)
+{
+ hdcp->state.stay_count++;
+}
+
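+/* schedule a retry callback after the given delay and signal the caller to reset state */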
+static inline void fail_and_restart_in_ms(uint16_t time,
+ enum mod_hdcp_status *status,
+ struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+ output->watchdog_timer_needed = 0;
+ output->watchdog_timer_delay = 0;
+ *status = MOD_HDCP_STATUS_RESET_NEEDED;
+}
+
+static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+}
+
+static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
+ struct mod_hdcp_output *output)
+{
+ output->watchdog_timer_needed = 1;
+ output->watchdog_timer_delay = time;
+}
+
+/* connection topology helpers */
+static inline uint8_t is_display_active(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
+}
+
+static inline uint8_t is_display_added(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+}
+
+static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+}
+
+static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_active(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline struct mod_hdcp_display *get_first_added_display(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_active_display_at_index(
+ struct mod_hdcp *hdcp, uint8_t index)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (hdcp->connection.displays[i].index == index &&
+ is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_empty_display_container(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (!is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline void reset_retry_counts(struct mod_hdcp *hdcp)
+{
+ hdcp->connection.hdcp1_retry_count = 0;
+}
+
+#endif /* HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
new file mode 100644
index 000000000000..3db4a7da414f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
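+/* HDCP 1.4 requires a valid BKSV to contain exactly 20 ones and 20 zeros */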
+static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
+{
+	uint64_t n = 0;
+	uint8_t count = 0;
+
+	/* bksv is only 5 bytes; copy into a zeroed u64 so the popcount below
+	 * never reads past the end of the array
+	 */
+	memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
+
+ while (n) {
+ count++;
+ n &= (n - 1);
+ }
+ return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+}
+
+static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
+{
+ if (is_dp_hdcp(hdcp))
+ return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+ return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+}
+
+static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;
+}
+
+static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp)) {
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_R0_P_AVAILABLE_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ }
+ return status;
+}
+
+static inline enum mod_hdcp_status check_link_integrity_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_reauthentication_request_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp1.binfo_dp &
+ BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp1.binfo_dp &
+ BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_MAX_DEVS_EXCEEDED_MASK) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
+{
+ return is_dp_hdcp(hdcp) ?
+ (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) :
+ (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK);
+}
+
+static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
+{
+ /* device count must be greater than or equal to tracked hdcp displays */
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
+ &input->add_topology, &status,
+ hdcp, "add_topology"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
+ &input->create_session, &status,
+ hdcp, "create_session"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_an,
+ &input->an_write, &status,
+ hdcp, "an_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv,
+ &input->aksv_write, &status,
+ hdcp, "aksv_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(validate_bksv,
+ &input->bksv_validation, &status,
+ hdcp, "bksv_validation"))
+ goto out;
+ if (hdcp->auth.msg.hdcp1.ainfo) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo,
+ &input->ainfo_write, &status,
+ hdcp, "ainfo_write"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status computations_validate_rx_test_for_repeater(
+ struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p,
+ &input->r0p_read, &status,
+ hdcp, "r0p_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx,
+ &input->rx_validation, &status,
+ hdcp, "rx_validation"))
+ goto out;
+ if (hdcp->connection.is_repeater) {
+ if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption)
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
+ &input->link_maintenance, &status,
+ hdcp, "link_maintenance"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+				&input->link_integrity_check, &status,
+				hdcp, "link_integrity_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_ksv_ready,
+ &input->ready_check, &status,
+ hdcp, "ready_check"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ uint8_t device_count;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo,
+ &input->binfo_read_dp, &status,
+ hdcp, "binfo_read_dp"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_no_max_cascade,
+ &input->max_cascade_check, &status,
+ hdcp, "max_cascade_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_max_devs,
+ &input->max_devs_check, &status,
+ hdcp, "max_devs_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_device_count,
+ &input->device_count_check, &status,
+ hdcp, "device_count_check"))
+ goto out;
+ device_count = get_device_count(hdcp);
+	hdcp->auth.msg.hdcp1.ksvlist_size = device_count * 5;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist,
+ &input->ksvlist_read, &status,
+ hdcp, "ksvlist_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp,
+ &input->vp_read, &status,
+ hdcp, "vp_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp,
+ &input->ksvlist_vp_validation, &status,
+ hdcp, "ksvlist_vp_validation"))
+ goto out;
+ if (input->encryption != PASS)
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp,
+ &input->hdcp_capable_dp, &status,
+ hdcp, "hdcp_capable_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_r0p_available_dp,
+ &input->r0p_available_dp, &status,
+ hdcp, "r0p_available_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+			&input->link_integrity_check, &status,
+			hdcp, "link_integrity_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+out:
+ return status;
+}
+
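+/* run one authentication step, trace pass/fail transitions of its input flag,
+ * and return nonzero when execution may continue with the next step
+ */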
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str)
+{
+ *status = func(hdcp);
+ if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) {
+ HDCP_INPUT_PASS_TRACE(hdcp, str);
+ *flag = PASS;
+ } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) {
+ HDCP_INPUT_FAIL_TRACE(hdcp, str);
+ *flag = FAIL;
+ }
+ return (*status == MOD_HDCP_STATUS_SUCCESS);
+}
+
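+/* per-state execution for the HDMI/DVI HDCP 1.4 state machine */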
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ status = wait_for_active_rx(hdcp, event_ctx, input);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(hdcp,
+ event_ctx, input);
+ break;
+ case H1_A45_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ status = wait_for_r0_prime_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(
+ hdcp, event_ctx, input);
+ break;
+ case D1_A4_AUTHENTICATED:
+ status = authenticated_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
new file mode 100644
index 000000000000..136b8011ff3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ if (input->bksv_read != PASS || input->bcaps_read != PASS) {
+			/* 1A-04: retry repeatedly on port access failure */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(300, output);
+ set_state_id(hdcp, output,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ if (input->bcaps_read != PASS ||
+ input->r0p_read != PASS ||
+ input->rx_validation != PASS ||
+ (!conn->is_repeater && input->encryption != PASS)) {
+ /* 1A-06: consider invalid r0' a failure */
+ /* 1A-08: consider bksv listed in SRM a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case H1_A45_AUTHENTICATED:
+ if (input->link_maintenance != PASS) {
+ /* 1A-07: consider invalid ri' a failure */
+ /* 1A-07a: consider read ri' not returned a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-03: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+				/* continue ksv list READY polling */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A9_READ_KSV_LIST);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ if (input->bstatus_read != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS ||
+ input->device_count_check != PASS ||
+ input->ksvlist_read != PASS ||
+ input->vp_read != PASS ||
+ input->ksvlist_vp_validation != PASS ||
+ input->encryption != PASS) {
+ /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */
+ /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-04: consider invalid v' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ if (input->bcaps_read != PASS) {
+ /* 1A-04: no authentication on bcaps read failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->hdcp_capable_dp != PASS) {
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ if (input->bstatus_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->r0p_available_dp != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ if (input->r0p_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->rx_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1A-06: consider invalid r0' a failure
+ * after 3 attempts.
+ * 1A-08: consider bksv listed in SRM a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+ } else if ((!conn->is_repeater && input->encryption != PASS) ||
+ (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
+ } else {
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case D1_A4_AUTHENTICATED:
+		if (input->link_integrity_check != PASS ||
+ input->reauth_request_check != PASS) {
+ /* 1A-07: restart hdcp on a link integrity failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ break;
+ case D1_A6_WAIT_FOR_READY:
+		if (input->link_integrity_check == FAIL ||
+ input->reauth_request_check == FAIL) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-04: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A7_READ_KSV_LIST);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ if (input->binfo_read_dp != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS) {
+ /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->device_count_check != PASS) {
+ /*
+			 * some slow dongles don't update the device count
+			 * as soon as a downstream device is connected;
+			 * give them more time to react.
+ */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(1000, &status, output);
+ break;
+ } else if (input->ksvlist_read != PASS ||
+ input->vp_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ksvlist_vp_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1B-05: consider invalid v' a failure
+ * after 3 attempts.
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+ } else if (input->encryption != PASS ||
+ (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
new file mode 100644
index 000000000000..e7baae059b85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define HDCP_I2C_ADDR 0x3a	/* 0x74 >> 1 */
+#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
+#define HDCP_MAX_AUX_TRANSACTION_SIZE 16
+
+enum mod_hdcp_ddc_message_id {
+ MOD_HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ MOD_HDCP_MESSAGE_ID_READ_BKSV = 0,
+ MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ MOD_HDCP_MESSAGE_ID_READ_VH_X,
+ MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ MOD_HDCP_MESSAGE_ID_READ_BINFO,
+
+ MOD_HDCP_MESSAGE_ID_MAX
+};
+
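+/* HDCP 1.4 register offsets within the receiver's I2C (DDC) address space */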
+static const uint8_t hdcp_i2c_offsets[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+};
+
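+/* HDCP 1.4 register addresses within the DisplayPort DPCD address space */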
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+};
+
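+/* DP reads are split into AUX-sized chunks; I2C reads go out in a single transaction */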
+static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ success = hdcp->config.ddc.funcs.read_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[msg_id],
+ buf,
+ (uint32_t)buf_len);
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
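+/* read buf_len bytes in read_size chunks, restarting at the base address each time */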
+static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len,
+ uint8_t read_size)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, read_size);
+ status = read(hdcp, msg_id, buf + data_offset, cur_size);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+
+ return status;
+}
+
+static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.write_dpcd(
+ hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
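+		/* for I2C, the destination register offset is sent as the first payload byte */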
+ hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ memmove(&hdcp->buf[1], buf, buf_len);
+ success = hdcp->config.ddc.funcs.write_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp->buf,
+ (uint32_t)(buf_len+1));
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV,
+ hdcp->auth.msg.hdcp1.bksv,
+ sizeof(hdcp->auth.msg.hdcp1.bksv));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ &hdcp->auth.msg.hdcp1.bcaps,
+ sizeof(hdcp->auth.msg.hdcp1.bcaps));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ 1);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ sizeof(hdcp->auth.msg.hdcp1.bstatus));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
+ sizeof(hdcp->auth.msg.hdcp1.r0p));
+}
+
+/* special case, reading repeatedly at the same address, don't use read() */
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size,
+ KSV_READ_SIZE);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ &hdcp->auth.msg.hdcp1.vp[0], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ &hdcp->auth.msg.hdcp1.vp[4], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ &hdcp->auth.msg.hdcp1.vp[8], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ &hdcp->auth.msg.hdcp1.vp[12], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ &hdcp->auth.msg.hdcp1.vp[16], 4);
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
+ else
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ hdcp->auth.msg.hdcp1.aksv,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+}
+
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ &hdcp->auth.msg.hdcp1.ainfo,
+ sizeof(hdcp->auth.msg.hdcp1.ainfo));
+}
+
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ hdcp->auth.msg.hdcp1.an,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
new file mode 100644
index 000000000000..3982ced5f969
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "hdcp.h"
+
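+/* render msg as a hex dump, 16 bytes per line, into buf when buf is large enough */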
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size)
+{
+ const uint8_t bytes_per_line = 16,
+ byte_size = 3,
+ newline_size = 1,
+ terminator_size = 1;
+ uint32_t line_count = msg_size / bytes_per_line,
+ trailing_bytes = msg_size % bytes_per_line;
+ uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count +
+ byte_size * trailing_bytes + newline_size + terminator_size;
+ uint32_t buf_pos = 0;
+ uint32_t i = 0;
+
+ if (buf_size >= target_size) {
+ for (i = 0; i < msg_size; i++) {
+ if (i % bytes_per_line == 0)
+ buf[buf_pos++] = '\n';
+ sprintf(&buf[buf_pos], "%02X ", msg[i]);
+ buf_pos += byte_size;
+ }
+ buf[buf_pos++] = '\0';
+ }
+}
+
+char *mod_hdcp_status_to_str(int32_t status)
+{
+ switch (status) {
+ case MOD_HDCP_STATUS_SUCCESS:
+ return "MOD_HDCP_STATUS_SUCCESS";
+ case MOD_HDCP_STATUS_FAILURE:
+ return "MOD_HDCP_STATUS_FAILURE";
+ case MOD_HDCP_STATUS_RESET_NEEDED:
+ return "MOD_HDCP_STATUS_RESET_NEEDED";
+ case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
+ case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
+ case MOD_HDCP_STATUS_INVALID_STATE:
+ return "MOD_HDCP_STATUS_INVALID_STATE";
+ case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
+ return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
+ case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
+ return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
+ case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
+ return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
+ case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
+ case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
+ case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
+ return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
+ case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
+ return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
+ case MOD_HDCP_STATUS_DDC_FAILURE:
+ return "MOD_HDCP_STATUS_DDC_FAILURE";
+ case MOD_HDCP_STATUS_INVALID_OPERATION:
+ return "MOD_HDCP_STATUS_INVALID_OPERATION";
+ default:
+ return "MOD_HDCP_STATUS_UNKNOWN";
+ }
+}
+
+char *mod_hdcp_state_id_to_str(int32_t id)
+{
+ switch (id) {
+ case HDCP_UNINITIALIZED:
+ return "HDCP_UNINITIALIZED";
+ case HDCP_INITIALIZED:
+ return "HDCP_INITIALIZED";
+ case HDCP_CP_NOT_DESIRED:
+ return "HDCP_CP_NOT_DESIRED";
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ return "H1_A0_WAIT_FOR_ACTIVE_RX";
+ case H1_A1_EXCHANGE_KSVS:
+ return "H1_A1_EXCHANGE_KSVS";
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER";
+ case H1_A45_AUTHENTICATED:
+ return "H1_A45_AUTHENTICATED";
+ case H1_A8_WAIT_FOR_READY:
+ return "H1_A8_WAIT_FOR_READY";
+ case H1_A9_READ_KSV_LIST:
+ return "H1_A9_READ_KSV_LIST";
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D1_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D1_A1_EXCHANGE_KSVS:
+ return "D1_A1_EXCHANGE_KSVS";
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ return "D1_A23_WAIT_FOR_R0_PRIME";
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER";
+ case D1_A4_AUTHENTICATED:
+ return "D1_A4_AUTHENTICATED";
+ case D1_A6_WAIT_FOR_READY:
+ return "D1_A6_WAIT_FOR_READY";
+ case D1_A7_READ_KSV_LIST:
+ return "D1_A7_READ_KSV_LIST";
+ default:
+ return "UNKNOWN_STATE_ID";
+	}
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
new file mode 100644
index 000000000000..2fd0e0a893ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_LOG_H_
+#define MOD_HDCP_LOG_H_
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__)
+#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
+#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#endif
+
+/* default logs */
+#define HDCP_ERROR_TRACE(hdcp, status) \
+ HDCP_LOG_ERR(hdcp, \
+ "[Link %d] ERROR %s IN STATE %s", \
+ hdcp->config.index, \
+ mod_hdcp_status_to_str(status), \
+ mod_hdcp_state_id_to_str(hdcp->state.id))
+#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 enabled on display %d", \
+ hdcp->config.index, displayIndex)
+/* state machine logs */
+#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] HDCP_REMOVE_DISPLAY index %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_INPUT_PASS_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tPASS %s", \
+ hdcp->config.index, str)
+#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tFAIL %s", \
+ hdcp->config.index, str)
+#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \
+ if (output->watchdog_timer_needed) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s with %d ms watchdog", \
+ hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \
+ else \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s", hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id)); \
+} while (0)
+#define HDCP_TIMEOUT_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index)
+#define HDCP_CPIRQ_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index)
+#define HDCP_EVENT_TRACE(hdcp, event) do { \
+	if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
+		HDCP_TIMEOUT_TRACE(hdcp); \
+	else if (event == MOD_HDCP_EVENT_CPIRQ) \
+		HDCP_CPIRQ_TRACE(hdcp); \
+} while (0)
+/* TODO: find some way to tell if logging is off to save time */
+#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \
+ msg_name, hdcp->buf); \
+} while (0)
+#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \
+ hdcp->config.index, msg_name,\
+ hdcp->buf); \
+} while (0)
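+/* dump every HDCP 1.4 message exchanged over DDC, for post-authentication debugging */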
+#define HDCP_FULL_DDC_TRACE(hdcp) do { \
+ HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
+ sizeof(hdcp->auth.msg.hdcp1.bksv)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
+ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
+ sizeof(hdcp->auth.msg.hdcp1.an)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
+ sizeof(hdcp->auth.msg.hdcp1.aksv)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
+ sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
+ HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
+ sizeof(hdcp->auth.msg.hdcp1.r0p)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
+ HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
+ hdcp->auth.msg.hdcp1.ksvlist_size); \
+ HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
+ sizeof(hdcp->auth.msg.hdcp1.vp)); \
+} while (0)
+#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \
+ hdcp->config.index)
+#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index)
+#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index)
+#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \
+} while (0)
+#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
+} while (0)
+
+#endif // MOD_HDCP_LOG_H_
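The DDC trace macros above rely on a helper, mod_hdcp_dump_binary_message(), that renders a raw HDCP message into hdcp->buf as printable hex before HDCP_LOG_DDC() emits it in a single line. A minimal sketch of such a helper is shown here for illustration; the signature is inferred from the macro usage and the body is not the module's actual implementation.

static void example_dump_binary_message(const uint8_t *msg, uint32_t msg_size,
		char *buf, uint32_t buf_size)
{
	uint32_t i, len = 0;
	int n;

	if (!buf_size)
		return;

	buf[0] = '\0';
	for (i = 0; i < msg_size; i++) {
		/* append " XX" per byte, stopping when the buffer is full */
		n = snprintf(buf + len, buf_size - len, " %02X", msg[i]);
		if (n < 0 || (uint32_t)n >= buf_size - len)
			break;
		len += n;
	}
}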
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
new file mode 100644
index 000000000000..646d909bbc37
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#define MAX_NUM_DISPLAYS 24
+
+
+#include "hdcp.h"
+
+#include "amdgpu.h"
+#include "hdcp_psp.h"
+
+enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
+{
+
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ uint8_t i;
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) {
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ display = &hdcp->connection.displays[i];
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ struct mod_hdcp_link *link = &hdcp->connection.link;
+ uint8_t i;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
+ display = &hdcp->connection.displays[i];
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+{
+
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
+ hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+ memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
+{
+
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+
+ HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv,
+ TA_HDCP__HDCP1_KSV_SIZE);
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p;
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+
+ if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
+ /* needs second part of authentication */
+ hdcp->connection.is_repeater = 1;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ hdcp->connection.is_repeater = 0;
+ } else
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
+
+ if (!is_dp_mst_hdcp(hdcp)) {
+ display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
+ }
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size;
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp,
+ sizeof(hdcp->auth.msg.hdcp1.vp));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo =
+ is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ int i = 0;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->connection.displays[i].adjust.disable)
+ continue;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+ hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+
+ return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status)
+{
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS)
+ return MOD_HDCP_STATUS_FAILURE;
+
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
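Every helper in hdcp_psp.c above follows the same shared-buffer convention with the PSP trusted application: clear the TA shared memory, fill the input message, select a command id, invoke the TA, then read the status (and any output message) back out of the same buffer. The following condensed sketch restates that pattern using the get-encryption-status command already shown above; error handling is trimmed for brevity.

static enum mod_hdcp_status example_psp_command(struct mod_hdcp *hdcp)
{
	struct psp_context *psp = hdcp->config.psp.handle;
	struct ta_hdcp_shared_memory *hdcp_cmd =
		(struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;

	/* 1. start from a clean shared buffer */
	memset(hdcp_cmd, 0, sizeof(*hdcp_cmd));

	/* 2. fill the input message and select the command */
	hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id;
	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS;

	/* 3. invoke the TA, which processes the buffer in place */
	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);

	/* 4. the TA writes its status and out_msg back into the same buffer */
	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
		return MOD_HDCP_STATUS_FAILURE;

	return MOD_HDCP_STATUS_SUCCESS;
}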
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
new file mode 100644
index 000000000000..986fc07ea9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_HDCP_HDCP_PSP_H_
+#define MODULES_HDCP_HDCP_PSP_H_
+
+/*
+ * NOTE: These parameters are a one-to-one copy of the
+ * parameters required by PSP
+ */
+enum bgd_security_hdcp_encryption_level {
+ HDCP_ENCRYPTION_LEVEL__INVALID = 0,
+ HDCP_ENCRYPTION_LEVEL__OFF,
+ HDCP_ENCRYPTION_LEVEL__ON
+};
+
+enum ta_dtm_command {
+ TA_DTM_COMMAND__UNUSED_1 = 1,
+ TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2,
+ TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE
+};
+
+/* DTM related enumerations */
+/**********************************************************/
+
+enum ta_dtm_status {
+ TA_DTM_STATUS__SUCCESS = 0x00,
+ TA_DTM_STATUS__GENERIC_FAILURE = 0x01,
+ TA_DTM_STATUS__INVALID_PARAMETER = 0x02,
+ TA_DTM_STATUS__NULL_POINTER = 0x3
+};
+
+/* input/output structures for DTM commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+enum ta_dtm_hdcp_version_max_supported {
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23
+};
+
+struct ta_dtm_topology_update_input_v2 {
+	/* display handle is unique across the driver and is used to identify a
+	 * display for all security interfaces which reference displays, such as HDCP */
+ uint32_t display_handle;
+ uint32_t is_active;
+ uint32_t is_miracast;
+ uint32_t controller;
+ uint32_t ddc_line;
+ uint32_t dig_be;
+ uint32_t dig_fe;
+ uint32_t dp_mst_vcid;
+ uint32_t is_assr;
+ uint32_t max_hdcp_supported_version;
+};
+
+struct ta_dtm_topology_assr_enable {
+ uint32_t display_topology_dig_be_index;
+};
+
+/**
+ * Output structures
+ */
+
+/* No output structures yet */
+
+union ta_dtm_cmd_input {
+ struct ta_dtm_topology_update_input_v2 topology_update_v2;
+ struct ta_dtm_topology_assr_enable topology_assr_enable;
+};
+
+union ta_dtm_cmd_output {
+ uint32_t reserved;
+};
+
+struct ta_dtm_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_dtm_status dtm_status;
+ uint32_t reserved;
+ union ta_dtm_cmd_input dtm_in_message;
+ union ta_dtm_cmd_output dtm_out_message;
+};
+
+int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
+ uint64_t fence_mc_addr);
+
+enum ta_hdcp_command {
+ TA_HDCP_COMMAND__INITIALIZE,
+ TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
+ TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION,
+ TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS,
+};
+
+
+/* HDCP related enumerations */
+/**********************************************************/
+#define TA_HDCP__INVALID_SESSION 0xFFFF
+#define TA_HDCP__HDCP1_AN_SIZE 8
+#define TA_HDCP__HDCP1_KSV_SIZE 5
+#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
+#define TA_HDCP__HDCP1_V_PRIME_SIZE 20
+
+enum ta_hdcp_status {
+ TA_HDCP_STATUS__SUCCESS = 0x00,
+ TA_HDCP_STATUS__GENERIC_FAILURE = 0x01,
+ TA_HDCP_STATUS__NULL_POINTER = 0x02,
+ TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03,
+ TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04,
+ TA_HDCP_STATUS__INVALID_PARAMETER = 0x05,
+ TA_HDCP_STATUS__VHX_ERROR = 0x06,
+ TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07,
+ TA_HDCP_STATUS__SRM_FAILURE = 0x08,
+ TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09,
+ TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A,
+ TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B,
+ TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C,
+ TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D,
+ TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E,
+ TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F,
+ TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10,
+ TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11,
+ TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12,
+ TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13,
+ TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14,
+ TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15,
+ TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16,
+ TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17
+};
+
+enum ta_hdcp_authentication_status {
+ TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+};
+
+
+/* input/output structures for HDCP commands */
+/**********************************************************/
+struct ta_hdcp_cmd_hdcp1_create_session_input {
+ uint8_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_create_session_output {
+ uint32_t session_handle;
+ uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_primary;
+ uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_destroy_session_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_input {
+ uint32_t session_handle;
+ uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bcaps;
+ uint16_t r0_prime_primary;
+ uint16_t r0_prime_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_input {
+ uint32_t session_handle;
+ uint16_t bstatus_binfo;
+ uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE];
+ uint32_t ksv_list_size;
+ uint8_t pj_prime;
+ uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_encryption_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input {
+ uint32_t session_handle;
+ uint32_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_output {
+ uint32_t protection_level;
+};
+
+/**********************************************************/
+/* Common input structure for HDCP callbacks */
+union ta_hdcp_cmd_input {
+ struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_input hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption;
+ struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status;
+};
+
+/* Common output structure for HDCP callbacks */
+union ta_hdcp_cmd_output {
+ struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status;
+};
+/**********************************************************/
+
+struct ta_hdcp_shared_memory {
+ uint32_t cmd_id;
+ enum ta_hdcp_status hdcp_status;
+ uint32_t reserved;
+ union ta_hdcp_cmd_input in_msg;
+ union ta_hdcp_cmd_output out_msg;
+};
+
+enum psp_status {
+ PSP_STATUS__SUCCESS = 0,
+ PSP_STATUS__ERROR_INVALID_PARAMS,
+ PSP_STATUS__ERROR_GENERIC,
+ PSP_STATUS__ERROR_OUT_OF_MEMORY,
+ PSP_STATUS__ERROR_UNSUPPORTED_FEATURE
+};
+
+#endif /* MODULES_HDCP_HDCP_PSP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dc187844d10b..dbe7835aabcf 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
+ uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
new file mode 100644
index 000000000000..dea21702edff
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_H_
+#define MOD_HDCP_H_
+
+#include "os_types.h"
+#include "signal_types.h"
+
+/* Forward Declarations */
+struct mod_hdcp;
+
+#define MAX_NUM_OF_DISPLAYS 6
+#define MAX_NUM_OF_ATTEMPTS 4
+#define MAX_NUM_OF_ERROR_TRACE 10
+
+/* detailed return status */
+enum mod_hdcp_status {
+ MOD_HDCP_STATUS_SUCCESS = 0,
+ MOD_HDCP_STATUS_FAILURE,
+ MOD_HDCP_STATUS_RESET_NEEDED,
+ MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND,
+ MOD_HDCP_STATUS_DISPLAY_NOT_FOUND,
+ MOD_HDCP_STATUS_INVALID_STATE,
+ MOD_HDCP_STATUS_NOT_IMPLEMENTED,
+ MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE,
+ MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE,
+ MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER,
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED,
+ MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV,
+ MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */
+ MOD_HDCP_STATUS_INVALID_OPERATION,
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING,
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
+};
+
+struct mod_hdcp_displayport {
+ uint8_t rev;
+ uint8_t assr_supported;
+};
+
+struct mod_hdcp_hdmi {
+ uint8_t reserved;
+};
+enum mod_hdcp_operation_mode {
+ MOD_HDCP_MODE_OFF,
+ MOD_HDCP_MODE_DEFAULT,
+ MOD_HDCP_MODE_DP,
+ MOD_HDCP_MODE_DP_MST
+};
+
+enum mod_hdcp_display_state {
+ MOD_HDCP_DISPLAY_INACTIVE = 0,
+ MOD_HDCP_DISPLAY_ACTIVE,
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
+ MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
+};
+
+struct mod_hdcp_ddc {
+ void *handle;
+ struct {
+ bool (*read_i2c)(void *handle,
+ uint32_t address,
+ uint8_t offset,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_i2c)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ bool (*read_dpcd)(void *handle,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_dpcd)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ } funcs;
+};
+
+struct mod_hdcp_psp {
+ void *handle;
+ void *funcs;
+};
+
+struct mod_hdcp_display_adjustment {
+ uint8_t disable : 1;
+ uint8_t reserved : 7;
+};
+
+struct mod_hdcp_link_adjustment_hdcp1 {
+ uint8_t disable : 1;
+ uint8_t postpone_encryption : 1;
+ uint8_t reserved : 6;
+};
+
+struct mod_hdcp_link_adjustment_hdcp2 {
+ uint8_t disable : 1;
+ uint8_t disable_type1 : 1;
+ uint8_t force_no_stored_km : 1;
+ uint8_t increase_h_prime_timeout: 1;
+ uint8_t reserved : 4;
+};
+
+struct mod_hdcp_link_adjustment {
+ uint8_t auth_delay;
+ struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
+ struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
+};
+
+struct mod_hdcp_error {
+ enum mod_hdcp_status status;
+ uint8_t state_id;
+};
+
+struct mod_hdcp_trace {
+ struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
+ uint8_t error_count;
+};
+
+enum mod_hdcp_encryption_status {
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON
+};
+
+/* per link events dm must notify the hdcp module of */
+enum mod_hdcp_event {
+ MOD_HDCP_EVENT_CALLBACK = 0,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ MOD_HDCP_EVENT_CPIRQ
+};
+
+/* output flags from module requesting timer operations */
+struct mod_hdcp_output {
+ uint8_t callback_needed;
+ uint8_t callback_stop;
+ uint8_t watchdog_timer_needed;
+ uint8_t watchdog_timer_stop;
+ uint16_t callback_delay;
+ uint16_t watchdog_timer_delay;
+};
+
+/* used to represent per display info */
+struct mod_hdcp_display {
+ enum mod_hdcp_display_state state;
+ uint8_t index;
+ uint8_t controller;
+ uint8_t dig_fe;
+ union {
+ uint8_t vc_id;
+ };
+ struct mod_hdcp_display_adjustment adjust;
+};
+
+/* used to represent per link info */
+/* in case a link has multiple displays, they share the same link info */
+struct mod_hdcp_link {
+ enum mod_hdcp_operation_mode mode;
+ uint8_t dig_be;
+ uint8_t ddc_line;
+ union {
+ struct mod_hdcp_displayport dp;
+ struct mod_hdcp_hdmi hdmi;
+ };
+ struct mod_hdcp_link_adjustment adjust;
+};
+
+/* a query structure for a display's hdcp information */
+struct mod_hdcp_display_query {
+ const struct mod_hdcp_display *display;
+ const struct mod_hdcp_link *link;
+ const struct mod_hdcp_trace *trace;
+ enum mod_hdcp_encryption_status encryption_status;
+};
+
+/* per link configuration values, set on external display configuration change */
+struct mod_hdcp_config {
+ struct mod_hdcp_psp psp;
+ struct mod_hdcp_ddc ddc;
+ uint8_t index;
+};
+
+struct mod_hdcp;
+
+/* dm allocates memory for mod_hdcp per dc_link on dm init, sized by this query */
+size_t mod_hdcp_get_memory_size(void);
+
+/* called per link on link creation */
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config);
+
+/* called per link on link destroy */
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp);
+
+/* called per display on cp_desired set to true */
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output);
+
+/* called per display on cp_desired set to false */
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output);
+
+/* called to query hdcp information on a specific index */
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query);
+
+/* called per link on connectivity change */
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output);
+
+/* called per link on events (i.e. callback, watchdog, CP_IRQ) */
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output);
+
+/* called to convert enum mod_hdcp_status to c string */
+char *mod_hdcp_status_to_str(int32_t status);
+
+/* called to convert state id to c string */
+char *mod_hdcp_state_id_to_str(int32_t id);
+
+/* called to convert signal type to operation mode */
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal);
+#endif /* MOD_HDCP_H_ */
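As the per-function comments above indicate, dm drives this interface per link and per display and the module reports requested timer operations back through struct mod_hdcp_output. The sketch below shows one plausible dm-side calling sequence; in real use the callback and watchdog requests would be scheduled after the requested delays rather than serviced inline, and allocation and error handling are omitted.

static void example_dm_hdcp_enable(struct mod_hdcp *hdcp,
		struct mod_hdcp_config *config,
		struct mod_hdcp_link *link,
		struct mod_hdcp_display *display)
{
	struct mod_hdcp_output output = { 0 };

	/* hdcp points at a dm-allocated buffer of mod_hdcp_get_memory_size() bytes */
	mod_hdcp_setup(hdcp, config);

	/* cp_desired turned on for this display */
	mod_hdcp_add_display(hdcp, link, display, &output);

	/*
	 * The module requests timer operations via the output flags:
	 * callback_needed/callback_delay ask for a deferred callback and
	 * watchdog_timer_needed/watchdog_timer_delay ask for a timeout,
	 * both of which dm reports back via mod_hdcp_process_event().
	 */
	if (output.callback_needed)
		mod_hdcp_process_event(hdcp, MOD_HDCP_EVENT_CALLBACK, &output);
}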
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index d930bdecb117..ca8ce3c55337 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -35,4 +35,7 @@ struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet);
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index d885d642ed7f..db6b08f6d093 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -31,6 +31,7 @@
#include "dc.h"
#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HF_VSIF_VERSION 1
// VTEM Byte Offset
#define VTEM_PB0 0
@@ -395,3 +396,100 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
+/**
+ *****************************************************************************
+ *  Function: mod_build_hf_vsif_infopacket
+ *
+ *  @brief
+ *	Prepare the HDMI (HF) Vendor Specific InfoFrame, built per the HDMI spec.
+ *
+ *  @param [in] stream: data needed to construct the VSIF (e.g. timing_3d_format)
+ *  @param [out] info_packet: output structure where the VSIF is stored
+ *  @param [in] ALLMEnabled, ALLMValue: whether and what to signal for ALLM
+ *****************************************************************************
+ */
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue)
+{
+ unsigned int length = 5;
+ bool hdmi_vic_mode = false;
+ uint8_t checksum = 0;
+ uint32_t i = 0;
+ enum dc_timing_3d_format format;
+ bool bALLM = (bool)ALLMEnabled;
+ bool bALLMVal = (bool)ALLMValue;
+
+ info_packet->valid = false;
+ format = stream->timing.timing_3d_format;
+ if (stream->view_format == VIEW_3D_FORMAT_NONE)
+ format = TIMING_3D_FORMAT_NONE;
+
+ if (stream->timing.hdmi_vic != 0
+ && stream->timing.h_total >= 3840
+ && stream->timing.v_total >= 2160
+ && format == TIMING_3D_FORMAT_NONE)
+ hdmi_vic_mode = true;
+
+ if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM)
+ return;
+
+ info_packet->sb[1] = 0x03;
+ info_packet->sb[2] = 0x0C;
+ info_packet->sb[3] = 0x00;
+
+ if (bALLM) {
+ info_packet->sb[1] = 0xD8;
+ info_packet->sb[2] = 0x5D;
+ info_packet->sb[3] = 0xC4;
+ info_packet->sb[4] = HF_VSIF_VERSION;
+ }
+
+ if (format != TIMING_3D_FORMAT_NONE)
+ info_packet->sb[4] = (2 << 5);
+
+ else if (hdmi_vic_mode)
+ info_packet->sb[4] = (1 << 5);
+
+ switch (format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ info_packet->sb[5] = (0x0 << 4);
+ break;
+
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[5] = (0x8 << 4);
+ length = 6;
+ break;
+
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[5] = (0x6 << 4);
+ break;
+
+ default:
+ break;
+ }
+
+ if (hdmi_vic_mode)
+ info_packet->sb[5] = stream->timing.hdmi_vic;
+
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR;
+ info_packet->hb1 = 0x01;
+ info_packet->hb2 = (uint8_t) (length);
+
+ if (bALLM)
+ info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1);
+
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= length; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
+
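The tail of mod_build_hf_vsif_infopacket() applies the standard HDMI InfoFrame checksum rule: the byte sum of the three header bytes and the payload, including the checksum byte itself, must be zero modulo 256, so sb[0] is set to 0x100 minus the running sum. A standalone restatement of that calculation, for illustration only:

static uint8_t example_infoframe_checksum(const struct dc_info_packet *p,
		unsigned int length)
{
	uint8_t sum = p->hb0 + p->hb1 + p->hb2;
	unsigned int i;

	for (i = 1; i <= length; i++)
		sum += p->sb[i];

	/* value stored in sb[0] so the total sums to 0 mod 256 */
	return (uint8_t)(0x100 - sum);
}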
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 05e2be856037..4e2f615c3566 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -80,18 +80,18 @@ struct abm_parameters {
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0},
- {0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0},
- {0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70},
+ {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
+ {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
+ {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
+ {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0x99, 0x65, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
+ {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
};
static const struct abm_parameters * const abm_settings[] = {
@@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = {
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
- uint16_t flags; /* 0x00 U16 */
+ uint16_t min_abm_backlight; /* 0x00 U16 */
/* parameters for ABM2.0 algorithm */
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
@@ -140,10 +140,10 @@ struct iram_table_v_2 {
/* For reading PSR State directly from IRAM */
uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
uint8_t dmcu_state; /* 0xf6 */
uint16_t blRampReduction; /* 0xf7 */
@@ -164,42 +164,43 @@ struct iram_table_v_2_2 {
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
- uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
- uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
- uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
- uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
- uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
- uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
- uint8_t pad[21]; /* 0x6b U0.8 */
+ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
+ uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
+ uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
+ uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
+ uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
+ uint16_t min_abm_backlight; /* 0x6b U16 */
+ uint8_t pad[19]; /* 0x6d U0.8 */
/* parameters for crgb conversion */
- uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
- uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
- uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
/* parameters for custom curve */
/* thresholds for brightness --> backlight */
- uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
/* offsets for brightness --> backlight */
- uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
/* For reading PSR State directly from IRAM */
- uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
- uint8_t dmcu_state; /* 0xf6 */
-
- uint8_t dummy1; /* 0xf7 */
- uint8_t dummy2; /* 0xf8 */
- uint8_t dummy3; /* 0xf9 */
- uint8_t dummy4; /* 0xfa */
- uint8_t dummy5; /* 0xfb */
- uint8_t dummy6; /* 0xfc */
- uint8_t dummy7; /* 0xfd */
- uint8_t dummy8; /* 0xfe */
- uint8_t dummy9; /* 0xff */
+ uint8_t psr_state; /* 0xf0 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint8_t dummy1; /* 0xf7 */
+ uint8_t dummy2; /* 0xf8 */
+ uint8_t dummy3; /* 0xf9 */
+ uint8_t dummy4; /* 0xfa */
+ uint8_t dummy5; /* 0xfb */
+ uint8_t dummy6; /* 0xfc */
+ uint8_t dummy7; /* 0xfd */
+ uint8_t dummy8; /* 0xfe */
+ uint8_t dummy9; /* 0xff */
};
#pragma pack(pop)
@@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
{
unsigned int set = params.set;
- ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
ram_table->deviation_gain = 0xb3;
ram_table->blRampReduction =
@@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
ram_table->deviation_gain[0] = 0xb3;
ram_table->deviation_gain[1] = 0xa8;
ram_table->deviation_gain[2] = 0x98;
@@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
unsigned int set = params.set;
ram_table->flags = 0x0;
+
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor;
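The byte-offset comments in iram_table_v_2_2 are load-bearing because the DMCU firmware reads the table at fixed offsets: shrinking pad[] from 21 to 19 bytes is what places the new min_abm_backlight field at 0x6b while keeping crgb_thresh at 0x80, and the value is written big-endian via cpu_to_be16(). A hypothetical build-time check (not part of this patch) could pin the layout down:

static inline void example_check_iram_v_2_2_layout(void)
{
	/* offsets taken from the structure comments above */
	BUILD_BUG_ON(offsetof(struct iram_table_v_2_2, min_abm_backlight) != 0x6b);
	BUILD_BUG_ON(offsetof(struct iram_table_v_2_2, crgb_thresh) != 0x80);
	BUILD_BUG_ON(sizeof(struct iram_table_v_2_2) != 256);
}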
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index da5df00fedce..e54157026330 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -38,6 +38,7 @@ struct dmcu_iram_parameters {
unsigned int backlight_lut_array_size;
unsigned int backlight_ramping_reduction;
unsigned int backlight_ramping_start;
+ unsigned int min_abm_backlight;
unsigned int set;
};
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 8889aaceec60..dc7eb28f0296 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -143,6 +143,8 @@ enum PP_FEATURE_MASK {
enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
+ DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4,
+ DC_PSR_MASK = 0x8,
};
enum amd_dpm_forced_level;
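DC_FEATURE_MASK values are simple bit flags, so a driver-side check for the new entries reduces to a mask test; the mask parameter below is a placeholder for whatever variable carries the user-selected feature mask.

static bool example_dc_psr_enabled(uint32_t dc_feature_mask)
{
	/* DC_PSR_MASK (0x8) gates PSR support; DC_DISABLE_FRACTIONAL_PWM_MASK
	 * (0x4) would be tested the same way */
	return (dc_feature_mask & DC_PSR_MASK) != 0;
}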
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
index a761ba07f937..fce965984e76 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmBUS_CNTL 0x1508
#define mmCONFIG_CNTL 0x1509
#define mmCONFIG_MEMSIZE 0x150a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
index 8fbfd0261d27..39cc4880beb4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x0
#define BUS_CNTL__BIOS_ROM_DIS_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 809759f7bb81..8d05d6ca1c8d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmCC_BIF_BX_STRAP2 0x152A
#define mmBIF_MM_INDACCESS_CNTL 0x1500
#define mmBIF_DOORBELL_APER_EN 0x1501
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
index adc71b01f793..73435687d049 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x2
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
#define BIF_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
index be4249adb356..eddf83ec1c39 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
@@ -9859,6 +9859,8 @@
#define mmDP0_DP_STEER_FIFO_BASE_IDX 2
#define mmDP0_DP_MSA_MISC 0x210e
#define mmDP0_DP_MSA_MISC_BASE_IDX 2
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP0_DP_VID_TIMING 0x2110
#define mmDP0_DP_VID_TIMING_BASE_IDX 2
#define mmDP0_DP_VID_N 0x2111
@@ -10187,6 +10189,8 @@
#define mmDP1_DP_STEER_FIFO_BASE_IDX 2
#define mmDP1_DP_MSA_MISC 0x220e
#define mmDP1_DP_MSA_MISC_BASE_IDX 2
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP1_DP_VID_TIMING 0x2210
#define mmDP1_DP_VID_TIMING_BASE_IDX 2
#define mmDP1_DP_VID_N 0x2211
@@ -10515,6 +10519,8 @@
#define mmDP2_DP_STEER_FIFO_BASE_IDX 2
#define mmDP2_DP_MSA_MISC 0x230e
#define mmDP2_DP_MSA_MISC_BASE_IDX 2
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP2_DP_VID_TIMING 0x2310
#define mmDP2_DP_VID_TIMING_BASE_IDX 2
#define mmDP2_DP_VID_N 0x2311
@@ -10843,6 +10849,8 @@
#define mmDP3_DP_STEER_FIFO_BASE_IDX 2
#define mmDP3_DP_MSA_MISC 0x240e
#define mmDP3_DP_MSA_MISC_BASE_IDX 2
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP3_DP_VID_TIMING 0x2410
#define mmDP3_DP_VID_TIMING_BASE_IDX 2
#define mmDP3_DP_VID_N 0x2411
@@ -11171,6 +11179,8 @@
#define mmDP4_DP_STEER_FIFO_BASE_IDX 2
#define mmDP4_DP_MSA_MISC 0x250e
#define mmDP4_DP_MSA_MISC_BASE_IDX 2
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP4_DP_VID_TIMING 0x2510
#define mmDP4_DP_VID_TIMING_BASE_IDX 2
#define mmDP4_DP_VID_N 0x2511
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index ca16d9125fbc..2bfaaa8157d0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -1146,7 +1146,14 @@
#define mmATC_L2_MEM_POWER_LS_BASE_IDX 0
#define mmATC_L2_CGTT_CLK_CTRL 0x080c
#define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmATC_L2_CACHE_4K_EDC_INDEX 0x080e
+#define mmATC_L2_CACHE_4K_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_INDEX 0x080f
+#define mmATC_L2_CACHE_2M_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_4K_EDC_CNT 0x0810
+#define mmATC_L2_CACHE_4K_EDC_CNT_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_CNT 0x0811
+#define mmATC_L2_CACHE_2M_EDC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2pfdec
// base address: 0xa100
@@ -1206,7 +1213,14 @@
#define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
#define mmVM_L2_CGTT_CLK_CTRL 0x085e
#define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmVM_L2_MEM_ECC_INDEX 0x0860
+#define mmVM_L2_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_INDEX 0x0861
+#define mmVM_L2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_MEM_ECC_CNT 0x0862
+#define mmVM_L2_MEM_ECC_CNT_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_CNT 0x0863
+#define mmVM_L2_WALKER_MEM_ECC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2vcdec
// base address: 0xa200
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index 064c4bb1dc62..d4c613a85352 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -6661,7 +6661,6 @@
#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
// addressBlock: gc_utcl2_vml2pfdec
//VM_L2_CNTL
#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
@@ -6991,7 +6990,22 @@
#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
+//VM_L2_MEM_ECC_INDEX
+#define VM_L2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_WALKER_MEM_ECC_INDEX
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_MEM_ECC_CNT
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
+//VM_L2_WALKER_MEM_ECC_CNT
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
// addressBlock: gc_utcl2_vml2vcdec
//VM_CONTEXT0_CNTL
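The new VM_L2 ECC count fields are packed two to a register, so consumers decode them with the mask/shift pairs defined above. A small decode helper, shown only as an illustration (the register read itself is omitted):

static void example_decode_vm_l2_mem_ecc_cnt(uint32_t reg_val,
		uint32_t *sec_count, uint32_t *ded_count)
{
	/* single-error-corrected and double-error-detected counts */
	*sec_count = (reg_val & VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK) >>
			VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT;
	*ded_count = (reg_val & VM_L2_MEM_ECC_CNT__DED_COUNT_MASK) >>
			VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT;
}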
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
index 4bcacf529852..991128bb9476 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
@@ -22,6 +22,9 @@
#ifndef _nbio_7_4_0_SMN_HEADER
#define _nbio_7_4_0_SMN_HEADER
+// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk
+// base address: 0x10100000
+#define smnBIFL_RAS_CENTRAL_STATUS 0x10139040
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define smnCPM_CONTROL 0x11180460
@@ -53,4 +56,13 @@
#define smnPCIE_RX_NUM_NAK 0x11180038
#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
+// addressBlock: nbio_iohub_nb_misc_misc_cfgdec
+// base address: 0x13a10000
+#define smnIOHC_INTERRUPT_EOI 0x13a10120
+
+// addressBlock: nbio_iohub_nb_rascfg_ras_cfgdec
+// base address: 0x13a20000
+#define smnRAS_GLOBAL_STATUS_LO 0x13a20020
+#define smnRAS_GLOBAL_STATUS_HI 0x13a20024
+
#endif // _nbio_7_4_0_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index 994e796a28d7..ce5830ebe095 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2793,8 +2793,8 @@
#define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2
#define mmBIF_FB_EN 0x00ff
#define mmBIF_FB_EN_BASE_IDX 2
-#define mmBIF_BUSY_DELAY_CNTR 0x0100
-#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2
+#define mmBIF_INTR_CNTL 0x0100
+#define mmBIF_INTR_CNTL_BASE_IDX 2
#define mmBIF_MST_TRANS_PENDING_VF 0x0109
#define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2
#define mmBIF_SLV_TRANS_PENDING_VF 0x010a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
index d467b939c971..07f04b2b5bdd 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
@@ -20420,9 +20420,9 @@
#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
-//BIF_BUSY_DELAY_CNTR
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL
+//BIF_INTR_CNTL
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
//BIF_MST_TRANS_PENDING_VF
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
@@ -48436,4 +48436,47 @@
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
+//IOHC_INTERRUPT_EOI
+#define IOHC_INTERRUPT_EOI__SMI_EOI__SHIFT 0x0
+#define IOHC_INTERRUPT_EOI__SCI_EOI__SHIFT 0x1
+#define IOHC_INTERRUPT_EOI__NMI_EOI__SHIFT 0x2
+#define IOHC_INTERRUPT_EOI__SMI_EOI_MASK 0x00000001L
+#define IOHC_INTERRUPT_EOI__SCI_EOI_MASK 0x00000002L
+#define IOHC_INTERRUPT_EOI__NMI_EOI_MASK 0x00000004L
+
+//RAS_GLOBAL_STATUS_LO
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8
+#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9
+#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa
+#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb
+#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L
+#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L
+#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L
+#define RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L
+#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L
+//RAS_GLOBAL_STATUS_HI
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr_MASK 0x00000002L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
index dc9895a684fe..096d878eb1de 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
@@ -588,11 +588,15 @@
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
index dbc2e723f659..71169daa701a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 6af9f0217b34..61a9a84e0c3a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index bd3685166779..351446754c72 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
index 627906674fe8..4bfd5f8ba66c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index f35aba72e640..21da61c398f5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -52,6 +52,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
index 481ee6560aa9..f64fe0fbcb32 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -220,6 +220,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
index d3876052562b..687d6843c258 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
@@ -121,6 +121,98 @@
#define mmCKSVII2C_IC_COMP_VERSION_BASE_IDX 0
#define mmCKSVII2C_IC_COMP_TYPE 0x006d
#define mmCKSVII2C_IC_COMP_TYPE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CON 0x0080
+#define mmCKSVII2C1_IC_CON_BASE_IDX 0
+#define mmCKSVII2C1_IC_TAR 0x0081
+#define mmCKSVII2C1_IC_TAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SAR 0x0082
+#define mmCKSVII2C1_IC_SAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_MADDR 0x0083
+#define mmCKSVII2C1_IC_HS_MADDR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DATA_CMD 0x0084
+#define mmCKSVII2C1_IC_DATA_CMD_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_HCNT 0x0085
+#define mmCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_LCNT 0x0086
+#define mmCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_HCNT 0x0087
+#define mmCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_LCNT 0x0088
+#define mmCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_HCNT 0x0089
+#define mmCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_LCNT 0x008a
+#define mmCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_STAT 0x008b
+#define mmCKSVII2C1_IC_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_MASK 0x008c
+#define mmCKSVII2C1_IC_INTR_MASK_BASE_IDX 0
+#define mmCKSVII2C1_IC_RAW_INTR_STAT 0x008d
+#define mmCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_RX_TL 0x008e
+#define mmCKSVII2C1_IC_RX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_TL 0x008f
+#define mmCKSVII2C1_IC_TX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_INTR 0x0090
+#define mmCKSVII2C1_IC_CLR_INTR_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_UNDER 0x0091
+#define mmCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_OVER 0x0092
+#define mmCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_OVER 0x0093
+#define mmCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RD_REQ 0x0094
+#define mmCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_ABRT 0x0095
+#define mmCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_DONE 0x0096
+#define mmCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_ACTIVITY 0x0097
+#define mmCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_STOP_DET 0x0098
+#define mmCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_START_DET 0x0099
+#define mmCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_GEN_CALL 0x009a
+#define mmCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE 0x009b
+#define mmCKSVII2C1_IC_ENABLE_BASE_IDX 0
+#define mmCKSVII2C1_IC_STATUS 0x009c
+#define mmCKSVII2C1_IC_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_TXFLR 0x009d
+#define mmCKSVII2C1_IC_TXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_RXFLR 0x009e
+#define mmCKSVII2C1_IC_RXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_HOLD 0x009f
+#define mmCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_CR 0x00a2
+#define mmCKSVII2C1_IC_DMA_CR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_TDLR 0x00a3
+#define mmCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_RDLR 0x00a4
+#define mmCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_SETUP 0x00a5
+#define mmCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE_STATUS 0x00a7
+#define mmCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SPKLEN 0x00a8
+#define mmCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SPKLEN 0x00a9
+#define mmCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RESTART_DET 0x00aa
+#define mmCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_PARAM_1 0x00ab
+#define mmCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_VERSION 0x00ac
+#define mmCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_TYPE 0x00ad
+#define mmCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0
#define mmSMUIO_MP_RESET_INTR 0x00c1
#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0
#define mmSMUIO_SOC_HALT 0x00c2
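
The new mmCKSVII2C1_* offsets duplicate the existing controller-0 register bank, so consumers typically select the offset by controller index. A minimal sketch, assuming the controller-0 offset mmCKSVII2C_IC_CON from the unchanged part of this header; the helper name is illustrative only:

static uint32_t cksvii2c_ic_con_offset(int controller)
{
	/* The second I2C controller uses the CKSVII2C1 register bank. */
	return controller ? mmCKSVII2C1_IC_CON : mmCKSVII2C_IC_CON;
}
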
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
index f8afa3518bf2..6905a9618127 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
@@ -268,6 +268,182 @@
//CKSVII2C_IC_COMP_TYPE
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL
+//CKSVII2C1_IC_CON
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C1_IC_TAR
+#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0
+#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa
+#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL
+#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L
+#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C1_IC_SAR
+#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0
+#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL
+//CKSVII2C1_IC_HS_MADDR
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L
+//CKSVII2C1_IC_DATA_CMD
+#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0
+#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8
+#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9
+#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa
+#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL
+#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L
+#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L
+#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L
+//CKSVII2C1_IC_SS_SCL_HCNT
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_SS_SCL_LCNT
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_HCNT
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_LCNT
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_HCNT
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_LCNT
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_INTR_STAT
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_INTR_MASK
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_RAW_INTR_STAT
+//CKSVII2C1_IC_RX_TL
+//CKSVII2C1_IC_TX_TL
+//CKSVII2C1_IC_CLR_INTR
+//CKSVII2C1_IC_CLR_RX_UNDER
+//CKSVII2C1_IC_CLR_RX_OVER
+//CKSVII2C1_IC_CLR_TX_OVER
+//CKSVII2C1_IC_CLR_RD_REQ
+//CKSVII2C1_IC_CLR_TX_ABRT
+//CKSVII2C1_IC_CLR_RX_DONE
+//CKSVII2C1_IC_CLR_ACTIVITY
+//CKSVII2C1_IC_CLR_STOP_DET
+//CKSVII2C1_IC_CLR_START_DET
+//CKSVII2C1_IC_CLR_GEN_CALL
+//CKSVII2C1_IC_ENABLE
+#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L
+//CKSVII2C1_IC_STATUS
+#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0
+#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1
+#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2
+#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3
+#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6
+#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L
+#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L
+#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L
+#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L
+#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L
+//CKSVII2C1_IC_TXFLR
+//CKSVII2C1_IC_RXFLR
+//CKSVII2C1_IC_SDA_HOLD
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD_MASK 0x00FFFFFFL
+//CKSVII2C1_IC_TX_ABRT_SOURCE
+//CKSVII2C1_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C1_IC_DMA_CR
+//CKSVII2C1_IC_DMA_TDLR
+//CKSVII2C1_IC_DMA_RDLR
+//CKSVII2C1_IC_SDA_SETUP
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL
+//CKSVII2C1_IC_ACK_GENERAL_CALL
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C1_IC_ENABLE_STATUS
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED_MASK 0x00000002L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L
//SMUIO_MP_RESET_INTR
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
index cf2149cc12ee..90350f46a0c4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
@@ -24,6 +24,18 @@
// addressBlock: uvd0_mmsch_dec
// base address: 0x1e000
+#define mmMMSCH_VF_VMID 0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_CTX_SIZE 0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_HOST 0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_RESP 0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
// addressBlock: uvd0_jpegnpdec
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index e88541d67aa0..dd7cbc00a0aa 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -492,12 +492,13 @@ struct atom_firmware_info_v3_1
/* Total 32bit cap indication */
enum atombios_firmware_capability
{
- ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
- ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
- ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
- ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
- ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
- ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
+ ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
+ ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
+ ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
+ ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
+ ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400,
};
enum atom_cooling_solution_id{
@@ -671,6 +672,20 @@ struct vram_usagebyfirmware_v2_1
uint16_t used_by_driver_in_kb;
};
+/* This is part of vram_usagebyfirmware_v2_1 */
+struct vram_reserve_block
+{
+ uint32_t start_address_in_kb;
+ uint16_t used_by_firmware_in_kb;
+ uint16_t used_by_driver_in_kb;
+};
+
+/* Definitions for constants */
+enum atomfirmware_internal_constants
+{
+ ONE_KiB = 0x400,
+ ONE_MiB = 0x100000,
+};
/*
***************************************************************************
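
The new constants are there to convert the kilobyte-granularity fields of the firmware VRAM usage/reserve structures above into byte values. A minimal sketch; the helper name is an assumption for illustration:

static uint64_t vram_reserve_block_size_bytes(const struct vram_reserve_block *blk)
{
	/* used_by_firmware_in_kb is expressed in KiB units. */
	return (uint64_t)blk->used_by_firmware_in_kb * ONE_KiB;
}
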
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 5dcb776548d8..7ec4331e67f2 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -25,7 +25,6 @@
#define _DISCOVERY_H_
#define PSP_HEADER_SIZE 256
-#define BINARY_MAX_SIZE (64 << 10)
#define BINARY_SIGNATURE 0x28211407
#define DISCOVERY_TABLE_SIGNATURE 0x53445049
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
new file mode 100644
index 000000000000..79af4258f259
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_NBIF_7_4_H__
+#define __IRQSRCS_NBIF_7_4_H__
+
+#define NBIF_7_4__SRCID__CHIP_ERR_INT_EVENT 0x5E // Error generated
+#define NBIF_7_4__SRCID__DOORBELL_INTERRUPT 0x5F // Interrupt for doorbell event during VDDGFX off
+#define NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT 0x60 // Interrupt for ras_intr_valid from RAS controller
+#define NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT 0x61 // Interrupt for SDP ErrEvent received from ATHUB
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_VALID 0x87 // Valid message in PF->VF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_ACK 0x88 // Acknowledge message in PF->VF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_VALID 0x89 // Valid message in VF->PF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_ACK 0x8A // Acknowledge message in VF->PF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__CHIP_DPA_INT_EVENT 0xA0 // BIF_CHIP_DPA_INT_EVENT
+#define NBIF_7_4__SRCID__CHIP_SLOT_POWER_CHG_INT_EVENT 0xA1 // BIF_CHIP_SLOT_POWER_CHG_INT_EVENT
+#define NBIF_7_4__SRCID__ATOMIC_UR_OPCODE 0xCE // BIF receives an unsupported atomic opcode from MC
+#define NBIF_7_4__SRCID__ATOMIC_REQESTEREN_LOW 0xCF // BIF receives an atomic request from MC while AtomicOp Requester is not enabled in PCIe config space
+
+#endif // __IRQSRCS_NBIF_7_4_H__
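
These source IDs are intended to be registered with the interrupt framework together with the BIF client id. A minimal sketch, assuming the SOC15_IH_CLIENTID_BIF client id and an irq source object named ras_controller_irq; both come from the surrounding driver conventions rather than this header:

static int nbif_register_ras_irq(struct amdgpu_device *adev)
{
	/* Route the RAS controller interrupt source to its handler object. */
	return amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
				 NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
				 &adev->nbio.ras_controller_irq);
}
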
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 98b9533e672b..2cd217e60125 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -291,15 +291,18 @@ struct kfd2kgd_calls {
uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
- bool (*get_atc_vmid_pasid_mapping_valid)(
+ bool (*get_atc_vmid_pasid_mapping_info)(
struct kgd_dev *kgd,
- uint8_t vmid);
- uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
- struct kgd_dev *kgd,
- uint8_t vmid);
+ uint8_t vmid,
+ uint16_t *p_pasid);
+ /* No longer needed from GFXv9 onward. The scratch base address is
+ * passed to the shader by the CP. It's the user mode driver's
+ * responsibility.
+ */
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
+
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 27cf0afaa0b4..a7f92d0b3a90 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -179,6 +179,11 @@ enum pp_mp1_state {
PP_MP1_STATE_RESET,
};
+enum pp_df_cstate {
+ DF_CSTATE_DISALLOW = 0,
+ DF_CSTATE_ALLOW,
+};
+
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
@@ -215,6 +220,9 @@ enum pp_mp1_state {
((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \
(support) << PP_STATE_SUPPORT_SHIFT | (state) << PP_STATE_SHIFT)
+#define XGMI_MODE_PSTATE_D3 0
+#define XGMI_MODE_PSTATE_D0 1
+
struct seq_file;
enum amd_pp_clock_type;
struct amd_pp_simple_clock_info;
@@ -312,6 +320,8 @@ struct amd_pm_funcs {
int (*get_ppfeature_status)(void *handle, char *buf);
int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
int (*asic_reset_mode_2)(void *handle);
+ int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
};
#endif
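
The two new amd_pm_funcs hooks are reached through the usual powerplay handle plumbing. A minimal sketch of a caller; the wrapper name is hypothetical:

static int example_allow_df_cstate(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_df_cstate)
		return 0;

	/* DF_CSTATE_ALLOW re-enables data-fabric C-states. */
	return pp_funcs->set_df_cstate(adev->powerplay.pp_handle,
				       DF_CSTATE_ALLOW);
}
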
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 094648cac392..07633e22e99a 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -169,6 +169,11 @@ static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DCN_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
@@ -1361,4 +1366,33 @@ static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x0240300
#define UVD0_BASE__INST6_SEG3 0
#define UVD0_BASE__INST6_SEG4 0
+#define DCN_BASE__INST0_SEG0 0x00000012
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define DCN_BASE__INST0_SEG2 0x000034C0
+#define DCN_BASE__INST0_SEG3 0
+#define DCN_BASE__INST0_SEG4 0
+
+#define DCN_BASE__INST1_SEG0 0
+#define DCN_BASE__INST1_SEG1 0
+#define DCN_BASE__INST1_SEG2 0
+#define DCN_BASE__INST1_SEG3 0
+#define DCN_BASE__INST1_SEG4 0
+
+#define DCN_BASE__INST2_SEG0 0
+#define DCN_BASE__INST2_SEG1 0
+#define DCN_BASE__INST2_SEG2 0
+#define DCN_BASE__INST2_SEG3 0
+#define DCN_BASE__INST2_SEG4 0
+
+#define DCN_BASE__INST3_SEG0 0
+#define DCN_BASE__INST3_SEG1 0
+#define DCN_BASE__INST3_SEG2 0
+#define DCN_BASE__INST3_SEG3 0
+#define DCN_BASE__INST3_SEG4 0
+
+#define DCN_BASE__INST4_SEG0 0
+#define DCN_BASE__INST4_SEG1 0
+#define DCN_BASE__INST4_SEG2 0
+#define DCN_BASE__INST4_SEG3 0
+#define DCN_BASE__INST4_SEG4 0
#endif
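
The DCN_BASE segments added above feed the SOC15 per-IP register offset table that SOC15_REG_OFFSET() indexes. A minimal sketch following the existing reg_base_init convention; the DCE_HWIP index and MAX_INSTANCE bound are assumed from the surrounding headers:

int i;

/* For each DCN instance, point the lookup table at its segment bases. */
for (i = 0; i < MAX_INSTANCE; i++)
	adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
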
diff --git a/drivers/gpu/drm/amd/include/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h
index c14ba65a2415..adf1b754666e 100644
--- a/drivers/gpu/drm/amd/include/vega10_enum.h
+++ b/drivers/gpu/drm/amd/include/vega10_enum.h
@@ -1037,6 +1037,7 @@ TCC_CACHE_POLICY_STREAM = 0x00000001,
typedef enum MTYPE {
MTYPE_NC = 0x00000000,
MTYPE_WC = 0x00000001,
+MTYPE_RW = 0x00000001,
MTYPE_CC = 0x00000002,
MTYPE_UC = 0x00000003,
} MTYPE;
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index fa8ad7db2b3a..7932eb163a00 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -969,6 +969,14 @@ static int pp_dpm_switch_power_profile(void *handle,
workload = hwmgr->workload_setting[index];
}
+ if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
+ hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
+ if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
+ mutex_unlock(&hwmgr->smu_lock);
+ return -EINVAL;
+ }
+ }
+
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
mutex_unlock(&hwmgr->smu_lock);
@@ -1421,6 +1429,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
struct pp_hwmgr *hwmgr = handle;
+ *cap = false;
if (!hwmgr)
return -EINVAL;
@@ -1548,6 +1557,40 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return ret;
}
+static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1606,4 +1649,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_ppfeature_status = pp_set_ppfeature_status,
.asic_reset_mode_2 = pp_asic_reset_mode_2,
.smu_i2c_bus_access = pp_smu_i2c_bus_access,
+ .set_df_cstate = pp_set_df_cstate,
+ .set_xgmi_pstate = pp_set_xgmi_pstate,
};
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4acf139ea014..40b546c75fc2 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -25,11 +25,16 @@
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
+#include "vega20_ppt.h"
+#include "arcturus_ppt.h"
+#include "navi10_ppt.h"
+#include "renoir_ppt.h"
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
@@ -67,6 +72,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
+ mutex_lock(&smu->mutex);
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
goto failed;
@@ -92,9 +99,57 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
}
failed:
+ mutex_unlock(&smu->mutex);
+
return size;
}
+static int smu_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ feature_low = (feature_mask >> 0 ) & 0xffffffff;
+ feature_high = (feature_mask >> 32) & 0xffffffff;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&feature->mutex);
+ if (enabled)
+ bitmap_or(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ else
+ bitmap_andnot(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
int ret = 0;
@@ -103,9 +158,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
+ mutex_lock(&smu->mutex);
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
- return ret;
+ goto out;
feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
@@ -115,14 +172,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
if (feature_2_enabled) {
ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
if (ret)
- return ret;
+ goto out;
}
if (feature_2_disabled) {
ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
if (ret)
- return ret;
+ goto out;
}
+out:
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -159,8 +219,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max)
{
- int ret = 0, clk_id = 0;
- uint32_t param;
+ int ret = 0;
if (min <= 0 && max <= 0)
return -EINVAL;
@@ -168,27 +227,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
- clk_id = smu_clk_get_index(smu, clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
- if (ret)
- return ret;
- }
-
-
+ ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
return ret;
}
@@ -229,7 +268,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max)
+ uint32_t *min, uint32_t *max, bool lock_needed)
{
uint32_t clock_limit;
int ret = 0;
@@ -237,6 +276,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!min && !max)
return -EINVAL;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
@@ -260,14 +302,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
-
- return 0;
+ } else {
+ /*
+		 * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
+		 * core driver, and add helpers for what is common (SMU_v11_x | SMU_v12_x funcs).
+ */
+ ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
}
- /*
- * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
- * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
- */
- ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -338,7 +383,20 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
return true;
}
-
+/**
+ * smu_dpm_set_power_gate - power gate/ungate the specific IP block
+ *
+ * @smu: smu_context pointer
+ * @block_type: the IP block to power gate/ungate
+ * @gate: to power gate if true, ungate otherwise
+ *
+ * This API uses no smu->mutex lock protection because:
+ * 1. It is called from other IP blocks (gfx/sdma/vcn/uvd/vce), and those
+ *    callers guarantee the calls are race free.
+ * 2. Or it is called on a user request to change power_dpm_force_performance_level.
+ *    In that case, smu->mutex is already held by the parent API,
+ *    smu_force_performance_level, in the call path.
+ */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -364,12 +422,6 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
return ret;
}
-enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
-{
- /* not support power state */
- return POWER_STATE_TYPE_DEFAULT;
-}
-
int smu_get_power_num_states(struct smu_context *smu,
struct pp_states_info *state_info)
{
@@ -439,7 +491,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
int ret = 0;
int table_id = smu_table_get_index(smu, table_index);
- if (!table_data || table_id >= smu_table->table_count || table_id < 0)
+ if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table = &smu_table->tables[table_index];
@@ -463,7 +515,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
return ret;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (!drv2smu)
memcpy(table_data, table->cpu_addr, table->size);
@@ -483,7 +535,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
- if (amdgpu_dpm != 1)
+ if (!is_support_sw_smu(adev))
return false;
if (adev->asic_type == CHIP_VEGA20)
@@ -495,16 +547,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t powerplay_table_size;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- return smu_table->power_play_table_size;
+ powerplay_table_size = smu_table->power_play_table_size;
+
+ mutex_unlock(&smu->mutex);
+
+ return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
@@ -531,13 +590,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
memcpy(smu_table->hardcode_pptable, buf, size);
smu_table->power_play_table = smu_table->hardcode_pptable;
smu_table->power_play_table_size = size;
- mutex_unlock(&smu->mutex);
+
+ /*
+	 * Special hw_fini action (for Navi1x, the DPM disablement will be
+	 * skipped) may be needed for custom pptable uploading.
+ */
+ smu->uploading_custom_pp_table = true;
ret = smu_reset(smu);
if (ret)
pr_info("smu reset failed, ret = %d\n", ret);
- return ret;
+ smu->uploading_custom_pp_table = false;
failed:
mutex_unlock(&smu->mutex);
@@ -569,41 +633,7 @@ int smu_feature_init_dpm(struct smu_context *smu)
return ret;
}
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled)
-{
- uint32_t feature_low = 0, feature_high = 0;
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
- feature_low = (feature_mask >> 0 ) & 0xffffffff;
- feature_high = (feature_mask >> 32) & 0xffffffff;
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- }
-
- return ret;
-}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
@@ -633,8 +663,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- uint64_t feature_mask = 0;
- int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
@@ -642,22 +670,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
WARN_ON(feature_id > feature->feature_num);
- feature_mask = 1ULL << feature_id;
-
- mutex_lock(&feature->mutex);
- ret = smu_feature_update_enable_state(smu, feature_mask, enable);
- if (ret)
- goto failed;
-
- if (enable)
- test_and_set_bit(feature_id, feature->enabled);
- else
- test_and_clear_bit(feature_id, feature->enabled);
-
-failed:
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return smu_feature_update_enable_state(smu,
+ 1ULL << feature_id,
+ enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
@@ -707,20 +722,27 @@ static int smu_set_funcs(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+
switch (adev->asic_type) {
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ vega20_set_ppt_funcs(smu);
+ break;
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ navi10_set_ppt_funcs(smu);
+ break;
case CHIP_ARCTURUS:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v11_0_set_smu_funcs(smu);
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ arcturus_set_ppt_funcs(smu);
+ /* OD is not supported on Arcturus */
+		smu->od_enabled = false;
break;
case CHIP_RENOIR:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v12_0_set_smu_funcs(smu);
+ renoir_set_ppt_funcs(smu);
break;
default:
return -EINVAL;
@@ -736,6 +758,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
+ smu->is_apu = false;
mutex_init(&smu->mutex);
return smu_set_funcs(adev);
@@ -749,11 +772,10 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT);
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_COMPLETE_INIT,
+ false);
return 0;
}
@@ -919,14 +941,9 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
- uint32_t i = 0;
- int32_t ret = 0;
+ int ret, i;
- if (table_count <= 0)
- return -EINVAL;
-
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
ret = amdgpu_bo_create_kernel(adev,
@@ -942,7 +959,7 @@ static int smu_init_fb_allocations(struct smu_context *smu)
return 0;
failed:
- for (; i > 0; i--) {
+ while (--i >= 0) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -957,13 +974,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
uint32_t i = 0;
- if (table_count == 0 || tables == NULL)
+ if (!tables)
return 0;
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -974,50 +990,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
-static int smu_override_pcie_parameters(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
- int ret;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
- pcie_gen = 3;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- pcie_gen = 2;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- pcie_gen = 1;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
- pcie_gen = 0;
-
- /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
- * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
- * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
- */
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
- pcie_width = 6;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
- pcie_width = 5;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
- pcie_width = 4;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
- pcie_width = 3;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
- pcie_width = 2;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
- pcie_width = 1;
-
- smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
- if (ret)
- pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
- return ret;
-}
-
static int smu_smc_table_hw_init(struct smu_context *smu,
bool initialize)
{
@@ -1092,8 +1064,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- /* issue RunAfllBtc msg */
- ret = smu_run_afll_btc(smu);
+ /* issue Run*Btc msg */
+ ret = smu_run_btc(smu);
if (ret)
return ret;
@@ -1106,10 +1078,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
if (adev->asic_type != CHIP_ARCTURUS) {
- ret = smu_override_pcie_parameters(smu);
- if (ret)
- return ret;
-
ret = smu_notify_display_change(smu);
if (ret)
return ret;
@@ -1138,6 +1106,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+ }
+
ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;
@@ -1147,7 +1121,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
if (ret)
return ret;
}
@@ -1226,29 +1200,46 @@ static int smu_free_memory_pool(struct smu_context *smu)
return ret;
}
-static int smu_hw_init(void *handle)
+static int smu_start_smc_engine(struct smu_context *smu)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (adev->asic_type < CHIP_NAVI10) {
- ret = smu_load_microcode(smu);
- if (ret)
- return ret;
+ if (smu->ppt_funcs->load_microcode) {
+ ret = smu->ppt_funcs->load_microcode(smu);
+ if (ret)
+ return ret;
+ }
}
}
- ret = smu_check_fw_status(smu);
+ if (smu->ppt_funcs->check_fw_status) {
+ ret = smu->ppt_funcs->check_fw_status(smu);
+ if (ret)
+ pr_err("SMC is not ready\n");
+ }
+
+ return ret;
+}
+
+static int smu_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ ret = smu_start_smc_engine(smu);
if (ret) {
- pr_err("SMC firmware status is not correct\n");
+ pr_err("SMU is not ready yet!\n");
return ret;
}
if (adev->flags & AMD_IS_APU) {
smu_powergate_sdma(&adev->smu, false);
smu_powergate_vcn(&adev->smu, false);
+ smu_set_gfx_cgpg(&adev->smu, true);
}
if (!smu->pm_enabled)
@@ -1291,6 +1282,11 @@ failed:
return ret;
}
+static int smu_stop_dpms(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1303,6 +1299,33 @@ static int smu_hw_fini(void *handle)
smu_powergate_vcn(&adev->smu, true);
}
+ ret = smu_stop_thermal_control(smu);
+ if (ret) {
+		pr_warn("Failed to stop thermal control!\n");
+ return ret;
+ }
+
+ /*
+	 * For custom pptable uploading, skip the DPM features
+	 * disable process on Navi1x ASICs:
+	 *   - The gfx related features are under the control of
+	 *     the RLC on those ASICs. RLC reinitialization would be
+	 *     needed to reenable them, which costs much more
+	 *     effort.
+	 *
+	 *   - The SMU firmware can handle the DPM reenablement
+	 *     properly.
+ */
+ if (!smu->uploading_custom_pp_table ||
+ !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+			pr_warn("Failed to stop DPMs!\n");
+ return ret;
+ }
+ }
+
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
@@ -1344,7 +1367,10 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
+ bool baco_feature_is_enabled = false;
+
+ if(!(adev->flags & AMD_IS_APU))
+ baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
@@ -1363,6 +1389,8 @@ static int smu_suspend(void *handle)
if (adev->asic_type >= CHIP_NAVI10 &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, false);
return 0;
}
@@ -1375,7 +1403,11 @@ static int smu_resume(void *handle)
pr_info("SMU is resuming...\n");
- mutex_lock(&smu->mutex);
+ ret = smu_start_smc_engine(smu);
+ if (ret) {
+ pr_err("SMU is not ready yet!\n");
+ goto failed;
+ }
ret = smu_smc_table_hw_init(smu, false);
if (ret)
@@ -1385,13 +1417,16 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
- mutex_unlock(&smu->mutex);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, true);
+
+ smu->disable_uclk_switch = 0;
pr_info("SMU is resumed successfully!\n");
return 0;
+
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1409,8 +1444,9 @@ int smu_display_configuration_change(struct smu_context *smu,
mutex_lock(&smu->mutex);
- smu_set_deep_sleep_dcefclk(smu,
- display_config->min_dcef_deep_sleep_set_clk / 100);
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
+ display_config->min_dcef_deep_sleep_set_clk / 100);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -1529,7 +1565,8 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
+
+ if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1587,9 +1624,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d
&soc_mask);
if (ret)
return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1653,7 +1690,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
workload = smu->workload_setting[index];
if (smu->power_profile_mode != workload)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
}
return ret;
@@ -1661,18 +1698,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id)
+ enum amd_pp_task task_id,
+ bool lock_needed)
{
int ret = 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_set_cpu_power_state(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1683,6 +1724,10 @@ int smu_handle_task(struct smu_context *smu,
break;
}
+out:
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -1715,7 +1760,7 @@ int smu_switch_power_profile(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
mutex_unlock(&smu->mutex);
@@ -1727,7 +1772,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
enum amd_dpm_forced_level level;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
mutex_lock(&(smu->mutex));
@@ -1742,15 +1787,22 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int ret = 0;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
ret = smu_enable_umd_pstate(smu, &level);
- if (ret)
+ if (ret) {
+ mutex_unlock(&smu->mutex);
return ret;
+ }
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+
+ mutex_unlock(&smu->mutex);
return ret;
}
@@ -1766,6 +1818,144 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
return ret;
}
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_debug("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = SMU_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = SMU_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = SMU_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ /* some asics may not support those messages */
+ if (smu_msg_get_index(smu, msg) < 0) {
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ ret = smu_send_smc_msg(smu, msg);
+ if (ret)
+ pr_err("[PrepareMp1] Failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ int ret = 0;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu->ppt_funcs->set_df_cstate(smu, state);
+ if (ret)
+ pr_err("[SetDfCstate] failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_write_watermarks_table(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+
+ table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+
+ if (!table->cpu_addr)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+ true);
+
+ return ret;
+}
+
+int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int ret = 0;
+ struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
+ void *table = watermarks->cpu_addr;
+
+ mutex_lock(&smu->mutex);
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
@@ -1802,3 +1992,559 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
.rev = 0,
.funcs = &smu_ip_funcs,
};
+
+int smu_load_microcode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->load_microcode)
+ ret = smu->ppt_funcs->load_microcode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_check_fw_status(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->check_fw_status)
+ ret = smu->ppt_funcs->check_fw_status(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_gfx_cgpg)
+ ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_rpm)
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_limit)
+ ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_limit)
+ ret = smu->ppt_funcs->set_power_limit(smu, limit);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->print_clk_levels)
+ ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_od_percentage)
+ ret = smu->ppt_funcs->get_od_percentage(smu, type);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_od_percentage)
+ ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->od_edit_dpm_table)
+ ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->read_sensor)
+ ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_profile_mode)
+ ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_get_fan_control_mode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_control_mode)
+ ret = smu->ppt_funcs->get_fan_control_mode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_control_mode(struct smu_context *smu, int value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_control_mode)
+ ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_percent)
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_percent)
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_rpm)
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_active_display_count)
+ ret = smu->ppt_funcs->set_active_display_count(smu, count);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type)
+ ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_high_clocks)
+ ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_latency)
+ ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_voltage)
+ ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_clock_voltage_request)
+ ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+{
+ int ret = -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_disable_memory_clock_switch)
+ ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_notify_smu_enable_pwe(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->notify_smu_enable_pwe)
+ ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_xgmi_pstate)
+ ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_azalia_d3_pme(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_azalia_d3_pme)
+ ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+bool smu_baco_is_support(struct smu_context *smu)
+{
+ bool ret = false;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_is_support)
+ ret = smu->ppt_funcs->baco_is_support(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
+{
+	if (!smu->ppt_funcs->baco_get_state)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+ *state = smu->ppt_funcs->baco_get_state(smu);
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+int smu_baco_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_reset)
+ ret = smu->ppt_funcs->baco_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_mode2_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->mode2_reset)
+ ret = smu->ppt_funcs->mode2_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+ ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_uclk_dpm_states)
+ ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_current_power_state)
+ pm_state = smu->ppt_funcs->get_current_power_state(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return pm_state;
+}
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_dpm_clock_table)
+ ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
+{
+ uint32_t ret = 0;
+
+ if (smu->ppt_funcs->get_pptable_power_limit)
+ ret = smu->ppt_funcs->get_pptable_power_limit(smu);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index d493a3f8c07a..58c7c4a3053e 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -36,6 +37,12 @@
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
#include "nbio/nbio_7_4_sh_mask.h"
+#include "amdgpu_xgmi.h"
+#include <linux/i2c.h>
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
@@ -112,8 +119,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown),
MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc),
- MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc),
- MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize),
@@ -172,6 +178,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(SMU_METRICS),
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(OVERDRIVE),
+ TAB_MAP(I2C_COMMANDS),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -294,6 +301,9 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
@@ -528,9 +538,17 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int arcturus_run_btc_afll(struct smu_context *smu)
+static int arcturus_run_btc(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ if (ret) {
+ pr_err("RunAfllBtc failed!\n");
+ return ret;
+ }
+
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -610,12 +628,17 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
return ret;
}
+ /*
+ * For DPM disabled case, there will be only one clock level.
+ * And it's safe to assume that is always the current clock.
+ */
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n", i,
clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_MCLK:
@@ -635,9 +658,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_SOCCLK:
@@ -657,9 +681,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_FCLK:
@@ -679,9 +704,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < single_dpm_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
default:
@@ -756,8 +782,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
uint32_t soft_min_level, soft_max_level;
int ret = 0;
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -792,91 +816,19 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
break;
case SMU_MCLK:
- single_dpm_table = &(dpm_table->mem_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
- break;
-
case SMU_SOCCLK:
- single_dpm_table = &(dpm_table->soc_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
- break;
-
case SMU_FCLK:
- single_dpm_table = &(dpm_table->fclk_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
+ /*
+ * Should not arrive here since Arcturus does not
+ * support mclk/socclk/fclk softmin/softmax settings
+ */
+ ret = -EINVAL;
break;
default:
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1043,7 +995,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -1186,6 +1138,7 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
{
struct arcturus_dpm_table *dpm_table =
(struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
uint32_t soft_level;
int ret = 0;
@@ -1199,40 +1152,27 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
dpm_table->gfx_table.dpm_state.soft_max_level =
dpm_table->gfx_table.dpm_levels[soft_level].value;
- /* uclk */
- if (highest)
- soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
- else
- soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
-
- dpm_table->mem_table.dpm_state.soft_min_level =
- dpm_table->mem_table.dpm_state.soft_max_level =
- dpm_table->mem_table.dpm_levels[soft_level].value;
-
- /* socclk */
- if (highest)
- soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
- else
- soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
-
- dpm_table->soc_table.dpm_state.soft_min_level =
- dpm_table->soc_table.dpm_state.soft_max_level =
- dpm_table->soc_table.dpm_levels[soft_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload boot level to %s!\n",
highest ? "highest" : "lowest");
return ret;
}
- ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload dpm max level to %s!\n!",
highest ? "highest" : "lowest");
return ret;
}
+ if (hive)
+ /*
+ * Force XGMI Pstate to highest or lowest
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, highest ? 1 : 0);
+
return ret;
}
@@ -1240,6 +1180,7 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu)
{
struct arcturus_dpm_table *dpm_table =
(struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
uint32_t soft_min_level, soft_max_level;
int ret = 0;
@@ -1251,34 +1192,25 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu)
dpm_table->gfx_table.dpm_state.soft_max_level =
dpm_table->gfx_table.dpm_levels[soft_max_level].value;
- /* uclk */
- soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
- soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
- dpm_table->mem_table.dpm_state.soft_min_level =
- dpm_table->gfx_table.dpm_levels[soft_min_level].value;
- dpm_table->mem_table.dpm_state.soft_max_level =
- dpm_table->gfx_table.dpm_levels[soft_max_level].value;
-
- /* socclk */
- soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
- soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
- dpm_table->soc_table.dpm_state.soft_min_level =
- dpm_table->soc_table.dpm_levels[soft_min_level].value;
- dpm_table->soc_table.dpm_state.soft_max_level =
- dpm_table->soc_table.dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload DPM Bootup Levels!");
return ret;
}
- ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload DPM Max Levels!");
return ret;
}
+ if (hive)
+ /*
+ * Reset XGMI Pstate back to default
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, 0);
+
return ret;
}
@@ -1329,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu,
static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1360,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
@@ -1891,6 +1816,260 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+ if (ret) {
+ pr_err("[EnableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+ if (ret) {
+ pr_err("[DisableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+
+static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
+ uint8_t address, uint32_t numbytes,
+ uint8_t *data)
+{
+ int i;
+
+ BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+ req->I2CcontrollerPort = 0;
+ req->I2CSpeed = 2;
+ req->SlaveAddress = address;
+ req->NumCmds = numbytes;
+
+ for (i = 0; i < numbytes; i++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+		/* First 2 bytes are always a write carrying the 2-byte EEPROM address */
+ if (i < 2)
+ cmd->Cmd = 1;
+ else
+ cmd->Cmd = write;
+
+
+ /* Add RESTART for read after address filled */
+ cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+ /* Add STOP in the end */
+ cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+ /* Fill with data regardless if read or write to simplify code */
+ cmd->RegisterAddr = data[i];
+ }
+}
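For concreteness, what the helper above builds for a 4-byte read at EEPROM offset 0x0010 (a sketch derived from the code; data[] carries the big-endian offset in its first two bytes, as the transfer function below arranges):

	/*
	 * numbytes            = 6, data[] = { 0x00, 0x10, -, -, -, - }
	 * req.NumCmds         = 6
	 * req.SwI2cCmds[0..1] : Cmd = 1 (write), RegisterAddr = 0x00 / 0x10
	 * req.SwI2cCmds[2]    : Cmd = 0 (read), CmdConfig |= CMDCONFIG_RESTART_MASK
	 * req.SwI2cCmds[5]    : CmdConfig |= CMDCONFIG_STOP_MASK
	 */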
+
+static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t i, ret = 0;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ /* Now read data starting with that address */
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+ /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+ for (i = 0; i < numbytes; i++)
+ data[i] = res->SwI2cCmds[i].Data;
+
+ pr_debug("arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ } else
+ pr_err("arcturus_i2c_eeprom_read_data - error occurred :%x", ret);
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t ret;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ pr_debug("arcturus_i2c_write(), address = %x, bytes = %d , data: ",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ /*
+		 * According to the EEPROM spec, the EEPROM can need up to 10 ms
+		 * to flush its internal RX buffer after STOP is issued at the
+		 * end of a write transaction. During this time the EEPROM will
+		 * not respond to any further commands - so wait a bit more.
+ */
+ msleep(10);
+
+ } else
+ pr_err("arcturus_i2c_write- error occurred :%x", ret);
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+ uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+ for (i = 0; i < num; i++) {
+ /*
+ * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
+		 * once, so the data needs to be split into chunks and each
+		 * chunk sent separately
+ */
+ data_size = msgs[i].len - 2;
+ data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+ next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+ data_ptr = msgs[i].buf + 2;
+
+ for (j = 0; j < data_size / data_chunk_size; j++) {
+			/* Insert the EEPROM destination address, bits 0-15 */
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+
+ memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+ } else {
+
+ memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+
+ next_eeprom_addr += data_chunk_size;
+ data_ptr += data_chunk_size;
+ }
+
+ if (data_size % data_chunk_size) {
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+
+ memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ return num;
+}
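As a worked example of the chunking above: if MAX_SW_I2C_COMMANDS were 8 (an assumed value here; the real one comes from the SMU11 interface header), data_chunk_size is 6, so a message carrying 20 data bytes after its 2 address bytes goes out as three full 6-byte chunks followed by a 2-byte remainder handled by the data_size % data_chunk_size branch, each chunk prefixed with a freshly recomputed 2-byte EEPROM offset.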
+
+static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+
+static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
+ .master_xfer = arcturus_i2c_eeprom_i2c_xfer,
+ .functionality = arcturus_i2c_eeprom_i2c_func,
+};
+
+static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &arcturus_i2c_eeprom_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
+{
+ i2c_del_adapter(control);
+}
+
+static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -1909,7 +2088,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
/* btc */
- .run_afll_btc = arcturus_run_btc_afll,
+ .run_btc = arcturus_run_btc,
/* dpm/clk tables */
.set_default_dpm_table = arcturus_set_default_dpm_table,
.populate_umd_state_clk = arcturus_populate_umd_state_clk,
@@ -1929,12 +2108,62 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
.is_dpm_running = arcturus_is_dpm_running,
+ .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
+ .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
+ .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+	.baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .get_pptable_power_limit = arcturus_get_pptable_power_limit,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &arcturus_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index cc63705920dc..2773966ae434 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
- vega12_baco.o smu9_baco.o
+ vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \
+ ci_baco.o smu7_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
new file mode 100644
index 000000000000..3be40114e63d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "ci_baco.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK + BACO_CNTL__PWRGOOD_MEM_MASK + BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
+};
+
+int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+		/* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
new file mode 100644
index 000000000000..17041f187020
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_BACO_H__
+#define __CI_BACO_H__
+#include "smu7_baco.h"
+
+extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
index 9c57c1f67749..1c73776bd606 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
@@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m
return ret;
}
+bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size)
+{
+ u32 i, reg = 0;
+
+ for (i = 0; i < array_size; i++) {
+ if ((entry[i].cmd == CMD_WRITE) ||
+ (entry[i].cmd == CMD_READMODIFYWRITE) ||
+ (entry[i].cmd == CMD_WAITFOR))
+ reg = entry[i].reg_offset;
+ if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
+ entry[i].shift, entry[i].val, entry[i].timeout))
+ return false;
+ }
+
+ return true;
+}
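A minimal usage sketch of the table-driven interface above; the two entries are lifted from the enter-BACO sequences added elsewhere in this series, and the error handling is illustrative only:

	static const struct baco_cmd_entry example_tbl[] = {
		/* set BACO_EN, then poll BACO_MODE until it reads back 0x40 */
		{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK,
		  BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
		{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK,
		  0, 0xffffffff, 0x40 },
	};

	if (!baco_program_registers(hwmgr, example_tbl, ARRAY_SIZE(example_tbl)))
		return -EINVAL;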
+
bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
index 95296c916f4e..8393eb62706d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
@@ -33,6 +33,15 @@ enum baco_cmd_type {
CMD_DELAY_US,
};
+struct baco_cmd_entry {
+ enum baco_cmd_type cmd;
+ uint32_t reg_offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t timeout;
+ uint32_t val;
+};
+
struct soc15_baco_cmd_entry {
enum baco_cmd_type cmd;
uint32_t hwip;
@@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry {
uint32_t timeout;
uint32_t val;
};
+
+extern bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size);
extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
new file mode 100644
index 000000000000..c0368f2dfb21
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "fiji_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 }
+};
+
+int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
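
The command tables in these new BACO files are built with positional initializers, so it helps to spell out what each column is doing. The sketch below is illustrative only: the authoritative struct lives in common_baco.h, which this diff does not touch, and the field names here are assumptions inferred from how the gpio/use_bclk/enter_baco tables are populated.

    /* Assumed layout of a BACO command-table entry (names are guesses, not the real header) */
    struct baco_cmd_entry_sketch {
    	u32 cmd;      /* CMD_WRITE, CMD_READMODIFYWRITE, CMD_WAITFOR, CMD_DELAY_US, ... */
    	u32 reg;      /* MMIO register offset, e.g. mmBACO_CNTL (unused by CMD_DELAY_*) */
    	u32 mask;     /* bitfield mask for read-modify-write and wait-for polling */
    	u32 shift;    /* bitfield shift applied to val on read-modify-write */
    	u32 timeout;  /* poll budget for CMD_WAITFOR, delay length for CMD_DELAY_* */
    	u32 val;      /* value to write, field value, or expected polled value */
    };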
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
new file mode 100644
index 000000000000..47f402900bdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __FIJI_BACO_H__
+#define __FIJI_BACO_H__
+#include "smu7_baco.h"
+
+extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index a24beaa4fb01..d2909c91d65b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev;
+
if (!hwmgr)
return -EINVAL;
@@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_init_workload_prority(hwmgr);
hwmgr->gfxoff_state_changed_by_workload = false;
+ adev = hwmgr->adev;
+
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &ci_smu_funcs;
ci_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
@@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->od_enabled = false;
hwmgr->smumgr_funcs = &smu8_smu_funcs;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
@@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
@@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
vega12_hwmgr_init(hwmgr);
break;
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega20_smu_funcs;
vega20_hwmgr_init(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
new file mode 100644
index 000000000000..8f8e296f2fe9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "polaris_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl_vg[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl_vg[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ if (hwmgr->chip_id == CHIP_VEGAM) {
+ baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg));
+ baco_program_registers(hwmgr, turn_off_plls_tbl_vg,
+ ARRAY_SIZE(turn_off_plls_tbl_vg));
+ } else {
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ }
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
new file mode 100644
index 000000000000..87a5fa0a157a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __POLARIS_BACO_H__
+#define __POLARIS_BACO_H__
+#include "smu7_baco.h"
+
+extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
new file mode 100644
index 000000000000..044cda005aed
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smu7_baco.h"
+#include "tonga_baco.h"
+#include "fiji_baco.h"
+#include "polaris_baco.h"
+#include "ci_baco.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
+ reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
+
+ if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
+ *cap = true;
+
+ return 0;
+}
+
+int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32(mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
+
+int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ return tonga_baco_set_state(hwmgr, state);
+ case CHIP_FIJI:
+ return fiji_baco_set_state(hwmgr, state);
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ return polaris_baco_set_state(hwmgr, state);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return ci_baco_set_state(hwmgr, state);
+#endif
+ default:
+ return -EINVAL;
+ }
+}
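
As a usage sketch (a hypothetical helper, not part of this patch): with the new smu7 hooks, a full BACO reset cycle reduces to a capability check followed by driving the state IN and back OUT. The mandatory 20 ms regulator gap is already handled inside the OUT path of each ASIC-specific set_state routine, so the caller does not need its own delay.

    static int example_smu7_baco_reset(struct pp_hwmgr *hwmgr)
    {
    	bool cap = false;
    	int ret;

    	ret = smu7_baco_get_capability(hwmgr, &cap);
    	if (ret || !cap)
    		return -EOPNOTSUPP;	/* BACO not strapped/fused on this board */

    	ret = smu7_baco_set_state(hwmgr, BACO_STATE_IN);
    	if (ret)
    		return ret;

    	/* the exit path sleeps 20 ms before reprogramming the PLLs */
    	return smu7_baco_set_state(hwmgr, BACO_STATE_OUT);
    }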
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
new file mode 100644
index 000000000000..be0d98abb536
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU7_BACO_H__
+#define __SMU7_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 203ce4b1028f..f73dff68e799 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,7 @@
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
+#include "smu7_baco.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
table_size = lookup_table->count;
PP_ASSERT_WITH_CODE(0 != lookup_table->count,
@@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -3983,6 +3982,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables the AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
tmp_result = smu7_update_avfs(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update avfs voltages!",
@@ -5158,6 +5164,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
+ .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_asic_baco_state = smu7_baco_get_state,
+ .set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
new file mode 100644
index 000000000000..ea743bea8e29
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "tonga_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry gpio_tbl_iceland[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
+};
+
+static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl_iceland[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (hwmgr->chip_id == CHIP_TOPAZ)
+ baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland));
+ else
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (hwmgr->chip_id == CHIP_TOPAZ) {
+ if (baco_program_registers(hwmgr, exit_baco_tbl_iceland,
+ ARRAY_SIZE(exit_baco_tbl_iceland))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl_iceland,
+ ARRAY_SIZE(clean_baco_tbl_iceland)))
+ return 0;
+ }
+ } else {
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
new file mode 100644
index 000000000000..5dc16cc8a295
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __TONGA_BACO_H__
+#define __TONGA_BACO_H__
+#include "smu7_baco.h"
+
+extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index beacfffbdc3e..d71a492c87a3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
"Lookup table is empty", return -EINVAL);
@@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -3691,6 +3689,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables the AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
vega10_update_avfs(hwmgr);
/*
@@ -5265,6 +5270,59 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return 0;
}
+static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t feature_mask = 0;
+
+ if (disable) {
+ feature_mask |= data->smu_features[GNLD_ULV].enabled ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ } else {
+ feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ }
+
+ if (feature_mask)
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+ !disable, feature_mask),
+ "enable/disable power features for compute performance Failed!",
+ return -EINVAL);
+
+ if (disable) {
+ data->smu_features[GNLD_ULV].enabled = false;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = false;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = false;
+ data->smu_features[GNLD_DS_LCLK].enabled = false;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
+ } else {
+ data->smu_features[GNLD_ULV].enabled = true;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = true;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = true;
+ data->smu_features[GNLD_DS_LCLK].enabled = true;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
+ }
+
+ return 0;
+
+}
+
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -5332,6 +5390,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_ppfeature_status = vega10_get_ppfeature_status,
.set_ppfeature_status = vega10_set_ppfeature_status,
.set_mp1_state = vega10_set_mp1_state,
+ .disable_power_features_for_compute_performance =
+ vega10_disable_power_features_for_compute_performance,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index df6ff9252401..9b5e72bdceca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -29,7 +29,7 @@
#include "vega20_baco.h"
#include "vega20_smumgr.h"
-
+#include "amdgpu_ras.h"
static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{
@@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum BACO_STATE cur_state;
uint32_t data;
@@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
if (state == BACO_STATE_IN) {
- data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
- data |= 0x80000000;
- WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
-
-
- if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
- return -EINVAL;
+ if (!ras || !ras->supported) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+ } else {
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 1))
+ return -EINVAL;
+ }
} else if (state == BACO_STATE_OUT) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f5915308e643..5bcf0d684151 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_BACO);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
@@ -490,8 +493,8 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- if (adev->in_baco_reset) {
- adev->in_baco_reset = 0;
+ if (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
@@ -4155,6 +4158,38 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
return res;
}
+static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
+ enum pp_df_cstate state)
+{
+ int ret;
+
+ /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */
+ if (hwmgr->smu_version < 0x283200) {
+ pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ if (ret)
+ pr_err("SetDfCstate failed!\n");
+
+ return ret;
+}
+
+static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
+ uint32_t pstate)
+{
+ int ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetXgmiMode,
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ if (ret)
+ pr_err("SetXgmiPstate failed!\n");
+
+ return ret;
+}
+
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
.backend_init = vega20_hwmgr_backend_init,
@@ -4223,6 +4258,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.set_asic_baco_state = vega20_baco_set_state,
.set_mp1_state = vega20_set_mp1_state,
.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
+ .set_df_cstate = vega20_set_df_cstate,
+ .set_xgmi_pstate = vega20_set_xgmi_pstate,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
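
For context, a hedged sketch of how the two new vega20 hooks would typically be reached from the amd_pm layer: through the pp_hwmgr_func table with the usual NULL checks. The helper below is purely illustrative, is not part of this patch, and assumes the conventional hwmgr->hwmgr_func wiring.

    static int example_set_df_cstate(struct pp_hwmgr *hwmgr, enum pp_df_cstate state)
    {
    	/* illustrative only; mirrors the usual optional-callback pattern */
    	if (!hwmgr || !hwmgr->hwmgr_func || !hwmgr->hwmgr_func->set_df_cstate)
    		return -EOPNOTSUPP;

    	return hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
    }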
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 23171a4d9a31..031e0c22fcc7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -259,10 +259,8 @@ struct smu_table_context
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
- uint32_t table_count;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
- uint16_t TDPODLimit;
void *overdrive_table;
};
@@ -322,6 +320,13 @@ struct mclock_latency_table {
struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};
+enum smu_reset_mode
+{
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+};
+
enum smu_baco_state
{
SMU_BACO_STATE_ENTER = 0,
@@ -341,7 +346,6 @@ struct smu_context
struct amdgpu_device *adev;
struct amdgpu_irq_src *irq_source;
- const struct smu_funcs *funcs;
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
struct mutex sensor_lock;
@@ -382,11 +386,15 @@ struct smu_context
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
bool pm_enabled;
+ bool is_apu;
uint32_t smc_if_version;
+ bool uploading_custom_pp_table;
};
+struct i2c_adapter;
+
struct pptable_funcs {
int (*alloc_dpm_context)(struct smu_context *smu);
int (*store_powerplay_table)(struct smu_context *smu);
@@ -398,7 +406,7 @@ struct pptable_funcs {
int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
- int (*run_afll_btc)(struct smu_context *smu);
+ int (*run_btc)(struct smu_context *smu);
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
int (*set_default_dpm_table)(struct smu_context *smu);
@@ -459,17 +467,19 @@ struct pptable_funcs {
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
void (*dump_pptable)(struct smu_context *smu);
int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default);
- int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max);
-};
-
-struct smu_funcs
-{
+ int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq);
+ int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+ int (*i2c_eeprom_init)(struct i2c_adapter *control);
+ void (*i2c_eeprom_fini)(struct i2c_adapter *control);
+ int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
int (*init_microcode)(struct smu_context *smu);
+ int (*load_microcode)(struct smu_context *smu);
int (*init_smc_tables)(struct smu_context *smu);
int (*fini_smc_tables)(struct smu_context *smu);
int (*init_power)(struct smu_context *smu);
int (*fini_power)(struct smu_context *smu);
- int (*load_microcode)(struct smu_context *smu);
int (*check_fw_status)(struct smu_context *smu);
int (*setup_pptable)(struct smu_context *smu);
int (*get_vbios_bootup_values)(struct smu_context *smu);
@@ -485,7 +495,6 @@ struct smu_funcs
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
- int (*write_watermarks_table)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
@@ -499,8 +508,7 @@ struct smu_funcs
int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
int (*start_thermal_control)(struct smu_context *smu);
- int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
+ int (*stop_thermal_control)(struct smu_context *smu);
int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
@@ -522,8 +530,6 @@ struct smu_funcs
int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
struct smu_clock_info *clocks);
int (*notify_smu_enable_pwe)(struct smu_context *smu);
- int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
@@ -538,234 +544,90 @@ struct smu_funcs
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
int (*baco_reset)(struct smu_context *smu);
+ int (*mode2_reset)(struct smu_context *smu);
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*override_pcie_parameters)(struct smu_context *smu);
+ uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
};
-#define smu_init_microcode(smu) \
- ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0)
-#define smu_init_smc_tables(smu) \
- ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0)
-#define smu_fini_smc_tables(smu) \
- ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0)
-#define smu_init_power(smu) \
- ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
-#define smu_fini_power(smu) \
- ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
-#define smu_load_microcode(smu) \
- ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
-#define smu_check_fw_status(smu) \
- ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
-#define smu_setup_pptable(smu) \
- ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0)
-#define smu_powergate_sdma(smu, gate) \
- ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0)
-#define smu_powergate_vcn(smu, gate) \
- ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0)
-#define smu_set_gfx_cgpg(smu, enabled) \
- ((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0)
-#define smu_get_vbios_bootup_values(smu) \
- ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
-#define smu_get_clk_info_from_vbios(smu) \
- ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0)
-#define smu_check_pptable(smu) \
- ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
-#define smu_parse_pptable(smu) \
- ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
-#define smu_populate_smc_tables(smu) \
- ((smu)->funcs->populate_smc_tables ? (smu)->funcs->populate_smc_tables((smu)) : 0)
-#define smu_check_fw_version(smu) \
- ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
-#define smu_write_pptable(smu) \
- ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0)
-#define smu_set_min_dcef_deep_sleep(smu) \
- ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0)
-#define smu_set_tool_table_location(smu) \
- ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0)
-#define smu_notify_memory_pool_location(smu) \
- ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
-#define smu_gfx_off_control(smu, enable) \
- ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0)
-
-#define smu_write_watermarks_table(smu) \
- ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
-#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
- ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
-#define smu_system_features_control(smu, en) \
- ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0)
-#define smu_init_max_sustainable_clocks(smu) \
- ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
-#define smu_set_default_od_settings(smu, initialize) \
- ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_set_fan_speed_rpm(smu, speed) \
- ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
-#define smu_send_smc_msg(smu, msg) \
- ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0)
-#define smu_alloc_dpm_context(smu) \
- ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
-#define smu_init_display_count(smu, count) \
- ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0)
-#define smu_feature_set_allowed_mask(smu) \
- ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
-#define smu_feature_get_enabled_mask(smu, mask, num) \
- ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
-#define smu_is_dpm_running(smu) \
- ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
-#define smu_notify_display_change(smu) \
- ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
-#define smu_store_powerplay_table(smu) \
- ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
-#define smu_check_powerplay_table(smu) \
- ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
-#define smu_append_powerplay_table(smu) \
- ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
-#define smu_set_default_dpm_table(smu) \
- ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
-#define smu_populate_umd_state_clk(smu) \
- ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
-#define smu_set_default_od8_settings(smu) \
- ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
-#define smu_get_power_limit(smu, limit, def) \
- ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0)
-#define smu_set_power_limit(smu, limit) \
- ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
-#define smu_get_current_clk_freq(smu, clk_id, value) \
- ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
-#define smu_print_clk_levels(smu, clk_type, buf) \
- ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
-#define smu_force_clk_levels(smu, clk_type, level) \
- ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0)
-#define smu_get_od_percentage(smu, type) \
- ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
-#define smu_set_od_percentage(smu, type, value) \
- ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
-#define smu_od_edit_dpm_table(smu, type, input, size) \
- ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
-#define smu_tables_init(smu, tab) \
- ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
-#define smu_set_thermal_fan_table(smu) \
- ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
-#define smu_start_thermal_control(smu) \
- ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
-#define smu_read_sensor(smu, sensor, data, size) \
- ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
-#define smu_smc_read_sensor(smu, sensor, data, size) \
- ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
-#define smu_get_power_profile_mode(smu, buf) \
- ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0)
-#define smu_set_power_profile_mode(smu, param, param_size) \
- ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
-#define smu_pre_display_config_changed(smu) \
- ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
-#define smu_display_config_changed(smu) \
- ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
-#define smu_apply_clocks_adjust_rules(smu) \
- ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
-#define smu_notify_smc_dispaly_config(smu) \
- ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
-#define smu_force_dpm_limit_value(smu, highest) \
- ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
-#define smu_unforce_dpm_levels(smu) \
- ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
-#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
- ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
-#define smu_set_cpu_power_state(smu) \
- ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
-#define smu_get_fan_control_mode(smu) \
- ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
-#define smu_set_fan_control_mode(smu, value) \
- ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
-#define smu_get_fan_speed_percent(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
-#define smu_set_fan_speed_percent(smu, speed) \
- ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
-#define smu_get_fan_speed_rpm(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
-
-#define smu_msg_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_clk_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_feature_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_table_get_index(smu, tab) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
-#define smu_power_get_index(smu, src) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
-#define smu_workload_get_type(smu, profile) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
-#define smu_run_afll_btc(smu) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
-#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
-#define smu_set_deep_sleep_dcefclk(smu, clk) \
- ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
-#define smu_set_active_display_count(smu, count) \
- ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
-#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
- ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
-#define smu_get_clock_by_type(smu, type, clocks) \
- ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
-#define smu_get_max_high_clocks(smu, clocks) \
- ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
-#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0)
-#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
-#define smu_display_clock_voltage_request(smu, clock_req) \
- ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
-#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \
- ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL)
-#define smu_get_dal_power_level(smu, clocks) \
- ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
-#define smu_get_perf_level(smu, designation, level) \
- ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
-#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
- ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
-#define smu_notify_smu_enable_pwe(smu) \
- ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
-#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
- ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
-#define smu_dpm_set_uvd_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
-#define smu_dpm_set_vce_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
-#define smu_set_xgmi_pstate(smu, pstate) \
- ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
-#define smu_set_watermarks_table(smu, tab, clock_ranges) \
- ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
-#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
- ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
-#define smu_thermal_temperature_range_update(smu, range, rw) \
- ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
-#define smu_get_thermal_temperature_range(smu, range) \
- ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
-#define smu_register_irq_handler(smu) \
- ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
-#define smu_set_azalia_d3_pme(smu) \
- ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
-#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
- ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
-#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
- ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
-#define smu_baco_is_support(smu) \
- ((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false)
-#define smu_baco_get_state(smu, state) \
- ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
-#define smu_baco_reset(smu) \
- ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
-#define smu_asic_set_performance_level(smu, level) \
- ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
-#define smu_dump_pptable(smu) \
- ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
-#define smu_get_dpm_uclk_limited(smu, clock, max) \
- ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL)
+int smu_load_microcode(struct smu_context *smu);
+
+int smu_check_fw_status(struct smu_context *smu);
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+#define smu_i2c_eeprom_init(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
+#define smu_i2c_eeprom_fini(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed);
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type);
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value);
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf);
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed);
+int smu_get_fan_control_mode(struct smu_context *smu);
+int smu_set_fan_control_mode(struct smu_context *smu, int value);
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed);
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks);
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req);
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
+int smu_notify_smu_enable_pwe(struct smu_context *smu);
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_set_azalia_d3_pme(struct smu_context *smu);
+
+bool smu_baco_is_support(struct smu_context *smu);
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
+
+int smu_baco_reset(struct smu_context *smu);
+int smu_mode2_reset(struct smu_context *smu);
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
@@ -799,6 +661,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+int smu_write_watermarks_table(struct smu_context *smu);
+int smu_set_watermarks_for_clock_ranges(
+ struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
/* smu to display interface */
extern int smu_display_configuration_change(struct smu_context *smu, const
@@ -809,7 +675,8 @@ extern int smu_get_current_clocks(struct smu_context *smu,
extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id);
+ enum amd_pp_task task_id,
+ bool lock_needed);
int smu_switch_power_profile(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE type,
bool en);
@@ -819,7 +686,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *value);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max);
+ uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -828,10 +695,29 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed);
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state);
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states);
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table);
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
#endif
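The helper macros removed above become exported functions, and several entry points (smu_get_power_limit(), smu_set_power_profile_mode(), smu_handle_task(), smu_get_dpm_freq_range(), smu_force_clk_levels()) grow a lock_needed flag so callers can state whether they already hold smu->mutex. A minimal caller-side sketch under that assumption (the wrapper function name is illustrative, not part of this patch):

static int example_query_gfxclk_range(struct smu_context *smu,
				      uint32_t *min_freq, uint32_t *max_freq)
{
	/* An external caller does not hold smu->mutex yet, so it asks the
	 * helper to take it; code paths already under the lock (such as the
	 * navi10_ppt.c hunks later in this patch) pass false instead. */
	return smu_get_dpm_freq_range(smu, SMU_GFXCLK, min_freq, max_freq, true);
}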
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index 78e5927b7711..e3291259b249 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -95,8 +95,7 @@
//BTC
#define PPSMC_MSG_RunAfllBtc 0x30
-#define PPSMC_MSG_RunGfxDcBtc 0x31
-#define PPSMC_MSG_RunSocDcBtc 0x32
+#define PPSMC_MSG_RunDcBtc 0x31
//Debug
#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 7bf9a14bfa0b..af977675fd33 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -355,6 +355,10 @@ struct pp_hwmgr_func {
int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire);
+ int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
+ int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
+ bool disable);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index e02950b505fa..a886f0644d24 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -137,29 +137,29 @@
#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
-#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
+#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT )
#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT )
-#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT )
+#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT )
#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT )
#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT )
#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
-#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT )
+#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT )
#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
-#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT )
-#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK )
+#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
+#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
//FIXME need updating
// Debug Overrides Bitmask
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001
-#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002
+#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002
// I2C Config Bit Defines
#define I2C_CONTROLLER_ENABLED 1
@@ -423,18 +423,30 @@ typedef enum {
} PwrConfig_e;
typedef enum {
- XGMI_LINK_RATE_12 = 0, // 12Gbps
- XGMI_LINK_RATE_16, // 16Gbps
- XGMI_LINK_RATE_22, // 22Gbps
- XGMI_LINK_RATE_25, // 25Gbps
+ XGMI_LINK_RATE_2 = 2, // 2Gbps
+ XGMI_LINK_RATE_4 = 4, // 4Gbps
+ XGMI_LINK_RATE_8 = 8, // 8Gbps
+ XGMI_LINK_RATE_12 = 12, // 12Gbps
+ XGMI_LINK_RATE_16 = 16, // 16Gbps
+ XGMI_LINK_RATE_17 = 17, // 17Gbps
+ XGMI_LINK_RATE_18 = 18, // 18Gbps
+ XGMI_LINK_RATE_19 = 19, // 19Gbps
+ XGMI_LINK_RATE_20 = 20, // 20Gbps
+ XGMI_LINK_RATE_21 = 21, // 21Gbps
+ XGMI_LINK_RATE_22 = 22, // 22Gbps
+ XGMI_LINK_RATE_23 = 23, // 23Gbps
+ XGMI_LINK_RATE_24 = 24, // 24Gbps
+ XGMI_LINK_RATE_25 = 25, // 25Gbps
XGMI_LINK_RATE_COUNT
} XGMI_LINK_RATE_e;
typedef enum {
- XGMI_LINK_WIDTH_2 = 0, // x2
- XGMI_LINK_WIDTH_4, // x4
- XGMI_LINK_WIDTH_8, // x8
- XGMI_LINK_WIDTH_16, // x16
+ XGMI_LINK_WIDTH_1 = 1, // x1
+ XGMI_LINK_WIDTH_2 = 2, // x2
+ XGMI_LINK_WIDTH_4 = 4, // x4
+ XGMI_LINK_WIDTH_8 = 8, // x8
+ XGMI_LINK_WIDTH_9 = 9, // x9
+ XGMI_LINK_WIDTH_16 = 16, // x16
XGMI_LINK_WIDTH_COUNT
} XGMI_LINK_WIDTH_e;
@@ -696,7 +708,11 @@ typedef struct {
uint8_t GpioI2cSda; // Serial Data
uint16_t GpioPadding;
- uint32_t BoardReserved[9];
+ // Platform input telemetry voltage coefficient
+ uint32_t BoardVoltageCoeffA; // decode by /1000
+ uint32_t BoardVoltageCoeffB; // decode by /1000
+
+ uint32_t BoardReserved[7];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8]; // SMU internal use
@@ -802,7 +818,7 @@ typedef struct {
uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units
- uint32_t EnabledAvfsModules[2];
+ uint32_t EnabledAvfsModules[3];
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsFuseOverride_t;
@@ -865,7 +881,8 @@ typedef struct {
//#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 7
#define TABLE_WAFL_XGMI_TOPOLOGY 8
-#define TABLE_COUNT 9
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_COUNT 10
// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
typedef enum {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index b0dd05d431dd..d8c9b7f91fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -114,6 +114,7 @@
__SMU_DUMMY_MAP(PowerDownJpeg), \
__SMU_DUMMY_MAP(BacoAudioD3PME), \
__SMU_DUMMY_MAP(ArmD3), \
+ __SMU_DUMMY_MAP(RunDcBtc), \
__SMU_DUMMY_MAP(RunGfxDcBtc), \
__SMU_DUMMY_MAP(RunSocDcBtc), \
__SMU_DUMMY_MAP(SetMemoryChannelEnable), \
@@ -168,6 +169,7 @@
__SMU_DUMMY_MAP(PowerGateAtHub), \
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
+ __SMU_DUMMY_MAP(DFCstateControl), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -251,6 +253,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \
__SMU_DUMMY_MAP(MMHUB_PG), \
__SMU_DUMMY_MAP(ATHUB_PG), \
+ __SMU_DUMMY_MAP(APCC_DFLL), \
__SMU_DUMMY_MAP(WAFL_CG),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 5bda8539447a..606149085683 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x09
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x10
#define SMU11_DRIVER_IF_VERSION_NV10 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x34
@@ -48,6 +48,8 @@
#define SMU11_TOOL_SIZE 0x19000
+#define MAX_PCIE_CONF 2
+
#define CLK_MAP(clk, index) \
[SMU_##clk] = {1, (index)}
@@ -88,6 +90,11 @@ struct smu_11_0_dpm_table {
uint32_t max; /* MHz */
};
+struct smu_11_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+};
+
struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table soc_table;
struct smu_11_0_dpm_table gfx_table;
@@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table display_table;
struct smu_11_0_dpm_table phy_table;
struct smu_11_0_dpm_table fclk_table;
+ struct smu_11_0_pcie_table pcie_table;
};
struct smu_11_0_dpm_context {
@@ -130,6 +138,128 @@ enum smu_v11_0_baco_seq {
BACO_SEQ_COUNT,
};
-void smu_v11_0_set_smu_funcs(struct smu_context *smu);
+int smu_v11_0_init_microcode(struct smu_context *smu);
+
+int smu_v11_0_load_microcode(struct smu_context *smu);
+
+int smu_v11_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_init_power(struct smu_context *smu);
+
+int smu_v11_0_fini_power(struct smu_context *smu);
+
+int smu_v11_0_check_fw_status(struct smu_context *smu);
+
+int smu_v11_0_setup_pptable(struct smu_context *smu);
+
+int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu);
+
+int smu_v11_0_check_pptable(struct smu_context *smu);
+
+int smu_v11_0_parse_pptable(struct smu_context *smu);
+
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu);
+
+int smu_v11_0_check_fw_version(struct smu_context *smu);
+
+int smu_v11_0_write_pptable(struct smu_context *smu);
+
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+
+int smu_v11_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v11_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
+
+int
+smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param);
+
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_v11_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num);
+
+int smu_v11_0_notify_display_change(struct smu_context *smu);
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n);
+
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value);
+
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu);
+
+int smu_v11_0_start_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_stop_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
+
+int
+smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request
+ *clock_req);
+
+uint32_t
+smu_v11_0_get_fan_control_mode(struct smu_context *smu);
+
+int
+smu_v11_0_set_fan_control_mode(struct smu_context *smu,
+ uint32_t mode);
+
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed);
+
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v11_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu);
+
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+bool smu_v11_0_baco_is_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
+
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v11_0_baco_reset(struct smu_context *smu);
+
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size);
+
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
#endif
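With smu_v11_0_set_smu_funcs() removed, the common SMU v11 implementations declared above are referenced directly from each ASIC's pptable_funcs table (the navi10_ppt.c hunk near the end of this patch does exactly that). A trimmed sketch of the pattern for another SMU v11 ASIC, with the table name being illustrative:

static const struct pptable_funcs example_ppt_funcs = {
	/* Shared SMU v11 code is wired in per ASIC rather than installed
	 * into a separate smu->funcs vtable. */
	.init_microcode		= smu_v11_0_init_microcode,
	.load_microcode		= smu_v11_0_load_microcode,
	.check_fw_version	= smu_v11_0_check_fw_version,
	.send_smc_msg		= smu_v11_0_send_msg,
	.read_smc_arg		= smu_v11_0_read_arg,
	.baco_reset		= smu_v11_0_baco_reset,
};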
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index 86cdc3393eac..b2f96a101124 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table
struct smu_11_0_power_saving_clock_table power_saving_clock;
struct smu_11_0_overdrive_table overdrive_table;
+#ifndef SMU_11_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+#endif
} __attribute__((packed));
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index acf3db12f59f..9b9f5df0911c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -37,6 +37,45 @@ struct smu_12_0_cmn2aisc_mapping {
int map_to;
};
-void smu_v12_0_set_smu_funcs(struct smu_context *smu);
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg);
+
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v12_0_wait_for_response(struct smu_context *smu);
+
+int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);
+
+int
+smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param);
+
+int smu_v12_0_check_fw_status(struct smu_context *smu);
+
+int smu_v12_0_check_fw_version(struct smu_context *smu);
+
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
+
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
+
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
+
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
+
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v12_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_populate_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v12_0_mode2_reset(struct smu_context *smu);
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index a0883038f3c3..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -120,7 +120,8 @@
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
-#define PPSMC_Message_Count 0x61
+#define PPSMC_MSG_DFCstateControl 0x63
+#define PPSMC_Message_Count 0x64
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
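The new PPSMC_MSG_DFCstateControl message is the firmware-side counterpart of the smu_set_df_cstate()/set_df_cstate hooks added elsewhere in this patch. A hedged sketch of how a ppt implementation could forward the request, assuming the SMU_MSG_DFCstateControl mapping introduced in smu_types.h (the function name is illustrative):

static int example_set_df_cstate(struct smu_context *smu,
				 enum pp_df_cstate state)
{
	int ret;

	/* Hand the requested Data Fabric C-state policy to the SMC. */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
	if (ret)
		pr_err("Failed to set DF C-state %d\n", state);

	return ret;
}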
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 328e258a6895..aaec884d63ed 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -35,6 +36,7 @@
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -177,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
FEA_MAP(TEMP_DEPENDENT_VMIN),
FEA_MAP(MMHUB_PG),
FEA_MAP(ATHUB_PG),
+ FEA_MAP(APCC_DFLL),
};
static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
@@ -327,40 +330,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
memset(feature_mask, 0, sizeof(uint32_t) * num);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
- | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_LINK_BIT)
- | FEATURE_MASK(FEATURE_GFX_ULV_BIT)
| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_PPT_BIT)
| FEATURE_MASK(FEATURE_TDC_BIT)
| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
+ | FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
| FEATURE_MASK(FEATURE_VR0HOT_BIT)
| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
| FEATURE_MASK(FEATURE_THERMAL_BIT)
| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
- | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)
+ | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
| FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
- | FEATURE_MASK(FEATURE_FW_CTF_BIT);
+ | FEATURE_MASK(FEATURE_FW_CTF_BIT)
+ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+
+ if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+
+ if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
- if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
- /* TODO: remove it once fw fix the bug */
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
- }
if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
@@ -585,6 +600,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *driver_ppt = NULL;
+ int i;
driver_ppt = table_context->driver_pptable;
@@ -615,6 +631,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
+ for (i = 0; i < MAX_PCIE_CONF; i++) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
+ }
+
return 0;
}
@@ -677,13 +698,29 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+{
+ return od_table->cap[feature];
+}
+
+
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
+ uint16_t *curve_settings;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ uint32_t gen_speed, lane_width;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct amdgpu_device *adev = smu->adev;
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
switch (clk_type) {
case SMU_GFXCLK:
@@ -734,6 +771,69 @@ static int navi10_print_clk_levels(struct smu_context *smu,
}
break;
+ case SMU_PCIE:
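+ /* Read the current link speed and width back from the PCIE LC registers
+  * so the matching DPM level can be flagged with '*' below. */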
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+ (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+ "*" : "");
+ break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ break;
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ break;
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ break;
+ size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ curve_settings = &od_table->GfxclkFreq1;
+ break;
+ case 1:
+ curve_settings = &od_table->GfxclkFreq2;
+ break;
+ case 2:
+ curve_settings = &od_table->GfxclkFreq3;
+ break;
+ default:
+ break;
+ }
+ size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
default:
break;
}
@@ -789,13 +889,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
int ret = 0;
uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
- ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
if (ret)
return ret;
smu->pstate_sclk = min_sclk_freq * 100;
- ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
if (ret)
return ret;
@@ -848,7 +948,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
if (ret)
return ret;
ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
@@ -898,7 +998,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -925,7 +1025,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -1260,7 +1360,9 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu_display_clock_voltage_request(smu, &clock_req)) {
+
+ ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
+ if (!ret) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -1414,7 +1516,7 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -1457,18 +1559,47 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
uint32_t sclk_freq = 0, uclk_freq = 0;
uint32_t uclk_level = 0;
- switch (adev->pdev->revision) {
- case 0xf0: /* XTX */
- case 0xc0:
- sclk_freq = NAVI10_PEAK_SCLK_XTX;
- break;
- case 0xf1: /* XT */
- case 0xc1:
- sclk_freq = NAVI10_PEAK_SCLK_XT;
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ switch (adev->pdev->revision) {
+ case 0xf0: /* XTX */
+ case 0xc0:
+ sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ break;
+ case 0xf1: /* XT */
+ case 0xc1:
+ sclk_freq = NAVI10_PEAK_SCLK_XT;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI10_PEAK_SCLK_XL;
+ break;
+ }
break;
- default: /* XL */
- sclk_freq = NAVI10_PEAK_SCLK_XL;
+ case CHIP_NAVI14:
+ switch (adev->pdev->revision) {
+ case 0xc7: /* XT */
+ case 0xf4:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
+ break;
+ case 0xc1: /* XTM */
+ case 0xf2:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
+ break;
+ case 0xc3: /* XLM */
+ case 0xf3:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+ break;
+ case 0xc5: /* XTX */
+ case 0xf6:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
+ break;
+ }
break;
+ default:
+ return -EINVAL;
}
ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
@@ -1491,10 +1622,6 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
int ret = 0;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->asic_type != CHIP_NAVI10)
- return -EINVAL;
switch (level) {
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1547,17 +1674,22 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
return ret;
}
+static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static int navi10_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1580,23 +1712,291 @@ static int navi10_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
return 0;
}
+static int navi10_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
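+ /* Message argument layout, as implied by the shifts below: bits 31:16 hold
+  * the link level, 15:8 the capped gen speed, 7:0 the capped lane count. */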
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+
+ if (ret)
+ return ret;
+
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+ if (pptable->PcieLaneCount[i] > pcie_width_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ }
+
+ return 0;
+}
+
+static inline void navi10_dump_od_table(OverDriveTable_t *od_table) {
+ pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
+ pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
+ pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
+ pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
+ pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
+}
+
+static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int navi10_setup_od_limits(struct smu_context *smu) {
+ struct smu_11_0_overdrive_table *overdrive_table = NULL;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+
+ if (!smu->smu_table.power_play_table) {
+ pr_err("powerplay table uninitialized!\n");
+ return -ENOENT;
+ }
+ powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
+ overdrive_table = &powerplay_table->overdrive_table;
+ if (!smu->od_settings) {
+ smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
+ } else {
+ memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
+ }
+ return 0;
+}
+
+static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
+ OverDriveTable_t *od_table;
+ int ret = 0;
+
+ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = navi10_setup_od_limits(smu);
+ if (ret) {
+ pr_err("Failed to retrieve board OD limits\n");
+ return ret;
+ }
+
+ }
+
+ od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ if (od_table) {
+ navi10_dump_od_table(od_table);
+ }
+
+ return ret;
+}
+
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
+ int i;
+ int ret = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table;
+ struct smu_11_0_overdrive_table *od_settings;
+ enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
+ uint16_t *freq_ptr, *voltage_ptr;
+ od_table = (OverDriveTable_t *)table_context->overdrive_table;
+
+ if (!smu->od_enabled) {
+ pr_warn("OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ pr_err("OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ od_settings = smu->od_settings;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ pr_warn("GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (!table_context->overdrive_table) {
+ pr_err("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+ switch (input[i]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ pr_info("Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[i + 1];
+ }
+ break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ pr_warn("UCLK_MAX not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 2) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (input[0] != 1) {
+ pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
+ pr_info("Supported indices: [1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
+ if (ret)
+ return ret;
+ od_table->UclkFmax = input[1];
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ navi10_dump_od_table(od_table);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ // no lock needed because smu_od_edit_dpm_table has it
+ ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+ if (ret) {
+ return ret;
+ }
+ break;
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ pr_warn("GFXCLK_CURVE not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 3) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (!od_table) {
+ pr_info("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+
+ switch (input[0]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
+ freq_ptr = &od_table->GfxclkFreq1;
+ voltage_ptr = &od_table->GfxclkVolt1;
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
+ freq_ptr = &od_table->GfxclkFreq2;
+ voltage_ptr = &od_table->GfxclkVolt2;
+ break;
+ case 2:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
+ freq_ptr = &od_table->GfxclkFreq3;
+ voltage_ptr = &od_table->GfxclkVolt3;
+ break;
+ default:
+ pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
+ pr_info("Supported indices: [0, 1, 2]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
+ if (ret)
+ return ret;
+ // Allow setting zero to disable the OverDrive VDDC curve
+ if (input[2] != 0) {
+ ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[1];
+ *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
+ pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
+ } else {
+ // If setting 0, disable all voltage curve settings
+ od_table->GfxclkVolt1 = 0;
+ od_table->GfxclkVolt2 = 0;
+ od_table->GfxclkVolt3 = 0;
+ }
+ navi10_dump_od_table(od_table);
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return ret;
+}
+
+static int navi10_run_btc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ if (ret)
+ pr_err("RunBtc failed!\n");
+
+ return ret;
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1635,12 +2035,63 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
.get_power_limit = navi10_get_power_limit,
+ .update_pcie_parameters = navi10_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .set_default_od_settings = navi10_set_default_od_settings,
+ .od_edit_dpm_table = navi10_od_edit_dpm_table,
+ .get_pptable_power_limit = navi10_get_pptable_power_limit,
+ .run_btc = navi10_run_btc,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &navi10_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 620ff17c2fef..ec03c7992f6d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,6 +27,17 @@
#define NAVI10_PEAK_SCLK_XT (1755)
#define NAVI10_PEAK_SCLK_XL (1625)
+#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670)
+#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448)
+#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181)
+#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
+#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+
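+/* Scale factor applied to user-supplied overdrive voltage values before they are stored in the OD table. */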
+#define NAVI10_VOLTAGE_SCALE (4)
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index e62bfba51562..04daf7e9fe05 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
@@ -160,21 +161,17 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
 * This interface is only for getting the uclk ultimate frequency and shouldn't
 * grow other similar per-clock helpers, which would result in excessive callbacks.
*/
-static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max)
+static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- DpmClocks_t *table = smu->smu_table.clocks_table;
-
- if (!clock || !table)
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
- if (max)
- *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq;
- else
- *clock = table->FClocks[0].Freq;
+ GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq);
return 0;
-
}
static int renoir_print_clk_levels(struct smu_context *smu,
@@ -183,11 +180,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- SmuMetrics_t metrics = {0};
+ SmuMetrics_t metrics;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
+ memset(&metrics, 0, sizeof(metrics));
+
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)&metrics, false);
if (ret)
@@ -198,7 +197,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_SCLK:
/* values retrieved from the table are in MHz */
cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
- ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
if (!ret) {
/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
if (cur_value == max)
@@ -246,20 +245,474 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return size;
}
+static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+
+ return pm_type;
+}
+
+static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_GFXCLK,
+ SMU_MCLK,
+ SMU_SOCCLK,
+ };
+
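+ /* Collapse each clock's soft range to a single frequency: the max when forcing highest, otherwise the min. */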
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
+ {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ };
+
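+ /* Restore the full min..max soft range for every clock whose DPM feature is enabled. */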
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+ if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
+{
+
+ uint32_t pplib_workload = 0;
+
+ switch (profile) {
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_COUNT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pplib_workload;
+}
+
+static int renoir_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (sclk_mask)
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (sclk_mask)
+ /* sclk (gfxclk) exposes three levels: min, current and max */
+ *sclk_mask = 3 - 1;
+
+ if (mclk_mask)
+ *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+
+ if (soc_mask)
+ *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
+ }
+
+ return 0;
+}
+
+/**
+ * This interface gets the DPM clock table for DC
+ */
+static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
+ clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
+ clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
+ clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
+ clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_MEMCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
+ clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
+ }
+
+ return 0;
+}
+
+static int renoir_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+ int ret = 0;
+ uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
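+ /* mask is a bitmask of allowed DPM levels; its lowest and highest set bits become the soft min and max levels. */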
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (soft_min_level > 2 || soft_max_level > 2) {
+ pr_info("Currently sclk only support 3 levels on APU\n");
+ return -EINVAL;
+ }
+
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ soft_max_level == 0 ? min_freq :
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ soft_min_level == 2 ? max_freq :
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
+ uint32_t profile_mode = input[size];
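+ /* The caller passes the requested profile mode as the element just past the parameter list (input[size]). */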
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
+ return -EINVAL;
+ }
+
+ /* convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_workload_get_type(smu, profile_mode);
+ if (workload_type < 0) {
+ pr_err("Unsupported power profile mode %d on RENOIR\n", profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+ if (ret) {
+ pr_err("Fail to set workload type %d\n", workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = renoir_set_peak_clock_by_device(smu);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* save watermark settings into pplib smu structure,
+ * also pass data to smu controller
+ */
+static int renoir_set_watermarks_table(
+ struct smu_context *smu,
+ void *watermarks,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int i;
+ int ret = 0;
+ Watermarks_t *table = watermarks;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ /* pass data to smu controller */
+ ret = smu_write_watermarks_table(smu);
+
+ return ret;
+}
+
+static int renoir_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!smu->pm_enabled || !buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT.
+ * Not all profile modes are supported on Renoir.
+ */
+ workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_table_index = renoir_get_smu_table_index,
.tables_init = renoir_tables_init,
.set_power_state = NULL,
- .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited,
+ .get_dpm_clk_limited = renoir_get_dpm_clk_limited,
.print_clk_levels = renoir_print_clk_levels,
+ .get_current_power_state = renoir_get_current_power_state,
+ .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+ .force_dpm_limit_value = renoir_force_dpm_limit_value,
+ .unforce_dpm_levels = renoir_unforce_dpm_levels,
+ .get_workload_type = renoir_get_workload_type,
+ .get_profiling_clk_mask = renoir_get_profiling_clk_mask,
+ .force_clk_levels = renoir_force_clk_levels,
+ .set_power_profile_mode = renoir_set_power_profile_mode,
+ .set_performance_level = renoir_set_performance_level,
+ .get_dpm_clock_table = renoir_get_dpm_clock_table,
+ .set_watermarks_table = renoir_set_watermarks_table,
+ .get_power_profile_mode = renoir_get_power_profile_mode,
+ .check_fw_status = smu_v12_0_check_fw_status,
+ .check_fw_version = smu_v12_0_check_fw_version,
+ .powergate_sdma = smu_v12_0_powergate_sdma,
+ .powergate_vcn = smu_v12_0_powergate_vcn,
+ .send_smc_msg = smu_v12_0_send_msg,
+ .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+ .read_smc_arg = smu_v12_0_read_arg,
+ .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
+ .gfx_off_control = smu_v12_0_gfx_off_control,
+ .init_smc_tables = smu_v12_0_init_smc_tables,
+ .fini_smc_tables = smu_v12_0_fini_smc_tables,
+ .populate_smc_tables = smu_v12_0_populate_smc_tables,
+ .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
+ .mode2_reset = smu_v12_0_mode2_reset,
+ .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &renoir_ppt_funcs;
smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
- smu_table->table_count = TABLE_COUNT;
+ smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
new file mode 100644
index 000000000000..8bcda7871309
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_INTERNAL_H__
+#define __SMU_INTERNAL_H__
+
+#include "amdgpu_smu.h"
+
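+/*
+ * Thin dispatch wrappers around smu->ppt_funcs: each macro invokes the
+ * ASIC-specific callback when it is implemented and otherwise returns a
+ * benign default (0, or -EINVAL where a valid result is required).
+ */
+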
+#define smu_init_microcode(smu) \
+ ((smu)->ppt_funcs->init_microcode ? (smu)->ppt_funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+ ((smu)->ppt_funcs->init_smc_tables ? (smu)->ppt_funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+ ((smu)->ppt_funcs->fini_smc_tables ? (smu)->ppt_funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+ ((smu)->ppt_funcs->init_power ? (smu)->ppt_funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+ ((smu)->ppt_funcs->fini_power ? (smu)->ppt_funcs->fini_power((smu)) : 0)
+
+#define smu_setup_pptable(smu) \
+ ((smu)->ppt_funcs->setup_pptable ? (smu)->ppt_funcs->setup_pptable((smu)) : 0)
+#define smu_powergate_sdma(smu, gate) \
+ ((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
+#define smu_powergate_vcn(smu, gate) \
+ ((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+
+#define smu_get_vbios_bootup_values(smu) \
+ ((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+ ((smu)->ppt_funcs->get_clk_info_from_vbios ? (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+ ((smu)->ppt_funcs->check_pptable ? (smu)->ppt_funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+ ((smu)->ppt_funcs->parse_pptable ? (smu)->ppt_funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_tables(smu) \
+ ((smu)->ppt_funcs->populate_smc_tables ? (smu)->ppt_funcs->populate_smc_tables((smu)) : 0)
+#define smu_check_fw_version(smu) \
+ ((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+ ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+ ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+ ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+ ((smu)->ppt_funcs->notify_memory_pool_location ? (smu)->ppt_funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_gfx_off_control(smu, enable) \
+ ((smu)->ppt_funcs->gfx_off_control ? (smu)->ppt_funcs->gfx_off_control((smu), (enable)) : 0)
+
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+ ((smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+ ((smu)->ppt_funcs->system_features_control ? (smu)->ppt_funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+ ((smu)->ppt_funcs->init_max_sustainable_clocks ? (smu)->ppt_funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_default_od_settings(smu, initialize) \
+ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
+
+#define smu_send_smc_msg(smu, msg) \
+ ((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0)
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+ ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+ ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display_count(smu, count) \
+ ((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+ ((smu)->ppt_funcs->set_allowed_mask? (smu)->ppt_funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+ ((smu)->ppt_funcs->get_enabled_mask? (smu)->ppt_funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+ ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
+#define smu_notify_display_change(smu) \
+ ((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+ ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+ ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+ ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+ ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+ ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+
+#define smu_tables_init(smu, tab) \
+ ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
+#define smu_set_thermal_fan_table(smu) \
+ ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
+#define smu_start_thermal_control(smu) \
+ ((smu)->ppt_funcs->start_thermal_control? (smu)->ppt_funcs->start_thermal_control((smu)) : 0)
+#define smu_stop_thermal_control(smu) \
+ ((smu)->ppt_funcs->stop_thermal_control? (smu)->ppt_funcs->stop_thermal_control((smu)) : 0)
+
+#define smu_smc_read_sensor(smu, sensor, data, size) \
+ ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
+
+#define smu_pre_display_config_changed(smu) \
+ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+ ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_dispaly_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+ ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+
+#define smu_msg_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_clk_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_feature_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_table_get_index(smu, tab) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
+#define smu_power_get_index(smu, src) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
+#define smu_workload_get_type(smu, profile) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
+#define smu_run_btc(smu) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
+#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+
+
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ ((smu)->ppt_funcs->store_cc6_data ? (smu)->ppt_funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+
+#define smu_get_dal_power_level(smu, clocks) \
+ ((smu)->ppt_funcs->get_dal_power_level ? (smu)->ppt_funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+ ((smu)->ppt_funcs->get_perf_level ? (smu)->ppt_funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ ((smu)->ppt_funcs->get_current_shallow_sleep_clocks ? (smu)->ppt_funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+
+#define smu_dpm_set_uvd_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+
+#define smu_set_watermarks_table(smu, tab, clock_ranges) \
+ ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
+#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
+#define smu_thermal_temperature_range_update(smu, range, rw) \
+ ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
+#define smu_get_thermal_temperature_range(smu, range) \
+ ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
+#define smu_register_irq_handler(smu) \
+ ((smu)->ppt_funcs->register_irq_handler ? (smu)->ppt_funcs->register_irq_handler(smu) : 0)
+
+#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
+ ((smu)->ppt_funcs->get_dpm_ultimate_freq ? (smu)->ppt_funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
+
+#define smu_asic_set_performance_level(smu, level) \
+ ((smu)->ppt_funcs->set_performance_level ? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL)
+#define smu_dump_pptable(smu) \
+ ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
+#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \
+ ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
+
+#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
+ ((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
+
+#define smu_override_pcie_parameters(smu) \
+ ((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0)
+
+#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
+ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c5257ae3188a..fc9679ea2368 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -24,17 +24,19 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define SMU_11_0_PARTIAL_PPTABLE
+
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
+#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
-#include "vega20_ppt.h"
-#include "arcturus_ppt.h"
-#include "navi10_ppt.h"
+#include "amd_pcie.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -61,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -88,7 +90,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
+int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -113,7 +115,7 @@ static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
}
-static int
+int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
@@ -144,7 +146,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return ret;
}
-static int smu_v11_0_init_microcode(struct smu_context *smu)
+int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const char *chip_name;
@@ -206,7 +208,7 @@ out:
return err;
}
-static int smu_v11_0_load_microcode(struct smu_context *smu)
+int smu_v11_0_load_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
@@ -244,7 +246,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_check_fw_status(struct smu_context *smu)
+int smu_v11_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -259,7 +261,7 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v11_0_check_fw_version(struct smu_context *smu)
+int smu_v11_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -354,7 +356,7 @@ static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
return 0;
}
-static int smu_v11_0_setup_pptable(struct smu_context *smu)
+int smu_v11_0_setup_pptable(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v1_0 *hdr;
@@ -369,6 +371,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
+ pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
switch (version_minor) {
case 0:
ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
@@ -385,6 +388,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
return ret;
} else {
+ pr_info("use vbios provided pptable\n");
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
@@ -433,13 +437,13 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
int ret = 0;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -460,18 +464,17 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->tables);
kfree(smu_table->metrics_table);
smu_table->tables = NULL;
- smu_table->table_count = 0;
smu_table->metrics_table = NULL;
smu_table->metrics_time = 0;
@@ -481,7 +484,7 @@ static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_power(struct smu_context *smu)
+int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -499,7 +502,7 @@ static int smu_v11_0_init_power(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_power(struct smu_context *smu)
+int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -576,7 +579,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
int ret, index;
struct amdgpu_device *adev = smu->adev;
@@ -673,7 +676,7 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
@@ -719,7 +722,7 @@ static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_check_pptable(struct smu_context *smu)
+int smu_v11_0_check_pptable(struct smu_context *smu)
{
int ret;
@@ -727,7 +730,7 @@ static int smu_v11_0_check_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_parse_pptable(struct smu_context *smu)
+int smu_v11_0_parse_pptable(struct smu_context *smu)
{
int ret;
@@ -751,7 +754,7 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
int ret;
@@ -760,7 +763,7 @@ static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_pptable(struct smu_context *smu)
+int smu_v11_0_write_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
@@ -771,24 +774,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
-{
- int ret = 0;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_WATERMARKS];
-
- if (!table->cpu_addr)
- return -EINVAL;
-
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
- true);
-
- return ret;
-}
-
-static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
@@ -800,7 +786,7 @@ static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t cl
return ret;
}
-static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -809,11 +795,10 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
if (!table_context)
return -EINVAL;
- return smu_set_deep_sleep_dcefclk(smu,
- table_context->boot_values.dcefclk / 100);
+ return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}
-static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
@@ -831,7 +816,7 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
@@ -843,7 +828,7 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
}
-static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
@@ -870,7 +855,7 @@ failed:
return ret;
}
-static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
uint32_t feature_mask_high = 0, feature_mask_low = 0;
@@ -899,7 +884,7 @@ static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_system_features_control(struct smu_context *smu,
+int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -925,7 +910,7 @@ static int smu_v11_0_system_features_control(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_notify_display_change(struct smu_context *smu)
+int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
@@ -983,7 +968,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return ret;
}
-static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
@@ -1063,13 +1048,44 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu)
+{
+ uint32_t od_limit, max_power_limit;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+ powerplay_table = table_context->power_play_table;
+
+ max_power_limit = smu_get_pptable_power_limit(smu);
+
+ if (!max_power_limit) {
+ // If we couldn't get the table limit, fall back on first-read value
+ if (!smu->default_power_limit)
+ smu->default_power_limit = smu->power_limit;
+ max_power_limit = smu->default_power_limit;
+ }
+
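+ /* With overdrive enabled, the limit may be raised by the board's allowed percentage headroom. */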
+ if (smu->od_enabled) {
+ od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+ pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
+
+ max_power_limit *= (100 + od_limit);
+ max_power_limit /= 100;
+ }
+
+ return max_power_limit;
+}
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
+ uint32_t max_power_limit;
- if (n > smu->default_power_limit) {
- pr_err("New power limit is over the max allowed %d\n",
- smu->default_power_limit);
+ max_power_limit = smu_v11_0_get_max_power_limit(smu);
+
+ if (n > max_power_limit) {
+ pr_err("New power limit (%d) is over the max allowed %d\n",
+ n,
+ max_power_limit);
return -EINVAL;
}
@@ -1091,7 +1107,7 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return 0;
}
-static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_id,
uint32_t *value)
{
@@ -1170,7 +1186,7 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_start_thermal_control(struct smu_context *smu)
+int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
int ret = 0;
struct smu_temperature_range range;
@@ -1212,6 +1228,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
+int smu_v11_0_stop_thermal_control(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
+
+ return 0;
+}
+
static uint16_t convert_to_vddc(uint8_t vid)
{
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
@@ -1236,7 +1261,7 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-static int smu_v11_0_read_sensor(struct smu_context *smu,
+int smu_v11_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
@@ -1273,7 +1298,7 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
return ret;
}
-static int
+int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request
*clock_req)
@@ -1316,9 +1341,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
return 0;
- mutex_lock(&smu->mutex);
ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
- mutex_unlock(&smu->mutex);
if(clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq;
@@ -1328,27 +1351,7 @@ failed:
return ret;
}
-static int
-smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
- dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
-{
- int ret = 0;
- struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
- void *table = watermarks->cpu_addr;
-
- if (!smu->disable_watermark &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
- }
-
- return ret;
-}
-
-static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0;
struct amdgpu_device *adev = smu->adev;
@@ -1361,12 +1364,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_NAVI12:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
- mutex_lock(&smu->mutex);
if (enable)
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
else
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
- mutex_unlock(&smu->mutex);
break;
default:
break;
@@ -1375,7 +1376,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static uint32_t
+uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1415,7 +1416,7 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
return 0;
}
-static int
+int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
@@ -1444,7 +1445,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
-static int
+int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
@@ -1472,7 +1473,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
@@ -1482,10 +1483,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
if (!speed)
return -EINVAL;
- mutex_lock(&(smu->mutex));
ret = smu_v11_0_auto_fan_control(smu, 0);
if (ret)
- goto set_fan_speed_rpm_failed;
+ return ret;
crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
@@ -1496,23 +1496,16 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
-set_fan_speed_rpm_failed:
- mutex_unlock(&(smu->mutex));
return ret;
}
-#define XGMI_STATE_D0 1
-#define XGMI_STATE_D3 0
-
-static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
int ret = 0;
- mutex_lock(&(smu->mutex));
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
- mutex_unlock(&(smu->mutex));
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
return ret;
}
@@ -1559,7 +1552,7 @@ static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
.process = smu_v11_0_irq_process,
};
-static int smu_v11_0_register_irq_handler(struct smu_context *smu)
+int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_irq_src *irq_src = smu->irq_source;
@@ -1591,7 +1584,7 @@ static int smu_v11_0_register_irq_handler(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -1621,13 +1614,11 @@ static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- mutex_lock(&smu->mutex);
ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1637,7 +1628,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}
-static bool smu_v11_0_baco_is_support(struct smu_context *smu)
+bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1661,7 +1652,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu)
return false;
}
-static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
enum smu_baco_state baco_state;
@@ -1673,7 +1664,7 @@ static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
return baco_state;
}
-static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1697,7 +1688,7 @@ out:
return ret;
}
-static int smu_v11_0_baco_reset(struct smu_context *smu)
+int smu_v11_0_baco_reset(struct smu_context *smu)
{
int ret = 0;
@@ -1718,13 +1709,12 @@ static int smu_v11_0_baco_reset(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
- mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0) {
ret = -EINVAL;
@@ -1751,80 +1741,102 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
}
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
-static const struct smu_funcs smu_v11_0_funcs = {
- .init_microcode = smu_v11_0_init_microcode,
- .load_microcode = smu_v11_0_load_microcode,
- .check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
- .send_smc_msg = smu_v11_0_send_msg,
- .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
- .setup_pptable = smu_v11_0_setup_pptable,
- .init_smc_tables = smu_v11_0_init_smc_tables,
- .fini_smc_tables = smu_v11_0_fini_smc_tables,
- .init_power = smu_v11_0_init_power,
- .fini_power = smu_v11_0_fini_power,
- .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
- .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
- .check_pptable = smu_v11_0_check_pptable,
- .parse_pptable = smu_v11_0_parse_pptable,
- .populate_smc_tables = smu_v11_0_populate_smc_pptable,
- .write_pptable = smu_v11_0_write_pptable,
- .write_watermarks_table = smu_v11_0_write_watermarks_table,
- .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
- .set_tool_table_location = smu_v11_0_set_tool_table_location,
- .init_display_count = smu_v11_0_init_display_count,
- .set_allowed_mask = smu_v11_0_set_allowed_mask,
- .get_enabled_mask = smu_v11_0_get_enabled_mask,
- .system_features_control = smu_v11_0_system_features_control,
- .notify_display_change = smu_v11_0_notify_display_change,
- .set_power_limit = smu_v11_0_set_power_limit,
- .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
- .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
- .start_thermal_control = smu_v11_0_start_thermal_control,
- .read_sensor = smu_v11_0_read_sensor,
- .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
- .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
- .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
- .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
- .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
- .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
- .gfx_off_control = smu_v11_0_gfx_off_control,
- .register_irq_handler = smu_v11_0_register_irq_handler,
- .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
- .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
- .baco_reset = smu_v11_0_baco_reset,
- .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
-};
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
-void smu_v11_0_set_smu_funcs(struct smu_context *smu)
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
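+ /* The message parameter packs the clock index in bits 31:16 and the frequency in bits 15:0. */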
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
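For clarity, the SetSoftMinByFreq/SetSoftMaxByFreq parameter built above packs the clock index into bits 31:16 and the frequency in MHz into bits 15:0. A minimal standalone sketch of that packing (hypothetical helper name, illustration only, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors (clk_id << 16) | (freq & 0xffff) used above. */
static uint32_t pack_soft_freq_param(int clk_id, uint32_t freq_mhz)
{
	return (uint32_t)((clk_id << 16) | (freq_mhz & 0xffff));
}

int main(void)
{
	/* e.g. clk_id 2 at 1350 MHz -> 0x00020546 */
	printf("0x%08x\n", pack_soft_freq_param(2, 1350));
	return 0;
}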
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0;
+ int ret;
- smu->funcs = &smu_v11_0_funcs;
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- vega20_set_ppt_funcs(smu);
- break;
- case CHIP_ARCTURUS:
- arcturus_set_ppt_funcs(smu);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- navi10_set_ppt_funcs(smu);
- break;
- default:
- pr_warn("Unknown asic for smu11\n");
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+ */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+
+ if (ret)
+ pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+
+ return ret;
+
+}
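As a reference for the bit layout documented in the comment above (bits 31:16 LCLK DPM level, bits 15:8 PCIe gen, bits 7:0 lane width), here is a small standalone sketch of how such a message argument is assembled (hypothetical helper name, illustration only, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: packs the OverridePcieParameters argument layout. */
static uint32_t pack_pcie_arg(uint32_t dpm_level, uint32_t gen, uint32_t width)
{
	return (dpm_level << 16) | (gen << 8) | width;
}

int main(void)
{
	/* DPM level 1, gen index 3 (Gen4), width index 6 (x16) -> 0x00010306 */
	printf("0x%08x\n", pack_pcie_arg(1, 3, 6));
	return 0;
}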
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (initialize) {
+ if (table_context->overdrive_table) {
+ return -EINVAL;
+ }
+ table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
+ if (!table_context->overdrive_table) {
+ return -ENOMEM;
+ }
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export overdrive table!\n");
+ return ret;
+ }
}
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 9d2280ca1f4b..139dd737eaa5 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -24,12 +24,12 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
-#include "renoir_ppt.h"
#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
@@ -41,7 +41,7 @@
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
-static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
@@ -50,7 +50,7 @@ static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -58,7 +58,7 @@ static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
return 0;
}
-static int smu_v12_0_wait_for_response(struct smu_context *smu)
+int smu_v12_0_wait_for_response(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t cur_value, i;
@@ -77,7 +77,7 @@ static int smu_v12_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
+int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -102,7 +102,7 @@ static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
}
-static int
+int
smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
@@ -132,7 +132,7 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return ret;
}
-static int smu_v12_0_check_fw_status(struct smu_context *smu)
+int smu_v12_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -147,7 +147,7 @@ static int smu_v12_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v12_0_check_fw_version(struct smu_context *smu)
+int smu_v12_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -181,7 +181,7 @@ static int smu_v12_0_check_fw_version(struct smu_context *smu)
return ret;
}
-static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
if (!(smu->adev->flags & AMD_IS_APU))
return 0;
@@ -192,7 +192,7 @@ static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
}
-static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
if (!(smu->adev->flags & AMD_IS_APU))
return 0;
@@ -203,7 +203,7 @@ static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
}
-static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
@@ -224,7 +224,7 @@ static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
* Returns 2=Not in GFXOFF.
* Returns 3=Transition into GFXOFF.
*/
-static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
uint32_t reg;
uint32_t gfxOff_Status = 0;
@@ -237,22 +237,13 @@ static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
return gfxOff_Status;
}
-static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0, timeout = 500;
if (enable) {
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
- /* confirm gfx is back to "off" state, timeout is 5 seconds */
- while (!(smu_v12_0_get_gfxoff_status(smu) == 0)) {
- msleep(10);
- timeout--;
- if (timeout == 0) {
- DRM_ERROR("enable gfxoff timeout and failed!\n");
- break;
- }
- }
} else {
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
@@ -270,12 +261,12 @@ static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static int smu_v12_0_init_smc_tables(struct smu_context *smu)
+int smu_v12_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -288,11 +279,11 @@ static int smu_v12_0_init_smc_tables(struct smu_context *smu)
return smu_tables_init(smu, tables);
}
-static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
+int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->clocks_table);
@@ -304,7 +295,7 @@ static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
+int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = NULL;
@@ -319,14 +310,20 @@ static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
-static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0;
-
- mutex_lock(&smu->mutex);
+ uint32_t mclk_mask, soc_mask;
if (max) {
+ ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ NULL,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -340,14 +337,20 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, max, true);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
if (ret)
goto failed;
break;
default:
ret = -EINVAL;
goto failed;
-
}
}
@@ -365,7 +368,14 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, min, false);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
if (ret)
goto failed;
break;
@@ -373,40 +383,65 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
ret = -EINVAL;
goto failed;
}
-
}
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
-static const struct smu_funcs smu_v12_0_funcs = {
- .check_fw_status = smu_v12_0_check_fw_status,
- .check_fw_version = smu_v12_0_check_fw_version,
- .powergate_sdma = smu_v12_0_powergate_sdma,
- .powergate_vcn = smu_v12_0_powergate_vcn,
- .send_smc_msg = smu_v12_0_send_msg,
- .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
- .read_smc_arg = smu_v12_0_read_arg,
- .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
- .gfx_off_control = smu_v12_0_gfx_off_control,
- .init_smc_tables = smu_v12_0_init_smc_tables,
- .fini_smc_tables = smu_v12_0_fini_smc_tables,
- .populate_smc_tables = smu_v12_0_populate_smc_tables,
- .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
-};
-
-void smu_v12_0_set_smu_funcs(struct smu_context *smu)
+int smu_v12_0_mode2_reset(struct smu_context *smu)
+{
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+}
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
- smu->funcs = &smu_v12_0_funcs;
+ if (max < min)
+ return -EINVAL;
- switch (adev->asic_type) {
- case CHIP_RENOIR:
- renoir_set_ppt_funcs(smu);
- break;
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ if (ret)
+ return ret;
+ break;
default:
- pr_warn("Unknown asic for smu12\n");
+ return -EINVAL;
}
+
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 3f12cf341511..aa0ee2b46135 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 4728aa23a818..7dca04a89217 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
uint32_t tmp;
int ret = 0;
struct cgs_firmware_info info = {0};
- struct smu8_smumgr *smu8_smu;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- smu8_smu = hwmgr->smu_backend;
ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 0dbdde69f2d9..0f3836fd9666 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index f9589806bf83..90c782c132d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return -EINVAL);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b9089c6bea85..f604612f411f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 92c393f613d3..0b4892833808 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -143,6 +144,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PrepareMp1ForShutdown),
MSG_MAP(SetMGpuFanBoostLimitRpm),
MSG_MAP(GetAVFSVoltageByDpm),
+ MSG_MAP(DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = {
@@ -464,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
sizeof(PPTable_t));
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
- table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
return 0;
}
@@ -634,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
!smu_dpm_ctx->dpm_current_power_state)
return -EINVAL;
- mutex_lock(&(smu->mutex));
switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
case SMU_STATE_UI_LABEL_BATTERY:
pm_type = POWER_STATE_TYPE_BATTERY;
@@ -652,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&(smu->mutex));
return pm_type;
}
@@ -1274,16 +1273,8 @@ static int vega20_force_clk_levels(struct smu_context *smu,
struct vega20_dpm_table *dpm_table;
struct vega20_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level, hard_min_level;
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
int ret = 0;
- if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
- return -EINVAL;
- }
-
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -1436,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1451,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
dpm_table = smu_dpm->dpm_context;
- mutex_lock(&smu->mutex);
-
switch (clk_type) {
case SMU_GFXCLK:
single_dpm_table = &(dpm_table->gfx_table);
@@ -1474,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
ret = -EINVAL;
}
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -2260,7 +2247,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) {
+ if (!smu_v11_0_display_clock_voltage_request(smu, &clock_req)) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -2547,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu,
int feature_enabled;
PPCLK_e clk_id;
- mutex_lock(&(smu->mutex));
-
dpm_table = smu_dpm->dpm_context;
golden_table = smu_dpm->golden_dpm_context;
@@ -2598,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu,
}
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
set_od_failed:
- mutex_unlock(&(smu->mutex));
-
return ret;
}
@@ -2827,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
}
if (type == PP_OD_COMMIT_DPM_TABLE) {
- mutex_lock(&(smu->mutex));
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
- mutex_unlock(&(smu->mutex));
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
}
return ret;
@@ -3047,7 +3030,7 @@ static int vega20_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -3141,6 +3124,49 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
+static int vega20_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */
+ if (smu_version < 0x283200) {
+ pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+}
+
+static int vega20_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+ }
+
+ return ret;
+}
+
+
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
.alloc_dpm_context = vega20_allocate_dpm_context,
@@ -3153,7 +3179,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_smu_table_index = vega20_get_smu_table_index,
.get_smu_power_index = vega20_get_pwr_src_index,
.get_workload_type = vega20_get_workload_type,
- .run_afll_btc = vega20_run_btc_afll,
+ .run_btc = vega20_run_btc_afll,
.get_allowed_feature_mask = vega20_get_allowed_feature_mask,
.get_current_power_state = vega20_get_current_power_state,
.set_default_dpm_table = vega20_set_default_dpm_table,
@@ -3183,13 +3209,61 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_fan_speed_percent = vega20_get_fan_speed_percent,
.get_fan_speed_rpm = vega20_get_fan_speed_rpm,
.set_watermarks_table = vega20_set_watermarks_table,
- .get_thermal_temperature_range = vega20_get_thermal_temperature_range
+ .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+ .set_df_cstate = vega20_set_df_cstate,
+ .update_pcie_parameters = vega20_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
};
void vega20_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &vega20_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 6b7f791685ec..d6a6692db0ac 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -45,7 +46,7 @@ static int arcpgu_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct arcpgu_drm_private *arcpgu;
- struct device_node *encoder_node;
+ struct device_node *encoder_node = NULL, *endpoint_node = NULL;
struct resource *res;
int ret;
@@ -80,14 +81,23 @@ static int arcpgu_load(struct drm_device *drm)
if (arc_pgu_setup_crtc(drm) < 0)
return -ENODEV;
- /* find the encoder node and initialize it */
- encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
+ /*
+ * There is only one output port inside each device. It is linked with
+ * encoder endpoint.
+ */
+ endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ if (endpoint_node) {
+ encoder_node = of_graph_get_remote_port_parent(endpoint_node);
+ of_node_put(endpoint_node);
+ }
+
if (encoder_node) {
ret = arcpgu_drm_hdmi_init(drm, encoder_node);
of_node_put(encoder_node);
if (ret < 0)
return ret;
} else {
+ dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n");
ret = arcpgu_drm_sim_init(drm, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 98aac743cc26..8fd7094beece 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index cec0639e3aa1..e87ff8623076 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -12,3 +12,9 @@ config DRM_KOMEDA
Processor driver. It supports the D71 variants of the hardware.
If compiled as a module it will be called komeda.
+
+config DRM_KOMEDA_ERROR_PRINT
+ bool "Enable komeda error print"
+ depends on DRM_KOMEDA
+ help
+ Choose this option to enable error printing.
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 5c3900c2e764..f095a1c68ac7 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -22,4 +22,6 @@ komeda-y += \
d71/d71_dev.o \
d71/d71_component.o
+komeda-$(CONFIG_DRM_KOMEDA_ERROR_PRINT) += komeda_event.o
+
obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 55a8cc94808a..f0ba26e282c3 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -106,6 +106,23 @@ static void dump_block_header(struct seq_file *sf, void __iomem *reg)
i, hdr.output_ids[i]);
}
+/* On D71, we are using the global line size. From D32, every component has
+ * a line size register to indicate the fifo size.
+ */
+static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg,
+ u32 max_default)
+{
+ if (!d71->periph_addr)
+ max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+
+ return max_default;
+}
+
+static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg)
+{
+ return __get_blk_line_size(d71, reg, d71->max_line_size);
+}
+
static u32 to_rot_ctrl(u32 rot)
{
u32 lr_ctrl = 0;
@@ -332,7 +349,56 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
}
+static int d71_layer_validate(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(state);
+ struct komeda_layer *layer = to_layer(c);
+ struct drm_plane_state *plane_st;
+ struct drm_framebuffer *fb;
+ u32 fourcc, line_sz, max_line_sz;
+
+ plane_st = drm_atomic_get_new_plane_state(state->obj.state,
+ state->plane);
+ fb = plane_st->fb;
+ fourcc = fb->format->format;
+
+ if (drm_rotation_90_or_270(st->rot))
+ line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b;
+ else
+ line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r;
+
+ if (fb->modifier) {
+ if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ max_line_sz = layer->line_sz;
+ else
+ max_line_sz = layer->line_sz / 2;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("afbc request line_sz: %d exceed the max afbc line_sz: %d.\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+ }
+
+ if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) {
+ DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceed the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) {
+ DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceed the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct komeda_component_funcs d71_layer_funcs = {
+ .validate = d71_layer_validate,
.update = d71_layer_update,
.disable = d71_layer_disable,
.dump_register = d71_layer_dump,
@@ -365,7 +431,28 @@ static int d71_layer_init(struct d71_dev *d71,
else
layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
- set_range(&layer->hsize_in, 4, d71->max_line_size);
+ if (!d71->periph_addr) {
+ /* D32 or newer product */
+ layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+ layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info);
+ } else if (d71->max_line_size > 2048) {
+ /* D71 4K */
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ /* D71 2K */
+ if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) {
+ /* rich layer is 4K configuration */
+ layer->line_sz = d71->max_line_size * 2;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = 0;
+ }
+ }
+
+ set_range(&layer->hsize_in, 4, layer->line_sz);
+
set_range(&layer->vsize_in, 4, d71->max_vsize);
malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
@@ -456,9 +543,11 @@ static int d71_wb_layer_init(struct d71_dev *d71,
wb_layer = to_layer(c);
wb_layer->layer_type = KOMEDA_FMT_WB_LAYER;
+ wb_layer->line_sz = get_blk_line_size(d71, reg);
+ wb_layer->yuv_line_sz = wb_layer->line_sz;
- set_range(&wb_layer->hsize_in, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&wb_layer->vsize_in, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz);
+ set_range(&wb_layer->vsize_in, 64, d71->max_vsize);
return 0;
}
@@ -595,8 +684,8 @@ static int d71_compiz_init(struct d71_dev *d71,
compiz = to_compiz(c);
- set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg));
+ set_range(&compiz->vsize, 64, d71->max_vsize);
return 0;
}
@@ -703,7 +792,7 @@ static void d71_scaler_update(struct komeda_component *c,
static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
{
- u32 v[9];
+ u32 v[10];
dump_block_header(sf, c->reg);
@@ -723,6 +812,18 @@ static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]);
seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]);
seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]);
+
+ get_values_from_reg(c->reg, 0x130, 10, v);
+ seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]);
+ seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]);
+ seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]);
+ seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]);
+ seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]);
}
static const struct komeda_component_funcs d71_scaler_funcs = {
@@ -753,7 +854,7 @@ static int d71_scaler_init(struct d71_dev *d71,
}
scaler = to_scaler(c);
- set_range(&scaler->hsize, 4, 2048);
+ set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048));
set_range(&scaler->vsize, 4, 4096);
scaler->max_downscaling = 6;
scaler->max_upscaling = 64;
@@ -862,7 +963,7 @@ static int d71_splitter_init(struct d71_dev *d71,
splitter = to_splitter(c);
- set_range(&splitter->hsize, 4, d71->max_line_size);
+ set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg));
set_range(&splitter->vsize, 4, d71->max_vsize);
return 0;
@@ -933,7 +1034,8 @@ static int d71_merger_init(struct d71_dev *d71,
merger = to_merger(c);
- set_range(&merger->hsize_merged, 4, 4032);
+ set_range(&merger->hsize_merged, 4,
+ __get_blk_line_size(d71, reg, 4032));
set_range(&merger->vsize_merged, 4, 4096);
return 0;
@@ -944,13 +1046,26 @@ static void d71_improc_update(struct komeda_component *c,
{
struct komeda_improc_state *st = to_improc_st(state);
u32 __iomem *reg = c->reg;
- u32 index;
+ u32 index, mask = 0, ctrl = 0;
for_each_changed_input(state, index)
malidp_write32(reg, BLK_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+ malidp_write32(reg, IPS_DEPTH, st->color_depth);
+
+ mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+
+ /* config color format */
+ if (st->color_format == DRM_COLOR_FORMAT_YCRCB420)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444)
+ ctrl |= IPS_CTRL_YUV;
+
+ malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
}
static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
@@ -1218,6 +1333,90 @@ int d71_probe_block(struct d71_dev *d71,
return err;
}
+static void d71_gcu_dump(struct d71_dev *d71, struct seq_file *sf)
+{
+ u32 v[5];
+
+ seq_puts(sf, "\n------ GCU ------\n");
+
+ get_values_from_reg(d71->gcu_addr, 0, 3, v);
+ seq_printf(sf, "GLB_ARCH_ID:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "GLB_CORE_ID:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "GLB_CORE_INFO:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(d71->gcu_addr, 0x10, 1, v);
+ seq_printf(sf, "GLB_IRQ_STATUS:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(d71->gcu_addr, 0xA0, 5, v);
+ seq_printf(sf, "GCU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "GCU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "GCU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "GCU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "GCU_STATUS:\t\t0x%X\n", v[4]);
+
+ get_values_from_reg(d71->gcu_addr, 0xD0, 3, v);
+ seq_printf(sf, "GCU_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "GCU_CONFIG_VALID0:\t0x%X\n", v[1]);
+ seq_printf(sf, "GCU_CONFIG_VALID1:\t0x%X\n", v[2]);
+}
+
+static void d71_lpu_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+ u32 v[6];
+
+ seq_printf(sf, "\n------ LPU%d ------\n", pipe->base.id);
+
+ dump_block_header(sf, pipe->lpu_addr);
+
+ get_values_from_reg(pipe->lpu_addr, 0xA0, 6, v);
+ seq_printf(sf, "LPU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "LPU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "LPU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "LPU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "LPU_STATUS:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "LPU_TBU_STATUS:\t\t0x%X\n", v[5]);
+
+ get_values_from_reg(pipe->lpu_addr, 0xC0, 1, v);
+ seq_printf(sf, "LPU_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(pipe->lpu_addr, 0xD0, 3, v);
+ seq_printf(sf, "LPU_RAXI_CONTROL:\t0x%X\n", v[0]);
+ seq_printf(sf, "LPU_WAXI_CONTROL:\t0x%X\n", v[1]);
+ seq_printf(sf, "LPU_TBU_CONTROL:\t0x%X\n", v[2]);
+}
+
+static void d71_dou_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+ u32 v[5];
+
+ seq_printf(sf, "\n------ DOU%d ------\n", pipe->base.id);
+
+ dump_block_header(sf, pipe->dou_addr);
+
+ get_values_from_reg(pipe->dou_addr, 0xA0, 5, v);
+ seq_printf(sf, "DOU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "DOU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "DOU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "DOU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "DOU_STATUS:\t\t0x%X\n", v[4]);
+}
+
+static void d71_pipeline_dump(struct komeda_pipeline *pipe, struct seq_file *sf)
+{
+ struct d71_pipeline *d71_pipe = to_d71_pipeline(pipe);
+
+ d71_lpu_dump(d71_pipe, sf);
+ d71_dou_dump(d71_pipe, sf);
+}
+
const struct komeda_pipeline_funcs d71_pipeline_funcs = {
- .downscaling_clk_check = d71_downscaling_clk_check,
+ .downscaling_clk_check = d71_downscaling_clk_check,
+ .dump_register = d71_pipeline_dump,
};
+
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+
+ d71_gcu_dump(d71, sf);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index d567ab7ed314..822b23a1ce75 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -195,7 +195,7 @@ d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
if (gcu_status & GLB_IRQ_STATUS_PIPE1)
evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
- return gcu_status ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_RETVAL(gcu_status);
}
#define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
@@ -395,6 +395,22 @@ static int d71_enum_resources(struct komeda_dev *mdev)
err = PTR_ERR(pipe);
goto err_cleanup;
}
+
+ /* D71 HW doesn't update shadow registers when the display output
+ * is being turned off, so if we disable all pipeline components
+ * together with the display output in one flush or one operation,
+ * the register updates from the disable operation will not be
+ * flushed to HW or take effect there, which may lead to problems.
+ * To work around this, introduce a two-phase disable:
+ * Phase 1: disable the components while the display is still on,
+ * so the disable can be flushed to HW.
+ * Phase 2: only turn off the display output.
+ */
+ value = KOMEDA_PIPELINE_IMPROCS |
+ BIT(KOMEDA_COMPONENT_TIMING_CTRLR);
+
+ pipe->standalone_disabled_comps = value;
+
d71->pipes[i] = to_d71_pipeline(pipe);
}
@@ -561,17 +577,18 @@ static int d71_disconnect_iommu(struct komeda_dev *mdev)
}
static const struct komeda_dev_funcs d71_chip_funcs = {
- .init_format_table = d71_init_fmt_tbl,
- .enum_resources = d71_enum_resources,
- .cleanup = d71_cleanup,
- .irq_handler = d71_irq_handler,
- .enable_irq = d71_enable_irq,
- .disable_irq = d71_disable_irq,
- .on_off_vblank = d71_on_off_vblank,
- .change_opmode = d71_change_opmode,
- .flush = d71_flush,
- .connect_iommu = d71_connect_iommu,
- .disconnect_iommu = d71_disconnect_iommu,
+ .init_format_table = d71_init_fmt_tbl,
+ .enum_resources = d71_enum_resources,
+ .cleanup = d71_cleanup,
+ .irq_handler = d71_irq_handler,
+ .enable_irq = d71_enable_irq,
+ .disable_irq = d71_disable_irq,
+ .on_off_vblank = d71_on_off_vblank,
+ .change_opmode = d71_change_opmode,
+ .flush = d71_flush,
+ .connect_iommu = d71_connect_iommu,
+ .disconnect_iommu = d71_disconnect_iommu,
+ .dump_register = d71_dump,
};
const struct komeda_dev_funcs *
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
index 84f1878b647d..c7357c2b9e62 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
@@ -49,4 +49,6 @@ int d71_probe_block(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg);
void d71_read_block_header(u32 __iomem *reg, struct block_header *blk);
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf);
+
#endif /* !_D71_DEV_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
index 2d5e6d00b42c..1727dc993909 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -10,6 +10,7 @@
/* Common block registers offset */
#define BLK_BLOCK_INFO 0x000
#define BLK_PIPELINE_INFO 0x004
+#define BLK_MAX_LINE_SIZE 0x008
#define BLK_VALID_INPUT_ID0 0x020
#define BLK_OUTPUT_ID0 0x060
#define BLK_INPUT_ID0 0x080
@@ -321,6 +322,7 @@
#define L_INFO_RF BIT(0)
#define L_INFO_CM BIT(1)
#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7)
+#define L_INFO_YUV_MAX_LINESZ(x) (((x) >> 16) & 0xFFFF)
/* Scaler registers */
#define SC_COEFFTAB 0x0DC
@@ -494,13 +496,6 @@ enum d71_blk_type {
#define D71_DEFAULT_PREPRETCH_LINE 5
#define D71_BUS_WIDTH_16_BYTES 16
-#define D71_MIN_LINE_SIZE 64
-#define D71_MIN_VERTICAL_SIZE 64
-#define D71_SC_MIN_LIN_SIZE 4
-#define D71_SC_MIN_VERTICAL_SIZE 4
-#define D71_SC_MAX_LIN_SIZE 2048
-#define D71_SC_MAX_VERTICAL_SIZE 4096
-
#define D71_SC_MAX_UPSCALING 64
#define D71_SC_MAX_DOWNSCALING 6
#define D71_SC_SPLIT_OVERLAP 8
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 624d257da20f..252015210fbc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -5,7 +5,6 @@
*
*/
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <drm/drm_atomic.h>
@@ -18,6 +17,33 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats)
+{
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_st;
+ u32 conn_color_formats = ~0u;
+ int i, min_bpc = 31, conn_bpc = 0;
+
+ for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) {
+ if (conn_st->crtc != crtc_st->crtc)
+ continue;
+
+ conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8;
+ conn_color_formats &= conn->display_info.color_formats;
+
+ if (conn_bpc < min_bpc)
+ min_bpc = conn_bpc;
+ }
+
+ /* if the connector doesn't configure any color_format, use RGB444 as default */
+ if (!conn_color_formats)
+ conn_color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ *color_depths = GENMASK(min_bpc, 0);
+ *color_formats = conn_color_formats;
+}
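For reference, the depth mask produced above is GENMASK(min_bpc, 0), i.e. every depth from 0 up to the lowest connector bpc is marked as usable. A standalone sketch with a local stand-in for GENMASK (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(h, l) macro, for illustration. */
static uint32_t genmask(unsigned int h, unsigned int l)
{
	return (uint32_t)(((1ULL << (h - l + 1)) - 1) << l);
}

int main(void)
{
	/* min_bpc = 8 -> color_depths mask 0x1ff (bits 0..8 set) */
	printf("0x%x\n", genmask(8, 0));
	return 0;
}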
+
static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
{
u64 pxlclk, aclk;
@@ -250,23 +276,57 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc,
{
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc));
komeda_crtc_do_flush(crtc, old);
}
static void
+komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
+ struct completion *input_flip_done)
+{
+ struct drm_device *drm = kcrtc->base.dev;
+ struct komeda_dev *mdev = kcrtc->master->mdev;
+ struct completion *flip_done;
+ struct completion temp;
+ int timeout;
+
+ /* if the caller doesn't provide a flip_done, use a private flip_done */
+ if (input_flip_done) {
+ flip_done = input_flip_done;
+ } else {
+ init_completion(&temp);
+ kcrtc->disable_done = &temp;
+ flip_done = &temp;
+ }
+
+ mdev->funcs->flush(mdev, kcrtc->master->id, 0);
+
+ /* wait for the flip to take effect. */
+ timeout = wait_for_completion_timeout(flip_done, HZ);
+ if (timeout == 0) {
+ DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id);
+ if (!input_flip_done) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ kcrtc->disable_done = NULL;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+ }
+}
+
+static void
komeda_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
- struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
- struct completion *disable_done = &crtc->state->commit->flip_done;
- struct completion temp;
- int timeout;
+ struct completion *disable_done;
+ bool needs_phase2 = false;
- DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x.\n",
+ DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n",
drm_crtc_index(crtc),
old_st->active_pipes, old_st->affected_pipes);
@@ -274,7 +334,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
komeda_pipeline_disable(slave, old->state);
if (has_bit(master->id, old_st->active_pipes))
- komeda_pipeline_disable(master, old->state);
+ needs_phase2 = komeda_pipeline_disable(master, old->state);
/* crtc_disable has two scenarios according to the state->active switch.
* 1. active -> inactive
@@ -293,32 +353,23 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
* That's also the reason why skip modeset commit in
* komeda_crtc_atomic_flush()
*/
- if (crtc->state->active) {
- struct komeda_pipeline_state *pipe_st;
- /* clear the old active_comps to zero */
- pipe_st = komeda_pipeline_get_old_state(master, old->state);
- pipe_st->active_comps = 0;
+ disable_done = (needs_phase2 || crtc->state->active) ?
+ NULL : &crtc->state->commit->flip_done;
- init_completion(&temp);
- kcrtc->disable_done = &temp;
- disable_done = &temp;
- }
+ /* wait phase 1 disable done */
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
- mdev->funcs->flush(mdev, master->id, 0);
+ /* phase 2 */
+ if (needs_phase2) {
+ komeda_pipeline_disable(kcrtc->master, old->state);
- /* wait the disable take affect.*/
- timeout = wait_for_completion_timeout(disable_done, HZ);
- if (timeout == 0) {
- DRM_ERROR("disable pipeline%d timeout.\n", kcrtc->master->id);
- if (crtc->state->active) {
- unsigned long flags;
+ disable_done = crtc->state->active ?
+ NULL : &crtc->state->commit->flip_done;
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- kcrtc->disable_done = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
}
+ drm_crtc_vblank_put(crtc);
drm_crtc_vblank_off(crtc);
komeda_crtc_unprepare(kcrtc);
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index ca64a129c594..937a6d4c4865 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -25,6 +25,8 @@ static int komeda_register_show(struct seq_file *sf, void *x)
struct komeda_dev *mdev = sf->private;
int i;
+ seq_puts(sf, "\n====== Komeda register dump =========\n");
+
if (mdev->funcs->dump_register)
mdev->funcs->dump_register(mdev, sf);
@@ -91,9 +93,19 @@ config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(config_id);
+static ssize_t
+aclk_hz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(mdev->aclk));
+}
+static DEVICE_ATTR_RO(aclk_hz);
+
static struct attribute *komeda_sysfs_entries[] = {
&dev_attr_core_id.attr,
&dev_attr_config_id.attr,
+ &dev_attr_aclk_hz.attr,
NULL,
};
@@ -216,7 +228,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
product->product_id,
MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id));
err = -ENODEV;
- goto err_cleanup;
+ goto disable_clk;
}
DRM_INFO("Found ARM Mali-D%x version r%dp%d\n",
@@ -229,19 +241,19 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
err = mdev->funcs->enum_resources(mdev);
if (err) {
DRM_ERROR("enumerate display resource failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
err = komeda_parse_dt(dev, mdev);
if (err) {
DRM_ERROR("parse device tree failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
err = komeda_assemble_pipelines(mdev);
if (err) {
DRM_ERROR("assemble display pipelines failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
dev->dma_parms = &mdev->dma_parms;
@@ -254,11 +266,14 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
if (mdev->iommu && mdev->funcs->connect_iommu) {
err = mdev->funcs->connect_iommu(mdev);
if (err) {
+ DRM_ERROR("connect iommu failed.\n");
mdev->iommu = NULL;
- goto err_cleanup;
+ goto disable_clk;
}
}
+ clk_disable_unprepare(mdev->aclk);
+
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
if (err) {
DRM_ERROR("create sysfs group failed.\n");
@@ -271,6 +286,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
return mdev;
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
err_cleanup:
komeda_dev_destroy(mdev);
return ERR_PTR(err);
@@ -288,8 +305,12 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
debugfs_remove_recursive(mdev->debugfs_root);
#endif
+ if (mdev->aclk)
+ clk_prepare_enable(mdev->aclk);
+
if (mdev->iommu && mdev->funcs->disconnect_iommu)
- mdev->funcs->disconnect_iommu(mdev);
+ if (mdev->funcs->disconnect_iommu(mdev))
+ DRM_ERROR("disconnect iommu failed.\n");
mdev->iommu = NULL;
for (i = 0; i < mdev->n_pipelines; i++) {
@@ -317,3 +338,47 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
devm_kfree(dev, mdev);
}
+
+int komeda_dev_resume(struct komeda_dev *mdev)
+{
+ int ret = 0;
+
+ clk_prepare_enable(mdev->aclk);
+
+ if (mdev->iommu && mdev->funcs->connect_iommu) {
+ ret = mdev->funcs->connect_iommu(mdev);
+ if (ret < 0) {
+ DRM_ERROR("connect iommu failed.\n");
+ goto disable_clk;
+ }
+ }
+
+ ret = mdev->funcs->enable_irq(mdev);
+
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
+
+ return ret;
+}
+
+int komeda_dev_suspend(struct komeda_dev *mdev)
+{
+ int ret = 0;
+
+ clk_prepare_enable(mdev->aclk);
+
+ if (mdev->iommu && mdev->funcs->disconnect_iommu) {
+ ret = mdev->funcs->disconnect_iommu(mdev);
+ if (ret < 0) {
+ DRM_ERROR("disconnect iommu failed.\n");
+ goto disable_clk;
+ }
+ }
+
+ ret = mdev->funcs->disable_irq(mdev);
+
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index d1c86b6174c8..414200233b64 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -40,6 +40,17 @@
#define KOMEDA_ERR_TTNG BIT_ULL(30)
#define KOMEDA_ERR_TTF BIT_ULL(31)
+#define KOMEDA_ERR_EVENTS \
+ (KOMEDA_EVENT_URUN | KOMEDA_EVENT_IBSY | KOMEDA_EVENT_OVR |\
+ KOMEDA_ERR_TETO | KOMEDA_ERR_TEMR | KOMEDA_ERR_TITR |\
+ KOMEDA_ERR_CPE | KOMEDA_ERR_CFGE | KOMEDA_ERR_AXIE |\
+ KOMEDA_ERR_ACE0 | KOMEDA_ERR_ACE1 | KOMEDA_ERR_ACE2 |\
+ KOMEDA_ERR_ACE3 | KOMEDA_ERR_DRIFTTO | KOMEDA_ERR_FRAMETO |\
+ KOMEDA_ERR_ZME | KOMEDA_ERR_MERR | KOMEDA_ERR_TCF |\
+ KOMEDA_ERR_TTNG | KOMEDA_ERR_TTF)
+
+#define KOMEDA_WARN_EVENTS KOMEDA_ERR_CSCE
+
/* malidp device id */
enum {
MALI_D71 = 0,
@@ -207,4 +218,13 @@ void komeda_dev_destroy(struct komeda_dev *mdev);
struct komeda_dev *dev_to_mdev(struct device *dev);
+#ifdef CONFIG_DRM_KOMEDA_ERROR_PRINT
+void komeda_print_events(struct komeda_events *evts);
+#else
+static inline void komeda_print_events(struct komeda_events *evts) {}
+#endif
+
+int komeda_dev_resume(struct komeda_dev *mdev);
+int komeda_dev_suspend(struct komeda_dev *mdev);
+
#endif /*_KOMEDA_DEV_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 69ace6f9055d..d6c2222c5d33 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
@@ -136,13 +137,40 @@ static const struct of_device_id komeda_of_match[] = {
MODULE_DEVICE_TABLE(of, komeda_of_match);
+static int __maybe_unused komeda_pm_suspend(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+ struct drm_device *drm = &mdrv->kms->base;
+ int res;
+
+ res = drm_mode_config_helper_suspend(drm);
+
+ komeda_dev_suspend(mdrv->mdev);
+
+ return res;
+}
+
+static int __maybe_unused komeda_pm_resume(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+ struct drm_device *drm = &mdrv->kms->base;
+
+ komeda_dev_resume(mdrv->mdev);
+
+ return drm_mode_config_helper_resume(drm);
+}
+
+static const struct dev_pm_ops komeda_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume)
+};
+
static struct platform_driver komeda_platform_driver = {
.probe = komeda_platform_probe,
.remove = komeda_platform_remove,
.driver = {
.name = "komeda",
.of_match_table = komeda_of_match,
- .pm = NULL,
+ .pm = &komeda_pm_ops,
},
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
new file mode 100644
index 000000000000..a36fb86cc054
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <drm/drm_print.h>
+
+#include "komeda_dev.h"
+
+struct komeda_str {
+ char *str;
+ u32 sz;
+ u32 len;
+};
+
+/* return 0 on success, < 0 on no space.
+ */
+static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...)
+{
+ va_list args;
+ int num, free_sz;
+ int err;
+
+ free_sz = str->sz - str->len - 1;
+ if (free_sz <= 0)
+ return -ENOSPC;
+
+ va_start(args, fmt);
+
+ num = vsnprintf(str->str + str->len, free_sz, fmt, args);
+
+ va_end(args);
+
+ if (num < free_sz) {
+ str->len += num;
+ err = 0;
+ } else {
+ str->len = str->sz - 1;
+ err = -ENOSPC;
+ }
+
+ return err;
+}
+
+static void evt_sprintf(struct komeda_str *str, u64 evt, const char *msg)
+{
+ if (evt)
+ komeda_sprintf(str, msg);
+}
+
+static void evt_str(struct komeda_str *str, u64 events)
+{
+ if (events == 0ULL) {
+ komeda_sprintf(str, "None");
+ return;
+ }
+
+ evt_sprintf(str, events & KOMEDA_EVENT_VSYNC, "VSYNC|");
+ evt_sprintf(str, events & KOMEDA_EVENT_FLIP, "FLIP|");
+ evt_sprintf(str, events & KOMEDA_EVENT_EOW, "EOW|");
+ evt_sprintf(str, events & KOMEDA_EVENT_MODE, "OP-MODE|");
+
+ evt_sprintf(str, events & KOMEDA_EVENT_URUN, "UNDERRUN|");
+ evt_sprintf(str, events & KOMEDA_EVENT_OVR, "OVERRUN|");
+
+ /* GLB error */
+ evt_sprintf(str, events & KOMEDA_ERR_MERR, "MERR|");
+ evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
+
+ /* DOU error */
+ evt_sprintf(str, events & KOMEDA_ERR_DRIFTTO, "DRIFTTO|");
+ evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
+ evt_sprintf(str, events & KOMEDA_ERR_TETO, "TETO|");
+ evt_sprintf(str, events & KOMEDA_ERR_CSCE, "CSCE|");
+
+ /* LPU errors or events */
+ evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|");
+ evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE2, "ACE2|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE3, "ACE3|");
+
+ /* LPU TBU errors */
+ evt_sprintf(str, events & KOMEDA_ERR_TCF, "TCF|");
+ evt_sprintf(str, events & KOMEDA_ERR_TTNG, "TTNG|");
+ evt_sprintf(str, events & KOMEDA_ERR_TITR, "TITR|");
+ evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
+ evt_sprintf(str, events & KOMEDA_ERR_TTF, "TTF|");
+
+ /* CU errors */
+ evt_sprintf(str, events & KOMEDA_ERR_CPE, "COPROC|");
+ evt_sprintf(str, events & KOMEDA_ERR_ZME, "ZME|");
+ evt_sprintf(str, events & KOMEDA_ERR_CFGE, "CFGE|");
+ evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
+
+ if (str->len > 0 && (str->str[str->len - 1] == '|')) {
+ str->str[str->len - 1] = 0;
+ str->len--;
+ }
+}
+
+static bool is_new_frame(struct komeda_events *a)
+{
+ return (a->pipes[0] | a->pipes[1]) &
+ (KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW);
+}
+
+void komeda_print_events(struct komeda_events *evts)
+{
+ u64 print_evts = KOMEDA_ERR_EVENTS;
+ static bool en_print = true;
+
+ /* Reduce duplicate reports: only print the first event of each frame */
+ if (evts->global || is_new_frame(evts))
+ en_print = true;
+ if (!en_print)
+ return;
+
+ if ((evts->global | evts->pipes[0] | evts->pipes[1]) & print_evts) {
+ char msg[256];
+ struct komeda_str str;
+
+ str.str = msg;
+ str.sz = sizeof(msg);
+ str.len = 0;
+
+ komeda_sprintf(&str, "gcu: ");
+ evt_str(&str, evts->global);
+ komeda_sprintf(&str, ", pipes[0]: ");
+ evt_str(&str, evts->pipes[0]);
+ komeda_sprintf(&str, ", pipes[1]: ");
+ evt_str(&str, evts->pipes[1]);
+
+ DRM_ERROR("err detect: %s\n", msg);
+
+ en_print = false;
+ }
+}
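
komeda_sprintf() above appends into a fixed-size buffer and returns -ENOSPC rather than overflowing, and evt_str() trims the trailing '|' separator once all event names have been added. A minimal userspace sketch of that bounded-append pattern, using hypothetical names rather than the kernel code:

#include <stdarg.h>
#include <stdio.h>

struct bounded_str {
	char *buf;
	unsigned int sz;	/* total buffer size */
	unsigned int len;	/* bytes written so far */
};

/* Append formatted text; 0 on success, -1 once the buffer is full. */
static int bounded_sprintf(struct bounded_str *s, const char *fmt, ...)
{
	va_list args;
	int num, free_sz = (int)s->sz - (int)s->len - 1;

	if (free_sz <= 0)
		return -1;

	va_start(args, fmt);
	num = vsnprintf(s->buf + s->len, free_sz, fmt, args);
	va_end(args);

	if (num < free_sz) {
		s->len += num;
		return 0;
	}
	s->len = s->sz - 1;	/* truncated: mark the buffer as full */
	return -1;
}

int main(void)
{
	char msg[32];
	struct bounded_str s = { .buf = msg, .sz = sizeof(msg), .len = 0 };

	bounded_sprintf(&s, "VSYNC|");
	bounded_sprintf(&s, "FLIP|");
	if (s.len && msg[s.len - 1] == '|')	/* trim the trailing separator */
		msg[--s.len] = '\0';
	printf("%s\n", msg);			/* prints "VSYNC|FLIP" */
	return 0;
}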
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index ae274902ff92..52648b4008bc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -48,6 +48,8 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
memset(&evts, 0, sizeof(evts));
status = mdev->funcs->irq_handler(mdev, &evts);
+ komeda_print_events(&evts);
+
/* Notify the crtc to handle the events */
for (i = 0; i < kms->n_crtcs; i++)
komeda_crtc_handle_event(&kms->crtcs[i], &evts);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 45c498e15e7a..456f3c435719 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -166,6 +166,8 @@ static inline bool has_flip_h(u32 rot)
return !!(rotation & DRM_MODE_REFLECT_X);
}
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats);
unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st);
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index cf5bea578ad9..bd6ca7c87037 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -227,6 +227,8 @@ struct komeda_layer {
/* accepted h/v input range before rotation */
struct malidp_range hsize_in, vsize_in;
u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 line_sz;
+ u32 yuv_line_sz; /* maximum line size for YUV422 and YUV420 */
u32 supported_rots;
/* komeda supports layer split which splits a whole image to two parts
* left and right and handle them by two individual layer processors
@@ -323,6 +325,7 @@ struct komeda_improc {
struct komeda_improc_state {
struct komeda_component_state base;
+ u8 color_format, color_depth;
u16 hsize, vsize;
};
@@ -389,6 +392,18 @@ struct komeda_pipeline {
int id;
/** @avail_comps: available components mask of pipeline */
u32 avail_comps;
+ /**
+ * @standalone_disabled_comps:
+ *
+ * When disabling the pipeline, some components cannot be disabled
+ * together with the others; they need a separate, standalone disable.
+ * standalone_disabled_comps is the mask of components that must be
+ * disabled on their own, which introduces a two-phase disable:
+ * phase 1: disable the common components.
+ * phase 2: disable the standalone_disabled_comps.
+ */
+ u32 standalone_disabled_comps;
/** @n_layers: the number of layer on @layers */
int n_layers;
/** @layers: the pipeline layers */
@@ -535,7 +550,7 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
struct komeda_pipeline_state *
komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
struct drm_atomic_state *state);
-void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
void komeda_pipeline_update(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index b848270e0a1f..52750116aa19 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -285,6 +285,7 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
struct komeda_data_flow_cfg *dflow)
{
u32 src_x, src_y, src_w, src_h;
+ u32 line_sz, max_line_sz;
if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
return -EINVAL;
@@ -314,6 +315,22 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
return -EINVAL;
}
+ if (drm_rotation_90_or_270(dflow->rot))
+ line_sz = dflow->in_h;
+ else
+ line_sz = dflow->in_w;
+
+ if (kfb->base.format->hsub > 1)
+ max_line_sz = layer->yuv_line_sz;
+ else
+ max_line_sz = layer->line_sz;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -743,6 +760,7 @@ komeda_improc_validate(struct komeda_improc *improc,
struct komeda_data_flow_cfg *dflow)
{
struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct drm_crtc_state *crtc_st = &kcrtc_st->base;
struct komeda_component_state *c_st;
struct komeda_improc_state *st;
@@ -756,6 +774,34 @@ komeda_improc_validate(struct komeda_improc *improc,
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
+ if (drm_atomic_crtc_needs_modeset(crtc_st)) {
+ u32 output_depths, output_formats;
+ u32 avail_depths, avail_formats;
+
+ komeda_crtc_get_color_config(crtc_st, &output_depths,
+ &output_formats);
+
+ avail_depths = output_depths & improc->supported_color_depths;
+ if (avail_depths == 0) {
+ DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n",
+ output_depths,
+ improc->supported_color_depths);
+ return -EINVAL;
+ }
+
+ avail_formats = output_formats &
+ improc->supported_color_formats;
+ if (!avail_formats) {
+ DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n",
+ output_formats,
+ improc->supported_color_formats);
+ return -EINVAL;
+ }
+
+ st->color_depth = __fls(avail_depths);
+ st->color_format = BIT(__ffs(avail_formats));
+ }
+
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &improc->base, 0);
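
komeda_improc_validate() intersects the connector-reported depths and formats with what the image processor supports, then takes the deepest common depth (__fls) and the lowest common format bit (BIT(__ffs())). A userspace sketch of that selection, using GCC builtins in place of the kernel helpers and made-up masks:

#include <stdint.h>
#include <stdio.h>

/* Highest/lowest set bit index; GCC builtins stand in for __fls()/__ffs(). */
static unsigned int fls_idx(uint32_t v) { return 31 - __builtin_clz(v); }
static unsigned int ffs_idx(uint32_t v) { return __builtin_ctz(v); }

int main(void)
{
	/* made-up masks: connector-reported vs. image-processor supported */
	uint32_t conn_depths = 0x7, improc_depths = 0x3;
	uint32_t conn_formats = 0x6, improc_formats = 0x5;

	uint32_t avail_depths = conn_depths & improc_depths;
	uint32_t avail_formats = conn_formats & improc_formats;

	if (!avail_depths || !avail_formats) {
		fprintf(stderr, "no common colour configuration\n");
		return 1;
	}

	/* deepest common depth, lowest common format bit */
	printf("depth index %u, format bit 0x%x\n",
	       fls_idx(avail_depths), 1u << ffs_idx(avail_formats));
	return 0;
}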
@@ -1218,7 +1264,17 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
return 0;
}
-void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+/* Since standalone disabled components must be disabled separately and last,
+ * a complete disable operation may need to call pipeline_disable twice
+ * (two-phase disabling).
+ * Phase 1: disable the common components, flush it.
+ * Phase 2: disable the standalone disabled components, flush it.
+ *
+ * RETURNS:
+ * true: disable is not complete, needs a phase 2 disable.
+ * false: disable is complete.
+ */
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state)
{
struct komeda_pipeline_state *old;
@@ -1228,9 +1284,14 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
old = komeda_pipeline_get_old_state(pipe, old_state);
- disabling_comps = old->active_comps;
- DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
- pipe->id, disabling_comps);
+ disabling_comps = old->active_comps &
+ (~pipe->standalone_disabled_comps);
+ if (!disabling_comps)
+ disabling_comps = old->active_comps &
+ pipe->standalone_disabled_comps;
+
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
+ pipe->id, old->active_comps, disabling_comps);
dp_for_each_set_bit(id, disabling_comps) {
c = komeda_pipeline_get_component(pipe, id);
@@ -1248,6 +1309,13 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
c->funcs->disable(c);
}
+
+ /* Update the pipeline state; if some components are still active,
+ * return true so the caller performs the phase 2 disable.
+ */
+ old->active_comps &= ~disabling_comps;
+
+ return old->active_comps ? true : false;
}
void komeda_pipeline_update(struct komeda_pipeline *pipe,
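
The reworked komeda_pipeline_disable() peels off the ordinary components first and the standalone ones in a second pass, telling the caller whether another flush is needed. The same mask selection, sketched as standalone C with hypothetical component bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Disable everything that is not standalone first; once those are gone,
 * disable the standalone components. Returns true while another pass
 * (and another hardware flush) is still required.
 */
static bool disable_pass(uint32_t *active, uint32_t standalone)
{
	uint32_t disabling = *active & ~standalone;

	if (!disabling)
		disabling = *active & standalone;

	printf("disabling 0x%02x of active 0x%02x\n",
	       (unsigned)disabling, (unsigned)*active);
	*active &= ~disabling;

	return *active != 0;
}

int main(void)
{
	uint32_t active = 0x1f;		/* five active components */
	uint32_t standalone = 0x10;	/* one must be disabled on its own */

	while (disable_pass(&active, standalone))
		;			/* flush hardware between passes */
	return 0;
}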
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index b72840c06ab7..e465cc4879c9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -141,6 +141,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
struct komeda_dev *mdev = kms->base.dev_private;
struct komeda_wb_connector *kwb_conn;
struct drm_writeback_connector *wb_conn;
+ struct drm_display_info *info;
u32 *formats, n_formats = 0;
int err;
@@ -172,6 +173,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
+ info = &kwb_conn->base.base.display_info;
+ info->bpc = __fls(kcrtc->master->improc->supported_color_depths);
+ info->color_formats = kcrtc->master->improc->supported_color_formats;
+
kcrtc->wb_conn = kwb_conn;
return 0;
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 333b88a5efb0..37d92a06318e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -368,7 +368,7 @@ malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
return false;
}
-struct drm_framebuffer *
+static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -491,9 +491,9 @@ void malidp_error(struct malidp_drm *malidp,
spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}
-void malidp_error_stats_dump(const char *prefix,
- struct malidp_error_stats error_stats,
- struct seq_file *m)
+static void malidp_error_stats_dump(const char *prefix,
+ struct malidp_error_stats error_stats,
+ struct seq_file *m)
{
seq_printf(m, "[%s] num_errors : %d\n", prefix,
error_stats.num_errors);
@@ -665,7 +665,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
}
-DEVICE_ATTR_RO(core_id);
+static DEVICE_ATTR_RO(core_id);
static int malidp_init_sysfs(struct device *dev)
{
@@ -817,6 +817,12 @@ static int malidp_bind(struct device *dev)
malidp->core_id = version;
+ ret = of_property_read_u32(dev->of_node,
+ "arm,malidp-arqos-value",
+ &hwdev->arqos_value);
+ if (ret)
+ hwdev->arqos_value = 0x0;
+
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index bd8265f02e0b..ca570b135478 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -379,6 +379,15 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
else
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+
+ /*
+ * Program the RQoS register to avoid the flicker issue seen at
+ * high resolutions on the LS1028A.
+ */
+ if (hwdev->arqos_value) {
+ val = hwdev->arqos_value;
+ malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY);
+ }
}
int malidp_format_get_bpp(u32 fmt)
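
The malidp changes read an optional "arm,malidp-arqos-value" property, fall back to 0 when it is absent, and only program MALIDP500_RQOS_QUALITY when a value was given. The shape of that optional-property-with-default pattern, sketched outside the kernel with a stand-in for the DT accessor:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for of_property_read_u32(): < 0 when the property is absent. */
static int read_optional_u32(const char *prop, uint32_t *val)
{
	(void)prop;
	(void)val;
	return -1;		/* property not present in this example */
}

int main(void)
{
	uint32_t arqos_value;

	/* An absent property means "leave the RQoS register alone". */
	if (read_optional_u32("arm,malidp-arqos-value", &arqos_value))
		arqos_value = 0x0;

	if (arqos_value)
		printf("would set RQoS bits 0x%x\n", (unsigned)arqos_value);
	else
		printf("RQoS left at its hardware default\n");
	return 0;
}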
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 968a65eed371..e4c36bc90bda 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -251,6 +251,9 @@ struct malidp_hw_device {
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
+
+ /* priority level of the RQOS register used to drive the ARQOS signal */
+ u32 arqos_value;
};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 993031542fa1..514c50dcb74d 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -210,6 +210,16 @@
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
+/*
+ * The quality of service (QoS) register on the DP500. RQOS register values
+ * are driven by the ARQOS signal, using AXI transactions, depending on the
+ * FIFO input level.
+ * The RQOS register can also set QoS levels for:
+ * - RED_ARQOS @ A 4-bit signal value for close to underflow conditions
+ * - GREEN_ARQOS @ A 4-bit signal value for normal conditions
+ */
+#define MALIDP500_RQOS_QUALITY 0x00500
+
/* register offsets and bits specific to DP550/DP650 */
#define MALIDP550_ADDR_SPACE_SIZE 0x10000
#define MALIDP550_DE_CONTROL 0x00010
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 829620d5326c..fbcf2f45cef5 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -4,6 +4,8 @@ config DRM_AST
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 6ed6ff49efc0..1f17794b0890 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -35,7 +35,6 @@
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
@@ -201,10 +200,7 @@ static struct pci_driver ast_pci_driver = {
.driver.pm = &ast_pm_ops,
};
-static const struct file_operations ast_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(ast_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 244cc7c382af..ff161bd622f3 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -82,6 +82,25 @@ enum ast_tx_chip {
#define AST_DRAM_4Gx16 7
#define AST_DRAM_8Gx16 8
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+
struct ast_private {
struct drm_device *dev;
@@ -97,8 +116,11 @@ struct ast_private {
int fb_mtrr;
- struct drm_gem_object *cursor_cache;
- int next_cursor;
+ struct {
+ struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
+ unsigned int next_index;
+ } cursor;
+
bool support_wide_screen;
enum {
ast_use_p2a,
@@ -199,23 +221,6 @@ static inline void ast_open_key(struct ast_private *ast)
#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-#define AST_MAX_HWC_WIDTH 64
-#define AST_MAX_HWC_HEIGHT 64
-
-#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
-#define AST_HWC_SIGNATURE_SIZE 32
-
-#define AST_DEFAULT_HWC_NUM 2
-/* define for signature structure */
-#define AST_HWC_SIGNATURE_CHECKSUM 0x00
-#define AST_HWC_SIGNATURE_SizeX 0x04
-#define AST_HWC_SIGNATURE_SizeY 0x08
-#define AST_HWC_SIGNATURE_X 0x0C
-#define AST_HWC_SIGNATURE_Y 0x10
-#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
-#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
-
-
struct ast_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 50de8e47659c..21715d6a9b56 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -33,7 +33,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index d349c721501c..b13eaa2619ab 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -687,17 +687,6 @@ static void ast_encoder_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-
-static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
-
static const struct drm_encoder_funcs ast_enc_funcs = {
.destroy = ast_encoder_destroy,
};
@@ -847,7 +836,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
.mode_valid = ast_mode_valid,
.get_modes = ast_get_modes,
- .best_encoder = ast_best_single_encoder,
};
static const struct drm_connector_funcs ast_connector_funcs = {
@@ -895,50 +883,53 @@ static int ast_connector_init(struct drm_device *dev)
static int ast_cursor_init(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- int size;
- int ret;
- struct drm_gem_object *obj;
+ size_t size, i;
struct drm_gem_vram_object *gbo;
- s64 gpu_addr;
- void *base;
+ int ret;
- size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+ size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- ret = ast_gem_create(dev, size, true, &obj);
- if (ret)
- return ret;
- gbo = drm_gem_vram_of_gem(obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- goto fail;
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- ret = (int)gpu_addr;
- goto fail;
- }
+ for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
+ size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_drm_gem_vram_put;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret) {
+ drm_gem_vram_put(gbo);
+ goto err_drm_gem_vram_put;
+ }
- /* kmap the object */
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto fail;
+ ast->cursor.gbo[i] = gbo;
}
- ast->cursor_cache = obj;
return 0;
-fail:
+
+err_drm_gem_vram_put:
+ while (i) {
+ --i;
+ gbo = ast->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ ast->cursor.gbo[i] = NULL;
+ }
return ret;
}
static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(ast->cursor_cache);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- drm_gem_object_put_unlocked(ast->cursor_cache);
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
+ gbo = ast->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
}
int ast_mode_init(struct drm_device *dev)
@@ -1076,23 +1067,6 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
kfree(i2c);
}
-static void ast_show_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- u8 jreg;
-
- jreg = 0x2;
- /* enable ARGB cursor */
- jreg |= 1;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
-}
-
-static void ast_hide_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
-}
-
static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
{
union {
@@ -1149,21 +1123,99 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
return csum;
}
+static int ast_cursor_update(void *dst, void *src, unsigned int width,
+ unsigned int height)
+{
+ u32 csum;
+
+ /* do data transfer to cursor cache */
+ csum = copy_cursor_image(src, dst, width, height);
+
+ /* write checksum + signature */
+ dst += AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+ return 0;
+}
+
+static void ast_cursor_set_base(struct ast_private *ast, u64 address)
+{
+ u8 addr0 = (address >> 3) & 0xff;
+ u8 addr1 = (address >> 11) & 0xff;
+ u8 addr2 = (address >> 19) & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
+}
+
+static int ast_show_cursor(struct drm_crtc *crtc, void *src,
+ unsigned int width, unsigned int height)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_vram_object *gbo;
+ void *dst;
+ s64 off;
+ int ret;
+ u8 jreg;
+
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+ off = drm_gem_vram_offset(gbo);
+ if (off < 0) {
+ ret = (int)off;
+ goto err_drm_gem_vram_vunmap;
+ }
+
+ ret = ast_cursor_update(dst, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ ast_cursor_set_base(ast, off);
+
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+ ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+
+ ++ast->cursor.next_index;
+ ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
+
+ drm_gem_vram_vunmap(gbo, dst);
+
+ return 0;
+
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, dst);
+ return ret;
+}
+
+static void ast_hide_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
static int ast_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height)
{
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct drm_gem_object *obj;
struct drm_gem_vram_object *gbo;
- s64 dst_gpu;
- u64 gpu_addr;
- u32 csum;
+ u8 *src;
int ret;
- u8 *src, *dst;
if (!handle) {
ast_hide_cursor(crtc);
@@ -1179,70 +1231,23 @@ static int ast_cursor_set(struct drm_crtc *crtc,
return -ENOENT;
}
gbo = drm_gem_vram_of_gem(obj);
-
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret)
- goto err_drm_gem_object_put_unlocked;
- src = drm_gem_vram_kmap(gbo, true, NULL);
+ src = drm_gem_vram_vmap(gbo);
if (IS_ERR(src)) {
ret = PTR_ERR(src);
- goto err_drm_gem_vram_unpin;
- }
-
- dst = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
- false, NULL);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- goto err_drm_gem_vram_kunmap;
- }
- dst_gpu = drm_gem_vram_offset(drm_gem_vram_of_gem(ast->cursor_cache));
- if (dst_gpu < 0) {
- ret = (int)dst_gpu;
- goto err_drm_gem_vram_kunmap;
- }
-
- dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
-
- /* do data transfer to cursor cache */
- csum = copy_cursor_image(src, dst, width, height);
-
- /* write checksum + signature */
- {
- struct drm_gem_vram_object *dst_gbo =
- drm_gem_vram_of_gem(ast->cursor_cache);
- u8 *dst = drm_gem_vram_kmap(dst_gbo, false, NULL);
- dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
- writel(csum, dst);
- writel(width, dst + AST_HWC_SIGNATURE_SizeX);
- writel(height, dst + AST_HWC_SIGNATURE_SizeY);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
-
- /* set pattern offset */
- gpu_addr = (u64)dst_gpu;
- gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
- gpu_addr >>= 3;
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+ goto err_drm_gem_object_put_unlocked;
}
- ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
- ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
-
- ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
- ast_show_cursor(crtc);
+ ret = ast_show_cursor(crtc, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(gbo, src);
drm_gem_object_put_unlocked(obj);
return 0;
-err_drm_gem_vram_kunmap:
- drm_gem_vram_kunmap(gbo);
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, src);
err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
@@ -1253,12 +1258,17 @@ static int ast_cursor_move(struct drm_crtc *crtc,
{
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_vram_object *gbo;
int x_offset, y_offset;
- u8 *sig;
+ u8 *dst, *sig;
+ u8 jreg;
- sig = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
- false, NULL);
- sig += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
@@ -1281,7 +1291,11 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
- ast_show_cursor(crtc);
+ jreg = 0x02 |
+ 0x01; /* enable ARGB4444 cursor */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+
+ drm_gem_vram_vunmap(gbo, dst);
return 0;
}
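
ast_cursor_set_base() writes the active cursor buffer's VRAM offset, shifted right by 3, into three 8-bit CRTC index registers (0xc8/0xc9/0xca); ast_show_cursor() then rotates to the next buffer so updates double-buffer cleanly. A tiny sketch of that address encoding with the register writes replaced by prints:

#include <stdint.h>
#include <stdio.h>

/* Split a cursor-buffer VRAM offset across the three 8-bit base registers. */
static void cursor_base_regs(uint64_t address, uint8_t regs[3])
{
	regs[0] = (address >> 3) & 0xff;	/* CRTC index 0xc8 */
	regs[1] = (address >> 11) & 0xff;	/* CRTC index 0xc9 */
	regs[2] = (address >> 19) & 0xff;	/* CRTC index 0xca */
}

int main(void)
{
	uint8_t regs[3];

	cursor_base_regs(0x00fee000, regs);	/* an 8-byte aligned offset */
	printf("0xc8=%02x 0xc9=%02x 0xca=%02x\n",
	       (unsigned)regs[0], (unsigned)regs[1], (unsigned)regs[2]);
	return 0;
}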
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index c52d92294171..fad34106083a 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -30,7 +30,6 @@
#include <drm/drm_print.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
@@ -42,7 +41,7 @@ int ast_mm_init(struct ast_private *ast)
vmm = drm_vram_helper_alloc_mm(
dev, pci_resource_start(dev->pdev, 0),
- ast->vram_size, &drm_gem_vram_mm_funcs);
+ ast->vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 375fa84c548b..121b62682d80 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -107,7 +107,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
output->encoder.possible_crtcs = 0x1;
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 89f5a756fa37..034f202dfe8f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -601,7 +601,6 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
struct drm_framebuffer *fb = state->base.fb;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
- unsigned int tmp;
int ret;
int i;
@@ -694,9 +693,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
* Swap width and size in case of 90 or 270 degrees rotation
*/
if (drm_rotation_90_or_270(state->base.rotation)) {
- tmp = state->src_w;
- state->src_w = state->src_h;
- state->src_h = tmp;
+ swap(state->src_w, state->src_h);
}
if (!desc->layout.size &&
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index 32b043abb668..7bcdf294fed8 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -4,6 +4,8 @@ config DRM_BOCHS
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Choose this option for qemu.
If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 68483a2fc12c..917767173ee6 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -10,7 +10,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vram_mm_helper.h>
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 770e1625d05e..10460878414e 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -58,10 +58,7 @@ err:
return ret;
}
-static const struct file_operations bochs_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(bochs_fops);
static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
@@ -114,7 +111,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "bochsdrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 02a9c1ed165b..3f0006c2470d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -69,33 +69,11 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
}
}
-static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
- gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
-}
-
-static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
- gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
- drm_gem_vram_unpin(gbo);
-}
-
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
.enable = bochs_pipe_enable,
.update = bochs_pipe_update,
- .prepare_fb = bochs_pipe_prepare_fb,
- .cleanup_fb = bochs_pipe_cleanup_fb,
+ .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
+ .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};
static int bochs_connector_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 8f9bb886f7ad..1b74f530b07c 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -11,8 +11,7 @@ int bochs_mm_init(struct bochs_device *bochs)
struct drm_vram_mm *vmm;
vmm = drm_vram_helper_alloc_mm(bochs->dev, bochs->fb_base,
- bochs->fb_size,
- &drm_gem_vram_mm_funcs);
+ bochs->fb_size);
return PTR_ERR_OR_ZERO(vmm);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 1cc9f502c1f2..34362976cd6f 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -87,8 +87,7 @@ config DRM_SIL_SII8620
depends on OF
select DRM_KMS_HELPER
imply EXTCON
- select INPUT
- select RC_CORE
+ depends on RC_CORE || !RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 3c7cc5af735c..274989f96a91 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
@@ -38,12 +39,20 @@
#define AUX_CH_BUFFER_SIZE 16
#define AUX_WAIT_TIMEOUT_MS 15
-static const u8 anx78xx_i2c_addresses[] = {
- [I2C_IDX_TX_P0] = TX_P0,
- [I2C_IDX_TX_P1] = TX_P1,
- [I2C_IDX_TX_P2] = TX_P2,
- [I2C_IDX_RX_P0] = RX_P0,
- [I2C_IDX_RX_P1] = RX_P1,
+static const u8 anx7808_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x78,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
+};
+
+static const u8 anx781x_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x70,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
};
struct anx78xx_platform_data {
@@ -62,7 +71,6 @@ struct anx78xx {
struct i2c_client *client;
struct edid *edid;
struct drm_connector connector;
- struct drm_dp_link link;
struct anx78xx_platform_data pdata;
struct mutex lock;
@@ -715,7 +723,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
/* 1.0V digital core power regulator */
pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
if (IS_ERR(pdata->dvdd10)) {
- DRM_ERROR("DVDD10 regulator not found\n");
+ if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
+ DRM_ERROR("DVDD10 regulator not found\n");
+
return PTR_ERR(pdata->dvdd10);
}
@@ -737,7 +747,7 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
{
- u8 dp_bw, value;
+ u8 dp_bw, dpcd[2];
int err;
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
@@ -790,18 +800,34 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /* Check link capabilities */
- err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to probe link capabilities: %d\n", err);
- return err;
- }
+ /*
+ * Power up the sink (DP_SET_POWER register is only available on DPCD
+ * v1.1 and later).
+ */
+ if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
+ err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+ err);
+ return err;
+ }
- /* Power up the sink */
- err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n", err);
- return err;
+ dpcd[0] &= ~DP_SET_POWER_MASK;
+ dpcd[0] |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
}
/* Possibly enable downspread on the sink */
@@ -840,15 +866,22 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate);
+ dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
+ dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, value);
+ SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
return err;
- err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link);
+ dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd);
+
+ if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+ dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd,
+ sizeof(dpcd));
if (err < 0) {
- DRM_ERROR("Failed to configure DisplayPort link: %d\n", err);
+ DRM_ERROR("Failed to configure link: %d\n", err);
return err;
}
@@ -1301,6 +1334,7 @@ static const struct regmap_config anx78xx_regmap_config = {
};
static const u16 anx78xx_chipid_list[] = {
+ 0x7808,
0x7812,
0x7814,
0x7818,
@@ -1312,6 +1346,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
struct anx78xx *anx78xx;
struct anx78xx_platform_data *pdata;
unsigned int i, idl, idh, version;
+ const u8 *i2c_addresses;
bool found = false;
int err;
@@ -1332,7 +1367,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
err = anx78xx_init_pdata(anx78xx);
if (err) {
- DRM_ERROR("Failed to initialize pdata: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ DRM_ERROR("Failed to initialize pdata: %d\n", err);
+
return err;
}
@@ -1349,22 +1386,26 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
}
/* Map slave addresses of ANX7814 */
+ i2c_addresses = device_get_match_data(&client->dev);
for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
- anx78xx->i2c_dummy[i] = i2c_new_dummy(client->adapter,
- anx78xx_i2c_addresses[i] >> 1);
- if (!anx78xx->i2c_dummy[i]) {
- err = -ENOMEM;
- DRM_ERROR("Failed to reserve I2C bus %02x\n",
- anx78xx_i2c_addresses[i]);
+ struct i2c_client *i2c_dummy;
+
+ i2c_dummy = i2c_new_dummy_device(client->adapter,
+ i2c_addresses[i] >> 1);
+ if (IS_ERR(i2c_dummy)) {
+ err = PTR_ERR(i2c_dummy);
+ DRM_ERROR("Failed to reserve I2C bus %02x: %d\n",
+ i2c_addresses[i], err);
goto err_unregister_i2c;
}
+ anx78xx->i2c_dummy[i] = i2c_dummy;
anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
&anx78xx_regmap_config);
if (IS_ERR(anx78xx->map[i])) {
err = PTR_ERR(anx78xx->map[i]);
DRM_ERROR("Failed regmap initialization %02x\n",
- anx78xx_i2c_addresses[i]);
+ i2c_addresses[i]);
goto err_unregister_i2c;
}
}
@@ -1463,7 +1504,10 @@ MODULE_DEVICE_TABLE(i2c, anx78xx_id);
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id anx78xx_match_table[] = {
- { .compatible = "analogix,anx7814", },
+ { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
+ { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, anx78xx_match_table);
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
index 25e063bcecbc..55d6c2109740 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.h
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.h
@@ -6,15 +6,8 @@
#ifndef __ANX78xx_H
#define __ANX78xx_H
-#define TX_P0 0x70
-#define TX_P1 0x7a
-#define TX_P2 0x72
-
-#define RX_P0 0x7e
-#define RX_P1 0x80
-
/***************************************************************/
-/* Register definition of device address 0x7e */
+/* Register definitions for RX_P0 */
/***************************************************************/
/*
@@ -171,7 +164,7 @@
#define SP_VSI_RCVD BIT(1)
/***************************************************************/
-/* Register definition of device address 0x80 */
+/* Register definitions for RX_P1 */
/***************************************************************/
/* HDCP BCAPS Shadow Register */
@@ -217,7 +210,7 @@
#define SP_SET_AVMUTE BIT(0)
/***************************************************************/
-/* Register definition of device address 0x70 */
+/* Register definitions for TX_P0 */
/***************************************************************/
/* HDCP Status Register */
@@ -451,7 +444,7 @@
#define SP_DP_BUF_DATA0_REG 0xf0
/***************************************************************/
-/* Register definition of device address 0x72 */
+/* Register definitions for TX_P2 */
/***************************************************************/
/*
@@ -674,7 +667,7 @@
#define SP_INT_CTRL_REG 0xff
/***************************************************************/
-/* Register definition of device address 0x7a */
+/* Register definitions for TX_P1 */
/***************************************************************/
/* DP TX Link Training Control Register */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 22885dceaa17..bb411fe52ae8 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -21,6 +21,7 @@
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 6166dca6be81..3a5bd4e7fd1e 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -956,7 +956,8 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
panel = of_drm_find_panel(np);
if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
if (!bridge)
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 7aa789c35882..cc33dc411b9e 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -12,6 +12,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 2ab2c234f26c..e2132a8d5106 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -106,7 +106,8 @@ static int lvds_encoder_probe(struct platform_device *pdev)
}
lvds_encoder->panel_bridge =
- devm_drm_panel_bridge_add(dev, panel, DRM_MODE_CONNECTOR_LVDS);
+ devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_LVDS);
if (IS_ERR(lvds_encoder->panel_bridge))
return PTR_ERR(lvds_encoder->panel_bridge);
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 6e81e5db57f2..e8a49f6146c6 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -25,6 +25,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d4a1cc5052c3..57ff01339559 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index b12ae3a4c5f1..f4e293e7cf64 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -133,8 +134,6 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* just calls the appropriate functions from &drm_panel.
*
* @panel: The drm_panel being wrapped. Must be non-NULL.
- * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
- * created.
*
* For drivers converting from directly using drm_panel: The expected
* usage pattern is that during either encoder module probe or DSI
@@ -148,11 +147,37 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* drm_mode_config_cleanup() if the bridge has already been attached), then
* drm_panel_bridge_remove() to free it.
*
+ * The connector type is set to @panel->connector_type, which must be set to a
+ * known type. Calling this function with a panel whose connector type is
+ * DRM_MODE_CONNECTOR_Unknown will return NULL.
+ *
 * See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
*/
-struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
- u32 connector_type)
+struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
+{
+ if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
+ return NULL;
+
+ return drm_panel_bridge_add_typed(panel, panel->connector_type);
+}
+EXPORT_SYMBOL(drm_panel_bridge_add);
+
+/**
+ * drm_panel_bridge_add_typed - Creates a &drm_bridge and &drm_connector with
+ * an explicit connector type.
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * This is just like drm_panel_bridge_add(), but forces the connector type to
+ * @connector_type instead of inferring it from the panel.
+ *
+ * This function is deprecated and should not be used in new drivers. Use
+ * drm_panel_bridge_add() instead, and fix panel drivers as necessary if they
+ * don't report a connector type.
+ */
+struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
+ u32 connector_type)
{
struct panel_bridge *panel_bridge;
@@ -176,7 +201,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
return &panel_bridge->bridge;
}
-EXPORT_SYMBOL(drm_panel_bridge_add);
+EXPORT_SYMBOL(drm_panel_bridge_add_typed);
/**
* drm_panel_bridge_remove - Unregisters and frees a drm_bridge
@@ -213,15 +238,38 @@ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
* that just calls the appropriate functions from &drm_panel.
* @dev: device to tie the bridge lifetime to
* @panel: The drm_panel being wrapped. Must be non-NULL.
- * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
- * created.
*
* This is the managed version of drm_panel_bridge_add() which automatically
* calls drm_panel_bridge_remove() when @dev is unbound.
*/
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
- struct drm_panel *panel,
- u32 connector_type)
+ struct drm_panel *panel)
+{
+ if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
+ return NULL;
+
+ return devm_drm_panel_bridge_add_typed(dev, panel,
+ panel->connector_type);
+}
+EXPORT_SYMBOL(devm_drm_panel_bridge_add);
+
+/**
+ * devm_drm_panel_bridge_add_typed - Creates a managed &drm_bridge and
+ * &drm_connector with an explicit connector type.
+ * @dev: device to tie the bridge lifetime to
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * This is just like devm_drm_panel_bridge_add(), but forces the connector type
+ * to @connector_type instead of inferring it from the panel.
+ *
+ * This function is deprecated and should not be used in new drivers. Use
+ * devm_drm_panel_bridge_add() instead, and fix panel drivers as necessary if
+ * they don't report a connector type.
+ */
+struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type)
{
struct drm_bridge **ptr, *bridge;
@@ -230,7 +278,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- bridge = drm_panel_bridge_add(panel, connector_type);
+ bridge = drm_panel_bridge_add_typed(panel, connector_type);
if (!IS_ERR(bridge)) {
*ptr = bridge;
devres_add(dev, ptr);
@@ -240,4 +288,4 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
return bridge;
}
-EXPORT_SYMBOL(devm_drm_panel_bridge_add);
+EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
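
The panel-bridge rework keeps drm_panel_bridge_add() as the preferred entry point, inferring the connector type from the panel and refusing DRM_MODE_CONNECTOR_Unknown, while the explicit-type variants become deprecated *_typed() helpers. The shape of that split, sketched with hypothetical names outside the DRM framework:

#include <stdio.h>

enum connector_type { CONN_UNKNOWN = 0, CONN_LVDS, CONN_DSI };

struct panel { enum connector_type connector_type; };
struct bridge { enum connector_type connector_type; };

static struct bridge the_bridge;

/* Deprecated path: the caller forces the connector type explicitly. */
static struct bridge *panel_bridge_add_typed(struct panel *p,
					     enum connector_type type)
{
	(void)p;
	the_bridge.connector_type = type;
	return &the_bridge;
}

/* Preferred path: infer the type from the panel, refuse unknown types. */
static struct bridge *panel_bridge_add(struct panel *p)
{
	if (p->connector_type == CONN_UNKNOWN)
		return NULL;
	return panel_bridge_add_typed(p, p->connector_type);
}

int main(void)
{
	struct panel lvds = { .connector_type = CONN_LVDS };
	struct panel legacy = { .connector_type = CONN_UNKNOWN };

	printf("%s\n", panel_bridge_add(&lvds) ? "bridge created" : "refused");
	printf("%s\n", panel_bridge_add(&legacy) ? "bridge created" : "refused");
	return 0;
}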
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 93c68e2e9484..b7a72dfdcac3 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 38f75ac580df..b70e8c5cf2e1 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 25d4ad8c7ad6..f81f81b7051f 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -13,6 +13,7 @@
* Dharam Kumar <dharam.kr@samsung.com>
*/
#include <drm/bridge/mhl.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -841,39 +842,28 @@ static int sii9234_init_resources(struct sii9234 *ctx,
ctx->client[I2C_MHL] = client;
- ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR);
- if (!ctx->client[I2C_TPI]) {
+ ctx->client[I2C_TPI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_TPI_ADDR);
+ if (IS_ERR(ctx->client[I2C_TPI])) {
dev_err(ctx->dev, "failed to create TPI client\n");
- return -ENODEV;
+ return PTR_ERR(ctx->client[I2C_TPI]);
}
- ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR);
- if (!ctx->client[I2C_HDMI]) {
+ ctx->client[I2C_HDMI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_HDMI_ADDR);
+ if (IS_ERR(ctx->client[I2C_HDMI])) {
dev_err(ctx->dev, "failed to create HDMI RX client\n");
- goto fail_tpi;
+ return PTR_ERR(ctx->client[I2C_HDMI]);
}
- ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR);
- if (!ctx->client[I2C_CBUS]) {
+ ctx->client[I2C_CBUS] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_CBUS_ADDR);
+ if (IS_ERR(ctx->client[I2C_CBUS])) {
dev_err(ctx->dev, "failed to create CBUS client\n");
- goto fail_hdmi;
+ return PTR_ERR(ctx->client[I2C_CBUS]);
}
return 0;
-
-fail_hdmi:
- i2c_unregister_device(ctx->client[I2C_HDMI]);
-fail_tpi:
- i2c_unregister_device(ctx->client[I2C_TPI]);
-
- return -ENODEV;
-}
-
-static void sii9234_deinit_resources(struct sii9234 *ctx)
-{
- i2c_unregister_device(ctx->client[I2C_CBUS]);
- i2c_unregister_device(ctx->client[I2C_HDMI]);
- i2c_unregister_device(ctx->client[I2C_TPI]);
}
static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
@@ -950,7 +940,6 @@ static int sii9234_remove(struct i2c_client *client)
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
- sii9234_deinit_resources(ctx);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index bd3165ee5354..4c0eef406eb1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -9,6 +9,7 @@
#include <asm/unaligned.h>
#include <drm/bridge/mhl.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -1759,10 +1760,8 @@ static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
scancode &= MHL_RCP_KEY_ID_MASK;
- if (!ctx->rc_dev) {
- dev_dbg(ctx->dev, "RCP input device not initialized\n");
+ if (!IS_ENABLED(CONFIG_RC_CORE) || !ctx->rc_dev)
return false;
- }
if (pressed)
rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
@@ -2099,6 +2098,9 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
struct rc_dev *rc_dev;
int ret;
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc_dev) {
dev_err(ctx->dev, "Failed to allocate RC device\n");
@@ -2213,6 +2215,9 @@ static void sii8620_detach(struct drm_bridge *bridge)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_unregister_device(ctx->rc_dev);
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index ac1e001d0882..70ab4fbdc23e 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -285,7 +285,7 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
ret = cec_register_adapter(cec->adap, pdev->dev.parent);
if (ret < 0) {
- cec_notifier_cec_adap_unregister(cec->notify);
+ cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
return ret;
}
@@ -302,7 +302,7 @@ static int dw_hdmi_cec_remove(struct platform_device *pdev)
{
struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notify);
+ cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 1d15cf9b6821..d7e65c869415 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -102,6 +102,7 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
}
dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
+ dw_hdmi_set_channel_status(hdmi, hparms->iec.status);
dw_hdmi_set_channel_count(hdmi, hparms->channels);
dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation);
@@ -109,6 +110,14 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
hdmi_write(audio, conf0, HDMI_AUD_CONF0);
hdmi_write(audio, conf1, HDMI_AUD_CONF1);
+ return 0;
+}
+
+static int dw_hdmi_i2s_audio_startup(struct device *dev, void *data)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
dw_hdmi_audio_enable(hdmi);
return 0;
@@ -151,11 +160,23 @@ static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
return -EINVAL;
}
+static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
+}
+
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
+ .audio_startup = dw_hdmi_i2s_audio_startup,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
.get_eld = dw_hdmi_i2s_get_eld,
.get_dai_id = dw_hdmi_i2s_get_dai_id,
+ .hook_plugged_cb = dw_hdmi_i2s_hook_plugged_cb,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
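
hook_plugged_cb lets the hdmi-codec register a callback that dw-hdmi invokes with the current connector state at registration time and again whenever detect() sees the state change. A minimal userspace sketch of that plugged-callback pattern, with hypothetical types standing in for the dw-hdmi and codec objects:

#include <stdbool.h>
#include <stdio.h>

typedef void (*plugged_cb)(void *codec, bool plugged);

struct hdmi {
	plugged_cb cb;
	void *codec;
	bool connected;
};

/* Register the callback and immediately report the current state. */
static void set_plugged_cb(struct hdmi *h, plugged_cb fn, void *codec)
{
	h->cb = fn;
	h->codec = codec;
	if (h->cb && h->codec)
		h->cb(h->codec, h->connected);
}

/* Called from the connector detect path; notify only on state changes. */
static void detect(struct hdmi *h, bool connected)
{
	if (connected != h->connected) {
		h->connected = connected;
		if (h->cb && h->codec)
			h->cb(h->codec, connected);
	}
}

static void codec_plugged(void *codec, bool plugged)
{
	(void)codec;
	printf("codec sees HDMI %s\n", plugged ? "plugged" : "unplugged");
}

int main(void)
{
	struct hdmi h = { 0 };
	int dummy_codec;

	set_plugged_cb(&h, codec_plugged, &dummy_codec);	/* unplugged */
	detect(&h, true);					/* plugged */
	detect(&h, true);					/* no change */
	return 0;
}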
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 521d689413c8..67fca439bbfb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -25,7 +25,9 @@
#include <uapi/linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -36,6 +38,7 @@
#include "dw-hdmi-cec.h"
#include "dw-hdmi.h"
+#define DDC_CI_ADDR 0x37
#define DDC_SEGMENT_ADDR 0x30
#define HDMI_EDID_LEN 512
@@ -191,6 +194,10 @@ struct dw_hdmi {
struct mutex cec_notifier_mutex;
struct cec_notifier *cec_notifier;
+
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ enum drm_connector_status last_connector_result;
};
#define HDMI_IH_PHY_STAT0_RX_SENSE \
@@ -215,6 +222,28 @@ static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset)
return val;
}
+static void handle_plugged_change(struct dw_hdmi *hdmi, bool plugged)
+{
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, plugged);
+}
+
+int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ bool plugged;
+
+ mutex_lock(&hdmi->mutex);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ plugged = hdmi->last_connector_result == connector_status_connected;
+ handle_plugged_change(hdmi, plugged);
+ mutex_unlock(&hdmi->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_plugged_cb);
+
static void hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
{
regmap_update_bits(hdmi->regm, reg << hdmi->reg_shift, mask, data);
@@ -398,6 +427,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
u8 addr = msgs[0].addr;
int i, ret = 0;
+ if (addr == DDC_CI_ADDR)
+ /*
+ * The internal I2C controller does not support the multi-byte
+ * read and write operations needed for DDC/CI.
+ * TOFIX: Blacklist the DDC/CI address until we filter out
+ * unsupported I2C operations.
+ */
+ return -EOPNOTSUPP;
+
dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
for (i = 0; i < num; i++) {
@@ -580,6 +618,26 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
return n;
}
+/*
+ * When transmitting IEC60958 linear PCM audio, these registers allow
+ * configuring the channel status information of all the channel status
+ * bits in the IEC60958 frame. For the moment this configuration is only
+ * used when the I2S audio interface, General Purpose Audio (GPA),
+ * or AHB audio DMA (AHBAUDDMA) interface is active
+ * (for S/PDIF interface this information comes from the stream).
+ */
+void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi,
+ u8 *channel_status)
+{
+ /*
+ * Set channel status register for frequency and word length.
+ * Use default values for other registers.
+ */
+ hdmi_writeb(hdmi, channel_status[3], HDMI_FC_AUDSCHNLS7);
+ hdmi_writeb(hdmi, channel_status[4], HDMI_FC_AUDSCHNLS8);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_status);
+
static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
unsigned long pixel_clk, unsigned int sample_rate)
{
@@ -1712,6 +1770,41 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi,
HDMI_FC_DATAUTO0_VSD_MASK);
}
+static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi)
+{
+ const struct drm_connector_state *conn_state = hdmi->connector.state;
+ struct hdmi_drm_infoframe frame;
+ u8 buffer[30];
+ ssize_t err;
+ int i;
+
+ if (!hdmi->plat_data->use_drm_infoframe)
+ return;
+
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+
+ err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
+ if (err < 0)
+ return;
+
+ err = hdmi_drm_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack drm infoframe: %zd\n", err);
+ return;
+ }
+
+ hdmi_writeb(hdmi, frame.version, HDMI_FC_DRM_HB0);
+ hdmi_writeb(hdmi, frame.length, HDMI_FC_DRM_HB1);
+
+ for (i = 0; i < frame.length; i++)
+ hdmi_writeb(hdmi, buffer[4 + i], HDMI_FC_DRM_PB0 + i);
+
+ hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP);
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+}
+
static void hdmi_av_composer(struct dw_hdmi *hdmi,
const struct drm_display_mode *mode)
{
@@ -2023,7 +2116,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step E - Configure audio */
hdmi_clk_regenerator_update_pixel_clock(hdmi);
- hdmi_enable_audio_clk(hdmi, true);
+ hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
}
/* not for DVI mode */
@@ -2033,6 +2126,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step F - Configure AVI InfoFrame */
hdmi_config_AVI(hdmi, mode);
hdmi_config_vendor_specific_infoframe(hdmi, mode);
+ hdmi_config_drm_infoframe(hdmi);
} else {
dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
}
@@ -2161,6 +2255,7 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
+ enum drm_connector_status result;
mutex_lock(&hdmi->mutex);
hdmi->force = DRM_FORCE_UNSPECIFIED;
@@ -2168,7 +2263,18 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
dw_hdmi_update_phy_mask(hdmi);
mutex_unlock(&hdmi->mutex);
- return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+ result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+
+ mutex_lock(&hdmi->mutex);
+ if (result != hdmi->last_connector_result) {
+ dev_dbg(hdmi->dev, "read_hpd result: %d", result);
+ handle_plugged_change(hdmi,
+ result == connector_status_connected);
+ hdmi->last_connector_result = result;
+ }
+ mutex_unlock(&hdmi->mutex);
+
+ return result;
}
static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -2199,6 +2305,45 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
+static bool hdr_metadata_equal(const struct drm_connector_state *old_state,
+ const struct drm_connector_state *new_state)
+{
+ struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
+ struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
+
+ if (!old_blob || !new_blob)
+ return old_blob == new_blob;
+
+ if (old_blob->length != new_blob->length)
+ return false;
+
+ return !memcmp(old_blob->data, new_blob->data, old_blob->length);
+}
+
+static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (!crtc)
+ return 0;
+
+ if (!hdr_metadata_equal(old_state, new_state)) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+
static void dw_hdmi_connector_force(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
@@ -2223,6 +2368,7 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
+ .atomic_check = dw_hdmi_connector_atomic_check,
};
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
@@ -2243,6 +2389,10 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
+ if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property, 0);
+
drm_connector_attach_encoder(connector, encoder);
cec_fill_conn_info_from_drm(&conn_info, connector);
@@ -2619,6 +2769,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
hdmi->mc_clkdis = 0x7f;
+ hdmi->last_connector_result = connector_status_disconnected;
mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index 6988f12d89d9..1999db05bc3b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -158,6 +158,8 @@
#define HDMI_FC_SPDDEVICEINF 0x1062
#define HDMI_FC_AUDSCONF 0x1063
#define HDMI_FC_AUDSSTAT 0x1064
+#define HDMI_FC_AUDSCHNLS7 0x106e
+#define HDMI_FC_AUDSCHNLS8 0x106f
#define HDMI_FC_DATACH0FILL 0x1070
#define HDMI_FC_DATACH1FILL 0x1071
#define HDMI_FC_DATACH2FILL 0x1072
@@ -252,6 +254,7 @@
#define HDMI_FC_POL2 0x10DB
#define HDMI_FC_PRCONF 0x10E0
#define HDMI_FC_SCRAMBLER_CTRL 0x10E1
+#define HDMI_FC_PACKET_TX_EN 0x10E3
#define HDMI_FC_GMD_STAT 0x1100
#define HDMI_FC_GMD_EN 0x1101
@@ -287,6 +290,37 @@
#define HDMI_FC_GMD_PB26 0x111F
#define HDMI_FC_GMD_PB27 0x1120
+#define HDMI_FC_DRM_UP 0x1167
+#define HDMI_FC_DRM_HB0 0x1168
+#define HDMI_FC_DRM_HB1 0x1169
+#define HDMI_FC_DRM_PB0 0x116A
+#define HDMI_FC_DRM_PB1 0x116B
+#define HDMI_FC_DRM_PB2 0x116C
+#define HDMI_FC_DRM_PB3 0x116D
+#define HDMI_FC_DRM_PB4 0x116E
+#define HDMI_FC_DRM_PB5 0x116F
+#define HDMI_FC_DRM_PB6 0x1170
+#define HDMI_FC_DRM_PB7 0x1171
+#define HDMI_FC_DRM_PB8 0x1172
+#define HDMI_FC_DRM_PB9 0x1173
+#define HDMI_FC_DRM_PB10 0x1174
+#define HDMI_FC_DRM_PB11 0x1175
+#define HDMI_FC_DRM_PB12 0x1176
+#define HDMI_FC_DRM_PB13 0x1177
+#define HDMI_FC_DRM_PB14 0x1178
+#define HDMI_FC_DRM_PB15 0x1179
+#define HDMI_FC_DRM_PB16 0x117A
+#define HDMI_FC_DRM_PB17 0x117B
+#define HDMI_FC_DRM_PB18 0x117C
+#define HDMI_FC_DRM_PB19 0x117D
+#define HDMI_FC_DRM_PB20 0x117E
+#define HDMI_FC_DRM_PB21 0x117F
+#define HDMI_FC_DRM_PB22 0x1180
+#define HDMI_FC_DRM_PB23 0x1181
+#define HDMI_FC_DRM_PB24 0x1182
+#define HDMI_FC_DRM_PB25 0x1183
+#define HDMI_FC_DRM_PB26 0x1184
+
#define HDMI_FC_DBGFORCE 0x1200
#define HDMI_FC_DBGAUD0CH0 0x1201
#define HDMI_FC_DBGAUD1CH0 0x1202
@@ -742,6 +776,11 @@ enum {
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
+/* FC_PACKET_TX_EN field values */
+ HDMI_FC_PACKET_TX_EN_DRM_MASK = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_ENABLE = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_DISABLE = 0x00,
+
/* FC_AVICONF0-FC_AVICONF3 field values */
HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 675442bfc1bd..b6e793bb653c 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -316,7 +316,8 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
return ret;
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
@@ -981,7 +982,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct reset_control *apb_rst;
struct dw_mipi_dsi *dsi;
- struct resource *res;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
@@ -997,11 +997,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
}
if (!plat_data->base) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return ERR_PTR(-ENODEV);
-
- dsi->base = devm_ioremap_resource(dev, res);
+ dsi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->base))
return ERR_PTR(-ENODEV);
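The devm_platform_ioremap_resource() conversion above is a straight simplification; assuming the helper wraps platform_get_resource() plus devm_ioremap_resource() as in mainline, the two forms below are equivalent (error handling elided):

    /* open-coded lookup and mapping */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);

    /* one-call equivalent */
    base = devm_platform_ioremap_resource(pdev, 0);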
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 170f162ffa55..db298f550a5a 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -16,6 +16,7 @@
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8a8d605021f0..8029478ffebb 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
@@ -228,7 +229,9 @@ static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
struct tc_edp_link {
- struct drm_dp_link base;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned int rate;
+ u8 num_lanes;
u8 assr;
bool scrambler_dis;
bool spread;
@@ -437,9 +440,9 @@ static u32 tc_srcctrl(struct tc_data *tc)
reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
if (tc->link.spread)
reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
- if (tc->link.base.rate != 162000)
+ if (tc->link.rate != 162000)
reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
return reg;
}
@@ -662,23 +665,35 @@ err:
static int tc_get_display_props(struct tc_data *tc)
{
+ u8 revision, num_lanes;
+ unsigned int rate;
int ret;
u8 reg;
/* Read DP Rx Link Capability */
- ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ ret = drm_dp_dpcd_read(&tc->aux, DP_DPCD_REV, tc->link.dpcd,
+ DP_RECEIVER_CAP_SIZE);
if (ret < 0)
goto err_dpcd_read;
- if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+
+ revision = tc->link.dpcd[DP_DPCD_REV];
+ rate = drm_dp_max_link_rate(tc->link.dpcd);
+ num_lanes = drm_dp_max_lane_count(tc->link.dpcd);
+
+ if (rate != 162000 && rate != 270000) {
dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
- tc->link.base.rate = 270000;
+ rate = 270000;
}
- if (tc->link.base.num_lanes > 2) {
+ tc->link.rate = rate;
+
+ if (num_lanes > 2) {
dev_dbg(tc->dev, "Falling to 2 lanes\n");
- tc->link.base.num_lanes = 2;
+ num_lanes = 2;
}
+ tc->link.num_lanes = num_lanes;
+
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, &reg);
if (ret < 0)
goto err_dpcd_read;
@@ -696,11 +711,11 @@ static int tc_get_display_props(struct tc_data *tc)
tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
- tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
- (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
- tc->link.base.num_lanes,
- (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
- "enhanced" : "non-enhanced");
+ revision >> 4, revision & 0x0f,
+ (tc->link.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+ tc->link.num_lanes,
+ drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ "enhanced" : "default");
dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n",
tc->link.spread ? "0.5%" : "0.0%",
tc->link.scrambler_dis ? "disabled" : "enabled");
@@ -739,7 +754,7 @@ static int tc_set_video_mode(struct tc_data *tc,
*/
in_bw = mode->clock * bits_per_pixel / 8;
- out_bw = tc->link.base.num_lanes * tc->link.base.rate;
+ out_bw = tc->link.num_lanes * tc->link.rate;
max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
dev_dbg(tc->dev, "set mode %dx%d\n",
@@ -901,7 +916,7 @@ static int tc_main_link_enable(struct tc_data *tc)
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
- ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
if (ret)
return ret;
@@ -911,7 +926,7 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Setup Main Link */
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
@@ -974,7 +989,13 @@ static int tc_main_link_enable(struct tc_data *tc)
}
/* Setup Link & DPRx Config for Training */
- ret = drm_dp_link_configure(aux, &tc->link.base);
+ tmp[0] = drm_dp_link_rate_to_bw_code(tc->link.rate);
+ tmp[1] = tc->link.num_lanes;
+
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
+ tmp[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
@@ -1018,9 +1039,8 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Enable DP0 to start Link Training */
ret = regmap_write(tc->regmap, DP0CTL,
- ((tc->link.base.capabilities &
- DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) |
- DP_EN);
+ (drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ EF_EN : 0) | DP_EN);
if (ret)
return ret;
@@ -1099,7 +1119,7 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = -ENODEV;
}
- if (tc->link.base.num_lanes == 2) {
+ if (tc->link.num_lanes == 2) {
value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS;
if (value != DP_CHANNEL_EQ_BITS) {
@@ -1170,7 +1190,7 @@ static int tc_stream_enable(struct tc_data *tc)
return ret;
value = VID_MN_GEN | DP_EN;
- if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
value |= EF_EN;
ret = regmap_write(tc->regmap, DP0CTL, value);
if (ret)
@@ -1296,7 +1316,7 @@ static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge,
return MODE_CLOCK_HIGH;
req = mode->clock * bits_per_pixel / 8;
- avail = tc->link.base.num_lanes * tc->link.base.rate;
+ avail = tc->link.num_lanes * tc->link.rate;
if (req > avail)
return MODE_BAD;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 0a580957c8cf..43abf01ebd4c 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 61cc2354ef1b..6f6d6d1e60ae 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -284,8 +285,8 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi)
else
dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
- dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode,
- "hpd-gpios", 0, GPIOD_IN, "hpd");
+ dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
+ "hpd", 0, GPIOD_IN, "hpd");
if (IS_ERR(dvi->hpd)) {
ret = PTR_ERR(dvi->hpd);
dvi->hpd = NULL;
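Note that fwnode_gpiod_get_index() takes the bare connection ID ("hpd") and, as with other gpiod lookups, tries the "hpd-gpios"/"hpd-gpio" suffixed properties itself, so the existing device-tree binding keeps working. A minimal usage sketch mirroring the hunk above, with failure simply treated as "no HPD line":

    struct gpio_desc *hpd;

    hpd = fwnode_gpiod_get_index(fwnode, "hpd", 0, GPIOD_IN, "hpd");
    if (IS_ERR(hpd))
            hpd = NULL;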
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 36a69aec8a4b..248c9f765c45 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -390,7 +390,7 @@ static int cirrus_conn_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
/* cirrus (simple) display pipe */
-static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
@@ -510,7 +510,7 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
-DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
+DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -532,7 +532,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
struct cirrus_device *cirrus;
int ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
deleted file mode 100644
index 1f73916e528e..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Red Hat
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#ifndef __CIRRUS_DRV_H__
-#define __CIRRUS_DRV_H__
-
-#include <video/vga.h>
-
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-
-#include <drm/drm_gem.h>
-
-#define DRIVER_AUTHOR "Matthew Garrett"
-
-#define DRIVER_NAME "cirrus"
-#define DRIVER_DESC "qemu Cirrus emulation"
-#define DRIVER_DATE "20110418"
-
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-#define CIRRUSFB_CONN_LIMIT 1
-
-#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
-#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
-
-#define SEQ_INDEX 4
-#define SEQ_DATA 5
-
-#define WREG_SEQ(reg, v) \
- do { \
- WREG8(SEQ_INDEX, reg); \
- WREG8(SEQ_DATA, v); \
- } while (0) \
-
-#define CRT_INDEX 0x14
-#define CRT_DATA 0x15
-
-#define WREG_CRT(reg, v) \
- do { \
- WREG8(CRT_INDEX, reg); \
- WREG8(CRT_DATA, v); \
- } while (0) \
-
-#define GFX_INDEX 0xe
-#define GFX_DATA 0xf
-
-#define WREG_GFX(reg, v) \
- do { \
- WREG8(GFX_INDEX, reg); \
- WREG8(GFX_DATA, v); \
- } while (0) \
-
-/*
- * Cirrus has a "hidden" DAC register that can be accessed by writing to
- * the pixel mask register to reset the state, then reading from the register
- * four times. The next write will then pass to the DAC
- */
-#define VGA_DAC_MASK 0x6
-
-#define WREG_HDR(v) \
- do { \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- WREG8(VGA_DAC_MASK, v); \
- } while (0) \
-
-
-#define CIRRUS_MAX_FB_HEIGHT 4096
-#define CIRRUS_MAX_FB_WIDTH 4096
-
-#define CIRRUS_DPMS_CLEARED (-1)
-
-#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
-#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-
-struct cirrus_crtc {
- struct drm_crtc base;
- int last_dpms;
- bool enabled;
-};
-
-struct cirrus_fbdev;
-struct cirrus_mode_info {
- struct cirrus_crtc *crtc;
- /* pointer to fbdev info structure */
- struct cirrus_fbdev *gfbdev;
-};
-
-struct cirrus_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-struct cirrus_connector {
- struct drm_connector base;
-};
-
-struct cirrus_mc {
- resource_size_t vram_size;
- resource_size_t vram_base;
-};
-
-struct cirrus_device {
- struct drm_device *dev;
- unsigned long flags;
-
- resource_size_t rmmio_base;
- resource_size_t rmmio_size;
- void __iomem *rmmio;
-
- struct cirrus_mc mc;
- struct cirrus_mode_info mode_info;
-
- int num_crtc;
- int fb_mtrr;
-
- struct {
- struct ttm_bo_device bdev;
- } ttm;
- bool mm_inited;
-};
-
-
-struct cirrus_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct drm_framebuffer *gfb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
-
-struct cirrus_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
-
-static inline struct cirrus_bo *
-cirrus_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct cirrus_bo, bo);
-}
-
-
-#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
-
- /* cirrus_main.c */
-int cirrus_device_init(struct cirrus_device *cdev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
- uint32_t flags);
-void cirrus_device_fini(struct cirrus_device *cdev);
-void cirrus_gem_free_object(struct drm_gem_object *obj);
-int cirrus_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset);
-int cirrus_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-int cirrus_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-int cirrus_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
- int bpp, int pitch);
-
- /* cirrus_display.c */
-int cirrus_modeset_init(struct cirrus_device *cdev);
-void cirrus_modeset_fini(struct cirrus_device *cdev);
-
- /* cirrus_fbdev.c */
-int cirrus_fbdev_init(struct cirrus_device *cdev);
-void cirrus_fbdev_fini(struct cirrus_device *cdev);
-
-
-
- /* cirrus_irq.c */
-void cirrus_driver_irq_preinstall(struct drm_device *dev);
-int cirrus_driver_irq_postinstall(struct drm_device *dev);
-void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
-
- /* cirrus_kms.c */
-int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
-void cirrus_driver_unload(struct drm_device *dev);
-extern struct drm_ioctl_desc cirrus_ioctls[];
-extern int cirrus_max_ioctl;
-
-int cirrus_mm_init(struct cirrus_device *cirrus);
-void cirrus_mm_fini(struct cirrus_device *cirrus);
-void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
-int cirrus_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct cirrus_bo **pcirrusbo);
-int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
-
-static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
-{
- int ret;
-
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EBUSY)
- DRM_ERROR("reserve failed %p\n", bo);
- return ret;
- }
- return 0;
-}
-
-static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
-{
- ttm_bo_unreserve(&bo->bo);
-}
-
-int cirrus_bo_push_sysram(struct cirrus_bo *bo);
-int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
-
-extern int cirrus_bpp;
-
-#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2dd2cd87cdbb..b191d39c071d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -31,6 +31,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_plane_helper.h>
@@ -97,17 +98,6 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
-/*
- * For connectors that support multiple encoders, either the
- * .atomic_best_encoder() or .best_encoder() operation must be implemented.
- */
-static struct drm_encoder *
-pick_single_encoder_for_connector(struct drm_connector *connector)
-{
- WARN_ON(connector->encoder_ids[1]);
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-
static int handle_conflicting_encoders(struct drm_atomic_state *state,
bool disable_conflicting_encoders)
{
@@ -135,7 +125,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = pick_single_encoder_for_connector(connector);
+ new_encoder = drm_connector_get_single_encoder(connector);
if (new_encoder) {
if (encoder_mask & drm_encoder_mask(new_encoder)) {
@@ -359,7 +349,7 @@ update_connector_routing(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = pick_single_encoder_for_connector(connector);
+ new_encoder = drm_connector_get_single_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -482,7 +472,7 @@ mode_fixup(struct drm_atomic_state *state)
continue;
funcs = crtc->helper_private;
- if (!funcs->mode_fixup)
+ if (!funcs || !funcs->mode_fixup)
continue;
ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 7a26bfb5329c..0d466d3b0809 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1405,7 +1405,7 @@ retry:
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
- if (unlikely(drm_debug & DRM_UT_STATE))
+ if (drm_debug_enabled(DRM_UT_STATE))
drm_atomic_print_state(state);
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 37ac168fcb60..121481f6aa71 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -130,7 +130,12 @@
* Z position is set up with drm_plane_create_zpos_immutable_property() and
* drm_plane_create_zpos_property(). It controls the visibility of overlapping
* planes. Without this property the primary plane is always below the cursor
- * plane, and ordering between all other planes is undefined.
+ * plane, and ordering between all other planes is undefined. The positive
+ * Z axis points towards the user, i.e. planes with lower Z position values
+ * are underneath planes with higher Z position values. Two planes with the
+ * same Z position value have undefined ordering. Note that the Z position
+ * value can also be immutable, to inform userspace about the hard-coded
+ * stacking of planes, see drm_plane_create_zpos_immutable_property().
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
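As a minimal sketch of the zpos semantics documented above, assuming the usual helper signatures (plane, initial value[, min, max]), a driver with fixed primary/cursor stacking and a movable overlay could expose:

    drm_plane_create_zpos_immutable_property(primary, 0);       /* bottom */
    drm_plane_create_zpos_property(overlay, 1, 1, 254);         /* movable */
    drm_plane_create_zpos_immutable_property(cursor, 255);      /* top */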
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3bd76e918b5d..03e01b000f7a 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[],
{
unsigned long i;
- mb();
+ mb(); /* Full memory barrier used before so that CLFLUSH is ordered */
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
- mb();
+ mb(); /* Also used after CLFLUSH so that all cache lines are flushed */
}
#endif
@@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#elif defined(__powerpc__)
unsigned long i;
+
for (i = 0; i < num_pages; i++) {
struct page *page = pages[i];
void *page_virtual;
@@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
- mb();
+ mb(); /* CLFLUSH is ordered only by using memory barriers */
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
- mb();
+ mb(); /* Make sure that all cache line entries are flushed */
return;
}
@@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
+
addr = (void *)(((unsigned long)addr) & -size);
- mb();
+ mb(); /* CLFLUSH is only ordered with a full memory barrier */
for (; addr < end; addr += size)
clflushopt(addr);
clflushopt(end - 1); /* force serialisation */
- mb();
+ mb(); /* Ensure that every data cache line entry is flushed */
return;
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index c8922b7cac09..895b73f23079 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -415,9 +415,8 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->possible_crtcs & drm_crtc_mask(crtc))
return true;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 4a8b2e5c2af6..2166000ed057 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -365,8 +365,6 @@ EXPORT_SYMBOL(drm_connector_attach_edid_property);
int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
- int i;
-
/*
* In the past, drivers have attempted to model the static association
* of connector to encoder in simple connector/encoder devices using a
@@ -381,18 +379,15 @@ int drm_connector_attach_encoder(struct drm_connector *connector,
if (WARN_ON(connector->encoder))
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) {
- if (connector->encoder_ids[i] == 0) {
- connector->encoder_ids[i] = encoder->base.id;
- return 0;
- }
- }
- return -ENOMEM;
+ connector->possible_encoders |= drm_encoder_mask(encoder);
+
+ return 0;
}
EXPORT_SYMBOL(drm_connector_attach_encoder);
/**
- * drm_connector_has_possible_encoder - check if the connector and encoder are assosicated with each other
+ * drm_connector_has_possible_encoder - check if the connector and encoder are
+ * associated with each other
* @connector: the connector
* @encoder: the encoder
*
@@ -402,15 +397,7 @@ EXPORT_SYMBOL(drm_connector_attach_encoder);
bool drm_connector_has_possible_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
- struct drm_encoder *enc;
- int i;
-
- drm_connector_for_each_possible_encoder(connector, enc, i) {
- if (enc == encoder)
- return true;
- }
-
- return false;
+ return connector->possible_encoders & drm_encoder_mask(encoder);
}
EXPORT_SYMBOL(drm_connector_has_possible_encoder);
@@ -480,7 +467,10 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* drm_connector_register - register a connector
* @connector: the connector to register
*
- * Register userspace interfaces for a connector
+ * Register userspace interfaces for a connector. Only call this for connectors
+ * which can be hotplugged after drm_dev_register() has been called already,
+ * e.g. DP MST connectors. All other connectors will be registered automatically
+ * when calling drm_dev_register().
*
* Returns:
* Zero on success, error code on failure.
@@ -526,7 +516,10 @@ EXPORT_SYMBOL(drm_connector_register);
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
- * Unregister userspace interfaces for a connector
+ * Unregister userspace interfaces for a connector. Only call this for
+ * connectors which have been registered explicitly by calling
+ * drm_connector_register(), since connectors are unregistered automatically
+ * when drm_dev_unregister() is called.
*/
void drm_connector_unregister(struct drm_connector *connector)
{
@@ -882,6 +875,38 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
{ DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
};
+/*
+ * As per DP 1.4a spec, 2.2.5.7.5 VSC SDP Payload for Pixel Encoding/Colorimetry
+ * Format Table 2-120
+ */
+static const struct drm_prop_enum_list dp_colorspaces[] = {
+ /* For Default case, driver will set the colorspace */
+ { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+ { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED, "RGB_Wide_Gamut_Fixed_Point" },
+ /* Colorimetry based on scRGB (IEC 61966-2-2) */
+ { DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT, "RGB_Wide_Gamut_Floating_Point" },
+ /* Colorimetry based on IEC 61966-2-5 */
+ { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+ /* Colorimetry based on SMPTE RP 431-2 */
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+ { DRM_MODE_COLORIMETRY_BT601_YCC, "BT601_YCC" },
+ { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+ /* Standard Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+ /* High Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+ /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
+ { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 [33] */
+ { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+};
+
/**
* DOC: standard connector properties
*
@@ -1674,7 +1699,6 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* DOC: standard connector properties
*
* Colorspace:
- * drm_mode_create_colorspace_property - create colorspace property
* This property helps select a suitable colorspace based on the sink
* capability. Modern sink devices support wider gamut like BT2020.
* This helps switch to BT2020 mode if the BT2020 encoded video stream
@@ -1694,32 +1718,68 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* - This property is just to inform sink what colorspace
* source is trying to drive.
*
+ * Because HDMI and DP support different colorspaces,
+ * drm_mode_create_hdmi_colorspace_property() is used for HDMI connectors and
+ * drm_mode_create_dp_colorspace_property() is used for DP connectors.
+ */
+
+/**
+ * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
* Called by a driver the first time it's needed, must be attached to desired
- * connectors.
+ * HDMI connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
*/
-int drm_mode_create_colorspace_property(struct drm_connector *connector)
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_property *prop;
- if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Colorspace",
- hdmi_colorspaces,
- ARRAY_SIZE(hdmi_colorspaces));
- if (!prop)
- return -ENOMEM;
- } else {
- DRM_DEBUG_KMS("Colorspace property not supported\n");
+ if (connector->colorspace_property)
return 0;
- }
- connector->colorspace_property = prop;
+ connector->colorspace_property =
+ drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
+ hdmi_colorspaces,
+ ARRAY_SIZE(hdmi_colorspaces));
+
+ if (!connector->colorspace_property)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
+
+/**
+ * drm_mode_create_dp_colorspace_property - create dp colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * DP connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ if (connector->colorspace_property)
+ return 0;
+
+ connector->colorspace_property =
+ drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
+ dp_colorspaces,
+ ARRAY_SIZE(dp_colorspaces));
+
+ if (!connector->colorspace_property)
+ return -ENOMEM;
return 0;
}
-EXPORT_SYMBOL(drm_mode_create_colorspace_property);
+EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property);
/**
* drm_mode_create_content_type_property - create content type property
@@ -2121,7 +2181,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
int encoders_count = 0;
int ret = 0;
int copied = 0;
- int i;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
uint32_t __user *encoder_ptr;
@@ -2136,14 +2195,13 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- drm_connector_for_each_possible_encoder(connector, encoder, i)
- encoders_count++;
+ encoders_count = hweight32(connector->possible_encoders);
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (put_user(encoder->base.id, encoder_ptr + copied)) {
ret = -EFAULT;
goto out;
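With connector->possible_encoders now a bitmask, the counting and membership checks used in the hunks above reduce to plain bit operations; a condensed sketch:

    u32 mask = connector->possible_encoders;

    encoders_count = hweight32(mask);                 /* how many encoders */
    possible = mask & drm_encoder_mask(encoder);      /* is this one possible? */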
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 80ddf13ad996..499b05aaccfc 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -36,6 +36,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -459,6 +460,22 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
__drm_helper_disable_unused_functions(dev);
}
+/*
+ * For connectors that support multiple encoders, either the
+ * .atomic_best_encoder() or .best_encoder() operation must be implemented.
+ */
+struct drm_encoder *
+drm_connector_get_single_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ WARN_ON(hweight32(connector->possible_encoders) > 1);
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
@@ -624,7 +641,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector) {
- new_encoder = connector_funcs->best_encoder(connector);
+ if (connector_funcs->best_encoder)
+ new_encoder = connector_funcs->best_encoder(connector);
+ else
+ new_encoder = drm_connector_get_single_encoder(connector);
+
/* if we can't get an encoder for a connector
we are setting now - then fail */
if (new_encoder == NULL)
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index b5ac1581e623..f0a66ef47e5a 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -75,3 +75,6 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode);
enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
+
+struct drm_encoder *
+drm_connector_get_single_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 8230dac01a89..3a4126dc2520 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -212,8 +212,14 @@ retry:
drm_for_each_plane(plane, fb->dev) {
struct drm_plane_state *plane_state;
- if (plane->state->fb != fb)
+ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
+ if (ret)
+ goto out;
+
+ if (plane->state->fb != fb) {
+ drm_modeset_unlock(&plane->mutex);
continue;
+ }
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index be1b7ba92ffe..ca3c55c6b815 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -334,19 +334,17 @@ static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
return LINE_LEN(crc->values_cnt);
}
-static unsigned int crtc_crc_poll(struct file *file, poll_table *wait)
+static __poll_t crtc_crc_poll(struct file *file, poll_table *wait)
{
struct drm_crtc *crtc = file->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
- unsigned ret;
+ __poll_t ret = 0;
poll_wait(file, &crc->wq, wait);
spin_lock_irq(&crc->lock);
if (crc->source && crtc_crc_data_count(crc))
- ret = POLLIN | POLLRDNORM;
- else
- ret = 0;
+ ret |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irq(&crc->lock);
return ret;
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index b15cee85b702..3ab2609f9ec7 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -8,9 +8,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <drm/drm_dp_helper.h>
+
#include <media/cec.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_dp_helper.h>
+
/*
* Unfortunately it turns out that we have a chicken-and-egg situation
* here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
@@ -295,7 +299,10 @@ static void drm_dp_cec_unregister_work(struct work_struct *work)
*/
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
{
- u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+ struct drm_connector *connector = aux->cec.connector;
+ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
+ CEC_CAP_CONNECTOR_INFO;
+ struct cec_connector_info conn_info;
unsigned int num_las = 1;
u8 cap;
@@ -344,13 +351,17 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
/* Create a new adapter */
aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
- aux, aux->cec.name, cec_caps,
+ aux, connector->name, cec_caps,
num_las);
if (IS_ERR(aux->cec.adap)) {
aux->cec.adap = NULL;
goto unlock;
}
- if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(aux->cec.adap, &conn_info);
+
+ if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
cec_delete_adapter(aux->cec.adap);
aux->cec.adap = NULL;
} else {
@@ -406,22 +417,20 @@ EXPORT_SYMBOL(drm_dp_cec_unset_edid);
/**
* drm_dp_cec_register_connector() - register a new connector
* @aux: DisplayPort AUX channel
- * @name: name of the CEC device
- * @parent: parent device
+ * @connector: drm connector
*
* A new connector was registered with associated CEC adapter name and
* CEC adapter parent device. After registering the name and parent
* drm_dp_cec_set_edid() is called to check if the connector supports
* CEC and to register a CEC adapter if that is the case.
*/
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
- struct device *parent)
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
{
WARN_ON(aux->cec.adap);
if (WARN_ON(!aux->transfer))
return;
- aux->cec.name = name;
- aux->cec.parent = parent;
+ aux->cec.connector = connector;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
drm_dp_cec_unregister_work);
}
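Call sites only need to drop the name/parent pair and hand over the connector itself; a minimal before/after sketch (variable names are placeholders):

    /* before */
    drm_dp_cec_register_connector(&aux, connector->name, dev->dev);
    /* after */
    drm_dp_cec_register_connector(&aux, connector);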
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ffc68d305afe..2c7870aef469 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -120,33 +120,49 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane)
+{
+ unsigned int offset = DP_ADJUST_REQUEST_POST_CURSOR2;
+ u8 value = dp_link_status(link_status, offset);
+
+ return (value >> (lane << 1)) & 0x3;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
+
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
- udelay(100);
+ rd_interval = 100;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0)
- udelay(400);
+ rd_interval = 400;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
@@ -220,7 +236,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
ret = aux->transfer(aux, &msg);
-
if (ret >= 0) {
native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
@@ -337,134 +352,6 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
- * drm_dp_link_probe() - probe a DisplayPort link for capabilities
- * @aux: DisplayPort AUX channel
- * @link: pointer to structure in which to return link capabilities
- *
- * The structure filled in by this function can usually be passed directly
- * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
- * configure the link based on the link's capabilities.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[3];
- int err;
-
- memset(link, 0, sizeof(*link));
-
- err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
- if (err < 0)
- return err;
-
- link->revision = values[0];
- link->rate = drm_dp_bw_code_to_link_rate(values[1]);
- link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
-
- if (values[2] & DP_ENHANCED_FRAME_CAP)
- link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_probe);
-
-/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_up);
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_down);
-
-/**
- * drm_dp_link_configure() - configure a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[2];
- int err;
-
- values[0] = drm_dp_link_rate_to_bw_code(link->rate);
- values[1] = link->num_lanes;
-
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_configure);
-
-/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
@@ -1109,6 +996,14 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* @aux: DisplayPort AUX channel
*
* Automatically calls drm_dp_aux_init() if this hasn't been done yet.
+ * This should only be called when the underlying &struct drm_connector is
+ * initialiazed already. Therefore the best place to call this is from
+ * &drm_connector_funcs.late_register. Not that drivers which don't follow this
+ * will Oops when CONFIG_DRM_DP_AUX_CHARDEV is enabled.
+ *
+ * Drivers which need to use the aux channel before that point (e.g. at driver
+ * load time, before drm_dev_register() has been called) need to call
+ * drm_dp_aux_init().
*
* Returns 0 on success or a negative error code on failure.
*/
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 82add736e17d..ae5809a1f19a 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -28,15 +28,22 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stacktrace.h>
+#include <linux/sort.h>
+#include <linux/timekeeping.h>
+#include <linux/math64.h>
+#endif
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "drm_crtc_helper_internal.h"
+#include "drm_dp_mst_topology_internal.h"
/**
* DOC: dp mst helper
@@ -45,9 +52,14 @@
* protocol. The helpers contain a topology manager and bandwidth manager.
* The helpers encapsulate the sending and received of sideband msgs.
*/
+struct drm_dp_pending_up_req {
+ struct drm_dp_sideband_msg_hdr hdr;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct list_head next;
+};
+
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf);
-static int test_calc_pbn_mode(void);
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
@@ -62,8 +74,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -74,6 +86,8 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+#define DBG_PREFIX "[dp_mst]"
+
#define DP_STR(x) [DP_ ## x] = #x
static const char *drm_dp_mst_req_type_str(u8 req_type)
@@ -130,6 +144,43 @@ static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
}
#undef DP_STR
+#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
+
+static const char *drm_dp_mst_sideband_tx_state_str(int state)
+{
+ static const char * const sideband_reason_str[] = {
+ DP_STR(QUEUED),
+ DP_STR(START_SEND),
+ DP_STR(SENT),
+ DP_STR(RX),
+ DP_STR(TIMEOUT),
+ };
+
+ if (state >= ARRAY_SIZE(sideband_reason_str) ||
+ !sideband_reason_str[state])
+ return "unknown";
+
+ return sideband_reason_str[state];
+}
+
+static int
+drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
+{
+ int i;
+ u8 unpacked_rad[16];
+
+ for (i = 0; i < lct; i++) {
+ if (i % 2)
+ unpacked_rad[i] = rad[i / 2] >> 4;
+ else
+ unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
+ }
+
+ /* TODO: Eventually add something to printk so we can format the rad
+ * like this: 1.2.3
+ */
+ return snprintf(out, len, "%*phC", lct, unpacked_rad);
+}
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
@@ -262,8 +313,9 @@ static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
return true;
}
-static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
- struct drm_dp_sideband_msg_tx *raw)
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw)
{
int idx = 0;
int i;
@@ -272,6 +324,8 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
switch (req->req_type) {
case DP_ENUM_PATH_RESOURCES:
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
idx++;
break;
@@ -359,14 +413,253 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
+ }
+ raw->cur_len = idx;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
+
+/* Decode a sideband request we've encoded, mainly used for debugging */
+int
+drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ struct drm_dp_sideband_msg_req_body *req)
+{
+ const u8 *buf = raw->msg;
+ int i, idx = 0;
+ req->req_type = buf[idx++] & 0x7f;
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
- buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
- idx++;
+ req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ {
+ struct drm_dp_allocate_payload *a =
+ &req->u.allocate_payload;
+
+ a->number_sdp_streams = buf[idx] & 0xf;
+ a->port_number = (buf[idx] >> 4) & 0xf;
+
+ WARN_ON(buf[++idx] & 0x80);
+ a->vcpi = buf[idx] & 0x7f;
+
+ a->pbn = buf[++idx] << 8;
+ a->pbn |= buf[++idx];
+
+ idx++;
+ for (i = 0; i < a->number_sdp_streams; i++) {
+ a->sdp_stream_sink[i] =
+ (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
+ }
+ }
+ break;
+ case DP_QUERY_PAYLOAD:
+ req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
+ WARN_ON(buf[++idx] & 0x80);
+ req->u.query_payload.vcpi = buf[idx] & 0x7f;
+ break;
+ case DP_REMOTE_DPCD_READ:
+ {
+ struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
+
+ r->port_number = (buf[idx] >> 4) & 0xf;
+
+ r->dpcd_address = (buf[idx] << 16) & 0xf0000;
+ r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+ r->dpcd_address |= buf[++idx] & 0xff;
+
+ r->num_bytes = buf[++idx];
+ }
+ break;
+ case DP_REMOTE_DPCD_WRITE:
+ {
+ struct drm_dp_remote_dpcd_write *w =
+ &req->u.dpcd_write;
+
+ w->port_number = (buf[idx] >> 4) & 0xf;
+
+ w->dpcd_address = (buf[idx] << 16) & 0xf0000;
+ w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+ w->dpcd_address |= buf[++idx] & 0xff;
+
+ w->num_bytes = buf[++idx];
+
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+ GFP_KERNEL);
+ if (!w->bytes)
+ return -ENOMEM;
+ }
+ break;
+ case DP_REMOTE_I2C_READ:
+ {
+ struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
+ struct drm_dp_remote_i2c_read_tx *tx;
+ bool failed = false;
+
+ r->num_transactions = buf[idx] & 0x3;
+ r->port_number = (buf[idx] >> 4) & 0xf;
+ for (i = 0; i < r->num_transactions; i++) {
+ tx = &r->transactions[i];
+
+ tx->i2c_dev_id = buf[++idx] & 0x7f;
+ tx->num_bytes = buf[++idx];
+ tx->bytes = kmemdup(&buf[++idx],
+ tx->num_bytes,
+ GFP_KERNEL);
+ if (!tx->bytes) {
+ failed = true;
+ break;
+ }
+ idx += tx->num_bytes;
+ tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
+ tx->i2c_transaction_delay = buf[idx] & 0xf;
+ }
+
+ if (failed) {
+ for (i = 0; i < r->num_transactions; i++)
+ kfree(tx->bytes);
+ return -ENOMEM;
+ }
+
+ r->read_i2c_device_id = buf[++idx] & 0x7f;
+ r->num_bytes_read = buf[++idx];
+ }
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ {
+ struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
+
+ w->port_number = (buf[idx] >> 4) & 0xf;
+ w->write_i2c_device_id = buf[++idx] & 0x7f;
+ w->num_bytes = buf[++idx];
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+ GFP_KERNEL);
+ if (!w->bytes)
+ return -ENOMEM;
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
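+
The encode/decode pair is exported only for the DP MST selftests, which can round-trip a request through both helpers and compare the results. A minimal sketch of such a round-trip (illustrative, not taken from the selftest code; it assumes the encoder writes the request-type byte ahead of the per-request payload, as the decoder above expects):

	struct drm_dp_sideband_msg_req_body in = {
		.req_type = DP_ENUM_PATH_RESOURCES,
		.u.port_num.port_number = 5,
	}, out;
	struct drm_dp_sideband_msg_tx txmsg = {};

	drm_dp_encode_sideband_req(&in, &txmsg);
	/* decoding only fails with -ENOMEM for requests that carry buffers */
	if (!drm_dp_decode_sideband_req(&txmsg, &out))
		WARN_ON(out.u.port_num.port_number != in.u.port_num.port_number);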
+
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+ int indent, struct drm_printer *printer)
+{
+ int i;
+
+#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
+ if (req->req_type == DP_LINK_ADDRESS) {
+ /* No contents to print */
+ P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
+ return;
+ }
+
+ P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
+ indent++;
+
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ P("port=%d\n", req->u.port_num.port_number);
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
+ req->u.allocate_payload.port_number,
+ req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
+ req->u.allocate_payload.number_sdp_streams,
+ req->u.allocate_payload.number_sdp_streams,
+ req->u.allocate_payload.sdp_stream_sink);
+ break;
+ case DP_QUERY_PAYLOAD:
+ P("port=%d vcpi=%d\n",
+ req->u.query_payload.port_number,
+ req->u.query_payload.vcpi);
+ break;
+ case DP_REMOTE_DPCD_READ:
+ P("port=%d dpcd_addr=%05x len=%d\n",
+ req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
+ req->u.dpcd_read.num_bytes);
+ break;
+ case DP_REMOTE_DPCD_WRITE:
+ P("port=%d addr=%05x len=%d: %*ph\n",
+ req->u.dpcd_write.port_number,
+ req->u.dpcd_write.dpcd_address,
+ req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
+ req->u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ P("port=%d num_tx=%d id=%d size=%d:\n",
+ req->u.i2c_read.port_number,
+ req->u.i2c_read.num_transactions,
+ req->u.i2c_read.read_i2c_device_id,
+ req->u.i2c_read.num_bytes_read);
+
+ indent++;
+ for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
+ const struct drm_dp_remote_i2c_read_tx *rtx =
+ &req->u.i2c_read.transactions[i];
+
+ P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
+ i, rtx->i2c_dev_id, rtx->num_bytes,
+ rtx->no_stop_bit, rtx->i2c_transaction_delay,
+ rtx->num_bytes, rtx->bytes);
+ }
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ P("port=%d id=%d size=%d: %*ph\n",
+ req->u.i2c_write.port_number,
+ req->u.i2c_write.write_i2c_device_id,
+ req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
+ req->u.i2c_write.bytes);
+ break;
+ default:
+ P("???\n");
+ break;
+ }
+#undef P
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
+
+static inline void
+drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
+ const struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+ char buf[64];
+ int ret;
+ int i;
+
+ drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
+ sizeof(buf));
+ drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
+ txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
+ drm_dp_mst_sideband_tx_state_str(txmsg->state),
+ txmsg->path_msg, buf);
+
+ ret = drm_dp_decode_sideband_req(txmsg, &req);
+ if (ret) {
+ drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
+ return;
+ }
+ drm_dp_dump_sideband_msg_req_body(&req, 1, p);
+
+ switch (req.req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(req.u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < req.u.i2c_read.num_transactions; i++)
+ kfree(req.u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(req.u.i2c_write.bytes);
break;
}
- raw->cur_len = idx;
}
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
@@ -842,11 +1135,11 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
clear_bit(vcpi - 1, &mgr->vcpi_mask);
for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->proposed_vcpis[i])
- if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
- mgr->proposed_vcpis[i] = NULL;
- clear_bit(i + 1, &mgr->payload_mask);
- }
+ if (mgr->proposed_vcpis[i] &&
+ mgr->proposed_vcpis[i]->vcpi == vcpi) {
+ mgr->proposed_vcpis[i] = NULL;
+ clear_bit(i + 1, &mgr->payload_mask);
+ }
}
mutex_unlock(&mgr->payload_lock);
}
@@ -899,6 +1192,11 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
}
}
out:
+ if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+ }
mutex_unlock(&mgr->qlock);
return ret;
@@ -1108,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
-static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+
+#define STACK_DEPTH 8
+
+static noinline void
+__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_ref_history *history,
+ enum drm_dp_mst_topology_ref_type type)
{
- struct drm_dp_mst_branch *mstb =
- container_of(kref, struct drm_dp_mst_branch, topology_kref);
- struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
- struct drm_dp_mst_port *port, *tmp;
- bool wake_tx = false;
+ struct drm_dp_mst_topology_ref_entry *entry = NULL;
+ depot_stack_handle_t backtrace;
+ ulong stack_entries[STACK_DEPTH];
+ uint n;
+ int i;
- mutex_lock(&mgr->lock);
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
- list_del(&port->next);
- drm_dp_mst_topology_put_port(port);
+ n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
+ backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
+ if (!backtrace)
+ return;
+
+ /* Try to find an existing entry for this backtrace */
+ for (i = 0; i < history->len; i++) {
+ if (history->entries[i].backtrace == backtrace) {
+ entry = &history->entries[i];
+ break;
+ }
}
- mutex_unlock(&mgr->lock);
- /* drop any tx slots msg */
- mutex_lock(&mstb->mgr->qlock);
- if (mstb->tx_slots[0]) {
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[0] = NULL;
- wake_tx = true;
+ /* Otherwise add one */
+ if (!entry) {
+ struct drm_dp_mst_topology_ref_entry *new;
+ int new_len = history->len + 1;
+
+ new = krealloc(history->entries, sizeof(*new) * new_len,
+ GFP_KERNEL);
+ if (!new)
+ return;
+
+ entry = &new[history->len];
+ history->len = new_len;
+ history->entries = new;
+
+ entry->backtrace = backtrace;
+ entry->type = type;
+ entry->count = 0;
}
- if (mstb->tx_slots[1]) {
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[1] = NULL;
- wake_tx = true;
+ entry->count++;
+ entry->ts_nsec = ktime_get_ns();
+}
+
+static int
+topology_ref_history_cmp(const void *a, const void *b)
+{
+ const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
+
+ if (entry_a->ts_nsec > entry_b->ts_nsec)
+ return 1;
+ else if (entry_a->ts_nsec < entry_b->ts_nsec)
+ return -1;
+ else
+ return 0;
+}
+
+static inline const char *
+topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
+{
+ if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
+ return "get";
+ else
+ return "put";
+}
+
+static void
+__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+ void *ptr, const char *type_str)
+{
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ int i;
+
+ if (!buf)
+ return;
+
+ if (!history->len)
+ goto out;
+
+ /* First, sort the list so that it goes from oldest to newest
+ * reference entry
+ */
+ sort(history->entries, history->len, sizeof(*history->entries),
+ topology_ref_history_cmp, NULL);
+
+ drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
+ type_str, ptr);
+
+ for (i = 0; i < history->len; i++) {
+ const struct drm_dp_mst_topology_ref_entry *entry =
+ &history->entries[i];
+ ulong *entries;
+ uint nr_entries;
+ u64 ts_nsec = entry->ts_nsec;
+ u32 rem_nsec = do_div(ts_nsec, 1000000000);
+
+ nr_entries = stack_depot_fetch(entry->backtrace, &entries);
+ stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
+
+ drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
+ entry->count,
+ topology_ref_type_to_str(entry->type),
+ ts_nsec, rem_nsec / 1000, buf);
}
- mutex_unlock(&mstb->mgr->qlock);
- if (wake_tx)
- wake_up_all(&mstb->mgr->tx_waitq);
+ /* Now free the history, since this is the only time we expose it */
+ kfree(history->entries);
+out:
+ kfree(buf);
+}
- drm_dp_mst_put_mstb_malloc(mstb);
+static __always_inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
+{
+ __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
+ "MSTB");
+}
+
+static __always_inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
+{
+ __dump_topology_ref_history(&port->topology_ref_history, port,
+ "Port");
+}
+
+static __always_inline void
+save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
+}
+
+static __always_inline void
+save_port_topology_ref(struct drm_dp_mst_port *port,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(port->mgr, &port->topology_ref_history, type);
+}
+
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->topology_ref_history_lock);
+}
+
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_unlock(&mgr->topology_ref_history_lock);
+}
+#else
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
+static inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
+#define save_mstb_topology_ref(mstb, type)
+#define save_port_topology_ref(port, type)
+#endif
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, topology_kref);
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+
+ drm_dp_mst_dump_mstb_topology_history(mstb);
+
+ INIT_LIST_HEAD(&mstb->destroy_next);
+
+ /*
+ * This can get called under mgr->mutex, so we need to perform the
+ * actual destruction of the mstb in another worker
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1168,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
- int ret = kref_get_unless_zero(&mstb->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("mstb %p (%d)\n", mstb,
- kref_read(&mstb->topology_kref));
+ topology_ref_history_lock(mstb->mgr);
+ ret = kref_get_unless_zero(&mstb->topology_kref);
+ if (ret) {
+ DRM_DEBUG("mstb %p (%d)\n",
+ mstb, kref_read(&mstb->topology_kref));
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+
+ topology_ref_history_unlock(mstb->mgr);
return ret;
}
@@ -1193,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
*/
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
WARN_ON(kref_read(&mstb->topology_kref) == 0);
kref_get(&mstb->topology_kref);
DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+
+ topology_ref_history_unlock(mstb->mgr);
}
/**
@@ -1213,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
DRM_DEBUG("mstb %p (%d)\n",
mstb, kref_read(&mstb->topology_kref) - 1);
- kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
-}
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
-static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
-{
- struct drm_dp_mst_branch *mstb;
-
- switch (old_pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mstb = port->mstb;
- port->mstb = NULL;
- drm_dp_mst_topology_put_mstb(mstb);
- break;
- }
+ topology_ref_history_unlock(mstb->mgr);
+ kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_destroy_port(struct kref *kref)
@@ -1242,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref)
container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
- if (!port->input) {
- kfree(port->cached_edid);
+ drm_dp_mst_dump_port_topology_history(port);
- /*
- * The only time we don't have a connector
- * on an output port is if the connector init
- * fails.
- */
- if (port->connector) {
- /* we can't destroy the connector here, as
- * we might be holding the mode_config.mutex
- * from an EDID retrieval */
-
- mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->next, &mgr->destroy_connector_list);
- mutex_unlock(&mgr->destroy_connector_lock);
- schedule_work(&mgr->destroy_connector_work);
- return;
- }
- /* no need to clean up vcpi
- * as if we have no connector we never setup a vcpi */
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ /* There's nothing that needs locking to destroy an input port yet */
+ if (port->input) {
+ drm_dp_mst_put_port_malloc(port);
+ return;
}
- drm_dp_mst_put_port_malloc(port);
+
+ kfree(port->cached_edid);
+
+ /*
+ * we can't destroy the connector here, as we might be holding the
+ * mode_config.mutex from an EDID retrieval
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&port->next, &mgr->destroy_port_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1294,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
- int ret = kref_get_unless_zero(&port->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("port %p (%d)\n", port,
- kref_read(&port->topology_kref));
+ topology_ref_history_lock(port->mgr);
+ ret = kref_get_unless_zero(&port->topology_kref);
+ if (ret) {
+ DRM_DEBUG("port %p (%d)\n",
+ port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+ topology_ref_history_unlock(port->mgr);
return ret;
}
@@ -1318,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
WARN_ON(kref_read(&port->topology_kref) == 0);
kref_get(&port->topology_kref);
DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+
+ topology_ref_history_unlock(port->mgr);
}
/**
@@ -1336,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
DRM_DEBUG("port %p (%d)\n",
port, kref_read(&port->topology_kref) - 1);
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
+
+ topology_ref_history_unlock(port->mgr);
kref_put(&port->topology_kref, drm_dp_destroy_port);
}
@@ -1454,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-/*
- * return sends link address for new mstb
- */
-static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
{
- int ret;
- u8 rad[6], lct;
- bool send_link = false;
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ struct drm_dp_mst_branch *mstb;
+ u8 rad[8], lct;
+ int ret = 0;
+
+ if (port->pdt == new_pdt)
+ return 0;
+
+ /* Teardown the old pdt, if there is one */
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /*
+ * If the new PDT would also have an i2c bus, don't bother
+ * with reregistering it
+ */
+ if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ new_pdt == DP_PEER_DEVICE_SST_SINK) {
+ port->pdt = new_pdt;
+ return 0;
+ }
+
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ break;
+ }
+
+ port->pdt = new_pdt;
switch (port->pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux);
break;
+
case DP_PEER_DEVICE_MST_BRANCHING:
lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p", port);
+ goto out;
+ }
- port->mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (port->mstb) {
- port->mstb->mgr = port->mgr;
- port->mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- send_link = true;
- }
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
+
+ /* And make sure we send a link address for this */
+ ret = 1;
break;
}
- return send_link;
+
+out:
+ if (ret < 0)
+ port->pdt = DP_PEER_DEVICE_NONE;
+ return ret;
}
/**
@@ -1617,44 +2117,131 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
-static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
- struct drm_device *dev,
- struct drm_dp_link_addr_reply_port *port_msg)
+static void
+drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ char proppath[255];
+ int ret;
+
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+ port->connector = mgr->cbs->add_connector(mgr, port, proppath);
+ if (!port->connector) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ port->cached_edid = drm_get_edid(port->connector,
+ &port->aux.ddc);
+ drm_connector_set_tile_property(port->connector);
+ }
+
+ mgr->cbs->register_connector(port->connector);
+ return;
+
+error:
+ DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
+}
+
+/*
+ * Drop a topology reference, and unlink the port from the in-memory topology
+ * layout
+ */
+static void
+drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
+{
+ mutex_lock(&mgr->lock);
+ list_del(&port->next);
+ mutex_unlock(&mgr->lock);
+ drm_dp_mst_topology_put_port(port);
+}
+
+static struct drm_dp_mst_port *
+drm_dp_mst_add_port(struct drm_device *dev,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb, u8 port_number)
+{
+ struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+
+ if (!port)
+ return NULL;
+
+ kref_init(&port->topology_kref);
+ kref_init(&port->malloc_kref);
+ port->parent = mstb;
+ port->port_num = port_number;
+ port->mgr = mgr;
+ port->aux.name = "DPMST";
+ port->aux.dev = dev->dev;
+ port->aux.is_remote = true;
+
+ /*
+ * Make sure the memory allocation for our parent branch stays
+ * around until our own memory allocation is released
+ */
+ drm_dp_mst_get_mstb_malloc(mstb);
+
+ return port;
+}
+
+static int
+drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
+ struct drm_device *dev,
+ struct drm_dp_link_addr_reply_port *port_msg)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- bool ret;
- bool created = false;
- int old_pdt = 0;
- int old_ddps = 0;
+ int old_ddps = 0, ret;
+ u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
if (!port)
- return;
- kref_init(&port->topology_kref);
- kref_init(&port->malloc_kref);
- port->parent = mstb;
- port->port_num = port_msg->port_number;
- port->mgr = mstb->mgr;
- port->aux.name = "DPMST";
- port->aux.dev = dev->dev;
- port->aux.is_remote = true;
-
- /*
- * Make sure the memory allocation for our parent branch stays
- * around until our own memory allocation is released
+ return -ENOMEM;
+ created = true;
+ changed = true;
+ } else if (!port->input && port_msg->input_port && port->connector) {
+ /* Since port->connector can't be changed here, we create a
+ * new port if input_port changes from 0 to 1
*/
- drm_dp_mst_get_mstb_malloc(mstb);
-
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ drm_dp_mst_topology_put_port(port);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
+ if (!port)
+ return -ENOMEM;
+ changed = true;
created = true;
- } else {
- old_pdt = port->pdt;
+ } else if (port->input && !port_msg->input_port) {
+ changed = true;
+ } else if (port->connector) {
+ /* We're updating a port that's exposed to userspace, so do it
+ * under lock
+ */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+
old_ddps = port->ddps;
+ changed = port->ddps != port_msg->ddps ||
+ (port->ddps &&
+ (port->ldps != port_msg->legacy_device_plug_status ||
+ port->dpcd_rev != port_msg->dpcd_revision ||
+ port->mcs != port_msg->mcs ||
+ port->pdt != port_msg->peer_device_type ||
+ port->num_sdp_stream_sinks !=
+ port_msg->num_sdp_stream_sinks));
}
- port->pdt = port_msg->peer_device_type;
port->input = port_msg->input_port;
+ if (!port->input)
+ new_pdt = port_msg->peer_device_type;
port->mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
@@ -1665,77 +2252,104 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
/* manage mstb port lists with mgr lock - take a reference
for this list */
if (created) {
- mutex_lock(&mstb->mgr->lock);
+ mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
- mutex_unlock(&mstb->mgr->lock);
+ mutex_unlock(&mgr->lock);
}
if (old_ddps != port->ddps) {
if (port->ddps) {
if (!port->input) {
- drm_dp_send_enum_path_resources(mstb->mgr,
- mstb, port);
+ drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
}
} else {
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
-
- ret = drm_dp_port_setup_pdt(port);
- if (ret == true)
- drm_dp_send_link_address(mstb->mgr, port->mstb);
+ ret = drm_dp_port_set_pdt(port, new_pdt);
+ if (ret == 1) {
+ send_link_addr = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT on port %p: %d\n",
+ port, ret);
+ goto fail;
}
- if (created && !port->input) {
- char proppath[255];
-
- build_mst_prop_path(mstb, port->port_num, proppath,
- sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
- port,
- proppath);
- if (!port->connector) {
- /* remove it from the port list */
- mutex_lock(&mstb->mgr->lock);
- list_del(&port->next);
- mutex_unlock(&mstb->mgr->lock);
- /* drop port list reference */
- drm_dp_mst_topology_put_port(port);
- goto out;
- }
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
- port->cached_edid = drm_get_edid(port->connector,
- &port->aux.ddc);
- drm_connector_set_tile_property(port->connector);
- }
- (*mstb->mgr->cbs->register_connector)(port->connector);
+ /*
+ * If this port wasn't just created, then we're reprobing because
+ * we're coming out of suspend. In this case, always resend the link
+ * address if there's an MSTB on this port
+ */
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ send_link_addr = true;
+
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (!port->input)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+ if (send_link_addr && port->mstb) {
+ ret = drm_dp_send_link_address(mgr, port->mstb);
+ if (ret == 1) /* MSTB below us changed */
+ changed = true;
+ else if (ret < 0)
+ goto fail_put;
}
-out:
/* put reference to this port */
drm_dp_mst_topology_put_port(port);
+ return changed;
+
+fail:
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+fail_put:
+ drm_dp_mst_topology_put_port(port);
+ return ret;
}
-static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
- struct drm_dp_connection_status_notify *conn_stat)
+static void
+drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_connection_status_notify *conn_stat)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_pdt;
- int old_ddps;
- bool dowork = false;
+ int old_ddps, ret;
+ u8 new_pdt;
+ bool dowork = false, create_connector = false;
+
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
return;
+ if (port->connector) {
+ if (!port->input && conn_stat->input_port) {
+ /*
+ * We can't remove a connector from an already exposed
+ * port, so just throw the port out and make sure we
+ * reprobe the link address of its parent MSTB
+ */
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ mstb->link_address_sent = false;
+ dowork = true;
+ goto out;
+ }
+
+ /* Locking is only needed if the port's exposed to userspace */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ } else if (port->input && !conn_stat->input_port) {
+ create_connector = true;
+ /* Reprobe link address so we get num_sdp_streams */
+ mstb->link_address_sent = false;
+ dowork = true;
+ }
+
old_ddps = port->ddps;
- old_pdt = port->pdt;
- port->pdt = conn_stat->peer_device_type;
+ port->input = conn_stat->input_port;
port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -1747,17 +2361,27 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
- if (drm_dp_port_setup_pdt(port))
- dowork = true;
+ new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
+
+ ret = drm_dp_port_set_pdt(port, new_pdt);
+ if (ret == 1) {
+ dowork = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT for port %p: %d\n",
+ port, ret);
+ dowork = false;
}
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (create_connector)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+out:
drm_dp_mst_topology_put_port(port);
if (dowork)
queue_work(system_long_wq, &mstb->mgr->work);
-
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
@@ -1800,7 +2424,7 @@ out:
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *mstb,
- uint8_t *guid)
+ const uint8_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
@@ -1824,7 +2448,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
- uint8_t *guid)
+ const uint8_t *guid)
{
struct drm_dp_mst_branch *mstb;
int ret;
@@ -1843,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
return mstb;
}
-static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
- struct drm_dp_mst_branch *mstb_child;
- if (!mstb->link_address_sent)
- drm_dp_send_link_address(mgr, mstb);
+ int ret;
+ bool changed = false;
+
+ if (!mstb->link_address_sent) {
+ ret = drm_dp_send_link_address(mgr, mstb);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
+ }
list_for_each_entry(port, &mstb->ports, next) {
- if (port->input)
- continue;
+ struct drm_dp_mst_branch *mstb_child = NULL;
- if (!port->ddps)
+ if (port->input || !port->ddps)
continue;
- if (!port->available_pbn)
+ if (!port->available_pbn) {
+ drm_modeset_lock(&mgr->base.lock, NULL);
drm_dp_send_enum_path_resources(mgr, mstb, port);
+ drm_modeset_unlock(&mgr->base.lock);
+ changed = true;
+ }
- if (port->mstb) {
+ if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
- if (mstb_child) {
- drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_mst_topology_put_mstb(mstb_child);
- }
+
+ if (mstb_child) {
+ ret = drm_dp_check_and_send_link_address(mgr,
+ mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
}
}
+
+ return changed;
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb;
int ret;
+ mutex_lock(&mgr->probe_lock);
+
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (mstb) {
@@ -1886,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
mstb = NULL;
}
mutex_unlock(&mgr->lock);
- if (mstb) {
- drm_dp_check_and_send_link_address(mgr, mstb);
- drm_dp_mst_topology_put_mstb(mstb);
+ if (!mstb) {
+ mutex_unlock(&mgr->probe_lock);
+ return;
}
+
+ ret = drm_dp_check_and_send_link_address(mgr, mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
+
+ mutex_unlock(&mgr->probe_lock);
+ if (ret)
+ drm_kms_helper_hotplug_event(dev);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -2035,8 +2687,11 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
idx += tosend + 1;
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
- if (ret) {
- DRM_DEBUG_KMS("sideband msg failed to send\n");
+ if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_printf(&p, "sideband msg failed to send\n");
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
return ret;
}
@@ -2098,21 +2753,52 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
+
+ if (drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+ }
+
if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static void
+drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
+{
+ struct drm_dp_link_addr_reply_port *port_reply;
+ int i;
+
+ for (i = 0; i < reply->nports; i++) {
+ port_reply = &reply->ports[i];
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
+ i,
+ port_reply->input_port,
+ port_reply->peer_device_type,
+ port_reply->port_number,
+ port_reply->dpcd_revision,
+ port_reply->mcs,
+ port_reply->ddps,
+ port_reply->legacy_device_plug_status,
+ port_reply->num_sdp_streams,
+ port_reply->num_sdp_stream_sinks);
+ }
+}
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
- int len;
struct drm_dp_sideband_msg_tx *txmsg;
- int ret;
+ struct drm_dp_link_address_ack_reply *reply;
+ struct drm_dp_mst_port *port, *tmp;
+ int i, len, ret, port_mask = 0;
+ bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
- return;
+ return -ENOMEM;
txmsg->dst = mstb;
len = build_link_address(txmsg);
@@ -2120,48 +2806,67 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
+ /* FIXME: Actually do some real error handling here */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret > 0) {
- int i;
+ if (ret <= 0) {
+ DRM_ERROR("Sending link address failed with %d\n", ret);
+ goto out;
+ }
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+ DRM_ERROR("link address NAK received\n");
+ ret = -EIO;
+ goto out;
+ }
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
- DRM_DEBUG_KMS("link address nak received\n");
- } else {
- DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
- DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
- txmsg->reply.u.link_addr.ports[i].input_port,
- txmsg->reply.u.link_addr.ports[i].peer_device_type,
- txmsg->reply.u.link_addr.ports[i].port_number,
- txmsg->reply.u.link_addr.ports[i].dpcd_revision,
- txmsg->reply.u.link_addr.ports[i].mcs,
- txmsg->reply.u.link_addr.ports[i].ddps,
- txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
- txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
- txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
- }
+ reply = &txmsg->reply.u.link_addr;
+ DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
+ drm_dp_dump_link_address(reply);
- drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+ drm_dp_check_mstb_guid(mstb, reply->guid);
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
- drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
- }
- drm_kms_helper_hotplug_event(mgr->dev);
- }
- } else {
- mstb->link_address_sent = false;
- DRM_DEBUG_KMS("link address failed %d\n", ret);
+ for (i = 0; i < reply->nports; i++) {
+ port_mask |= BIT(reply->ports[i].port_number);
+ ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
+ &reply->ports[i]);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ goto out;
}
+ /* Prune any ports that are currently a part of mstb in our in-memory
+ * topology, but were not seen in this link address. Usually this
+ * means that they were removed while the topology was out of sync,
+ * e.g. during suspend/resume
+ */
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ if (port_mask & BIT(port->port_num))
+ continue;
+
+ DRM_DEBUG_KMS("port %d was not in link address, removing\n",
+ port->port_num);
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ changed = true;
+ }
+ mutex_unlock(&mgr->lock);
+
+out:
+ if (ret <= 0)
+ mstb->link_address_sent = false;
kfree(txmsg);
+ return ret < 0 ? ret : changed;
}
-static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_port *port)
+static int
+drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
{
- int len;
+ struct drm_dp_enum_path_resources_ack_reply *path_res;
struct drm_dp_sideband_msg_tx *txmsg;
+ int len;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2175,14 +2880,20 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
+ path_res = &txmsg->reply.u.path_resources;
+
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("enum path resources nak received\n");
} else {
- if (port->port_num != txmsg->reply.u.path_resources.port_number)
+ if (port->port_num != path_res->port_number)
DRM_ERROR("got incorrect port in response\n");
- DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
- txmsg->reply.u.path_resources.avail_payload_bw_number);
- port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
+ path_res->port_number,
+ path_res->full_payload_bw_number,
+ path_res->avail_payload_bw_number);
+ port->available_pbn =
+ path_res->avail_payload_bw_number;
}
}
@@ -2655,30 +3366,13 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
- int dp_link_count,
- int *out)
+static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
{
- switch (dp_link_bw) {
- default:
+ if (dp_link_bw == 0 || dp_link_count == 0)
DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
dp_link_bw, dp_link_count);
- return false;
- case DP_LINK_BW_1_62:
- *out = 3 * dp_link_count;
- break;
- case DP_LINK_BW_2_7:
- *out = 5 * dp_link_count;
- break;
- case DP_LINK_BW_5_4:
- *out = 10 * dp_link_count;
- break;
- case DP_LINK_BW_8_1:
- *out = 15 * dp_link_count;
- break;
- }
- return true;
+ return dp_link_bw * dp_link_count / 2;
}
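+
The simplification works because the DPCD link-rate code is the link rate in units of 0.27 Gbps, so halving the code reproduces the per-lane values of the old switch statement (rate codes per drm_dp_helper.h):

	/* DP_LINK_BW_1_62 (0x06):  6 * lanes / 2 ==  3 * lanes */
	/* DP_LINK_BW_2_7  (0x0a): 10 * lanes / 2 ==  5 * lanes */
	/* DP_LINK_BW_5_4  (0x14): 20 * lanes / 2 == 10 * lanes */
	/* DP_LINK_BW_8_1  (0x1e): 30 * lanes / 2 == 15 * lanes */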
/**
@@ -2710,9 +3404,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
- mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
- &mgr->pbn_div)) {
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ if (mgr->pbn_div == 0) {
ret = -EINVAL;
goto out_unlock;
}
@@ -2767,6 +3461,23 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
+static void
+drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+
+ /* The link address will need to be re-sent on resume */
+ mstb->link_address_sent = false;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ /* The PBN for each port will also need to be re-probed */
+ port->available_pbn = 0;
+
+ if (port->mstb)
+ drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
+ }
+}
+
/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
@@ -2780,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ flush_work(&mgr->delayed_destroy_work);
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_state && mgr->mst_primary)
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/**
* drm_dp_mst_topology_mgr_resume() - resume the MST manager
* @mgr: manager to resume
+ * @sync: whether or not to perform topology reprobing synchronously
*
* This will fetch DPCD and see if the device is still there,
* if it is, it will rewrite the MSTM control bits, and return.
*
- * if the device fails this returns -1, and the driver should do
+ * If the device fails this returns -1, and the driver should do
* a full MST reprobe, in case we were undocked.
+ *
+ * During system resume (where it is assumed that the driver will be calling
+ * drm_atomic_helper_resume()) this function should be called beforehand with
+ * @sync set to true. In contexts like runtime resume where the driver is not
+ * expected to be calling drm_atomic_helper_resume(), this function should be
+ * called with @sync set to false in order to avoid deadlocking.
+ *
+ * Returns: -1 if the MST topology was removed while we were suspended, 0
+ * otherwise.
*/
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync)
{
- int ret = 0;
+ int ret;
+ u8 guid[16];
mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
- if (mgr->mst_primary) {
- int sret;
- u8 guid[16];
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
+ DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- if (sret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
- DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
- /* Some hubs forget their guids after they resume */
- sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (sret != 16) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ /*
+ * For the final step of resuming the topology, we need to bring the
+ * state of our in-memory topology back into sync with reality. So,
+ * restart the probing process as if we're probing a new hub
+ */
+ queue_work(system_long_wq, &mgr->work);
+ mutex_unlock(&mgr->lock);
- ret = 0;
- } else
- ret = -1;
+ if (sync) {
+ DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
+ flush_work(&mgr->work);
+ }
-out_unlock:
+ return 0;
+
+out_fail:
mutex_unlock(&mgr->lock);
- return ret;
+ return -1;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
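+
A hedged sketch of how a driver's system-resume path would use the new @sync parameter (the encoder/mst_mgr names are illustrative, not from this patch):

	/* before drm_atomic_helper_resume(): reprobe the topology synchronously */
	ret = drm_dp_mst_topology_mgr_resume(&encoder->mst_mgr, true);
	if (ret)
		/* the hub went away while suspended - tear the topology down */
		drm_dp_mst_topology_mgr_set_mst(&encoder->mst_mgr, false);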
@@ -2890,136 +3628,198 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
- int ret = 0;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
+ int slot = -1;
+
+ if (!drm_dp_get_one_sb_msg(mgr, false))
+ goto clear_down_rep_recv;
- if (!drm_dp_get_one_sb_msg(mgr, false)) {
- memset(&mgr->down_rep_recv, 0,
- sizeof(struct drm_dp_sideband_msg_rx));
+ if (!mgr->down_rep_recv.have_eomt)
return 0;
+
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ goto clear_down_rep_recv;
+ }
+
+ /* find the message */
+ slot = hdr->seqno;
+ mutex_lock(&mgr->qlock);
+ txmsg = mstb->tx_slots[slot];
+ /* remove from slots */
+ mutex_unlock(&mgr->qlock);
+
+ if (!txmsg) {
+ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+ mstb, hdr->seqno, hdr->lct, hdr->rad[0],
+ mgr->down_rep_recv.msg[0]);
+ goto no_msg;
}
- if (mgr->down_rep_recv.have_eomt) {
- struct drm_dp_sideband_msg_tx *txmsg;
- struct drm_dp_mst_branch *mstb;
- int slot = -1;
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad);
+ drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
+ txmsg->reply.req_type,
+ drm_dp_mst_req_type_str(txmsg->reply.req_type),
+ txmsg->reply.u.nak.reason,
+ drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
+ txmsg->reply.u.nak.nak_data);
- /* find the message */
- slot = mgr->down_rep_recv.initial_hdr.seqno;
- mutex_lock(&mgr->qlock);
- txmsg = mstb->tx_slots[slot];
- /* remove from slots */
- mutex_unlock(&mgr->qlock);
-
- if (!txmsg) {
- DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
- mstb,
- mgr->down_rep_recv.initial_hdr.seqno,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad[0],
- mgr->down_rep_recv.msg[0]);
- drm_dp_mst_topology_put_mstb(mstb);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ drm_dp_mst_topology_put_mstb(mstb);
- drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ mutex_lock(&mgr->qlock);
+ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+ mstb->tx_slots[slot] = NULL;
+ mutex_unlock(&mgr->qlock);
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
- txmsg->reply.req_type,
- drm_dp_mst_req_type_str(txmsg->reply.req_type),
- txmsg->reply.u.nak.reason,
- drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
- txmsg->reply.u.nak.nak_data);
-
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_mst_topology_put_mstb(mstb);
+ wake_up_all(&mgr->tx_waitq);
+
+ return 0;
- mutex_lock(&mgr->qlock);
- txmsg->state = DRM_DP_SIDEBAND_TX_RX;
- mstb->tx_slots[slot] = NULL;
- mutex_unlock(&mgr->qlock);
+no_msg:
+ drm_dp_mst_topology_put_mstb(mstb);
+clear_down_rep_recv:
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- wake_up_all(&mgr->tx_waitq);
- }
- return ret;
+ return 0;
}
-static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+static inline bool
+drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_pending_up_req *up_req)
{
- int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+ struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
+ struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
+ bool hotplug = false;
- if (!drm_dp_get_one_sb_msg(mgr, true)) {
- memset(&mgr->up_req_recv, 0,
- sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+ if (hdr->broadcast) {
+ const u8 *guid = NULL;
+
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
+ guid = msg->u.conn_stat.guid;
+ else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
+ guid = msg->u.resource_stat.guid;
+
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ } else {
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
- if (mgr->up_req_recv.have_eomt) {
- struct drm_dp_sideband_msg_req_body msg;
- struct drm_dp_mst_branch *mstb = NULL;
- bool seqno;
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ return false;
+ }
- if (!mgr->up_req_recv.initial_hdr.broadcast) {
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->up_req_recv.initial_hdr.lct,
- mgr->up_req_recv.initial_hdr.rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
- }
+ /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
- seqno = mgr->up_req_recv.initial_hdr.seqno;
- drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+ drm_dp_mst_topology_put_mstb(mstb);
+ return hotplug;
+}
- if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+static void drm_dp_mst_up_req_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ up_req_work);
+ struct drm_dp_pending_up_req *up_req;
+ bool send_hotplug = false;
- if (!mstb)
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+ mutex_lock(&mgr->probe_lock);
+ while (true) {
+ mutex_lock(&mgr->up_req_lock);
+ up_req = list_first_entry_or_null(&mgr->up_req_list,
+ struct drm_dp_pending_up_req,
+ next);
+ if (up_req)
+ list_del(&up_req->next);
+ mutex_unlock(&mgr->up_req_lock);
+
+ if (!up_req)
+ break;
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
+ kfree(up_req);
+ }
+ mutex_unlock(&mgr->probe_lock);
- drm_dp_update_port(mstb, &msg.u.conn_stat);
+ if (send_hotplug)
+ drm_kms_helper_hotplug_event(mgr->dev);
+}
- DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
- drm_kms_helper_hotplug_event(mgr->dev);
+static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
+ struct drm_dp_pending_up_req *up_req;
+ bool seqno;
- } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
- if (!mstb)
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+ if (!drm_dp_get_one_sb_msg(mgr, true))
+ goto out;
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (!mgr->up_req_recv.have_eomt)
+ return 0;
- DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
- }
+ up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
+ if (!up_req) {
+ DRM_ERROR("Not enough memory to process MST up req\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&up_req->next);
- if (mstb)
- drm_dp_mst_topology_put_mstb(mstb);
+ seqno = hdr->seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
+ up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
+ DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
+ up_req->msg.req_type);
+ kfree(up_req);
+ goto out;
}
- return ret;
+
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+ seqno, false);
+
+ if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ const struct drm_dp_connection_status_notify *conn_stat =
+ &up_req->msg.u.conn_stat;
+
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ conn_stat->port_number,
+ conn_stat->legacy_device_plug_status,
+ conn_stat->displayport_device_plug_status,
+ conn_stat->message_capability_status,
+ conn_stat->input_port,
+ conn_stat->peer_device_type);
+ } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ const struct drm_dp_resource_status_notify *res_stat =
+ &up_req->msg.u.resource_stat;
+
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
+ res_stat->port_number,
+ res_stat->available_pbn);
+ }
+
+ up_req->hdr = *hdr;
+ mutex_lock(&mgr->up_req_lock);
+ list_add_tail(&up_req->next, &mgr->up_req_list);
+ mutex_unlock(&mgr->up_req_lock);
+ queue_work(system_long_wq, &mgr->up_req_work);
+
+out:
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
}
/**
@@ -3063,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port
+ * @ctx: The acquisition context to use for grabbing locks
* @mgr: manager for this port
- * @port: unverified pointer to a port
+ * @port: pointer to a port
*
- * This returns the current connection state for a port. It validates the
- * port pointer still exists so the caller doesn't require a reference
+ * This returns the current connection state for a port.
*/
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
- struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
- enum drm_connector_status status = connector_status_disconnected;
+ int ret;
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ goto out;
+
+ ret = connector_status_disconnected;
+
if (!port->ddps)
goto out;
@@ -3088,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_SST_SINK:
- status = connector_status_connected;
+ ret = connector_status_connected;
/* for logical ports - cache the EDID */
if (port->port_num >= 8 && !port->cached_edid) {
port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
@@ -3096,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
- status = connector_status_connected;
+ ret = connector_status_connected;
break;
}
out:
drm_dp_mst_topology_put_port(port);
- return status;
+ return ret;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
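+
Because the helper now takes an acquire context and can return a negative error (e.g. -EDEADLK from drm_modeset_lock()) in addition to a connector_status value, callers are expected to reach it through the connector's ->detect_ctx() hook. A minimal sketch, assuming an illustrative wrapper type around the MST connector:

	static int example_mst_detect_ctx(struct drm_connector *connector,
					  struct drm_modeset_acquire_ctx *ctx,
					  bool force)
	{
		struct example_mst_connector *c = to_example_mst(connector);

		return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
	}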
@@ -3237,7 +4046,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, req_slots, ret;
+ int prev_slots, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
@@ -3284,8 +4093,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
}
vcpi->vcpi = req_slots;
- ret = req_slots;
- return ret;
+ return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
@@ -3539,13 +4347,6 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
- u64 kbps;
- s64 peak_kbps;
- u32 numerator;
- u32 denominator;
-
- kbps = clock * bpp;
-
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
@@ -3556,41 +4357,11 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
*/
-
- numerator = 64 * 1006;
- denominator = 54 * 8 * 1000 * 1000;
-
- kbps *= numerator;
- peak_kbps = drm_fixp_from_fraction(kbps, denominator);
-
- return drm_fixp2int_ceil(peak_kbps);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
+ 8 * 54 * 1000 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
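+
A worked example of the new one-liner, using the first case from the selftest removed below (a 154 MHz pixel clock at 30 bpp):

	/*
	 * clock * bpp          = 154000 * 30     = 4620000
	 * * (64 * 1006)        = * 64384         = 297454080000
	 * / (8 * 54 * 10^6)    = / 432000000     = 688.55...
	 * DIV_ROUND_UP_ULL()  -> drm_dp_calc_pbn_mode(154000, 30) == 689
	 */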
-static int test_calc_pbn_mode(void)
-{
- int ret;
- ret = drm_dp_calc_pbn_mode(154000, 30);
- if (ret != 689) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 154000, 30, 689, ret);
- return -EINVAL;
- }
- ret = drm_dp_calc_pbn_mode(234000, 30);
- if (ret != 1047) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 234000, 30, 1047, ret);
- return -EINVAL;
- }
- ret = drm_dp_calc_pbn_mode(297000, 24);
- if (ret != 1063) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 297000, 24, 1063, ret);
- return -EINVAL;
- }
- return 0;
-}
-
/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
@@ -3729,36 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_destroy_connector_work(struct work_struct *work)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_dp_mst_port *port;
- bool send_hotplug = false;
+ if (port->connector)
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_mst_put_port_malloc(port);
+}
+
+static inline void
+drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ struct drm_dp_mst_port *port, *tmp;
+ bool wake_tx = false;
+
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ }
+ mutex_unlock(&mgr->lock);
+
+ /* drop any tx slots msg */
+ mutex_lock(&mstb->mgr->qlock);
+ if (mstb->tx_slots[0]) {
+ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[0] = NULL;
+ wake_tx = true;
+ }
+ if (mstb->tx_slots[1]) {
+ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[1] = NULL;
+ wake_tx = true;
+ }
+ mutex_unlock(&mstb->mgr->qlock);
+
+ if (wake_tx)
+ wake_up_all(&mstb->mgr->tx_waitq);
+
+ drm_dp_mst_put_mstb_malloc(mstb);
+}
+
+static void drm_dp_delayed_destroy_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ delayed_destroy_work);
+ bool send_hotplug = false, go_again;
+
/*
* Not a regular list traverse as we have to drop the destroy
- * connector lock before destroying the connector, to avoid AB->BA
+ * connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex.
*/
- for (;;) {
- mutex_lock(&mgr->destroy_connector_lock);
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
- if (!port) {
- mutex_unlock(&mgr->destroy_connector_lock);
- break;
+ do {
+ go_again = false;
+
+ for (;;) {
+ struct drm_dp_mst_branch *mstb;
+
+ mutex_lock(&mgr->delayed_destroy_lock);
+ mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
+ struct drm_dp_mst_branch,
+ destroy_next);
+ if (mstb)
+ list_del(&mstb->destroy_next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+
+ if (!mstb)
+ break;
+
+ drm_dp_delayed_destroy_mstb(mstb);
+ go_again = true;
}
- list_del(&port->next);
- mutex_unlock(&mgr->destroy_connector_lock);
- INIT_LIST_HEAD(&port->next);
+ for (;;) {
+ struct drm_dp_mst_port *port;
- mgr->cbs->destroy_connector(mgr, port->connector);
+ mutex_lock(&mgr->delayed_destroy_lock);
+ port = list_first_entry_or_null(&mgr->destroy_port_list,
+ struct drm_dp_mst_port,
+ next);
+ if (port)
+ list_del(&port->next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ if (!port)
+ break;
+
+ drm_dp_delayed_destroy_port(port);
+ send_hotplug = true;
+ go_again = true;
+ }
+ } while (go_again);
- drm_dp_mst_put_port_malloc(port);
- send_hotplug = true;
- }
if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev);
}
@@ -3920,9 +4758,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_device *dev = mgr->dev;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
@@ -3948,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
- mutex_init(&mgr->destroy_connector_lock);
+ mutex_init(&mgr->delayed_destroy_lock);
+ mutex_init(&mgr->up_req_lock);
+ mutex_init(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_init(&mgr->topology_ref_history_lock);
+#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq);
- INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ INIT_LIST_HEAD(&mgr->destroy_port_list);
+ INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
+ INIT_LIST_HEAD(&mgr->up_req_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
- INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
+ INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
+ INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
init_waitqueue_head(&mgr->tx_waitq);
mgr->dev = dev;
mgr->aux = aux;
@@ -3970,8 +4813,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (!mgr->proposed_vcpis)
return -ENOMEM;
set_bit(0, &mgr->payload_mask);
- if (test_calc_pbn_mode() < 0)
- DRM_ERROR("MST PBN self-test failed\n");
mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
if (mst_state == NULL)
@@ -3996,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ cancel_work_sync(&mgr->delayed_destroy_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
mgr->payloads = NULL;
@@ -4007,6 +4848,16 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mgr->aux = NULL;
drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
+
+ mutex_destroy(&mgr->delayed_destroy_lock);
+ mutex_destroy(&mgr->payload_lock);
+ mutex_destroy(&mgr->qlock);
+ mutex_destroy(&mgr->lock);
+ mutex_destroy(&mgr->up_req_lock);
+ mutex_destroy(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_destroy(&mgr->topology_ref_history_lock);
+#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/drm_dp_mst_topology_internal.h
new file mode 100644
index 000000000000..eeda9a61c657
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology_internal.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Declarations for DP MST related functions which are only used in selftests
+ *
+ * Copyright © 2018 Red Hat
+ * Authors:
+ * Lyude Paul <lyude@redhat.com>
+ */
+
+#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
+#define _DRM_DP_MST_HELPER_INTERNAL_H_
+
+#include <drm/drm_dp_mst_helper.h>
+
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw);
+int drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ struct drm_dp_sideband_msg_req_body *req);
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+ int indent, struct drm_printer *printer);
+
+#endif /* !_DRM_DP_MST_HELPER_INTERNAL_H_ */
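
These declarations exist so a selftest can round-trip a request body through the encoder and decoder. A rough sketch, assuming the ENUM_PATH_RESOURCES payload lives in the u.port_num member of the request body:

static bool example_sideband_roundtrip(void)
{
	struct drm_dp_sideband_msg_req_body in = {
		.req_type = DP_ENUM_PATH_RESOURCES,
	};
	struct drm_dp_sideband_msg_req_body out = {};
	struct drm_dp_sideband_msg_tx raw = {};

	in.u.port_num.port_number = 1;	/* assumed union/field naming */

	drm_dp_encode_sideband_req(&in, &raw);
	if (drm_dp_decode_sideband_req(&raw, &out) < 0)
		return false;

	return in.req_type == out.req_type &&
	       in.u.port_num.port_number == out.u.port_num.port_number;
}
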
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 769feefeeeef..1b9b40a1c7c9 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,26 +46,9 @@
#include "drm_internal.h"
#include "drm_legacy.h"
-/*
- * drm_debug: Enable debug output.
- * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
- */
-unsigned int drm_debug = 0;
-EXPORT_SYMBOL(drm_debug);
-
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
-"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
-"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
-"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
-"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
-"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
-"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
-"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 77f4e5ae4197..4a475d9696ff 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -216,13 +216,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
pps_payload->rc_range_parameters[i] =
- ((dsc_cfg->rc_range_params[i].range_min_qp <<
- DSC_PPS_RC_RANGE_MINQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_max_qp <<
- DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_bpg_offset));
- pps_payload->rc_range_parameters[i] =
- cpu_to_be16(pps_payload->rc_range_parameters[i]);
+ cpu_to_be16((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
}
/* PPS 88 */
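
The single expression above packs the three RC range fields into one big-endian 16-bit word. Assuming the usual DSC layout (range_min_qp in bits 15:11, range_max_qp in bits 10:6, range_bpg_offset in bits 5:0), each entry amounts to:

const struct drm_dsc_rc_range_parameters *rc = &dsc_cfg->rc_range_params[i];
u16 word = (rc->range_min_qp << DSC_PPS_RC_RANGE_MINQP_SHIFT) |
	   (rc->range_max_qp << DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
	   rc->range_bpg_offset;

pps_payload->rc_range_parameters[i] = cpu_to_be16(word);
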
@@ -336,12 +334,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
else
vdsc_cfg->nfl_bpg_offset = 0;
- /* 2^16 - 1 */
- if (vdsc_cfg->nfl_bpg_offset > 65535) {
- DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
- return -ERANGE;
- }
-
/* Number of groups used to code the entire slice */
groups_total = groups_per_line * vdsc_cfg->slice_height;
@@ -371,11 +363,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->scale_increment_interval = 0;
}
- if (vdsc_cfg->scale_increment_interval > 65535) {
- DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
- return -ERANGE;
- }
-
/*
* DSC spec mentions that bits_per_pixel specifies the target
* bits/pixel (bpp) rate that is used by the encoder,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6b0177112e18..474ac04d5600 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1278,6 +1278,106 @@ static const struct drm_display_mode edid_cea_modes[] = {
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 108 - 1280x720@48Hz 16:9 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
+ 2280, 2500, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 109 - 1280x720@48Hz 64:27 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
+ 2280, 2500, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 110 - 1680x720@48Hz 64:27 */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 2490,
+ 2530, 2750, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 111 - 1920x1080@48Hz 16:9 */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 112 - 1920x1080@48Hz 64:27 */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 113 - 2560x1080@48Hz 64:27 */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 3558,
+ 3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 114 - 3840x2160@48Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 115 - 4096x2160@48Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 116 - 3840x2160@48Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 117 - 3840x2160@100Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 118 - 3840x2160@120Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 119 - 3840x2160@100Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 120 - 3840x2160@120Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 121 - 5120x2160@24Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 7116,
+ 7204, 7500, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 122 - 5120x2160@25Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 6816,
+ 6904, 7200, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 123 - 5120x2160@30Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 5784,
+ 5872, 6000, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 124 - 5120x2160@48Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5866,
+ 5954, 6250, 0, 2160, 2168, 2178, 2475, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 125 - 5120x2160@50Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 6216,
+ 6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 126 - 5120x2160@60Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5284,
+ 5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 127 - 5120x2160@100Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 6216,
+ 6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
};
/*
@@ -1554,7 +1654,7 @@ static void connector_bad_edid(struct drm_connector *connector,
{
int i;
- if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
dev_warn(connector->dev->dev,
@@ -2092,7 +2192,8 @@ static int standard_timing_level(struct edid *edid)
return LEVEL_CVT;
if (drm_gtf2_hbreak(edid))
return LEVEL_GTF2;
- return LEVEL_GTF;
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ return LEVEL_GTF;
}
return LEVEL_DMT;
}
@@ -3108,18 +3209,10 @@ static bool drm_valid_cea_vic(u8 vic)
return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
}
-/**
- * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
- * the input VIC from the CEA mode list
- * @video_code: ID given to each of the CEA modes
- *
- * Returns picture aspect ratio
- */
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
+static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
return edid_cea_modes[video_code].picture_aspect_ratio;
}
-EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
@@ -3722,7 +3815,7 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
if (*end < 4 || *end > 127)
return -ERANGE;
} else {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -4191,7 +4284,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -4252,7 +4345,7 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -5071,6 +5164,49 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
+static u8 drm_mode_hdmi_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ bool has_hdmi_infoframe = connector ?
+ connector->display_info.has_hdmi_infoframe : false;
+
+ if (!has_hdmi_infoframe)
+ return 0;
+
+ /* No HDMI VIC when signalling 3D video format */
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ return 0;
+
+ return drm_match_hdmi_mode(mode);
+}
+
+static u8 drm_mode_cea_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ u8 vic;
+
+ /*
+ * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ * we should send its VIC in vendor infoframes, else send the
+ * VIC in AVI infoframes. Lets check if this mode is present in
+ * HDMI 1.4b 4K modes
+ */
+ if (drm_mode_hdmi_vic(connector, mode))
+ return 0;
+
+ vic = drm_match_cea_mode(mode);
+
+ /*
+ * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+ * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+ * have to make sure we dont break HDMI 1.4 sinks.
+ */
+ if (!is_hdmi2_sink(connector) && vic > 64)
+ return 0;
+
+ return vic;
+}
+
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
@@ -5098,29 +5234,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
- frame->video_code = drm_match_cea_mode(mode);
-
- /*
- * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
- * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
- * have to make sure we dont break HDMI 1.4 sinks.
- */
- if (!is_hdmi2_sink(connector) && frame->video_code > 64)
- frame->video_code = 0;
-
- /*
- * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
- * we should send its VIC in vendor infoframes, else send the
- * VIC in AVI infoframes. Lets check if this mode is present in
- * HDMI 1.4b 4K modes
- */
- if (frame->video_code) {
- u8 vendor_if_vic = drm_match_hdmi_mode(mode);
- bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK;
-
- if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d)
- frame->video_code = 0;
- }
+ frame->video_code = drm_mode_cea_vic(connector, mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
@@ -5285,6 +5399,23 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
+/**
+ * drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe
+ * bar information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
+ */
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ frame->right_bar = conn_state->tv.margins.right;
+ frame->left_bar = conn_state->tv.margins.left;
+ frame->top_bar = conn_state->tv.margins.top;
+ frame->bottom_bar = conn_state->tv.margins.bottom;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars);
+
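
Together with drm_hdmi_avi_infoframe_from_display_mode() above, a driver's infoframe setup can now look roughly like this (connector, mode and conn_state are assumed to come from the driver's atomic enable path):

struct hdmi_avi_infoframe frame;
int err;

err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0)
	return err;

/* frame.video_code is a CEA VIC, or 0 when the mode is an HDMI 1.4b 4K
 * mode (signalled via the vendor infoframe) or the sink is HDMI 1.4 only */
drm_hdmi_avi_infoframe_bars(&frame, conn_state);
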
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
@@ -5337,8 +5468,6 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
bool has_hdmi_infoframe = connector ?
connector->display_info.has_hdmi_infoframe : false;
int err;
- u32 s3d_flags;
- u8 vic;
if (!frame || !mode)
return -EINVAL;
@@ -5346,8 +5475,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
if (!has_hdmi_infoframe)
return -EINVAL;
- vic = drm_match_hdmi_mode(mode);
- s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+ err = hdmi_vendor_infoframe_init(frame);
+ if (err < 0)
+ return err;
/*
* Even if it's not absolutely necessary to send the infoframe
@@ -5358,15 +5488,7 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
* mode if the source simply stops sending the infoframe when
* it wants to switch from 3D to 2D.
*/
-
- if (vic && s3d_flags)
- return -EINVAL;
-
- err = hdmi_vendor_infoframe_init(frame);
- if (err < 0)
- return err;
-
- frame->vic = vic;
+ frame->vic = drm_mode_hdmi_vic(connector, mode);
frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index d38b3b255926..37d8ba3ddb46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -175,7 +175,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
u8 *edid;
int fwsize, builtin;
int i, valid_extensions = 0;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+ bool print_bad_edid = !connector->bad_edid_counter || drm_debug_enabled(DRM_UT_KMS);
builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
if (builtin >= 0) {
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 7fb47b7b8b44..80d88a55302e 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -22,6 +22,7 @@
#include <linux/export.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a7ba5b4902d6..8ebeccdeed23 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -46,6 +46,7 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include "drm_crtc_helper_internal.h"
#include "drm_internal.h"
static bool drm_fbdev_emulation = true;
@@ -91,9 +92,12 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
*
* Drivers that support a dumb buffer with a virtual address and mmap support,
* should try out the generic fbdev emulation using drm_fbdev_generic_setup().
+ * It will automatically set up deferred I/O if the driver requires a shadow
+ * buffer.
*
- * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
- * down by calling drm_fb_helper_fbdev_teardown().
+ * For other drivers, setup fbdev emulation by calling
+ * drm_fb_helper_fbdev_setup() and tear it down by calling
+ * drm_fb_helper_fbdev_teardown().
*
* At runtime drivers should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
@@ -126,8 +130,10 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
- * mmap page writes. Drivers can use drm_fb_helper_defio_init() to setup
- * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
+ * mmap page writes.
+ *
+ * Deferred I/O is not compatible with SHMEM. Such drivers should request an
+ * fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
*/
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
@@ -679,49 +685,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
- * drm_fb_helper_defio_init - fbdev deferred I/O initialization
- * @fb_helper: driver-allocated fbdev helper
- *
- * This function allocates &fb_deferred_io, sets callback to
- * drm_fb_helper_deferred_io(), delay to 50ms and calls fb_deferred_io_init().
- * It should be called from the &drm_fb_helper_funcs->fb_probe callback.
- * drm_fb_helper_fbdev_teardown() cleans up deferred I/O.
- *
- * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done
- * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby
- * affect other instances of that &fb_ops.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- struct fb_info *info = fb_helper->fbdev;
- struct fb_deferred_io *fbdefio;
- struct fb_ops *fbops;
-
- fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- if (!fbdefio || !fbops) {
- kfree(fbdefio);
- kfree(fbops);
- return -ENOMEM;
- }
-
- info->fbdefio = fbdefio;
- fbdefio->delay = msecs_to_jiffies(50);
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
-
- *fbops = *info->fbops;
- info->fbops = fbops;
-
- fb_deferred_io_init(info);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_defio_init);
-
-/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
* @buf: userspace buffer to read from framebuffer memory
@@ -2355,7 +2318,10 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* Drivers that set the dirty callback on their framebuffer will get a shadow
* fbdev buffer that is blitted onto the real buffer. This is done in order to
- * make deferred I/O work with all kinds of buffers.
+ * make deferred I/O work with all kinds of buffers. A shadow buffer can be
+ * requested explicitly by setting struct drm_mode_config.prefer_shadow or
+ * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
+ * required to use generic fbdev emulation with SHMEM helpers.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6854f5867d51..000fa4a1899f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1099,23 +1099,12 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
+ int ret;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (obj->funcs && obj->funcs->vm_ops)
- vma->vm_ops = obj->funcs->vm_ops;
- else if (dev->driver->gem_vm_ops)
- vma->vm_ops = dev->driver->gem_vm_ops;
- else
- return -EINVAL;
-
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = obj;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
@@ -1124,6 +1113,33 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
*/
drm_gem_object_get(obj);
+ if (obj->funcs && obj->funcs->mmap) {
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret) {
+ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
+ WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
+ } else {
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ }
+
+ vma->vm_private_data = obj;
+
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
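
A memory manager can now take over mapping setup entirely through &drm_gem_object_funcs.mmap; drm_gem_mmap_obj() strips the fake offset and leaves vm_flags, vm_page_prot and vm_ops to the callback. A sketch of hooking one up (struct name hypothetical, using the shmem handler reworked below):

static const struct drm_gem_object_funcs example_gem_funcs = {
	.free	= drm_gem_shmem_free_object,
	.mmap	= drm_gem_shmem_mmap,	/* sets vm_flags/vm_page_prot/vm_ops itself */
};
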
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f5918707672f..0810d3ef6961 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -32,7 +32,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
@@ -505,39 +505,30 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
drm_gem_vm_close(vma);
}
-const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
};
-EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
/**
* drm_gem_shmem_mmap - Memory-map a shmem GEM object
- * @filp: File object
+ * @obj: gem object
* @vma: VMA for the area to be mapped
*
* This function implements an augmented version of the GEM DRM file mmap
* operation for shmem objects. Drivers which employ the shmem helpers should
- * use this function as their &file_operations.mmap handler in the DRM device file's
- * file_operations structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ * use this function as their &drm_gem_object_funcs.mmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct drm_gem_shmem_object *shmem;
int ret;
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+ shmem = to_drm_gem_shmem_obj(obj);
ret = drm_gem_shmem_get_pages(shmem);
if (ret) {
@@ -545,12 +536,10 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
- /* VM_PFNMAP was set by drm_gem_mmap() */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
-
- /* Remove the fake offset */
- vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
}
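
On the driver side nothing special is needed any more: the file_operations mmap handler stays the generic one and shmem objects pick up drm_gem_shmem_mmap() through their default object funcs. A sketch (fops name hypothetical):

static const struct file_operations example_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_mmap,	/* dispatches to obj->funcs->mmap */
	.poll		= drm_poll,
	.read		= drm_read,
};
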
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
new file mode 100644
index 000000000000..605a8a3da7f9
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/module.h>
+
+#include <drm/drm_gem_ttm_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helper functions for gem objects backed by
+ * ttm.
+ */
+
+/**
+ * drm_gem_ttm_print_info() - Print &ttm_buffer_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @gem: GEM object
+ *
+ * This function can be used as &drm_gem_object_funcs.print_info
+ * callback.
+ */
+void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *gem)
+{
+ static const char * const plname[] = {
+ [ TTM_PL_SYSTEM ] = "system",
+ [ TTM_PL_TT ] = "tt",
+ [ TTM_PL_VRAM ] = "vram",
+ [ TTM_PL_PRIV ] = "priv",
+
+ [ 16 ] = "cached",
+ [ 17 ] = "uncached",
+ [ 18 ] = "wc",
+ [ 19 ] = "contig",
+
+ [ 21 ] = "pinned", /* NO_EVICT */
+ [ 22 ] = "topdown",
+ };
+ const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+ drm_printf_indent(p, indent, "placement=");
+ drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+ drm_printf(p, "\n");
+
+ if (bo->mem.bus.is_iomem) {
+ drm_printf_indent(p, indent, "bus.base=%lx\n",
+ (unsigned long)bo->mem.bus.base);
+ drm_printf_indent(p, indent, "bus.offset=%lx\n",
+ (unsigned long)bo->mem.bus.offset);
+ }
+}
+EXPORT_SYMBOL(drm_gem_ttm_print_info);
+
+/**
+ * drm_gem_ttm_mmap() - mmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @vma: vm area.
+ *
+ * This function can be used as &drm_gem_object_funcs.mmap
+ * callback.
+ */
+int drm_gem_ttm_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ int ret;
+
+ ret = ttm_bo_mmap_obj(vma, bo);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * ttm has its own object refcounting, so drop gem reference
+ * to avoid double accounting.
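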
+ */
+ drm_gem_object_put_unlocked(gem);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_ttm_mmap);
+
+MODULE_DESCRIPTION("DRM gem ttm helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index fd751078bae1..666cb4c22bb9 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,10 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
+#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
-#include <drm/drm_vram_mm_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@@ -14,6 +19,11 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
*
* This library provides a GEM buffer object that is backed by video RAM
* (VRAM). It can be used for framebuffer devices with dedicated memory.
+ *
+ * The data structure &struct drm_vram_mm and its helpers implement a memory
+ * manager for simple framebuffer devices with dedicated video memory. Buffer
+ * objects are either placed in video RAM or evicted to system memory. The rsp.
+ * buffer object is provided by &struct drm_gem_vram_object.
*/
/*
@@ -26,6 +36,10 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
+
+ WARN_ON(gbo->kmap_use_count);
+ WARN_ON(gbo->kmap.virtual);
+
drm_gem_object_release(&gbo->bo.base);
}
@@ -47,6 +61,7 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
{
unsigned int i;
unsigned int c = 0;
+ u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
@@ -54,15 +69,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
if (pl_flag & TTM_PL_FLAG_VRAM)
gbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ TTM_PL_FLAG_VRAM |
+ invariant_flags;
if (pl_flag & TTM_PL_FLAG_SYSTEM)
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_SYSTEM |
+ invariant_flags;
if (!c)
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_SYSTEM |
+ invariant_flags;
gbo->placement.num_placement = c;
gbo->placement.num_busy_placement = c;
@@ -82,8 +100,7 @@ static int drm_gem_vram_init(struct drm_device *dev,
int ret;
size_t acc_size;
- if (!gbo->bo.base.funcs)
- gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
+ gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, &gbo->bo.base, size);
if (ret)
@@ -192,30 +209,12 @@ s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_offset);
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
+ unsigned long pl_flag)
{
int i, ret;
struct ttm_operation_ctx ctx = { false, false };
- ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret < 0)
- return ret;
-
if (gbo->pin_count)
goto out;
@@ -227,62 +226,123 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
- goto err_ttm_bo_unreserve;
+ return ret;
out:
++gbo->pin_count;
- ttm_bo_unreserve(&gbo->bo);
return 0;
-
-err_ttm_bo_unreserve:
- ttm_bo_unreserve(&gbo->bo);
- return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
* @gbo: the GEM VRAM object
+ * @pl_flag: a bitmask of possible memory regions
+ *
+ * Pinning a buffer object ensures that it is not evicted from
+ * a memory region. A pinned buffer object has to be unpinned before
+ * it can be pinned to another region. If the pl_flag argument is 0,
+ * the buffer is pinned at its current location (video RAM or system
+ * memory).
+ *
+ * Small buffer objects, such as cursor images, can lead to memory
+ * fragmentation if they are pinned in the middle of video RAM. This
+ * is especially a problem on devices with only a small amount of
+ * video RAM. Fragmentation can prevent the primary framebuffer from
+ * fitting in, even though there's enough memory overall. The modifier
+ * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
+ * at the high end of the memory region to avoid fragmentation.
*
* Returns:
* 0 on success, or
* a negative error code otherwise.
*/
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
- int i, ret;
- struct ttm_operation_ctx ctx = { false, false };
+ int ret;
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret < 0)
+ if (ret)
return ret;
+ ret = drm_gem_vram_pin_locked(gbo, pl_flag);
+ ttm_bo_unreserve(&gbo->bo);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_pin);
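
For example, a small buffer such as a hypothetical cursor BO can be kept out of the way of the primary framebuffer:

ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
			    DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
if (ret)
	return ret;
/* ... program the scanout/cursor address from drm_gem_vram_offset(gbo) ... */
drm_gem_vram_unpin(gbo);
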
+
+static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
+{
+ int i, ret;
+ struct ttm_operation_ctx ctx = { false, false };
if (WARN_ON_ONCE(!gbo->pin_count))
- goto out;
+ return 0;
--gbo->pin_count;
if (gbo->pin_count)
- goto out;
+ return 0;
for (i = 0; i < gbo->placement.num_placement ; ++i)
gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
- goto err_ttm_bo_unreserve;
-
-out:
- ttm_bo_unreserve(&gbo->bo);
+ return ret;
return 0;
+}
-err_ttm_bo_unreserve:
+/**
+ * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * @gbo: the GEM VRAM object
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret)
+ return ret;
+ ret = drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
+
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
+static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
+ bool map, bool *is_iomem)
+{
+ int ret;
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+
+ if (gbo->kmap_use_count > 0)
+ goto out;
+
+ if (kmap->virtual || !map)
+ goto out;
+
+ ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ if (ret)
+ return ERR_PTR(ret);
+
+out:
+ if (!kmap->virtual) {
+ if (is_iomem)
+ *is_iomem = false;
+ return NULL; /* not mapped; don't increment ref */
+ }
+ ++gbo->kmap_use_count;
+ if (is_iomem)
+ return ttm_kmap_obj_virtual(kmap, is_iomem);
+ return kmap->virtual;
+}
+
/**
* drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
* @gbo: the GEM VRAM object
@@ -304,43 +364,121 @@ void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
bool *is_iomem)
{
int ret;
- struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
-
- if (kmap->virtual || !map)
- goto out;
+ void *virtual;
- ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ERR_PTR(ret);
+ virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
+ ttm_bo_unreserve(&gbo->bo);
-out:
- if (!is_iomem)
- return kmap->virtual;
- if (!kmap->virtual) {
- *is_iomem = false;
- return NULL;
- }
- return ttm_kmap_obj_virtual(kmap, is_iomem);
+ return virtual;
}
EXPORT_SYMBOL(drm_gem_vram_kmap);
+static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
+{
+ if (WARN_ON_ONCE(!gbo->kmap_use_count))
+ return;
+ if (--gbo->kmap_use_count > 0)
+ return;
+
+ /*
+ * Permanently mapping and unmapping buffers adds overhead from
+ * updating the page tables and creates debugging output. Therefore,
+ * we delay the actual unmap operation until the BO gets evicted
+ * from memory. See drm_gem_vram_bo_driver_move_notify().
+ */
+}
+
/**
* drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
* @gbo: the GEM VRAM object
*/
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
- struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+ int ret;
- if (!kmap->virtual)
+ ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
+ if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
return;
-
- ttm_bo_kunmap(kmap);
- kmap->virtual = NULL;
+ drm_gem_vram_kunmap_locked(gbo);
+ ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
/**
+ * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
+ * space
+ * @gbo: The GEM VRAM object to map
+ *
+ * The vmap function pins a GEM VRAM object to its current location, either
+ * system or video memory, and maps its buffer into kernel address space.
+ * As pinned object cannot be relocated, you should avoid pinning objects
+ * permanently. Call drm_gem_vram_vunmap() with the returned address to
+ * unmap and unpin the GEM VRAM object.
+ *
+ * If you have special requirements for the pinning or mapping operations,
+ * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
+ *
+ * Returns:
+ * The buffer's virtual address on success, or
+ * an ERR_PTR()-encoded error code otherwise.
+ */
+void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
+{
+ int ret;
+ void *base;
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_gem_vram_pin_locked(gbo, 0);
+ if (ret)
+ goto err_ttm_bo_unreserve;
+ base = drm_gem_vram_kmap_locked(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_drm_gem_vram_unpin_locked;
+ }
+
+ ttm_bo_unreserve(&gbo->bo);
+
+ return base;
+
+err_drm_gem_vram_unpin_locked:
+ drm_gem_vram_unpin_locked(gbo);
+err_ttm_bo_unreserve:
+ ttm_bo_unreserve(&gbo->bo);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_vram_vmap);
+
+/**
+ * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
+ * @gbo: The GEM VRAM object to unmap
+ * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap()
+ *
+ * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
+ * the documentation for drm_gem_vram_vmap() for more information.
+ */
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
+ if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
+ return;
+
+ drm_gem_vram_kunmap_locked(gbo);
+ drm_gem_vram_unpin_locked(gbo);
+
+ ttm_bo_unreserve(&gbo->bo);
+}
+EXPORT_SYMBOL(drm_gem_vram_vunmap);
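
Typical use pairs the two calls around a short CPU access, e.g. when copying a cursor image into the BO (src/size are whatever the driver copies in):

void *vaddr = drm_gem_vram_vmap(gbo);

if (IS_ERR(vaddr))
	return PTR_ERR(vaddr);
memcpy(vaddr, src, size);
drm_gem_vram_vunmap(gbo, vaddr);
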
+
+/**
* drm_gem_vram_fill_create_dumb() - \
Helper for implementing &struct drm_driver.dumb_create
* @file: the DRM file
@@ -410,59 +548,27 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
return (bo->destroy == ttm_buffer_object_destroy);
}
-/**
- * drm_gem_vram_bo_driver_evict_flags() - \
- Implements &struct ttm_bo_driver.evict_flags
- * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
- * @pl: TTM placement information.
- */
-void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *pl)
+static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
+ struct ttm_placement *pl)
{
- struct drm_gem_vram_object *gbo;
-
- /* TTM may pass BOs that are not GEM VRAM BOs. */
- if (!drm_is_gem_vram(bo))
- return;
-
- gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
*pl = gbo->placement;
}
-EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);
-/**
- * drm_gem_vram_bo_driver_verify_access() - \
- Implements &struct ttm_bo_driver.verify_access
- * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
- * @filp: File pointer.
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
+static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
- return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
- filp->private_data);
-}
-EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);
+ if (WARN_ON_ONCE(gbo->kmap_use_count))
+ return;
-/*
- * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm
- *
- * Most users of @struct drm_gem_vram_object will also use
- * @struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs
- * can be used to connect both.
- */
-const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
- .evict_flags = drm_gem_vram_bo_driver_evict_flags,
- .verify_access = drm_gem_vram_bo_driver_verify_access
-};
-EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
+ if (!kmap->virtual)
+ return;
+ ttm_bo_kunmap(kmap);
+ kmap->virtual = NULL;
+}
/*
* Helpers for struct drm_gem_object_funcs
@@ -544,6 +650,129 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
/*
+ * Helpers for struct drm_plane_helper_funcs
+ */
+
+/**
+ * drm_gem_vram_plane_helper_prepare_fb() - \
+ * Implements &struct drm_plane_helper_funcs.prepare_fb
+ * @plane: a DRM plane
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int
+drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
+ if (!new_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+ }
+
+ return 0;
+
+err_drm_gem_vram_unpin:
+ while (i) {
+ --i;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
+
+/**
+ * drm_gem_vram_plane_helper_cleanup_fb() - \
+ * Implements &struct drm_plane_helper_funcs.cleanup_fb
+ * @plane: a DRM plane
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_plane_helper_prepare_fb().
+ */
+void
+drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ if (!old_state->fb)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
+ if (!old_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
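
Drivers hook these directly into their plane helpers; a sketch (struct name hypothetical):

static const struct drm_plane_helper_funcs example_primary_plane_helper_funcs = {
	.prepare_fb	= drm_gem_vram_plane_helper_prepare_fb,
	.cleanup_fb	= drm_gem_vram_plane_helper_cleanup_fb,
	/* .atomic_check and .atomic_update as usual */
};
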
+
+/*
+ * Helpers for struct drm_simple_display_pipe_funcs
+ */
+
+/**
+ * drm_gem_vram_simple_display_pipe_prepare_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
+ * @pipe: a simple display pipe
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_simple_display_pipe_prepare_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state)
+{
+ return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
+
+/**
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
+ * @pipe: a simple display pipe
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_simple_display_pipe_prepare_fb().
+ */
+void drm_gem_vram_simple_display_pipe_cleanup_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
+
+/*
* PRIME helpers
*/
@@ -595,17 +824,11 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- int ret;
void *base;
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret)
- return NULL;
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- drm_gem_vram_unpin(gbo);
+ base = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(base))
return NULL;
- }
return base;
}
@@ -620,8 +843,7 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(gbo, vaddr);
}
/*
@@ -633,5 +855,278 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.pin = drm_gem_vram_object_pin,
.unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
- .vunmap = drm_gem_vram_object_vunmap
+ .vunmap = drm_gem_vram_object_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
+
+/*
+ * VRAM memory manager
+ */
+
+/*
+ * TTM TT
+ */
+
+static void backend_func_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func backend_func = {
+ .destroy = backend_func_destroy
+};
+
+/*
+ * TTM BO device
+ */
+
+static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &backend_func;
+
+ ret = ttm_tt_init(tt, bo, page_flags);
+ if (ret < 0)
+ goto err_ttm_tt_init;
+
+ return tt;
+
+err_ttm_tt_init:
+ kfree(tt);
+ return NULL;
+}
+
+static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_bo_driver_evict_flags(gbo, placement);
+}
+
+static void bo_driver_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+}
+
+static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
+ struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+
+ mem->bus.addr = NULL;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM: /* nothing to do */
+ mem->bus.offset = 0;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = vmm->vram_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{ }
+
+static struct ttm_bo_driver bo_driver = {
+ .ttm_tt_create = bo_driver_ttm_tt_create,
+ .ttm_tt_populate = ttm_pool_populate,
+ .ttm_tt_unpopulate = ttm_pool_unpopulate,
+ .init_mem_type = bo_driver_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = bo_driver_evict_flags,
+ .move_notify = bo_driver_move_notify,
+ .io_mem_reserve = bo_driver_io_mem_reserve,
+ .io_mem_free = bo_driver_io_mem_free,
+};
+
+/*
+ * struct drm_vram_mm
+ */
+
+#if defined(CONFIG_DEBUG_FS)
+static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
+ struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+ drm_mm_print(mm, &p);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ return 0;
+}
+
+static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
+ { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
+#endif
+
+/**
+ * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
+ *
+ * @minor: drm minor device.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_vram_mm_debugfs_init(struct drm_minor *minor)
+{
+ int ret = 0;
+
+#if defined(CONFIG_DEBUG_FS)
+ ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list),
+ minor->debugfs_root, minor);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
+
+static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
+ uint64_t vram_base, size_t vram_size)
+{
+ int ret;
+
+ vmm->vram_base = vram_base;
+ vmm->vram_size = vram_size;
+
+ ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+ dev->anon_inode->i_mapping,
+ dev->vma_offset_manager,
+ true);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
+{
+ ttm_bo_device_release(&vmm->bdev);
+}
+
+/*
+ * Helpers for integration with struct drm_device
+ */
+
+/**
+ * drm_vram_helper_alloc_mm - Allocates a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ * @vram_base: the base address of the video memory
+ * @vram_size: the size of the video memory in bytes
+ *
+ * Returns:
+ * The new instance of &struct drm_vram_mm on success, or
+ * an ERR_PTR()-encoded errno code otherwise.
+ */
+struct drm_vram_mm *drm_vram_helper_alloc_mm(
+ struct drm_device *dev, uint64_t vram_base, size_t vram_size)
+{
+ int ret;
+
+ if (WARN_ON(dev->vram_mm))
+ return dev->vram_mm;
+
+ dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
+ if (!dev->vram_mm)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
+ if (ret)
+ goto err_kfree;
+
+ return dev->vram_mm;
+
+err_kfree:
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
+
+/**
+ * drm_vram_helper_release_mm - Releases a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ */
+void drm_vram_helper_release_mm(struct drm_device *dev)
+{
+ if (!dev->vram_mm)
+ return;
+
+ drm_vram_mm_cleanup(dev->vram_mm);
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+}
+EXPORT_SYMBOL(drm_vram_helper_release_mm);
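
For reference, the exported drm_vram_helper_alloc_mm()/drm_vram_helper_release_mm() pair above no longer takes a funcs argument. A minimal sketch of how a driver built on these helpers might set up and tear down its VRAM manager after this change (the my_driver_* names are placeholders, not part of this patch, and the header location is an assumption based on this series):

	#include <linux/err.h>
	#include <drm/drm_device.h>
	#include <drm/drm_gem_vram_helper.h> /* assumed to declare the helpers after this series */

	static int my_driver_load_vram(struct drm_device *dev, u64 vram_base,
				       size_t vram_size)
	{
		struct drm_vram_mm *vmm;

		/* One call now sets up TTM and the VRAM manager; no funcs argument. */
		vmm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);

		return 0;
	}

	static void my_driver_unload_vram(struct drm_device *dev)
	{
		/* No-op if the VRAM manager was never allocated. */
		drm_vram_helper_release_mm(dev);
	}
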
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 0bec6dbb0142..fbea69d6f909 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -40,6 +40,7 @@
#include <xen/xen.h>
#include <drm/drm_agpsupport.h>
+#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 1961f713aaab..e34058c721be 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -783,7 +783,7 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *dbi, int dc,
int i, ret;
u8 *dst;
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
@@ -907,7 +907,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
max_chunk = dbi->tx_buf9_len;
dst16 = dbi->tx_buf9;
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
@@ -955,7 +955,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd,
int ret;
if (mipi_dbi_command_is_read(dbi, *cmd))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
@@ -1021,7 +1021,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
unsigned int i;
for (i = 0; i < len; i++)
- data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
+ data[i] = (buf[i] << 1) | (buf[i + 1] >> 7);
}
MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
@@ -1187,8 +1187,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
struct mipi_dbi_dev *dbidev = m->private;
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
- unsigned int i;
- int ret, idx;
+ int i, ret, idx;
if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 4581c5387372..2a6e34663146 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -174,7 +174,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
node->__subtree_last = LAST(node);
- if (hole_node->allocated) {
+ if (drm_mm_node_allocated(hole_node)) {
rb = &hole_node->rb;
while (rb) {
parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -424,9 +424,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->mm = mm;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
node->hole_size = 0;
rm_hole(hole);
@@ -543,9 +543,9 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
node->color = color;
node->hole_size = 0;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
rm_hole(hole);
if (adj_start > hole_start)
@@ -561,6 +561,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+{
+ return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+}
+
/**
* drm_mm_remove_node - Remove a memory node from the allocator.
* @node: drm_mm_node to remove
@@ -574,8 +579,8 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
prev_node = list_prev_entry(node, node_list);
@@ -584,11 +589,12 @@ void drm_mm_remove_node(struct drm_mm_node *node)
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
- node->allocated = false;
if (drm_mm_hole_follows(prev_node))
rm_hole(prev_node);
add_hole(prev_node);
+
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);
@@ -605,10 +611,11 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
struct drm_mm *mm = old->mm;
- DRM_MM_BUG_ON(!old->allocated);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
*new = *old;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
list_replace(&old->node_list, &new->node_list);
rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
@@ -622,8 +629,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
&mm->holes_addr);
}
- old->allocated = false;
- new->allocated = true;
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
@@ -731,9 +737,9 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
u64 adj_start, adj_end;
DRM_MM_BUG_ON(node->mm != mm);
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
- node->scanned_block = true;
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
+ __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
mm->scan_active++;
/* Remove this block from the node_list so that we enlarge the hole
@@ -818,8 +824,8 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
struct drm_mm_node *prev_node;
DRM_MM_BUG_ON(node->mm != scan->mm);
- DRM_MM_BUG_ON(!node->scanned_block);
- node->scanned_block = false;
+ DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
+ __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
DRM_MM_BUG_ON(!node->mm->scan_active);
node->mm->scan_active--;
@@ -917,7 +923,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- mm->head_node.allocated = false;
+ mm->head_node.flags = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = -size;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 7bc03c3c154f..3b570a404933 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -428,8 +428,6 @@ EXPORT_SYMBOL(drm_mode_config_init);
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 43d89dd59c6b..0ca58803ba46 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -250,11 +250,6 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!remote)
return -ENODEV;
- if (!of_device_is_available(remote)) {
- of_node_put(remote);
- return -ENODEV;
- }
-
if (panel) {
*panel = of_drm_find_panel(remote);
if (!IS_ERR(*panel))
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 6b0bf42039cf..ed7985c0535a 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -44,13 +44,21 @@ static LIST_HEAD(panel_list);
/**
* drm_panel_init - initialize a panel
* @panel: DRM panel
+ * @dev: parent device of the panel
+ * @funcs: panel operations
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
*
- * Sets up internal fields of the panel so that it can subsequently be added
- * to the registry.
+ * Initialize the panel structure for subsequent registration with
+ * drm_panel_add().
*/
-void drm_panel_init(struct drm_panel *panel)
+void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ const struct drm_panel_funcs *funcs, int connector_type)
{
INIT_LIST_HEAD(&panel->list);
+ panel->dev = dev;
+ panel->funcs = funcs;
+ panel->connector_type = connector_type;
}
EXPORT_SYMBOL(drm_panel_init);
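
With the extended drm_panel_init() above, panel drivers pass the parent device, the ops table and the connector type at init time instead of assigning the fields by hand. A hedged sketch of a caller (struct my_panel and my_panel_funcs are illustrative, and drm_panel_add() returning an int is an assumption for this kernel version):

	#include <linux/device.h>
	#include <linux/slab.h>
	#include <drm/drm_connector.h>
	#include <drm/drm_panel.h>

	struct my_panel {
		struct drm_panel base;
	};

	static const struct drm_panel_funcs my_panel_funcs = {
		/* .prepare, .enable, .get_modes, ... filled in by the real driver */
	};

	static int my_panel_probe(struct device *dev)
	{
		struct my_panel *p;

		p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		/* dev, funcs and connector type are now passed at init time. */
		drm_panel_init(&p->base, dev, &my_panel_funcs,
			       DRM_MODE_CONNECTOR_DSI);

		return drm_panel_add(&p->base);
	}
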
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0a2316e0e812..0814211b0f3f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -713,6 +713,15 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct file *fil;
int ret;
+ if (obj->funcs && obj->funcs->mmap) {
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret)
+ return ret;
+ vma->vm_private_data = obj;
+ drm_gem_object_get(obj);
+ return 0;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
fil = kzalloc(sizeof(*fil), GFP_KERNEL);
if (!priv || !fil) {
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index a17c8a14dba4..9a25d73c155c 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -28,6 +28,7 @@
#include <stdarg.h>
#include <linux/io.h>
+#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -35,6 +36,24 @@
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+/*
+ * drm_debug: Enable debug output.
+ * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
+ */
+unsigned int drm_debug;
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
+"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
+module_param_named(debug, drm_debug, int, 0600);
+
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
struct drm_print_iterator *iterator = p->arg;
@@ -147,6 +166,12 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
}
EXPORT_SYMBOL(__drm_printfn_debug);
+void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ pr_err("*ERROR* %s %pV", p->prefix, vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_err);
+
/**
* drm_puts - print a const string to a &drm_printer stream
* @p: the &drm printer
@@ -179,6 +204,37 @@ void drm_printf(struct drm_printer *p, const char *f, ...)
}
EXPORT_SYMBOL(drm_printf);
+/**
+ * drm_print_bits - print bits to a &drm_printer stream
+ *
+ * Print bits (in flag fields for example) in human readable form.
+ *
+ * @p: the &drm_printer
+ * @value: field value.
+ * @bits: Array with bit names.
+ * @nbits: Size of bit names array.
+ */
+void drm_print_bits(struct drm_printer *p, unsigned long value,
+ const char * const bits[], unsigned int nbits)
+{
+ bool first = true;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(nbits > BITS_PER_TYPE(value)))
+ nbits = BITS_PER_TYPE(value);
+
+ for_each_set_bit(i, &value, nbits) {
+ if (WARN_ON_ONCE(!bits[i]))
+ continue;
+ drm_printf(p, "%s%s", first ? "" : ",",
+ bits[i]);
+ first = false;
+ }
+ if (first)
+ drm_printf(p, "(none)");
+}
+EXPORT_SYMBOL(drm_print_bits);
+
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...)
{
@@ -206,7 +262,7 @@ void drm_dev_dbg(const struct device *dev, unsigned int category,
struct va_format vaf;
va_list args;
- if (!(drm_debug & category))
+ if (!drm_debug_enabled(category))
return;
va_start(args, format);
@@ -229,7 +285,7 @@ void drm_dbg(unsigned int category, const char *format, ...)
struct va_format vaf;
va_list args;
- if (!(drm_debug & category))
+ if (!drm_debug_enabled(category))
return;
va_start(args, format);
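
The drm_print.c hunks above move the drm_debug module parameter into this file and add the drm_print_bits() helper. A short sketch of how a driver might use the new helpers to decode a flags word (the bit-name table and function names are illustrative only):

	#include <linux/kernel.h>
	#include <drm/drm_print.h>

	static const char * const my_flag_names[] = {
		"dirty", "pinned", "mapped",
	};

	static void my_dump_flags(struct drm_printer *p, unsigned long flags)
	{
		drm_printf(p, "flags: ");
		drm_print_bits(p, flags, my_flag_names, ARRAY_SIZE(my_flag_names));
		drm_printf(p, "\n");

		/* Category checks now go through the drm_debug_enabled() helper. */
		if (drm_debug_enabled(DRM_UT_DRIVER))
			drm_printf(p, "raw value: 0x%lx\n", flags);
	}
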
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ef2c468205a2..a7c87abe88d0 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -32,6 +32,7 @@
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -92,7 +93,6 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_device *dev = connector->dev;
enum drm_mode_status ret = MODE_OK;
struct drm_encoder *encoder;
- int i;
/* Step 1: Validate against connector */
ret = drm_connector_mode_valid(connector, mode);
@@ -100,7 +100,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
return ret;
/* Step 2: Validate against encoders and crtcs */
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
struct drm_crtc *crtc;
ret = drm_encoder_mode_valid(encoder, mode);
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index b11910f14c46..15fb516ae2d8 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -42,7 +43,7 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
/* Anything goes */
return MODE_OK;
- return pipe->funcs->mode_valid(crtc, mode);
+ return pipe->funcs->mode_valid(pipe, mode);
}
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4b5c7b0ed714..669c93fe2500 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -135,6 +135,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
#include "drm_internal.h"
@@ -1279,7 +1280,7 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags != 0)
return -EINVAL;
if (args->count_handles == 0)
@@ -1350,7 +1351,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
return -EINVAL;
if (args->count_handles == 0)
@@ -1371,25 +1372,32 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
fence = drm_syncobj_fence_get(syncobjs[i]);
chain = to_dma_fence_chain(fence);
if (chain) {
- struct dma_fence *iter, *last_signaled = NULL;
-
- dma_fence_chain_for_each(iter, fence) {
- if (iter->context != fence->context) {
- dma_fence_put(iter);
- /* It is most likely that timeline has
- * unorder points. */
- break;
+ struct dma_fence *iter, *last_signaled =
+ dma_fence_get(fence);
+
+ if (args->flags &
+ DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
+ point = fence->seqno;
+ } else {
+ dma_fence_chain_for_each(iter, fence) {
+ if (iter->context != fence->context) {
+ dma_fence_put(iter);
+ /* It is most likely that the timeline has
+ * unordered points. */
+ break;
+ }
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
}
- dma_fence_put(last_signaled);
- last_signaled = dma_fence_get(iter);
+ point = dma_fence_is_signaled(last_signaled) ?
+ last_signaled->seqno :
+ to_dma_fence_chain(last_signaled)->prev_seqno;
}
- point = dma_fence_is_signaled(last_signaled) ?
- last_signaled->seqno :
- to_dma_fence_chain(last_signaled)->prev_seqno;
dma_fence_put(last_signaled);
} else {
point = 0;
}
+ dma_fence_put(fence);
ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
ret = ret ? -EFAULT : 0;
if (ret)
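
The query ioctl above now honours DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED in the flags field (formerly pad). A userspace sketch under the assumption that the standard uapi struct drm_syncobj_timeline_array and DRM_IOCTL_SYNCOBJ_QUERY are available; fd and handle come from elsewhere:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h> /* uapi header; install path may differ */

	/* Query the last *submitted* timeline point instead of the last signaled one. */
	static int query_last_submitted(int fd, uint32_t handle, uint64_t *point)
	{
		struct drm_syncobj_timeline_array args = {
			.handles = (uintptr_t)&handle,
			.points = (uintptr_t)point,
			.count_handles = 1,
			.flags = DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED,
		};

		return ioctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
	}
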
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 471eb927474b..11c6dd577e8e 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -13,17 +13,23 @@ struct drm_file;
#define TRACE_INCLUDE_FILE drm_trace
TRACE_EVENT(drm_vblank_event,
- TP_PROTO(int crtc, unsigned int seq),
- TP_ARGS(crtc, seq),
+ TP_PROTO(int crtc, unsigned int seq, ktime_t time, bool high_prec),
+ TP_ARGS(crtc, seq, time, high_prec),
TP_STRUCT__entry(
__field(int, crtc)
__field(unsigned int, seq)
+ __field(ktime_t, time)
+ __field(bool, high_prec)
),
TP_fast_assign(
__entry->crtc = crtc;
__entry->seq = seq;
- ),
- TP_printk("crtc=%d, seq=%u", __entry->crtc, __entry->seq)
+ __entry->time = time;
+ __entry->high_prec = high_prec;
+ ),
+ TP_printk("crtc=%d, seq=%u, time=%lld, high-prec=%s",
+ __entry->crtc, __entry->seq, __entry->time,
+ __entry->high_prec ? "true" : "false")
);
TRACE_EVENT(drm_vblank_event_queued,
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index fd1fbc77871f..1659b13b178c 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -106,7 +106,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
write_seqlock(&vblank->seqlock);
vblank->time = t_vblank;
- vblank->count += vblank_count_inc;
+ atomic64_add(vblank_count_inc, &vblank->count);
write_sequnlock(&vblank->seqlock);
}
@@ -272,7 +272,8 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%llu, diff=%u, hw=%u hw_last=%u\n",
- pipe, vblank->count, diff, cur_vblank, vblank->last);
+ pipe, atomic64_read(&vblank->count), diff,
+ cur_vblank, vblank->last);
if (diff == 0) {
WARN_ON_ONCE(cur_vblank != vblank->last);
@@ -294,11 +295,23 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ u64 count;
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
- return vblank->count;
+ count = atomic64_read(&vblank->count);
+
+ /*
+ * This read barrier corresponds to the implicit write barrier of the
+ * write seqlock in store_vblank(). Note that this is the only place
+ * where we need an explicit barrier, since all other access goes
+ * through drm_vblank_count_and_time(), which already has the required
+ * read barrier curtesy of the read seqlock.
+ */
+ smp_rmb();
+
+ return count;
}
/**
@@ -319,7 +332,7 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -693,7 +706,7 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
*/
*vblank_time = ktime_sub_ns(etime, delta_ns);
- if ((drm_debug & DRM_UT_VBL) == 0)
+ if (!drm_debug_enabled(DRM_UT_VBL))
return true;
ts_etime = ktime_to_timespec64(etime);
@@ -763,6 +776,14 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* vblank interrupt (since it only reports the software vblank counter), see
* drm_crtc_accurate_vblank_count() for such use-cases.
*
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
+ *
* Returns:
* The software vblank counter.
*/
@@ -800,7 +821,7 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
do {
seq = read_seqbegin(&vblank->seqlock);
- vblank_count = vblank->count;
+ vblank_count = atomic64_read(&vblank->count);
*vblanktime = vblank->time;
} while (read_seqretry(&vblank->seqlock, seq));
@@ -817,6 +838,14 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
*/
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime)
@@ -1323,7 +1352,7 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
assert_spin_locked(&dev->vblank_time_lock);
vblank = &dev->vblank[pipe];
- WARN_ONCE((drm_debug & DRM_UT_VBL) && !vblank->framedur_ns,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
"Cannot compute missed vblanks without frame duration\n");
framedur_ns = vblank->framedur_ns;
@@ -1581,7 +1610,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
unsigned int flags, pipe, high_pipe;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
@@ -1731,7 +1760,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
send_vblank_event(dev, e, seq, now);
}
- trace_drm_vblank_event(pipe, seq);
+ trace_drm_vblank_event(pipe, seq, now,
+ dev->driver->get_vblank_timestamp != NULL);
}
/**
@@ -1806,6 +1836,14 @@ EXPORT_SYMBOL(drm_handle_vblank);
*
* This is the native KMS version of drm_handle_vblank().
*
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
+ *
* Returns:
* True if the event was successfully handled, false on failure.
*/
@@ -1838,7 +1876,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
if (!crtc)
@@ -1896,7 +1934,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
if (!crtc)
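
With vblank->count now an atomic64_t and the barrier pairing documented in the new kerneldoc above, writes made before drm_crtc_handle_vblank() are visible to readers that observe the same or a later count. A minimal sketch of that pattern (my_crtc_state and the function names are illustrative, not from this patch):

	#include <linux/interrupt.h>
	#include <drm/drm_crtc.h>
	#include <drm/drm_vblank.h>

	static struct {
		unsigned int frame_seq;
	} my_crtc_state; /* illustrative per-CRTC data */

	static irqreturn_t my_vblank_irq(int irq, void *arg)
	{
		struct drm_crtc *crtc = arg;

		/* Publish per-frame data before bumping the counter... */
		WRITE_ONCE(my_crtc_state.frame_seq, my_crtc_state.frame_seq + 1);

		/* ...drm_crtc_handle_vblank() supplies the write side of the barrier. */
		drm_crtc_handle_vblank(crtc);

		return IRQ_HANDLED;
	}

	static void my_read_side(struct drm_crtc *crtc)
	{
		/* Reading the count supplies the read side of the barrier. */
		u64 count = drm_crtc_vblank_count(crtc);

		pr_debug("vblank %llu, frame_seq %u\n", count,
			 READ_ONCE(my_crtc_state.frame_seq));
	}
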
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
index e9c9f9a80ba3..2000d9b33fd5 100644
--- a/drivers/gpu/drm/drm_vram_helper_common.c
+++ b/drivers/gpu/drm/drm_vram_helper_common.c
@@ -7,9 +7,8 @@
*
* This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
* buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM can be
- * managed with &struct drm_vram_mm (VRAM MM). Both data structures are
- * supposed to be used together, but can also be used individually.
+ * framebuffer devices with dedicated memory. The video RAM is managed
+ * by &struct drm_vram_mm (VRAM MM).
*
* With the GEM interface userspace applications create, manage and destroy
* graphics buffers, such as an on-screen framebuffer. GEM does not provide
@@ -50,8 +49,7 @@
* // setup device, vram base and size
* // ...
*
- * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size,
- * &drm_gem_vram_mm_funcs);
+ * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
* if (ret)
* return ret;
* return 0;
diff --git a/drivers/gpu/drm/drm_vram_mm_helper.c b/drivers/gpu/drm/drm_vram_mm_helper.c
deleted file mode 100644
index c911781d6728..000000000000
--- a/drivers/gpu/drm/drm_vram_mm_helper.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_vram_mm_helper.h>
-
-#include <drm/ttm/ttm_page_alloc.h>
-
-/**
- * DOC: overview
- *
- * The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. These
- * helper functions work well with &struct drm_gem_vram_object.
- */
-
-/*
- * TTM TT
- */
-
-static void backend_func_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func backend_func = {
- .destroy = backend_func_destroy
-};
-
-/*
- * TTM BO device
- */
-
-static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *tt;
- int ret;
-
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt)
- return NULL;
-
- tt->func = &backend_func;
-
- ret = ttm_tt_init(tt, bo, page_flags);
- if (ret < 0)
- goto err_ttm_tt_init;
-
- return tt;
-
-err_ttm_tt_init:
- kfree(tt);
- return NULL;
-}
-
-static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
-
- if (vmm->funcs && vmm->funcs->evict_flags)
- vmm->funcs->evict_flags(bo, placement);
-}
-
-static int bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
-
- if (!vmm->funcs || !vmm->funcs->verify_access)
- return 0;
- return vmm->funcs->verify_access(bo, filp);
-}
-
-static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
-
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- mem->bus.addr = NULL;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
-
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM: /* nothing to do */
- mem->bus.offset = 0;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- break;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = vmm->vram_base;
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{ }
-
-static struct ttm_bo_driver bo_driver = {
- .ttm_tt_create = bo_driver_ttm_tt_create,
- .ttm_tt_populate = ttm_pool_populate,
- .ttm_tt_unpopulate = ttm_pool_unpopulate,
- .init_mem_type = bo_driver_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = bo_driver_evict_flags,
- .verify_access = bo_driver_verify_access,
- .io_mem_reserve = bo_driver_io_mem_reserve,
- .io_mem_free = bo_driver_io_mem_free,
-};
-
-/*
- * struct drm_vram_mm
- */
-
-/**
- * drm_vram_mm_init() - Initialize an instance of VRAM MM.
- * @vmm: the VRAM MM instance to initialize
- * @dev: the DRM device
- * @vram_base: the base address of the video memory
- * @vram_size: the size of the video memory in bytes
- * @funcs: callback functions for buffer objects
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
- uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs)
-{
- int ret;
-
- vmm->vram_base = vram_base;
- vmm->vram_size = vram_size;
- vmm->funcs = funcs;
-
- ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret)
- return ret;
-
- ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_vram_mm_init);
-
-/**
- * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM.
- * @vmm: the VRAM MM instance to clean up
- */
-void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
-{
- ttm_bo_device_release(&vmm->bdev);
-}
-EXPORT_SYMBOL(drm_vram_mm_cleanup);
-
-/**
- * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- * @vmm: the VRAM MM instance
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
- struct drm_vram_mm *vmm)
-{
- return ttm_bo_mmap(filp, vma, &vmm->bdev);
-}
-EXPORT_SYMBOL(drm_vram_mm_mmap);
-
-/*
- * Helpers for integration with struct drm_device
- */
-
-/**
- * drm_vram_helper_alloc_mm - Allocates a device's instance of \
- &struct drm_vram_mm
- * @dev: the DRM device
- * @vram_base: the base address of the video memory
- * @vram_size: the size of the video memory in bytes
- * @funcs: callback functions for buffer objects
- *
- * Returns:
- * The new instance of &struct drm_vram_mm on success, or
- * an ERR_PTR()-encoded errno code otherwise.
- */
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs)
-{
- int ret;
-
- if (WARN_ON(dev->vram_mm))
- return dev->vram_mm;
-
- dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
- if (!dev->vram_mm)
- return ERR_PTR(-ENOMEM);
-
- ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size, funcs);
- if (ret)
- goto err_kfree;
-
- return dev->vram_mm;
-
-err_kfree:
- kfree(dev->vram_mm);
- dev->vram_mm = NULL;
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
-
-/**
- * drm_vram_helper_release_mm - Releases a device's instance of \
- &struct drm_vram_mm
- * @dev: the DRM device
- */
-void drm_vram_helper_release_mm(struct drm_device *dev)
-{
- if (!dev->vram_mm)
- return;
-
- drm_vram_mm_cleanup(dev->vram_mm);
- kfree(dev->vram_mm);
- dev->vram_mm = NULL;
-}
-EXPORT_SYMBOL(drm_vram_helper_release_mm);
-
-/*
- * Helpers for &struct file_operations
- */
-
-/**
- * drm_vram_mm_file_operations_mmap() - \
- Implements &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_file_operations_mmap(
- struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
-
- if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
- return -EINVAL;
-
- return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
-}
-EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 7e4e2959bf4f..32d9fac587f9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -326,7 +326,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
lockdep_assert_held(&gpu->lock);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
link_target = etnaviv_cmdbuf_get_va(cmdbuf,
@@ -459,13 +459,13 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ buffer->user_size - 4);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
return_target,
etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
cmdbuf->vaddr);
- if (drm_debug & DRM_UT_DRIVER) {
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
cmdbuf->vaddr, cmdbuf->size, 0);
@@ -484,6 +484,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
link_target);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 3a0f0ba8c63a..1e6aa24bf45e 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -19,6 +19,7 @@
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6926cee91b36..72726f2c7a9f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -24,6 +24,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b78e8c5ba553..f41d75923557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -21,6 +21,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index bc1565f1822a..48159d5d2214 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <media/cec-notifier.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -852,6 +853,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
static void hdmi_connector_destroy(struct drm_connector *connector)
{
+ struct hdmi_context *hdata = connector_to_hdmi(connector);
+
+ cec_notifier_conn_unregister(hdata->notifier);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -935,6 +940,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct drm_connector *connector = &hdata->connector;
+ struct cec_connector_info conn_info;
int ret;
connector->interlace_allowed = true;
@@ -957,6 +963,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL,
+ &conn_info);
+ if (!hdata->notifier) {
+ ret = -ENOMEM;
+ DRM_DEV_ERROR(hdata->dev, "Failed to allocate CEC notifier\n");
+ }
+
return ret;
}
@@ -1528,8 +1543,8 @@ static void hdmi_disable(struct drm_encoder *encoder)
*/
mutex_unlock(&hdata->mutex);
cancel_delayed_work(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier,
- CEC_PHYS_ADDR_INVALID);
+ if (hdata->notifier)
+ cec_notifier_phys_addr_invalidate(hdata->notifier);
return;
}
@@ -2006,12 +2021,6 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
- hdata->notifier = cec_notifier_get(&pdev->dev);
- if (hdata->notifier == NULL) {
- ret = -ENOMEM;
- goto err_hdmiphy;
- }
-
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@@ -2023,7 +2032,7 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_register_audio_device(hdata);
if (ret)
- goto err_notifier_put;
+ goto err_rpm_disable;
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
@@ -2034,8 +2043,7 @@ static int hdmi_probe(struct platform_device *pdev)
err_unregister_audio:
platform_device_unregister(hdata->audio.pdev);
-err_notifier_put:
- cec_notifier_put(hdata->notifier);
+err_rpm_disable:
pm_runtime_disable(dev);
err_hdmiphy:
@@ -2054,12 +2062,10 @@ static int hdmi_remove(struct platform_device *pdev)
struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
component_del(&pdev->dev, &hdmi_component_ops);
platform_device_unregister(hdata->audio.pdev);
- cec_notifier_put(hdata->notifier);
pm_runtime_disable(&pdev->dev);
if (!IS_ERR(hdata->reg_hdmi_en))
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7b24338fad3c..6cfdb95fef2f 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1069,9 +1069,9 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
struct mixer_context *ctx = crtc->ctx;
int width = mode->hdisplay, height = mode->vdisplay, i;
- struct {
+ static const struct {
int hdisplay, vdisplay, htotal, vtotal, scan_val;
- } static const modes[] = {
+ } modes[] = {
{ 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
{ 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD },
{ 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD },
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index a92fd6c70b09..82c972e9c024 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -9,6 +9,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index f56852a503e8..8b784947ed3b 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
+ memset(&clock, 0, sizeof(clock));
+
switch (refclk) {
case 27000:
if (target < 200000) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 03023fa0fb6f..f350ac1ead18 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -498,7 +498,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
return;
}
- /*create a new connetor*/
+ /*create a new connector*/
dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
if (!dsi_connector) {
DRM_ERROR("No memory");
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 167c10767dd4..900e5499249d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
s32 freq_error, min_error = 100000;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max;
@@ -185,6 +186,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 35a3c5f0c38c..dfc5aef62f7b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -4,7 +4,8 @@ config DRM_HISI_HIBMC
depends on DRM && PCI && MMU && ARM64
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
-
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Choose this option if you have a Hisilicon Hibmc soc chipset.
If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index cc4c41748cfb..6527a97f68a3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -96,7 +96,6 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *state = plane->state;
u32 reg;
- int ret;
s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
@@ -109,16 +108,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
hibmc_fb = to_hibmc_framebuffer(state->fb);
gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- DRM_ERROR("failed to pin bo: %d", ret);
- return;
- }
gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- return;
- }
+ if (WARN_ON_ONCE(gpu_addr < 0))
+ return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
@@ -157,6 +149,8 @@ static struct drm_plane_funcs hibmc_plane_funcs = {
};
static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
.atomic_check = hibmc_plane_atomic_check,
.atomic_update = hibmc_plane_atomic_update,
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index c103005b0a33..2fd4ca91a62d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -22,15 +22,11 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
-#include <drm/drm_vram_mm_helper.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-static const struct file_operations hibmc_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
{
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 9f6e473e6295..21b684eab5c9 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -17,7 +17,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_print.h>
-#include <drm/drm_vram_mm_helper.h>
#include "hibmc_drm_drv.h"
@@ -29,7 +28,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
vmm = drm_vram_helper_alloc_mm(dev,
pci_resource_start(dev->pdev, 0),
- hibmc->fb_size, &drm_gem_vram_mm_funcs);
+ hibmc->fb_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5bf8138941de..bdcf9c6ae9e9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 8bcf0d199145..a839f78a4c8a 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -44,7 +44,7 @@ struct sil164_priv {
((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
#define sil164_dbg(client, format, ...) do { \
- if (drm_debug & DRM_UT_KMS) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
dev_printk(KERN_DEBUG, &client->dev, \
"%s: " format, __func__, ## __VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 8039fc0d83db..5b03fdd1eaa4 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -420,7 +420,8 @@ static int tda9950_probe(struct i2c_client *client,
priv->hdmi = glue->parent;
priv->adap = cec_allocate_adapter(&tda9950_cec_ops, priv, "tda9950",
- CEC_CAP_DEFAULTS,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(priv->adap))
return PTR_ERR(priv->adap);
@@ -457,13 +458,14 @@ static int tda9950_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- priv->notify = cec_notifier_get(priv->hdmi);
+ priv->notify = cec_notifier_cec_adap_register(priv->hdmi, NULL,
+ priv->adap);
if (!priv->notify)
return -ENOMEM;
ret = cec_register_adapter(priv->adap, priv->hdmi);
if (ret < 0) {
- cec_notifier_put(priv->notify);
+ cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
return ret;
}
@@ -473,8 +475,6 @@ static int tda9950_probe(struct i2c_client *client,
*/
devm_remove_action(dev, tda9950_cec_del, priv);
- cec_register_cec_notifier(priv->adap, priv->notify);
-
return 0;
}
@@ -482,8 +482,8 @@ static int tda9950_remove(struct i2c_client *client)
{
struct tda9950_priv *priv = i2c_get_clientdata(client);
+ cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
cec_unregister_adapter(priv->adap);
- cec_notifier_put(priv->notify);
return 0;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 84c6d4c91c65..a63790d32d75 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -14,6 +14,7 @@
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -805,8 +806,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
tda998x_edid_delay_start(priv);
} else {
schedule_work(&priv->detect_work);
- cec_notifier_set_phys_addr(priv->cec_notify,
- CEC_PHYS_ADDR_INVALID);
+ cec_notifier_phys_addr_invalidate(
+ priv->cec_notify);
}
handled = true;
@@ -1790,8 +1791,7 @@ static void tda998x_destroy(struct device *dev)
i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
+ cec_notifier_conn_unregister(priv->cec_notify);
}
static int tda998x_create(struct device *dev)
@@ -1916,7 +1916,7 @@ static int tda998x_create(struct device *dev)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(dev);
+ priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 2a77823b8e9a..e66c38332df4 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -728,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev,
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
if (sarea_priv->dirty)
@@ -1048,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in
if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
sarea_priv->dirty = 0x7f;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 0d21402945ab..ba9595960bbe 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -76,7 +76,7 @@ config DRM_I915_CAPTURE_ERROR
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang to
- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+ https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
for triaging.
If in doubt, say "Y".
@@ -105,11 +105,11 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
- bool "Enable Intel GVT-g graphics virtualization host support"
- depends on DRM_I915
- depends on 64BIT
- default n
- help
+ bool "Enable Intel GVT-g graphics virtualization host support"
+ depends on DRM_I915
+ depends on 64BIT
+ default n
+ help
Choose this option if you want to enable Intel GVT-g graphics
virtualization technology host support with integrated graphics.
With GVT-g, it's possible to have one integrated graphics
@@ -148,3 +148,9 @@ menu "drm/i915 Profile Guided Optimisation"
depends on DRM_I915
source "drivers/gpu/drm/i915/Kconfig.profile"
endmenu
+
+menu "drm/i915 Unstable Evolution"
+ visible if EXPERT && STAGING && BROKEN
+ depends on DRM_I915
+ source "drivers/gpu/drm/i915/Kconfig.unstable"
+endmenu
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 1400fce39c58..438040ff0179 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -1,33 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_I915_WERROR
- bool "Force GCC to throw an error instead of a warning when compiling"
- # As this may inadvertently break the build, only allow the user
- # to shoot oneself in the foot iff they aim really hard
- depends on EXPERT
- # We use the dependency on !COMPILE_TEST to not be enabled in
- # allmodconfig or allyesconfig configurations
- depends on !COMPILE_TEST
- select HEADER_TEST
- default n
- help
- Add -Werror to the build flags for (and only for) i915.ko.
- Do not enable this unless you are writing code for the i915.ko module.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
+ bool "Force GCC to throw an error instead of a warning when compiling"
+ # As this may inadvertently break the build, only allow the user
+ # to shoot oneself in the foot iff they aim really hard
+ depends on EXPERT
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Add -Werror to the build flags for (and only for) i915.ko.
+ Do not enable this unless you are writing code for the i915.ko module.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
config DRM_I915_DEBUG
- bool "Enable additional driver debugging"
- depends on DRM_I915
- select DEBUG_FS
- select PREEMPT_COUNT
- select I2C_CHARDEV
- select STACKDEPOT
- select DRM_DP_AUX_CHARDEV
- select X86_MSR # used by igt/pm_rpm
- select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
- select DRM_DEBUG_MM if DRM=y
+ bool "Enable additional driver debugging"
+ depends on DRM_I915
+ select DEBUG_FS
+ select PREEMPT_COUNT
+ select I2C_CHARDEV
+ select STACKDEPOT
+ select DRM_DP_AUX_CHARDEV
+ select X86_MSR # used by igt/pm_rpm
+ select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
+ select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
@@ -35,14 +34,14 @@ config DRM_I915_DEBUG
select DRM_I915_SELFTEST
select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_DEBUG_MMIO
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG_MMIO
bool "Always insert extra checks around mmio access by default"
@@ -58,16 +57,16 @@ config DRM_I915_DEBUG_MMIO
If in doubt, say "N".
config DRM_I915_DEBUG_GEM
- bool "Insert extra checks into the GEM internals"
- default n
- depends on DRM_I915_WERROR
- help
- Enable extra sanity checks (including BUGs) along the GEM driver
- paths that may slow the system down and if hit hang the machine.
+ bool "Insert extra checks into the GEM internals"
+ default n
+ depends on DRM_I915_WERROR
+ help
+ Enable extra sanity checks (including BUGs) along the GEM driver
+ paths that may slow the system down and if hit hang the machine.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_ERRLOG_GEM
bool "Insert extra logging (very verbose) for common GEM errors"
@@ -110,41 +109,41 @@ config DRM_I915_TRACE_GTT
If in doubt, say "N".
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
- bool "Enable additional driver debugging for fence objects"
- depends on DRM_I915
- select DEBUG_OBJECTS
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ bool "Enable additional driver debugging for fence objects"
+ depends on DRM_I915
+ select DEBUG_OBJECTS
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_SW_FENCE_CHECK_DAG
- bool "Enable additional driver debugging for detecting dependency cycles"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ bool "Enable additional driver debugging for detecting dependency cycles"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG_GUC
- bool "Enable additional driver debugging for GuC"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will help resolve GuC related issues.
+ bool "Enable additional driver debugging for GuC"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will help resolve GuC related issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
@@ -177,15 +176,15 @@ config DRM_I915_SELFTEST_BROKEN
If in doubt, say "N".
config DRM_I915_LOW_LEVEL_TRACEPOINTS
- bool "Enable low level request tracing events"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on low level request tracing events.
- This provides the ability to precisely monitor engine utilisation
- and also analyze the request dependency resolving timeline.
-
- If in doubt, say "N".
+ bool "Enable low level request tracing events"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on low level request tracing events.
+ This provides the ability to precisely monitor engine utilisation
+ and also analyze the request dependency resolving timeline.
+
+ If in doubt, say "N".
config DRM_I915_DEBUG_VBLANK_EVADE
bool "Enable extra debug warnings for vblank evasion"
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 48df8889a88a..1799537a3228 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -12,6 +12,29 @@ config DRM_I915_USERFAULT_AUTOSUSPEND
May be 0 to disable the extra delay and solely use the device level
runtime pm autosuspend delay tunable.
+config DRM_I915_HEARTBEAT_INTERVAL
+ int "Interval between heartbeat pulses (ms)"
+ default 2500 # milliseconds
+ help
+ The driver sends a periodic heartbeat down all active engines to
+ check the health of the GPU and undertake regular house-keeping of
+ internal driver state.
+
+ May be 0 to disable heartbeats and therefore disable automatic GPU
+ hang detection.
+
+config DRM_I915_PREEMPT_TIMEOUT
+ int "Preempt timeout (ms, jiffy granularity)"
+ default 100 # milliseconds
+ help
+ How long to wait (in milliseconds) for a preemption event to occur
+ when submitting a new context via execlists. If the current context
+ does not hit an arbitration point and yield to HW before the timer
+ expires, the HW will be reset to allow the more important context
+ to execute.
+
+ May be 0 to disable the timeout.
+
config DRM_I915_SPIN_REQUEST
int "Busywait for request completion (us)"
default 5 # microseconds
@@ -25,3 +48,29 @@ config DRM_I915_SPIN_REQUEST
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
+
+config DRM_I915_STOP_TIMEOUT
+ int "How long to wait for an engine to quiesce gracefully before reset (ms)"
+ default 100 # milliseconds
+ help
+ By stopping submission and sleeping for a short time before resetting
+ the GPU, we allow the innocent contexts also on the system to quiesce.
+ It is then less likely for a hanging context to cause collateral
+ damage as the system is reset in order to recover. The corollary is
+ that the reset itself may take longer and so be more disruptive to
+ interactive or low latency workloads.
+
+config DRM_I915_TIMESLICE_DURATION
+ int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
+ default 1 # milliseconds
+ help
+ When two user batches of equal priority are executing, we will
+ alternate execution of each batch to ensure forward progress of
+ all users. This is necessary in some cases where there may be
+ an implicit dependency between those batches that requires
+ concurrent execution in order for them to proceed, e.g. they
+ interact with each other via userspace semaphores. Each context
+ is scheduled for execution for the timeslice duration, before
+ switching to the next context.
+
+ May be 0 to disable timeslicing.
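For reference, a minimal illustrative sketch of how these profile knobs surface in a resulting build configuration, assuming the stated defaults are kept (only the CONFIG_* symbol names and default values are taken from the entries above; SPIN_REQUEST is in microseconds, the rest in milliseconds):

    # illustrative .config fragment, defaults as defined in Kconfig.profile
    CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
    CONFIG_DRM_I915_PREEMPT_TIMEOUT=100
    CONFIG_DRM_I915_SPIN_REQUEST=5
    CONFIG_DRM_I915_STOP_TIMEOUT=100
    CONFIG_DRM_I915_TIMESLICE_DURATION=1

As the help texts note, setting HEARTBEAT_INTERVAL, PREEMPT_TIMEOUT or TIMESLICE_DURATION to 0 disables the corresponding mechanism.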
diff --git a/drivers/gpu/drm/i915/Kconfig.unstable b/drivers/gpu/drm/i915/Kconfig.unstable
new file mode 100644
index 000000000000..0c2276155c2b
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.unstable
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_I915_UNSTABLE
+ bool "Enable unstable API for early prototype development"
+ depends on EXPERT
+ depends on STAGING
+ depends on BROKEN # should never be enabled by distros!
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Enable prototype uAPI under general discussion before it is
+ finalized. Such prototypes may be withdrawn or substantially
+ changed before release. They are only enabled here so that a wide
+ range of interested parties (userspace driver developers) can
+ verify that the uAPI meets their expectations. This uAPI should
+ never be used in production.
+
+ Recommended for driver developers _only_.
+
+ If in the slightest bit of doubt, say "N".
+
+config DRM_I915_UNSTABLE_FAKE_LMEM
+ bool "Enable the experimental fake lmem"
+ depends on DRM_I915_UNSTABLE
+ default n
+ help
+ Convert some system memory into a fake local memory region for
+ testing.
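Because of the depends chain above, the fake local-memory option can only be reached through a configuration along these lines; a purely illustrative sketch (the BROKEN and !COMPILE_TEST dependencies are exactly what keeps this out of distro and all{yes,mod}config builds):

    # illustrative only -- never expected in a shipping configuration
    CONFIG_EXPERT=y
    CONFIG_STAGING=y
    CONFIG_BROKEN=y
    CONFIG_DRM_I915_UNSTABLE=y
    CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM=y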
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2587ea834f06..90dcf09f52cc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -46,10 +46,12 @@ i915-y += i915_drv.o \
i915_pci.o \
i915_scatterlist.o \
i915_suspend.o \
+ i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
intel_csr.o \
intel_device_info.o \
+ intel_memory_region.o \
intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
@@ -76,19 +78,24 @@ gt-y += \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
- gt/intel_engine_pool.o \
+ gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
+ gt/intel_engine_pool.o \
gt/intel_engine_user.o \
gt/intel_gt.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
- gt/intel_hangcheck.o \
+ gt/intel_gt_requests.o \
+ gt/intel_llc.o \
gt/intel_lrc.o \
+ gt/intel_mocs.o \
+ gt/intel_rc6.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
- gt/intel_ringbuffer.o \
- gt/intel_mocs.o \
+ gt/intel_ring.o \
+ gt/intel_ring_submission.o \
+ gt/intel_rps.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
gt/intel_workarounds.o
@@ -114,10 +121,12 @@ gem-y += \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
+ gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
+ gem/i915_gem_region.o \
gem/i915_gem_shmem.o \
gem/i915_gem_shrinker.o \
gem/i915_gem_stolen.o \
@@ -141,6 +150,7 @@ i915-y += \
i915_scheduler.o \
i915_trace_points.o \
i915_vma.o \
+ intel_region_lmem.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -172,6 +182,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_dpio_phy.o \
display/intel_dpll_mgr.o \
+ display/intel_dsb.o \
display/intel_fbc.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
@@ -182,7 +193,8 @@ i915-y += \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
- display/intel_tc.o
+ display/intel_tc.o \
+ display/intel_vga.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -235,7 +247,8 @@ i915-y += \
oa/i915_oa_cflgt2.o \
oa/i915_oa_cflgt3.o \
oa/i915_oa_cnl.o \
- oa/i915_oa_icl.o
+ oa/i915_oa_icl.o \
+ oa/i915_oa_tgl.o
i915-y += i915_perf.o
# Post-mortem debug and GPU hang state capture
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 6e398c33a524..325df29b0447 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1584,7 +1584,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->get_hw_state = gen11_dsi_get_hw_state;
encoder->type = INTEL_OUTPUT_DSI;
encoder->cloneable = 0;
- encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->pipe_mask = ~0;
encoder->power_domain = POWER_DOMAIN_PORT_DSI;
encoder->get_power_domains = gen11_dsi_get_power_domains;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 7cb2257bbb93..c2875b10adf9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -199,7 +199,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
- crtc_state->fb_changed = false;
crtc_state->fifo_changed = false;
crtc_state->preload_luts = false;
crtc_state->wm.need_postvbl_update = false;
@@ -265,10 +264,13 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
*/
mode = PS_SCALER_MODE_NORMAL;
} else {
+ struct intel_plane *linked =
+ plane_state->planar_linked_plane;
+
mode = PS_SCALER_MODE_PLANAR;
- if (plane_state->linked_plane)
- mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+ if (linked)
+ mode |= PS_PLANE_Y_SEL(linked->id);
}
} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
mode = PS_SCALER_MODE_NORMAL;
@@ -372,6 +374,15 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
*/
if (!plane) {
struct drm_plane_state *state;
+
+ /*
+ * GLK+ scalers don't have a HQ mode so it
+ * isn't necessary to change between HQ and dyn mode
+ * on those platforms.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ continue;
+
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
@@ -379,13 +390,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
plane->base.id);
return PTR_ERR(state);
}
-
- /*
- * the plane is added after plane checks are run,
- * but since this plane is unchanged just do the
- * minimum required validation.
- */
- crtc_state->base.planes_changed = true;
}
intel_plane = to_intel_plane(plane);
@@ -426,6 +430,13 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = state->modeset = false;
+ state->global_state_changed = false;
+ state->active_pipes = 0;
+ memset(&state->min_cdclk, 0, sizeof(state->min_cdclk));
+ memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level));
+ memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical));
+ memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual));
+ state->cdclk.pipe = INVALID_PIPE;
}
struct intel_crtc_state *
@@ -439,3 +450,40 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
+
+int intel_atomic_lock_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ state->global_state_changed = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex,
+ state->base.acquire_ctx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ state->global_state_changed = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ return 0;
+}
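The two helpers above encode the locking scheme relied on elsewhere in this patch (both intel_audio.c and the cdclk code below call intel_atomic_lock_global_state()); a minimal, hypothetical caller might look like the sketch below, with intel_atomic_serialize_global_state() as the stronger variant that additionally pulls every crtc state into the commit:

    /*
     * Hypothetical sketch of the intended call pattern; only the two
     * intel_atomic_*_global_state() helpers come from this patch.
     */
    static int example_update_global_state(struct intel_atomic_state *state)
    {
            int ret;

            /* Take every crtc mutex so device-wide state cannot be raced. */
            ret = intel_atomic_lock_global_state(state);
            if (ret)
                    return ret;

            /* ... read or update e.g. state->cdclk.force_min_cdclk here ... */

            return 0;
    }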
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 58065d3161a3..49d5cb1b9e0a 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -16,6 +16,7 @@ struct drm_crtc_state;
struct drm_device;
struct drm_i915_private;
struct drm_property;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -46,4 +47,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
+int intel_atomic_lock_global_state(struct intel_atomic_state *state);
+
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+
#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index d1fcdf206da4..98f557a9f8ee 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -138,18 +138,58 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
return cpp * crtc_state->pixel_rate;
}
+bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ struct intel_crtc_state *crtc_state;
+
+ if (!plane_state->base.visible || !plane->min_cdclk)
+ return false;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->min_cdclk[plane->id] =
+ plane->min_cdclk(crtc_state, plane_state);
+
+ /*
+ * Does the cdclk need to be bumped up?
+ *
+ * Note: we obviously need to be called before the new
+ * cdclk frequency is calculated so state->cdclk.logical
+ * hasn't been populated yet. Hence we look at the old
+ * cdclk state under dev_priv->cdclk.logical. This is
+ * safe as long as we hold at least one crtc mutex (which
+ * must be true since we have crtc_state).
+ */
+ if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id],
+ dev_priv->cdclk.logical.cdclk);
+ return true;
+ }
+
+ return false;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
+ const struct drm_framebuffer *fb = new_plane_state->base.fb;
int ret;
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
new_crtc_state->data_rate[plane->id] = 0;
+ new_crtc_state->min_cdclk[plane->id] = 0;
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -164,11 +204,11 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- is_planar_yuv_format(new_plane_state->base.fb->format->format))
+ drm_format_info_is_yuv_semiplanar(fb->format))
new_crtc_state->nv12_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+ fb->format->format == DRM_FORMAT_C8)
new_crtc_state->c8_planes |= BIT(plane->id);
if (new_plane_state->base.visible || old_plane_state->base.visible)
@@ -194,14 +234,11 @@ get_crtc_from_states(const struct intel_plane_state *old_plane_state,
return NULL;
}
-static int intel_plane_atomic_check(struct drm_plane *_plane,
- struct drm_plane_state *_new_plane_state)
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_atomic_state *state =
- to_intel_atomic_state(_new_plane_state->state);
struct intel_plane_state *new_plane_state =
- to_intel_plane_state(_new_plane_state);
+ intel_atomic_get_new_plane_state(state, plane);
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct intel_crtc *crtc =
@@ -320,9 +357,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
if (new_plane_state->base.visible) {
intel_update_plane(plane, new_crtc_state, new_plane_state);
- } else if (new_plane_state->slave) {
+ } else if (new_plane_state->planar_slave) {
struct intel_plane *master =
- new_plane_state->linked_plane;
+ new_plane_state->planar_linked_plane;
/*
* We update the slave plane from this function because
@@ -368,5 +405,4 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
- .atomic_check = intel_plane_atomic_check,
};
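A hedged sketch of how the new intel_plane_calc_min_cdclk() is meant to be consumed during atomic check; the wrapper below is hypothetical (only the helper itself and the for_each_new_intel_plane_in_state() iterator are assumed), but it shows the idea: if any plane reports that its requirement exceeds the current logical cdclk, the caller knows a full cdclk recomputation is needed.

    /*
     * Hypothetical caller: returns true if any plane in the state pushes
     * its min_cdclk above the currently computed logical cdclk.
     */
    static bool example_planes_need_cdclk_recalc(struct intel_atomic_state *state)
    {
            struct intel_plane_state *plane_state;
            struct intel_plane *plane;
            bool need_recalc = false;
            int i;

            for_each_new_intel_plane_in_state(state, plane, plane_state, i)
                    need_recalc |= intel_plane_calc_min_cdclk(state, plane);

            return need_recalc;
    }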
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index cb7ef4f9eafd..e61e9a82aadf 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -41,9 +41,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *plane_state);
+bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ddcccf4408c3..85e6b2bbb34f 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -28,6 +28,7 @@
#include <drm/i915_component.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
@@ -560,8 +561,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
- DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
- port_name(port), pipe_name(pipe));
+ DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe));
if (WARN_ON(port == PORT_A))
return;
@@ -609,8 +611,9 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
- DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
- port_name(port), pipe_name(pipe), drm_eld_size(eld));
+ DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(eld));
if (WARN_ON(port == PORT_A))
return;
@@ -816,13 +819,8 @@ retry:
to_intel_atomic_state(state)->cdclk.force_min_cdclk =
enable ? 2 * 96000 : 0;
- /*
- * Protects dev_priv->cdclk.force_min_cdclk
- * Need to lock this here in case we have no active pipes
- * and thus wouldn't lock it during the commit otherwise.
- */
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
- &ctx);
+ /* Protects dev_priv->cdclk.force_min_cdclk */
+ ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
if (!ret)
ret = drm_atomic_commit(state);
@@ -850,11 +848,23 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
- if (dev_priv->audio_power_refcount++ == 0)
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (dev_priv->audio_power_refcount++ == 0) {
+ if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
+ DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
+ dev_priv->audio_freq_cntrl);
+ }
+
+ /* Force CDCLK to 2*BCLK as long as we need audio powered. */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE(AUD_PIN_BUF_CTL,
+ (I915_READ(AUD_PIN_BUF_CTL) |
+ AUD_PIN_BUF_ENABLE));
+ }
+
return ret;
}
@@ -865,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
@@ -1114,6 +1124,12 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
+ if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
+ DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
+ dev_priv->audio_freq_cntrl);
+ }
+
dev_priv->audio_component_registered = true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3250c1b8dcca..63c1bd4c2954 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1399,6 +1399,7 @@ static enum port dvo_port_to_port(u8 dvo_port)
[PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
[PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+ [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1},
};
enum port port;
int i;
@@ -1625,7 +1626,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
expected_size = 37;
} else if (bdb->version <= 215) {
expected_size = 38;
- } else if (bdb->version <= 216) {
+ } else if (bdb->version <= 229) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
@@ -1843,7 +1844,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (!HAS_DISPLAY(dev_priv)) {
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
@@ -2258,6 +2259,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
case DP_AUX_F:
aux_ch = AUX_CH_F;
break;
+ case DP_AUX_G:
+ aux_ch = AUX_CH_G;
+ break;
default:
MISSING_CASE(info->alternate_aux_channel);
aux_ch = AUX_CH_A;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4969189e620f..98f064828a57 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2016 Intel Corporation
+ * Copyright © 2016-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,7 @@
#include <drm/i915_drm.h>
struct drm_i915_private;
+enum port;
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 688858ebe4d0..22e83f857de8 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -35,28 +35,54 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 1:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 2:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
+ if (IS_GEN(dev_priv, 12)) {
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+ } else if (IS_GEN(dev_priv, 11)) {
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
}
qi->num_channels = (val & 0xf0) >> 4;
qi->num_points = (val & 0xf00) >> 8;
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
+ if (IS_GEN(dev_priv, 12))
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
+ else if (IS_GEN(dev_priv, 11))
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
return 0;
}
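A worked example of the decode above, with an illustrative value only: if the pcode mailbox returned val == 0x0213 on a gen11 part, the fields break down as follows.

    /*
     * val == 0x0213 on gen11 (illustrative number):
     *   dram_type    = val & 0xf          = 3 -> INTEL_DRAM_LPDDR4
     *   num_channels = (val & 0xf0) >> 4  = 1
     *   num_points   = (val & 0xf00) >> 8 = 2
     *   t_bl         = 8  (non-DDR4 on gen11; the same DRAM type on gen12
     *                      would give t_bl = 16 instead)
     */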
@@ -132,20 +158,25 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
}
struct intel_sa_info {
- u8 deburst, mpagesize, deprogbwlimit, displayrtids;
+ u16 displayrtids;
+ u8 deburst, deprogbwlimit;
};
static const struct intel_sa_info icl_sa_info = {
.deburst = 8,
- .mpagesize = 16,
.deprogbwlimit = 25, /* GB/s */
.displayrtids = 128,
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv)
+static const struct intel_sa_info tgl_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 34, /* GB/s */
+ .displayrtids = 256,
+};
+
+static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
- const struct intel_sa_info *sa = &icl_sa_info;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels;
int deinterleave;
@@ -233,14 +264,16 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 11))
- icl_get_bw_info(dev_priv);
+ if (IS_GEN(dev_priv, 12))
+ icl_get_bw_info(dev_priv, &tgl_sa_info);
+ else if (IS_GEN(dev_priv, 11))
+ icl_get_bw_info(dev_priv, &icl_sa_info);
}
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
int num_planes)
{
- if (IS_GEN(dev_priv, 11))
+ if (INTEL_GEN(dev_priv) >= 11)
/*
* FIXME with SAGV disabled maybe we can assume
* point 1 will always be used? Seems to match
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index d0bc42e5039c..0caef2592a7e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_atomic.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
@@ -1161,28 +1162,88 @@ static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-static int bxt_calc_cdclk(int min_cdclk)
-{
- if (min_cdclk > 576000)
- return 624000;
- else if (min_cdclk > 384000)
- return 576000;
- else if (min_cdclk > 288000)
- return 384000;
- else if (min_cdclk > 144000)
- return 288000;
- else
- return 144000;
+static const struct intel_cdclk_vals bxt_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
+ {}
+};
+
+static const struct intel_cdclk_vals glk_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
+ {}
+};
+
+static const struct intel_cdclk_vals cnl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 168000, .divider = 4, .ratio = 35 },
+ { .refclk = 19200, .cdclk = 336000, .divider = 2, .ratio = 35 },
+ { .refclk = 19200, .cdclk = 528000, .divider = 2, .ratio = 55 },
+
+ { .refclk = 24000, .cdclk = 168000, .divider = 4, .ratio = 28 },
+ { .refclk = 24000, .cdclk = 336000, .divider = 2, .ratio = 28 },
+ { .refclk = 24000, .cdclk = 528000, .divider = 2, .ratio = 44 },
+ {}
+};
+
+static const struct intel_cdclk_vals icl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+{
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk >= min_cdclk)
+ return table[i].cdclk;
+
+ WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->cdclk.hw.ref);
+ return 0;
}
-static int glk_calc_cdclk(int min_cdclk)
+static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- if (min_cdclk > 158400)
- return 316800;
- else if (min_cdclk > 79200)
- return 158400;
- else
- return 79200;
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ if (cdclk == dev_priv->cdclk.hw.bypass)
+ return 0;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk == cdclk)
+ return dev_priv->cdclk.hw.ref * table[i].ratio;
+
+ WARN(1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
+ return 0;
}
static u8 bxt_calc_voltage_level(int cdclk)
@@ -1190,69 +1251,99 @@ static u8 bxt_calc_voltage_level(int cdclk)
return DIV_ROUND_UP(cdclk, 25000);
}
-static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static u8 cnl_calc_voltage_level(int cdclk)
{
- int ratio;
-
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk > 336000)
+ return 2;
+ else if (cdclk > 168000)
+ return 1;
+ else
return 0;
+}
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 144000:
- case 288000:
- case 384000:
- case 576000:
- ratio = 60;
- break;
- case 624000:
- ratio = 65;
- break;
- }
+static u8 icl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
- return dev_priv->cdclk.hw.ref * ratio;
+static u8 ehl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 312000)
+ return 2;
+ else if (cdclk > 180000)
+ return 1;
+ else
+ return 0;
}
-static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
{
- int ratio;
+ if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+ cdclk_state->ref = 24000;
+ else
+ cdclk_state->ref = 19200;
+}
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+static void icl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
- switch (cdclk) {
+ switch (dssm) {
default:
- MISSING_CASE(cdclk);
+ MISSING_CASE(dssm);
/* fall through */
- case 79200:
- case 158400:
- case 316800:
- ratio = 33;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
+ cdclk_state->ref = 24000;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
+ cdclk_state->ref = 19200;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
+ cdclk_state->ref = 38400;
break;
}
-
- return dev_priv->cdclk.hw.ref * ratio;
}
-static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
{
- u32 val;
+ u32 val, ratio;
- cdclk_state->ref = 19200;
- cdclk_state->vco = 0;
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_readout_refclk(dev_priv, cdclk_state);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_readout_refclk(dev_priv, cdclk_state);
+ else
+ cdclk_state->ref = 19200;
val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
+ (val & BXT_DE_PLL_LOCK) == 0) {
+ /*
+ * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
+ * setting it to zero is a way to signal that.
+ */
+ cdclk_state->vco = 0;
return;
+ }
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
+ /*
+ * CNL+ have the ratio directly in the PLL enable register, gen9lp has
+ * it in a separate PLL control register.
+ */
+ if (INTEL_GEN(dev_priv) >= 10)
+ ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
+ else
+ ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- val = I915_READ(BXT_DE_PLL_CTL);
- cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+ cdclk_state->vco = ratio * cdclk_state->ref;
}
static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
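Walking the table-driven lookup through with numbers taken straight from bxt_cdclk_table[] above: for a BXT part with a 19200 kHz reference clock and a 300000 kHz minimum requirement:

    /*
     * bxt_calc_cdclk(dev_priv, 300000)
     *         -> 384000      (first 19200 kHz row with cdclk >= 300000)
     * bxt_calc_cdclk_pll_vco(dev_priv, 384000)
     *         -> 19200 * 60 = 1152000   (that row's ratio)
     * and later, in bxt_set_cdclk():
     *         DIV_ROUND_CLOSEST(1152000, 384000) == 3
     *         -> BXT_CDCLK_CD2X_DIV_SEL_1_5 (the 1.5 divider; GLK and
     *            gen10+ warn on it, so this path is BXT-only)
     */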
@@ -1261,12 +1352,19 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
u32 divider;
int div;
- bxt_de_pll_update(dev_priv, cdclk_state);
+ bxt_de_pll_readout(dev_priv, cdclk_state);
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+ if (INTEL_GEN(dev_priv) >= 12)
+ cdclk_state->bypass = cdclk_state->ref / 2;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ cdclk_state->bypass = 50000;
+ else
+ cdclk_state->bypass = cdclk_state->ref;
- if (cdclk_state->vco == 0)
+ if (cdclk_state->vco == 0) {
+ cdclk_state->cdclk = cdclk_state->bypass;
goto out;
+ }
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@@ -1275,13 +1373,15 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+ WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
+ WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
div = 8;
break;
default:
@@ -1297,7 +1397,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
- bxt_calc_voltage_level(cdclk_state->cdclk);
+ dev_priv->display.calc_voltage_level(cdclk_state->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
@@ -1332,259 +1432,6 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
dev_priv->cdclk.hw.vco = vco;
}
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
- enum pipe pipe)
-{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
- u32 val, divider;
- int ret;
-
- /* cdclk = vco / 2 / div{1,1.5,2,4} */
- switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
- /* fall through */
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
- case 3:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
- divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- break;
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 8:
- divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- }
-
- /*
- * Inform power controller of upcoming frequency change. BSpec
- * requires us to wait up to 150usec, but that leads to timeouts;
- * the 2ms used here is based on experiment.
- */
- ret = sandybridge_pcode_write_timeout(dev_priv,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
- if (ret) {
- DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- bxt_de_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- bxt_de_pll_enable(dev_priv, vco);
-
- val = divider | skl_cdclk_decimal(cdclk);
- if (pipe == INVALID_PIPE)
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- else
- val |= BXT_CDCLK_CD2X_PIPE(pipe);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (cdclk >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- I915_WRITE(CDCLK_CTL, val);
-
- if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
-
- /*
- * The timeout isn't specified, the 2ms used here is based on
- * experiment.
- * FIXME: Waiting for the request completion could be delayed until
- * the next PCODE request based on BSpec.
- */
- ret = sandybridge_pcode_write_timeout(dev_priv,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level, 150, 2);
- if (ret) {
- DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- intel_update_cdclk(dev_priv);
-}
-
-static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
- u32 cdctl, expected;
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
- goto sanitize;
-
- /* DPLL okay; verify the cdclock
- *
- * Some BIOS versions leave an incorrect decimal frequency value and
- * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
- * so sanitize this register.
- */
- cdctl = I915_READ(CDCLK_CTL);
- /*
- * Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
- * (PIPE_NONE).
- */
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (dev_priv->cdclk.hw.cdclk >= 500000)
- expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
- /* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
-
- /* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
-}
-
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state;
-
- bxt_sanitize_cdclk(dev_priv);
-
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0)
- return;
-
- cdclk_state = dev_priv->cdclk.hw;
-
- /*
- * FIXME:
- * - The initial CDCLK needs to be read from VBT.
- * Need to make this change after VBT has changes for BXT.
- */
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk_state.cdclk = glk_calc_cdclk(0);
- cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk);
- } else {
- cdclk_state.cdclk = bxt_calc_cdclk(0);
- cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
- }
- cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static int cnl_calc_cdclk(int min_cdclk)
-{
- if (min_cdclk > 336000)
- return 528000;
- else if (min_cdclk > 168000)
- return 336000;
- else
- return 168000;
-}
-
-static u8 cnl_calc_voltage_level(int cdclk)
-{
- if (cdclk > 336000)
- return 2;
- else if (cdclk > 168000)
- return 1;
- else
- return 0;
-}
-
-static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 val;
-
- if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
- cdclk_state->ref = 24000;
- else
- cdclk_state->ref = 19200;
-
- cdclk_state->vco = 0;
-
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
- return;
-
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
-
- cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
-}
-
-static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 divider;
- int div;
-
- cnl_cdclk_pll_update(dev_priv, cdclk_state);
-
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
-
- if (cdclk_state->vco == 0)
- goto out;
-
- divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
-
- switch (divider) {
- case BXT_CDCLK_CD2X_DIV_SEL_1:
- div = 2;
- break;
- case BXT_CDCLK_CD2X_DIV_SEL_2:
- div = 4;
- break;
- default:
- MISSING_CASE(divider);
- return;
- }
-
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
-
- out:
- /*
- * Can't read this out :( Let's assume it's
- * at least what the CDCLK frequency requires.
- */
- cdclk_state->voltage_level =
- cnl_calc_voltage_level(cdclk_state->cdclk);
-}
-
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -1618,7 +1465,27 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
dev_priv->cdclk.hw.vco = vco;
}
-static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
+static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (pipe == INVALID_PIPE)
+ return TGL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return TGL_CDCLK_CD2X_PIPE(pipe);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ if (pipe == INVALID_PIPE)
+ return ICL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return ICL_CDCLK_CD2X_PIPE(pipe);
+ } else {
+ if (pipe == INVALID_PIPE)
+ return BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ return BXT_CDCLK_CD2X_PIPE(pipe);
+ }
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
@@ -1627,17 +1494,28 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
u32 val, divider;
int ret;
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ /* Inform power controller of upcoming frequency change. */
+ if (INTEL_GEN(dev_priv) >= 10)
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ else
+ /*
+ * BSpec requires us to wait up to 150usec, but that leads to
+ * timeouts; the 2ms used here is based on experiment.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 150, 2);
+
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
+ DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
- /* cdclk = vco / 2 / div{1,2} */
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
@@ -1646,67 +1524,87 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
+ case 3:
+ WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+ break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
+ case 8:
+ WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+ break;
}
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_enable(dev_priv, vco);
+ if (INTEL_GEN(dev_priv) >= 10) {
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_disable(dev_priv);
- val = divider | skl_cdclk_decimal(cdclk);
- if (pipe == INVALID_PIPE)
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- else
- val |= BXT_CDCLK_CD2X_PIPE(pipe);
- I915_WRITE(CDCLK_CTL, val);
+ if (dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_enable(dev_priv, vco);
- if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
+ } else {
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_disable(dev_priv);
- /* inform PCU of the change */
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ if (dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
+ }
- intel_update_cdclk(dev_priv);
+ val = divider | skl_cdclk_decimal(cdclk) |
+ bxt_cdclk_cd2x_pipe(dev_priv, pipe);
/*
- * Can't read out the voltage level :(
- * Let's just assume everything is as expected.
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
+ if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ I915_WRITE(CDCLK_CTL, val);
-static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+ if (INTEL_GEN(dev_priv) >= 10) {
+ ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
+ } else {
+ /*
+ * The timeout isn't specified, the 2ms used here is based on
+ * experiment.
+ * FIXME: Waiting for the request completion could be delayed
+ * until the next PCODE request based on BSpec.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_state->voltage_level,
+ 150, 2);
+ }
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 168000:
- case 336000:
- ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
- break;
- case 528000:
- ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
- break;
+ if (ret) {
+ DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
}
- return dev_priv->cdclk.hw.ref * ratio;
+ intel_update_cdclk(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
-static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
+ int cdclk, vco;
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
@@ -1727,262 +1625,65 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* dividers both synching to an active pipe, or asynchronously
* (PIPE_NONE).
*/
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
- /* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ /* Make sure this is a legal cdclk value for the platform */
+ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
+ if (cdclk != dev_priv->cdclk.hw.cdclk)
+ goto sanitize;
- /* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
-}
+ /* Make sure the VCO is correct for the cdclk */
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+ if (vco != dev_priv->cdclk.hw.vco)
+ goto sanitize;
-static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
-{
- static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 };
- static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 };
- const int *ranges;
- int len, i;
+ expected = skl_cdclk_decimal(cdclk);
- switch (ref) {
- default:
- MISSING_CASE(ref);
- /* fall through */
- case 24000:
- ranges = ranges_24;
- len = ARRAY_SIZE(ranges_24);
- break;
- case 19200:
- case 38400:
- ranges = ranges_19_38;
- len = ARRAY_SIZE(ranges_19_38);
+ /* Figure out what CD2X divider we should be using for this cdclk */
+ switch (DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.vco,
+ dev_priv->cdclk.hw.cdclk)) {
+ case 2:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_1;
break;
- }
-
- for (i = 0; i < len; i++) {
- if (min_cdclk <= ranges[i])
- return ranges[i];
- }
-
- WARN_ON(min_cdclk > ranges[len - 1]);
- return ranges[len - 1];
-}
-
-static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
-
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
-
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 172800:
- case 307200:
- case 556800:
- case 652800:
- WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
- dev_priv->cdclk.hw.ref != 38400);
+ case 3:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
- case 180000:
- case 312000:
- case 552000:
- case 648000:
- WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+ case 4:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_2;
break;
- case 192000:
- WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
- dev_priv->cdclk.hw.ref != 38400 &&
- dev_priv->cdclk.hw.ref != 24000);
+ case 8:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_4;
break;
- }
-
- ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
-
- return dev_priv->cdclk.hw.ref * ratio;
-}
-
-static void icl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
- enum pipe pipe)
-{
- unsigned int cdclk = cdclk_state->cdclk;
- unsigned int vco = cdclk_state->vco;
- int ret;
-
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
- if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
- return;
- }
-
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_enable(dev_priv, vco);
-
- /*
- * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
- * divider here synchronized to a pipe while CDCLK is on, nor will we
- * need the corresponding vblank wait.
- */
- I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
- skl_cdclk_decimal(cdclk));
-
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
-
- intel_update_cdclk(dev_priv);
-
- /*
- * Can't read out the voltage level :(
- * Let's just assume everything is as expected.
- */
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
-
-static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
-{
- if (IS_ELKHARTLAKE(dev_priv)) {
- if (cdclk > 312000)
- return 2;
- else if (cdclk > 180000)
- return 1;
- else
- return 0;
- } else {
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
- }
-}
-
-static void icl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 val;
-
- cdclk_state->bypass = 50000;
-
- val = I915_READ(SKL_DSSM);
- switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
default:
- MISSING_CASE(val);
- /* fall through */
- case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
- cdclk_state->ref = 24000;
- break;
- case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
- cdclk_state->ref = 19200;
- break;
- case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
- cdclk_state->ref = 38400;
- break;
- }
-
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
- (val & BXT_DE_PLL_LOCK) == 0) {
- /*
- * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
- * setting it to zero is a way to signal that.
- */
- cdclk_state->vco = 0;
- cdclk_state->cdclk = cdclk_state->bypass;
- goto out;
+ goto sanitize;
}
- cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
-
- val = I915_READ(CDCLK_CTL);
- WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
-
- cdclk_state->cdclk = cdclk_state->vco / 2;
-
-out:
/*
- * Can't read this out :( Let's assume it's
- * at least what the CDCLK frequency requires.
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
*/
- cdclk_state->voltage_level =
- icl_calc_voltage_level(dev_priv, cdclk_state->cdclk);
-}
-
-static void icl_init_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state sanitized_state;
- u32 val;
-
- /* This sets dev_priv->cdclk.hw. */
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
- /* This means CDCLK disabled. */
- if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
- goto sanitize;
-
- val = I915_READ(CDCLK_CTL);
-
- if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
- goto sanitize;
-
- if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
- goto sanitize;
+ if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- return;
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
sanitize:
DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
- sanitized_state.ref = dev_priv->cdclk.hw.ref;
- sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
- sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
- sanitized_state.cdclk);
- sanitized_state.voltage_level =
- icl_calc_voltage_level(dev_priv,
- sanitized_state.cdclk);
-
- icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
-}
-
-static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv,
- cdclk_state.cdclk);
+ /* force cdclk programming */
+ dev_priv->cdclk.hw.cdclk = 0;
- icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ /* force full PLL disable + enable */
+ dev_priv->cdclk.hw.vco = -1;
}
-static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
- cnl_sanitize_cdclk(dev_priv);
+ bxt_sanitize_cdclk(dev_priv);
if (dev_priv->cdclk.hw.cdclk != 0 &&
dev_priv->cdclk.hw.vco != 0)
@@ -1990,22 +1691,29 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cnl_calc_cdclk(0);
- cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
- cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+ /*
+ * FIXME:
+ * - The initial CDCLK needs to be read from VBT.
+ * Need to make this change after VBT has changes for BXT.
+ */
+ cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0);
+ cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
+ cdclk_state.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
- cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_state.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
@@ -2019,14 +1727,10 @@ static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_cdclk_init(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
- icl_init_cdclk(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_init_cdclk(i915);
+ if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ bxt_init_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_init_cdclk(i915);
- else if (IS_GEN9_LP(i915))
- bxt_init_cdclk(i915);
}
/**
@@ -2038,14 +1742,10 @@ void intel_cdclk_init(struct drm_i915_private *i915)
*/
void intel_cdclk_uninit(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
- icl_uninit_cdclk(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_uninit_cdclk(i915);
+ if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
+ bxt_uninit_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_uninit_cdclk(i915);
- else if (IS_GEN9_LP(i915))
- bxt_uninit_cdclk(i915);
}
/**
@@ -2073,9 +1773,9 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
* Returns:
* True if the CDCLK states require just a cd2x divider update, false if not.
*/
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
{
/* Older hw doesn't have the capability */
if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
@@ -2094,8 +1794,8 @@ bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
* Returns:
* True if the CDCLK states don't match, false if they do.
*/
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
{
return intel_cdclk_needs_modeset(a, b) ||
a->voltage_level != b->voltage_level;
@@ -2200,9 +1900,11 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
intel_set_cdclk(dev_priv, new_state, pipe);
}
-static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
- int pixel_rate)
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ int pixel_rate = crtc_state->pixel_rate;
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
else if (IS_GEN(dev_priv, 9) ||
@@ -2210,10 +1912,25 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
return DIV_ROUND_UP(pixel_rate * 100, 95);
+ else if (crtc_state->double_wide)
+ return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
else
return DIV_ROUND_UP(pixel_rate * 100, 90);
}
+static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);
+
+ return min_cdclk;
+}
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
@@ -2223,7 +1940,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (!crtc_state->base.enable)
return 0;
- min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
+ min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
@@ -2282,6 +1999,9 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
IS_GEMINILAKE(dev_priv))
min_cdclk = max(158400, min_cdclk);
+ /* Account for additional needs from the planes */
+ min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
@@ -2303,11 +2023,20 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
sizeof(state->min_cdclk));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (min_cdclk < 0)
return min_cdclk;
+ if (state->min_cdclk[i] == min_cdclk)
+ continue;
+
state->min_cdclk[i] = min_cdclk;
+
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
}
min_cdclk = state->cdclk.force_min_cdclk;
@@ -2318,6 +2047,10 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
}
/*
+ * Account for port clock min voltage level requirements.
+ * This only really does something on CNL+ but can be
+ * called on earlier platforms as well.
+ *
* Note that this function assumes that 0 is
* the lowest voltage value, and higher values
* correspond to increasingly higher voltages.
@@ -2326,7 +2059,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* future platforms this code will need to be
* adjusted.
*/
-static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
+static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -2339,11 +2072,21 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
sizeof(state->min_voltage_level));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
if (crtc_state->base.enable)
- state->min_voltage_level[i] =
- crtc_state->min_voltage_level;
+ min_voltage_level = crtc_state->min_voltage_level;
else
- state->min_voltage_level[i] = 0;
+ min_voltage_level = 0;
+
+ if (state->min_voltage_level[i] == min_voltage_level)
+ continue;
+
+ state->min_voltage_level[i] = min_voltage_level;
+
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
}
min_voltage_level = 0;
@@ -2369,7 +2112,7 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
@@ -2400,7 +2143,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
@@ -2470,7 +2213,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
state->cdclk.actual.vco = vco;
@@ -2487,38 +2230,33 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int min_cdclk, cdclk, vco;
+ int min_cdclk, min_voltage_level, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(min_cdclk);
- vco = glk_de_pll_vco(dev_priv, cdclk);
- } else {
- cdclk = bxt_calc_cdclk(min_cdclk);
- vco = bxt_de_pll_vco(dev_priv, cdclk);
- }
+ min_voltage_level = bxt_compute_min_voltage_level(state);
+ if (min_voltage_level < 0)
+ return min_voltage_level;
+
+ cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
state->cdclk.logical.vco = vco;
state->cdclk.logical.cdclk = cdclk;
state->cdclk.logical.voltage_level =
- bxt_calc_voltage_level(cdclk);
-
- if (!state->active_crtcs) {
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = glk_de_pll_vco(dev_priv, cdclk);
- } else {
- cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = bxt_de_pll_vco(dev_priv, cdclk);
- }
+ max_t(int, min_voltage_level,
+ dev_priv->display.calc_voltage_level(cdclk));
+
+ if (!state->active_pipes) {
+ cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
state->cdclk.actual.vco = vco;
state->cdclk.actual.cdclk = cdclk;
state->cdclk.actual.voltage_level =
- bxt_calc_voltage_level(cdclk);
+ dev_priv->display.calc_voltage_level(cdclk);
} else {
state->cdclk.actual = state->cdclk.logical;
}
@@ -2526,70 +2264,138 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
-static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int min_cdclk, cdclk, vco;
+ struct intel_crtc *crtc;
- min_cdclk = intel_compute_min_cdclk(state);
- if (min_cdclk < 0)
- return min_cdclk;
+ /*
+ * Add all pipes to the state, and force
+ * a modeset on all the active ones.
+ */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
- cdclk = cnl_calc_cdclk(min_cdclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
- max(cnl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(state));
+ if (!crtc_state->base.active ||
+ drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ continue;
- if (!state->active_crtcs) {
- cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ crtc_state->base.mode_changed = true;
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
- cnl_calc_voltage_level(cdclk);
- } else {
- state->cdclk.actual = state->cdclk.logical;
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ crtc_state->update_planes |= crtc_state->active_planes;
}
return 0;
}
-static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- unsigned int ref = state->cdclk.logical.ref;
- int min_cdclk, cdclk, vco;
+ int min_cdclk;
+ /*
+ * We can't change the cdclk frequency, but we still want to
+ * check that the required minimum frequency doesn't exceed
+ * the actual cdclk frequency.
+ */
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- cdclk = icl_calc_cdclk(min_cdclk, ref);
- vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+ return 0;
+}
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
- max(icl_calc_voltage_level(dev_priv, cdclk),
- cnl_compute_min_voltage_level(state));
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum pipe pipe;
+ int ret;
- if (!state->active_crtcs) {
- cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
- vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+ ret = dev_priv->display.modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
- icl_calc_voltage_level(dev_priv, cdclk);
+ /*
+ * Writes to dev_priv->cdclk.{actual,logical} must be protected
+ * by holding all the crtc mutexes even if we don't end up
+ * touching the hardware
+ */
+ if (intel_cdclk_changed(&dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ /*
+ * Also serialize commits across all crtcs
+ * if the actual hw needs to be poked.
+ */
+ ret = intel_atomic_serialize_global_state(state);
+ if (ret)
+ return ret;
+ } else if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+ &state->cdclk.logical)) {
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
} else {
- state->cdclk.actual = state->cdclk.logical;
+ return 0;
+ }
+
+ if (is_power_of_2(state->active_pipes) &&
+ intel_cdclk_needs_cd2x_update(dev_priv,
+ &dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+
+ pipe = ilog2(state->active_pipes);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ pipe = INVALID_PIPE;
+ } else {
+ pipe = INVALID_PIPE;
+ }
+
+ if (pipe != INVALID_PIPE) {
+ state->cdclk.pipe = pipe;
+
+ DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n",
+ pipe_name(pipe));
+ } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ /* All pipes must be switched off while we change the cdclk. */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+
+ state->cdclk.pipe = INVALID_PIPE;
+
+ DRM_DEBUG_KMS("Modeset required for cdclk change\n");
}
+ DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ state->cdclk.logical.cdclk,
+ state->cdclk.actual.cdclk);
+ DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
+ state->cdclk.logical.voltage_level,
+ state->cdclk.actual.voltage_level);
+
return 0;
}
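As a reading aid for the is_power_of_2()/ilog2() check above: when exactly one bit is set in the active-pipe mask, its index identifies the only active pipe, and a cd2x-only cdclk change can be attempted with that pipe still running. A standalone sketch of that selection, with a hypothetical pipe mask:

#include <stdio.h>

static int is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int ilog2_u32(unsigned int n)
{
	int bit = -1;

	while (n) {
		n >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned int active_pipes = 0x4;	/* hypothetical: only the third pipe active */

	if (is_power_of_2(active_pipes))
		printf("single active pipe index: %d\n", ilog2_u32(active_pipes));
	else
		printf("zero or multiple pipes active, full modeset needed for cdclk\n");
	return 0;
}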
@@ -2809,15 +2615,29 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11) {
- dev_priv->display.set_cdclk = icl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = cnl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
+ dev_priv->cdclk.table = cnl_cdclk_table;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
+ if (IS_GEMINILAKE(dev_priv))
+ dev_priv->cdclk.table = glk_cdclk_table;
+ else
+ dev_priv->cdclk.table = bxt_cdclk_table;
} else if (IS_GEN9_BC(dev_priv)) {
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
@@ -2830,13 +2650,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+ } else {
+ dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}
- if (INTEL_GEN(dev_priv) >= 11)
- dev_priv->display.get_cdclk = icl_get_cdclk;
- else if (IS_CANNONLAKE(dev_priv))
- dev_priv->display.get_cdclk = cnl_get_cdclk;
- else if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 4d6f7f5f8930..cf71394cc79c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -15,6 +15,13 @@ struct intel_atomic_state;
struct intel_cdclk_state;
struct intel_crtc_state;
+struct intel_cdclk_vals {
+ u16 refclk;
+ u32 cdclk;
+ u8 divider; /* CD2X divider * 2 */
+ u8 ratio;
+};
+
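A small consistency check for the new table format, assuming the documented convention that 'divider' stores twice the CD2X divider and that the PLL VCO is refclk * ratio, so cdclk = refclk * ratio / divider; the sample row is hypothetical, not copied from the driver tables:

#include <stdio.h>

struct cdclk_vals {
	unsigned short refclk;	/* kHz */
	unsigned int cdclk;	/* kHz */
	unsigned char divider;	/* CD2X divider * 2 */
	unsigned char ratio;	/* PLL feedback ratio (assumed) */
};

int main(void)
{
	/* hypothetical row: 19.2 MHz refclk, ratio 18 -> 345.6 MHz VCO, CD2X divider 1 */
	struct cdclk_vals v = { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 };
	unsigned int vco = v.refclk * v.ratio;

	printf("vco=%u kHz, cdclk=%u kHz (%s)\n", vco, vco / v.divider,
	       vco / v.divider == v.cdclk ? "consistent" : "inconsistent");
	return 0;
}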
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init(struct drm_i915_private *i915);
void intel_cdclk_uninit(struct drm_i915_private *i915);
@@ -22,13 +29,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
void intel_cdclk_swap_state(struct intel_atomic_state *state);
void
intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
@@ -42,5 +44,6 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context);
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index aa1e2c670bc4..aa3a063549c3 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -43,6 +43,21 @@
#define LEGACY_LUT_LENGTH 256
/*
+ * ILK+ csc matrix:
+ *
+ * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0|
+ * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1|
+ * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2|
+ *
+ * ILK/SNB don't have explicit post offsets, and instead
+ * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used:
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16
+ */
+
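A floating-point sketch of the matrix form documented above (out = coeff * (in + preoff) + postoff); the fixed-point register encoding and the actual coefficient values are not modeled here:

#include <stdio.h>

/* out = coeff * (in + preoff) + postoff, per the comment above (float sketch only) */
static void csc_apply(const float coeff[9], const float preoff[3],
		      const float postoff[3], const float in[3], float out[3])
{
	int row, col;

	for (row = 0; row < 3; row++) {
		out[row] = postoff[row];
		for (col = 0; col < 3; col++)
			out[row] += coeff[row * 3 + col] * (in[col] + preoff[col]);
	}
}

int main(void)
{
	const float identity[9] = { 1, 0, 0,  0, 1, 0,  0, 0, 1 };
	const float zero[3] = { 0, 0, 0 };
	const float rgb[3] = { 0.25f, 0.5f, 0.75f };	/* hypothetical pixel */
	float out[3];

	csc_apply(identity, zero, zero, rgb, out);
	printf("%.2f %.2f %.2f\n", out[0], out[1], out[2]);
	return 0;
}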
+/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
* number of fractional bits.
@@ -59,37 +74,38 @@
#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+/* Nop pre/post offsets */
static const u16 ilk_csc_off_zero[3] = {};
+/* Identity matrix */
static const u16 ilk_csc_coeff_identity[9] = {
ILK_CSC_COEFF_1_0, 0, 0,
0, ILK_CSC_COEFF_1_0, 0,
0, 0, ILK_CSC_COEFF_1_0,
};
+/* Limited range RGB post offsets */
static const u16 ilk_csc_postoff_limited_range[3] = {
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
};
+/* Full range RGB -> limited range RGB matrix */
static const u16 ilk_csc_coeff_limited_range[9] = {
ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
};
-/*
- * These values are direct register values specified in the Bspec,
- * for RGB->YUV conversion matrix (colorspace BT709)
- */
+/* BT.709 full range RGB -> limited range YCbCr matrix */
static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
0x1e08, 0x9cc0, 0xb528,
0x2ba8, 0x09d8, 0x37e8,
0xbce8, 0x9ad8, 0x1e08,
};
-/* Post offset values for RGB->YCBCR conversion */
+/* Limited range YCbCr post offsets */
static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
0x0800, 0x0100, 0x0800,
};
@@ -611,12 +627,13 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
/*
* Program the gc max 2 register to clamp values > 1.0.
@@ -624,10 +641,15 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
* from 3.0 to 7.0
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0),
+ 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1),
+ 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2),
+ 1 << 16);
}
+
+ intel_dsb_put(dsb);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
@@ -787,78 +809,83 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ intel_dsb_put(dsb);
}
static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
const struct drm_color_lut *lut = blob->data;
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
- * Every entry in the multi-segment LUT is corresponding to a superfine
- * segment step which is 1/(8 * 128 * 256).
+ * Program Super Fine segment (let's call it seg1)...
*
- * Superfine segment has 9 entries, corresponding to values
- * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
+ * Super Fine segment's step is 1/(8 * 128 * 256) and it has
+ * 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
+ * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
- I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
+
+ intel_dsb_put(dsb);
}
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
- *
* Program Fine segment (let's call it seg2)...
*
- * Fine segment's step is 1/(128 * 256) ie 1/(128 * 256), 2/(128*256)
- * ... 256/(128*256). So in order to program fine segment of LUT we
- * need to pick every 8'th entry in LUT, and program 256 indexes.
+ * Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256)
+ * ... 256/(128 * 256). So in order to program the fine segment of the LUT we
+ * need to pick every 8th entry in the LUT, and program 256 indexes.
*
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
- * with seg2[0] being unused by the hardware.
+ * seg2[0] being unused by the hardware.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
* Program Coarse segment (let's call it seg3)...
*
- * Coarse segment's starts from index 0 and it's step is 1/256 ie 0,
- * 1/256, 2/256 ...256/256. As per the description of each entry in LUT
+ * Coarse segment starts from index 0 and its step is 1/256, i.e. 0,
+ * 1/256, 2/256 ... 256/256. As per the description of each entry in LUT
* above, we need to pick every (8 * 128)th entry in LUT, and
* program 256 of those.
*
@@ -868,20 +895,24 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
icl_load_gcmax(crtc_state, entry);
ivb_load_lut_ext_max(crtc);
+ intel_dsb_put(dsb);
}
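Putting the three segment comments together, the software LUT indices each segment consumes can be enumerated as below; this is a sketch of the index selection only, not of the DSB register programming:

#include <stdio.h>

int main(void)
{
	int i;

	/* Super Fine segment: the first 9 entries, step 1/(8 * 128 * 256) */
	for (i = 0; i < 9; i++)
		printf("seg1[%d] = lut[%d]\n", i, i);

	/* Fine segment: every 8th entry, 256 entries starting at lut[8] */
	for (i = 1; i < 257; i++)
		printf("seg2[%d] = lut[%d]\n", i, i * 8);

	/* Coarse segment: every (8 * 128)th entry, 256 entries */
	for (i = 0; i < 256; i++)
		printf("seg3[%d] = lut[%d]\n", i, i * 8 * 128);

	/* The last LUT entry is programmed into GCMAX */
	printf("gcmax = lut[%d]\n", 256 * 8 * 128);
	return 0;
}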
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
if (crtc_state->base.degamma_lut)
glk_load_degamma_lut(crtc_state);
@@ -890,16 +921,17 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
case GAMMA_MODE_MODE_8BIT:
i9xx_load_luts(crtc_state);
break;
-
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
icl_program_gamma_multi_segment(crtc_state);
break;
-
default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
}
+
+ intel_dsb_commit(dsb);
+ intel_dsb_put(dsb);
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -1250,6 +1282,21 @@ static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
return GAMMA_MODE_MODE_10BIT;
}
+static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * CSC comes after the LUT in RGB->YCbCr mode.
+ * RGB->YCbCr needs the limited range offsets added to
+ * the output. RGB limited range output is handled by
+ * the hw automagically elsewhere.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return CSC_BLACK_SCREEN_OFFSET;
+
+ return CSC_MODE_YUV_TO_RGB |
+ CSC_POSITION_BEFORE_GAMMA;
+}
+
static int ilk_color_check(struct intel_crtc_state *crtc_state)
{
int ret;
@@ -1263,15 +1310,15 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
!crtc_state->c8_planes;
/*
- * We don't expose the ctm on ilk/snb currently,
- * nor do we enable YCbCr output. Also RGB limited
- * range output is handled by the hw automagically.
+ * We don't expose the ctm on ilk/snb currently, also RGB
+ * limited range output is handled by the hw automagically.
*/
- crtc_state->csc_enable = false;
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;
crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
- crtc_state->csc_mode = 0;
+ crtc_state->csc_mode = ilk_csc_mode(crtc_state);
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
@@ -1432,6 +1479,403 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
return 0;
}
+static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ return 10;
+ else
+ return i9xx_gamma_precision(crtc_state);
+}
+
+static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ return chv_gamma_precision(crtc_state);
+ else
+ return i9xx_gamma_precision(crtc_state);
+ } else {
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ return glk_gamma_precision(crtc_state);
+ else if (IS_IRONLAKE(dev_priv))
+ return ilk_gamma_precision(crtc_state);
+ }
+
+ return 0;
+}
+
+static bool err_check(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2, u32 err)
+{
+ return ((abs((long)lut2->red - lut1->red)) <= err) &&
+ ((abs((long)lut2->blue - lut1->blue)) <= err) &&
+ ((abs((long)lut2->green - lut1->green)) <= err);
+}
+
+static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
+{
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (!err_check(&lut1[i], &lut2[i], err))
+ return false;
+ }
+
+ return true;
+}
+
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision)
+{
+ struct drm_color_lut *lut1, *lut2;
+ int lut_size1, lut_size2;
+ u32 err;
+
+ if (!blob1 != !blob2)
+ return false;
+
+ if (!blob1)
+ return true;
+
+ lut_size1 = drm_color_lut_size(blob1);
+ lut_size2 = drm_color_lut_size(blob2);
+
+ /* check sw and hw lut size */
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (lut_size1 != lut_size2)
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ lut1 = blob1->data;
+ lut2 = blob2->data;
+
+ err = 0xffff >> bit_precision;
+
+ /* check that the sw and hw lut entries are equal */
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (!intel_color_lut_entry_equal(lut1, lut2,
+ lut_size2, err))
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ return true;
+}
+
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
+
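A quick worked example of the packing helper above, using a user-space copy of the same arithmetic; the 10-bit input value is illustrative:

#include <stdio.h>

/* Same arithmetic as intel_color_lut_pack() above, copied here for illustration */
static unsigned int lut_pack(unsigned int val, unsigned int bit_precision)
{
	unsigned int max = 0xffff >> (16 - bit_precision);

	if (val > max)
		val = max;
	if (bit_precision < 16)
		val <<= 16 - bit_precision;
	return val;
}

int main(void)
{
	/* 10-bit hw value 0x3ff becomes 0xffc0 in the 16-bit LUT property */
	printf("0x%04x\n", lut_pack(0x3ff, 10));
	return 0;
}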
+static struct drm_property_blob *
+i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ if (HAS_GMCH(dev_priv))
+ val = I915_READ(PALETTE(pipe, i));
+ else
+ val = I915_READ(LGC_PALETTE(pipe, i));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_RED_MASK, val), 8);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_GREEN_MASK, val), 8);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_BLUE_MASK, val), 8);
+ }
+
+ return blob;
+}
+
+static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+}
+
+static struct drm_property_blob *
+i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val1, val2;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
+ val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
+
+ blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, val1);
+ blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
+ blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ }
+
+ blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 0)));
+ blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 1)));
+ blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 2)));
+
+ return blob;
+}
+
+static void i965_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state);
+}
+
+static struct drm_property_blob *
+chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
+
+ val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ }
+
+ return blob;
+}
+
+static void chv_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state);
+ else
+ i965_read_luts(crtc_state);
+}
+
+static struct drm_property_blob *
+ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ val = I915_READ(PREC_PALETTE(pipe, i));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_RED_MASK, val), 10);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_BLUE_MASK, val), 10);
+ }
+
+ return blob;
+}
+
+static void ilk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state);
+}
+
+static struct drm_property_blob *
+glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * hw_lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < hw_lut_size; i++) {
+ val = I915_READ(PREC_PAL_DATA(pipe));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_RED_MASK, val), 10);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_BLUE_MASK, val), 10);
+ }
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+
+ return blob;
+}
+
+static void glk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+}
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1444,14 +1888,17 @@ void intel_color_init(struct intel_crtc *crtc)
dev_priv->display.color_check = chv_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = chv_load_luts;
+ dev_priv->display.read_luts = chv_read_luts;
} else if (INTEL_GEN(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i965_load_luts;
+ dev_priv->display.read_luts = i965_read_luts;
} else {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i9xx_load_luts;
+ dev_priv->display.read_luts = i9xx_read_luts;
}
} else {
if (INTEL_GEN(dev_priv) >= 11)
@@ -1470,16 +1917,19 @@ void intel_color_init(struct intel_crtc *crtc)
else
dev_priv->display.color_commit = ilk_color_commit;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
- else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
- else if (INTEL_GEN(dev_priv) >= 8)
+ dev_priv->display.read_luts = glk_read_luts;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
dev_priv->display.load_luts = bdw_load_luts;
- else if (INTEL_GEN(dev_priv) >= 7)
+ } else if (INTEL_GEN(dev_priv) >= 7) {
dev_priv->display.load_luts = ivb_load_luts;
- else
+ } else {
dev_priv->display.load_luts = ilk_load_luts;
+ dev_priv->display.read_luts = ilk_read_luts;
+ }
}
drm_crtc_enable_color_mgmt(&crtc->base,
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 057e8ac63555..173727aaa24d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -6,13 +6,20 @@
#ifndef __INTEL_COLOR_H__
#define __INTEL_COLOR_H__
+#include <linux/types.h>
+
struct intel_crtc_state;
struct intel_crtc;
+struct drm_property_blob;
void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 308ec63207ee..1133c4e97bb4 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -277,7 +277,22 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
void
intel_attach_colorspace_property(struct drm_connector *connector)
{
- if (!drm_mode_create_colorspace_property(connector))
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (drm_mode_create_hdmi_colorspace_property(connector))
+ return;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (drm_mode_create_dp_colorspace_property(connector))
+ return;
+ break;
+ default:
+ DRM_DEBUG_KMS("Colorspace property not supported\n");
+ return;
+ }
+
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 0a08354a6183..39cc6d79dc85 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -844,7 +844,7 @@ load_detect:
}
/* for pre-945g platforms use load detect */
- ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
if (ret > 0) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
@@ -1001,9 +1001,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev_priv))
- crt->base.crtc_mask = (1 << 0);
+ crt->base.pipe_mask = BIT(PIPE_A);
else
- crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ crt->base.pipe_mask = ~0;
if (IS_GEN(dev_priv, 2))
connector->interlace_allowed = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 8eb2b3ec01ed..0d6e494b4508 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -45,6 +45,7 @@
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
@@ -586,6 +587,26 @@ static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
{ 0x0, 0x00, 0x00 }, /* 3 0 */
};
+struct tgl_dkl_phy_ddi_buf_trans {
+ u32 dkl_vswing_control;
+ u32 dkl_preshoot_control;
+ u32 dkl_de_emphasis_control;
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -872,7 +893,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ 0, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ default_entry = n_entries - 1;
+ } else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
@@ -1049,6 +1077,8 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
case DPLL_ID_ICL_MGPLL4:
+ case DPLL_ID_TGL_MGPLL5:
+ case DPLL_ID_TGL_MGPLL6:
return DDI_CLK_SEL_MG;
}
}
@@ -1413,11 +1443,30 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
ref_clock = dev_priv->cdclk.hw.ref;
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
- (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
- MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
switch (pll_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
@@ -1692,7 +1741,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
hsw_ddi_clock_get(encoder, pipe_config);
}
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1704,44 +1754,50 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
- temp = TRANS_MSA_SYNC_CLK;
-
- if (crtc_state->limited_color_range)
- temp |= TRANS_MSA_CEA_RANGE;
+ temp = DP_MSA_MISC_SYNC_CLOCK;
switch (crtc_state->pipe_bpp) {
case 18:
- temp |= TRANS_MSA_6_BPC;
+ temp |= DP_MSA_MISC_6_BPC;
break;
case 24:
- temp |= TRANS_MSA_8_BPC;
+ temp |= DP_MSA_MISC_8_BPC;
break;
case 30:
- temp |= TRANS_MSA_10_BPC;
+ temp |= DP_MSA_MISC_10_BPC;
break;
case 36:
- temp |= TRANS_MSA_12_BPC;
+ temp |= DP_MSA_MISC_12_BPC;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
break;
}
+ /* nonsense combination */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->limited_color_range)
+ temp |= DP_MSA_MISC_COLOR_CEA_RGB;
+
/*
* As per DP 1.2 spec section 2.3.4.3 while sending
* YCBCR 444 signals we should program MSA MISC1/0 fields with
- * colorspace information. The output colorspace encoding is BT601.
+ * colorspace information.
*/
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
+ temp |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+
/*
* As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
* of Color Encoding Format and Content Color Gamut] while sending
- * YCBCR 420 signals we should program MSA MISC1 fields which
- * indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
+ * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields
+ * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
*/
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- temp |= TRANS_MSA_USE_VSC_SDP;
+ if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ temp |= DP_MSA_MISC_COLOR_VSC_SDP;
+
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
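The bpc switch above is simply pipe_bpp split across three color components; a tiny sketch of that mapping (the MSA MISC bit encodings themselves are not modeled):

#include <stdio.h>

int main(void)
{
	static const int pipe_bpp[] = { 18, 24, 30, 36 };
	int i;

	for (i = 0; i < (int)(sizeof(pipe_bpp) / sizeof(pipe_bpp[0])); i++)
		printf("pipe_bpp %d -> %d bits per component\n",
		       pipe_bpp[i], pipe_bpp[i] / 3);
	return 0;
}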
@@ -1761,7 +1817,14 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+/*
+ * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
+ *
+ * Only intended to be used by intel_ddi_enable_transcoder_func() and
+ * intel_ddi_config_transcoder_func().
+ */
+static u32
+intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
@@ -1840,11 +1903,42 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder);
} else {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
+ return temp;
+}
+
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+/*
+ * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
+ * bit.
+ */
+static void
+intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ temp &= ~TRANS_DDI_FUNC_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
@@ -2045,18 +2139,20 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
- port_name(port));
+ DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
+ encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
- port_name(port), *pipe_mask);
+ DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
- port_name(port), *pipe_mask, mst_pipe_mask);
+ DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -2066,8 +2162,9 @@ out:
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- DRM_ERROR("Port %c enabled but PHY powered down? "
- "(PHY_CTL %08x)\n", port_name(port), tmp);
+ DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
+ "(PHY_CTL %08x)\n", encoder->base.base.id,
+ encoder->base.name, tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -2138,7 +2235,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
/*
* VDSC power is needed when DSC is enabled
*/
- if (crtc_state->dsc_params.compression_enable)
+ if (crtc_state->dsc.compression_enable)
intel_display_power_get(dev_priv,
intel_dsc_power_domain(crtc_state));
}
@@ -2269,7 +2366,13 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ } else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
@@ -2583,7 +2686,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val;
int ln;
@@ -2599,33 +2702,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
+ val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
+ I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
+ val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
+ I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
+ val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
+ I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
+ val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
+ I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(ln, port));
+ val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2633,9 +2736,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
+ I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_DRVCTRL(ln, port));
+ val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2643,7 +2746,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
+ I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2654,17 +2757,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(ln, port));
+ val = I915_READ(MG_CLKHUB(ln, tc_port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(ln, port), val);
+ I915_WRITE(MG_CLKHUB(ln, tc_port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(ln, port));
+ val = I915_READ(MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2672,9 +2775,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(ln, port), val);
+ I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
- val = I915_READ(MG_TX2_DCC(ln, port));
+ val = I915_READ(MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2682,18 +2785,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(ln, port), val);
+ I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
+ val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
+ I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
- val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
+ val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
+ I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
}
}
@@ -2711,6 +2814,64 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
+static void
+tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
+ u32 level)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
+ u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
+
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ ddi_translations = tgl_dkl_phy_ddi_translations;
+
+ if (level >= n_entries)
+ level = n_entries - 1;
+
+ dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK);
+ dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control);
+ dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control);
+ dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
+
+ for (ln = 0; ln < 2; ln++) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+
+ I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0);
+
+ /* All the registers are RMW */
+ val = I915_READ(DKL_TX_DPCNTL0(tc_port));
+ val &= ~dpcnt_mask;
+ val |= dpcnt_val;
+ I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
+
+ val = I915_READ(DKL_TX_DPCNTL1(tc_port));
+ val &= ~dpcnt_mask;
+ val |= dpcnt_val;
+ I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
+
+ val = I915_READ(DKL_TX_DPCNTL2(tc_port));
+ val &= ~DKL_TX_DP20BITMODE;
+ I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
+ }
+}
+
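All of the per-lane writes above follow the same read-modify-write pattern: clear the swing/pre-emphasis fields, then OR in the values taken from the translation table. A generic sketch of that pattern with simulated register access; the mask and value below are hypothetical, not the DKL field definitions:

#include <stdio.h>

/* Simulated register, standing in for a per-lane PHY register (illustrative only) */
static unsigned int fake_reg = 0xdeadbeef;

static unsigned int rmw(unsigned int reg, unsigned int mask, unsigned int val)
{
	reg &= ~mask;	/* clear the fields we own */
	reg |= val;	/* install the table-provided values */
	return reg;
}

int main(void)
{
	unsigned int mask = 0x0000ffff;	/* hypothetical vswing/pre-emphasis fields */
	unsigned int val  = 0x00001234;	/* hypothetical translation-table value */

	fake_reg = rmw(fake_reg, mask, val);
	printf("0x%08x\n", fake_reg);	/* prints 0xdead1234 */
	return 0;
}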
+static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int link_clock,
+ u32 level,
+ enum intel_output_type type)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
+ else
+ tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
+}
+
static u32 translate_signal_level(int signal_levels)
{
int i;
@@ -2742,7 +2903,10 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
+ else if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -2989,130 +3153,141 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
-static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+static void
+icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ u32 val, bits;
int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_DP_MODE(ln, port));
- val |= MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(MG_DP_MODE(ln, port), val);
- }
+ bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING;
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val;
- int ln;
+ for (ln = 0; ln < 2; ln++) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+ val = I915_READ(DKL_DP_MODE(tc_port));
+ } else {
+ val = I915_READ(MG_DP_MODE(ln, tc_port));
+ }
- if (tc_port == PORT_TC_NONE)
- return;
+ if (enable)
+ val |= bits;
+ else
+ val &= ~bits;
- for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_DP_MODE(ln, port));
- val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(MG_DP_MODE(ln, port), val);
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(DKL_DP_MODE(tc_port), val);
+ else
+ I915_WRITE(MG_DP_MODE(ln, tc_port), val);
}
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
+ if (INTEL_GEN(dev_priv) == 11) {
+ bits = MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING;
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ if (enable)
+ val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3));
+ else
+ val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK);
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+ }
}
-static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+static void
+icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->base.port;
- u32 ln0, ln1, lane_mask;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
+ u32 ln0, ln1, pin_assignment;
+ u8 width;
if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
- ln0 = I915_READ(MG_DP_MODE(0, port));
- ln1 = I915_READ(MG_DP_MODE(1, port));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
+ ln0 = I915_READ(DKL_DP_MODE(tc_port));
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
+ ln1 = I915_READ(DKL_DP_MODE(tc_port));
+ } else {
+ ln0 = I915_READ(MG_DP_MODE(0, tc_port));
+ ln1 = I915_READ(MG_DP_MODE(1, tc_port));
+ }
- switch (intel_dig_port->tc_mode) {
- case TC_PORT_DP_ALT:
- ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
+ /* DPPATC */
+ pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
+ width = crtc_state->lane_count;
- switch (lane_mask) {
- case 0x1:
- case 0x4:
- break;
- case 0x2:
+ switch (pin_assignment) {
+ case 0x0:
+ WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ if (width == 1) {
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x1:
+ if (width == 4) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x2:
+ if (width == 2) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x3:
+ case 0x5:
+ if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0x3:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0x8:
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0xC:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0xF:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- default:
- MISSING_CASE(lane_mask);
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
-
- case TC_PORT_LEGACY:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ case 0x4:
+ case 0x6:
+ if (width == 1) {
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
break;
-
default:
- MISSING_CASE(intel_dig_port->tc_mode);
- return;
+ MISSING_CASE(pin_assignment);
}
- I915_WRITE(MG_DP_MODE(0, port), ln0);
- I915_WRITE(MG_DP_MODE(1, port), ln1);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
+ I915_WRITE(DKL_DP_MODE(tc_port), ln0);
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
+ I915_WRITE(DKL_DP_MODE(tc_port), ln1);
+ } else {
+ I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
+ I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
+ }
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -3129,17 +3304,18 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
- val = I915_READ(DP_TP_CTL(port));
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}
@@ -3148,21 +3324,205 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
- val = I915_READ(DP_TP_CTL(port));
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
}
-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void
+tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
+{
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ u32 val;
+
+ if (!cstate->dc3co_exitline)
+ return;
+
+ val = I915_READ(EXITLINE(cstate->cpu_transcoder));
+ val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
+ I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
+}
+
+static void
+tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
+{
+ u32 val, exit_scanlines;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+
+ if (!cstate->dc3co_exitline)
+ return;
+
+ exit_scanlines = cstate->dc3co_exitline;
+ exit_scanlines <<= EXITLINE_SHIFT;
+ val = I915_READ(EXITLINE(cstate->cpu_transcoder));
+ val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
+ val |= exit_scanlines;
+ val |= EXITLINE_ENABLE;
+ I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
+}
+
+static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *cstate)
+{
+ u32 exit_scanlines;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ u32 crtc_vdisplay = cstate->base.adjusted_mode.crtc_vdisplay;
+
+ cstate->dc3co_exitline = 0;
+
+ if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ return;
+
+ /* Bspec: 49196, DC3CO only works with pipe A and DDI A. */
+ if (to_intel_crtc(cstate->base.crtc)->pipe != PIPE_A ||
+ encoder->port != PORT_A)
+ return;
+
+ if (!cstate->has_psr2 || !cstate->base.active)
+ return;
+
+ /*
+ * DC3CO exit time is 200us per Bspec 49196.
+ * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
+ */
+ exit_scanlines =
+ intel_usecs_to_scanlines(&cstate->base.adjusted_mode, 200) + 1;
+
+ if (WARN_ON(exit_scanlines > crtc_vdisplay))
+ return;
+
+ cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+ DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline);
+}
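
As an aside, the "ROUNDUP(200 / line time) + 1" formula in the comment above can be checked with a small standalone sketch mirroring what intel_usecs_to_scanlines() is expected to compute; the 1920x1080@60 timing values below are illustrative assumptions, not taken from the patch or the Bspec.

#include <stdio.h>

int main(void)
{
	/* Assumed CEA 1920x1080@60 timing: 148.5 MHz dotclock, htotal 2200 */
	unsigned int crtc_clock = 148500;   /* kHz */
	unsigned int crtc_htotal = 2200;    /* pixels */
	unsigned int crtc_vdisplay = 1080;  /* active lines */
	unsigned int usecs = 200;           /* DC3CO exit time */

	/* ceil(usecs * pixel clock / (1000 * htotal)) */
	unsigned int lines = (usecs * crtc_clock + 1000 * crtc_htotal - 1) /
			     (1000 * crtc_htotal);
	unsigned int exit_scanlines = lines + 1;

	/* prints: early exit scanlines 15, dc3co_exitline 1065 */
	printf("early exit scanlines %u, dc3co_exitline %u\n",
	       exit_scanlines, crtc_vdisplay - exit_scanlines);
	return 0;
}
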
+
+static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
+{
+ u32 val;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return;
+
+ val = I915_READ(EXITLINE(crtc_state->cpu_transcoder));
+
+ if (val & EXITLINE_ENABLE)
+ crtc_state->dc3co_exitline = val & EXITLINE_MASK;
+}
+
+static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int level = intel_ddi_dp_level(intel_dp);
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
+
+ tgl_set_psr2_transcoder_exitline(crtc_state);
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+ crtc_state->lane_count, is_mst);
+
+ intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
+ intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
+
+ /* 1.a is handled in intel_atomic_commit_tail() */
+
+ /* 2. */
+ intel_edp_panel_on(intel_dp);
+
+ /*
+ * 1.b, 3. and 4.a are done before tgl_ddi_pre_enable_dp() by:
+ * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and
+ * haswell_crtc_enable()->intel_enable_shared_dpll()
+ */
+
+ /* 4.b */
+ intel_ddi_clk_select(encoder, crtc_state);
+
+ /* 5. */
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+
+ /* 6. */
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+
+ /*
+ * 7.a - The steps in this function should only be executed for the MST
+ * master; that is handled by the MST hook
+ * intel_mst_pre_enable_dp().
+ */
+ intel_ddi_enable_pipe_clock(crtc_state);
+
+ /* 7.b */
+ intel_ddi_config_transcoder_func(crtc_state);
+
+ /* 7.d */
+ icl_phy_set_clock_gating(dig_port, false);
+
+ /* 7.e */
+ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
+ encoder->type);
+
+ /* 7.f */
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(dev_priv, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+
+ /* 7.g */
+ intel_ddi_init_dp_buf_reg(encoder);
+
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ /*
+ * DDI FEC: a source that anticipates enabling FEC encoding sets the
+ * FEC_READY bit in the FEC_CONFIGURATION register to 1 before
+ * initiating link training.
+ */
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ /* 7.c, 7.h, 7.i, 7.j */
+ intel_dp_start_link_train(intel_dp);
+
+ /* 7.k */
+ if (!is_trans_port_sync_mode(crtc_state))
+ intel_dp_stop_link_train(intel_dp);
+
+ /*
+ * TODO: enable clock gating
+ *
+ * It is not written in the DP enabling sequence, but "PHY Clockgating
+ * programming" states that clock gating should be enabled after the
+ * link training. Doing so causes all subsequent trainings to fail,
+ * so it is not enabled for now.
+ */
+
+ /* 7.l */
+ intel_ddi_enable_fec(encoder, crtc_state);
+ intel_dsc_enable(encoder, crtc_state);
+}
+
+static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -3177,6 +3537,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
+ intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
@@ -3186,8 +3549,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(dig_port);
- icl_disable_phy_clock_gating(dig_port);
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+ icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3215,12 +3578,13 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
- if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+ if ((port != PORT_A || INTEL_GEN(dev_priv) >= 9) &&
+ !is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp);
intel_ddi_enable_fec(encoder, crtc_state);
- icl_enable_phy_clock_gating(dig_port);
+ icl_phy_set_clock_gating(dig_port, true);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
@@ -3228,6 +3592,24 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dsc_enable(encoder, crtc_state);
}
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ else
+ hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+
+ /* MST will set the MSA after allocating the Virtual Channel, from the
+ * MST encoder's pre_enable callback.
+ */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
+}
+
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3244,10 +3626,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(dig_port);
- icl_disable_phy_clock_gating(dig_port);
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+ icl_phy_set_clock_gating(dig_port, false);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
+ level, INTEL_OUTPUT_HDMI);
+ else if (INTEL_GEN(dev_priv) == 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@@ -3257,7 +3642,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
- icl_enable_phy_clock_gating(dig_port);
+ icl_phy_set_clock_gating(dig_port, true);
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
@@ -3330,10 +3715,14 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
- val = I915_READ(DP_TP_CTL(port));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ }
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@@ -3373,6 +3762,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
+ tgl_clear_psr2_transcoder_exitline(old_crtc_state);
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3475,7 +3865,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state);
- intel_dp_ycbcr_420_enable(intel_dp, crtc_state);
+ intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
+ intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -3486,12 +3877,12 @@ static i915_reg_t
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
enum port port)
{
- static const i915_reg_t regs[] = {
- [PORT_A] = CHICKEN_TRANS_EDP,
- [PORT_B] = CHICKEN_TRANS_A,
- [PORT_C] = CHICKEN_TRANS_B,
- [PORT_D] = CHICKEN_TRANS_C,
- [PORT_E] = CHICKEN_TRANS_A,
+ static const enum transcoder trans[] = {
+ [PORT_A] = TRANSCODER_EDP,
+ [PORT_B] = TRANSCODER_A,
+ [PORT_C] = TRANSCODER_B,
+ [PORT_D] = TRANSCODER_C,
+ [PORT_E] = TRANSCODER_A,
};
WARN_ON(INTEL_GEN(dev_priv) < 9);
@@ -3499,7 +3890,7 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
if (WARN_ON(port < PORT_A || port > PORT_E))
port = PORT_A;
- return regs[port];
+ return CHICKEN_TRANS(trans[port]);
}
static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
@@ -3633,7 +4024,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_ddi_set_pipe_settings(crtc_state);
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
@@ -3761,7 +4152,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
u32 val;
bool wait = false;
- if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
@@ -3769,11 +4160,11 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
wait = true;
}
- val = I915_READ(DP_TP_CTL(port));
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -3788,8 +4179,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
@@ -3891,6 +4282,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ i915_reg_t dp_tp_ctl;
+
+ if (IS_GEN(dev_priv, 11))
+ dp_tp_ctl = DP_TP_CTL(encoder->port);
+ else
+ dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
+
+ pipe_config->fec_enable =
+ I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_config->fec_enable);
+ }
+
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -3902,6 +4310,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ tgl_dc3co_exitline_get_config(pipe_config);
+
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -3979,10 +4390,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
- else
+ } else {
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+ tgl_dc3co_exitline_compute_config(encoder, pipe_config);
+ }
+
if (ret)
return ret;
@@ -4276,7 +4690,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
- enum pipe pipe;
enum phy phy = intel_port_to_phy(dev_priv, port);
init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
@@ -4328,8 +4741,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
intel_encoder->cloneable = 0;
- for_each_pipe(dev_priv, pipe)
- intel_encoder->crtc_mask |= BIT(pipe);
+ intel_encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -4351,46 +4763,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->update_complete = intel_ddi_update_complete;
}
- switch (port) {
- case PORT_A:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_A_IO;
- break;
- case PORT_B:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_B_IO;
- break;
- case PORT_C:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_C_IO;
- break;
- case PORT_D:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_D_IO;
- break;
- case PORT_E:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_E_IO;
- break;
- case PORT_F:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_F_IO;
- break;
- case PORT_G:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_G_IO;
- break;
- case PORT_H:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_H_IO;
- break;
- case PORT_I:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_I_IO;
- break;
- default:
- MISSING_CASE(port);
- }
+ WARN_ON(port > PORT_I);
+ intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
+ port - PORT_A;
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index a08365da2643..19aeab1246ee 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -30,7 +30,8 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index af50f05f4e9d..6f5e3bd13ad1 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -56,6 +55,8 @@
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
@@ -65,6 +66,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
+#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
@@ -79,6 +81,7 @@
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
+#include "intel_vga.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -88,7 +91,17 @@ static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_XRGB8888,
};
-/* Primary plane formats for gen >= 4 */
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -96,6 +109,7 @@ static const u32 i965_primary_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
};
static const u64 i9xx_format_modifiers[] = {
@@ -135,8 +149,6 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
-static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
@@ -490,7 +502,7 @@ static const struct intel_limit intel_limits_bxt = {
/* WA Display #0827: Gen9:all */
static void
-skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
+skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@ -521,6 +533,20 @@ needs_modeset(const struct intel_crtc_state *state)
return drm_atomic_crtc_needs_modeset(&state->base);
}
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
+ crtc_state->sync_mode_slaves_mask);
+}
+
+static bool
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
/*
* Platform specific helpers to calculate the port PLL loopback- (clock.m),
* and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -1612,8 +1638,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
if (intel_de_wait_for_register(dev_priv, dpll_reg,
port_mask, expected_mask, 1000))
- WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
- port_name(dport->base.port),
+ WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dport->base.base.base.id, dport->base.base.name,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
@@ -2079,7 +2105,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int pinctl;
u32 alignment;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
@@ -2161,8 +2188,6 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
i915_gem_object_lock(vma->obj);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
@@ -2739,10 +2764,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
size++;
/* rotate the x/y offsets to match the GTT view */
- r.x1 = x;
- r.y1 = y;
- r.x2 = x + width;
- r.y2 = y + height;
+ drm_rect_init(&r, x, y, width, height);
drm_rect_rotate(&r,
rot_info->plane[i].width * tile_width,
rot_info->plane[i].height * tile_height,
@@ -2864,10 +2886,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
struct drm_rect r;
/* rotate the x/y offsets to match the GTT view */
- r.x1 = x;
- r.y1 = y;
- r.x2 = x + width;
- r.y2 = y + height;
+ drm_rect_init(&r, x, y, width, height);
drm_rect_rotate(&r,
info->plane[i].width * tile_width,
info->plane[i].height * tile_height,
@@ -2969,6 +2988,8 @@ static int i9xx_format_to_fourcc(int format)
return DRM_FORMAT_XRGB2101010;
case DISPPLANE_RGBX101010:
return DRM_FORMAT_XBGR2101010;
+ case DISPPLANE_RGBX161616:
+ return DRM_FORMAT_XBGR16161616F;
}
}
@@ -3066,13 +3087,11 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
}
- mutex_lock(&dev->struct_mutex);
obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
base_aligned,
base_aligned,
size_aligned);
- mutex_unlock(&dev->struct_mutex);
- if (!obj)
+ if (IS_ERR(obj))
return false;
switch (plane_config->tiling) {
@@ -3154,6 +3173,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
@@ -3233,13 +3253,11 @@ valid_fb:
intel_state->color_plane[0].stride =
intel_fb_pitch(fb, 0, intel_state->base.rotation);
- mutex_lock(&dev->struct_mutex);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb,
&intel_state->view,
intel_plane_uses_fence(intel_state),
&intel_state->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(intel_state->vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
intel_crtc->pipe, PTR_ERR(intel_state->vma));
@@ -3347,6 +3365,16 @@ static int icl_max_plane_width(const struct drm_framebuffer *fb,
return 5120;
}
+static int skl_max_plane_height(void)
+{
+ return 4096;
+}
+
+static int icl_max_plane_height(void)
+{
+ return 4320;
+}
+
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
int main_x, int main_y, u32 main_offset)
{
@@ -3395,7 +3423,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
int max_width;
- int max_height = 4096;
+ int max_height;
u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
if (INTEL_GEN(dev_priv) >= 11)
@@ -3405,6 +3433,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
else
max_width = skl_max_plane_width(fb, 0, rotation);
+ if (INTEL_GEN(dev_priv) >= 11)
+ max_height = icl_max_plane_height();
+ else
+ max_height = skl_max_plane_height();
+
if (w > max_width || h > max_height) {
DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
w, h, max_width, max_height);
@@ -3471,9 +3504,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate(&plane_state->base.src,
- (x << 16) - plane_state->base.src.x1,
- (y << 16) - plane_state->base.src.y1);
+ drm_rect_translate_to(&plane_state->base.src,
+ x << 16, y << 16);
return 0;
}
@@ -3544,7 +3576,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (is_planar_yuv_format(fb->format->format)) {
+ if (drm_format_info_is_yuv_semiplanar(fb->format)) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3565,6 +3597,53 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
+ */
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
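
As an aside, here is the 64bpp rule from i9xx_plane_min_cdclk() worked through in isolation as a standalone sketch; the pixel rate is an illustrative assumption, not a value from the patch.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int pixel_rate = 148500;   /* kHz, assumed mode */
	unsigned int cpp = 8;               /* 64bpp framebuffer */
	bool double_wide = false;

	/* 64bpp pixel rate must stay under 80% of cdclk -> cdclk >= rate * 10/8 */
	unsigned int num = (cpp == 8) ? 10 : 1;
	unsigned int den = (cpp == 8) ? 8 : 1;

	if (double_wide)
		den *= 2;   /* two pixels per clock with a double wide pipe */

	/* prints: min cdclk 185625 kHz */
	printf("min cdclk %u kHz\n", (pixel_rate * num + den - 1) / den);
	return 0;
}
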
+
unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -3647,6 +3726,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XBGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISPPLANE_RGBX161616;
+ break;
default:
MISSING_CASE(fb->format->format);
return 0;
@@ -3669,7 +3751,8 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- int src_x, src_y;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_x, src_y, src_w;
u32 offset;
int ret;
@@ -3680,9 +3763,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
if (!plane_state->base.visible)
return 0;
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
src_x = plane_state->base.src.x1 >> 16;
src_y = plane_state->base.src.y1 >> 16;
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
+
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
@@ -3695,9 +3783,8 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate(&plane_state->base.src,
- (src_x << 16) - plane_state->base.src.x1,
- (src_y << 16) - plane_state->base.src.y1);
+ drm_rect_translate_to(&plane_state->base.src,
+ src_x << 16, src_y << 16);
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
@@ -4227,7 +4314,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev, ctx);
- i915_redisable_vga(to_i915(dev));
+ intel_vga_redisable(to_i915(dev));
if (!state)
return 0;
@@ -4259,7 +4346,7 @@ __intel_display_resume(struct drm_device *dev,
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(dev_priv));
+ intel_has_gpu_reset(&dev_priv->gt));
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
@@ -4346,7 +4433,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* so need a full re-initialization.
*/
intel_pps_unlock_regs_wa(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -4394,50 +4481,60 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
-static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
+static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
- crtc->base.mode = new_crtc_state->base.mode;
+ u32 trans_ddi_func_ctl2_val;
+ u8 master_select;
/*
- * Update pipe size and adjust fitter if needed: the reason for this is
- * that in compute_mode_changes we check the native mode (not the pfit
- * mode) to see if we can flip rather than do a full mode set. In the
- * fastboot case, we'll flip, but if we don't update the pipesrc and
- * pfit state, we'll end up with a big fb scanned out into the wrong
- * sized surface.
+ * Configure the master select and enable Transcoder Port Sync for
+ * the slave CRTC's transcoder.
*/
+ if (crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
- I915_WRITE(PIPESRC(crtc->pipe),
- ((new_crtc_state->pipe_src_w - 1) << 16) |
- (new_crtc_state->pipe_src_h - 1));
+ if (crtc_state->master_transcoder == TRANSCODER_EDP)
+ master_select = 0;
+ else
+ master_select = crtc_state->master_transcoder + 1;
- /* on skylake this is done by detaching scalers */
- if (INTEL_GEN(dev_priv) >= 9) {
- skl_detach_scalers(new_crtc_state);
+ /* Set the master select bits for Transcoder Port Sync */
+ trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
+ PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
+ PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
+ /* Enable Transcoder Port Sync */
+ trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
- if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(new_crtc_state);
- else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(old_crtc_state);
- }
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
+ trans_ddi_func_ctl2_val);
+}
- if (INTEL_GEN(dev_priv) >= 11)
- icl_set_pipe_chicken(crtc);
+static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg;
+ u32 trans_ddi_func_ctl2_val;
+
+ if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
+
+ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
+ transcoder_name(old_crtc_state->cpu_transcoder));
+
+ reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
+ trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
+ PORT_SYNC_MODE_MASTER_SELECT_MASK);
+ I915_WRITE(reg, trans_ddi_func_ctl2_val);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4480,7 +4577,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, tries;
@@ -4581,7 +4678,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -4714,7 +4811,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, j;
@@ -4832,7 +4929,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4869,7 +4966,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4900,7 +4997,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -5215,7 +5312,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 temp;
assert_pch_transcoder_disabled(dev_priv, pipe);
@@ -5310,7 +5407,7 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t dslreg = PIPEDSL(pipe);
@@ -5462,7 +5559,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && is_planar_yuv_format(format->format) &&
+ if (format && drm_format_info_is_yuv_semiplanar(format) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
@@ -5539,7 +5636,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
- fb && is_planar_yuv_format(fb->format->format))
+ fb && drm_format_info_is_yuv_semiplanar(fb->format))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
@@ -5571,10 +5668,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -5590,6 +5683,13 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ /* fall through */
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
intel_plane->base.base.id, intel_plane->base.name,
@@ -5649,7 +5749,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
if (crtc_state->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
@@ -5731,13 +5831,8 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
- if (intel_crtc->overlay) {
- struct drm_device *dev = intel_crtc->base.dev;
-
- mutex_lock(&dev->struct_mutex);
+ if (intel_crtc->overlay)
(void) intel_overlay_switch_off(intel_crtc->overlay);
- mutex_unlock(&dev->struct_mutex);
- }
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
@@ -5762,7 +5857,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
@@ -5786,7 +5881,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
@@ -6309,7 +6404,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@@ -6442,7 +6537,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+ enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
bool psl_clkgate_wa;
@@ -6462,6 +6557,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(pipe_config);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_enable_trans_port_sync(pipe_config);
+
intel_set_pipe_src_size(pipe_config);
if (cpu_transcoder != TRANSCODER_EDP &&
@@ -6507,7 +6605,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(intel_crtc);
- intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(pipe_config);
@@ -6568,7 +6665,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Sometimes spurious CPU pipe underruns happen when the
@@ -6640,6 +6737,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_disable_transcoder_port_sync(old_crtc_state);
+
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -6737,6 +6837,8 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
return POWER_DOMAIN_PORT_DDI_E_LANES;
case PORT_F:
return POWER_DOMAIN_PORT_DDI_F_LANES;
+ case PORT_G:
+ return POWER_DOMAIN_PORT_DDI_G_LANES;
default:
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
@@ -6753,16 +6855,18 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
dig_port->tc_mode == TC_PORT_TBT_ALT) {
switch (dig_port->aux_ch) {
case AUX_CH_C:
- return POWER_DOMAIN_AUX_TBT1;
+ return POWER_DOMAIN_AUX_C_TBT;
case AUX_CH_D:
- return POWER_DOMAIN_AUX_TBT2;
+ return POWER_DOMAIN_AUX_D_TBT;
case AUX_CH_E:
- return POWER_DOMAIN_AUX_TBT3;
+ return POWER_DOMAIN_AUX_E_TBT;
case AUX_CH_F:
- return POWER_DOMAIN_AUX_TBT4;
+ return POWER_DOMAIN_AUX_F_TBT;
+ case AUX_CH_G:
+ return POWER_DOMAIN_AUX_G_TBT;
default:
MISSING_CASE(dig_port->aux_ch);
- return POWER_DOMAIN_AUX_TBT1;
+ return POWER_DOMAIN_AUX_C_TBT;
}
}
@@ -6779,6 +6883,8 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
return POWER_DOMAIN_AUX_E;
case AUX_CH_F:
return POWER_DOMAIN_AUX_F;
+ case AUX_CH_G:
+ return POWER_DOMAIN_AUX_G;
default:
MISSING_CASE(dig_port->aux_ch);
return POWER_DOMAIN_AUX_A;
@@ -6855,7 +6961,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@@ -6987,7 +7093,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
@@ -7096,7 +7202,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_display_power_put_unchecked(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
- dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
+ dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
@@ -7204,7 +7310,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev_priv)->num_pipes == 2)
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7542,6 +7648,27 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
constant_n);
}
+static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+{
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE;
+
+ if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
+ enableddisabled(bios_lvds_use_ssc),
+ enableddisabled(dev_priv->vbt.lvds_use_ssc));
+ dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ }
+ }
+}
+
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_modparams.panel_use_ssc >= 0)
@@ -8193,6 +8320,21 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
(crtc_state->pipe_src_h - 1));
}
+static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (IS_GEN(dev_priv, 2))
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ else
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+}
+
static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8231,7 +8373,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
- if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+ if (intel_pipe_is_interlaced(pipe_config)) {
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
pipe_config->base.adjusted_mode.crtc_vtotal += 1;
pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
@@ -8563,7 +8705,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
struct dpll clock;
u32 mdiv;
int refclk = 100000;
@@ -8673,7 +8815,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
@@ -8702,47 +8844,24 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
-static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static enum intel_output_format
+bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
-
- pipe_config->lspcon_downsampling = false;
+ u32 tmp;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
- if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
- bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
- bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+ if (tmp & PIPEMISC_YUV420_ENABLE) {
+ /* We support 4:2:0 in full blend mode only */
+ WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
- if (ycbcr420_enabled) {
- /* We support 4:2:0 in full blend mode only */
- if (!blend)
- output = INTEL_OUTPUT_FORMAT_INVALID;
- else if (!(IS_GEMINILAKE(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10))
- output = INTEL_OUTPUT_FORMAT_INVALID;
- else
- output = INTEL_OUTPUT_FORMAT_YCBCR420;
- } else {
- /*
- * Currently there is no interface defined to
- * check user preference between RGB/YCBCR444
- * or YCBCR420. So the only possible case for
- * YCBCR444 usage is driving YCBCR420 output
- * with LSPCON, when pipe is configured for
- * YCBCR444 output and LSPCON takes care of
- * downsampling it.
- */
- pipe_config->lspcon_downsampling = true;
- output = INTEL_OUTPUT_FORMAT_YCBCR444;
- }
- }
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ return INTEL_OUTPUT_FORMAT_YCBCR444;
+ } else {
+ return INTEL_OUTPUT_FORMAT_RGB;
}
-
- pipe_config->output_format = output;
}
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
@@ -8780,6 +8899,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
@@ -9419,9 +9539,19 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
else
val |= PIPECONF_PROGRESSIVE;
+ /*
+ * This would end up with an odd purple hue over
+ * the entire display. Make sure we don't do it.
+ */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
I915_WRITE(PIPECONF(pipe), val);
@@ -9443,6 +9573,10 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
else
val |= PIPECONF_PROGRESSIVE;
+ if (IS_HASWELL(dev_priv) &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
}
@@ -9593,7 +9727,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
* clear if it's a win or loss power wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -9892,8 +10026,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
val = I915_READ(PLANE_SIZE(pipe, plane_id));
- fb->height = ((val >> 16) & 0xfff) + 1;
- fb->width = ((val >> 0) & 0x1fff) + 1;
+ fb->height = ((val >> 16) & 0xffff) + 1;
+ fb->width = ((val >> 0) & 0xffff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
@@ -9954,9 +10088,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!wakeref)
return false;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -9983,6 +10117,16 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
+ switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
+ case PIPECONF_OUTPUT_COLORSPACE_YUV601:
+ case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ break;
+ default:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ break;
+ }
+
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
PIPECONF_GAMMA_MODE_SHIFT;
@@ -10397,6 +10541,59 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
}
}
+static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 trans_port_sync, master_select;
+
+ trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+ if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = trans_port_sync &
+ PORT_SYNC_MODE_MASTER_SELECT_MASK;
+ if (master_select == 0)
+ return TRANSCODER_EDP;
+ else
+ return master_select - 1;
+}
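The master-select field read above follows a "0 selects the EDP transcoder, N selects transcoder N - 1" convention. A small illustrative decode, with a made-up field layout (the real mask and shift are not reproduced here):

/*
 * Illustrative decode of the port-sync master-select readout above.
 * The enable bit and select mask below are assumptions for the example;
 * only the 0 = EDP, N = transcoder N - 1 mapping is taken from the code.
 */
#include <stdint.h>
#include <stdio.h>

enum { XCODER_A, XCODER_B, XCODER_C, XCODER_D, XCODER_EDP, XCODER_INVALID = -1 };

static int model_master_transcoder(uint32_t ctl2, uint32_t enable_bit,
				   uint32_t select_mask)
{
	if (!(ctl2 & enable_bit))
		return XCODER_INVALID;           /* port sync not enabled */

	uint32_t sel = ctl2 & select_mask;
	return sel == 0 ? XCODER_EDP : (int)sel - 1;
}

int main(void)
{
	/* assume: enable bit 4, select field in bits 2:0 */
	printf("%d\n", model_master_transcoder(0x10 | 0x2, 0x10, 0x7)); /* XCODER_B */
	printf("%d\n", model_master_transcoder(0x10 | 0x0, 0x10, 0x7)); /* XCODER_EDP */
	printf("%d\n", model_master_transcoder(0x02, 0x10, 0x7));       /* XCODER_INVALID */
	return 0;
}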
+
+static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 transcoders;
+ enum transcoder cpu_transcoder;
+
+ crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
+ crtc_state->cpu_transcoder);
+
+ transcoders = BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) |
+ BIT(TRANSCODER_D);
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t trans_wakeref;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+
+ if (!trans_wakeref)
+ continue;
+
+ if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ crtc_state->cpu_transcoder)
+ crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+ intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ }
+
+ WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -10408,6 +10605,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_crtc_init_scalers(crtc, pipe_config);
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
+
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10438,7 +10637,30 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
}
intel_get_pipe_src_size(crtc, pipe_config);
- intel_get_crtc_ycbcr_config(crtc, pipe_config);
+
+ if (IS_HASWELL(dev_priv)) {
+ u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+
+ if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ else
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ } else {
+ pipe_config->output_format =
+ bdw_get_pipemisc_output_format(crtc);
+
+ /*
+ * Currently there is no interface defined to
+ * check user preference between RGB/YCBCR444
+ * or YCBCR420. So the only possible case for
+ * YCBCR444 usage is driving YCBCR420 output
+ * with LSPCON, when pipe is configured for
+ * YCBCR444 output and LSPCON takes care of
+ * downsampling it.
+ */
+ pipe_config->lspcon_downsampling =
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
+ }
pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
@@ -10493,6 +10715,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ icelake_get_trans_port_sync_config(pipe_config);
+
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv,
@@ -10514,21 +10740,13 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
else
base = intel_plane_ggtt_offset(plane_state);
- base += plane_state->color_plane[0].offset;
-
- /* ILK+ do this automagically */
- if (HAS_GMCH(dev_priv) &&
- plane_state->base.rotation & DRM_MODE_ROTATE_180)
- base += (plane_state->base.crtc_h *
- plane_state->base.crtc_w - 1) * fb->format->cpp[0];
-
- return base;
+ return base + plane_state->color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
{
- int x = plane_state->base.crtc_x;
- int y = plane_state->base.crtc_y;
+ int x = plane_state->base.dst.x1;
+ int y = plane_state->base.dst.y1;
u32 pos = 0;
if (x < 0) {
@@ -10550,8 +10768,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
{
const struct drm_mode_config *config =
&plane_state->base.plane->dev->mode_config;
- int width = plane_state->base.crtc_w;
- int height = plane_state->base.crtc_h;
+ int width = drm_rect_width(&plane_state->base.dst);
+ int height = drm_rect_height(&plane_state->base.dst);
return width > 0 && width <= config->cursor_width &&
height > 0 && height <= config->cursor_height;
@@ -10559,6 +10777,9 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ unsigned int rotation = plane_state->base.rotation;
int src_x, src_y;
u32 offset;
int ret;
@@ -10570,8 +10791,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
if (!plane_state->base.visible)
return 0;
- src_x = plane_state->base.src_x >> 16;
- src_y = plane_state->base.src_y >> 16;
+ src_x = plane_state->base.src.x1 >> 16;
+ src_y = plane_state->base.src.y1 >> 16;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
@@ -10582,7 +10803,25 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->base.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
return 0;
}
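The rotation adjustment added above moves the cursor base to the last pixel of the source rectangle so a 180 degree GMCH scanout walks the buffer backwards. A worked example with an assumed 64x64 ARGB cursor (the sizes are not from the patch):

/*
 * Worked example of the 180 degree rotation offset above.
 * 64x64 cursor, 4 bytes per pixel are assumed example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int src_w = 64, src_h = 64, cpp = 4;
	unsigned int offset = 0;

	/* same arithmetic as the hunk above */
	offset += (src_h * src_w - 1) * cpp;

	printf("offset advances by %u bytes\n", offset); /* 16380 */
	return 0;
}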
@@ -10606,6 +10845,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->base.src = drm_plane_state_src(&plane_state->base);
+ plane_state->base.dst = drm_plane_state_dest(&plane_state->base);
+
ret = intel_cursor_check_surface(plane_state);
if (ret)
return ret;
@@ -10648,7 +10891,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- int width = plane_state->base.crtc_w;
+ int width = drm_rect_width(&plane_state->base.dst);
/*
* 845g/865g are only limited by the width of their cursors,
@@ -10674,8 +10917,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- plane_state->base.crtc_w,
- plane_state->base.crtc_h);
+ drm_rect_width(&plane_state->base.dst),
+ drm_rect_height(&plane_state->base.dst));
return -EINVAL;
}
@@ -10708,8 +10951,8 @@ static void i845_update_cursor(struct intel_plane *plane,
unsigned long irqflags;
if (plane_state && plane_state->base.visible) {
- unsigned int width = plane_state->base.crtc_w;
- unsigned int height = plane_state->base.crtc_h;
+ unsigned int width = drm_rect_width(&plane_state->base.dst);
+ unsigned int height = drm_rect_height(&plane_state->base.dst);
cntl = plane_state->ctl |
i845_cursor_ctl_crtc(crtc_state);
@@ -10811,7 +11054,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- switch (plane_state->base.crtc_w) {
+ switch (drm_rect_width(&plane_state->base.dst)) {
case 64:
cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
@@ -10822,7 +11065,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(plane_state->base.crtc_w);
+ MISSING_CASE(drm_rect_width(&plane_state->base.dst));
return 0;
}
@@ -10836,8 +11079,8 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- int width = plane_state->base.crtc_w;
- int height = plane_state->base.crtc_h;
+ int width = drm_rect_width(&plane_state->base.dst);
+ int height = drm_rect_height(&plane_state->base.dst);
if (!intel_cursor_size_ok(plane_state))
return false;
@@ -10890,17 +11133,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- plane_state->base.crtc_w,
- plane_state->base.crtc_h);
+ drm_rect_width(&plane_state->base.dst),
+ drm_rect_height(&plane_state->base.dst));
return -EINVAL;
}
WARN_ON(plane_state->base.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
- if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) {
DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0], plane_state->base.crtc_w);
+ fb->pitches[0],
+ drm_rect_width(&plane_state->base.dst));
return -EINVAL;
}
@@ -10915,7 +11160,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
* Refuse to put the cursor into that compromised position.
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->base.visible && plane_state->base.crtc_x < 0) {
+ plane_state->base.visible && plane_state->base.dst.x1 < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -10935,11 +11180,14 @@ static void i9xx_update_cursor(struct intel_plane *plane,
unsigned long irqflags;
if (plane_state && plane_state->base.visible) {
+ unsigned width = drm_rect_width(&plane_state->base.dst);
+ unsigned height = drm_rect_height(&plane_state->base.dst);
+
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
- if (plane_state->base.crtc_h != plane_state->base.crtc_w)
- fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
+ if (width != height)
+ fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
base = intel_cursor_base(plane_state);
pos = intel_cursor_position(plane_state);
@@ -11084,7 +11332,6 @@ static int intel_modeset_disable_planes(struct drm_atomic_state *state,
}
int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -11191,10 +11438,8 @@ found:
crtc_state->base.active = crtc_state->base.enable = true;
- if (!mode)
- mode = &load_detect_mode;
-
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base,
+ &load_detect_mode);
if (ret)
goto fail;
@@ -11286,7 +11531,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
struct dpll clock;
@@ -11510,7 +11755,6 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
bool was_crtc_enabled = old_crtc_state->base.active;
bool is_crtc_enabled = crtc_state->base.active;
bool turn_off, turn_on, visible, was_visible;
- struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
@@ -11539,24 +11783,18 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
plane_state->base.visible = visible = false;
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
}
if (!was_visible && !visible)
return 0;
- if (fb != old_plane_state->base.fb)
- crtc_state->fb_changed = true;
-
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
crtc->base.base.id, crtc->base.name,
plane->base.base.id, plane->base.name,
- fb ? fb->base.id : -1);
-
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- plane->base.base.id, plane->base.name,
was_visible, visible,
turn_off, turn_on, mode_changed);
@@ -11665,7 +11903,7 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
int i;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- linked = plane_state->linked_plane;
+ linked = plane_state->planar_linked_plane;
if (!linked)
continue;
@@ -11674,8 +11912,8 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
if (IS_ERR(linked_plane_state))
return PTR_ERR(linked_plane_state);
- WARN_ON(linked_plane_state->linked_plane != plane);
- WARN_ON(linked_plane_state->slave == plane_state->slave);
+ WARN_ON(linked_plane_state->planar_linked_plane != plane);
+ WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
}
return 0;
@@ -11698,16 +11936,16 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
* in the crtc_state->active_planes mask.
*/
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+ if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
continue;
- plane_state->linked_plane = NULL;
- if (plane_state->slave && !plane_state->base.visible) {
+ plane_state->planar_linked_plane = NULL;
+ if (plane_state->planar_slave && !plane_state->base.visible) {
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
- plane_state->slave = false;
+ plane_state->planar_slave = false;
}
if (!crtc_state->nv12_planes)
@@ -11741,10 +11979,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
return -EINVAL;
}
- plane_state->linked_plane = linked;
+ plane_state->planar_linked_plane = linked;
- linked_state->slave = true;
- linked_state->linked_plane = plane;
+ linked_state->planar_slave = true;
+ linked_state->planar_linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
@@ -11764,25 +12002,108 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static int intel_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
+static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc_state);
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_connector *master_connector, *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_crtc *master_crtc = NULL;
+ struct drm_crtc_state *master_crtc_state;
+ struct intel_crtc_state *master_pipe_config;
+ int i, tile_group_id;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ /*
+ * In case of tiled displays there could be one or more slaves but there is
+ * only one master. Lets make the CRTC used by the connector corresponding
+ * to the last horizonal and last vertical tile a master/genlock CRTC.
+ * All the other CRTCs corresponding to other tiles of the same Tile group
+ * are the slave CRTCs and hold a pointer to their genlock CRTC.
+ */
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != crtc)
+ continue;
+ if (!connector->has_tile)
+ continue;
+ if (crtc_state->base.mode.hdisplay != connector->tile_h_size ||
+ crtc_state->base.mode.vdisplay != connector->tile_v_size)
+ return 0;
+ if (connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ continue;
+ crtc_state->sync_mode_slaves_mask = 0;
+ tile_group_id = connector->tile_group->id;
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(master_connector, &conn_iter) {
+ struct drm_connector_state *master_conn_state = NULL;
+
+ if (!master_connector->has_tile)
+ continue;
+ if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
+ master_connector->tile_v_loc != master_connector->num_v_tile - 1)
+ continue;
+ if (master_connector->tile_group->id != tile_group_id)
+ continue;
+
+ master_conn_state = drm_atomic_get_connector_state(&state->base,
+ master_connector);
+ if (IS_ERR(master_conn_state)) {
+ drm_connector_list_iter_end(&conn_iter);
+ return PTR_ERR(master_conn_state);
+ }
+ if (master_conn_state->crtc) {
+ master_crtc = master_conn_state->crtc;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!master_crtc) {
+ DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
+ connector_state->crtc->base.id);
+ return -EINVAL;
+ }
+
+ master_crtc_state = drm_atomic_get_crtc_state(&state->base,
+ master_crtc);
+ if (IS_ERR(master_crtc_state))
+ return PTR_ERR(master_crtc_state);
+
+ master_pipe_config = to_intel_crtc_state(master_crtc_state);
+ crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
+ master_pipe_config->sync_mode_slaves_mask |=
+ BIT(crtc_state->cpu_transcoder);
+ DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
+ transcoder_name(crtc_state->master_transcoder),
+ crtc_state->base.crtc->base.id,
+ master_pipe_config->sync_mode_slaves_mask);
+ }
+
+ return 0;
+}
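The comment in the function above makes the connector sitting at the last horizontal and last vertical tile location the master of the tile group. A minimal sketch of that predicate; the struct is an illustrative stand-in for the tile fields of drm_connector, not the real type:

/*
 * "Last tile is the master" rule from the comment above, modelled with
 * a placeholder struct.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_tile {
	int h_loc, v_loc;        /* this connector's tile position */
	int num_h, num_v;        /* tiles per row / column in the group */
};

static bool model_is_master_tile(const struct model_tile *t)
{
	return t->h_loc == t->num_h - 1 && t->v_loc == t->num_v - 1;
}

int main(void)
{
	/* 2x1 tiled panel: tile (1,0) drives the master CRTC */
	struct model_tile left  = { .h_loc = 0, .v_loc = 0, .num_h = 2, .num_v = 1 };
	struct model_tile right = { .h_loc = 1, .v_loc = 0, .num_h = 2, .num_v = 1 };

	printf("left master: %d, right master: %d\n",
	       model_is_master_tile(&left), model_is_master_tile(&right));
	return 0;
}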
+
+static int intel_crtc_atomic_check(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool mode_changed = needs_modeset(crtc_state);
int ret;
- bool mode_changed = needs_modeset(pipe_config);
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
- mode_changed && !crtc_state->active)
- pipe_config->update_wm_post = true;
+ mode_changed && !crtc_state->base.active)
+ crtc_state->update_wm_post = true;
- if (mode_changed && crtc_state->enable &&
+ if (mode_changed && crtc_state->base.enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(pipe_config->shared_dpll)) {
- ret = dev_priv->display.crtc_compute_clock(intel_crtc,
- pipe_config);
+ !WARN_ON(crtc_state->shared_dpll)) {
+ ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
return ret;
}
@@ -11791,19 +12112,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* May need to update pipe gamma enable bits
* when C8 planes are getting enabled/disabled.
*/
- if (c8_planes_changed(pipe_config))
- crtc_state->color_mgmt_changed = true;
+ if (c8_planes_changed(crtc_state))
+ crtc_state->base.color_mgmt_changed = true;
- if (mode_changed || pipe_config->update_pipe ||
- crtc_state->color_mgmt_changed) {
- ret = intel_color_check(pipe_config);
+ if (mode_changed || crtc_state->update_pipe ||
+ crtc_state->base.color_mgmt_changed) {
+ ret = intel_color_check(crtc_state);
if (ret)
return ret;
}
ret = 0;
if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(pipe_config);
+ ret = dev_priv->display.compute_pipe_wm(crtc_state);
if (ret) {
DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
return ret;
@@ -11819,7 +12140,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(pipe_config);
+ ret = dev_priv->display.compute_intermediate_wm(crtc_state);
if (ret) {
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
@@ -11827,29 +12148,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed || pipe_config->update_pipe)
- ret = skl_update_scaler_crtc(pipe_config);
-
- if (!ret)
- ret = icl_check_nv12_planes(pipe_config);
+ if (mode_changed || crtc_state->update_pipe)
+ ret = skl_update_scaler_crtc(crtc_state);
if (!ret)
- ret = skl_check_pipe_max_pixel_rate(intel_crtc,
- pipe_config);
- if (!ret)
- ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
- pipe_config);
+ ret = intel_atomic_setup_scalers(dev_priv, crtc,
+ crtc_state);
}
if (HAS_IPS(dev_priv))
- pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
+ crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .atomic_check = intel_crtc_atomic_check,
-};
-
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -12159,6 +12470,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
+ if (IS_CHERRYVIEW(dev_priv))
+ DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+ else
+ DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
dump_planes:
if (!state)
return;
@@ -12179,6 +12499,12 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
bool ret = true;
/*
+ * We're going to peek into connector->state,
+ * hence connection_mutex must be held.
+ */
+ drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
+
+ /*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
@@ -12260,6 +12586,13 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
+ /*
+ * Save the slave bitmask which gets filled for master crtc state during
+ * slave atomic check call.
+ */
+ if (is_trans_port_sync_master(crtc_state))
+ saved_state->sync_mode_slaves_mask =
+ crtc_state->sync_mode_slaves_mask;
/* Keep base drm_crtc_state intact, only clear our extended struct */
BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
@@ -12353,6 +12686,15 @@ encoder_retry:
drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
CRTC_STEREO_DOUBLE);
+ /* Set the crtc_state defaults for trans_port_sync */
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
+ ret = icl_add_sync_mode_crtcs(pipe_config);
+ if (ret) {
+ DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
+ ret);
+ return ret;
+ }
+
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
@@ -12485,22 +12827,23 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
if ((drm_debug & DRM_UT_KMS) == 0)
return;
- drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
- drm_dbg(DRM_UT_KMS, "expected:");
+ DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
+ DRM_DEBUG_KMS("expected:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg(DRM_UT_KMS, "found");
+ DRM_DEBUG_KMS("found:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
} else {
- drm_err("mismatch in %s infoframe", name);
- drm_err("expected:");
+ DRM_ERROR("mismatch in %s infoframe\n", name);
+ DRM_ERROR("expected:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err("found");
+ DRM_ERROR("found:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
}
}
-static void __printf(3, 4)
-pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
+static void __printf(4, 5)
+pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
+ const char *name, const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -12510,9 +12853,11 @@ pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
vaf.va = &args;
if (fastset)
- drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
else
- drm_err("mismatch in %s %pV", name, &vaf);
+ DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
va_end(args);
}
@@ -12540,7 +12885,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bool fastset)
{
struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
bool ret = true;
+ u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
@@ -12552,8 +12899,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected 0x%08x, found 0x%08x)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12562,8 +12909,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12572,8 +12919,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %s, found %s)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %s, found %s)", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
@@ -12589,8 +12936,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
PIPE_CONF_CHECK_BOOL(name); \
} else { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
@@ -12599,8 +12946,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %p, found %p)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %p, found %p)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12611,9 +12958,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
!fastset)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
+ "found tu %i, gmch %i/%i link %i/%i)", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
@@ -12638,10 +12985,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
&pipe_config->name, !fastset) && \
!intel_compare_link_m_n(&current_config->alt_name, \
&pipe_config->name, !fastset)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
"or tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
+ "found tu %i, gmch %i/%i link %i/%i)", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
@@ -12663,8 +13010,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(%x) (expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(%x) (expected %i, found %i)", \
(mask), \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -12674,8 +13021,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12692,6 +13039,24 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
+ if (current_config->name1 != pipe_config->name1) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name1), \
+ "(expected %i, found %i, won't compare lut values)", \
+ current_config->name1, \
+ pipe_config->name1); \
+ ret = false;\
+ } else { \
+ if (!intel_color_lut_equal(current_config->name2, \
+ pipe_config->name2, pipe_config->name1, \
+ bit_precision)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name2), \
+ "hw_state doesn't match sw_state"); \
+ ret = false; \
+ } \
+ } \
+} while (0)
+
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
@@ -12730,6 +13095,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -12738,6 +13104,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -12787,6 +13154,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(csc_mode);
PIPE_CONF_CHECK_BOOL(gamma_enable);
PIPE_CONF_CHECK_BOOL(csc_enable);
+
+ bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
+ if (bp_gamma)
+ PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma);
+
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -12842,6 +13214,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
+ PIPE_CONF_CHECK_I(master_transcoder);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -12849,6 +13224,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
+#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK
return ret;
@@ -13160,7 +13536,7 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->slave ||
+ assert_plane(plane, plane_state->planar_slave ||
plane_state->base.visible);
}
@@ -13276,10 +13652,15 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
verify_disabled_dpll_state(dev_priv);
}
-static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
+static void
+intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -13309,7 +13690,6 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
* answer that's slightly in the future.
*/
if (IS_GEN(dev_priv, 2)) {
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
vtotal = adjusted_mode->crtc_vtotal;
@@ -13320,8 +13700,9 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
} else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
- } else
+ } else {
crtc->scanline_offset = 1;
+ }
}
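The offset computed above compensates for the hardware scanline counter running ahead of the logical scanline. A hedged arithmetic sketch; treating the consumer as (hw_scanline + offset) modulo vtotal is an assumption for illustration, since the consuming code is not part of this hunk:

/*
 * Sketch of how a scanline offset corrects the hardware counter.  The
 * gen2 "vtotal - 1" and DDI HDMI "2" offsets come from the code above;
 * the (hw + offset) % vtotal consumer is assumed.
 */
#include <stdio.h>

static int model_logical_scanline(int hw_scanline, int offset, int vtotal)
{
	return (hw_scanline + offset) % vtotal;
}

int main(void)
{
	int vtotal = 806;  /* assumed example timing */

	/* gen2-style offset of vtotal - 1 behaves like subtracting one */
	printf("%d\n", model_logical_scanline(0, vtotal - 1, vtotal));  /* 805 */
	/* DDI HDMI case uses an offset of 2 */
	printf("%d\n", model_logical_scanline(804, 2, vtotal));         /* 0 */
	return 0;
}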
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
@@ -13403,158 +13784,43 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_lock_all_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- /* Add all pipes to the state */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
-
-static int intel_modeset_all_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- /*
- * Add all pipes to the state, and force
- * a modeset on all the active ones.
- */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
- int ret;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (!crtc_state->base.active || needs_modeset(crtc_state))
- continue;
-
- crtc_state->base.mode_changed = true;
-
- ret = drm_atomic_add_affected_connectors(&state->base,
- &crtc->base);
- if (ret)
- return ret;
-
- ret = drm_atomic_add_affected_planes(&state->base,
- &crtc->base);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int intel_modeset_checks(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
- int ret = 0, i;
-
- if (!check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
- return -EINVAL;
- }
+ int ret, i;
/* keep the current setting */
if (!state->cdclk.force_min_cdclk_changed)
state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
state->modeset = true;
- state->active_crtcs = dev_priv->active_crtcs;
+ state->active_pipes = dev_priv->active_pipes;
state->cdclk.logical = dev_priv->cdclk.logical;
state->cdclk.actual = dev_priv->cdclk.actual;
- state->cdclk.pipe = INVALID_PIPE;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (new_crtc_state->base.active)
- state->active_crtcs |= 1 << i;
+ state->active_pipes |= BIT(crtc->pipe);
else
- state->active_crtcs &= ~(1 << i);
+ state->active_pipes &= ~BIT(crtc->pipe);
if (old_crtc_state->base.active != new_crtc_state->base.active)
- state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
+ state->active_pipe_changes |= BIT(crtc->pipe);
}
- /*
- * See if the config requires any additional preparation, e.g.
- * to adjust global state with pipes off. We need to do this
- * here so we can get the modeset_pipe updated config for the new
- * mode set on this crtc. For other crtcs we need to use the
- * adjusted_mode bits in the crtc directly.
- */
- if (dev_priv->display.modeset_calc_cdclk) {
- enum pipe pipe;
-
- ret = dev_priv->display.modeset_calc_cdclk(state);
- if (ret < 0)
+ if (state->active_pipe_changes) {
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
return ret;
-
- /*
- * Writes to dev_priv->cdclk.logical must protected by
- * holding all the crtc locks, even if we don't end up
- * touching the hardware
- */
- if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &state->cdclk.logical)) {
- ret = intel_lock_all_pipes(state);
- if (ret < 0)
- return ret;
- }
-
- if (is_power_of_2(state->active_crtcs)) {
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
-
- pipe = ilog2(state->active_crtcs);
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- if (crtc_state && needs_modeset(crtc_state))
- pipe = INVALID_PIPE;
- } else {
- pipe = INVALID_PIPE;
- }
-
- /* All pipes must be switched off while we change the cdclk. */
- if (pipe != INVALID_PIPE &&
- intel_cdclk_needs_cd2x_update(dev_priv,
- &dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
- ret = intel_lock_all_pipes(state);
- if (ret < 0)
- return ret;
-
- state->cdclk.pipe = pipe;
- } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
- ret = intel_modeset_all_pipes(state);
- if (ret < 0)
- return ret;
-
- state->cdclk.pipe = INVALID_PIPE;
- }
-
- DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- state->cdclk.logical.cdclk,
- state->cdclk.actual.cdclk);
- DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- state->cdclk.logical.voltage_level,
- state->cdclk.actual.voltage_level);
}
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+
intel_modeset_clear_plls(state);
if (IS_HASWELL(dev_priv))
@@ -13603,6 +13869,114 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
+static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ u8 plane_ids_mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if ((plane_ids_mask & BIT(plane->id)) == 0)
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
+{
+ /* See {hsw,vlv,ivb}_plane_ratio() */
+ return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv);
+}
+
+static int intel_atomic_check_planes(struct intel_atomic_state *state,
+ bool *need_modeset)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = icl_add_linked_planes(state);
+ if (ret)
+ return ret;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_atomic_check(state, plane);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
+ return ret;
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ u8 old_active_planes, new_active_planes;
+
+ ret = icl_check_nv12_planes(new_crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * On some platforms the number of active planes affects
+ * the planes' minimum cdclk calculation. Add such planes
+ * to the state before we compute the minimum cdclk.
+ */
+ if (!active_planes_affects_min_cdclk(dev_priv))
+ continue;
+
+ old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+
+ if (hweight8(old_active_planes) == hweight8(new_active_planes))
+ continue;
+
+ ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * active_planes bitmask has been updated, and potentially
+ * affected planes are part of the state. We can now
+ * compute the minimum cdclk for each plane.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i)
+ *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
+
+ return 0;
+}
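On the platforms listed in active_planes_affects_min_cdclk() the number of non-cursor planes feeds the minimum-cdclk calculation, so the loop above only pulls the remaining planes into the state when that count changes. A small sketch of the count comparison, with __builtin_popcount standing in for hweight8 and an assumed cursor bit position:

/*
 * Sketch of the active-plane-count check above.  Bit 7 as the cursor
 * plane is an assumption for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_CURSOR_BIT (1u << 7)   /* assumed PLANE_CURSOR position */

static bool model_plane_count_changed(uint8_t old_planes, uint8_t new_planes)
{
	old_planes &= ~MODEL_CURSOR_BIT;
	new_planes &= ~MODEL_CURSOR_BIT;

	return __builtin_popcount(old_planes) != __builtin_popcount(new_planes);
}

int main(void)
{
	/* enabling a second sprite changes the count -> pull planes in */
	printf("%d\n", model_plane_count_changed(0x81, 0x83));  /* 1 */
	/* swapping which single plane is enabled does not */
	printf("%d\n", model_plane_count_changed(0x81, 0x82));  /* 0 */
	return 0;
}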
+
+static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret = intel_crtc_atomic_check(state, crtc);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -13616,7 +13990,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- bool any_ms = state->cdclk.force_min_cdclk_changed;
+ bool any_ms = false;
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -13650,10 +14024,22 @@ static int intel_atomic_check(struct drm_device *dev,
any_ms = true;
}
+ if (any_ms && !check_digital_port_conflicts(state)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
ret = drm_dp_mst_atomic_check(&state->base);
if (ret)
goto fail;
+ any_ms |= state->cdclk.force_min_cdclk_changed;
+
+ ret = intel_atomic_check_planes(state, &any_ms);
+ if (ret)
+ goto fail;
+
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
@@ -13662,11 +14048,7 @@ static int intel_atomic_check(struct drm_device *dev,
state->cdclk.logical = dev_priv->cdclk.logical;
}
- ret = icl_add_linked_planes(state);
- if (ret)
- goto fail;
-
- ret = drm_atomic_helper_check_planes(dev, &state->base);
+ ret = intel_atomic_check_crtcs(state);
if (ret)
goto fail;
@@ -13724,20 +14106,100 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!IS_GEN(dev_priv, 2))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+ if (crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+}
+
+static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ */
+ intel_set_pipe_src_size(new_crtc_state);
+
+ /* on skylake this is done by detaching scalers */
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_detach_scalers(new_crtc_state);
+
+ if (new_crtc_state->pch_pfit.enabled)
+ skylake_pfit_enable(new_crtc_state);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (new_crtc_state->pch_pfit.enabled)
+ ironlake_pfit_enable(new_crtc_state);
+ else if (old_crtc_state->pch_pfit.enabled)
+ ironlake_pfit_disable(old_crtc_state);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_set_pipe_chicken(crtc);
+}
+
+static void commit_pipe_config(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ bool modeset = needs_modeset(new_crtc_state);
+
+ /*
+ * During modesets pipe configuration was programmed as the
+ * CRTC was enabled.
+ */
+ if (!modeset) {
+ if (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe)
+ intel_color_commit(new_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_detach_scalers(new_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(new_crtc_state);
+
+ if (new_crtc_state->update_pipe)
+ intel_pipe_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(state,
+ new_crtc_state);
+}
+
static void intel_update_crtc(struct intel_crtc *crtc,
struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state,
to_intel_plane(crtc->base.primary));
if (modeset) {
- update_scanline_offset(new_crtc_state);
+ intel_crtc_update_active_timings(new_crtc_state);
+
dev_priv->display.crtc_enable(new_crtc_state, state);
/* vblanks work again, re-enable pipe CRC. */
@@ -13759,17 +14221,151 @@ static void intel_update_crtc(struct intel_crtc *crtc,
else if (new_plane_state)
intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
- intel_begin_crtc_commit(state, crtc);
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+
+ commit_pipe_config(state, old_crtc_state, new_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(state, crtc);
else
i9xx_update_planes_on_crtc(state, crtc);
- intel_finish_crtc_commit(state, crtc);
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev);
+ enum transcoder slave_transcoder;
+
+ WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
+
+ slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
+ return intel_get_crtc_for_pipe(dev_priv,
+ (enum pipe)slave_transcoder);
}
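The WARN_ON above expects exactly one bit set in sync_mode_slaves_mask, so ffs(mask) - 1 recovers the slave transcoder index, which the cast then treats as a pipe of the same index. A worked example with an arbitrary mask value:

/*
 * ffs() trick from intel_get_slave_crtc() above; the mask value is an
 * arbitrary example.
 */
#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int sync_mode_slaves_mask = 1u << 2;   /* transcoder C only */

	int slave = ffs(sync_mode_slaves_mask) - 1;
	printf("slave transcoder/pipe index: %d\n", slave);  /* 2 */
	return 0;
}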
-static void intel_update_crtcs(struct intel_atomic_state *state)
+static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ intel_crtc_disable_planes(state, crtc);
+
+ /*
+ * We need to disable pipe CRC before disabling the pipe,
+ * or we race against vblank off.
+ */
+ intel_crtc_disable_pipe_crc(crtc);
+
+ dev_priv->display.crtc_disable(old_crtc_state, state);
+ crtc->active = false;
+ intel_fbc_disable(crtc);
+ intel_disable_shared_dpll(old_crtc_state);
+
+ /*
+ * Underruns don't always raise interrupts,
+ * so check manually.
+ */
+ intel_check_cpu_fifo_underruns(dev_priv);
+ intel_check_pch_fifo_underruns(dev_priv);
+
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->base.active &&
+ !HAS_GMCH(dev_priv) &&
+ dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state,
+ new_crtc_state);
+}
+
+static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ struct intel_crtc_state *new_slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc_state *old_slave_crtc_state =
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
+
+ WARN_ON(!slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
+
+ /* Disable Slave first */
+ intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
+ if (old_slave_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_slave_crtc_state,
+ new_slave_crtc_state,
+ slave_crtc);
+
+ /* Disable Master */
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
+ if (old_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_crtc_state,
+ new_crtc_state,
+ crtc);
+}
+
+static void intel_commit_modeset_disables(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ /*
+ * Disable CRTCs/pipes in reverse order because some features (MST in
+ * TGL+) require a master and slave relationship between pipes, so it
+ * should always pick the lowest pipe as master as it will be enabled
+ * first and disabled in the reverse order so the master will be the
+ * last one to be disabled.
+ */
+ for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
+ continue;
+
+ /* In case of Transcoder Port Sync, master/slave CRTCs can be
+ * assigned in any order and we need to make sure that the
+ * slave CRTCs are disabled first and the master CRTC after, since
+ * slave vblanks are masked until the master's vblank.
+ */
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ if (is_trans_port_sync_master(new_crtc_state))
+ intel_trans_port_sync_modeset_disables(state,
+ crtc,
+ old_crtc_state,
+ new_crtc_state);
+ else
+ continue;
+ } else {
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
+
+ if (old_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_crtc_state,
+ new_crtc_state,
+ crtc);
+ }
+ }
+}
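The loop above tears down a port-sync pair by disabling the slave pipe before the master, since the slave's vblank is gated by the master. A tiny sketch of that ordering; pipe A as master and pipe B as slave are assumed example roles, not values from the patch:

/*
 * Disable ordering for a port-sync pair, as described in the comments
 * above: slave first, master last.
 */
#include <stdio.h>

enum model_pipe { MODEL_PIPE_A, MODEL_PIPE_B };

static void model_disable(enum model_pipe pipe)
{
	printf("disable pipe %c\n", 'A' + pipe);
}

static void model_disable_port_sync_pair(enum model_pipe master,
					 enum model_pipe slave)
{
	model_disable(slave);    /* slave first: its vblank is gated by the master */
	model_disable(master);   /* master last */
}

int main(void)
{
	model_disable_port_sync_pair(MODEL_PIPE_A, MODEL_PIPE_B);
	return 0;
}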
+
+static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -13784,14 +14380,120 @@ static void intel_update_crtcs(struct intel_atomic_state *state)
}
}
-static void skl_update_crtcs(struct intel_atomic_state *state)
+static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ intel_crtc_update_active_timings(new_crtc_state);
+ dev_priv->display.crtc_enable(new_crtc_state, state);
+ intel_crtc_enable_pipe_crc(crtc);
+}
+
+static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
+ struct intel_atomic_state *state)
+{
+ struct drm_connector *uninitialized_var(conn);
+ struct drm_connector_state *conn_state;
+ struct intel_dp *intel_dp;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ if (conn_state->crtc == &crtc->base)
+ break;
+ }
+ intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
+ intel_dp_stop_link_train(intel_dp);
+}
+
+static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
+ struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state,
+ to_intel_plane(crtc->base.primary));
+ bool modeset = needs_modeset(new_crtc_state);
+
+ if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
+ else if (new_plane_state)
+ intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+ commit_pipe_config(state, old_crtc_state, new_crtc_state);
+ skl_update_planes_on_crtc(state, crtc);
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ struct intel_crtc_state *new_slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc_state *old_slave_crtc_state =
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
+
+ WARN_ON(!slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
+
+ DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
+ crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
+ slave_crtc->base.name);
+
+ /* Enable seq for slave with DP_TP_CTL left Idle until the
+ * master is ready
+ */
+ intel_crtc_enable_trans_port_sync(slave_crtc,
+ state,
+ new_slave_crtc_state);
+
+ /* Enable seq for master with DP_TP_CTL left Idle */
+ intel_crtc_enable_trans_port_sync(crtc,
+ state,
+ new_crtc_state);
+
+ /* Set Slave's DP_TP_CTL to Normal */
+ intel_set_dp_tp_ctl_normal(slave_crtc,
+ state);
+
+ /* Set Master's DP_TP_CTL to Normal */
+ usleep_range(200, 400);
+ intel_set_dp_tp_ctl_normal(crtc,
+ state);
+
+ /* Now do the post crtc enable for all master and slaves */
+ intel_post_crtc_enable_updates(slave_crtc,
+ state);
+ intel_post_crtc_enable_updates(crtc,
+ state);
+}
+
+static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
unsigned int updated = 0;
bool progress;
- enum pipe pipe;
int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = state->wm_results.ddb.enabled_slices;
@@ -13816,20 +14518,19 @@ static void skl_update_crtcs(struct intel_atomic_state *state)
progress = false;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
bool vbl_wait = false;
- unsigned int cmask = drm_crtc_mask(&crtc->base);
-
- pipe = crtc->pipe;
+ bool modeset = needs_modeset(new_crtc_state);
- if (updated & cmask || !new_crtc_state->base.active)
+ if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries,
- INTEL_INFO(dev_priv)->num_pipes, i))
+ INTEL_NUM_PIPES(dev_priv), i))
continue;
- updated |= cmask;
+ updated |= BIT(pipe);
entries[i] = new_crtc_state->wm.skl.ddb;
/*
@@ -13840,12 +14541,22 @@ static void skl_update_crtcs(struct intel_atomic_state *state)
*/
if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
&old_crtc_state->wm.skl.ddb) &&
- !new_crtc_state->base.active_changed &&
+ !modeset &&
state->wm_results.dirty_pipes != updated)
vbl_wait = true;
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
+ if (is_trans_port_sync_master(new_crtc_state))
+ intel_update_trans_port_sync_crtcs(crtc,
+ state,
+ old_crtc_state,
+ new_crtc_state);
+ else
+ continue;
+ } else {
+ intel_update_crtc(crtc, state, old_crtc_state,
+ new_crtc_state);
+ }
if (vbl_wait)
intel_wait_for_vblank(dev_priv, pipe);
@@ -13934,49 +14645,18 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
if (needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
put_domains[crtc->pipe] =
modeset_get_crtc_power_domains(new_crtc_state);
}
-
- if (!needs_modeset(new_crtc_state))
- continue;
-
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
-
- if (old_crtc_state->base.active) {
- intel_crtc_disable_planes(state, crtc);
-
- /*
- * We need to disable pipe CRC before disabling the pipe,
- * or we race against vblank off.
- */
- intel_crtc_disable_pipe_crc(crtc);
-
- dev_priv->display.crtc_disable(old_crtc_state, state);
- crtc->active = false;
- intel_fbc_disable(crtc);
- intel_disable_shared_dpll(old_crtc_state);
-
- /*
- * Underruns don't always raise
- * interrupts, so check manually.
- */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-
- /* FIXME unify this for all platforms */
- if (!new_crtc_state->base.active &&
- !HAS_GMCH(dev_priv) &&
- dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state,
- new_crtc_state);
- }
}
+ intel_commit_modeset_disables(state);
+
/* FIXME: Eventually get rid of our crtc->config pointer */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
crtc->config = new_crtc_state;
@@ -14017,7 +14697,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.update_crtcs(state);
+ dev_priv->display.commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
@@ -14148,6 +14828,14 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
plane->frontbuffer_bit);
}
+static void assert_global_state_locked(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ drm_modeset_lock_assert_held(&crtc->base.mutex);
+}
+
static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *_state,
bool nonblock)
@@ -14213,12 +14901,14 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
- if (state->modeset) {
+ if (state->global_state_changed) {
+ assert_global_state_locked(dev_priv);
+
memcpy(dev_priv->min_cdclk, state->min_cdclk,
sizeof(state->min_cdclk));
memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
sizeof(state->min_voltage_level));
- dev_priv->active_crtcs = state->active_crtcs;
+ dev_priv->active_pipes = state->active_pipes;
dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
intel_cdclk_swap_state(state);
@@ -14231,7 +14921,7 @@ static int intel_atomic_commit(struct drm_device *dev,
if (nonblock && state->modeset) {
queue_work(dev_priv->modeset_wq, &state->base.commit_work);
} else if (nonblock) {
- queue_work(system_unbound_wq, &state->base.commit_work);
+ queue_work(dev_priv->flip_wq, &state->base.commit_work);
} else {
if (state->modeset)
flush_workqueue(dev_priv->modeset_wq);
@@ -14260,7 +14950,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
- gen6_rps_boost(rq);
+ intel_rps_boost(rq);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -14341,7 +15031,7 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
struct i915_sched_attr attr = {
- .priority = I915_PRIORITY_DISPLAY,
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
};
i915_gem_object_wait_priority(obj, 0, &attr);
@@ -14350,25 +15040,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
- * @new_state: the plane state being prepared
+ * @_new_plane_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
* involves pinning the underlying object and updating the frontbuffer tracking
* bits. Some older platforms need special physical address handling for
* cursor planes.
*
- * Must be called with struct_mutex held.
- *
* Returns 0 on success, negative error code on failure.
*/
int
intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_plane_state *_new_plane_state)
{
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_state->state);
+ to_intel_atomic_state(new_plane_state->base.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_state->fb;
+ struct drm_framebuffer *fb = new_plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
@@ -14399,9 +15089,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
}
- if (new_state->fence) { /* explicit fencing */
+ if (new_plane_state->base.fence) { /* explicit fencing */
ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
- new_state->fence,
+ new_plane_state->base.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
@@ -14415,15 +15105,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
return ret;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- return ret;
- }
-
- ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
+ ret = intel_plane_pin_fb(new_plane_state);
- mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
@@ -14431,7 +15114,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fb_obj_bump_render_priority(obj);
intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
- if (!new_state->fence) { /* implicit fencing */
+ if (!new_plane_state->base.fence) { /* implicit fencing */
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
@@ -14443,11 +15126,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
- add_rps_boost_after_vblank(new_state->crtc, fence);
+ add_rps_boost_after_vblank(new_plane_state->base.crtc,
+ fence);
dma_fence_put(fence);
}
} else {
- add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
+ add_rps_boost_after_vblank(new_plane_state->base.crtc,
+ new_plane_state->base.fence);
}
/*
@@ -14459,7 +15144,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
if (!intel_state->rps_interactive) {
- intel_rps_mark_interactive(dev_priv, true);
+ intel_rps_mark_interactive(&dev_priv->gt.rps, true);
intel_state->rps_interactive = true;
}
@@ -14469,130 +15154,27 @@ intel_prepare_plane_fb(struct drm_plane *plane,
/**
* intel_cleanup_plane_fb - Cleans up an fb after plane use
* @plane: drm plane to clean up for
- * @old_state: the state from the previous modeset
+ * @_old_plane_state: the state from the previous modeset
*
* Cleans up a framebuffer that has just been removed from a plane.
- *
- * Must be called with struct_mutex held.
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_plane_state *_old_plane_state)
{
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(_old_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(old_state->state);
+ to_intel_atomic_state(old_plane_state->base.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
if (intel_state->rps_interactive) {
- intel_rps_mark_interactive(dev_priv, false);
+ intel_rps_mark_interactive(&dev_priv->gt.rps, false);
intel_state->rps_interactive = false;
}
/* Should only be called after a successful intel_prepare_plane_fb()! */
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_plane_unpin_fb(to_intel_plane_state(old_state));
- mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-int
-skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int max_scale, mult;
- int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
-
- if (!crtc_state->base.enable)
- return DRM_PLANE_HELPER_NO_SCALING;
-
- crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- max_dotclk *= 2;
-
- if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
- return DRM_PLANE_HELPER_NO_SCALING;
-
- /*
- * skl max scale is lower of:
- * close to 3 but not 3, -1 is for that purpose
- * or
- * cdclk/crtc_clock
- */
- mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
- tmpclk1 = (1 << 16) * mult - 1;
- tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
- max_scale = min(tmpclk1, tmpclk2);
-
- return max_scale;
-}
-
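For reference, the skl_max_scale() helper removed above works in .16 fixed point: the result is the lower of "just under mult" and cdclk/crtc_clock. A standalone sketch of the same arithmetic (plain userspace C, not driver code; the clock values are made up purely for illustration):

#include <stdio.h>

int main(void)
{
	int mult = 3;			/* non planar-YUV format */
	int max_dotclk = 540000;	/* cdclk-derived limit, kHz (assumed) */
	int crtc_clock = 148500;	/* mode dot clock, kHz (assumed) */

	int tmpclk1 = (1 << 16) * mult - 1;	/* just under 3.0 in .16 */
	int tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
	int max_scale = tmpclk1 < tmpclk2 ? tmpclk1 : tmpclk2;

	/* 196607 / 65536 ~= 3.0, i.e. at most a ~3x downscale in this case */
	printf("max_scale = %d (%.3f)\n", max_scale, max_scale / 65536.0);
	return 0;
}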
-static void intel_begin_crtc_commit(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
-
- /* Perform vblank evasion around commit operation */
- intel_pipe_update_start(new_crtc_state);
-
- if (modeset)
- goto out;
-
- if (new_crtc_state->base.color_mgmt_changed ||
- new_crtc_state->update_pipe)
- intel_color_commit(new_crtc_state);
-
- if (new_crtc_state->update_pipe)
- intel_update_pipe_config(old_crtc_state, new_crtc_state);
- else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(new_crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
-
-out:
- if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state,
- new_crtc_state);
-}
-
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-
- if (crtc_state->has_pch_encoder) {
- enum pipe pch_transcoder =
- intel_crtc_pch_transcoder(crtc);
-
- intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
- }
-}
-
-static void intel_finish_crtc_commit(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- intel_pipe_update_end(new_crtc_state);
-
- if (new_crtc_state->update_pipe &&
- !needs_modeset(new_crtc_state) &&
- old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+ intel_plane_unpin_fb(old_plane_state);
}
/**
@@ -14649,6 +15231,7 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED;
default:
@@ -14682,8 +15265,8 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
};
static int
-intel_legacy_cursor_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -14691,11 +15274,13 @@ intel_legacy_cursor_update(struct drm_plane *plane,
u32 src_w, u32 src_h,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->state);
+ to_intel_crtc_state(crtc->base.state);
struct intel_crtc_state *new_crtc_state;
int ret;
@@ -14707,14 +15292,13 @@ intel_legacy_cursor_update(struct drm_plane *plane,
crtc_state->update_pipe)
goto slow;
- old_plane_state = plane->state;
/*
* Don't do an async update if there is an outstanding commit modifying
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
- if (old_plane_state->commit &&
- !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ if (old_plane_state->base.commit &&
+ !try_wait_for_completion(&old_plane_state->base.commit->hw_done))
goto slow;
/*
@@ -14722,56 +15306,51 @@ intel_legacy_cursor_update(struct drm_plane *plane,
* take the slowpath. Only changing fb or position should be
* in the fastpath.
*/
- if (old_plane_state->crtc != crtc ||
- old_plane_state->src_w != src_w ||
- old_plane_state->src_h != src_h ||
- old_plane_state->crtc_w != crtc_w ||
- old_plane_state->crtc_h != crtc_h ||
- !old_plane_state->fb != !fb)
+ if (old_plane_state->base.crtc != &crtc->base ||
+ old_plane_state->base.src_w != src_w ||
+ old_plane_state->base.src_h != src_h ||
+ old_plane_state->base.crtc_w != crtc_w ||
+ old_plane_state->base.crtc_h != crtc_h ||
+ !old_plane_state->base.fb != !fb)
goto slow;
- new_plane_state = intel_plane_duplicate_state(plane);
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
if (!new_plane_state)
return -ENOMEM;
- new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
if (!new_crtc_state) {
ret = -ENOMEM;
goto out_free;
}
- drm_atomic_set_fb_for_plane(new_plane_state, fb);
+ drm_atomic_set_fb_for_plane(&new_plane_state->base, fb);
- new_plane_state->src_x = src_x;
- new_plane_state->src_y = src_y;
- new_plane_state->src_w = src_w;
- new_plane_state->src_h = src_h;
- new_plane_state->crtc_x = crtc_x;
- new_plane_state->crtc_y = crtc_y;
- new_plane_state->crtc_w = crtc_w;
- new_plane_state->crtc_h = crtc_h;
+ new_plane_state->base.src_x = src_x;
+ new_plane_state->base.src_y = src_y;
+ new_plane_state->base.src_w = src_w;
+ new_plane_state->base.src_h = src_h;
+ new_plane_state->base.crtc_x = crtc_x;
+ new_plane_state->base.crtc_y = crtc_y;
+ new_plane_state->base.crtc_w = crtc_w;
+ new_plane_state->base.crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
- to_intel_plane_state(old_plane_state),
- to_intel_plane_state(new_plane_state));
+ old_plane_state, new_plane_state);
if (ret)
goto out_free;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ ret = intel_plane_pin_fb(new_plane_state);
if (ret)
goto out_free;
- ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
- if (ret)
- goto out_unlock;
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
- to_intel_frontbuffer(fb),
- intel_plane->frontbuffer_bit);
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
+ to_intel_frontbuffer(new_plane_state->base.fb),
+ plane->frontbuffer_bit);
/* Swap plane state */
- plane->state = new_plane_state;
+ plane->base.state = &new_plane_state->base;
/*
* We cannot swap crtc_state as it may be in use by an atomic commit or
@@ -14785,27 +15364,24 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (plane->state->visible)
- intel_update_plane(intel_plane, crtc_state,
- to_intel_plane_state(plane->state));
+ if (new_plane_state->base.visible)
+ intel_update_plane(plane, crtc_state, new_plane_state);
else
- intel_disable_plane(intel_plane, crtc_state);
+ intel_disable_plane(plane, crtc_state);
- intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
+ intel_plane_unpin_fb(old_plane_state);
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
if (new_crtc_state)
- intel_crtc_destroy_state(crtc, &new_crtc_state->base);
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base);
if (ret)
- intel_plane_destroy_state(plane, new_plane_state);
+ intel_plane_destroy_state(&plane->base, &new_plane_state->base);
else
- intel_plane_destroy_state(plane, old_plane_state);
+ intel_plane_destroy_state(&plane->base, &old_plane_state->base);
return ret;
slow:
- return drm_atomic_helper_update_plane(plane, crtc, fb,
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
}
@@ -14846,7 +15422,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
const u64 *modifiers;
const u32 *formats;
int num_formats;
- int ret;
+ int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@@ -14876,8 +15452,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
}
if (INTEL_GEN(dev_priv) >= 4) {
- formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
modifiers = i9xx_format_modifiers;
plane->max_stride = i9xx_plane_max_stride;
@@ -14886,6 +15480,15 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
plane_funcs = &i965_plane_funcs;
} else {
formats = i8xx_primary_formats;
@@ -14897,6 +15500,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->disable_plane = i9xx_disable_plane;
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ plane->min_cdclk = i9xx_plane_min_cdclk;
plane_funcs = &i8xx_plane_funcs;
}
@@ -14935,6 +15539,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
DRM_MODE_ROTATE_0,
supported_rotations);
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@@ -14951,7 +15558,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
{
unsigned int possible_crtcs;
struct intel_plane *cursor;
- int ret;
+ int ret, zpos;
cursor = intel_plane_alloc();
if (IS_ERR(cursor))
@@ -15000,6 +15607,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
@@ -15075,12 +15685,12 @@ static const struct drm_crtc_funcs i965_crtc_funcs = {
.disable_vblank = i965_disable_vblank,
};
-static const struct drm_crtc_funcs i945gm_crtc_funcs = {
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
INTEL_CRTC_FUNCS,
.get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i945gm_enable_vblank,
- .disable_vblank = i945gm_disable_vblank,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
};
static const struct drm_crtc_funcs i915_crtc_funcs = {
@@ -15151,8 +15761,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
funcs = &g4x_crtc_funcs;
else if (IS_GEN(dev_priv, 4))
funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv))
- funcs = &i945gm_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
else if (IS_GEN(dev_priv, 3))
funcs = &i915_crtc_funcs;
else
@@ -15187,8 +15797,6 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
}
- drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
intel_color_init(intel_crtc);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -15223,21 +15831,32 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int intel_encoder_clones(struct intel_encoder *encoder)
+static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_encoder *source_encoder;
- int index_mask = 0;
- int entry = 0;
+ u32 possible_clones = 0;
for_each_intel_encoder(dev, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
- index_mask |= (1 << entry);
+ possible_clones |= drm_encoder_mask(&source_encoder->base);
+ }
- entry++;
+ return possible_clones;
+}
+
+static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_crtc *crtc;
+ u32 possible_crtcs = 0;
+
+ for_each_intel_crtc(dev, crtc) {
+ if (encoder->pipe_mask & BIT(crtc->pipe))
+ possible_crtcs |= drm_crtc_mask(&crtc->base);
}
- return index_mask;
+ return possible_crtcs;
}
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
@@ -15319,13 +15938,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return;
if (INTEL_GEN(dev_priv) >= 12) {
- /* TODO: initialize TC ports as well */
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
+ intel_ddi_init(dev_priv, PORT_G);
+ intel_ddi_init(dev_priv, PORT_H);
+ intel_ddi_init(dev_priv, PORT_I);
icl_dsi_init(dev_priv);
} else if (IS_ELKHARTLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
@@ -15535,9 +16159,10 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_psr_init(dev_priv);
for_each_intel_encoder(&dev_priv->drm, encoder) {
- encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_crtcs =
+ intel_encoder_possible_crtcs(encoder);
encoder->base.possible_clones =
- intel_encoder_clones(encoder);
+ intel_encoder_possible_clones(encoder);
}
intel_init_pch_refclk(dev_priv);
@@ -15792,8 +16417,14 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
- if (INTEL_GEN(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ /* Transcoder timing limits */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ hdisplay_max = 16384;
+ vdisplay_max = 8192;
+ htotal_max = 16384;
+ vtotal_max = 8192;
+ } else if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
vdisplay_max = 4096;
htotal_max = 8192;
@@ -15822,6 +16453,56 @@ intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ if (INTEL_GEN(dev_priv) >= 5) {
+ if (mode->hdisplay < 64 ||
+ mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 5)
+ return MODE_V_ILLEGAL;
+ } else {
+ if (mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 3)
+ return MODE_V_ILLEGAL;
+ }
+
+ return MODE_OK;
+}
+
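The blanking minimums added to intel_mode_valid() above boil down to simple timing arithmetic. A standalone sketch (plain C, not driver code) checking the gen5+ rules against an ordinary 1920x1080 timing, whose numbers are only an assumed example:

#include <stdbool.h>
#include <stdio.h>

static bool gen5_timings_ok(int hdisplay, int htotal, int vdisplay, int vtotal)
{
	if (hdisplay < 64 || htotal - hdisplay < 32)
		return false;			/* MODE_H_ILLEGAL */
	if (vtotal - vdisplay < 5)
		return false;			/* MODE_V_ILLEGAL */
	return true;
}

int main(void)
{
	/* CEA 1920x1080: hblank = 2200 - 1920 = 280, vblank = 1125 - 1080 = 45 */
	printf("%s\n", gen5_timings_ok(1920, 2200, 1080, 1125) ?
	       "MODE_OK" : "rejected");
	return 0;
}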
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode)
+{
+ int plane_width_max, plane_height_max;
+
+ /*
+ * intel_mode_valid() should be
+ * sufficient on older platforms.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ return MODE_OK;
+
+ /*
+ * Most people will probably want a fullscreen
+ * plane so let's not advertise modes that are
+ * too big for that.
+ */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ plane_width_max = 5120;
+ plane_height_max = 4320;
+ } else {
+ plane_width_max = 5120;
+ plane_height_max = 4096;
+ }
+
+ if (mode->hdisplay > plane_width_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > plane_height_max)
+ return MODE_V_ILLEGAL;
+
return MODE_OK;
}
@@ -15925,47 +16606,17 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
}
if (INTEL_GEN(dev_priv) >= 9)
- dev_priv->display.update_crtcs = skl_update_crtcs;
- else
- dev_priv->display.update_crtcs = intel_update_crtcs;
-}
-
-static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return VLV_VGACNTRL;
- else if (INTEL_GEN(dev_priv) >= 5)
- return CPU_VGACNTRL;
+ dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
else
- return VGACNTRL;
-}
-
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- u8 sr1;
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
+ dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
- /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(SR01, VGA_SR_INDEX);
- sr1 = inb(VGA_SR_DATA);
- outb(sr1 | 1<<5, VGA_SR_DATA);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- udelay(300);
-
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
}
-void intel_modeset_init_hw(struct drm_device *dev)
+void intel_modeset_init_hw(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
- dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
+ intel_update_cdclk(i915);
+ intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
+ i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
/*
@@ -16125,114 +16776,111 @@ out:
return ret;
}
-int intel_modeset_init(struct drm_device *dev)
+static void intel_mode_config_init(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe;
- struct intel_crtc *crtc;
- int ret;
+ struct drm_mode_config *mode_config = &i915->drm.mode_config;
- dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+ drm_mode_config_init(&i915->drm);
- drm_mode_config_init(dev);
+ mode_config->min_width = 0;
+ mode_config->min_height = 0;
- ret = intel_bw_init(dev_priv);
- if (ret)
- return ret;
-
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.prefer_shadow = 1;
-
- dev->mode_config.allow_fb_modifiers = true;
-
- dev->mode_config.funcs = &intel_mode_funcs;
-
- init_llist_head(&dev_priv->atomic_helper.free_list);
- INIT_WORK(&dev_priv->atomic_helper.free_work,
- intel_atomic_helper_free_state_worker);
+ mode_config->preferred_depth = 24;
+ mode_config->prefer_shadow = 1;
- intel_init_quirks(dev_priv);
+ mode_config->allow_fb_modifiers = true;
- intel_fbc_init(dev_priv);
-
- intel_init_pm(dev_priv);
-
- /*
- * There may be no VBT; and if the BIOS enabled SSC we can
- * just keep using it to avoid unnecessary flicker. Whereas if the
- * BIOS isn't using it, don't assume it will work even if the VBT
- * indicates as much.
- */
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
- DREF_SSC1_ENABLE);
-
- if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
- bios_lvds_use_ssc ? "en" : "dis",
- dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
- dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
- }
- }
+ mode_config->funcs = &intel_mode_funcs;
/*
* Maximum framebuffer dimensions, chosen to match
* the maximum render engine surface size on gen4+.
*/
- if (INTEL_GEN(dev_priv) >= 7) {
- dev->mode_config.max_width = 16384;
- dev->mode_config.max_height = 16384;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- } else if (IS_GEN(dev_priv, 3)) {
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
+ if (INTEL_GEN(i915) >= 7) {
+ mode_config->max_width = 16384;
+ mode_config->max_height = 16384;
+ } else if (INTEL_GEN(i915) >= 4) {
+ mode_config->max_width = 8192;
+ mode_config->max_height = 8192;
+ } else if (IS_GEN(i915, 3)) {
+ mode_config->max_width = 4096;
+ mode_config->max_height = 4096;
} else {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ mode_config->max_width = 2048;
+ mode_config->max_height = 2048;
}
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
- dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN(dev_priv, 2)) {
- dev->mode_config.cursor_width = 64;
- dev->mode_config.cursor_height = 64;
+ if (IS_I845G(i915) || IS_I865G(i915)) {
+ mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ mode_config->cursor_height = 1023;
+ } else if (IS_GEN(i915, 2)) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
} else {
- dev->mode_config.cursor_width = 256;
- dev->mode_config.cursor_height = 256;
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
}
+}
- DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev_priv)->num_pipes,
- INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
- for_each_pipe(dev_priv, pipe) {
- ret = intel_crtc_init(dev_priv, pipe);
- if (ret) {
- drm_mode_config_cleanup(dev);
- return ret;
+ i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+
+ intel_mode_config_init(i915);
+
+ ret = intel_bw_init(i915);
+ if (ret)
+ return ret;
+
+ init_llist_head(&i915->atomic_helper.free_list);
+ INIT_WORK(&i915->atomic_helper.free_work,
+ intel_atomic_helper_free_state_worker);
+
+ intel_init_quirks(i915);
+
+ intel_fbc_init(i915);
+
+ intel_init_pm(i915);
+
+ intel_panel_sanitize_ssc(i915);
+
+ intel_gmbus_setup(i915);
+
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+
+ if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ for_each_pipe(i915, pipe) {
+ ret = intel_crtc_init(i915, pipe);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
}
}
intel_shared_dpll_init(dev);
- intel_update_fdi_pll_freq(dev_priv);
+ intel_update_fdi_pll_freq(i915);
- intel_update_czclk(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_update_czclk(i915);
+ intel_modeset_init_hw(i915);
- intel_hdcp_component_init(dev_priv);
+ intel_hdcp_component_init(i915);
- if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev_priv);
+ if (i915->max_cdclk_freq == 0)
+ intel_update_max_cdclk(i915);
/* Just disable it once at startup */
- i915_disable_vga(dev_priv);
- intel_setup_outputs(dev_priv);
+ intel_vga_disable(i915);
+ intel_setup_outputs(i915);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
@@ -16251,8 +16899,7 @@ int intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- dev_priv->display.get_initial_plane_config(crtc,
- &plane_config);
+ i915->display.get_initial_plane_config(crtc, &plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -16266,7 +16913,7 @@ int intel_modeset_init(struct drm_device *dev)
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(i915))
sanitize_watermarks(dev);
/*
@@ -16591,39 +17238,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
icl_sanitize_encoder_pll_mapping(encoder);
}
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
-{
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
-
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev_priv);
- }
-}
-
-void i915_redisable_vga(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- /*
- * This function can be called either from intel_modeset_setup_hw_state or
- * at a very early point in our resume sequence, where the power well
- * structures are not yet restored. Since this function is at a very
- * paranoid "someone might have enabled VGA while we were not looking"
- * level, just check if the power well is enabled instead of trying to
- * follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses.
- */
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_VGA);
- if (!wakeref)
- return;
-
- i915_redisable_vga_power_on(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
-}
-
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
@@ -16667,7 +17281,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct drm_connector_list_iter conn_iter;
int i;
- dev_priv->active_crtcs = 0;
+ dev_priv->active_pipes = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -16684,7 +17298,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = crtc_state->base.active;
if (crtc_state->base.active)
- dev_priv->active_crtcs |= 1 << crtc->pipe;
+ dev_priv->active_pipes |= BIT(crtc->pipe);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
@@ -16744,24 +17358,28 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->get_hw_state(connector)) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
connector->base.dpms = DRM_MODE_DPMS_ON;
encoder = connector->encoder;
connector->base.encoder = &encoder->base;
- if (encoder->base.crtc &&
- encoder->base.crtc->state->active) {
+ crtc = to_intel_crtc(encoder->base.crtc);
+ crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
+
+ if (crtc_state && crtc_state->base.active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
- encoder->base.crtc->state->connector_mask |=
+ crtc_state->base.connector_mask |=
drm_connector_mask(&connector->base);
- encoder->base.crtc->state->encoder_mask |=
+ crtc_state->base.encoder_mask |=
drm_encoder_mask(&encoder->base);
}
-
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
@@ -16780,13 +17398,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane *plane;
int min_cdclk = 0;
- memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc_state->base.active) {
- intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
- crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
- crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
- intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+ struct drm_display_mode mode;
+
+ intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode,
+ crtc_state);
+
+ mode = crtc_state->base.adjusted_mode;
+ mode.hdisplay = crtc_state->pipe_src_w;
+ mode.vdisplay = crtc_state->pipe_src_h;
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode));
/*
* The initial mode needs to be set in order to keep
@@ -16801,21 +17422,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_crtc_compute_pixel_rate(crtc_state);
- if (dev_priv->display.modeset_calc_cdclk) {
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (WARN_ON(min_cdclk < 0))
- min_cdclk = 0;
- }
-
- drm_calc_timestamping_constants(&crtc->base,
- &crtc_state->base.adjusted_mode);
- update_scanline_offset(crtc_state);
+ intel_crtc_update_active_timings(crtc_state);
}
- dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
- dev_priv->min_voltage_level[crtc->pipe] =
- crtc_state->min_voltage_level;
-
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -16827,8 +17436,34 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (plane_state->base.visible)
crtc_state->data_rate[plane->id] =
4 * crtc_state->pixel_rate;
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use plane->min_cdclk() :(
+ */
+ if (plane_state->base.visible && plane->min_cdclk) {
+ if (crtc_state->double_wide ||
+ INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ crtc_state->min_cdclk[plane->id] =
+ DIV_ROUND_UP(crtc_state->pixel_rate, 2);
+ else
+ crtc_state->min_cdclk[plane->id] =
+ crtc_state->pixel_rate;
+ }
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
+ }
+
+ if (crtc_state->base.active) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (WARN_ON(min_cdclk < 0))
+ min_cdclk = 0;
}
+ dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
+ dev_priv->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
+
intel_bw_crtc_update(bw_state, crtc_state);
intel_pipe_config_sanity_check(dev_priv, crtc_state);
@@ -17069,13 +17704,13 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-static void intel_hpd_poll_fini(struct drm_device *dev)
+static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
@@ -17087,78 +17722,49 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_modeset_driver_remove(struct drm_device *dev)
+void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_workqueue(i915->flip_wq);
+ flush_workqueue(i915->modeset_wq);
- flush_workqueue(dev_priv->modeset_wq);
-
- flush_work(&dev_priv->atomic_helper.free_work);
- WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+ flush_work(&i915->atomic_helper.free_work);
+ WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
/*
* Interrupts and polling as the first thing to avoid creating havoc.
* Too much stuff here (turning off connectors, ...) would
* experience fancy races otherwise.
*/
- intel_irq_uninstall(dev_priv);
+ intel_irq_uninstall(i915);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(dev);
+ intel_hpd_poll_fini(i915);
/* poll work can call into fbdev, hence clean that up afterwards */
- intel_fbdev_fini(dev_priv);
+ intel_fbdev_fini(i915);
intel_unregister_dsm_handler();
- intel_fbc_global_disable(dev_priv);
+ intel_fbc_global_disable(i915);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- intel_hdcp_component_fini(dev_priv);
-
- drm_mode_config_cleanup(dev);
+ intel_hdcp_component_fini(i915);
- intel_overlay_cleanup(dev_priv);
+ drm_mode_config_cleanup(&i915->drm);
- intel_gmbus_teardown(dev_priv);
+ intel_overlay_cleanup(i915);
- destroy_workqueue(dev_priv->modeset_wq);
+ intel_gmbus_teardown(i915);
- intel_fbc_cleanup_cfb(dev_priv);
-}
+ destroy_workqueue(i915->flip_wq);
+ destroy_workqueue(i915->modeset_wq);
-/*
- * set vga decode state - true == enable VGA decode
- */
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
-{
- unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
-
- if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
- DRM_ERROR("failed to read control word\n");
- return -EIO;
- }
-
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
- return 0;
-
- if (state)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
- else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-
- if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
- DRM_ERROR("failed to write control word\n");
- return -EIO;
- }
-
- return 0;
+ intel_fbc_cleanup_cfb(i915);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -17221,7 +17827,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
@@ -17300,7 +17906,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
+ err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 01fa87ad3270..f417e0948001 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006-2017 Intel Corporation
+ * Copyright © 2006-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,8 +32,10 @@ enum link_m_n_set;
struct dpll;
struct drm_connector;
struct drm_device;
+struct drm_display_mode;
struct drm_encoder;
struct drm_file;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
@@ -52,6 +54,7 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
+struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -91,6 +94,7 @@ enum pipe {
#define pipe_name(p) ((p) + 'A')
enum transcoder {
+ INVALID_TRANSCODER = -1,
/*
* The following transcoders have a 1:1 transcoder -> pipe mapping,
* keep their values fixed: the code assumes that TRANSCODER_A=0, the
@@ -182,6 +186,24 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
/*
* Ports identifier referenced from other drivers.
* Expected to remain stable over time
@@ -251,6 +273,7 @@ enum aux_ch {
AUX_CH_D,
AUX_CH_E, /* ICL+ */
AUX_CH_F,
+ AUX_CH_G,
};
#define aux_ch_name(a) ((a) + 'A')
@@ -289,10 +312,10 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+ for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
for_each_if((__mask) & BIT(__p))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -330,7 +353,7 @@ enum phy_fia {
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if((plane_mask) & \
- drm_plane_mask(&intel_plane->base)))
+ drm_plane_mask(&intel_plane->base))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
@@ -411,6 +434,23 @@ enum phy_fia {
(__i)++) \
for_each_if(crtc)
+#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
+ (__i) >= 0 && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)--) \
+ for_each_if(crtc)
+
+#define intel_atomic_crtc_state_for_each_plane_state( \
+ plane, plane_state, \
+ crtc_state) \
+ for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \
+ ((crtc_state)->base.plane_mask)) \
+ for_each_if ((plane_state = \
+ to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base))))
+
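A hypothetical usage fragment for the new plane-state iterator above (it will not build outside the driver; the crtc_state variable is assumed to exist in the caller): walk every plane attached to a crtc_state and inspect its current state.

	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;

	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
						     crtc_state) {
		if (plane_state->base.visible)
			DRM_DEBUG_KMS("[PLANE:%d:%s] visible\n",
				      plane->base.base.id, plane->base.name);
	}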
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
@@ -420,7 +460,11 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -464,7 +508,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -499,8 +542,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
@@ -520,8 +561,6 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
@@ -544,13 +583,10 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
/* modesetting */
-void intel_modeset_init_hw(struct drm_device *dev);
-int intel_modeset_init(struct drm_device *dev);
-void intel_modeset_driver_remove(struct drm_device *dev);
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
+void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init(struct drm_i915_private *i915);
+void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
-void i915_redisable_vga(struct drm_i915_private *dev_priv);
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
/* modesetting asserts */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c002f234ff31..ce1b64f4dd44 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,8 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
-#include <linux/vgaarb.h>
-
#include "display/intel_crt.h"
#include "display/intel_dp.h"
@@ -19,16 +17,14 @@
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"
+#include "intel_vga.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
const char *
-intel_display_power_domain_str(struct drm_i915_private *i915,
- enum intel_display_power_domain domain)
+intel_display_power_domain_str(enum intel_display_power_domain domain)
{
- bool ddi_tc_ports = IS_GEN(i915, 12);
-
switch (domain) {
case POWER_DOMAIN_DISPLAY_CORE:
return "DISPLAY_CORE";
@@ -71,23 +67,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_PORT_DDI_C_LANES:
return "PORT_DDI_C_LANES";
case POWER_DOMAIN_PORT_DDI_D_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES !=
- POWER_DOMAIN_PORT_DDI_TC1_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES";
+ return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES !=
- POWER_DOMAIN_PORT_DDI_TC2_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES";
+ return "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_F_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES !=
- POWER_DOMAIN_PORT_DDI_TC3_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES";
- case POWER_DOMAIN_PORT_DDI_TC4_LANES:
- return "PORT_DDI_TC4_LANES";
- case POWER_DOMAIN_PORT_DDI_TC5_LANES:
- return "PORT_DDI_TC5_LANES";
- case POWER_DOMAIN_PORT_DDI_TC6_LANES:
- return "PORT_DDI_TC6_LANES";
+ return "PORT_DDI_F_LANES";
+ case POWER_DOMAIN_PORT_DDI_G_LANES:
+ return "PORT_DDI_G_LANES";
+ case POWER_DOMAIN_PORT_DDI_H_LANES:
+ return "PORT_DDI_H_LANES";
+ case POWER_DOMAIN_PORT_DDI_I_LANES:
+ return "PORT_DDI_I_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
@@ -95,23 +85,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_PORT_DDI_C_IO:
return "PORT_DDI_C_IO";
case POWER_DOMAIN_PORT_DDI_D_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO !=
- POWER_DOMAIN_PORT_DDI_TC1_IO);
- return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO";
+ return "PORT_DDI_D_IO";
case POWER_DOMAIN_PORT_DDI_E_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO !=
- POWER_DOMAIN_PORT_DDI_TC2_IO);
- return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO";
+ return "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DDI_F_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO !=
- POWER_DOMAIN_PORT_DDI_TC3_IO);
- return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO";
- case POWER_DOMAIN_PORT_DDI_TC4_IO:
- return "PORT_DDI_TC4_IO";
- case POWER_DOMAIN_PORT_DDI_TC5_IO:
- return "PORT_DDI_TC5_IO";
- case POWER_DOMAIN_PORT_DDI_TC6_IO:
- return "PORT_DDI_TC6_IO";
+ return "PORT_DDI_F_IO";
+ case POWER_DOMAIN_PORT_DDI_G_IO:
+ return "PORT_DDI_G_IO";
+ case POWER_DOMAIN_PORT_DDI_H_IO:
+ return "PORT_DDI_H_IO";
+ case POWER_DOMAIN_PORT_DDI_I_IO:
+ return "PORT_DDI_I_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -129,34 +113,33 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_AUX_C:
return "AUX_C";
case POWER_DOMAIN_AUX_D:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1);
- return ddi_tc_ports ? "AUX_TC1" : "AUX_D";
+ return "AUX_D";
case POWER_DOMAIN_AUX_E:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2);
- return ddi_tc_ports ? "AUX_TC2" : "AUX_E";
+ return "AUX_E";
case POWER_DOMAIN_AUX_F:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3);
- return ddi_tc_ports ? "AUX_TC3" : "AUX_F";
- case POWER_DOMAIN_AUX_TC4:
- return "AUX_TC4";
- case POWER_DOMAIN_AUX_TC5:
- return "AUX_TC5";
- case POWER_DOMAIN_AUX_TC6:
- return "AUX_TC6";
+ return "AUX_F";
+ case POWER_DOMAIN_AUX_G:
+ return "AUX_G";
+ case POWER_DOMAIN_AUX_H:
+ return "AUX_H";
+ case POWER_DOMAIN_AUX_I:
+ return "AUX_I";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
- case POWER_DOMAIN_AUX_TBT1:
- return "AUX_TBT1";
- case POWER_DOMAIN_AUX_TBT2:
- return "AUX_TBT2";
- case POWER_DOMAIN_AUX_TBT3:
- return "AUX_TBT3";
- case POWER_DOMAIN_AUX_TBT4:
- return "AUX_TBT4";
- case POWER_DOMAIN_AUX_TBT5:
- return "AUX_TBT5";
- case POWER_DOMAIN_AUX_TBT6:
- return "AUX_TBT6";
+ case POWER_DOMAIN_AUX_C_TBT:
+ return "AUX_C_TBT";
+ case POWER_DOMAIN_AUX_D_TBT:
+ return "AUX_D_TBT";
+ case POWER_DOMAIN_AUX_E_TBT:
+ return "AUX_E_TBT";
+ case POWER_DOMAIN_AUX_F_TBT:
+ return "AUX_F_TBT";
+ case POWER_DOMAIN_AUX_G_TBT:
+ return "AUX_G_TBT";
+ case POWER_DOMAIN_AUX_H_TBT:
+ return "AUX_H_TBT";
+ case POWER_DOMAIN_AUX_I_TBT:
+ return "AUX_I_TBT";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -283,23 +266,8 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 irq_pipe_mask, bool has_vga)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
- */
- if (has_vga) {
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- }
+ if (has_vga)
+ intel_vga_reset_io_mem(dev_priv);
if (irq_pipe_mask)
gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
@@ -578,6 +546,8 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#endif
+#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
@@ -594,6 +564,17 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
+
+ if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
+ enum tc_port tc_port;
+
+ tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
+ DKL_CMN_UC_DW27_UC_HEALTH, 1))
+ DRM_WARN("Timeout waiting TC uC health\n");
+ }
}
static void
@@ -714,7 +695,11 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (INTEL_GEN(dev_priv) >= 11)
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
+ | DC_STATE_EN_DC9;
+ else if (IS_GEN(dev_priv, 11))
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
else if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
@@ -784,6 +769,52 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
dev_priv->csr.dc_state = val & mask;
}
+static u32
+sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 target_dc_state)
+{
+ u32 states[] = {
+ DC_STATE_EN_UPTO_DC6,
+ DC_STATE_EN_UPTO_DC5,
+ DC_STATE_EN_DC3CO,
+ DC_STATE_DISABLE,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+ if (target_dc_state != states[i])
+ continue;
+
+ if (dev_priv->csr.allowed_dc_mask & target_dc_state)
+ break;
+
+ target_dc_state = states[i + 1];
+ }
+
+ return target_dc_state;
+}
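
sanitize_target_dc_state() above walks from the deepest DC states down to DC_STATE_DISABLE and falls back to the next shallower entry whenever the requested state is not in csr.allowed_dc_mask. A minimal standalone sketch of the same fallback, using hypothetical bit values in place of the driver's DC_STATE_* defines:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's DC_STATE_* bits. */
#define DC_STATE_DISABLE	0x0
#define DC_STATE_EN_DC3CO	0x1
#define DC_STATE_EN_UPTO_DC5	0x2
#define DC_STATE_EN_UPTO_DC6	0x4

static unsigned int sanitize(unsigned int allowed, unsigned int target)
{
	const unsigned int states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < (int)(sizeof(states) / sizeof(states[0])) - 1; i++) {
		if (target != states[i])
			continue;
		if (allowed & target)
			break;
		/* Not allowed: fall back to the next shallower state. */
		target = states[i + 1];
	}

	return target;
}

int main(void)
{
	/* DC6 requested but only DC5 allowed -> falls back to DC5 (0x2). */
	printf("%#x\n", sanitize(DC_STATE_EN_UPTO_DC5, DC_STATE_EN_UPTO_DC6));
	return 0;
}
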
+
+static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ DRM_DEBUG_KMS("Enabling DC3CO\n");
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Disabling DC3CO\n");
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_DC3CO_STATUS;
+ I915_WRITE(DC_STATE_EN, val);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ /*
+	 * Delay of 200us for DC3CO exit time, per B.Spec 49196
+ */
+ usleep_range(200, 210);
+}
+
static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
@@ -839,6 +870,51 @@ lookup_power_well(struct drm_i915_private *dev_priv,
return &dev_priv->power_domains.power_wells[0];
}
+/**
+ * intel_display_power_set_target_dc_state - Set target dc state.
+ * @dev_priv: i915 device
+ * @state: state which needs to be set as target_dc_state.
+ *
+ * This function sets the "DC off" power well target_dc_state;
+ * based upon this target_dc_state, the "DC off" power well will
+ * enable the desired DC state.
+ */
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ struct i915_power_well *power_well;
+ bool dc_off_enabled;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
+
+ if (WARN_ON(!power_well))
+ goto unlock;
+
+ state = sanitize_target_dc_state(dev_priv, state);
+
+ if (state == dev_priv->csr.target_dc_state)
+ goto unlock;
+
+ dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
+ power_well);
+ /*
+	 * If the DC off power well is disabled, it needs to be enabled and
+	 * then disabled again for the target DC state to take effect.
+ */
+ if (!dc_off_enabled)
+ power_well->desc->ops->enable(dev_priv, power_well);
+
+ dev_priv->csr.target_dc_state = state;
+
+ if (!dc_off_enabled)
+ power_well->desc->ops->disable(dev_priv, power_well);
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+}
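
The setter above only takes effect the next time the "DC off" power well is disabled, which is why it briefly enables and then disables the well when the well is currently off. A hedged sketch of how a caller might switch the target between DC3CO and the default DC6; the wrapper names are illustrative and not part of this patch, and the snippet relies on driver internals so it is not standalone:

/* Illustrative only: switch the target DC state around a feature toggle. */
static void example_enter_dc3co_mode(struct drm_i915_private *dev_priv)
{
	/* Ask the "DC off" well to program DC3CO on its next disable. */
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void example_exit_dc3co_mode(struct drm_i915_private *dev_priv)
{
	/* Restore the default deep-state target (sanitized against the allowed mask). */
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
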
+
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
@@ -951,7 +1027,8 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
+ return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
@@ -967,6 +1044,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = {};
+ if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
+ tgl_disable_dc3co(dev_priv);
+ return;
+ }
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
@@ -999,10 +1081,17 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
if (!dev_priv->csr.dmc_payload)
return;
- if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
+ switch (dev_priv->csr.target_dc_state) {
+ case DC_STATE_EN_DC3CO:
+ tgl_enable_dc3co(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC6:
skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
+ break;
+ case DC_STATE_EN_UPTO_DC5:
gen9_enable_dc5(dev_priv);
+ break;
+ }
}
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
@@ -1208,7 +1297,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(dev_priv);
+ intel_vga_redisable_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
@@ -1718,15 +1807,12 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
- struct drm_i915_private *i915 =
- container_of(power_domains, struct drm_i915_private,
- power_domains);
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(i915, domain),
+ intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
@@ -1896,7 +1982,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
- const char *name = intel_display_power_domain_str(dev_priv, domain);
+ const char *name = intel_display_power_domain_str(domain);
power_domains = &dev_priv->power_domains;
@@ -2487,10 +2573,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUX_E) | \
BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2530,22 +2616,22 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1))
-#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2))
-#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3))
-#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
+#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
+#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
+#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_PW_5_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_D) | \
@@ -2565,24 +2651,24 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2596,37 +2682,54 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
TGL_PW_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
-#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO))
-#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO))
-#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO))
-#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO))
-#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO))
-#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO))
-
-#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC1))
-#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC2))
-#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC3))
-#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC4))
-#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC5))
-#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC6))
-#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5))
-#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6))
+#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
+#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
+#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
+
+#define TGL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define TGL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define TGL_AUX_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_G))
+#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_H))
+#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_I))
+#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
+#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
+#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
+#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
+#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
+#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
@@ -2938,7 +3041,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3020,7 +3123,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3080,7 +3183,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
.name = "DC off",
.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3249,7 +3352,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
.name = "DC off",
.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3377,7 +3480,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
.name = "DC off",
.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3484,8 +3587,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .name = "AUX C TC1",
+ .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3495,8 +3598,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX D",
- .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .name = "AUX D TC2",
+ .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3506,8 +3609,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX E",
- .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .name = "AUX E TC3",
+ .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3517,8 +3620,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX F",
- .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .name = "AUX F TC4",
+ .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3528,8 +3631,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX C TBT1",
+ .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3539,8 +3642,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX D TBT2",
+ .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3550,8 +3653,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX E TBT3",
+ .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3561,8 +3664,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX F TBT4",
+ .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3610,7 +3713,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "DC off",
.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3667,8 +3770,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
}
},
{
- .name = "DDI TC1 IO",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
+ .name = "DDI D TC1 IO",
+ .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3677,8 +3780,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC2 IO",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
+ .name = "DDI E TC2 IO",
+ .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3687,8 +3790,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC3 IO",
- .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
+ .name = "DDI F TC3 IO",
+ .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3697,8 +3800,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC4 IO",
- .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
+ .name = "DDI G TC4 IO",
+ .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3707,8 +3810,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC5 IO",
- .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
+ .name = "DDI H TC5 IO",
+ .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3717,8 +3820,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC6 IO",
- .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
+ .name = "DDI I TC6 IO",
+ .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3728,7 +3831,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3738,7 +3841,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_B_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3748,7 +3851,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_C_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3757,8 +3860,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC1",
- .domains = TGL_AUX_TC1_IO_POWER_DOMAINS,
+ .name = "AUX D TC1",
+ .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3768,8 +3871,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC2",
- .domains = TGL_AUX_TC2_IO_POWER_DOMAINS,
+ .name = "AUX E TC2",
+ .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3779,8 +3882,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC3",
- .domains = TGL_AUX_TC3_IO_POWER_DOMAINS,
+ .name = "AUX F TC3",
+ .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3790,8 +3893,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC4",
- .domains = TGL_AUX_TC4_IO_POWER_DOMAINS,
+ .name = "AUX G TC4",
+ .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3801,8 +3904,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC5",
- .domains = TGL_AUX_TC5_IO_POWER_DOMAINS,
+ .name = "AUX H TC5",
+ .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3812,8 +3915,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC6",
- .domains = TGL_AUX_TC6_IO_POWER_DOMAINS,
+ .name = "AUX I TC6",
+ .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3823,8 +3926,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX D TBT1",
+ .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3834,8 +3937,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX E TBT2",
+ .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3845,8 +3948,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX F TBT3",
+ .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3856,8 +3959,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX G TBT4",
+ .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3867,8 +3970,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT5",
- .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS,
+ .name = "AUX H TBT5",
+ .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3878,8 +3981,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT6",
- .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS,
+ .name = "AUX I TBT6",
+ .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3931,14 +4034,17 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (INTEL_GEN(dev_priv) >= 11) {
- max_dc = 2;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ max_dc = 4;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN(dev_priv, 11)) {
+ max_dc = 2;
+ mask = DC_STATE_EN_DC9;
} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
max_dc = 2;
mask = 0;
@@ -3957,7 +4063,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
requested_dc = enable_dc;
} else if (enable_dc == -1) {
requested_dc = max_dc;
- } else if (enable_dc > max_dc && enable_dc <= 2) {
+ } else if (enable_dc > max_dc && enable_dc <= 4) {
DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
@@ -3966,10 +4072,20 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
requested_dc = max_dc;
}
- if (requested_dc > 1)
+ switch (requested_dc) {
+ case 4:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
+ break;
+ case 3:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
+ break;
+ case 2:
mask |= DC_STATE_EN_UPTO_DC6;
- if (requested_dc > 0)
+ break;
+ case 1:
mask |= DC_STATE_EN_UPTO_DC5;
+ break;
+ }
DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
@@ -4030,6 +4146,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->csr.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+ dev_priv->csr.target_dc_state =
+ sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
mutex_init(&power_domains->lock);
@@ -5107,8 +5226,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_domain(domain, power_well->desc->domains)
DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(i915,
- domain),
+ intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index a50605b8b1ad..1da04f3e0fb3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -36,29 +36,20 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_TC4_LANES,
- POWER_DOMAIN_PORT_DDI_TC5_LANES,
- POWER_DOMAIN_PORT_DDI_TC6_LANES,
+ POWER_DOMAIN_PORT_DDI_G_LANES,
+ POWER_DOMAIN_PORT_DDI_H_LANES,
+ POWER_DOMAIN_PORT_DDI_I_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_F_IO,
- POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO,
POWER_DOMAIN_PORT_DDI_G_IO,
- POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_H_IO,
- POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_I_IO,
- POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -68,21 +59,19 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
- POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_TC4,
- POWER_DOMAIN_AUX_TC5,
- POWER_DOMAIN_AUX_TC6,
+ POWER_DOMAIN_AUX_G,
+ POWER_DOMAIN_AUX_H,
+ POWER_DOMAIN_AUX_I,
POWER_DOMAIN_AUX_IO_A,
- POWER_DOMAIN_AUX_TBT1,
- POWER_DOMAIN_AUX_TBT2,
- POWER_DOMAIN_AUX_TBT3,
- POWER_DOMAIN_AUX_TBT4,
- POWER_DOMAIN_AUX_TBT5,
- POWER_DOMAIN_AUX_TBT6,
+ POWER_DOMAIN_AUX_C_TBT,
+ POWER_DOMAIN_AUX_D_TBT,
+ POWER_DOMAIN_AUX_E_TBT,
+ POWER_DOMAIN_AUX_F_TBT,
+ POWER_DOMAIN_AUX_G_TBT,
+ POWER_DOMAIN_AUX_H_TBT,
+ POWER_DOMAIN_AUX_I_TBT,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
@@ -111,6 +100,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
+ SKL_DISP_DC_OFF,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
@@ -267,10 +257,11 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915);
void intel_display_power_resume_early(struct drm_i915_private *i915);
void intel_display_power_suspend(struct drm_i915_private *i915);
void intel_display_power_resume(struct drm_i915_private *i915);
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state);
const char *
-intel_display_power_domain_str(struct drm_i915_private *i915,
- enum intel_display_power_domain domain);
+intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 4075b0387c87..1a7334dbe802 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -128,7 +128,8 @@ struct intel_encoder {
enum intel_output_type type;
enum port port;
- unsigned int cloneable;
+ u16 cloneable;
+ u8 pipe_mask;
enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received);
@@ -187,7 +188,6 @@ struct intel_encoder {
* device interrupts are disabled.
*/
void (*suspend)(struct intel_encoder *);
- int crtc_mask;
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@@ -388,6 +388,13 @@ struct intel_hdcp {
wait_queue_head_t cp_irq_queue;
atomic_t cp_irq_count;
int cp_irq_count_cached;
+
+ /*
+	 * HDCP register access for gen12+ needs the associated transcoder.
+	 * The transcoder attached to the connector could change at modeset,
+	 * hence cache the transcoder here.
+ */
+ enum transcoder cpu_transcoder;
};
struct intel_connector {
@@ -481,9 +488,9 @@ struct intel_atomic_state {
* but the converse is not necessarily true; simply changing a mode may
* not flip the final active status of any CRTC's
*/
- unsigned int active_pipe_changes;
+ u8 active_pipe_changes;
- unsigned int active_crtcs;
+ u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -499,6 +506,14 @@ struct intel_atomic_state {
bool rps_interactive;
+ /*
+ * active_pipes
+ * min_cdclk[]
+ * min_voltage_level[]
+ * cdclk.*
+ */
+ bool global_state_changed;
+
/* Gen9+ only */
struct skl_ddb_values wm_results;
@@ -552,24 +567,24 @@ struct intel_plane_state {
int scaler_id;
/*
- * linked_plane:
+ * planar_linked_plane:
*
* ICL planar formats require 2 planes that are updated as pairs.
* This member is used to make sure the other plane is also updated
* when required, and for update_slave() to find the correct
* plane_state to pass as argument.
*/
- struct intel_plane *linked_plane;
+ struct intel_plane *planar_linked_plane;
/*
- * slave:
+ * planar_slave:
* If set don't update use the linked plane's state for updating
* this plane during atomic commit with the update_slave() callback.
*
* It's also used by the watermark code to ignore wm calculations on
* this plane. They're calculated by the linked plane's wm code.
*/
- u32 slave;
+ u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
};
@@ -759,7 +774,6 @@ struct intel_crtc_state {
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
- bool fb_changed; /* fb on any of the planes is changed */
bool fifo_changed; /* FIFO split is changed */
bool preload_luts;
@@ -865,6 +879,7 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
+ u32 dc3co_exitline;
/*
* Frequence the dpll for the port should run at. Differs from the
@@ -926,6 +941,8 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
+ int min_cdclk[I915_MAX_PLANES];
+
u32 data_rate[I915_MAX_PLANES];
/* Gamma mode programmed on the pipe */
@@ -980,11 +997,17 @@ struct intel_crtc_state {
bool dsc_split;
u16 compressed_bpp;
u8 slice_count;
- } dsc_params;
- struct drm_dsc_config dp_dsc_cfg;
+ struct drm_dsc_config config;
+ } dsc;
/* Forward Error correction State */
bool fec_enable;
+
+ /* Pointer to master transcoder in case of tiled displays */
+ enum transcoder master_transcoder;
+
+ /* Bitmask to indicate slaves attached */
+ u8 sync_mode_slaves_mask;
};
struct intel_crtc {
@@ -1027,6 +1050,9 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
+
+ /* per pipe DSB related info */
+ struct intel_dsb dsb;
};
struct intel_plane {
@@ -1062,6 +1088,8 @@ struct intel_plane {
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
+ int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
};
struct intel_watermark_params {
@@ -1177,6 +1205,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
+ u32 aux_busy_last_status;
u8 train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -1212,6 +1241,15 @@ struct intel_dp {
bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
+
+ /*
+	 * DP_TP_* registers may live in either the port or the transcoder register space.
+ */
+ struct {
+ i915_reg_t dp_tp_ctl;
+ i915_reg_t dp_tp_status;
+ } regs;
+
/* connector directly attached - won't be use for modeset in mst world */
struct intel_connector *attached_connector;
@@ -1270,6 +1308,7 @@ struct intel_digital_port {
char tc_port_name[8];
enum tc_port_mode tc_mode;
enum phy_fia tc_phy_fia;
+ u8 tc_phy_fia_idx;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1510,7 +1549,7 @@ intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
-intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 9b15ac4f2fb6..c61ac0c3acb5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -68,11 +68,6 @@
#define DP_DPRX_ESI_LEN 14
-/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
-#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
-#define DP_DSC_MIN_SUPPORTED_BPC 8
-#define DP_DSC_MAX_SUPPORTED_BPC 10
-
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -500,7 +495,17 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
DP_DSC_FEC_OVERHEAD_FACTOR);
}
-static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
+static int
+small_joiner_ram_size_bits(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ return 7680 * 8;
+ else
+ return 6144 * 8;
+}
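
small_joiner_ram_size_bits() feeds the check a few lines below, which caps the compressed bpp at the joiner RAM size in bits divided by the mode's horizontal width. A worked example, assuming a gen11+ part and a 3840-pixel-wide mode:

#include <stdio.h>

int main(void)
{
	/* gen11+: 7680 bytes of small-joiner RAM; 3840-wide mode (assumed). */
	unsigned int ram_bits = 7680 * 8;	/* 61440 bits */
	unsigned int hdisplay = 3840;

	/* Integer division, as in the driver: 61440 / 3840 = 16 bpp max. */
	printf("max small-joiner bpp: %u\n", ram_bits / hdisplay);
	return 0;
}
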
+
+static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay)
{
u32 bits_per_pixel, max_bpp_small_joiner_ram;
@@ -517,7 +522,8 @@ static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
+ max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
+ mode_hdisplay;
DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
/*
@@ -585,6 +591,25 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
+static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
+ int hdisplay)
+{
+ /*
+ * Older platforms don't like hdisplay==4096 with DP.
+ *
+ * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
+ * and frame counter increment), but we don't get vblank interrupts,
+ * and the pipe underruns immediately. The link also doesn't seem
+ * to get trained properly.
+ *
+ * On CHV the vblank interrupts don't seem to disappear but
+ * otherwise the symptoms are similar.
+ *
+ * TODO: confirm the behaviour on HSW+
+ */
+ return hdisplay == 4096 && !HAS_DDI(dev_priv);
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -620,6 +645,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
+ if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ return MODE_H_ILLEGAL;
+
/*
* Output bpp is stored in 6.4 format so right shift by 4 to get the
* integer value since we support only integer values of bpp.
@@ -634,7 +662,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
true);
} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(max_link_clock,
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ max_link_clock,
max_lanes,
target_clock,
mode->hdisplay) >> 4;
@@ -655,7 +684,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
@@ -732,12 +761,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
u32 DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to port %c being active\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port)))
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name))
return;
- DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -855,9 +886,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
- DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
+ DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(intel_dp->pps_pipe),
- port_name(intel_dig_port->base.port));
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -965,13 +997,16 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
- port_name(port));
+ DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return;
}
- DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
- port_name(port), pipe_name(intel_dp->pps_pipe));
+ DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@ -1144,18 +1179,20 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
u32 status;
bool done;
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
done = wait_event_timeout(i915->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(10));
+ msecs_to_jiffies_timeout(timeout_ms));
/* just trace the final value */
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (!done)
- DRM_ERROR("dp aux hw did not signal timeout!\n");
+ DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
#undef C
return status;
@@ -1338,13 +1375,12 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- static u32 last_status = -1;
const u32 status = intel_uncore_read(uncore, ch_ctl);
- if (status != last_status) {
+ if (status != intel_dp->aux_busy_last_status) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
status);
- last_status = status;
+ intel_dp->aux_busy_last_status = status;
}
ret = -EBUSY;
@@ -1636,6 +1672,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_D:
case AUX_CH_E:
case AUX_CH_F:
+ case AUX_CH_G:
return DP_AUX_CH_CTL(aux_ch);
default:
MISSING_CASE(aux_ch);
@@ -1656,6 +1693,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_D:
case AUX_CH_E:
case AUX_CH_F:
+ case AUX_CH_G:
return DP_AUX_CH_DATA(aux_ch, index);
default:
MISSING_CASE(aux_ch);
@@ -1834,8 +1872,14 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 11 &&
- pipe_config->cpu_transcoder != TRANSCODER_A;
+ /* On TGL, FEC is supported on all Pipes */
+ if (INTEL_GEN(dev_priv) >= 12)
+ return true;
+
+ if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
}
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
@@ -1850,8 +1894,18 @@ static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 10 &&
- pipe_config->cpu_transcoder != TRANSCODER_A;
+ if (!INTEL_INFO(dev_priv)->display.has_dsc)
+ return false;
+
+ /* On TGL, DSC is supported on all Pipes */
+ if (INTEL_GEN(dev_priv) >= 12)
+ return true;
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
}
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
@@ -2010,11 +2064,17 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
- dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
- conn_state->max_requested_bpc);
+ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
+ if (INTEL_GEN(dev_priv) >= 12)
+ dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
+ else
+ dsc_max_bpc = min_t(u8, 10,
+ conn_state->max_requested_bpc);
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
- if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+
+ /* Min Input BPC for ICL+ is 8 */
+ if (pipe_bpp < 8 * 3) {
DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
return -EINVAL;
}
@@ -2029,10 +2089,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->lane_count = limits->max_lane_count;
if (intel_dp_is_edp(intel_dp)) {
- pipe_config->dsc_params.compressed_bpp =
+ pipe_config->dsc.compressed_bpp =
min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
pipe_config->pipe_bpp);
- pipe_config->dsc_params.slice_count =
+ pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
} else {
@@ -2040,7 +2100,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
u8 dsc_dp_slice_count;
dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ pipe_config->port_clock,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay);
@@ -2052,10 +2113,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
return -EINVAL;
}
- pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ pipe_config->dsc.compressed_bpp = min_t(u16,
dsc_max_output_bpp >> 4,
pipe_config->pipe_bpp);
- pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ pipe_config->dsc.slice_count = dsc_dp_slice_count;
}
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
@@ -2063,8 +2124,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* then we need to use 2 VDSC instances.
*/
if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
- if (pipe_config->dsc_params.slice_count > 1) {
- pipe_config->dsc_params.dsc_split = true;
+ if (pipe_config->dsc.slice_count > 1) {
+ pipe_config->dsc.dsc_split = true;
} else {
DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
@@ -2076,16 +2137,16 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
"Compressed BPP = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp);
+ pipe_config->dsc.compressed_bpp);
return ret;
}
- pipe_config->dsc_params.compression_enable = true;
+ pipe_config->dsc.compression_enable = true;
DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
"Compressed Bpp = %d Slice Count = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp,
- pipe_config->dsc_params.slice_count);
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
return 0;
}
@@ -2159,15 +2220,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- if (pipe_config->dsc_params.compression_enable) {
+ if (pipe_config->dsc.compression_enable) {
DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp);
+ pipe_config->dsc.compressed_bpp);
DRM_DEBUG_KMS("DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc_params.compressed_bpp),
+ pipe_config->dsc.compressed_bpp),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
} else {
@@ -2222,6 +2283,16 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
* See:
@@ -2259,6 +2330,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
else
@@ -2304,6 +2376,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
+ if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
+ return -EINVAL;
+
ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
if (ret < 0)
return ret;
@@ -2311,8 +2386,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- if (pipe_config->dsc_params.compression_enable)
- output_bpp = pipe_config->dsc_params.compressed_bpp;
+ if (pipe_config->dsc.compression_enable)
+ output_bpp = pipe_config->dsc.compressed_bpp;
else
output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
@@ -2339,6 +2414,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_psr_compute_config(intel_dp, pipe_config);
+ intel_hdcp_transcoder_config(intel_connector,
+ pipe_config->cpu_transcoder);
+
return 0;
}
@@ -2366,6 +2444,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
+ intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+
/*
* There are four kinds of DP registers:
*
@@ -2567,8 +2648,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
- DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2587,8 +2669,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2613,8 +2696,9 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = false;
with_pps_lock(intel_dp, wakeref)
vdd = edp_panel_vdd_on(intel_dp);
- I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -2632,8 +2716,9 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2695,8 +2780,9 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!intel_dp_is_edp(intel_dp))
return;
- I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
intel_dp->want_panel_vdd = false;
@@ -2717,12 +2803,14 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
if (WARN(edp_have_panel_power(intel_dp),
- "eDP port %c panel power already on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port)))
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
@@ -2777,11 +2865,11 @@ static void edp_panel_off(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dig_port->base.port));
+ DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
- WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dig_port->base.port));
+ WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2926,8 +3014,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
- "DP port %c state assertion failure (expected %s, current %s)\n",
- port_name(dig_port->base.port),
+ "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -3023,7 +3111,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
{
int ret;
- if (!crtc_state->dsc_params.compression_enable)
+ if (!crtc_state->dsc.compression_enable)
return;
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
@@ -3315,7 +3403,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- u32 temp = I915_READ(DP_TP_CTL(port));
+ u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3341,7 +3429,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
- I915_WRITE(DP_TP_CTL(port), temp);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -3505,8 +3593,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
@@ -3522,17 +3611,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = encoder->port;
WARN(intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active (e)DP port %c\n",
- pipe_name(pipe), port_name(port));
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
- DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(port));
+ DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
@@ -3574,8 +3664,9 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
- DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
- pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
+ DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
+ encoder->base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -4039,22 +4130,22 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (!HAS_DDI(dev_priv))
return;
- val = I915_READ(DP_TP_CTL(port));
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
/*
- * On PORT_A we can have only eDP in SST mode. There the only reason
- * we need to set idle transmission mode is to work around a HW issue
- * where we enable the pipe while not in idle link-training mode.
+ * Until TGL on PORT_A we can have only eDP in SST mode. There the only
+ * reason we need to set idle transmission mode is to work around a HW
+ * issue where we enable the pipe while not in idle link-training mode.
* In this case there is requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
return;
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
@@ -4396,9 +4487,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
&dp_to_dig_port(intel_dp)->base;
bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
- DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
- port_name(encoder->port), yesno(intel_dp->can_mst),
- yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
@@ -4418,9 +4510,36 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
+bool
+intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ /*
+ * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
+ * of Color Encoding Format and Content Color Gamut], when sending
+ * YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ return true;
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static void
-intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct dp_sdp vsc_sdp = {};
@@ -4441,13 +4560,55 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
*/
vsc_sdp.sdp_header.HB3 = 0x13;
- /*
- * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
- * DB16[3:0] DP 1.4a spec, Table 2-120
- */
- vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/
- /* RGB->YCBCR color conversion uses the BT.709 color space. */
- vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ /* DP 1.4a spec, Table 2-120 */
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ default:
+ /* RGB: DB16[7:4] = 0h */
+ break;
+ }
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ vsc_sdp.db[16] |= 0x1;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_601:
+ vsc_sdp.db[16] |= 0x2;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_709:
+ vsc_sdp.db[16] |= 0x3;
+ break;
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ vsc_sdp.db[16] |= 0x4;
+ break;
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ vsc_sdp.db[16] |= 0x5;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ vsc_sdp.db[16] |= 0x6;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ vsc_sdp.db[16] |= 0x7;
+ break;
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
+ break;
+ default:
+ /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[3:0] = 0h */
+
+ /* RGB->YCBCR color conversion uses the BT.709 color space. */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ break;
+ }
/*
* For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
@@ -4499,13 +4660,106 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void
+intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct dp_sdp infoframe_sdp = {};
+ struct hdmi_drm_infoframe drm_infoframe = {};
+ const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
+ unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
+ ssize_t len;
+ int ret;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
+ if (ret) {
+ DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
return;
+ }
- intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
+ len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
+ if (len < 0) {
+ DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
+ return;
+ }
+
+ if (len != infoframe_size) {
+ DRM_DEBUG_KMS("wrong static hdr metadata size\n");
+ return;
+ }
+
+ /*
+ * Set up the infoframe sdp packet for HDR static metadata.
+ * Prepare VSC Header for SU as per DP 1.4a spec,
+ * Table 2-100 and Table 2-101
+ */
+
+ /* Packet ID, 00h for non-Audio INFOFRAME */
+ infoframe_sdp.sdp_header.HB0 = 0;
+ /*
+ * Packet Type 80h + Non-audio INFOFRAME Type value
+ * HDMI_INFOFRAME_TYPE_DRM: 0x87,
+ */
+ infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
+ /*
+ * Least Significant Eight Bits of (Data Byte Count - 1):
+ * infoframe_size - 1 = (HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE) - 1
+ * = (4 + 26) - 1 = 29 = 0x1D
+ */
+ infoframe_sdp.sdp_header.HB2 = 0x1D;
+ /* INFOFRAME SDP Version Number */
+ infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ infoframe_sdp.db[0] = drm_infoframe.version;
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ infoframe_sdp.db[1] = drm_infoframe.length;
+ /*
+ * Copy HDMI_DRM_INFOFRAME_SIZE bytes from the buffer, starting after
+ * HDMI_INFOFRAME_HEADER_SIZE
+ */
+ BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+ memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ /*
+ * The size of the DP infoframe SDP packet for HDR static metadata consists of
+ * - DP SDP Header(struct dp_sdp_header): 4 bytes
+ * - Two Data Blocks: 2 bytes
+ * CTA Header Byte2 (INFOFRAME Version Number)
+ * CTA Header Byte3 (Length of INFOFRAME)
+ * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
+ *
+ * Prior to GEN11, the GMP register size is identical to the DP HDR static
+ * metadata infoframe size. On GEN11+ the register is larger; write_infoframe
+ * will pad the rest of the size.
+ */
+ intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ &infoframe_sdp,
+ sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
+}
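For reference, a brief worked summary of the header the code above builds (illustrative only; the values follow directly from the constants used in this function):

/*
 * HB0 = 0x00                      packet ID for a non-audio INFOFRAME
 * HB1 = 0x87                      HDMI_INFOFRAME_TYPE_DRM
 * HB2 = 0x1D                      infoframe_size - 1 = (4 + 26) - 1 = 29
 * HB3 = 0x13 << 2 = 0x4C          INFOFRAME SDP version number
 * db[0] = drm_infoframe.version, db[1] = drm_infoframe.length (26)
 * Total bytes handed to write_infoframe: 4 (dp_sdp_header) + 2 + 26 = 32.
 */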
+
+void intel_dp_vsc_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ return;
+
+ intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
+}
+
+void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
+ crtc_state,
+ conn_state);
}
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
@@ -5227,6 +5481,9 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
{
enum port port = intel_dig_port->base.port;
+ if (HAS_PCH_MCC(dev_priv) && port == PORT_C)
+ return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
@@ -5506,7 +5763,6 @@ static int
intel_dp_connector_register(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = connector->dev;
int ret;
ret = intel_connector_register(connector);
@@ -5521,8 +5777,7 @@ intel_dp_connector_register(struct drm_connector *connector)
intel_dp->aux.dev = connector->kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
- drm_dp_cec_register_connector(&intel_dp->aux,
- connector->name, dev->dev);
+ drm_dp_cec_register_connector(&intel_dp->aux, connector);
return ret;
}
@@ -6280,13 +6535,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
* would end up in an endless cycle of
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
- DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return IRQ_HANDLED;
}
- DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
- port_name(intel_dig_port->base.port),
+ DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
long_hpd ? "long" : "short");
if (long_hpd) {
@@ -6353,6 +6610,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
+ intel_attach_colorspace_property(connector);
+
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
@@ -7150,8 +7414,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_modeset_retry_work_fn);
if (WARN(intel_dig_port->max_lanes < 1,
- "Not enough lanes (%d) for DP on port %c\n",
- intel_dig_port->max_lanes, port_name(port)))
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@@ -7192,9 +7457,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
port != PORT_B && port != PORT_C))
return false;
- DRM_DEBUG_KMS("Adding %s connector on port %c\n",
- type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- port_name(port));
+ DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ intel_encoder->base.base.id, intel_encoder->base.name);
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -7218,11 +7483,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
- (port == PORT_B || port == PORT_C ||
- port == PORT_D || port == PORT_F))
- intel_dp_mst_encoder_init(intel_dig_port,
- intel_connector->base.base.id);
+ intel_dp_mst_encoder_init(intel_dig_port,
+ intel_connector->base.base.id);
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
@@ -7313,11 +7575,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->power_domain = intel_port_to_power_domain(port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
- intel_encoder->crtc_mask = 1 << 2;
+ intel_encoder->pipe_mask = BIT(PIPE_C);
else
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = 0;
intel_encoder->port = port;
@@ -7378,7 +7640,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
if (!intel_dp->can_mst)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
+ true);
if (ret) {
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 00981fb9414b..3da166054788 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -13,6 +13,7 @@
#include "i915_reg.h"
enum pipe;
+enum port;
struct drm_connector_state;
struct drm_encoder;
struct drm_i915_private;
@@ -107,6 +108,14 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_vsc_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_digital_port_connected(struct intel_encoder *encoder);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 600873c796d0..03d1cba0b696 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -215,7 +215,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
- DRM_ERROR("failed to update payload %d\n", ret);
+ DRM_DEBUG_KMS("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
@@ -295,7 +295,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->base.port;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
@@ -326,12 +325,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = I915_READ(DP_TP_STATUS(port));
- I915_WRITE(DP_TP_STATUS(port), temp);
+ temp = I915_READ(intel_dp->regs.dp_tp_status);
+ I915_WRITE(intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
intel_ddi_enable_pipe_clock(pipe_config);
+
+ intel_ddi_set_dp_msa(pipe_config, conn_state);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -342,11 +343,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->base.port;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
@@ -393,20 +393,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
-
- if (drm_connector_is_unregistered(connector))
- return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
- intel_connector->port);
-}
-
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
- .detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -426,6 +413,7 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -453,7 +441,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode_rate > max_rate || mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@@ -466,11 +454,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
+static int
+intel_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ if (drm_connector_is_unregistered(connector))
+ return connector_status_disconnected;
+
+ return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
+ intel_connector->port);
+}
+
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
+ .detect_ctx = intel_dp_mst_detect,
};
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
@@ -615,8 +618,16 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
- intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
+ /*
+ * This is wrong, but broken userspace uses the intersection
+ * of possible_crtcs of all the encoders of a given connector
+ * to figure out which crtcs can drive said connector. What
+ * should be used instead is the union of possible_crtcs.
+ * To keep such userspace functioning we must misconfigure
+ * this to make sure the intersection is not empty :(
+ */
+ intel_encoder->pipe_mask = ~0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
@@ -653,21 +664,31 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ enum port port = intel_dig_port->base.port;
int ret;
- intel_dp->can_mst = true;
+ if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ return 0;
+
+ if (INTEL_GEN(i915) < 12 && port == PORT_A)
+ return 0;
+
+ if (INTEL_GEN(i915) < 11 && port == PORT_E)
+ return 0;
+
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3, conn_base_id);
- if (ret) {
- intel_dp->can_mst = false;
+ if (ret)
return ret;
- }
+
+ intel_dp->can_mst = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index d5a298c3c83b..3ce0a023eee0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -247,8 +247,7 @@ static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
const struct intel_dpll_hw_state *pll_state,
- enum intel_dpll_id range_min,
- enum intel_dpll_id range_max)
+ unsigned long dpll_mask)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
@@ -257,7 +256,9 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- for (i = range_min; i <= range_max; i++) {
+ WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+
+ for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
@@ -464,8 +465,8 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
} else {
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_PCH_PLL_A,
- DPLL_ID_PCH_PLL_B);
+ BIT(DPLL_ID_PCH_PLL_B) |
+ BIT(DPLL_ID_PCH_PLL_A));
}
if (!pll)
@@ -829,7 +830,8 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
if (!pll)
return NULL;
@@ -892,7 +894,7 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SPLL, DPLL_ID_SPLL);
+ BIT(DPLL_ID_SPLL));
} else {
return false;
}
@@ -1462,13 +1464,13 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL0,
- DPLL_ID_SKL_DPLL0);
+ BIT(DPLL_ID_SKL_DPLL0));
else
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL1,
- DPLL_ID_SKL_DPLL3);
+ BIT(DPLL_ID_SKL_DPLL3) |
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
return false;
@@ -2416,8 +2418,9 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL0,
- DPLL_ID_SKL_DPLL2);
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1) |
+ BIT(DPLL_ID_SKL_DPLL0));
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
return false;
@@ -2535,6 +2538,18 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
+static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
+ .dco_integer = 0x54, .dco_fraction = 0x3000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
+ .dco_integer = 0x43, .dco_fraction = 0x4000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
@@ -2562,8 +2577,34 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ switch (dev_priv->cdclk.hw.ref) {
+ default:
+ MISSING_CASE(dev_priv->cdclk.hw.ref);
+ /* fall-through */
+ case 19200:
+ case 38400:
+ *pll_params = tgl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = tgl_tbt_pll_24MHz_values;
+ break;
+ }
+ } else {
+ switch (dev_priv->cdclk.hw.ref) {
+ default:
+ MISSING_CASE(dev_priv->cdclk.hw.ref);
+ /* fall-through */
+ case 19200:
+ case 38400:
+ *pll_params = icl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = icl_tbt_pll_24MHz_values;
+ break;
+ }
+ }
+
return true;
}
@@ -2622,7 +2663,8 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
- struct intel_dpll_hw_state *state)
+ struct intel_dpll_hw_state *state,
+ bool is_dkl)
{
u32 dco_min_freq, dco_max_freq;
int div1_vals[] = {7, 5, 3, 2};
@@ -2644,8 +2686,13 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
continue;
if (div2 >= 2) {
+ /*
+ * Note: a_divratio does not match the TGL BSpec
+ * algorithm, but it matches the hardcoded values
+ * and works on HW, at least for DP alt-mode
+ */
a_divratio = is_dp ? 10 : 5;
- tlinedrv = 2;
+ tlinedrv = is_dkl ? 1 : 2;
} else {
a_divratio = 5;
tlinedrv = 0;
@@ -2708,11 +2755,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ bool is_dkl = INTEL_GEN(dev_priv) >= 12;
memset(pll_state, 0, sizeof(*pll_state));
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state)) {
+ pll_state, is_dkl)) {
DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
return false;
}
@@ -2720,8 +2768,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
m1div = 2;
m2div_int = dco_khz / (refclk_khz * m1div);
if (m2div_int > 255) {
- m1div = 4;
- m2div_int = dco_khz / (refclk_khz * m1div);
+ if (!is_dkl) {
+ m1div = 4;
+ m2div_int = dco_khz / (refclk_khz * m1div);
+ }
+
if (m2div_int > 255) {
DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
clock);
@@ -2801,60 +2852,94 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
ssc_steplog = 4;
- pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
- MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
- MG_PLL_DIV0_FBDIV_INT(m2div_int);
-
- pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
- MG_PLL_DIV1_DITHER_DIV_2 |
- MG_PLL_DIV1_NDIVRATIO(1) |
- MG_PLL_DIV1_FBPREDIV(m1div);
-
- pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
- MG_PLL_LF_AFCCNTSEL_512 |
- MG_PLL_LF_GAINCTRL(1) |
- MG_PLL_LF_INT_COEFF(int_coeff) |
- MG_PLL_LF_PROP_COEFF(prop_coeff);
-
- pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
- MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
- MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
- MG_PLL_FRAC_LOCK_DCODITHEREN |
- MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
- if (use_ssc || m2div_rem > 0)
- pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
-
- pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
- MG_PLL_SSC_TYPE(2) |
- MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
- MG_PLL_SSC_STEPNUM(ssc_steplog) |
- MG_PLL_SSC_FLLEN |
- MG_PLL_SSC_STEPSIZE(ssc_stepsize);
-
- pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
- MG_PLL_TDC_COLDST_IREFINT_EN |
- MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
- MG_PLL_TDC_TDCOVCCORR_EN |
- MG_PLL_TDC_TDCSEL(3);
-
- pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
- MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
- MG_PLL_BIAS_BIAS_BONUS(10) |
- MG_PLL_BIAS_BIASCAL_EN |
- MG_PLL_BIAS_CTRIM(12) |
- MG_PLL_BIAS_VREF_RDAC(4) |
- MG_PLL_BIAS_IREFTRIM(iref_trim);
-
- if (refclk_khz == 38400) {
- pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
- pll_state->mg_pll_bias_mask = 0;
+ /* write pll_state calculations */
+ if (is_dkl) {
+ pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
+ DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
+ DKL_PLL_DIV0_FBPREDIV(m1div) |
+ DKL_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
+ DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
+
+ pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
+ DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
+ DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
+ (use_ssc ? DKL_PLL_SSC_EN : 0);
+
+ pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
+ DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
+ DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
+
} else {
- pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
- pll_state->mg_pll_bias_mask = -1U;
- }
+ pll_state->mg_pll_div0 =
+ (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
+ MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
+ MG_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 =
+ MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
+ MG_PLL_DIV1_DITHER_DIV_2 |
+ MG_PLL_DIV1_NDIVRATIO(1) |
+ MG_PLL_DIV1_FBPREDIV(m1div);
+
+ pll_state->mg_pll_lf =
+ MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
+ MG_PLL_LF_AFCCNTSEL_512 |
+ MG_PLL_LF_GAINCTRL(1) |
+ MG_PLL_LF_INT_COEFF(int_coeff) |
+ MG_PLL_LF_PROP_COEFF(prop_coeff);
+
+ pll_state->mg_pll_frac_lock =
+ MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
+ MG_PLL_FRAC_LOCK_DCODITHEREN |
+ MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
+ if (use_ssc || m2div_rem > 0)
+ pll_state->mg_pll_frac_lock |=
+ MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
+
+ pll_state->mg_pll_ssc =
+ (use_ssc ? MG_PLL_SSC_EN : 0) |
+ MG_PLL_SSC_TYPE(2) |
+ MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
+ MG_PLL_SSC_STEPNUM(ssc_steplog) |
+ MG_PLL_SSC_FLLEN |
+ MG_PLL_SSC_STEPSIZE(ssc_stepsize);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias =
+ MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+ if (refclk_khz == 38400) {
+ pll_state->mg_pll_tdc_coldst_bias_mask =
+ MG_PLL_TDC_COLDST_COLDSTART;
+ pll_state->mg_pll_bias_mask = 0;
+ } else {
+ pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ pll_state->mg_pll_bias_mask = -1U;
+ }
- pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
- pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ pll_state->mg_pll_tdc_coldst_bias &=
+ pll_state->mg_pll_tdc_coldst_bias_mask;
+ pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ }
return true;
}
@@ -2908,7 +2993,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum port port = encoder->port;
- bool has_dpll4 = false;
+ unsigned long dpll_mask;
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
@@ -2917,16 +3002,19 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
}
if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
- has_dpll4 = true;
+ dpll_mask =
+ BIT(DPLL_ID_EHL_DPLL4) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ else
+ dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- DPLL_ID_ICL_DPLL0,
- has_dpll4 ? DPLL_ID_EHL_DPLL4
- : DPLL_ID_ICL_DPLL1);
+ dpll_mask);
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
- port_name(encoder->port));
+ DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name);
return false;
}
@@ -2956,8 +3044,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- DPLL_ID_ICL_TBTPLL,
- DPLL_ID_ICL_TBTPLL);
+ BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll) {
DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
return false;
@@ -2976,8 +3063,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
encoder->port));
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- dpll_id,
- dpll_id);
+ BIT(dpll_id));
if (!port_dpll->pll) {
DRM_DEBUG_KMS("No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
@@ -3101,6 +3187,78 @@ out:
return ret;
}
+static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = I915_READ(MG_PLL_ENABLE(tc_port));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ /*
+ * All registers read here have the same HIP_INDEX_REG even though
+ * they are on different building blocks
+ */
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
+ DKL_PLL_DIV0_PROP_COEFF_MASK |
+ DKL_PLL_DIV0_FBPREDIV_MASK |
+ DKL_PLL_DIV0_FBDIV_INT_MASK);
+
+ hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+
+ hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+
+ hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+
+ hw_state->mg_pll_tdc_coldst_bias =
+ I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ return ret;
+}
+
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state,
@@ -3235,6 +3393,75 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
+static void dkl_pll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
+ u32 val;
+
+ /*
+ * All registers programmed here have the same HIP_INDEX_REG even
+ * though they are on different building blocks
+ */
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ /* All the registers are RMW */
+ val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
+
+ val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+
+ val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+
+ val = I915_READ(DKL_PLL_DIV0(tc_port));
+ val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
+ DKL_PLL_DIV0_PROP_COEFF_MASK |
+ DKL_PLL_DIV0_FBPREDIV_MASK |
+ DKL_PLL_DIV0_FBDIV_INT_MASK);
+ val |= hw_state->mg_pll_div0;
+ I915_WRITE(DKL_PLL_DIV0(tc_port), val);
+
+ val = I915_READ(DKL_PLL_DIV1(tc_port));
+ val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+ val |= hw_state->mg_pll_div1;
+ I915_WRITE(DKL_PLL_DIV1(tc_port), val);
+
+ val = I915_READ(DKL_PLL_SSC(tc_port));
+ val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+ val |= hw_state->mg_pll_ssc;
+ I915_WRITE(DKL_PLL_SSC(tc_port), val);
+
+ val = I915_READ(DKL_PLL_BIAS(tc_port));
+ val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+ val |= hw_state->mg_pll_bias;
+ I915_WRITE(DKL_PLL_BIAS(tc_port), val);
+
+ val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+
+ POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+}
+
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
@@ -3327,7 +3554,10 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv,
icl_pll_power_enable(dev_priv, pll, enable_reg);
- icl_mg_pll_write(dev_priv, pll);
+ if (INTEL_GEN(dev_priv) >= 12)
+ dkl_pll_write(dev_priv, pll);
+ else
+ icl_mg_pll_write(dev_priv, pll);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3482,11 +3712,22 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = dkl_pll_get_hw_state,
+};
+
static const struct dpll_info tgl_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- /* TODO: Add typeC plls */
+ { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
+ { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
{ },
};
@@ -3494,6 +3735,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.dpll_info = tgl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
.dump_hw_state = icl_dump_hw_state,
};
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 104cf6d42333..2a104c64291d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -337,6 +337,11 @@ struct intel_shared_dpll {
* @info: platform specific info
*/
const struct dpll_info *info;
+
+ /**
+ * @wakeref: On some platforms a device-level runtime pm reference may
+ * need to be grabbed to disable DC states while this DPLL is enabled
+ */
intel_wakeref_t wakeref;
};
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
new file mode 100644
index 000000000000..bb5a0e91b370
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+
+#define DSB_BUF_SIZE (2 * PAGE_SIZE)
+
+/**
+ * DOC: DSB
+ *
+ * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
+ * which can be offloaded to the DSB HW in the display controller. The DSB HW
+ * is a DMA engine that can be programmed to download the DSB from memory.
+ * It allows the driver to batch-submit display HW programming. This helps to
+ * reduce loading time and CPU activity, thereby making the context switch
+ * faster. DSB support is available from Gen12 Intel graphics based platforms onwards.
+ *
+ * DSBs can access only the pipe, plane, and transcoder Data Island Packet
+ * registers.
+ *
+ * The DSB HW can support only register writes (both indexed and direct MMIO
+ * writes). There are no register reads possible with the DSB HW engine.
+ */
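A minimal usage sketch of the API introduced by this file (illustrative only, not part of the patch; it assumes the declarations from intel_dsb.h and a caller that already has a crtc, a register, and a value). If no command buffer could be allocated, the write helpers transparently fall back to MMIO.

static void example_program_reg_via_dsb(struct intel_crtc *crtc,
					i915_reg_t reg, u32 val)
{
	struct intel_dsb *dsb = intel_dsb_get(crtc);	/* refcounted get, allocates the command buffer */

	intel_dsb_reg_write(dsb, reg, val);		/* queue a direct MMIO write instruction */
	intel_dsb_commit(dsb);				/* hand the buffer to the DSB engine and wait */
	intel_dsb_put(dsb);				/* drop the reference */
}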
+
+/* DSB opcodes. */
+#define DSB_OPCODE_SHIFT 24
+#define DSB_OPCODE_MMIO_WRITE 0x1
+#define DSB_OPCODE_INDEXED_WRITE 0x9
+#define DSB_BYTE_EN 0xF
+#define DSB_BYTE_EN_SHIFT 20
+#define DSB_REG_VALUE_MASK 0xfffff
+
+static inline bool is_dsb_busy(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
+}
+
+static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dsb_ctrl;
+
+ dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ if (DSB_STATUS & dsb_ctrl) {
+ DRM_DEBUG_KMS("DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl |= DSB_ENABLE;
+ I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+
+ POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ return true;
+}
+
+static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dsb_ctrl;
+
+ dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ if (DSB_STATUS & dsb_ctrl) {
+ DRM_DEBUG_KMS("DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl &= ~DSB_ENABLE;
+ I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+
+ POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ return true;
+}
+
+/**
+ * intel_dsb_get() - Allocate DSB context and return a DSB instance.
+ * @crtc: intel_crtc structure to get pipe info.
+ *
+ * This function provides a handle to a DSB instance for further DSB
+ * operations.
+ *
+ * Returns: address of the intel_dsb instance requested.
+ * Failure: Returns the same DSB instance, but without a command buffer.
+ */
+
+struct intel_dsb *
+intel_dsb_get(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_dsb *dsb = &crtc->dsb;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_DSB(i915))
+ return dsb;
+
+ if (atomic_add_return(1, &dsb->refcount) != 1)
+ return dsb;
+
+ dsb->id = DSB1;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Gem object creation failed\n");
+ goto err;
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ DRM_ERROR("Vma creation failed\n");
+ i915_gem_object_put(obj);
+ atomic_dec(&dsb->refcount);
+ goto err;
+ }
+
+ dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(dsb->cmd_buf)) {
+ DRM_ERROR("Command buffer creation failed\n");
+ i915_vma_unpin_and_release(&vma, 0);
+ dsb->cmd_buf = NULL;
+ atomic_dec(&dsb->refcount);
+ goto err;
+ }
+ dsb->vma = vma;
+
+err:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ return dsb;
+}
+
+/**
+ * intel_dsb_put() - Destroy the DSB context.
+ * @dsb: intel_dsb structure.
+ *
+ * This function destroys the DSB context allocated by intel_dsb_get(), by
+ * unpinning and releasing the VMA object associated with it.
+ */
+
+void intel_dsb_put(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!HAS_DSB(i915))
+ return;
+
+ if (WARN_ON(atomic_read(&dsb->refcount) == 0))
+ return;
+
+ if (atomic_dec_and_test(&dsb->refcount)) {
+ i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ dsb->cmd_buf = NULL;
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ }
+}
+
+/**
+ * intel_dsb_indexed_reg_write() - Write to the DSB context for an
+ * auto-increment register.
+ * @dsb: intel_dsb structure.
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function is used to write a register-value pair into the DSB
+ * command buffer for an auto-increment register. On command buffer overflow
+ * a warning is emitted, and for all other erroneous conditions the register
+ * programming is done through MMIO writes.
+ */
+
+void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
+ u32 val)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf = dsb->cmd_buf;
+ u32 reg_val;
+
+ if (!buf) {
+ I915_WRITE(reg, val);
+ return;
+ }
+
+ if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
+ DRM_DEBUG_KMS("DSB buffer overflow\n");
+ return;
+ }
+
+ /*
+ * For example, the buffer will look like below when 3 dwords are written
+ * for an auto-increment register:
+ * +--------------------------------------------------------+
+ * | size = 3 | offset &| value1 | value2 | value3 | zero   |
+ * |          | opcode  |        |        |        |        |
+ * +--------------------------------------------------------+
+ * +          +         +        +        +        +        +
+ * 0          4         8        12       16       20       24
+ * Byte
+ *
+ * As every instruction is 8 byte aligned, the index of a dsb instruction
+ * will always start at an even number when dealing with the u32 array. If
+ * we are writing an odd number of dwords, zeros will be added at the end
+ * for padding.
+ */
+ reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+ if (reg_val != i915_mmio_reg_offset(reg)) {
+ /* Every instruction should be 8 byte aligned. */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
+
+ dsb->ins_start_offset = dsb->free_pos;
+
+ /* Update the size. */
+ buf[dsb->free_pos++] = 1;
+
+ /* Update the opcode and reg. */
+ buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
+ DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg);
+
+ /* Update the value. */
+ buf[dsb->free_pos++] = val;
+ } else {
+ /* Update the new value. */
+ buf[dsb->free_pos++] = val;
+
+ /* Update the size. */
+ buf[dsb->ins_start_offset]++;
+ }
+
+ /* If the number of data words is odd, then the last dword should be 0. */
+ if (dsb->free_pos & 0x1)
+ buf[dsb->free_pos] = 0;
+}
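As a worked example of the layout described above (illustrative only), three consecutive calls for the same auto-increment register with values v1, v2 and v3 leave the command buffer as:

/*
 * buf[ins_start_offset + 0] = 3;                                   size
 * buf[ins_start_offset + 1] = (DSB_OPCODE_INDEXED_WRITE << 24) |   0x9 << 24
 *                             i915_mmio_reg_offset(reg);
 * buf[ins_start_offset + 2] = v1;
 * buf[ins_start_offset + 3] = v2;
 * buf[ins_start_offset + 4] = v3;
 * buf[ins_start_offset + 5] = 0;                                   padding to 8-byte alignment
 */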
+
+/**
+ * intel_dsb_reg_write() - Write to the DSB context for a normal
+ * register.
+ * @dsb: intel_dsb structure.
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function is used to write a register-value pair into the DSB
+ * command buffer. On command buffer overflow a warning is emitted,
+ * and for all other erroneous conditions the register programming is
+ * done through MMIO writes.
+ */
+void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf = dsb->cmd_buf;
+
+ if (!buf) {
+ I915_WRITE(reg, val);
+ return;
+ }
+
+ if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
+ DRM_DEBUG_KMS("DSB buffer overflow\n");
+ return;
+ }
+
+ dsb->ins_start_offset = dsb->free_pos;
+ buf[dsb->free_pos++] = val;
+ buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg);
+}
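A worked example of the two dwords this helper queues (illustrative only; 0x70080 is a made-up register offset):

/*
 * buf[free_pos]     = val;
 * buf[free_pos + 1] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT)   0x1 << 24
 *                   | (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT)            0xF << 20
 *                   | 0x70080;                                      i915_mmio_reg_offset(reg)
 *                   = 0x01f70080
 */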
+
+/**
+ * intel_dsb_commit() - Trigger workload execution of DSB.
+ * @dsb: intel_dsb structure.
+ *
+ * This function is used to do the actual write to the hardware using the DSB.
+ * On errors it falls back to MMIO. This function also resets the context.
+ */
+void intel_dsb_commit(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tail;
+
+ if (!dsb->free_pos)
+ return;
+
+ if (!intel_dsb_enable_engine(dsb))
+ goto reset;
+
+ if (is_dsb_busy(dsb)) {
+ DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
+
+ tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
+ if (tail > dsb->free_pos * 4)
+ memset(&dsb->cmd_buf[dsb->free_pos], 0,
+ (tail - dsb->free_pos * 4));
+
+ if (is_dsb_busy(dsb)) {
+ DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
+ I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
+ if (wait_for(!is_dsb_busy(dsb), 1)) {
+ DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ goto reset;
+ }
+
+reset:
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ intel_dsb_disable_engine(dsb);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
new file mode 100644
index 000000000000..6f95c8e909e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_H
+#define _INTEL_DSB_H
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+struct intel_crtc;
+struct i915_vma;
+
+enum dsb_id {
+ INVALID_DSB = -1,
+ DSB1,
+ DSB2,
+ DSB3,
+ MAX_DSB_PER_PIPE
+};
+
+struct intel_dsb {
+ atomic_t refcount;
+ enum dsb_id id;
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+
+ /*
+ * free_pos will point to the first free entry position
+ * and help in calculating the tail of the command buffer.
+ */
+ int free_pos;
+
+ /*
+ * ins_start_offset will help to store the start address of the dsb
+ * instruction and help in identifying the batch of auto-increment
+ * registers.
+ */
+ u32 ins_start_offset;
+};
+
+struct intel_dsb *
+intel_dsb_get(struct intel_crtc *crtc);
+void intel_dsb_put(struct intel_dsb *dsb);
+void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
+void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
+ u32 val);
+void intel_dsb_commit(struct intel_dsb *dsb);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 5fec02aceaed..a2a937109a5a 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -55,6 +55,7 @@ int intel_dsi_get_modes(struct drm_connector *connector)
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -73,7 +74,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
}
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 93baf366692e..bcfbcb743e7d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -280,7 +280,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 dvo_val;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
@@ -505,7 +505,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = ~0;
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 16ed44bfd734..3111ecaeabd0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -343,8 +343,8 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
HSW_FBCQ_DIS);
}
- if (IS_GEN(dev_priv, 11))
- /* Wa_1409120013:icl,ehl */
+ if (INTEL_GEN(dev_priv) >= 11)
+ /* Wa_1409120013:icl,ehl,tgl */
I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -1320,6 +1320,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->enabled = false;
fbc->active = false;
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
+
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index b5c588e511dd..48c960ca12fb 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- obj = NULL;
+ obj = ERR_PTR(-ENODEV);
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
- if (obj == NULL)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
@@ -204,7 +204,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
sizes->fb_height = intel_fb->base.height;
}
- mutex_lock(&dev->struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Pin the GGTT vma for our access via info->screen_base.
@@ -267,7 +266,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma_flags = flags;
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -275,7 +273,6 @@ out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -292,11 +289,8 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
drm_fb_helper_fini(&ifbdev->helper);
- if (ifbdev->vma) {
- mutex_lock(&ifbdev->helper.dev->struct_mutex);
+ if (ifbdev->vma)
intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
- mutex_unlock(&ifbdev->helper.dev->struct_mutex);
- }
if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
@@ -445,7 +439,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(!HAS_DISPLAY(dev_priv)))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 719379774fa5..84b164f31895 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -206,6 +206,7 @@ static int frontbuffer_active(struct i915_active *ref)
return 0;
}
+__i915_active_call
static void frontbuffer_retire(struct i915_active *ref)
{
struct intel_frontbuffer *front =
@@ -220,11 +221,18 @@ static void frontbuffer_release(struct kref *ref)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
+ struct drm_i915_gem_object *obj = front->obj;
+ struct i915_vma *vma;
- front->obj->frontbuffer = NULL;
- spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj)
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ spin_unlock(&obj->vma.lock);
- i915_gem_object_put(front->obj);
+ obj->frontbuffer = NULL;
+ spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
+
+ i915_gem_object_put(obj);
kfree(front);
}
@@ -249,8 +257,9 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
front->obj = obj;
kref_init(&front->ref);
atomic_set(&front->bits, 0);
- i915_active_init(i915, &front->write,
- frontbuffer_active, frontbuffer_retire);
+ i915_active_init(&front->write,
+ frontbuffer_active,
+ i915_active_may_sleep(frontbuffer_retire));
spin_lock(&i915->fb_tracking.lock);
if (obj->frontbuffer) {
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index d6775a005726..3d4d19ac1d14 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -836,7 +836,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6ec5ceeab601..f1f41ca8402b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1,9 +1,11 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2017 Google, Inc.
+ * Copyright (C) 2017-2019, Intel Corporation.
*
* Authors:
* Sean Paul <seanpaul@chromium.org>
+ * Ramalingam C <ramalingam.c@intel.com>
*/
#include <linux/component.h>
@@ -18,6 +20,7 @@
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
+#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
@@ -105,24 +108,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}
-static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+static inline
+bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum port port = connector->encoder->port;
- u32 reg;
-
- reg = I915_READ(PORT_HDCP_STATUS(port));
- return reg & HDCP_STATUS_ENC;
+ return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ HDCP_STATUS_ENC;
}
-static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+static inline
+bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum port port = connector->encoder->port;
- u32 reg;
-
- reg = I915_READ(HDCP2_STATUS_DDI(port));
- return reg & LINK_ENCRYPTION_STATUS;
+ return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS;
}
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
@@ -253,9 +252,29 @@ static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
}
static
-u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
+u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- enum port port = intel_dig_port->base.port;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ return HDCP_TRANSA_REP_PRESENT |
+ HDCP_TRANSA_SHA1_M0;
+ case TRANSCODER_B:
+ return HDCP_TRANSB_REP_PRESENT |
+ HDCP_TRANSB_SHA1_M0;
+ case TRANSCODER_C:
+ return HDCP_TRANSC_REP_PRESENT |
+ HDCP_TRANSC_SHA1_M0;
+ case TRANSCODER_D:
+ return HDCP_TRANSD_REP_PRESENT |
+ HDCP_TRANSD_SHA1_M0;
+ default:
+ DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
+ return -EINVAL;
+ }
+ }
+
switch (port) {
case PORT_A:
return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
@@ -268,18 +287,20 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
- break;
+ DRM_ERROR("Unknown port %d\n", port);
+ return -EINVAL;
}
- DRM_ERROR("Unknown port %d\n", port);
- return -EINVAL;
}
static
-int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
+int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct drm_i915_private *dev_priv;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
+ enum port port = intel_dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
@@ -306,7 +327,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
sha_idx = 0;
sha_text = 0;
sha_leftovers = 0;
- rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
+ rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
@@ -548,7 +569,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
* V prime atleast twice.
*/
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
+ ret = intel_hdcp_validate_v_prime(connector, shim,
ksv_fifo, num_downstream,
bstatus);
if (!ret)
@@ -576,6 +597,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
const struct intel_hdcp_shim *shim = hdcp->shim;
struct drm_i915_private *dev_priv;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
@@ -615,18 +637,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
- I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
- I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
+ I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+ get_random_u32());
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
return -ETIMEDOUT;
}
- an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
- an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
+ an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
+ an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
ret = shim->write_an_aksv(intel_dig_port, an.shim);
if (ret)
return ret;
@@ -644,24 +669,26 @@ static int intel_hdcp_auth(struct intel_connector *connector)
return -EPERM;
}
- I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
- I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
+ I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
+ I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
ret = shim->repeater_present(intel_dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
I915_WRITE(HDCP_REP_CTL,
- intel_hdcp_get_repeater_ctl(intel_dig_port));
+ intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
+ port));
ret = shim->toggle_signalling(intel_dig_port, true);
if (ret)
return ret;
- I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
@@ -689,22 +716,25 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(intel_dig_port, ri.shim);
if (ret)
return ret;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
return -ETIMEDOUT;
}
/* Wait for encryption confirmation */
- if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
@@ -729,15 +759,17 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
hdcp->hdcp_encrypted = false;
- I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+ if (intel_de_wait_for_clear(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
@@ -808,9 +840,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
/* Check_link valid only when HDCP1.4 is enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@@ -819,10 +853,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp_in_use(connector))) {
+ if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
connector->base.name, connector->base.base.id,
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -887,7 +922,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
/* PORT E doesn't have HDCP, and PORT F is disabled */
- return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
+ return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
}
static int
@@ -1493,10 +1528,11 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
-
+ WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
@@ -1506,14 +1542,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
}
- if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+ if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_AUTH_STATUS) {
/* Link is Authenticated. Now set for Encryption */
- I915_WRITE(HDCP2_CTL_DDI(port),
- I915_READ(HDCP2_CTL_DDI(port)) |
+ I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
+ port)) |
CTL_LINK_ENCRYPTION_REQ);
}
- ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
+ ret = intel_de_wait_for_set(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
@@ -1526,14 +1566,19 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+ WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
- I915_WRITE(HDCP2_CTL_DDI(port),
- I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+ I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
+ ~CTL_LINK_ENCRYPTION_REQ);
- ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
+ ret = intel_de_wait_for_clear(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
@@ -1632,9 +1677,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@@ -1643,9 +1690,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+ if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
- I915_READ(HDCP2_STATUS_DDI(port)));
+ I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -1749,13 +1797,71 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
+static inline
+enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return MEI_DDI_A;
+ case PORT_B ... PORT_F:
+ return (enum mei_fw_ddi)port;
+ default:
+ return MEI_DDI_INVALID_PORT;
+ }
+}
+
+static inline
+enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+{
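+ /* mei_fw_tc values are the HW transcoder index with bit 4 (0x10) set */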
+ switch (cpu_transcoder) {
+ case TRANSCODER_A ... TRANSCODER_D:
+ return (enum mei_fw_tc)(cpu_transcoder | 0x10);
+ default: /* eDP, DSI TRANSCODERS are non HDCP capable */
+ return MEI_INVALID_TRANSCODER;
+ }
+}
+
+void intel_hdcp_transcoder_config(struct intel_connector *connector,
+ enum transcoder cpu_transcoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ if (!hdcp->shim)
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->cpu_transcoder = cpu_transcoder;
+ hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ mutex_unlock(&hdcp->mutex);
+ }
+}
+
static inline int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
- data->port = connector->encoder->port;
+ if (INTEL_GEN(dev_priv) < 12)
+ data->fw_ddi =
+ intel_get_mei_fw_ddi_index(connector->encoder->port);
+ else
+ /*
+ * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
+ * with zero (INVALID PORT index).
+ */
+ data->fw_ddi = MEI_DDI_INVALID_PORT;
+
+ /*
+ * As associated transcoder is set and modified at modeset, here fw_tc
+ * is initialized to zero (invalid transcoder index). This will be
+ * retained for <Gen12 forever.
+ */
+ data->fw_tc = MEI_INVALID_TRANSCODER;
+
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 13555b054930..41c1053d9e38 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -15,10 +15,14 @@ struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
struct intel_hdcp_shim;
+enum port;
+enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
+void intel_hdcp_transcoder_config(struct intel_connector *connector,
+ enum transcoder cpu_transcoder);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index b030f7ae3302..f6f5312205c4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -189,13 +189,19 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
-static int hsw_dip_data_size(unsigned int type)
+static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
+ unsigned int type)
{
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_VSC_DATA_SIZE;
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ if (INTEL_GEN(dev_priv) >= 11)
+ return VIDEO_DIP_GMP_DATA_SIZE;
+ else
+ return VIDEO_DIP_DATA_SIZE;
default:
return VIDEO_DIP_DATA_SIZE;
}
@@ -514,7 +520,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
int i;
u32 val = I915_READ(ctl_reg);
- data_size = hsw_dip_data_size(type);
+ data_size = hsw_dip_data_size(dev_priv, type);
+
+ WARN_ON(len > data_size);
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -724,11 +732,20 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
- drm_hdmi_avi_infoframe_quant_range(frame, connector,
- adjusted_mode,
- crtc_state->limited_color_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
+ /* nonsense combination */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
drm_hdmi_avi_infoframe_content_type(frame, conn_state);
@@ -1491,7 +1508,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv =
intel_dig_port->base.base.dev->dev_private;
+ struct intel_connector *connector =
+ intel_dig_port->hdmi.attached_connector;
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
union {
u32 reg;
@@ -1502,39 +1522,30 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (ret)
return false;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
return false;
}
return true;
}
-struct hdcp2_hdmi_msg_data {
+struct hdcp2_hdmi_msg_timeout {
u8 msg_id;
- u32 timeout;
- u32 timeout2;
+ u16 timeout;
};
-static const struct hdcp2_hdmi_msg_data hdcp2_msg_data[] = {
- { HDCP_2_2_AKE_INIT, 0, 0 },
- { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
- { HDCP_2_2_AKE_NO_STORED_KM, 0, 0 },
- { HDCP_2_2_AKE_STORED_KM, 0, 0 },
- { HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
- { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
- { HDCP_2_2_LC_INIT, 0, 0 },
- { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0 },
- { HDCP_2_2_SKE_SEND_EKS, 0, 0 },
- { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
- { HDCP_2_2_REP_SEND_ACK, 0, 0 },
- { HDCP_2_2_REP_STREAM_MANAGE, 0, 0 },
- { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
+ { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, },
+ { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, },
+ { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, },
+ { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, },
};
static
@@ -1551,12 +1562,17 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
- if (hdcp2_msg_data[i].msg_id == msg_id &&
- (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
- return hdcp2_msg_data[i].timeout;
- else if (hdcp2_msg_data[i].msg_id == msg_id)
- return hdcp2_msg_data[i].timeout2;
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME) {
+ if (is_paired)
+ return HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS;
+ else
+ return HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS;
+ }
+
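+ /* All other messages with a defined timeout are looked up in hdcp2_msg_timeout[] */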
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_timeout); i++) {
+ if (hdcp2_msg_timeout[i].msg_id == msg_id)
+ return hdcp2_msg_timeout[i].timeout;
+ }
return -EINVAL;
}
@@ -2184,8 +2200,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
true, force_dvi);
}
+ if (status != MODE_OK)
+ return status;
- return status;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
@@ -2261,9 +2279,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *config,
- int *clock_12bpc, int *clock_10bpc,
- int *clock_8bpc)
+ struct intel_crtc_state *config)
{
struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
@@ -2272,11 +2288,6 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return false;
}
- /* YCBCR420 TMDS rate requirement is half the pixel clock */
- config->port_clock /= 2;
- *clock_12bpc /= 2;
- *clock_10bpc /= 2;
- *clock_8bpc /= 2;
config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
@@ -2291,6 +2302,104 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return true;
}
+static int intel_hdmi_port_clock(int clock, int bpc)
+{
+ /*
+ * Need to adjust the port link by:
+ * 1.5x for 12bpc
+ * 1.25x for 10bpc
+ */
+ return clock * bpc / 8;
+}
+
+static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int clock, bool force_dvi)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ int bpc;
+
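+ /* Prefer 12bpc, then 10bpc; fall back to 8bpc if the deep color clock doesn't fit */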
+ for (bpc = 12; bpc >= 10; bpc -= 2) {
+ if (hdmi_deep_color_possible(crtc_state, bpc) &&
+ hdmi_port_clock_valid(intel_hdmi,
+ intel_hdmi_port_clock(clock, bpc),
+ true, force_dvi) == MODE_OK)
+ return bpc;
+ }
+
+ return 8;
+}
+
+static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ bool force_dvi)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ int bpc, clock = adjusted_mode->crtc_clock;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ clock *= 2;
+
+ /* YCBCR420 TMDS rate requirement is half the pixel clock */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ clock /= 2;
+
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
+ clock, force_dvi);
+
+ crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
+
+ /*
+ * pipe_bpp could already be below 8bpc due to
+ * FDI bandwidth constraints. We shouldn't bump it
+ * back up to 8bpc in that case.
+ */
+ if (crtc_state->pipe_bpp > bpc * 3)
+ crtc_state->pipe_bpp = bpc * 3;
+
+ DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
+ bpc, crtc_state->pipe_bpp);
+
+ if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
+ false, force_dvi) != MODE_OK) {
+ DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
+ crtc_state->port_clock);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ return crtc_state->has_hdmi_sink &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2302,11 +2411,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
- int clock_10bpc = clock_8bpc * 5 / 4;
- int clock_12bpc = clock_8bpc * 3 / 2;
- int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+ int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -2317,33 +2423,19 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
- if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /* See CEA-861-E - 5.1 Default Encoding Parameters */
- pipe_config->limited_color_range =
- pipe_config->has_hdmi_sink &&
- drm_default_rgb_quant_range(adjusted_mode) ==
- HDMI_QUANTIZATION_RANGE_LIMITED;
- } else {
- pipe_config->limited_color_range =
- intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
- }
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
- clock_8bpc *= 2;
- clock_10bpc *= 2;
- clock_12bpc *= 2;
- }
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
- if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
- &clock_12bpc, &clock_10bpc,
- &clock_8bpc)) {
+ if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
DRM_ERROR("Can't support YCBCR420 output\n");
return -EINVAL;
}
}
+ pipe_config->limited_color_range =
+ intel_hdmi_limited_color_range(pipe_config, conn_state);
+
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
@@ -2355,43 +2447,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
- /*
- * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
- * to check that the higher clock still fits within limits.
- */
- if (hdmi_deep_color_possible(pipe_config, 12) &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
- true, force_dvi) == MODE_OK) {
- DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
- desired_bpp = 12*3;
-
- /* Need to adjust the port link by 1.5x for 12bpc. */
- pipe_config->port_clock = clock_12bpc;
- } else if (hdmi_deep_color_possible(pipe_config, 10) &&
- hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
- true, force_dvi) == MODE_OK) {
- DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
- desired_bpp = 10 * 3;
-
- /* Need to adjust the port link by 1.25x for 10bpc. */
- pipe_config->port_clock = clock_10bpc;
- } else {
- DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
- desired_bpp = 8*3;
-
- pipe_config->port_clock = clock_8bpc;
- }
-
- if (!pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp);
- pipe_config->pipe_bpp = desired_bpp;
- }
-
- if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
- false, force_dvi) != MODE_OK) {
- DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
- return -EINVAL;
- }
+ ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
+ if (ret)
+ return ret;
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
@@ -2431,6 +2489,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
+ intel_hdcp_transcoder_config(intel_hdmi->attached_connector,
+ pipe_config->cpu_transcoder);
+
return 0;
}
@@ -2757,8 +2818,9 @@ intel_hdmi_connector_register(struct drm_connector *connector)
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- if (intel_attached_hdmi(connector)->cec_notifier)
- cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
+ struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
+
+ cec_notifier_conn_unregister(n);
intel_connector_destroy(connector);
}
@@ -3007,7 +3069,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@@ -3073,13 +3135,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
+ struct cec_connector_info conn_info;
- DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
- port_name(port));
+ DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
+ intel_encoder->base.base.id, intel_encoder->base.name);
if (WARN(intel_dig_port->max_lanes < 4,
- "Not enough lanes (%d) for HDMI on port %c\n",
- intel_dig_port->max_lanes, port_name(port)))
+ "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
@@ -3125,8 +3189,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
- intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
- port_identifier(port));
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ intel_hdmi->cec_notifier =
+ cec_notifier_conn_register(dev->dev, port_identifier(port),
+ &conn_info);
if (!intel_hdmi->cec_notifier)
DRM_DEBUG_KMS("CEC notifier get failed\n");
}
@@ -3216,11 +3283,11 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
- intel_encoder->crtc_mask = 1 << 2;
+ intel_encoder->pipe_mask = BIT(PIPE_C);
else
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 106c2e0bc3c9..cf1ea5427639 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -23,6 +23,7 @@ struct intel_crtc_state;
struct intel_hdmi;
struct drm_connector_state;
union hdmi_infoframe;
+enum port;
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 56be20f6f47e..fc29046d48ea 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -481,7 +481,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+ DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
queue_dig = true;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index b0cd447b7fbc..087b5f57b321 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -13,6 +13,7 @@
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
+enum port;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index b19800b58442..0b67f7887cd0 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -114,7 +114,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
pinfo.size_data = sizeof(*pdata);
pinfo.dma_mask = DMA_BIT_MASK(32);
- pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
+ pdata->num_pipes = INTEL_NUM_PIPES(dev_priv);
pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
pdata->port[0].pipe = -1;
pdata->port[1].pipe = -1;
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index b7c459a8931c..b1bc78623647 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -232,7 +232,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 temp;
if (HAS_PCH_SPLIT(dev_priv)) {
@@ -899,12 +899,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (HAS_PCH_SPLIT(dev_priv))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN(dev_priv, 4))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ if (INTEL_GEN(dev_priv) < 4)
+ intel_encoder->pipe_mask = BIT(PIPE_B);
else
- intel_encoder->crtc_mask = (1 << 1);
+ intel_encoder->pipe_mask = ~0;
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 29edfc343716..848ce07a8ec2 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -30,6 +30,7 @@
#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -230,7 +231,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
if (IS_ERR(rq))
return rq;
- err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
+ err = i915_active_add_request(&overlay->last_flip, rq);
if (err) {
i915_request_add(rq);
return ERR_PTR(err);
@@ -439,8 +440,6 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
/*
* Only wait if there is actually an old frame to release to
* guarantee forward progress.
@@ -751,7 +750,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct i915_vma *vma;
int ret, tmp_width;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
@@ -852,7 +850,6 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
@@ -1068,11 +1065,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (!(params->flags & I915_OVERLAY_ENABLE)) {
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
-
ret = intel_overlay_switch_off(overlay);
-
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@@ -1088,7 +1081,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
if (i915_gem_object_is_tiled(new_bo)) {
DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
@@ -1152,14 +1144,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
goto out_unlock;
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
return 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
@@ -1233,7 +1223,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
}
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
@@ -1290,7 +1279,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
ret = 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@@ -1303,15 +1291,11 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
struct i915_vma *vma;
int err;
- mutex_lock(&i915->drm.struct_mutex);
-
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
- if (obj == NULL)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_unlock;
- }
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
@@ -1332,13 +1316,10 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
overlay->reg_bo = obj;
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
err_put_bo:
i915_gem_object_put(obj);
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1367,8 +1348,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
- i915_active_init(dev_priv,
- &overlay->last_flip,
+ i915_active_init(&overlay->last_flip,
NULL, intel_overlay_last_flip_retire);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 3bfb720560c2..6a9f322d3fca 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -76,7 +76,7 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
/* Cannot enable DSC and PSR2 simultaneously */
- WARN_ON(crtc_state->dsc_params.compression_enable &&
+ WARN_ON(crtc_state->dsc.compression_enable &&
crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
@@ -88,48 +88,35 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
}
}
-static int edp_psr_shift(enum transcoder cpu_transcoder)
+static void psr_irq_control(struct drm_i915_private *dev_priv)
{
- switch (cpu_transcoder) {
- case TRANSCODER_A:
- return EDP_PSR_TRANSCODER_A_SHIFT;
- case TRANSCODER_B:
- return EDP_PSR_TRANSCODER_B_SHIFT;
- case TRANSCODER_C:
- return EDP_PSR_TRANSCODER_C_SHIFT;
- default:
- MISSING_CASE(cpu_transcoder);
- /* fallthrough */
- case TRANSCODER_EDP:
- return EDP_PSR_TRANSCODER_EDP_SHIFT;
- }
-}
-
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
-{
- u32 debug_mask, mask;
- enum transcoder cpu_transcoder;
- u32 transcoders = BIT(TRANSCODER_EDP);
+ enum transcoder trans_shift;
+ u32 mask, val;
+ i915_reg_t imr_reg;
- if (INTEL_GEN(dev_priv) >= 8)
- transcoders |= BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C);
-
- debug_mask = 0;
- mask = 0;
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- int shift = edp_psr_shift(cpu_transcoder);
-
- mask |= EDP_PSR_ERROR(shift);
- debug_mask |= EDP_PSR_POST_EXIT(shift) |
- EDP_PSR_PRE_ENTRY(shift);
+ /*
+ * gen12+ has registers relative to transcoder and one per transcoder
+ * using the same bit definition: handle it as TRANSCODER_EDP to force
+ * 0 shift in bit definition
+ */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ trans_shift = 0;
+ imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ } else {
+ trans_shift = dev_priv->psr.transcoder;
+ imr_reg = EDP_PSR_IMR;
}
- if (debug & I915_PSR_DEBUG_IRQ)
- mask |= debug_mask;
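+ /* PSR error interrupts are always unmasked; entry/exit only with IRQ debug enabled */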
+ mask = EDP_PSR_ERROR(trans_shift);
+ if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+ mask |= EDP_PSR_POST_EXIT(trans_shift) |
+ EDP_PSR_PRE_ENTRY(trans_shift);
- I915_WRITE(EDP_PSR_IMR, ~mask);
+ /* Warning: it is masking/setting reserved bits too */
+ val = I915_READ(imr_reg);
+ val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val |= ~mask;
+ I915_WRITE(imr_reg, val);
}
static void psr_event_print(u32 val, bool psr2_enabled)
@@ -171,60 +158,58 @@ static void psr_event_print(u32 val, bool psr2_enabled)
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
- u32 transcoders = BIT(TRANSCODER_EDP);
- enum transcoder cpu_transcoder;
+ enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+ enum transcoder trans_shift;
+ i915_reg_t imr_reg;
ktime_t time_ns = ktime_get();
- u32 mask = 0;
- if (INTEL_GEN(dev_priv) >= 8)
- transcoders |= BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ trans_shift = 0;
+ imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ } else {
+ trans_shift = dev_priv->psr.transcoder;
+ imr_reg = EDP_PSR_IMR;
+ }
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- int shift = edp_psr_shift(cpu_transcoder);
+ if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ dev_priv->psr.last_entry_attempt = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
+ }
- if (psr_iir & EDP_PSR_ERROR(shift)) {
- DRM_WARN("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ dev_priv->psr.last_exit = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
- dev_priv->psr.irq_aux_error = true;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ bool psr2_enabled = dev_priv->psr.psr2_enabled;
- /*
- * If this interruption is not masked it will keep
- * interrupting so fast that it prevents the scheduled
- * work to run.
- * Also after a PSR error, we don't want to arm PSR
- * again so we don't care about unmask the interruption
- * or unset irq_aux_error.
- */
- mask |= EDP_PSR_ERROR(shift);
- }
-
- if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
- dev_priv->psr.last_entry_attempt = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
- transcoder_name(cpu_transcoder));
+ I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ psr_event_print(val, psr2_enabled);
}
+ }
- if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
- dev_priv->psr.last_exit = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ u32 val;
- if (INTEL_GEN(dev_priv) >= 9) {
- u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
- bool psr2_enabled = dev_priv->psr.psr2_enabled;
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
- I915_WRITE(PSR_EVENT(cpu_transcoder), val);
- psr_event_print(val, psr2_enabled);
- }
- }
- }
+ dev_priv->psr.irq_aux_error = true;
- if (mask) {
- mask |= I915_READ(EDP_PSR_IMR);
- I915_WRITE(EDP_PSR_IMR, mask);
+ /*
+ * If this interrupt is not masked it will keep
+ * firing so fast that it prevents the scheduled
+ * work from running.
+ * Also, after a PSR error we don't want to arm PSR
+ * again, so we don't care about unmasking the interrupt
+ * or unsetting irq_aux_error.
+ */
+ val = I915_READ(imr_reg);
+ val |= EDP_PSR_ERROR(trans_shift);
+ I915_WRITE(imr_reg, val);
schedule_work(&dev_priv->psr.work);
}
@@ -283,6 +268,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ if (dev_priv->psr.dp) {
+ DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
+ return;
+ }
+
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
@@ -305,7 +295,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- WARN_ON(dev_priv->psr.dp);
dev_priv->psr.dp = intel_dp;
if (INTEL_GEN(dev_priv) >= 9 &&
@@ -390,7 +379,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
+ I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -401,7 +390,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
+ I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@@ -491,8 +480,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
- I915_WRITE(EDP_PSR_CTL, val);
+ val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
@@ -528,9 +518,87 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
*/
- I915_WRITE(EDP_PSR_CTL, 0);
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+}
- I915_WRITE(EDP_PSR2_CTL, val);
+static bool
+transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
+{
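+ /* PSR2 is only supported on the eDP transcoder (gen9-11) or transcoder A (gen12+) */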
+ if (INTEL_GEN(dev_priv) < 9)
+ return false;
+ else if (INTEL_GEN(dev_priv) >= 12)
+ return trans == TRANSCODER_A;
+ else
+ return trans == TRANSCODER_EDP;
+}
+
+static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
+{
+ if (!cstate || !cstate->base.active)
+ return 0;
+
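+ /* One frame period in microseconds at the current refresh rate */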
+ return DIV_ROUND_UP(1000 * 1000,
+ drm_mode_vrefresh(&cstate->base.adjusted_mode));
+}
+
+static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+ u32 idle_frames)
+{
+ u32 val;
+
+ idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val &= ~EDP_PSR2_IDLE_FRAME_MASK;
+ val |= idle_frames;
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+}
+
+static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ psr2_program_idle_frames(dev_priv, 0);
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ int idle_frames;
+
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ /*
+ * Restore the PSR2 idle frame count; let's use 6 as the minimum to cover
+ * all known cases, including the off-by-one issue that HW has in some cases.
+ */
+ idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ psr2_program_idle_frames(dev_priv, idle_frames);
+}
+
+static void tgl_dc5_idle_thread(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.idle_work.work);
+
+ mutex_lock(&dev_priv->psr.lock);
+ /* If delayed work is pending, it is not idle */
+ if (delayed_work_pending(&dev_priv->psr.idle_work))
+ goto unlock;
+
+ DRM_DEBUG_KMS("DC5/6 idle thread\n");
+ tgl_psr2_disable_dc3co(dev_priv);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->psr.dc3co_enabled)
+ return;
+
+ cancel_delayed_work(&dev_priv->psr.idle_work);
+ /* Before PSR2 exit, disallow dc3co */
+ tgl_psr2_disable_dc3co(dev_priv);
}
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
@@ -544,17 +612,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
+ if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
+ DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
+ transcoder_name(crtc_state->cpu_transcoder));
+ return false;
+ }
+
/*
* DSC and PSR2 cannot be enabled simultaneously. If a requested
* resolution requires DSC to be enabled, priority is given to DSC
* over PSR2.
*/
- if (crtc_state->dsc_params.compression_enable) {
+ if (crtc_state->dsc.compression_enable) {
DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ psr_max_h = 5120;
+ psr_max_v = 3200;
+ } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
} else if (IS_GEN(dev_priv, 9)) {
@@ -606,10 +683,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
/*
* HSW spec explicitly says PSR is tied to port A.
- * BDW+ platforms with DDI implementation of PSR have different
- * PSR registers per transcoder and we only implement transcoder EDP
- * ones. Since by Display design transcoder EDP is tied to port A
- * we can safely escape based on the port A.
+ * BDW+ platforms have an instance of PSR registers per transcoder, but
+ * for now the driver only supports one instance of PSR, so let's keep it
+ * hardcoded to PORT_A
*/
if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
@@ -648,9 +724,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (INTEL_GEN(dev_priv) >= 9)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+ WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
@@ -663,25 +740,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
dev_priv->psr.active = true;
}
-static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- static const i915_reg_t regs[] = {
- [TRANSCODER_A] = CHICKEN_TRANS_A,
- [TRANSCODER_B] = CHICKEN_TRANS_B,
- [TRANSCODER_C] = CHICKEN_TRANS_C,
- [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
- };
-
- WARN_ON(INTEL_GEN(dev_priv) < 9);
-
- if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
- !regs[cpu_transcoder].reg))
- cpu_transcoder = TRANSCODER_A;
-
- return regs[cpu_transcoder];
-}
-
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -697,8 +755,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
- i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
- cpu_transcoder);
+ i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = I915_READ(reg);
chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
@@ -720,19 +777,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- I915_WRITE(EDP_PSR_DEBUG, mask);
+ I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
+
+ psr_irq_control(dev_priv);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
+ u32 val;
WARN_ON(dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+ dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
+ dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * And enabling in this situation cause the screen to freeze in the
+ * first time that PSR HW tries to activate so lets keep PSR disabled
+ * to avoid any rendering problems.
+ */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ val &= EDP_PSR_ERROR(0);
+ } else {
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+ }
+ if (val) {
+ dev_priv->psr.sink_not_reliable = true;
+ DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
+ return;
+ }
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -782,20 +866,28 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
u32 val;
if (!dev_priv->psr.active) {
- if (INTEL_GEN(dev_priv) >= 9)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ WARN_ON(val & EDP_PSR2_ENABLE);
+ }
+
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
+ WARN_ON(val & EDP_PSR_ENABLE);
+
return;
}
if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
+ tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+ val &= ~EDP_PSR2_ENABLE;
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
} else {
- val = I915_READ(EDP_PSR_CTL);
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+ val &= ~EDP_PSR_ENABLE;
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
dev_priv->psr.active = false;
}
@@ -817,10 +909,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(dev_priv);
if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS;
+ psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS;
+ psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -859,6 +951,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
+ cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
@@ -963,7 +1056,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
+ return __intel_wait_for_register(&dev_priv->uncore,
+ EDP_PSR_STATUS(dev_priv->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@@ -979,10 +1073,10 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return false;
if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
+ reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS;
+ reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -1067,7 +1161,13 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
+ /*
+ * Do it right away if it's already enabled, otherwise it will be done
+ * when enabling the source.
+ */
+ if (dev_priv->psr.enabled)
+ psr_irq_control(dev_priv);
mutex_unlock(&dev_priv->psr.lock);
@@ -1159,6 +1259,44 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->psr.lock);
}
+/*
+ * Once we completely rely on PSR2 S/W tracking in the future,
+ * intel_psr_flush() will also invalidate and flush PSR for the
+ * ORIGIN_FLIP event, so tgl_dc3co_flush() will need to be changed
+ * accordingly.
+ */
+static void
+tgl_dc3co_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits, enum fb_op_origin origin)
+{
+ u32 delay;
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.dc3co_enabled)
+ goto unlock;
+
+ if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+ goto unlock;
+
+ /*
+	 * Modify the delay of the delayed work at every frontbuffer flush
+	 * flip event; when the delayed work finally runs, the display has
+	 * been idle that long.
+ */
+ if (!(frontbuffer_bits &
+ INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+ goto unlock;
+
+ tgl_psr2_enable_dc3co(dev_priv);
+ /* DC5/DC6 required idle frames = 6 */
+ delay = 6 * dev_priv->psr.dc3co_exit_delay;
+ mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
+ usecs_to_jiffies(delay));
+
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
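The delay computed above is simply six frame times, matching the DC5/DC6 idle-frame requirement noted in the comment. A minimal sketch, assuming psr.dc3co_exit_delay holds one frame time in microseconds:

/* Illustrative sketch only, not part of the patch */
static unsigned int dc3co_idle_delay_us(unsigned int frame_time_us)
{
	/* DC5/DC6 entry requires 6 idle frames */
	return 6 * frame_time_us;
}

/* e.g. a 60 Hz panel: ~16667 us per frame -> ~100 ms of required idle time */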
+
/**
* intel_psr_flush - Flush PSR
* @dev_priv: i915 device
@@ -1178,8 +1316,10 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP) {
+ tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
return;
+ }
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
@@ -1208,45 +1348,34 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
- u32 val;
-
if (!HAS_PSR(dev_priv))
return;
- dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
- HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
-
if (!dev_priv->psr.sink_support)
return;
+ if (IS_HASWELL(dev_priv))
+ /*
+		 * HSW doesn't have PSR registers in the same space as the
+		 * transcoders, so set this to a value that, when subtracted
+		 * from a transcoder-space register offset, yields the right
+		 * HSW offset.
+ */
+ dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
+
if (i915_modparams.enable_psr == -1)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
i915_modparams.enable_psr = 0;
- /*
- * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
- * will still keep the error set even after the reset done in the
- * irq_preinstall and irq_uninstall hooks.
- * And enabling in this situation cause the screen to freeze in the
- * first time that PSR HW tries to activate so lets keep PSR disabled
- * to avoid any rendering problems.
- */
- val = I915_READ(EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
- if (val) {
- DRM_DEBUG_KMS("PSR interruption error set\n");
- dev_priv->psr.sink_not_reliable = true;
- }
-
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else
- /* For new platforms let's respect VBT back again */
+ else if (INTEL_GEN(dev_priv) < 12)
+ /* For new platforms up to TGL let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
mutex_init(&dev_priv->psr.lock);
}
@@ -1288,7 +1417,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
if (val & DP_PSR_LINK_CRC_ERROR)
- DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+ DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
if (val & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index dc818826f36d..46e4de8b8cd5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -30,7 +30,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index adeb1c840976..5b7f4baf7348 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2921,7 +2921,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
bytes[0], bytes[1]);
return false;
}
- intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_sdvo->base.pipe_mask = ~0;
return true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index c9e05bcdd141..a66f224aa17d 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -14,6 +14,7 @@
struct drm_i915_private;
enum pipe;
+enum port;
bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t sdvo_reg, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index cae25e493128..72fda0430062 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -48,19 +48,6 @@
#include "intel_psr.h"
#include "intel_sprite.h"
-bool is_planar_yuv_format(u32 pixelformat)
-{
- switch (pixelformat) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return true;
- default:
- return false;
- }
-}
-
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@@ -300,10 +287,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
- src->x1 = src_x << 16;
- src->x2 = (src_x + src_w) << 16;
- src->y1 = src_y << 16;
- src->y2 = (src_y + src_h) << 16;
+ drm_rect_init(src, src_x << 16, src_y << 16,
+ src_w << 16, src_h << 16);
if (!fb->format->is_yuv)
return 0;
@@ -337,6 +322,55 @@ bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
icl_hdr_plane_mask() & BIT(plane_id);
}
+static void
+skl_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 9;
+ *den = 8;
+ }
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int num, den;
+
+ skl_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock on glk+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ den *= 2;
+
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+ dst_h = drm_rect_height(&plane_state->base.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
+ mul_u32_u32(den, dst_w * dst_h));
+}
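A worked example of the ratio above, with assumed numbers: a 64bpp (cpp == 8) plane on gen10+ uses num/den = 10/8, the two-pixels-per-clock factor doubles den, and without downscaling the source and destination areas cancel out:

/* Illustrative only: unscaled 64bpp plane on gen10+/GLK */
static unsigned int example_skl_min_cdclk(unsigned int pixel_rate)
{
	unsigned int num = 10;
	unsigned int den = 8 * 2;	/* 64bpp ratio, two pixels per clock */

	/* src and dst areas are equal, so they drop out of the formula */
	return DIV_ROUND_UP(pixel_rate * num, den);
}

/* e.g. pixel_rate = 600000 kHz -> min cdclk = 375000 kHz */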
+
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -361,6 +395,7 @@ skl_program_scaler(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler =
@@ -381,7 +416,7 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
+ if (drm_format_info_is_yuv_semiplanar(fb->format) &&
!icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -554,7 +589,7 @@ skl_program_plane(struct intel_plane *plane,
u32 y = plane_state->color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
- struct intel_plane *linked = plane_state->linked_plane;
+ struct intel_plane *linked = plane_state->planar_linked_plane;
const struct drm_framebuffer *fb = plane_state->base.fb;
u8 alpha = plane_state->base.alpha >> 8;
u32 plane_color_ctl = 0;
@@ -653,7 +688,7 @@ skl_update_plane(struct intel_plane *plane,
{
int color_plane = 0;
- if (plane_state->linked_plane) {
+ if (plane_state->planar_linked_plane) {
/* Program the UV plane */
color_plane = 1;
}
@@ -825,6 +860,85 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
+static void
+vlv_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * VLV bspec only considers cases where all three planes are
+	 * enabled, and cases where the primary and one sprite are enabled.
+ * Let's assume the case with just two sprites enabled also
+ * maps to the latter case.
+ */
+ if (hweight8(active_planes) == 3) {
+ switch (cpp) {
+ case 8:
+ *num = 11;
+ *den = 8;
+ break;
+ case 4:
+ *num = 18;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ vlv_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -1031,6 +1145,164 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ switch (cpp) {
+ case 8:
+ *num = 12;
+ *den = 8;
+ break;
+ case 4:
+ *num = 19;
+ *den = 16;
+ break;
+ case 2:
+ *num = 33;
+ *den = 32;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+}
+
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, dst_w, pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+
+ if (src_w != dst_w)
+ ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den);
+ else
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, num * src_w),
+ den * dst_w);
+}
+
+static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int num, den;
+
+ hsw_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -1044,6 +1316,16 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return sprctl;
}
+static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+
+ return fb->format->cpp[0] == 8 &&
+ (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
+}
+
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -1066,6 +1348,12 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616;
+ break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
break;
@@ -1083,7 +1371,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- sprctl |= SPRITE_INT_GAMMA_DISABLE;
+ if (!ivb_need_sprite_gamma(plane_state))
+ sprctl |= SPRITE_INT_GAMMA_DISABLE;
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
@@ -1105,12 +1394,26 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
-static void ivb_sprite_linear_gamma(u16 gamma[18])
+static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
+ u16 gamma[18])
{
- int i;
+ int scale, i;
- for (i = 0; i < 17; i++)
- gamma[i] = (i << 10) / 16;
+ /*
+ * WaFP16GammaEnabling:ivb,hsw
+ * "Workaround : When using the 64-bit format, the sprite output
+ * on each color channel has one quarter amplitude. It can be
+ * brought up to full amplitude by using sprite internal gamma
+ * correction, pipe gamma correction, or pipe color space
+ * conversion to multiply the sprite output by four."
+ */
+ scale = 4;
+
+ for (i = 0; i < 16; i++)
+ gamma[i] = min((scale * i << 10) / 16, (1 << 10) - 1);
+
+ gamma[i] = min((scale * i << 10) / 16, 1 << 10);
+ i++;
gamma[i] = 3 << 10;
i++;
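With scale = 4 the ramp saturates after the first few entries, which is what recovers the quarter-amplitude fp16 sprite output described in the workaround above. For illustration, the resulting table would look roughly like this (not part of the patch):

/* Illustrative only: the ramp produced above with scale = 4 */
static const u16 example_fp16_sprite_gamma[18] = {
	0, 256, 512, 768,		/* 4x slope... */
	1023, 1023, 1023, 1023,		/* ...clamped just below 1.0 */
	1023, 1023, 1023, 1023,
	1023, 1023, 1023, 1023,
	1024,				/* entry 16 may reach 1.0 */
	3072,				/* entry 17: 3 << 10 */
};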
@@ -1124,7 +1427,10 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
u16 gamma[18];
int i;
- ivb_sprite_linear_gamma(gamma);
+ if (!ivb_need_sprite_gamma(plane_state))
+ return;
+
+ ivb_sprite_linear_gamma(plane_state, gamma);
/* FIXME these register are single buffered :( */
for (i = 0; i < 16; i++)
@@ -1257,6 +1563,53 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int hscale, pixel_rate;
+ unsigned int limit, decimate;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ hscale = drm_rect_calc_hscale(&plane_state->base.src,
+ &plane_state->base.dst,
+ 0, INT_MAX);
+ if (hscale < 0x10000)
+ return pixel_rate;
+
+ /* Decimation steps at 2x,4x,8x,16x */
+ decimate = ilog2(hscale >> 16);
+ hscale >>= decimate;
+
+ /* Starting limit is 90% of cdclk */
+ limit = 9;
+
+ /* -10% per decimation step */
+ limit -= decimate;
+
+ /* -10% for RGB */
+ if (fb->format->cpp[0] >= 4)
+		limit--;
+
+ /*
+ * We should also do -10% if sprite scaling is enabled
+ * on the other pipe, but we can't really check for that,
+ * so we ignore it.
+ */
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale),
+ limit << 16);
+}
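A worked example with assumed numbers: a 3x horizontal downscale of an RGB plane. decimate = ilog2(3) = 1, hscale is reduced to 1.5, and the limit drops from 9 to 8 for the decimation step and to 7 for RGB, so the plane needs roughly 2.15x the pixel rate:

/* Illustrative only: 3x horizontal downscale of an RGB plane */
static unsigned int example_g4x_min_cdclk(unsigned int pixel_rate)
{
	unsigned int hscale = 0x30000;			/* 3.0 in 16.16 */
	unsigned int decimate = ilog2(hscale >> 16);	/* 1 */
	unsigned int limit = 9 - decimate - 1;		/* 90% - step - RGB */

	hscale >>= decimate;				/* 1.5 in 16.16 */

	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale),
				limit << 16);
}

/* e.g. pixel_rate = 100000 kHz -> ~214286 kHz */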
+
static unsigned int
g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -1300,6 +1653,12 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616;
+ break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
break;
@@ -1513,6 +1872,11 @@ static bool intel_fb_scalable(const struct drm_framebuffer *fb)
switch (fb->format->format) {
case DRM_FORMAT_C8:
return false;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return INTEL_GEN(to_i915(fb->dev)) >= 11;
default:
return true;
}
@@ -1791,7 +2155,7 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
/* Display WA #1106 */
- if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
+ if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
@@ -1801,6 +2165,22 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
return 0;
}
+static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
+ const struct drm_framebuffer *fb)
+{
+ /*
+ * We don't yet know the final source width nor
+ * whether we can use the HQ scaler mode. Assume
+ * the best case.
+ * FIXME need to properly check this later.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+ !drm_format_info_is_yuv_semiplanar(fb->format))
+ return 0x30000 - 1;
+ else
+ return 0x20000 - 1;
+}
+
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
@@ -1818,7 +2198,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
/* use scaler when colorkey is not required */
if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
min_scale = 1;
- max_scale = skl_max_scale(crtc_state, fb->format->format);
+ max_scale = skl_plane_max_scale(dev_priv, fb);
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1993,8 +2373,10 @@ static const u64 i9xx_plane_format_modifiers[] = {
};
static const u32 snb_plane_formats[] = {
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2024,6 +2406,8 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2039,6 +2423,8 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2055,6 +2441,8 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2158,6 +2546,13 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
+static const u64 gen12_plane_format_modifiers_noccs[] = {
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -2198,6 +2593,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2306,6 +2703,55 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
+static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -2342,6 +2788,15 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
+static const struct drm_plane_funcs gen12_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = gen12_plane_format_mod_supported,
+};
+
static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2430,6 +2885,7 @@ struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
+ const struct drm_plane_funcs *plane_funcs;
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
@@ -2459,6 +2915,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = skl_disable_plane;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
+ plane->min_cdclk = skl_plane_min_cdclk;
if (icl_is_nv12_y_plane(plane_id))
plane->update_slave = icl_update_slave;
@@ -2472,11 +2929,19 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
- if (plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /* TODO: Implement support for gen-12 CCS modifiers */
+ plane->has_ccs = false;
+ modifiers = gen12_plane_format_modifiers_noccs;
+ plane_funcs = &gen12_plane_funcs;
+ } else {
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
+ plane_funcs = &skl_plane_funcs;
+ }
if (plane_id == PLANE_PRIMARY)
plane_type = DRM_PLANE_TYPE_PRIMARY;
@@ -2486,7 +2951,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
possible_crtcs = BIT(pipe);
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, &skl_plane_funcs,
+ possible_crtcs, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -2519,6 +2984,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@@ -2540,7 +3007,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
const u64 *modifiers;
const u32 *formats;
int num_formats;
- int ret;
+ int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@@ -2556,6 +3023,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->min_cdclk = vlv_plane_min_cdclk;
formats = vlv_plane_formats;
num_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -2569,6 +3037,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = ivb_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else
+ plane->min_cdclk = ivb_sprite_min_cdclk;
+
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
@@ -2580,6 +3053,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN(dev_priv, 6)) {
@@ -2630,6 +3104,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
+ zpos = sprite + 1;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 093a2d156f1e..5eeaa92420d1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -17,7 +17,6 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
-bool is_planar_yuv_format(u32 pixelformat);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -50,4 +49,11 @@ static inline u8 icl_hdr_plane_mask(void)
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 85743a43bee2..7773169b7331 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -23,32 +23,38 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
-static bool has_modular_fia(struct drm_i915_private *i915)
-{
- if (!INTEL_INFO(i915)->display.has_modular_fia)
- return false;
-
- return intel_uncore_read(&i915->uncore,
- PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK;
-}
-
-static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915,
- enum tc_port tc_port)
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915,
+ struct intel_digital_port *dig_port)
{
- if (!has_modular_fia(i915))
- return FIA1;
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+ u32 modular_fia;
+
+ if (INTEL_INFO(i915)->display.has_modular_fia) {
+ modular_fia = intel_uncore_read(&i915->uncore,
+ PORT_TX_DFLEXDPSP(FIA1));
+ modular_fia &= MODULAR_FIA_MASK;
+ } else {
+ modular_fia = 0;
+ }
/*
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
* than two TC ports, there are multiple instances of Modular FIA.
*/
- return tc_port / 2;
+ if (modular_fia) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
}
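To make the mapping above concrete, assuming a SoC with four TC ports and modular FIA present:

/*
 * Illustrative mapping only:
 *
 *   TC port 0 -> tc_phy_fia = FIA1, tc_phy_fia_idx = 0
 *   TC port 1 -> tc_phy_fia = FIA1, tc_phy_fia_idx = 1
 *   TC port 2 -> tc_phy_fia = FIA2, tc_phy_fia_idx = 0
 *   TC port 3 -> tc_phy_fia = FIA2, tc_phy_fia_idx = 1
 *
 * Without modular FIA everything stays on FIA1 and
 * tc_phy_fia_idx == tc_port.
 */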
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 lane_mask;
@@ -57,8 +63,23 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
WARN_ON(lane_mask == 0xffffffff);
- return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+ lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
+ return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+}
+
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 pin_mask;
+
+ pin_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
+
+ WARN_ON(pin_mask == 0xffffffff);
+
+ return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
+ DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
@@ -95,7 +116,6 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -104,19 +124,21 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
- val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
switch (required_lanes) {
case 1:
- val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
break;
case 2:
- val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
break;
case 4:
- val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
break;
default:
MISSING_CASE(required_lanes);
@@ -164,9 +186,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
return mask;
}
- if (val & TC_LIVE_STATE_TBT(tc_port))
+ if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_TBT_ALT);
- if (val & TC_LIVE_STATE_TC(tc_port))
+ if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_DP_ALT);
if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
@@ -182,7 +204,6 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -194,14 +215,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
return false;
}
- return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port);
+ return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
bool enable)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -215,9 +235,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
return false;
}
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
if (!enable)
- val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
@@ -232,7 +252,6 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -244,7 +263,7 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
return true;
}
- return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port));
+ return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}
/*
@@ -540,5 +559,5 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
mutex_init(&dig_port->tc_lock);
dig_port->tc_legacy_port = is_legacy;
dig_port->tc_link_refcount = 0;
- dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port);
+ tc_port_load_fia_params(i915, dig_port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 783d75531435..463f1b3c836f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -13,6 +13,7 @@ struct intel_digital_port;
bool intel_tc_port_connected(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index b70221f5112a..9983fadf6c28 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -961,11 +961,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* Ensure TV refresh is close to desired refresh */
- if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
- < 1000)
- return MODE_OK;
+ if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000)
+ return MODE_CLOCK_RANGE;
- return MODE_CLOCK_RANGE;
+ return MODE_OK;
}
static int
@@ -1702,7 +1701,7 @@ intel_tv_detect(struct drm_connector *connector,
struct intel_load_detect_pipe tmp;
int ret;
- ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
if (ret < 0)
return ret;
@@ -1948,9 +1947,8 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = ~0;
intel_encoder->cloneable = 0;
- intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index dfcd156b5094..69a7cb1fa121 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -114,6 +114,7 @@ enum bdb_block_id {
BDB_LVDS_POWER = 44,
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
+ BDB_COMPRESSION_PARAMETERS = 56,
BDB_SKIP = 254, /* VBIOS private block, ignore */
};
@@ -291,6 +292,8 @@ struct bdb_general_features {
#define DVO_PORT_HDMIE 12 /* 193 */
#define DVO_PORT_DPF 13 /* N/A */
#define DVO_PORT_HDMIF 14 /* N/A */
+#define DVO_PORT_DPG 15
+#define DVO_PORT_HDMIG 16
#define DVO_PORT_MIPIA 21 /* 171 */
#define DVO_PORT_MIPIB 22 /* 171 */
#define DVO_PORT_MIPIC 23 /* 171 */
@@ -325,6 +328,7 @@ enum vbt_gmbus_ddi {
#define DP_AUX_D 0x30
#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
+#define DP_AUX_G 0x70
#define VBT_DP_MAX_LINK_RATE_HBR3 0
#define VBT_DP_MAX_LINK_RATE_HBR2 1
@@ -808,4 +812,55 @@ struct bdb_mipi_sequence {
u8 data[0]; /* up to 6 variable length blocks */
} __packed;
+/*
+ * Block 56 - Compression Parameters
+ */
+
+#define VBT_RC_BUFFER_BLOCK_SIZE_1KB 0
+#define VBT_RC_BUFFER_BLOCK_SIZE_4KB 1
+#define VBT_RC_BUFFER_BLOCK_SIZE_16KB 2
+#define VBT_RC_BUFFER_BLOCK_SIZE_64KB 3
+
+#define VBT_DSC_LINE_BUFFER_DEPTH(vbt_value) ((vbt_value) + 8) /* bits */
+#define VBT_DSC_MAX_BPP(vbt_value) (6 + (vbt_value) * 2)
+
+struct dsc_compression_parameters_entry {
+ u8 version_major:4;
+ u8 version_minor:4;
+
+ u8 rc_buffer_block_size:2;
+ u8 reserved1:6;
+
+ /*
+ * Buffer size in bytes:
+ *
+ * 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes
+ */
+ u8 rc_buffer_size;
+ u32 slices_per_line;
+
+ u8 line_buffer_depth:4;
+ u8 reserved2:4;
+
+ /* Flag Bits 1 */
+ u8 block_prediction_enable:1;
+ u8 reserved3:7;
+
+ u8 max_bpp; /* mapping */
+
+ /* Color depth capabilities */
+ u8 reserved4:1;
+ u8 support_8bpc:1;
+ u8 support_10bpc:1;
+ u8 support_12bpc:1;
+ u8 reserved5:4;
+
+ u16 slice_height;
+} __packed;
+
+struct bdb_compression_parameters {
+ u16 entry_size;
+ struct dsc_compression_parameters_entry data[16];
+} __packed;
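The rc_buffer_size comment above encodes a small formula; a hedged decoding helper (hypothetical name, not part of the VBT parsing code) could look like this:

/* Illustrative only: RC buffer size in bytes, per the comment above */
static unsigned int vbt_dsc_rc_buffer_size(u8 rc_buffer_block_size,
					   u8 rc_buffer_size)
{
	/* 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes */
	return (1u << (2 * rc_buffer_block_size)) * 1024 *
		(rc_buffer_size + 1);
}

/* e.g. VBT_RC_BUFFER_BLOCK_SIZE_1KB and rc_buffer_size = 7 -> 8192 bytes */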
+
#endif /* _INTEL_VBT_DEFS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index d4fb7f16f9f6..896b0c334f5e 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -322,8 +322,8 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
- struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
- u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
+ u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
u8 i = 0;
int row_index = 0;
int column_index = 0;
@@ -332,7 +332,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
- pipe_config->dsc_params.slice_count);
+ pipe_config->dsc.slice_count);
/*
* Slice Height of 8 works for all currently available panels. So start
* with that if pic_height is an integral multiple of 8.
@@ -485,13 +485,13 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
u32 rc_range_params_dword[8];
- u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
int i = 0;
/* Populate PICTURE_PARAMETER_SET_0 registers */
@@ -514,11 +514,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
pps_val);
}
@@ -533,11 +533,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
pps_val);
}
@@ -553,11 +553,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
pps_val);
}
@@ -573,11 +573,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
pps_val);
}
@@ -593,11 +593,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
pps_val);
}
@@ -613,11 +613,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
pps_val);
}
@@ -635,11 +635,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
pps_val);
}
@@ -655,11 +655,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
pps_val);
}
@@ -675,11 +675,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
pps_val);
}
@@ -695,11 +695,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
pps_val);
}
@@ -717,11 +717,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
pps_val);
}
@@ -740,11 +740,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
pps_val);
}
@@ -763,7 +763,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(DSCC_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
@@ -782,7 +782,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_buf_thresh_dword[2]);
I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
rc_buf_thresh_dword[3]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
rc_buf_thresh_dword[0]);
I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
@@ -824,7 +824,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_range_params_dword[6]);
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
rc_range_params_dword[7]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
@@ -859,7 +859,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_range_params_dword[6]);
I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
rc_range_params_dword[7]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
rc_range_params_dword[0]);
I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
@@ -885,7 +885,7 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
@@ -909,7 +909,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
u32 dss_ctl1_val = 0;
u32 dss_ctl2_val = 0;
- if (!crtc_state->dsc_params.compression_enable)
+ if (!crtc_state->dsc.compression_enable)
return;
/* Enable Power wells for VDSC/joining */
@@ -928,7 +928,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
}
dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
@@ -944,7 +944,7 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
- if (!old_crtc_state->dsc_params.compression_enable)
+ if (!old_crtc_state->dsc.compression_enable)
return;
if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
new file mode 100644
index 000000000000..2ff7293986d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_vga.h"
+
+static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return VLV_VGACNTRL;
+ else if (INTEL_GEN(i915) >= 5)
+ return CPU_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
+/* Disable the VGA plane that we never use */
+void intel_vga_disable(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+ u8 sr1;
+
+ /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(SR01, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1 << 5, VGA_SR_DATA);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ udelay(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
+void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ intel_vga_disable(dev_priv);
+ }
+}
+
+void intel_vga_redisable(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ /*
+	 * This function can be called either from intel_modeset_setup_hw_state or
+ * at a very early point in our resume sequence, where the power well
+ * structures are not yet restored. Since this function is at a very
+ * paranoid "someone might have enabled VGA while we were not looking"
+ * level, just check if the power well is enabled instead of trying to
+ * follow the "don't touch the power well if we don't need it" policy
+ * the rest of the driver uses.
+ */
+ wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA);
+ if (!wakeref)
+ return;
+
+ intel_vga_redisable_power_on(i915);
+
+ intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
+}
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+	 * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+}
+
+static int
+intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
+{
+ unsigned int reg = INTEL_GEN(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+ if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
+ DRM_ERROR("failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
+ return 0;
+
+ if (enable_decode)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+ if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
+ DRM_ERROR("failed to write control word\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static unsigned int
+intel_vga_set_decode(void *cookie, bool enable_decode)
+{
+ struct drm_i915_private *i915 = cookie;
+
+ intel_vga_set_state(i915, enable_decode);
+
+ if (enable_decode)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+int intel_vga_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ int ret;
+
+ /*
+ * If we have > 1 VGA cards, then we need to arbitrate access to the
+ * common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and
+	 * vga_client_register() fails with -ENODEV.
+ */
+ ret = vga_client_register(pdev, i915, NULL, intel_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+void intel_vga_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ vga_client_register(pdev, NULL, NULL, NULL);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
new file mode 100644
index 000000000000..ba5b55b917f0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_H__
+#define __INTEL_VGA_H__
+
+struct drm_i915_private;
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915);
+void intel_vga_disable(struct drm_i915_private *i915);
+void intel_vga_redisable(struct drm_i915_private *i915);
+void intel_vga_redisable_power_on(struct drm_i915_private *i915);
+int intel_vga_register(struct drm_i915_private *i915);
+void intel_vga_unregister(struct drm_i915_private *i915);
+
+#endif /* __INTEL_VGA_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index a71b22bdd95b..0ca49b1604c6 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -749,7 +749,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;
@@ -1870,11 +1870,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* port C. BXT isn't limited like this.
*/
if (IS_GEN9_LP(dev_priv))
- intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ intel_encoder->pipe_mask = ~0;
else if (port == PORT_A)
- intel_encoder->crtc_mask = BIT(PIPE_A);
+ intel_encoder->pipe_mask = BIT(PIPE_A);
else
- intel_encoder->crtc_mask = BIT(PIPE_B);
+ intel_encoder->pipe_mask = BIT(PIPE_B);
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index f99920652751..81366aa4812b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
- struct drm_i915_private *i915 = w->ce->engine->i915;
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
@@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work)
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
- /* XXX: we need to kill this */
- mutex_lock(&i915->drm.struct_mutex);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (unlikely(err))
- goto out_unlock;
+ goto out_signal;
batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
if (IS_ERR(batch)) {
@@ -211,7 +208,7 @@ static void clear_pages_worker(struct work_struct *work)
* keep track of the GPU activity within this vma/request, and
* propagate the signal from the request to w->dma.
*/
- err = i915_active_ref(&vma->active, rq->timeline, rq);
+ err = __i915_vma_move_to_active(vma, rq);
if (err)
goto out_request;
@@ -229,8 +226,6 @@ out_batch:
intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
out_signal:
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 755c4542629f..e553ca8d98eb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,8 +69,10 @@
#include <drm/i915_drm.h>
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
@@ -167,97 +169,6 @@ lookup_user_engine(struct i915_gem_context *ctx,
return i915_gem_context_get_engine(ctx, idx);
}
-static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
-{
- unsigned int max;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- if (INTEL_GEN(i915) >= 12)
- max = GEN12_MAX_CONTEXT_HW_ID;
- else if (INTEL_GEN(i915) >= 11)
- max = GEN11_MAX_CONTEXT_HW_ID;
- else if (USES_GUC_SUBMISSION(i915))
- /*
- * When using GuC in proxy submission, GuC consumes the
- * highest bit in the context id to indicate proxy submission.
- */
- max = MAX_GUC_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
-
- return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
-}
-
-static int steal_hw_id(struct drm_i915_private *i915)
-{
- struct i915_gem_context *ctx, *cn;
- LIST_HEAD(pinned);
- int id = -ENOSPC;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- list_for_each_entry_safe(ctx, cn,
- &i915->contexts.hw_id_list, hw_id_link) {
- if (atomic_read(&ctx->hw_id_pin_count)) {
- list_move_tail(&ctx->hw_id_link, &pinned);
- continue;
- }
-
- GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
- list_del_init(&ctx->hw_id_link);
- id = ctx->hw_id;
- break;
- }
-
- /*
- * Remember how far we got up on the last repossesion scan, so the
- * list is kept in a "least recently scanned" order.
- */
- list_splice_tail(&pinned, &i915->contexts.hw_id_list);
- return id;
-}
-
-static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
-{
- int ret;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- /*
- * We prefer to steal/stall ourselves and our users over that of the
- * entire system. That may be a little unfair to our users, and
- * even hurt high priority clients. The choice is whether to oomkill
- * something else, or steal a context id.
- */
- ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- if (unlikely(ret < 0)) {
- ret = steal_hw_id(i915);
- if (ret < 0) /* once again for the correct errno code */
- ret = new_hw_id(i915, GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
- *out = ret;
- return 0;
-}
-
-static void release_hw_id(struct i915_gem_context *ctx)
-{
- struct drm_i915_private *i915 = ctx->i915;
-
- if (list_empty(&ctx->hw_id_link))
- return;
-
- mutex_lock(&i915->contexts.mutex);
- if (!list_empty(&ctx->hw_id_link)) {
- ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
- list_del_init(&ctx->hw_id_link);
- }
- mutex_unlock(&i915->contexts.mutex);
-}
-
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
while (count--) {
@@ -294,27 +205,33 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
for_each_engine(engine, gt, id) {
struct intel_context *ce;
+ if (engine->legacy_idx == INVALID_ENGINE)
+ continue;
+
+ GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+ GEM_BUG_ON(e->engines[engine->legacy_idx]);
+
ce = intel_context_create(ctx, engine);
if (IS_ERR(ce)) {
- __free_engines(e, id);
+ __free_engines(e, e->num_engines + 1);
return ERR_CAST(ce);
}
- e->engines[id] = ce;
- e->num_engines = id + 1;
+ e->engines[engine->legacy_idx] = ce;
+ e->num_engines = max(e->num_engines, engine->legacy_idx);
}
+ e->num_engines++;
return e;
}
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
- release_hw_id(ctx);
- if (ctx->vm)
- i915_vm_put(ctx->vm);
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
@@ -327,70 +244,202 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
kfree(ctx->name);
put_pid(ctx->pid);
- list_del(&ctx->link);
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
-static void contexts_free(struct drm_i915_private *i915)
+static void contexts_free_all(struct llist_node *list)
{
- struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
struct i915_gem_context *ctx, *cn;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- llist_for_each_entry_safe(ctx, cn, freed, free_link)
+ llist_for_each_entry_safe(ctx, cn, list, free_link)
i915_gem_context_free(ctx);
}
-static void contexts_free_first(struct drm_i915_private *i915)
+static void contexts_flush_free(struct i915_gem_contexts *gc)
{
- struct i915_gem_context *ctx;
- struct llist_node *freed;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- freed = llist_del_first(&i915->contexts.free_list);
- if (!freed)
- return;
-
- ctx = container_of(freed, typeof(*ctx), free_link);
- i915_gem_context_free(ctx);
+ contexts_free_all(llist_del_all(&gc->free_list));
}
static void contexts_free_worker(struct work_struct *work)
{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), contexts.free_work);
+ struct i915_gem_contexts *gc =
+ container_of(work, typeof(*gc), free_work);
- mutex_lock(&i915->drm.struct_mutex);
- contexts_free(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ contexts_flush_free(gc);
}
void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct drm_i915_private *i915 = ctx->i915;
+ struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &i915->contexts.free_list))
- queue_work(i915->wq, &i915->contexts.free_work);
+ if (llist_add(&ctx->free_link, &gc->free_list))
+ schedule_work(&gc->free_work);
}
-static void context_close(struct i915_gem_context *ctx)
+static inline struct i915_gem_engines *
+__context_engines_static(const struct i915_gem_context *ctx)
{
- mutex_lock(&ctx->mutex);
+ return rcu_dereference_protected(ctx->engines, true);
+}
- i915_gem_context_set_closed(ctx);
- ctx->file_priv = ERR_PTR(-EBADF);
+static bool __reset_engine(struct intel_engine_cs *engine)
+{
+ struct intel_gt *gt = engine->gt;
+ bool success = false;
+
+ if (!intel_has_reset_engine(gt))
+ return false;
+
+ if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ success = intel_engine_reset(engine, NULL) == 0;
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ }
+
+ return success;
+}
+
+static void __reset_context(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "context closure in %s", ctx->name);
+}
+
+static bool __cancel_engine(struct intel_engine_cs *engine)
+{
+ /*
+ * Send a "high priority pulse" down the engine to cause the
+ * current request to be momentarily preempted. (If it fails to
+ * be preempted, it will be reset). As we have marked our context
+ * as banned, any incomplete request, including those still running, will
+ * be skipped following the preemption.
+ *
+ * If there is no hangchecking (one of the reasons why we try to
+ * cancel the context) and no forced preemption, there may be no
+ * means by which we reset the GPU and evict the persistent hog.
+ * Ergo if we are unable to inject a preemptive pulse that can
+ * kill the banned context, we fall back to doing a local reset
+ * instead.
+ */
+ if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
+ !intel_engine_pulse(engine))
+ return true;
+
+ /* If we are unable to send a pulse, try resetting this engine. */
+ return __reset_engine(engine);
+}
+
+static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine, *locked;
+
+ /*
+ * Serialise with __i915_request_submit() so that it sees
+ * is-banned?, or we know the request is already inflight.
+ */
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->active.lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->active.lock);
+ spin_lock(&engine->active.lock);
+ locked = engine;
+ }
+
+ engine = NULL;
+ if (i915_request_is_active(rq) && !rq->fence.error)
+ engine = rq->engine;
+
+ spin_unlock_irq(&locked->active.lock);
+
+ return engine;
+}
+
+static struct intel_engine_cs *active_engine(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct i915_request *rq;
+
+ if (!ce->timeline)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ /* Check with the backend if the request is inflight */
+ engine = __active_engine(rq);
+ if (engine)
+ break;
+ }
+ rcu_read_unlock();
+
+ return engine;
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ /*
+ * If we are already banned, it was due to a guilty request causing
+ * a reset and the entire context being evicted from the GPU.
+ */
+ if (i915_gem_context_is_banned(ctx))
+ return;
+
+ i915_gem_context_set_banned(ctx);
/*
- * This context will never again be assinged to HW, so we can
- * reuse its ID for the next context.
+ * Map the user's engine back to the actual engines; one virtual
+ * engine will be mapped to multiple engines, and using ctx->engine[]
+ * the same engine may have multiple instances in the user's map.
+ * However, we only care about pending requests, so only include
+ * engines on which there are incomplete requests.
*/
- release_hw_id(ctx);
+ for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+ struct intel_engine_cs *engine;
+
+ /*
+ * Check the current active state of this context; if we
+ * are currently executing on the GPU we need to evict
+ * ourselves. On the other hand, if we haven't yet been
+ * submitted to the GPU or if everything is complete,
+ * we have nothing to do.
+ */
+ engine = active_engine(ce);
+
+ /* First attempt to gracefully cancel the context */
+ if (engine && !__cancel_engine(engine))
+ /*
+ * If we are unable to send a preemptive pulse to bump
+ * the context from the GPU, we have to resort to a full
+ * reset. We hope the collateral damage is worth it.
+ */
+ __reset_context(ctx, engine);
+ }
+}
+
+static void context_close(struct i915_gem_context *ctx)
+{
+ struct i915_address_space *vm;
+
+ i915_gem_context_set_closed(ctx);
+
+ mutex_lock(&ctx->mutex);
+
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ i915_vm_close(vm);
+
+ ctx->file_priv = ERR_PTR(-EBADF);
/*
* The LUT uses the VMA as a backpointer to unref the object,
@@ -400,9 +449,47 @@ static void context_close(struct i915_gem_context *ctx)
lut_close(ctx);
mutex_unlock(&ctx->mutex);
+
+ /*
+ * If the user has disabled hangchecking, we can not be sure that
+ * the batches will ever complete after the context is closed,
+ * keeping the context and all resources pinned forever. So in this
+ * case we opt to forcibly kill off all remaining requests on
+ * context close.
+ */
+ if (!i915_gem_context_is_persistent(ctx) ||
+ !i915_modparams.enable_hangcheck)
+ kill_context(ctx);
+
i915_gem_context_put(ctx);
}
+static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
+{
+ if (i915_gem_context_is_persistent(ctx) == state)
+ return 0;
+
+ if (state) {
+ /*
+ * Only contexts that are short-lived [that will expire or be
+ * reset] are allowed to survive past termination. We require
+ * hangcheck to ensure that the persistent requests are healthy.
+ */
+ if (!i915_modparams.enable_hangcheck)
+ return -EINVAL;
+
+ i915_gem_context_set_persistence(ctx);
+ } else {
+ /* To cancel a context we use "preempt-to-idle" */
+ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ return -ENODEV;
+
+ i915_gem_context_clear_persistence(ctx);
+ }
+
+ return 0;
+}
+
static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
@@ -416,7 +503,6 @@ __create_context(struct drm_i915_private *i915)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &i915->contexts.list);
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
@@ -430,7 +516,6 @@ __create_context(struct drm_i915_private *i915)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->hw_id_link);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -439,6 +524,7 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
+ __context_set_persistence(ctx, true /* cgroup hook? */);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -446,6 +532,10 @@ __create_context(struct drm_i915_private *i915)
ctx->jump_whitelist = NULL;
ctx->jump_whitelist_cmds = 0;
+ spin_lock(&i915->gem.contexts.lock);
+ list_add_tail(&ctx->link, &i915->gem.contexts.list);
+ spin_unlock(&i915->gem.contexts.lock);
+
return ctx;
err_free:
@@ -475,11 +565,11 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm)
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_address_space *old = ctx->vm;
+ struct i915_address_space *old = i915_gem_context_vm(ctx);
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- ctx->vm = i915_vm_get(vm);
+ rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
@@ -488,12 +578,12 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
static void __assign_ppgtt(struct i915_gem_context *ctx,
struct i915_address_space *vm)
{
- if (vm == ctx->vm)
+ if (vm == rcu_access_pointer(ctx->vm))
return;
vm = __set_ppgtt(ctx, vm);
if (vm)
- i915_vm_put(vm);
+ i915_vm_close(vm);
}
static void __set_timeline(struct intel_timeline **dst,
@@ -520,27 +610,25 @@ static void __assign_timeline(struct i915_gem_context *ctx,
}
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_context *ctx;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
- !HAS_EXECLISTS(dev_priv))
+ !HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the most stale context */
- contexts_free_first(dev_priv);
+ /* Reap the stale contexts */
+ contexts_flush_free(&i915->gem.contexts);
- ctx = __create_context(dev_priv);
+ ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
- if (HAS_FULL_PPGTT(dev_priv)) {
+ if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -548,14 +636,17 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ERR_CAST(ppgtt);
}
+ mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, &ppgtt->vm);
+ mutex_unlock(&ctx->mutex);
+
i915_vm_put(&ppgtt->vm);
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
struct intel_timeline *timeline;
- timeline = intel_timeline_create(&dev_priv->gt, NULL);
+ timeline = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
@@ -587,19 +678,13 @@ struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
struct i915_gem_context *ctx;
- int err;
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
- err = i915_gem_context_pin_hw_id(ctx);
- if (err) {
- destroy_kernel_context(&ctx);
- return ERR_PTR(err);
- }
-
i915_gem_context_clear_bannable(ctx);
+ i915_gem_context_set_persistence(ctx);
ctx->sched.priority = I915_USER_PRIORITY(prio);
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -607,62 +692,42 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
return ctx;
}
-static void init_contexts(struct drm_i915_private *i915)
+static void init_contexts(struct i915_gem_contexts *gc)
{
- mutex_init(&i915->contexts.mutex);
- INIT_LIST_HEAD(&i915->contexts.list);
-
- /* Using the simple ida interface, the max is limited by sizeof(int) */
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&i915->contexts.hw_ida);
- INIT_LIST_HEAD(&i915->contexts.hw_id_list);
+ spin_lock_init(&gc->lock);
+ INIT_LIST_HEAD(&gc->list);
- INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
- init_llist_head(&i915->contexts.free_list);
+ INIT_WORK(&gc->free_work, contexts_free_worker);
+ init_llist_head(&gc->free_list);
}
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_contexts(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
- GEM_BUG_ON(dev_priv->kernel_context);
+ GEM_BUG_ON(i915->kernel_context);
- init_contexts(dev_priv);
+ init_contexts(&i915->gem.contexts);
/* lowest priority; idle task */
- ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
+ ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
return PTR_ERR(ctx);
}
- /*
- * For easy recognisablity, we want the kernel context to be 0 and then
- * all user contexts will have non-zero hw_id. Kernel contexts are
- * permanently pinned, so that we never suffer a stall and can
- * use them from any allocation context (e.g. for evicting other
- * contexts and from inside the shrinker).
- */
- GEM_BUG_ON(ctx->hw_id);
- GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
- dev_priv->kernel_context = ctx;
+ i915->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
- DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ DRIVER_CAPS(i915)->has_logical_contexts ?
"logical" : "fake");
return 0;
}
-void i915_gem_contexts_fini(struct drm_i915_private *i915)
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->drm.struct_mutex);
-
destroy_kernel_context(&i915->kernel_context);
-
- /* Must free all deferred contexts (via flush_workqueue) first */
- GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
- ida_destroy(&i915->contexts.hw_ida);
+ flush_work(&i915->gem.contexts.free_work);
}
static int context_idr_cleanup(int id, void *p, void *data)
@@ -680,11 +745,16 @@ static int vm_idr_cleanup(int id, void *p, void *data)
static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv)
{
+ struct i915_address_space *vm;
int ret;
ctx->file_priv = fpriv;
- if (ctx->vm)
- ctx->vm->file = fpriv;
+
+ mutex_lock(&ctx->mutex);
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ WRITE_ONCE(vm->file, fpriv); /* XXX */
+ mutex_unlock(&ctx->mutex);
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@@ -721,9 +791,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
idr_init(&file_priv->context_idr);
idr_init_base(&file_priv->vm_idr, 1);
- mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915, 0);
- mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err;
@@ -751,6 +819,7 @@ err:
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *i915 = file_priv->dev_priv;
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
@@ -759,6 +828,8 @@ void i915_gem_context_close(struct drm_file *file)
idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
idr_destroy(&file_priv->vm_idr);
mutex_destroy(&file_priv->vm_idr_lock);
+
+ contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -851,6 +922,7 @@ struct context_barrier_task {
void *data;
};
+__i915_active_call
static void cb_retire(struct i915_active *base)
{
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@@ -870,20 +942,18 @@ static int context_barrier_task(struct i915_gem_context *ctx,
void (*task)(void *data),
void *data)
{
- struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
struct intel_context *ce;
int err = 0;
- lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!task);
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
- i915_active_init(i915, &cb->base, NULL, cb_retire);
+ i915_active_init(&cb->base, NULL, cb_retire);
err = i915_active_acquire(&cb->base);
if (err) {
kfree(cb);
@@ -915,7 +985,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (emit)
err = emit(rq, data);
if (err == 0)
- err = i915_active_ref(&cb->base, rq->timeline, rq);
+ err = i915_active_add_request(&cb->base, rq);
i915_request_add(rq);
if (err)
@@ -938,16 +1008,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_address_space *vm;
int ret;
- if (!ctx->vm)
+ if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
- /* XXX rcu acquire? */
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
- if (ret)
- return ret;
-
+ rcu_read_lock();
vm = i915_vm_get(ctx->vm);
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ rcu_read_unlock();
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (ret)
@@ -958,7 +1024,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret < 0)
goto err_unlock;
- i915_vm_get(vm);
+ i915_vm_open(vm);
args->size = 0;
args->value = ret;
@@ -978,7 +1044,7 @@ static void set_ppgtt_barrier(void *data)
if (INTEL_GEN(old->i915) < 8)
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
- i915_vm_put(old);
+ i915_vm_close(old);
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
@@ -1008,12 +1074,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
intel_ring_advance(rq, cs);
} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ int err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
@@ -1050,34 +1122,34 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (args->size)
return -EINVAL;
- if (!ctx->vm)
+ if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
if (upper_32_bits(args->value))
return -ENOENT;
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (err)
- return err;
-
+ rcu_read_lock();
vm = idr_find(&file_priv->vm_idr, args->value);
- if (vm)
- i915_vm_get(vm);
- mutex_unlock(&file_priv->vm_idr_lock);
+ if (vm && !kref_get_unless_zero(&vm->ref))
+ vm = NULL;
+ rcu_read_unlock();
if (!vm)
return -ENOENT;
- err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ err = mutex_lock_interruptible(&ctx->mutex);
if (err)
goto out;
- if (vm == ctx->vm)
+ if (i915_gem_context_is_closed(ctx)) {
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ if (vm == rcu_access_pointer(ctx->vm))
goto unlock;
/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
- mutex_lock(&ctx->mutex);
lut_close(ctx);
- mutex_unlock(&ctx->mutex);
old = __set_ppgtt(ctx, vm);
@@ -1092,13 +1164,12 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
- i915_vm_put(__set_ppgtt(ctx, old));
- i915_vm_put(old);
+ i915_vm_close(__set_ppgtt(ctx, old));
+ i915_vm_close(old);
}
unlock:
- mutex_unlock(&ctx->i915->drm.struct_mutex);
-
+ mutex_unlock(&ctx->mutex);
out:
i915_vm_put(vm);
return err;
@@ -1117,7 +1188,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
offset = i915_ggtt_offset(ce->state) +
LRC_STATE_PN * PAGE_SIZE +
- (CTX_R_PWR_CLK_STATE + 1) * 4;
+ CTX_R_PWR_CLK_STATE * 4;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
@@ -1160,8 +1231,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
}
static int
-__intel_context_reconfigure_sseu(struct intel_context *ce,
- struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
int ret;
@@ -1185,23 +1255,6 @@ unlock:
}
static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
-{
- struct drm_i915_private *i915 = ce->engine->i915;
- int ret;
-
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- ret = __intel_context_reconfigure_sseu(ce, sseu);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- return ret;
-}
-
-static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,
struct intel_sseu *context)
@@ -1743,6 +1796,16 @@ err_free:
return err;
}
+static int
+set_persistence(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ if (args->size)
+ return -EINVAL;
+
+ return __context_set_persistence(ctx, args->value);
+}
+
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1820,6 +1883,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_engines(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ ret = set_persistence(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1972,10 +2039,11 @@ static int clone_vm(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
struct i915_address_space *vm;
+ int err = 0;
rcu_read_lock();
do {
- vm = READ_ONCE(src->vm);
+ vm = rcu_dereference(src->vm);
if (!vm)
break;
@@ -1997,7 +2065,7 @@ static int clone_vm(struct i915_gem_context *dst,
* it cannot be reallocated elsewhere.
*/
- if (vm == READ_ONCE(src->vm))
+ if (vm == rcu_access_pointer(src->vm))
break;
i915_vm_put(vm);
@@ -2005,11 +2073,16 @@ static int clone_vm(struct i915_gem_context *dst,
rcu_read_unlock();
if (vm) {
- __assign_ppgtt(dst, vm);
+ if (!mutex_lock_interruptible(&dst->mutex)) {
+ __assign_ppgtt(dst, vm);
+ mutex_unlock(&dst->mutex);
+ } else {
+ err = -EINTR;
+ }
i915_vm_put(vm);
}
- return 0;
+ return err;
}
static int create_clone(struct i915_user_extension __user *ext, void *data)
@@ -2099,12 +2172,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
ext_data.ctx = i915_gem_create_context(i915, args->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ext_data.ctx))
return PTR_ERR(ext_data.ctx);
@@ -2231,12 +2299,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
- if (ctx->vm)
- args->value = ctx->vm->total;
- else if (to_i915(dev)->ggtt.alias)
- args->value = to_i915(dev)->ggtt.alias->vm.total;
+ rcu_read_lock();
+ if (rcu_access_pointer(ctx->vm))
+ args->value = rcu_dereference(ctx->vm)->total;
else
args->value = to_i915(dev)->ggtt.vm.total;
+ rcu_read_unlock();
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
@@ -2271,6 +2339,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = get_engines(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ args->size = 0;
+ args->value = i915_gem_context_is_persistent(ctx);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2302,7 +2375,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_reset_stats *args = data;
struct i915_gem_context *ctx;
int ret;
@@ -2324,7 +2397,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
*/
if (capable(CAP_SYS_ADMIN))
- args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ args->reset_count = i915_reset_count(&i915->gpu_error);
else
args->reset_count = 0;
@@ -2337,33 +2410,6 @@ out:
return ret;
}
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
-{
- struct drm_i915_private *i915 = ctx->i915;
- int err = 0;
-
- mutex_lock(&i915->contexts.mutex);
-
- GEM_BUG_ON(i915_gem_context_is_closed(ctx));
-
- if (list_empty(&ctx->hw_id_link)) {
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
-
- err = assign_hw_id(i915, &ctx->hw_id);
- if (err)
- goto out_unlock;
-
- list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
- }
-
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
- atomic_inc(&ctx->hw_id_pin_count);
-
-out_unlock:
- mutex_unlock(&i915->contexts.mutex);
- return err;
-}
-
/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
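
The reworked release path above drops struct_mutex and drains freed contexts from a lockless llist in a worker. Below is a minimal sketch of that deferred-free pattern, assuming only the generic llist/workqueue APIs used in the hunks; the example_* names are illustrative, not part of the patch.

#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_obj {
	struct llist_node free_link;
};

static void example_free_worker(struct work_struct *work);

static LLIST_HEAD(example_free_list);
static DECLARE_WORK(example_free_work, example_free_worker);

static void example_free_worker(struct work_struct *work)
{
	/* Detach the whole batch at once; producers keep adding meanwhile. */
	struct llist_node *list = llist_del_all(&example_free_list);
	struct example_obj *obj, *on;

	llist_for_each_entry_safe(obj, on, list, free_link)
		kfree(obj);
}

static void example_obj_release(struct example_obj *obj)
{
	/*
	 * llist_add() returns true only when the list was previously empty,
	 * so the worker is scheduled exactly once per batch of releases.
	 */
	if (llist_add(&obj->free_link, &example_free_list))
		schedule_work(&example_free_work);
}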
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 176978608b6f..18e50a769a6e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -11,7 +11,9 @@
#include "gt/intel_context.h"
+#include "i915_drv.h"
#include "i915_gem.h"
+#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
@@ -74,6 +76,21 @@ static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *c
clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
+static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
+{
+ return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
+{
+ set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
+{
+ clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNED, &ctx->flags);
@@ -112,19 +129,22 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
-static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
+static inline bool
+i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
{
- if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
- return 0;
+ return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
+}
- return __i915_gem_context_pin_hw_id(ctx);
+static inline void
+i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
+{
+ set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}
-static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
+static inline void
+i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
{
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
- atomic_dec(&ctx->hw_id_pin_count);
+ clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
@@ -133,8 +153,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
}
/* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
@@ -173,6 +193,27 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
+static inline struct i915_address_space *
+i915_gem_context_vm(struct i915_gem_context *ctx)
+{
+ return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
+}
+
+static inline struct i915_address_space *
+i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+ struct i915_address_space *vm;
+
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm);
+ if (!vm)
+ vm = &ctx->i915->ggtt.vm;
+ vm = i915_vm_get(vm);
+ rcu_read_unlock();
+
+ return vm;
+}
+
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
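
The new i915_gem_context_vm()/i915_gem_context_get_vm_rcu() helpers above follow the standard __rcu pointer discipline: writers publish under the context mutex, lockless readers take their own reference inside an RCU read-side section. A minimal sketch of that pattern, with illustrative example_* names:

#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/kref.h>

struct example_vm {
	struct kref ref;
};

struct example_ctx {
	struct mutex mutex;
	struct example_vm __rcu *vm;
};

static void example_set_vm(struct example_ctx *ctx, struct example_vm *vm)
{
	lockdep_assert_held(&ctx->mutex);
	rcu_assign_pointer(ctx->vm, vm);	/* publish to lockless readers */
}

static struct example_vm *example_get_vm(struct example_ctx *ctx)
{
	struct example_vm *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (vm && !kref_get_unless_zero(&vm->ref))
		vm = NULL;			/* lost the race with teardown */
	rcu_read_unlock();

	return vm;
}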
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 00537b9d7006..3870dd5daaa0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -88,7 +88,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
- struct i915_address_space *vm;
+ struct i915_address_space __rcu *vm;
/**
* @pid: process id of creator
@@ -137,6 +137,7 @@ struct i915_gem_context {
#define UCONTEXT_NO_ERROR_CAPTURE 1
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
+#define UCONTEXT_PERSISTENCE 4
/**
* @flags: small set of booleans
@@ -146,24 +147,7 @@ struct i915_gem_context {
#define CONTEXT_CLOSED 1
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
#define CONTEXT_USER_ENGINES 3
-
- /**
- * @hw_id: - unique identifier for the context
- *
- * The hardware needs to uniquely identify the context for a few
- * functions like fault reporting, PASID, scheduling. The
- * &drm_i915_private.context_hw_ida is used to assign a unqiue
- * id for the lifetime of the context.
- *
- * @hw_id_pin_count: - number of times this context had been pinned
- * for use (should be, at most, once per engine).
- *
- * @hw_id_link: - all contexts with an assigned id are tracked
- * for possible repossession.
- */
- unsigned int hw_id;
- atomic_t hw_id_pin_count;
- struct list_head hw_id_link;
+#define CONTEXT_NOPREEMPT 4
struct mutex mutex;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 96ce95c8ac5a..eaea49d08eb5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -256,6 +256,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ static struct lock_class_key lock_class;
struct dma_buf_attachment *attach;
struct drm_i915_gem_object *obj;
int ret;
@@ -287,7 +288,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
obj->base.import_attach = attach;
obj->base.resv = dma_buf->resv;
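
The static struct lock_class_key threaded into i915_gem_object_init() above is the usual way to give each object "type" its own lockdep class, since locks initialised from a single shared helper would otherwise all collapse into one class. A minimal sketch of the idea, not the driver's exact helper:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct example_obj {
	struct mutex lock;
};

static void example_obj_init(struct example_obj *obj,
			     struct lock_class_key *key)
{
	mutex_init(&obj->lock);
	lockdep_set_class(&obj->lock, key);	/* per-call-site lockdep class */
}

static void example_create(struct example_obj *obj)
{
	static struct lock_class_key lock_class;	/* one key per object type */

	example_obj_init(obj, &lock_class);
}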
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 9c58e8fac1d9..9937b4c341f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -27,7 +27,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
- if (!READ_ONCE(obj->pin_global))
+ if (!i915_gem_object_is_framebuffer(obj))
return;
i915_gem_object_lock(obj);
@@ -288,14 +288,21 @@ restart:
if (!drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+ /* Wait for an earlier async bind, need to rewrite it */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
+ ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
if (ret)
return ret;
}
}
- list_for_each_entry(vma, &obj->vma.list, obj_link)
- vma->node.color = cache_level;
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (i915_vm_has_cache_coloring(vma->vm))
+ vma->node.color = cache_level;
+ }
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
@@ -389,16 +396,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- goto out;
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret == 0) {
ret = i915_gem_object_set_cache_level(obj, level);
i915_gem_object_unlock(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
out:
i915_gem_object_put(obj);
@@ -422,12 +424,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
assert_object_held(obj);
- /* Mark the global pin early so that we account for the
- * display coherency whilst setting up the cache domains.
- */
- obj->pin_global++;
-
- /* The display engine is not coherent with the LLC cache on gen6. As
+ /*
+ * The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* done with uncached PTEs. This is lowest common denominator for all
* chipsets.
@@ -439,12 +437,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(to_i915(obj->base.dev)) ?
I915_CACHE_WT : I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err_unpin_global;
- }
+ if (ret)
+ return ERR_PTR(ret);
- /* As the user may map the buffer once pinned in the display plane
+ /*
+ * As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers. However,
* it may simply be too big to fit into mappable, in which case
@@ -461,22 +458,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
if (IS_ERR(vma))
- goto err_unpin_global;
+ return vma;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
__i915_gem_object_flush_for_display(obj);
- /* It should now be out of any other write domains, and we can update
+ /*
+ * It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
-
-err_unpin_global:
- obj->pin_global--;
- return vma;
}
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
@@ -491,6 +485,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
if (!drm_mm_node_allocated(&vma->node))
continue;
+ GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
mutex_unlock(&i915->ggtt.vm.mutex);
@@ -500,7 +495,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (obj->mm.madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED &&
+ !atomic_read(&obj->mm.shrink_pin))
list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
@@ -514,12 +510,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
assert_object_held(obj);
- if (WARN_ON(obj->pin_global == 0))
- return;
-
- if (--obj->pin_global == 0)
- vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
-
/* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index e635e1e5f4d3..f0998f1225af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -19,6 +19,7 @@
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
@@ -252,6 +253,7 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
+ struct intel_context *ce;
struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
@@ -699,7 +701,9 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
+ mutex_lock(&eb->context->vm->mutex);
err = i915_gem_evict_vm(eb->context->vm);
+ mutex_unlock(&eb->context->vm->mutex);
if (err)
return err;
break;
@@ -727,7 +731,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
- if (ctx->vm)
+ if (rcu_access_pointer(ctx->vm))
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
eb->context_flags = 0;
@@ -882,6 +886,9 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
+ if (eb->reloc_cache.ce)
+ intel_context_put(eb->reloc_cache.ce);
+
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -904,7 +911,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
- cache->node.allocated = false;
+ cache->node.flags = 0;
+ cache->ce = NULL;
cache->rq = NULL;
cache->rq_size = 0;
}
@@ -965,11 +973,13 @@ static void reloc_cache_reset(struct reloc_cache *cache)
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
- if (cache->node.allocated) {
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&cache->node);
+ mutex_unlock(&ggtt->vm.mutex);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
@@ -1044,11 +1054,13 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
+ mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
@@ -1058,7 +1070,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
offset = cache->node.start;
- if (cache->node.allocated) {
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -1147,7 +1159,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd;
int err;
- pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+ pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -1170,7 +1182,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_request_create(eb->context);
+ rq = intel_context_create_request(cache->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1241,6 +1253,29 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
+ if (!cache->ce) {
+ struct intel_context *ce;
+
+ /*
+ * The CS pre-parser can pre-fetch commands across
+ * memory sync points and, starting with gen12, it is able to
+ * pre-fetch across BB_START and BB_END boundaries
+ * (within the same context). We therefore use a
+ * separate context gen12+ to guarantee that the reloc
+ * writes land before the parser gets to the target
+ * memory location.
+ */
+ if (cache->gen >= 12)
+ ce = intel_context_create(eb->context->gem_context,
+ eb->engine);
+ else
+ ce = intel_context_get(eb->context);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ cache->ce = ce;
+ }
+
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@@ -1390,7 +1425,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
- PIN_GLOBAL);
+ PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
return err;
@@ -1992,7 +2027,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
u64 shadow_batch_start;
int err;
- pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+ pool = intel_engine_get_pool(eb->engine, eb->batch_len);
if (IS_ERR(pool))
return ERR_CAST(pool);
@@ -2099,6 +2134,9 @@ static int eb_submit(struct i915_execbuffer *eb)
if (err)
return err;
+ if (i915_gem_context_nopreempt(eb->gem_context))
+ eb->request->flags |= I915_REQUEST_NOPREEMPT;
+
return 0;
}
@@ -2168,35 +2206,6 @@ static struct i915_request *eb_throttle(struct intel_context *ce)
return i915_request_get(rq);
}
-static int
-__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
-{
- int err;
-
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
- return 0;
-
- err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
- if (err)
- return err;
-
- err = __intel_context_do_pin(ce);
- mutex_unlock(&eb->i915->drm.struct_mutex);
-
- return err;
-}
-
-static void
-__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
-{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
- return;
-
- mutex_lock(&eb->i915->drm.struct_mutex);
- intel_context_unpin(ce);
- mutex_unlock(&eb->i915->drm.struct_mutex);
-}
-
static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
struct intel_timeline *tl;
@@ -2216,7 +2225,7 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- err = __eb_pin_context(eb, ce);
+ err = intel_context_pin(ce);
if (err)
return err;
@@ -2260,7 +2269,7 @@ err_exit:
intel_context_exit(ce);
intel_context_timeline_unlock(tl);
err_unpin:
- __eb_unpin_context(eb, ce);
+ intel_context_unpin(ce);
return err;
}
@@ -2273,7 +2282,7 @@ static void eb_unpin_engine(struct i915_execbuffer *eb)
intel_context_exit(ce);
mutex_unlock(&tl->mutex);
- __eb_unpin_context(eb, ce);
+ intel_context_unpin(ce);
}
static unsigned int
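
The reloc-cache hunks above replace the open-coded node.allocated flag with drm_mm_node_allocated() and take the vm mutex around drm_mm manipulation. A minimal sketch of that teardown pattern, assuming a single owner of the node and the caller supplying whichever lock guards the drm_mm:

#include <linux/mutex.h>
#include <drm/drm_mm.h>

static void example_release_node(struct drm_mm_node *node, struct mutex *lock)
{
	/* Only the node owner reaches here, so this check is not racy. */
	if (!drm_mm_node_allocated(node))
		return;

	mutex_lock(lock);			/* same lock that guards the drm_mm */
	drm_mm_remove_node(node);
	mutex_unlock(lock);
}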
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04ab8fa..9cfb0e41ff06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -117,13 +117,6 @@ create_st:
goto err;
}
- /* Mark the pages as dontneed whilst they are still pinned. As soon
- * as they are unpinned they are allowed to be reaped by the shrinker,
- * and the caller is expected to repopulate - the contents of this
- * object are only valid whilst active and pinned.
- */
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
internal_free_pages(pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -172,6 +164,7 @@ struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
phys_addr_t size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -186,7 +179,16 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);
+
+ /*
+ * Mark the object as volatile, such that the pages are marked as
+ * dontneed whilst they are still pinned. As soon as they are unpinned
+ * they are allowed to be reaped by the shrinker, and the caller is
+ * expected to repopulate - the contents of this object are only valid
+ * whilst active and pinned.
+ */
+ i915_gem_object_set_volatile(obj);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index 000000000000..0e2bf6b7e143
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+ .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
+ .get_pages = i915_gem_object_get_pages_buddy,
+ .put_pages = i915_gem_object_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
+};
+
+/* XXX: Time to vfunc your life up? */
+void __iomem *
+i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_lmem_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+ resource_size_t size,
+ unsigned int flags)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ size, flags);
+}
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
+
+ obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem, flags);
+
+ return obj;
+}
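
The lmem helpers above compute an offset into the region's io_mapping by subtracting the region base from the page's DMA address. A minimal sketch of that calculation, assuming only the generic io_mapping API; the example_* name is illustrative:

#include <linux/mm.h>
#include <linux/io-mapping.h>

static void __iomem *
example_lmem_map_page(struct io_mapping *iomap, resource_size_t dma_addr,
		      resource_size_t region_start)
{
	/* Offsets into the io_mapping are relative to the region base. */
	return io_mapping_map_wc(iomap, dma_addr - region_start, PAGE_SIZE);
}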
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index 000000000000..7c176b8b7d2f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+ resource_size_t size,
+ unsigned int flags);
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 05289edbafe3..e3002849844b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -8,6 +8,7 @@
#include <linux/sizes.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_gem_gtt.h"
@@ -249,16 +250,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_rpm;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err_reset;
-
- /* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
- ret = -EFAULT;
- goto err_unlock;
- }
-
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
@@ -285,10 +276,19 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
}
+
+ /* The entire mappable GGTT is pinned? Unexpected! */
+ GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err_unlock;
+ goto err_reset;
+ }
+
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+ ret = -EFAULT;
+ goto err_unpin;
}
ret = i915_vma_pin_fence(vma);
@@ -312,7 +312,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
mutex_unlock(&i915->ggtt.vm.mutex);
- if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -326,8 +326,6 @@ err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
err_reset:
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
@@ -335,23 +333,20 @@ err_rpm:
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
- case -EIO:
- /*
- * We eat errors when the gpu is terminally wedged to avoid
- * userspace unduly crashing (gl has no provisions for mmaps to
- * fail). But any other -EIO isn't ours (e.g. swap in failure)
- * and so needs to be reported.
- */
- if (!intel_gt_is_wedged(ggtt->vm.gt))
- return VM_FAULT_SIGBUS;
- /* else, fall through */
- case -EAGAIN:
- /*
- * EAGAIN means the gpu is hung and we'll wait for the error
- * handler to reset everything when re-faulting in
- * i915_mutex_lock_interruptible.
- */
+ default:
+ WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
+ /* fallthrough */
+ case -EIO: /* shmemfs failure from swap device */
+ case -EFAULT: /* purged object */
+ case -ENODEV: /* bad object, how did you get here! */
+ return VM_FAULT_SIGBUS;
+
+ case -ENOSPC: /* shmemfs allocation failure */
+ case -ENOMEM: /* our allocation failure */
+ return VM_FAULT_OOM;
+
case 0:
+ case -EAGAIN:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
@@ -360,15 +355,6 @@ err:
* already did the job.
*/
return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -ENOSPC:
- case -EFAULT:
- case -ENODEV: /* bad object, how did you get here! */
- return VM_FAULT_SIGBUS;
- default:
- WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
- return VM_FAULT_SIGBUS;
}
}
@@ -439,6 +425,7 @@ out:
static int create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = &i915->gt;
int err;
err = drm_gem_create_mmap_offset(&obj->base);
@@ -446,21 +433,12 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
return 0;
/* Attempt to reap some mmap space from dead objects */
- do {
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- break;
-
- i915_gem_drain_freed_objects(i915);
- err = drm_gem_create_mmap_offset(&obj->base);
- if (!err)
- break;
-
- } while (flush_delayed_work(&i915->gem.retire_work));
+ err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
- return err;
+ i915_gem_drain_freed_objects(i915);
+ return drm_gem_create_mmap_offset(&obj->base);
}
int
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d7855dc5a5c5..a50296cce0d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -47,9 +47,10 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops)
+ const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key)
{
- mutex_init(&obj->mm.lock);
+ __mutex_init(&obj->mm.lock, "obj->mm.lock", key);
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
@@ -155,21 +156,30 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_vma *vma, *vn;
-
trace_i915_gem_object_destroy(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
- list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_destroy(vma);
+ if (!list_empty(&obj->vma.list)) {
+ struct i915_vma *vma;
+
+ /*
+ * Note that the vma keeps an object reference while
+ * it is active, so it *should* not sleep while we
+ * destroy it. Our debug code errs and insists it *might*.
+ * For the moment, play along.
+ */
+ spin_lock(&obj->vma.lock);
+ while ((vma = list_first_entry_or_null(&obj->vma.list,
+ struct i915_vma,
+ obj_link))) {
+ GEM_BUG_ON(vma->obj != obj);
+ spin_unlock(&obj->vma.lock);
+
+ i915_vma_destroy(vma);
+
+ spin_lock(&obj->vma.lock);
+ }
+ spin_unlock(&obj->vma.lock);
}
- GEM_BUG_ON(!list_empty(&obj->vma.list));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
-
- mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index ddf3605bea8e..458cd51331f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -23,12 +23,14 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops);
+ const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key);
struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+ resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
- const void *data, size_t size);
+ const void *data, resource_size_t size);
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
@@ -106,6 +108,11 @@ static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
dma_resv_lock(obj->base.resv, NULL);
}
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+{
+ return dma_resv_trylock(obj->base.resv);
+}
+
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
@@ -135,33 +142,58 @@ i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
+}
+
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
+static inline void
+i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
+{
+ obj->flags |= I915_BO_ALLOC_VOLATILE;
+}
+
+static inline bool
+i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
+ unsigned long flags)
+{
+ return obj->ops->flags & flags;
+}
+
+static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}
static inline bool
@@ -412,7 +444,8 @@ static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
- return obj->pin_global; /* currently in use by HW, keep flushed */
+ /* Currently in use by HW (display engine)? Keep flushed. */
+ return i915_gem_object_is_framebuffer(obj);
}
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -429,6 +462,5 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 6415f9a17e2d..70809d8897cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -8,6 +8,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_ring.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_blt.h"
@@ -16,7 +17,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
u32 value)
{
struct drm_i915_private *i915 = ce->vm->i915;
- const u32 block_size = S16_MAX * PAGE_SIZE;
+ const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
struct intel_engine_pool_node *pool;
struct i915_vma *batch;
u64 offset;
@@ -29,10 +30,10 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
intel_engine_pm_get(ce->engine);
- count = div_u64(vma->size, block_size);
+ count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_pool_get(&ce->engine->pool, size);
+ pool = intel_engine_get_pool(ce->engine, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -200,7 +201,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *dst)
{
struct drm_i915_private *i915 = ce->vm->i915;
- const u32 block_size = S16_MAX * PAGE_SIZE;
+ const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
struct intel_engine_pool_node *pool;
struct i915_vma *batch;
u64 src_offset, dst_offset;
@@ -213,10 +214,10 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
intel_engine_pm_get(ce->engine);
- count = div_u64(dst->size, block_size);
+ count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_pool_get(&ce->engine->pool, size);
+ pool = intel_engine_get_pool(ce->engine, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
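
The SZ_8M choice above is a latency budget rather than a hardware limit: at an assumed blitter throughput of roughly 8 GiB/s, one 8 MiB block takes about 8 MiB / 8 GiB/s = 1/1024 s ≈ 1 ms, which bounds how long the engine runs between preemption points. The round_up() added to the count calculation also matters, since plain division would emit zero blocks for any object smaller than 8 MiB. A small sketch of that arithmetic, using only helpers from <linux/sizes.h> and <linux/math64.h> (the function name is illustrative):

/* Illustrative only: number of blit blocks emitted for a transfer of 'size' bytes. */
static u64 blt_block_count(u64 size)
{
	const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */

	/* round_up() guarantees at least one block for sub-8MiB objects. */
	return div_u64(round_up(size, block_size), block_size);
}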
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 646859fea224..96008374a412 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -8,6 +8,7 @@
#define __I915_GEM_OBJECT_TYPES_H__
#include <drm/drm_gem.h>
+#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
#include "i915_selftest.h"
@@ -30,10 +31,11 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_NO_GGTT BIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -118,6 +120,11 @@ struct drm_i915_gem_object {
I915_SELFTEST_DECLARE(struct list_head st_link);
+ unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_VOLATILE BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
+
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
@@ -153,17 +160,30 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
- /** Count of how many global VMA are currently pinned for use by HW */
- unsigned int pin_global;
struct {
struct mutex lock; /* protects the pages and their use */
atomic_t pages_pin_count;
+ atomic_t shrink_pin;
+
+ /**
+ * Memory region for this object.
+ */
+ struct intel_memory_region *region;
+ /**
+ * List of memory region blocks allocated for this object.
+ */
+ struct list_head blocks;
+ /**
+ * Element within memory_region->objects or region->purgeable
+ * if the object is marked as DONTNEED. Access is protected by
+ * region->obj_lock.
+ */
+ struct list_head region_link;
struct sg_table *pages;
void *mapping;
- /* TODO: whack some of this into the error state */
struct i915_page_sizes {
/**
* The sg mask of the pages sg_table. i.e the mask of
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 18f0ce0135c1..29f4c2850745 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -18,6 +19,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->mm.lock);
+ if (i915_gem_object_is_volatile(obj))
+ obj->mm.madv = I915_MADV_DONTNEED;
+
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
obj->write_domain = 0;
@@ -71,6 +75,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
list = &i915->mm.shrink_list;
list_add_tail(&obj->mm.link, list);
+ atomic_set(&obj->mm.shrink_pin, 0);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
@@ -150,6 +155,16 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_unlock();
}
+static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
+{
+ if (i915_gem_object_is_lmem(obj))
+ io_mapping_unmap((void __force __iomem *)ptr);
+ else if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -159,17 +174,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
if (IS_ERR_OR_NULL(pages))
return pages;
+ if (i915_gem_object_is_volatile(obj))
+ obj->mm.madv = I915_MADV_WILLNEED;
+
i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
- void *ptr;
-
- ptr = page_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
+ unmap_object(obj, page_mask_bits(obj->mm.mapping));
obj->mm.mapping = NULL;
}
@@ -224,7 +235,7 @@ unlock:
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
@@ -237,6 +248,16 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
pgprot_t pgprot;
void *addr;
+ if (i915_gem_object_is_lmem(obj)) {
+ void __iomem *io;
+
+ if (type != I915_MAP_WC)
+ return NULL;
+
+ io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+ return (void __force *)io;
+ }
+
/* A single page can always be kmapped */
if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
@@ -278,11 +299,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
enum i915_map_type has_type;
+ unsigned int flags;
bool pinned;
void *ptr;
int err;
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+ if (!i915_gem_object_type_has(obj, flags))
return ERR_PTR(-ENXIO);
err = mutex_lock_interruptible(&obj->mm.lock);
@@ -314,10 +337,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto err_unpin;
}
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
+ unmap_object(obj, ptr);
ptr = obj->mm.mapping = NULL;
}
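
Taken together, these changes let a backing-store implementation opt its internal, always-recreatable objects into the shrinker's purge path simply by calling i915_gem_object_set_volatile() at creation time; the core then flips obj->mm.madv between DONTNEED and WILLNEED across set/unset of the pages. A hedged sketch of a backend get_pages hook under that scheme, where my_build_sg_table() is a hypothetical helper standing in for the backend-specific allocation:

/* Illustrative backend pattern only; my_build_sg_table() is hypothetical. */
static int my_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int sg_page_sizes;
	struct sg_table *pages;

	pages = my_build_sg_table(obj, &sg_page_sizes);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * No explicit obj->mm.madv handling here: if the object was marked
	 * volatile at init, __i915_gem_object_set_pages() switches it to
	 * I915_MADV_DONTNEED and __i915_gem_object_unset_pages() restores
	 * I915_MADV_WILLNEED.
	 */
	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	return 0;
}

The huge-page selftests further down in this patch are converted to exactly this pattern, dropping their manual madv writes.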
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
+#include "i915_gem_region.h"
#include "i915_scatterlist.h"
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages))
+ if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+ i915_gem_object_release_memory_region(obj);
+ }
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index ad2a63dbcac2..f88ee1317bb4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -7,138 +7,9 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
-#include "i915_globals.h"
-
-static void call_idle_barriers(struct intel_engine_cs *engine)
-{
- struct llist_node *node, *next;
-
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
- struct i915_active_request *active =
- container_of((struct list_head *)node,
- typeof(*active), link);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, NULL);
- }
-}
-
-static void i915_gem_park(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- for_each_engine(engine, i915, id)
- call_idle_barriers(engine); /* cleanup after wedging */
-
- i915_vma_parked(i915);
-
- i915_globals_park();
-}
-
-static void idle_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.idle_work);
- bool park;
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- mutex_lock(&i915->drm.struct_mutex);
-
- intel_wakeref_lock(&i915->gt.wakeref);
- park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
- !work_pending(work));
- intel_wakeref_unlock(&i915->gt.wakeref);
- if (park)
- i915_gem_park(i915);
- else
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
-static void retire_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.retire_work.work);
-
- /* Come back later if the device is busy... */
- if (mutex_trylock(&i915->drm.struct_mutex)) {
- i915_retire_requests(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
-
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
-static int pm_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct drm_i915_private *i915 =
- container_of(nb, typeof(*i915), gem.pm_notifier);
-
- switch (action) {
- case INTEL_GT_UNPARK:
- i915_globals_unpark();
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
- break;
-
- case INTEL_GT_PARK:
- queue_work(i915->wq, &i915->gem.idle_work);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static bool switch_to_kernel_context_sync(struct intel_gt *gt)
-{
- bool result = !intel_gt_is_wedged(gt);
-
- do {
- if (i915_gem_wait_for_idle(gt->i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- /* XXX hide warning from gem_eio */
- if (i915_modparams.reset) {
- dev_err(gt->i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- }
-
- /*
- * Forcibly cancel outstanding work and leave
- * the gpu quiet.
- */
- intel_gt_set_wedged(gt);
- result = false;
- }
- } while (i915_retire_requests(gt->i915) && result);
-
- if (intel_gt_pm_wait_for_idle(gt))
- result = false;
-
- return result;
-}
-
-bool i915_gem_load_power_context(struct drm_i915_private *i915)
-{
- return switch_to_kernel_context_sync(&i915->gt);
-}
void i915_gem_suspend(struct drm_i915_private *i915)
{
@@ -147,8 +18,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
- mutex_lock(&i915->drm.struct_mutex);
-
/*
* We have to flush all the executing contexts to main memory so
* that they can saved in the hibernation image. To ensure the last
@@ -158,15 +27,9 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- switch_to_kernel_context_sync(&i915->gt);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- cancel_delayed_work_sync(&i915->gt.hangcheck.work);
+ intel_gt_suspend_prepare(&i915->gt);
i915_gem_drain_freed_objects(i915);
-
- intel_uc_suspend(&i915->gt.uc);
}
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
@@ -206,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
+ intel_gt_suspend_late(&i915->gt);
+
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
LIST_HEAD(keep);
@@ -230,18 +95,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
list_splice_tail(&keep, *phase);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
- i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
- mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- if (i915_gem_init_hw(i915))
+ if (intel_gt_init_hw(&i915->gt))
goto err_wedged;
/*
@@ -252,15 +114,8 @@ void i915_gem_resume(struct drm_i915_private *i915)
if (intel_gt_resume(&i915->gt))
goto err_wedged;
- intel_uc_resume(&i915->gt.uc);
-
- /* Always reload a context for powersaving. */
- if (!i915_gem_load_power_context(i915))
- goto err_wedged;
-
out_unlock:
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
@@ -271,13 +126,3 @@ err_wedged:
}
goto out_unlock;
}
-
-void i915_gem_init__pm(struct drm_i915_private *i915)
-{
- INIT_WORK(&i915->gem.idle_work, idle_work_handler);
- INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
-
- i915->gem.pm_notifier.notifier_call = pm_notifier;
- blocking_notifier_chain_register(&i915->gt.pm_notifications,
- &i915->gem.pm_notifier);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index 6f7d5d11ac3b..26b78dbdc225 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -12,9 +12,6 @@
struct drm_i915_private;
struct work_struct;
-void i915_gem_init__pm(struct drm_i915_private *i915);
-
-bool i915_gem_load_power_context(struct drm_i915_private *i915);
void i915_gem_resume(struct drm_i915_private *i915);
void i915_gem_idle_work_handler(struct work_struct *work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
new file mode 100644
index 000000000000..2f7bcfb9c964
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+ obj->mm.dirty = false;
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->mm.region;
+ struct list_head *blocks = &obj->mm.blocks;
+ resource_size_t size = obj->base.size;
+ resource_size_t prev_end;
+ struct i915_buddy_block *block;
+ unsigned int flags;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ int ret;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ flags = I915_ALLOC_MIN_PAGE_SIZE;
+ if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+ flags |= I915_ALLOC_CONTIGUOUS;
+
+ ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+ if (ret)
+ goto err_free_sg;
+
+ GEM_BUG_ON(list_empty(blocks));
+
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ prev_end = (resource_size_t)-1;
+
+ list_for_each_entry(block, blocks, link) {
+ u64 block_size, offset;
+
+ block_size = min_t(u64, size,
+ i915_buddy_block_size(&mem->mm, block));
+ offset = i915_buddy_block_offset(block);
+
+ GEM_BUG_ON(overflows_type(block_size, sg->length));
+
+ if (offset != prev_end ||
+ add_overflows_t(typeof(sg->length), sg->length, block_size)) {
+ if (st->nents) {
+ sg_page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = block_size;
+
+ sg->length = block_size;
+
+ st->nents++;
+ } else {
+ sg->length += block_size;
+ sg_dma_len(sg) += block_size;
+ }
+
+ prev_end = offset + block_size;
+ }
+
+ sg_page_sizes |= sg->length;
+ sg_mark_end(sg);
+ i915_sg_trim(st);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err_free_sg:
+ sg_free_table(st);
+ kfree(st);
+ return ret;
+}
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mem,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&obj->mm.blocks);
+ obj->mm.region = intel_memory_region_get(mem);
+ obj->flags |= flags;
+
+ mutex_lock(&mem->objects.lock);
+
+ if (obj->flags & I915_BO_ALLOC_VOLATILE)
+ list_add(&obj->mm.region_link, &mem->objects.purgeable);
+ else
+ list_add(&obj->mm.region_link, &mem->objects.list);
+
+ mutex_unlock(&mem->objects.lock);
+}
+
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->mm.region;
+
+ mutex_lock(&mem->objects.lock);
+ list_del(&obj->mm.region_link);
+ mutex_unlock(&mem->objects.lock);
+
+ intel_memory_region_put(mem);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * NB: Our use of resource_size_t for the size stems from using struct
+ * resource for the mem->region. We might need to revisit this in the
+ * future.
+ */
+
+ GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
+ if (!mem)
+ return ERR_PTR(-ENODEV);
+
+ size = round_up(size, mem->min_page_size);
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
+
+ /*
+ * XXX: There is a prevalence of the assumption that we fit the
+ * object's page count inside a 32bit _signed_ variable. Let's document
+ * this and catch if we ever need to fix it. In the meantime, if you do
+ * spot such a local variable, please consider fixing!
+ */
+
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = mem->ops->create_object(mem, size, flags);
+ if (!IS_ERR(obj))
+ trace_i915_gem_object_create(obj);
+
+ return obj;
+}
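
The buddy helpers above are intended to be plugged into an intel_memory_region_ops table by each backend: create_object() performs the object-type-specific setup and i915_gem_object_init_memory_region() ties the object to its region, mirroring the shmem and stolen conversions later in this patch. A hedged sketch of such a backend follows; every my_* identifier is illustrative and not part of the patch:

/* Illustrative region backend; the my_* names are hypothetical. */
static const struct drm_i915_gem_object_ops my_obj_ops = {
	.get_pages = i915_gem_object_get_pages_buddy,
	.put_pages = i915_gem_object_put_pages_buddy,
	.release = i915_gem_object_release_memory_region,
};

static struct drm_i915_gem_object *
my_create_object(struct intel_memory_region *mem,
		 resource_size_t size, unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &my_obj_ops, &lock_class);
	i915_gem_object_init_memory_region(obj, mem, flags);

	return obj;
}

static const struct intel_memory_region_ops my_region_ops = {
	.create_object = my_create_object,
};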
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
new file mode 100644
index 000000000000..f2ff6f8bff74
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_REGION_H__
+#define __I915_GEM_REGION_H__
+
+#include <linux/types.h>
+
+struct intel_memory_region;
+struct drm_i915_gem_object;
+struct sg_table;
+
+int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
+void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mem,
+ unsigned long flags);
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..4d69c3fc3439 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
#include <linux/pagevec.h>
#include <linux/swap.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
+#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (page_count > totalram_pages())
+ if (obj->base.size > resource_size(&mem->region))
return -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,6 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
static void shmem_release(struct drm_i915_gem_object *obj)
{
+ i915_gem_object_release_memory_region(obj);
+
fput(obj->base.filp);
}
@@ -434,9 +439,9 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.release = shmem_release,
};
-static int create_shmem(struct drm_i915_private *i915,
- struct drm_gem_object *obj,
- size_t size)
+static int __create_shmem(struct drm_i915_private *i915,
+ struct drm_gem_object *obj,
+ resource_size_t size)
{
unsigned long flags = VM_NORESERVE;
struct file *filp;
@@ -455,31 +460,24 @@ static int create_shmem(struct drm_i915_private *i915,
return 0;
}
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
+static struct drm_i915_gem_object *
+create_shmem(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
- /* There is a prevalence of the assumption that we fit the object's
- * page count inside a 32bit _signed_ variable. Let's document this and
- * catch if we ever need to fix it. In the meantime, if you do spot
- * such a local variable, please consider fixing!
- */
- if (size >> PAGE_SHIFT > INT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
- ret = create_shmem(i915, &obj->base, size);
+ ret = __create_shmem(i915, &obj->base, size);
if (ret)
goto fail;
@@ -494,7 +492,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -518,7 +516,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
i915_gem_object_set_cache_coherency(obj, cache_level);
- trace_i915_gem_object_create(obj);
+ i915_gem_object_init_memory_region(obj, mem, 0);
return obj;
@@ -527,14 +525,22 @@ fail:
return ERR_PTR(ret);
}
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+ resource_size_t size)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+ size, 0);
+}
+
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
- const void *data, size_t size)
+ const void *data, resource_size_t size)
{
struct drm_i915_gem_object *obj;
struct file *file;
- size_t offset;
+ resource_size_t offset;
int err;
obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
@@ -577,3 +583,35 @@ fail:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
+
+static int init_shmem(struct intel_memory_region *mem)
+{
+ int err;
+
+ err = i915_gemfs_init(mem->i915);
+ if (err) {
+ DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
+ err);
+ }
+
+ return 0; /* Don't error, we can simply fall back to the kernel mnt */
+}
+
+static void release_shmem(struct intel_memory_region *mem)
+{
+ i915_gemfs_fini(mem->i915);
+}
+
+static const struct intel_memory_region_ops shmem_region_ops = {
+ .init = init_shmem,
+ .release = release_shmem,
+ .create_object = create_shmem,
+};
+
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
+{
+ return intel_memory_region_create(i915, 0,
+ totalram_pages() << PAGE_SHIFT,
+ PAGE_SIZE, 0,
+ &shmem_region_ops);
+}
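
With shmem registered as just another memory region, object creation funnels through i915_gem_object_create_region() and the size sanity checks move there; the region itself is sized at total system RAM with a PAGE_SIZE minimum page size. Below is a sketch of how a driver-init path might wire it up, assuming intel_memory_region_create() follows the usual ERR_PTR convention (the wrapper name is illustrative):

/* Illustrative init-time hookup; not part of the patch. */
static int example_setup_smem(struct drm_i915_private *i915)
{
	struct intel_memory_region *mem;

	mem = i915_gem_shmem_setup(i915);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	i915->mm.regions[INTEL_REGION_SMEM] = mem;
	return 0;
}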
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1a51b3598d63..f2418a1cfe68 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -16,40 +16,6 @@
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *i915,
- unsigned int flags,
- bool *unlock)
-{
- struct mutex *m = &i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(m)) {
- case MUTEX_TRYLOCK_RECURSIVE:
- *unlock = false;
- return true;
-
- case MUTEX_TRYLOCK_FAILED:
- *unlock = false;
- if (flags & I915_SHRINK_ACTIVE &&
- mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
- *unlock = true;
- return *unlock;
-
- case MUTEX_TRYLOCK_SUCCESS:
- *unlock = true;
- return true;
- }
-
- BUG();
-}
-
-static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
-{
- if (!unlock)
- return;
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
@@ -61,7 +27,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (!i915_gem_object_is_shrinkable(obj))
return false;
- /* Only report true if by unbinding the object and putting its pages
+ /*
+ * Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
*
@@ -72,16 +39,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
return false;
- /* If any vma are "permanently" pinned, it will prevent us from
- * reclaiming the obj->mm.pages. We only allow scanout objects to claim
- * a permanent pin, along with a few others like the context objects.
- * To simplify the scan, and to avoid walking the list of vma under the
- * object, we just check the count of its permanently pinned.
- */
- if (READ_ONCE(obj->pin_global))
- return false;
-
- /* We can only return physical pages to the system if we can either
+ /*
+ * We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
@@ -162,10 +121,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
- bool unlock;
-
- if (!shrinker_lock(i915, shrink, &unlock))
- return 0;
/*
* When shrinking the active list, we should also consider active
@@ -275,8 +230,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- shrinker_unlock(i915, unlock);
-
if (nr_scanned)
*nr_scanned += scanned;
return count;
@@ -346,19 +299,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed;
- bool unlock;
sc->nr_scanned = 0;
- if (!shrinker_lock(i915, 0, &unlock))
- return SHRINK_STOP;
-
freed = i915_gem_shrink(i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_WRITEBACK);
+ I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
@@ -373,8 +321,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
}
}
- shrinker_unlock(i915, unlock);
-
return sc->nr_scanned ? freed : SHRINK_STOP;
}
@@ -391,6 +337,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
@@ -426,10 +373,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
intel_wakeref_t wakeref;
- bool unlock;
-
- if (!shrinker_lock(i915, 0, &unlock))
- return NOTIFY_DONE;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
@@ -446,15 +389,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!vma->iomap || i915_vma_is_active(vma))
continue;
- mutex_unlock(&i915->ggtt.vm.mutex);
- if (i915_vma_unbind(vma) == 0)
+ if (__i915_vma_unbind(vma) == 0)
freed_pages += count;
- mutex_lock(&i915->ggtt.vm.mutex);
}
mutex_unlock(&i915->ggtt.vm.mutex);
- shrinker_unlock(i915, unlock);
-
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
@@ -497,22 +436,9 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
fs_reclaim_acquire(GFP_KERNEL);
- /*
- * As we invariably rely on the struct_mutex within the shrinker,
- * but have a complicated recursion dance, taint all the mutexes used
- * within the shrinker with the struct_mutex. For completeness, we
- * taint with all subclass of struct_mutex, even though we should
- * only need tainting by I915_MM_NORMAL to catch possible ABBA
- * deadlocks from using struct_mutex inside @mutex.
- */
- mutex_acquire(&i915->drm.struct_mutex.dep_map,
- I915_MM_SHRINKER, 0, _RET_IP_);
-
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
mutex_release(&mutex->dep_map, _RET_IP_);
- mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
-
fs_reclaim_release(GFP_KERNEL);
if (unlock)
@@ -523,46 +449,52 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
/*
* We can only be called while the pages are pinned or when
* the pages are released. If pinned, we should only be called
* from a single caller under controlled conditions; and on release
* only one caller may release us. Neither the two may cross.
*/
- if (!list_empty(&obj->mm.link)) { /* pinned by caller */
- struct drm_i915_private *i915 = obj_to_i915(obj);
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- GEM_BUG_ON(list_empty(&obj->mm.link));
+ if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+ return;
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
+ !list_empty(&obj->mm.link)) {
list_del_init(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
struct list_head *head)
{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- GEM_BUG_ON(!list_empty(&obj->mm.link));
+ if (!i915_gem_object_is_shrinkable(obj))
+ return;
- if (i915_gem_object_is_shrinkable(obj)) {
- struct drm_i915_private *i915 = obj_to_i915(obj);
- unsigned long flags;
+ if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
+ return;
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- GEM_BUG_ON(!kref_read(&obj->base.refcount));
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ GEM_BUG_ON(!kref_read(&obj->base.refcount));
+ if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
list_add_tail(&obj->mm.link, head);
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index aa533b4ab5f5..a2d49c04e6a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
return 0;
}
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -425,8 +426,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
bdw_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
- case 11:
default:
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ /* fall-through */
+ case 11:
+ case 12:
icl_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@@ -536,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+ if (obj->mm.region)
+ i915_gem_object_release_memory_region(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -545,65 +552,116 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
};
static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *stolen,
+ struct intel_memory_region *mem)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
+ int err = -ENOMEM;
obj = i915_gem_object_alloc();
- if (obj == NULL)
- return NULL;
+ if (!obj)
+ goto err;
drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
- i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
- if (i915_gem_object_pin_pages(obj))
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
goto cleanup;
+ if (mem)
+ i915_gem_object_init_memory_region(obj, mem, 0);
+
return obj;
cleanup:
i915_gem_object_free(obj);
- return NULL;
+err:
+ return ERR_PTR(err);
}
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
+ struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return NULL;
+ return ERR_PTR(-ENODEV);
if (size == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
- kfree(stolen);
- return NULL;
+ obj = ERR_PTR(ret);
+ goto err_free;
}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
- if (obj)
- return obj;
+ obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
+ if (IS_ERR(obj))
+ goto err_remove;
+ return obj;
+
+err_remove:
i915_gem_stolen_remove_node(dev_priv, stolen);
+err_free:
kfree(stolen);
- return NULL;
+ return obj;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+ return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+ size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+ /*
+ * Initialise stolen early so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+ i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+ .init = init_stolen,
+ .release = release_stolen,
+ .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
+{
+ return intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+ resource_size(&intel_graphics_stolen_res),
+ PAGE_SIZE, 0,
+ &i915_region_stolen_ops);
}
struct drm_i915_gem_object *
@@ -619,9 +677,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return NULL;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ return ERR_PTR(-ENODEV);
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
&stolen_offset, &gtt_offset, &size);
@@ -630,11 +686,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (WARN_ON(size == 0) ||
WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
- return NULL;
+ return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
stolen->start = stolen_offset;
stolen->size = size;
@@ -644,15 +700,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
- return NULL;
+ return ERR_PTR(ret);
}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
- if (obj == NULL) {
+ obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL);
+ if (IS_ERR(obj)) {
DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
- return NULL;
+ return obj;
}
/* Some objects just need physical mem from stolen space */
@@ -674,22 +730,26 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
+ mutex_lock(&ggtt->vm.mutex);
ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
+ mutex_unlock(&ggtt->vm.mutex);
goto err_pages;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(vma->pages);
vma->pages = obj->mm.pages;
- vma->flags |= I915_VMA_GLOBAL_BIND;
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+
+ set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
__i915_vma_set_map_and_fenceable(vma);
- mutex_lock(&ggtt->vm.mutex);
- list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+ list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
@@ -701,5 +761,5 @@ err_pages:
i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
- return NULL;
+ return ERR_PTR(ret);
}
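
Because the stolen allocators now return ERR_PTR() encodings instead of NULL, callers are expected to switch from NULL checks to IS_ERR()/PTR_ERR(). A minimal illustration of the new calling convention (the wrapper function is illustrative, not part of the patch):

/* Illustrative caller showing the post-patch error convention. */
static int example_alloc_from_stolen(struct drm_i915_private *i915,
				     resource_size_t size,
				     struct drm_i915_gem_object **out)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj); /* previously: if (!obj) return -ENOMEM; */

	*out = obj;
	return 0;
}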
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 2289644d8604..c1040627fbf3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -21,8 +21,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 1e372420771b..540ef0551789 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -50,10 +50,8 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- if (target) {
+ if (target && xchg(&target->file_priv, NULL))
list_del(&target->client_link);
- target->file_priv = NULL;
- }
target = request;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ca0c2f451742..1fa592d82af5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -181,22 +181,25 @@ static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
struct i915_vma *vma;
- int ret;
+ int ret = 0;
if (tiling_mode == I915_TILING_NONE)
return 0;
+ mutex_lock(&ggtt->vm.mutex);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
if (ret)
- return ret;
+ break;
}
+ mutex_unlock(&ggtt->vm.mutex);
- return 0;
+ return ret;
}
int
@@ -212,7 +215,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
- lockdep_assert_held(&i915->drm.struct_mutex);
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
@@ -233,16 +235,18 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* whilst executing a fenced command for an untiled object.
*/
- err = i915_gem_object_fence_prepare(obj, tiling, stride);
- if (err)
- return err;
-
i915_gem_object_lock(obj);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
}
+ err = i915_gem_object_fence_prepare(obj, tiling, stride);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ return err;
+ }
+
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
@@ -313,10 +317,14 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
+ if (!dev_priv->ggtt.num_fences)
+ return -EOPNOTSUPP;
+
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -340,9 +348,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
+ args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
else
- args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
+ args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -364,12 +372,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
}
}
- err = mutex_lock_interruptible(&dev->struct_mutex);
- if (err)
- goto err;
-
err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
- mutex_unlock(&dev->struct_mutex);
/* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
@@ -402,6 +405,9 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err = -ENOENT;
+ if (!dev_priv->ggtt.num_fences)
+ return -EOPNOTSUPP;
+
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (obj) {
@@ -415,10 +421,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index abfbac49b8e8..4c72d74d6576 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -92,7 +92,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it;
- struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
@@ -129,33 +128,13 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
- if (!unlock) {
- unlock = &mn->mm->i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(unlock)) {
- default:
- case MUTEX_TRYLOCK_FAILED:
- if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
- i915_gem_object_put(obj);
- return -EINTR;
- }
- /* fall through */
- case MUTEX_TRYLOCK_SUCCESS:
- break;
-
- case MUTEX_TRYLOCK_RECURSIVE:
- unlock = ERR_PTR(-EEXIST);
- break;
- }
- }
-
ret = i915_gem_object_unbind(obj,
I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
if (ret)
- goto unlock;
+ return ret;
spin_lock(&mn->lock);
@@ -168,10 +147,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
-unlock:
- if (!IS_ERR_OR_NULL(unlock))
- mutex_unlock(unlock);
-
return ret;
}
@@ -770,6 +745,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
+ static struct lock_class_key lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
@@ -803,7 +779,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- vm = dev_priv->kernel_context->vm;
+ vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
+ true); /* static vm */
if (!vm || !vm->has_read_only)
return -ENODEV;
}
@@ -813,7 +790,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 3c5d17b2b670..892d12db6c49 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -96,6 +96,7 @@ huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
dma_addr_t dma_size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -111,7 +112,7 @@ huge_gem_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
- i915_gem_object_init(obj, &huge_ops);
+ i915_gem_object_init(obj, &huge_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 8de83c6d81f5..688c49a24f32 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -8,6 +8,8 @@
#include "i915_selftest.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
@@ -17,6 +19,7 @@
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
+#include "selftests/mock_region.h"
#include "selftests/i915_random.h"
static const unsigned int page_sizes[] = {
@@ -113,8 +116,6 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
if (i915_gem_gtt_prepare_pages(obj, st))
goto err;
- obj->mm.madv = I915_MADV_DONTNEED;
-
GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
@@ -135,7 +136,6 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
huge_pages_free_pages(pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -150,6 +150,7 @@ huge_pages_object(struct drm_i915_private *i915,
u64 size,
unsigned int page_mask)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -166,7 +167,9 @@ huge_pages_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &huge_page_ops);
+ i915_gem_object_init(obj, &huge_page_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -227,8 +230,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
i915_sg_trim(st);
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -261,8 +262,6 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
sg_dma_address(sg) = page_size;
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
@@ -281,7 +280,6 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
{
fake_free_huge_pages(obj, pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -299,6 +297,7 @@ static const struct drm_i915_gem_object_ops fake_ops_single = {
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -317,9 +316,11 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
drm_gem_private_object_init(&i915->drm, &obj->base, size);
if (single)
- i915_gem_object_init(obj, &fake_ops_single);
+ i915_gem_object_init(obj, &fake_ops_single, &lock_class);
else
- i915_gem_object_init(obj, &fake_ops);
+ i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -333,7 +334,12 @@ static int igt_check_page_sizes(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
- int err = 0;
+ int err;
+
+ /* We have to wait for the async bind to complete before our asserts */
+ err = i915_vma_sync(vma);
+ if (err)
+ return err;
if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
@@ -447,6 +453,88 @@ out_device:
return err;
}
+static int igt_mock_memory_region_huge_pages(void *arg)
+{
+ const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
+ struct i915_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct intel_memory_region *mem;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int bit;
+ int err = 0;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("%s failed to create memory region\n", __func__);
+ return PTR_ERR(mem);
+ }
+
+ for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ unsigned int page_size = BIT(bit);
+ resource_size_t phys;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(flags); ++i) {
+ obj = i915_gem_object_create_region(mem, page_size,
+ flags[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_region;
+ }
+
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ phys = i915_gem_object_get_dma_address(obj, 0);
+ if (!IS_ALIGNED(phys, page_size)) {
+ pr_err("%s addr misaligned(%pa) page_size=%u\n",
+ __func__, &phys, page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("%s page_sizes.gtt=%u, expected=%u\n",
+ __func__, vma->page_sizes.gtt,
+ page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ goto out_region;
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_region:
+ intel_memory_region_put(mem);
+ return err;
+}
+
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
@@ -879,9 +967,8 @@ out_object_put:
return err;
}
-static int gpu_write(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int gpu_write(struct intel_context *ce,
+ struct i915_vma *vma,
u32 dw,
u32 val)
{
@@ -893,11 +980,12 @@ static int gpu_write(struct i915_vma *vma,
if (err)
return err;
- return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+ return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
vma->size >> PAGE_SHIFT, val);
}
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int
+__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
unsigned int needs_flush;
unsigned long n;
@@ -929,18 +1017,61 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
-static int __igt_write_huge(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n;
+ int err;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 __iomem *base;
+ u32 read_val;
+
+ base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+ read_val = ioread32(base + dword);
+ io_mapping_unmap_atomic(base);
+ if (read_val != val) {
+ pr_err("n=%lu base[%u]=%u, val=%u\n",
+ n, dword, read_val, val);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ if (i915_gem_object_has_struct_page(obj))
+ return __cpu_check_shmem(obj, dword, val);
+ else if (i915_gem_object_is_lmem(obj))
+ return __cpu_check_lmem(obj, dword, val);
+
+ return -ENODEV;
+}
+
+static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
u32 dword, u32 val)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -954,7 +1085,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
* The ggtt may have some pages reserved so
* refrain from erroring out.
*/
- if (err == -ENOSPC && i915_is_ggtt(vm))
+ if (err == -ENOSPC && i915_is_ggtt(ce->vm))
err = 0;
goto out_vma_close;
@@ -964,7 +1095,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
if (err)
goto out_vma_unpin;
- err = gpu_write(vma, ctx, engine, dword, val);
+ err = gpu_write(ce, vma, dword, val);
if (err) {
pr_err("gpu-write failed at offset=%llx\n", offset);
goto out_vma_unpin;
@@ -987,14 +1118,13 @@ out_vma_close:
static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
- static struct intel_engine_cs *engines[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
unsigned int max_page_size;
- unsigned int id;
+ unsigned int count;
u64 max;
u64 num;
u64 size;
@@ -1008,19 +1138,18 @@ static int igt_write_huge(struct i915_gem_context *ctx,
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
- max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
- max = div_u64((vm->total - size), max_page_size);
-
n = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n",
- id);
+ count = 0;
+ max = U64_MAX;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- }
- engines[n++] = engine;
- }
+ max = min(max, ce->vm->total);
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
if (!n)
return 0;
@@ -1029,23 +1158,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
* randomized order, lets also make feeding to the same engine a few
* times in succession a possibility by enlarging the permutation array.
*/
- order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+ order = i915_random_order(count * count, &prng);
if (!order)
return -ENOMEM;
+ max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ max = div_u64(max - size, max_page_size);
+
/*
* Try various offsets in an ascending/descending fashion until we
* timeout -- we want to avoid issues hidden by effectively always using
* offset = 0.
*/
i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
for_each_prime_number_from(num, 0, max) {
u64 offset_low = num * max_page_size;
u64 offset_high = (max - num) * max_page_size;
u32 dword = offset_in_page(num) / 4;
+ struct intel_context *ce;
- engine = engines[order[i] % n];
- i = (i + 1) % (n * I915_NUM_ENGINES);
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
/*
* In order to utilize 64K pages we need to both pad the vma
@@ -1057,22 +1193,23 @@ static int igt_write_huge(struct i915_gem_context *ctx,
offset_low = round_down(offset_low,
I915_GTT_PAGE_SIZE_2M);
- err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+ err = __igt_write_huge(ce, obj, size, offset_low,
dword, num + 1);
if (err)
break;
- err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+ err = __igt_write_huge(ce, obj, size, offset_high,
dword, num + 1);
if (err)
break;
if (igt_timeout(end_time,
- "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
- __func__, engine->id, offset_low, offset_high,
+ "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+ __func__, ce->engine->name, offset_low, offset_high,
max_page_size))
break;
}
+ i915_gem_context_unlock_engines(ctx);
kfree(order);
@@ -1180,131 +1317,235 @@ out_device:
return err;
}
-static int igt_ppgtt_internal_huge(void *arg)
+typedef struct drm_i915_gem_object *
+(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
+
+static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
+{
+ return i915->mm.gemfs && has_transparent_hugepage();
+}
+
+static struct drm_i915_gem_object *
+igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("%s missing THP support, skipping\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return i915_gem_object_create_shmem(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return i915_gem_object_create_internal(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return huge_pages_object(i915, size, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return i915_gem_object_create_lmem(i915, size, flags);
+}
+
+static u32 igt_random_size(struct rnd_state *prng,
+ u32 min_page_size,
+ u32 max_page_size)
+{
+ u64 mask;
+ u32 size;
+
+ GEM_BUG_ON(!is_power_of_2(min_page_size));
+ GEM_BUG_ON(!is_power_of_2(max_page_size));
+ GEM_BUG_ON(min_page_size < PAGE_SIZE);
+ GEM_BUG_ON(min_page_size > max_page_size);
+
+ mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
+ size = prandom_u32_state(prng) & mask;
+ if (size < min_page_size)
+ size |= min_page_size;
+
+ return size;
+}
+
+static int igt_ppgtt_smoke_huge(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_64K,
- SZ_128K,
- SZ_256K,
- SZ_512K,
- SZ_1M,
- SZ_2M,
+ I915_RND_STATE(prng);
+ struct {
+ igt_create_fn fn;
+ u32 min;
+ u32 max;
+ } backends[] = {
+ { igt_create_internal, SZ_64K, SZ_2M, },
+ { igt_create_shmem, SZ_64K, SZ_32M, },
+ { igt_create_local, SZ_64K, SZ_1G, },
};
- int i;
int err;
+ int i;
/*
- * Sanity check that the HW uses huge pages correctly through internal
- * -- ensure that our writes land in the right place.
+ * Sanity check that the HW uses huge pages correctly through our
+ * various backends -- ensure that our writes land in the right place.
*/
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
+ for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+ u32 min = backends[i].min;
+ u32 max = backends[i].max;
+ u32 size = max;
+try_again:
+ size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ obj = backends[i].fn(i915, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -E2BIG) {
+ size >>= 1;
+ goto try_again;
+ } else if (err == -ENODEV) {
+ err = 0;
+ continue;
+ }
+
+ return err;
+ }
err = i915_gem_object_pin_pages(obj);
- if (err)
+ if (err) {
+ if (err == -ENXIO) {
+ i915_gem_object_put(obj);
+ size >>= 1;
+ goto try_again;
+ }
goto out_put;
+ }
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
- pr_info("internal unable to allocate huge-page(s) with size=%u\n",
- size);
+ if (obj->mm.page_sizes.phys < min) {
+ pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
+ __func__, size, i);
+ err = -ENOMEM;
goto out_unpin;
}
err = igt_write_huge(ctx, obj);
if (err) {
- pr_err("internal write-huge failed with size=%u\n",
- size);
- goto out_unpin;
+ pr_err("%s write-huge failed with size=%u, i=%d\n",
+ __func__, size, i);
}
-
+out_unpin:
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+out_put:
i915_gem_object_put(obj);
- }
- return 0;
+ if (err == -ENOMEM || err == -ENXIO)
+ err = 0;
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
+ if (err)
+ break;
- return err;
-}
+ cond_resched();
+ }
-static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
-{
- return i915->mm.gemfs && has_transparent_hugepage();
+ return err;
}
-static int igt_ppgtt_gemfs_huge(void *arg)
+static int igt_ppgtt_sanity_check(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_2M,
- SZ_4M,
- SZ_8M,
- SZ_16M,
- SZ_32M,
+ unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ struct {
+ igt_create_fn fn;
+ unsigned int flags;
+ } backends[] = {
+ { igt_create_system, 0, },
+ { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
};
- int i;
+ struct {
+ u32 size;
+ u32 pages;
+ } combos[] = {
+ { SZ_64K, SZ_64K },
+ { SZ_2M, SZ_2M },
+ { SZ_2M, SZ_64K },
+ { SZ_2M - SZ_64K, SZ_64K },
+ { SZ_2M - SZ_4K, SZ_64K | SZ_4K },
+ { SZ_2M + SZ_4K, SZ_64K | SZ_4K },
+ { SZ_2M + SZ_4K, SZ_2M | SZ_4K },
+ { SZ_2M + SZ_64K, SZ_2M | SZ_64K },
+ };
+ int i, j;
int err;
+ if (supported == I915_GTT_PAGE_SIZE_4K)
+ return 0;
+
/*
- * Sanity check that the HW uses huge pages correctly through gemfs --
- * ensure that our writes land in the right place.
+ * Sanity check that the HW behaves with a limited set of combinations.
+ * We already have a bunch of randomised testing, which should give us
+ * a decent amount of variation between runs, however we should keep
+ * this to limit the chances of introducing a temporary regression, by
+ * testing the most obvious cases that might make something blow up.
*/
- if (!igt_can_allocate_thp(i915)) {
- pr_info("missing THP support, skipping\n");
- return 0;
- }
+ for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+ for (j = 0; j < ARRAY_SIZE(combos); ++j) {
+ struct drm_i915_gem_object *obj;
+ u32 size = combos[j].size;
+ u32 pages = combos[j].pages;
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
+ obj = backends[i].fn(i915, size, backends[i].flags);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -ENODEV) {
+ pr_info("Device lacks local memory, skipping\n");
+ err = 0;
+ break;
+ }
- obj = i915_gem_object_create_shmem(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ return err;
+ }
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out_put;
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
- pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
- size);
- goto out_unpin;
- }
+ GEM_BUG_ON(pages > obj->base.size);
+ pages = pages & supported;
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("gemfs write-huge failed with size=%u\n",
- size);
- goto out_unpin;
+ if (pages)
+ obj->mm.page_sizes.sg = pages;
+
+ err = igt_write_huge(ctx, obj);
+
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ i915_gem_object_put(obj);
+
+ if (err) {
+ pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
+ __func__, size, pages, i, j);
+ goto out;
+ }
}
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
+ cond_resched();
}
- return 0;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
+out:
+ if (err == -ENOMEM)
+ err = 0;
return err;
}
@@ -1314,15 +1555,15 @@ static int igt_ppgtt_pin_update(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *dev_priv = ctx->i915;
unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct i915_address_space *vm = ctx->vm;
struct drm_i915_gem_object *obj;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
unsigned int n;
int first, last;
- int err;
+ int err = 0;
/*
* Make sure there's no funny business when doing a PIN_UPDATE -- in the
@@ -1332,9 +1573,10 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!vm || !i915_vm_is_4lvl(vm)) {
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (!i915_vm_is_4lvl(vm)) {
pr_info("48b PPGTT not supported, skipping\n");
- return 0;
+ goto out_vm;
}
first = ilog2(I915_GTT_PAGE_SIZE_64K);
@@ -1387,7 +1629,7 @@ static int igt_ppgtt_pin_update(void *arg)
goto out_unpin;
}
- err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+ err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
if (err)
goto out_unpin;
@@ -1419,14 +1661,18 @@ static int igt_ppgtt_pin_update(void *arg)
*/
n = 0;
- for_each_engine(engine, dev_priv, id) {
- if (!intel_engine_can_store_dword(engine))
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
+ i915_gem_context_unlock_engines(ctx);
+ if (err)
+ goto out_unpin;
+
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
@@ -1439,6 +1685,8 @@ out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
+out_vm:
+ i915_vm_put(vm);
return err;
}
@@ -1448,7 +1696,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1498,6 +1746,7 @@ out_put:
out_restore:
i915->mm.gemfs = gemfs;
+ i915_vm_put(vm);
return err;
}
@@ -1505,14 +1754,14 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
unsigned int n;
- int err;
+ int err = 0;
/*
* Sanity check shrinking huge-paged object -- make sure nothing blows
@@ -1521,12 +1770,14 @@ static int igt_shrink_thp(void *arg)
if (!igt_can_allocate_thp(i915)) {
pr_info("missing THP support, skipping\n");
- return 0;
+ goto out_vm;
}
obj = i915_gem_object_create_shmem(i915, SZ_2M);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_vm;
+ }
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
@@ -1548,16 +1799,19 @@ static int igt_shrink_thp(void *arg)
goto out_unpin;
n = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine))
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
-
+ i915_gem_context_unlock_engines(ctx);
i915_vma_unpin(vma);
+ if (err)
+ goto out_close;
/*
* Now that the pages are *unpinned* shrink-all should invoke
@@ -1583,16 +1837,17 @@ static int igt_shrink_thp(void *arg)
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
-
out_unpin:
i915_vma_unpin(vma);
out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
+out_vm:
+ i915_vm_put(vm);
return err;
}
@@ -1601,6 +1856,7 @@ int i915_gem_huge_page_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_exhaust_device_supported_pages),
+ SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
SUBTEST(igt_mock_ppgtt_huge_fill),
SUBTEST(igt_mock_ppgtt_64K),
@@ -1617,7 +1873,6 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
@@ -1643,9 +1898,7 @@ out_close:
i915_vm_put(&ppgtt->vm);
out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
drm_dev_put(&dev_priv->drm);
-
return err;
}
@@ -1656,12 +1909,12 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
SUBTEST(igt_ppgtt_exhaust_huge),
- SUBTEST(igt_ppgtt_gemfs_huge),
- SUBTEST(igt_ppgtt_internal_huge),
+ SUBTEST(igt_ppgtt_smoke_huge),
+ SUBTEST(igt_ppgtt_sanity_check),
};
struct drm_file *file;
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
+ struct i915_address_space *vm;
int err;
if (!HAS_PPGTT(i915)) {
@@ -1676,25 +1929,21 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
- if (ctx->vm)
- ctx->vm->scrub_64K = true;
+ mutex_lock(&ctx->mutex);
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ WRITE_ONCE(vm->scrub_64K, true);
+ mutex_unlock(&ctx->mutex);
err = i915_subtests(tests, ctx);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
mock_file_free(i915, file);
-
return err;
}
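igt_ppgtt_smoke_huge(), added above, sizes its test objects with the new igt_random_size() helper: it draws a page-aligned value below twice the requested maximum page size and, if the draw comes up smaller than the minimum, ORs the minimum back in so the object can still hold at least one huge page. A self-contained userspace re-implementation of that computation, for illustration only (PAGE_SIZE, PAGE_MASK and the PRNG are local stand-ins for the kernel's definitions):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE 4096u
	#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

	static int is_pow2(uint32_t x)
	{
		return x && !(x & (x - 1));
	}

	static uint32_t igt_random_size(uint32_t min_page_size, uint32_t max_page_size)
	{
		uint64_t mask;
		uint32_t size;

		assert(is_pow2(min_page_size) && is_pow2(max_page_size));
		assert(min_page_size >= PAGE_SIZE && min_page_size <= max_page_size);

		/* Page-aligned draw below twice the maximum page size... */
		mask = (((uint64_t)max_page_size << 1) - 1) & PAGE_MASK;
		size = ((((uint32_t)rand() << 16) ^ (uint32_t)rand()) & mask);
		/* ...forcing at least one huge page if the draw came up short. */
		if (size < min_page_size)
			size |= min_page_size;

		return size;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 5; i++)
			printf("%u\n", igt_random_size(1u << 16, 1u << 21)); /* 64K .. 2M */
		return 0;
	}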
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index d8804a847945..da8edee4fe0a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,6 +5,7 @@
#include "i915_selftest.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "selftests/igt_flush_test.h"
@@ -12,10 +13,9 @@
#include "huge_gem_object.h"
#include "mock_context.h"
-static int igt_client_fill(void *arg)
+static int __igt_client_fill(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+ struct intel_context *ce = engine->kernel_context;
struct drm_i915_gem_object *obj;
struct rnd_state prng;
IGT_TIMEOUT(end);
@@ -37,7 +37,7 @@ static int igt_client_fill(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
- obj = huge_gem_object(i915, phys_sz, sz);
+ obj = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -103,6 +103,28 @@ err_flush:
return err;
}
+static int igt_client_fill(void *arg)
+{
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ int err;
+
+ engine = intel_engine_lookup_user(arg,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ err = __igt_client_fill(engine);
+ if (err == -ENOMEM)
+ err = 0;
+ if (err)
+ return err;
+ } while (1);
+}
+
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 0ff7a89aadca..2b29f6b4e1dd 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -7,13 +7,18 @@
#include <linux/prime_numbers.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_ring.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
-static int cpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+struct context {
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+};
+
+static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
unsigned int needs_clflush;
struct page *page;
@@ -21,11 +26,11 @@ static int cpu_set(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_object_prepare_write(obj, &needs_clflush);
+ err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
if (err)
return err;
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
cpu = map + offset_in_page(offset);
@@ -38,14 +43,12 @@ static int cpu_set(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap_atomic(map);
- i915_gem_object_finish_access(obj);
+ i915_gem_object_finish_access(ctx->obj);
return 0;
}
-static int cpu_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
unsigned int needs_clflush;
struct page *page;
@@ -53,11 +56,11 @@ static int cpu_get(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_object_prepare_read(obj, &needs_clflush);
+ err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
if (err)
return err;
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
cpu = map + offset_in_page(offset);
@@ -67,136 +70,137 @@ static int cpu_get(struct drm_i915_gem_object *obj,
*v = *cpu;
kunmap_atomic(map);
- i915_gem_object_finish_access(obj);
+ i915_gem_object_finish_access(ctx->obj);
return 0;
}
-static int gtt_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
-static int gtt_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
*v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
-static int wc_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
u32 *map;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
map[offset / sizeof(*map)] = v;
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(ctx->obj);
return 0;
}
-static int wc_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
u32 *map;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
*v = map[offset / sizeof(*map)];
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(ctx->obj);
return 0;
}
-static int gpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
+ rq = i915_request_create(ctx->engine->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -209,12 +213,12 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return PTR_ERR(cs);
}
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(ctx->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (INTEL_GEN(ctx->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
@@ -239,32 +243,34 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return err;
}
-static bool always_valid(struct drm_i915_private *i915)
+static bool always_valid(struct context *ctx)
{
return true;
}
-static bool needs_fence_registers(struct drm_i915_private *i915)
+static bool needs_fence_registers(struct context *ctx)
{
- return !intel_gt_is_wedged(&i915->gt);
-}
+ struct intel_gt *gt = ctx->engine->gt;
-static bool needs_mi_store_dword(struct drm_i915_private *i915)
-{
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(gt))
return false;
- if (!HAS_ENGINE(i915, RCS0))
+ return gt->ggtt->num_fences;
+}
+
+static bool needs_mi_store_dword(struct context *ctx)
+{
+ if (intel_gt_is_wedged(ctx->engine->gt))
return false;
- return intel_engine_can_store_dword(i915->engine[RCS0]);
+ return intel_engine_can_store_dword(ctx->engine);
}
static const struct igt_coherency_mode {
const char *name;
- int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
- int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
- bool (*valid)(struct drm_i915_private *i915);
+ int (*set)(struct context *ctx, unsigned long offset, u32 v);
+ int (*get)(struct context *ctx, unsigned long offset, u32 *v);
+ bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
{ "cpu", cpu_set, cpu_get, always_valid },
{ "gtt", gtt_set, gtt_get, needs_fence_registers },
@@ -273,19 +279,37 @@ static const struct igt_coherency_mode {
{ },
};
+static struct intel_engine_cs *
+random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ for_each_uabi_engine(engine, i915)
+ if (count-- == 0)
+ return engine;
+
+ return NULL;
+}
+
static int igt_gem_coherency(void *arg)
{
const unsigned int ncachelines = PAGE_SIZE/64;
- I915_RND_STATE(prng);
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
+ I915_RND_STATE(prng);
+ struct context ctx;
int err = 0;
- /* We repeatedly write, overwrite and read from a sequence of
+ /*
+ * We repeatedly write, overwrite and read from a sequence of
* cachelines in order to try and detect incoherency (unflushed writes
* from either the CPU or GPU). Each setter/getter uses our cache
* domain API which should prevent incoherency.
@@ -299,34 +323,36 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ ctx.engine = random_engine(i915, &prng);
+ GEM_BUG_ON(!ctx.engine);
+ pr_info("%s: using %s\n", __func__, ctx.engine->name);
+
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
- if (!over->valid(i915))
+ if (!over->valid(&ctx))
continue;
for (write = igt_coherency_mode; write->name; write++) {
if (!write->set)
continue;
- if (!write->valid(i915))
+ if (!write->valid(&ctx))
continue;
for (read = igt_coherency_mode; read->name; read++) {
if (!read->get)
continue;
- if (!read->valid(i915))
+ if (!read->valid(&ctx))
continue;
for_each_prime_number_from(count, 1, ncachelines) {
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto unlock;
+ ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(ctx.obj)) {
+ err = PTR_ERR(ctx.obj);
+ goto free;
}
i915_random_reorder(offsets, ncachelines, &prng);
@@ -334,7 +360,7 @@ static int igt_gem_coherency(void *arg)
values[n] = prandom_u32_state(&prng);
for (n = 0; n < count; n++) {
- err = over->set(obj, offsets[n], ~values[n]);
+ err = over->set(&ctx, offsets[n], ~values[n]);
if (err) {
pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
n, count, over->name, err);
@@ -343,7 +369,7 @@ static int igt_gem_coherency(void *arg)
}
for (n = 0; n < count; n++) {
- err = write->set(obj, offsets[n], values[n]);
+ err = write->set(&ctx, offsets[n], values[n]);
if (err) {
pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
n, count, write->name, err);
@@ -354,7 +380,7 @@ static int igt_gem_coherency(void *arg)
for (n = 0; n < count; n++) {
u32 found;
- err = read->get(obj, offsets[n], &found);
+ err = read->get(&ctx, offsets[n], &found);
if (err) {
pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
n, count, read->name, err);
@@ -372,20 +398,18 @@ static int igt_gem_coherency(void *arg)
}
}
- i915_gem_object_put(obj);
+ i915_gem_object_put(ctx.obj);
}
}
}
}
-unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+free:
kfree(offsets);
return err;
put_object:
- i915_gem_object_put(obj);
- goto unlock;
+ i915_gem_object_put(ctx.obj);
+ goto free;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
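The coherency selftest above no longer takes struct_mutex or a runtime-pm wakeref; instead it binds the whole run to a single engine chosen up front by the new random_engine() helper, which counts the uabi engines in one pass and then walks the list a second time to the randomly drawn index. The same two-pass pick can be shown in plain C; the list type, names and PRNG below are stand-ins, not i915 code:

	#include <stdio.h>
	#include <stdlib.h>

	struct engine {
		const char *name;
		struct engine *next;
	};

	/* Two-pass pick, mirroring random_engine() above: count the elements,
	 * draw an index, then walk the list again until the index runs out. */
	static struct engine *random_engine(struct engine *head)
	{
		struct engine *e;
		unsigned int count = 0;

		for (e = head; e; e = e->next)
			count++;
		if (!count)
			return NULL;

		count = (unsigned int)rand() % count; /* stand-in for i915_prandom_u32_max_state() */
		for (e = head; e; e = e->next)
			if (count-- == 0)
				return e;

		return NULL;
	}

	int main(void)
	{
		struct engine vcs = { "vcs0", NULL };
		struct engine bcs = { "bcs0", &vcs };
		struct engine rcs = { "rcs0", &bcs };

		printf("picked %s\n", random_engine(&rcs)->name);
		return 0;
	}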
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 3e6f4a65d356..62fabc023a83 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -31,7 +32,6 @@ static int live_nop_switch(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
- enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
unsigned long n;
@@ -52,23 +52,21 @@ static int live_nop_switch(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
err = -ENOMEM;
- goto out_unlock;
+ goto out_file;
}
for (n = 0; n < nctx; n++) {
ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
- goto out_unlock;
+ goto out_file;
}
}
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
unsigned long end_time, prime;
ktime_t times[2] = {};
@@ -78,7 +76,7 @@ static int live_nop_switch(void *arg)
rq = igt_request_alloc(ctx[n], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unlock;
+ goto out_file;
}
i915_request_add(rq);
}
@@ -86,7 +84,7 @@ static int live_nop_switch(void *arg)
pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt);
err = -EIO;
- goto out_unlock;
+ goto out_file;
}
times[1] = ktime_get_raw();
@@ -96,7 +94,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ goto out_file;
end_time = jiffies + i915_selftest.timeout_jiffies;
for_each_prime_number_from(prime, 2, 8192) {
@@ -106,7 +104,7 @@ static int live_nop_switch(void *arg)
rq = igt_request_alloc(ctx[n % nctx], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unlock;
+ goto out_file;
}
/*
@@ -142,7 +140,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ goto out_file;
pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -150,8 +148,235 @@ static int live_nop_switch(void *arg)
prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+struct parallel_switch {
+ struct task_struct *tsk;
+ struct intel_context *ce[2];
+};
+
+static int __live_parallel_switch1(void *data)
+{
+ struct parallel_switch *arg = data;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq = NULL;
+ int err, n;
+
+ err = 0;
+ for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
+ struct i915_request *prev = rq;
+
+ rq = i915_request_create(arg->ce[n]);
+ if (IS_ERR(rq)) {
+ i915_request_put(prev);
+ return PTR_ERR(rq);
+ }
+
+ i915_request_get(rq);
+ if (prev) {
+ err = i915_request_await_dma_fence(rq, &prev->fence);
+ i915_request_put(prev);
+ }
+
+ i915_request_add(rq);
+ }
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+ return 0;
+}
+
+static int __live_parallel_switchN(void *data)
+{
+ struct parallel_switch *arg = data;
+ struct i915_request *rq = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int n;
+
+ count = 0;
+ do {
+ for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
+ struct i915_request *prev = rq;
+ int err = 0;
+
+ rq = i915_request_create(arg->ce[n]);
+ if (IS_ERR(rq)) {
+ i915_request_put(prev);
+ return PTR_ERR(rq);
+ }
+
+ i915_request_get(rq);
+ if (prev) {
+ err = i915_request_await_dma_fence(rq, &prev->fence);
+ i915_request_put(prev);
+ }
+
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return err;
+ }
+ }
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(rq);
+
+ pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+ return 0;
+}
+
+static int live_parallel_switch(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ __live_parallel_switch1,
+ __live_parallel_switchN,
+ NULL,
+ };
+ struct parallel_switch *data = NULL;
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ int (* const *fn)(void *arg);
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ int n, m, count;
+ int err = 0;
+
+ /*
+ * Check we can process switches on all engines simultaneously.
+ */
+
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ engines = i915_gem_context_lock_engines(ctx);
+ count = engines->num_engines;
+
+ data = kcalloc(count, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ i915_gem_context_unlock_engines(ctx);
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ m = 0; /* Use the first context as our template for the engines */
+ for_each_gem_engine(ce, engines, it) {
+ err = intel_context_pin(ce);
+ if (err) {
+ i915_gem_context_unlock_engines(ctx);
+ goto out;
+ }
+ data[m++].ce[0] = intel_context_get(ce);
+ }
+ i915_gem_context_unlock_engines(ctx);
+
+ /* Clone the same set of engines into the other contexts */
+ for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ for (m = 0; m < count; m++) {
+ if (!data[m].ce[0])
+ continue;
+
+ ce = intel_context_create(ctx, data[m].ce[0]->engine);
+ if (IS_ERR(ce))
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ data[m].ce[n] = ce;
+ }
+ }
+
+ for (fn = func; !err && *fn; fn++) {
+ struct igt_live_test t;
+ int n;
+
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ break;
+
+ for (n = 0; n < count; n++) {
+ if (!data[n].ce[0])
+ continue;
+
+ data[n].tsk = kthread_run(*fn, &data[n],
+ "igt/parallel:%s",
+ data[n].ce[0]->engine->name);
+ if (IS_ERR(data[n].tsk)) {
+ err = PTR_ERR(data[n].tsk);
+ break;
+ }
+ get_task_struct(data[n].tsk);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ for (n = 0; n < count; n++) {
+ int status;
+
+ if (IS_ERR_OR_NULL(data[n].tsk))
+ continue;
+
+ status = kthread_stop(data[n].tsk);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(data[n].tsk);
+ data[n].tsk = NULL;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+out:
+ for (n = 0; n < count; n++) {
+ for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
+ if (!data[n].ce[m])
+ continue;
+
+ intel_context_unpin(data[n].ce[m]);
+ intel_context_put(data[n].ce[m]);
+ }
+ }
+ kfree(data);
+out_file:
mock_file_free(i915, file);
return err;
}
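live_parallel_switch(), added above, spawns one kernel thread per engine with kthread_run(), lets them all build context-switch request chains concurrently, then kthread_stop()s each thread and keeps the first non-zero status. A rough userspace analogy of that fan-out/join shape (pthreads instead of kthreads, a dummy loop instead of request submission; none of this is i915 code):

	#include <pthread.h>
	#include <stdio.h>

	struct parallel_switch {
		pthread_t tsk;
		const char *engine_name;
		int status;
	};

	static void *worker(void *data)
	{
		struct parallel_switch *arg = data;
		unsigned long count;

		/* Stand-in for the request-submission loop bounded by IGT_TIMEOUT. */
		for (count = 0; count < 1000000; count++)
			;

		printf("%s: %lu switches\n", arg->engine_name, count);
		arg->status = 0;
		return NULL;
	}

	int main(void)
	{
		struct parallel_switch data[] = {
			{ .engine_name = "rcs0" },
			{ .engine_name = "bcs0" },
			{ .engine_name = "vcs0" },
		};
		int n, err = 0;

		/* Start every worker before joining any, as the selftest does. */
		for (n = 0; n < 3; n++)
			pthread_create(&data[n].tsk, NULL, worker, &data[n]);

		for (n = 0; n < 3; n++) {
			pthread_join(data[n].tsk, NULL);
			if (data[n].status && !err)
				err = data[n].status; /* keep the first failure, like the kthread_stop() handling above */
		}

		return err;
	}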
@@ -166,28 +391,20 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
-static int gpu_fill(struct drm_i915_gem_object *obj,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int gpu_fill(struct intel_context *ce,
+ struct drm_i915_gem_object *obj,
unsigned int dw)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_vma *vma;
int err;
- GEM_BUG_ON(obj->base.size > vm->total);
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(obj->base.size > ce->vm->total);
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
-
err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
if (err)
return err;
@@ -200,9 +417,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
* whilst checking that each context provides a unique view
* into the object.
*/
- err = igt_gpu_fill_dw(vma,
- ctx,
- engine,
+ err = igt_gpu_fill_dw(ce, vma,
(dw * real_page_count(obj)) << PAGE_SHIFT |
(dw * sizeof(u32)),
real_page_count(obj),
@@ -305,22 +520,21 @@ static int file_add_object(struct drm_file *file,
}
static struct drm_i915_gem_object *
-create_test_object(struct i915_gem_context *ctx,
+create_test_object(struct i915_address_space *vm,
struct drm_file *file,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
u64 size;
int err;
/* Keep in GEM's good graces */
- i915_retire_requests(ctx->i915);
+ intel_gt_retire_requests(vm->gt);
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
- obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+ obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
if (IS_ERR(obj))
return obj;
@@ -348,11 +562,49 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
return npages / DW_PER_PAGE;
}
+static void throttle_release(struct i915_request **q, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (IS_ERR_OR_NULL(q[i]))
+ continue;
+
+ i915_request_put(fetch_and_zero(&q[i]));
+ }
+}
+
+static int throttle(struct intel_context *ce,
+ struct i915_request **q, int count)
+{
+ int i;
+
+ if (!IS_ERR_OR_NULL(q[0])) {
+ if (i915_request_wait(q[0],
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0)
+ return -EINTR;
+
+ i915_request_put(q[0]);
+ }
+
+ for (i = 0; i < count - 1; i++)
+ q[i] = q[i + 1];
+
+ q[i] = intel_context_create_request(ce);
+ if (IS_ERR(q[i]))
+ return PTR_ERR(q[i]);
+
+ i915_request_get(q[i]);
+ i915_request_add(q[i]);
+
+ return 0;
+}
+
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
int err = -ENODEV;
/*
@@ -364,9 +616,10 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
+ struct i915_request *tq[5] = {};
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
@@ -382,39 +635,53 @@ static int igt_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ goto out_file;
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
+ struct intel_context *ce;
- ctx = live_context(i915, file);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
}
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
- goto out_unlock;
+ engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
}
if (++dw == max_dwords(obj)) {
@@ -424,6 +691,9 @@ static int igt_ctx_exec(void *arg)
ndwords++;
ncontexts++;
+
+ intel_context_put(ce);
+ kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -441,10 +711,10 @@ static int igt_ctx_exec(void *arg)
dw += rem;
}
-out_unlock:
+out_file:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
if (err)
@@ -459,9 +729,9 @@ out_unlock:
static int igt_shared_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct i915_request *tq[5] = {};
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
int err = 0;
@@ -478,24 +748,22 @@ static int igt_shared_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
parent = live_context(i915, file);
if (IS_ERR(parent)) {
err = PTR_ERR(parent);
- goto out_unlock;
+ goto out_file;
}
if (!parent->vm) { /* not full-ppgtt; nothing to share */
err = 0;
- goto out_unlock;
+ goto out_file;
}
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
unsigned long ncontexts, ndwords, dw;
struct drm_i915_gem_object *obj = NULL;
IGT_TIMEOUT(end_time);
@@ -509,6 +777,7 @@ static int igt_shared_ctx_exec(void *arg)
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
+ struct intel_context *ce;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -516,23 +785,38 @@ static int igt_shared_ctx_exec(void *arg)
goto out_test;
}
+ mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, parent->vm);
+ mutex_unlock(&ctx->mutex);
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
- obj = create_test_object(parent, file, &objects);
+ obj = create_test_object(parent->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
+ intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
+ engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_test;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
@@ -545,6 +829,7 @@ static int igt_shared_ctx_exec(void *arg)
ndwords++;
ncontexts++;
+ intel_context_put(ce);
kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -562,16 +847,13 @@ static int igt_shared_ctx_exec(void *arg)
dw += rem;
}
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
}
out_test:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
mock_file_free(i915, file);
return err;
}
@@ -604,6 +886,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
+ intel_gt_chipset_flush(vma->vm->gt);
+
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -681,10 +965,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto skip_request;
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ i915_vma_unpin_and_release(&batch, 0);
i915_vma_unpin(vma);
*rq_out = i915_request_get(rq);
@@ -698,8 +979,7 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
err_vma:
i915_vma_unpin(vma);
@@ -860,8 +1140,8 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(ce->engine->i915,
- 0, MAX_SCHEDULE_TIMEOUT);
+ ret = intel_gt_wait_for_idle(ce->engine->gt,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -887,7 +1167,7 @@ __sseu_test(const char *name,
if (ret)
return ret;
- ret = __intel_context_reconfigure_sseu(ce, sseu);
+ ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
@@ -908,106 +1188,97 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
const char *name,
unsigned int flags)
{
- struct intel_engine_cs *engine = i915->engine[RCS0];
struct drm_i915_gem_object *obj;
- struct i915_gem_context *ctx;
- struct intel_context *ce;
- struct intel_sseu pg_sseu;
- struct drm_file *file;
- int ret;
-
- if (INTEL_GEN(i915) < 9 || !engine)
- return 0;
-
- if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
- return 0;
+ int inst = 0;
+ int ret = 0;
- if (hweight32(engine->sseu.slice_mask) < 2)
+ if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
return 0;
- /*
- * Gen11 VME friendly power-gated configuration with half enabled
- * sub-slices.
- */
- pg_sseu = engine->sseu;
- pg_sseu.slice_mask = 1;
- pg_sseu.subslice_mask =
- ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
-
- pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
- name, flags, hweight32(engine->sseu.slice_mask),
- hweight32(pg_sseu.slice_mask));
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
if (flags & TEST_RESET)
igt_global_reset_lock(&i915->gt);
- mutex_lock(&i915->drm.struct_mutex);
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out_unlock;
- }
- i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
-
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto out_unlock;
}
- ce = i915_gem_context_get_engine(ctx, RCS0);
- if (IS_ERR(ce)) {
- ret = PTR_ERR(ce);
- goto out_put;
- }
+ do {
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ struct intel_sseu pg_sseu;
- ret = intel_context_pin(ce);
- if (ret)
- goto out_context;
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_RENDER,
+ inst++);
+ if (!engine)
+ break;
- /* First set the default mask. */
- ret = __sseu_test(name, flags, ce, obj, engine->sseu);
- if (ret)
- goto out_fail;
+ if (hweight32(engine->sseu.slice_mask) < 2)
+ continue;
- /* Then set a power-gated configuration. */
- ret = __sseu_test(name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
+ /*
+ * Gen11 VME friendly power-gated configuration with
+ * half enabled sub-slices.
+ */
+ pg_sseu = engine->sseu;
+ pg_sseu.slice_mask = 1;
+ pg_sseu.subslice_mask =
+ ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
+
+ pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
+ engine->name, name, flags,
+ hweight32(engine->sseu.slice_mask),
+ hweight32(pg_sseu.slice_mask));
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_put;
+ }
- /* Back to defaults. */
- ret = __sseu_test(name, flags, ce, obj, engine->sseu);
- if (ret)
- goto out_fail;
+ ret = intel_context_pin(ce);
+ if (ret)
+ goto out_ce;
- /* One last power-gated configuration for the road. */
- ret = __sseu_test(name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
+ /* First set the default mask. */
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
+ if (ret)
+ goto out_unpin;
+
+ /* Then set a power-gated configuration. */
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
+ if (ret)
+ goto out_unpin;
+
+ /* Back to defaults. */
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
+ if (ret)
+ goto out_unpin;
-out_fail:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ /* One last power-gated configuration for the road. */
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
+ if (ret)
+ goto out_unpin;
+
+out_unpin:
+ intel_context_unpin(ce);
+out_ce:
+ intel_context_put(ce);
+ } while (!ret);
+
+ if (igt_flush_test(i915))
ret = -EIO;
- intel_context_unpin(ce);
-out_context:
- intel_context_put(ce);
out_put:
i915_gem_object_put(obj);
out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
if (flags & TEST_RESET)
igt_global_reset_unlock(&i915->gt);
- mock_file_free(i915, file);
-
if (ret)
pr_err("%s: Failed with %d!\n", name, ret);
@@ -1041,6 +1312,7 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_request *tq[5] = {};
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx, ndwords, dw;
@@ -1061,52 +1333,63 @@ static int igt_ctx_readonly(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
- vm = ctx->vm ?: &i915->ggtt.alias->vm;
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
+ rcu_read_unlock();
err = 0;
- goto out_unlock;
+ goto out_file;
}
+ rcu_read_unlock();
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- unsigned int id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine))
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
}
if (prandom_u32_state(&prng) & 1)
i915_gem_object_set_readonly(obj);
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
- goto out_unlock;
+ ce->engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
}
if (++dw == max_dwords(obj)) {
@@ -1115,6 +1398,7 @@ static int igt_ctx_readonly(void *arg)
}
ndwords++;
}
+ i915_gem_context_unlock_engines(ctx);
}
pr_info("Submitted %lu dwords (across %u engines)\n",
ndwords, RUNTIME_INFO(i915)->num_engines);
@@ -1137,19 +1421,19 @@ static int igt_ctx_readonly(void *arg)
dw += rem;
}
-out_unlock:
+out_file:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
-static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+static int check_scratch(struct i915_address_space *vm, u64 offset)
{
struct drm_mm_node *node =
- __drm_mm_interval_first(&ctx->vm->mm,
+ __drm_mm_interval_first(&vm->mm,
offset, offset + sizeof(u32) - 1);
if (!node || node->start > offset)
return 0;
@@ -1167,6 +1451,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *vma;
u32 *cmd;
@@ -1197,17 +1482,20 @@ static int write_to_scratch(struct i915_gem_context *ctx,
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ intel_gt_chipset_flush(engine->gt);
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ goto err_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err;
+ goto err_vm;
- err = check_scratch(ctx, offset);
+ err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@@ -1229,12 +1517,11 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
- i915_vma_unpin(vma);
- i915_vma_close(vma);
- i915_vma_put(vma);
+ i915_vma_unpin_and_release(&vma, 0);
i915_request_add(rq);
+ i915_vm_put(vm);
return 0;
skip_request:
@@ -1243,6 +1530,8 @@ err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
+err_vm:
+ i915_vm_put(vm);
err:
i915_gem_object_put(obj);
return err;
@@ -1254,6 +1543,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100;
struct i915_request *rq;
@@ -1296,17 +1586,20 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ intel_gt_chipset_flush(engine->gt);
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ goto err_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err;
+ goto err_vm;
- err = check_scratch(ctx, offset);
+ err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@@ -1337,12 +1630,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
- goto err;
+ goto err_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto err_vm;
}
*value = cmd[result / sizeof(*cmd)];
@@ -1357,6 +1650,8 @@ err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
+err_vm:
+ i915_vm_put(vm);
err:
i915_gem_object_put(obj);
return err;
@@ -1371,7 +1666,6 @@ static int igt_vm_isolation(void *arg)
struct drm_file *file;
I915_RND_STATE(prng);
unsigned long count;
- unsigned int id;
u64 vm_total;
int err;
@@ -1387,34 +1681,32 @@ static int igt_vm_isolation(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
- goto out_unlock;
+ goto out_file;
}
ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
- goto out_unlock;
+ goto out_file;
}
/* We can only test vm isolation, if the vm are distinct */
if (ctx_a->vm == ctx_b->vm)
- goto out_unlock;
+ goto out_file;
vm_total = ctx_a->vm->total;
GEM_BUG_ON(ctx_b->vm->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
unsigned long this = 0;
@@ -1436,7 +1728,7 @@ static int igt_vm_isolation(void *arg)
err = read_from_scratch(ctx_b, engine,
offset, &value);
if (err)
- goto out_unlock;
+ goto out_file;
if (value) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1445,7 +1737,7 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
- goto out_unlock;
+ goto out_file;
}
this++;
@@ -1455,30 +1747,13 @@ static int igt_vm_isolation(void *arg)
pr_info("Checked %lu scratch offsets across %d engines\n",
count, RUNTIME_INFO(i915)->num_engines);
-out_unlock:
+out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
mock_file_free(i915, file);
return err;
}
-static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
-
- if (engines == ALL_ENGINES)
- return "all";
-
- for_each_engine_masked(engine, i915, engines, tmp)
- return engine->name;
-
- return "none";
-}
-
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
return !ce->state;
@@ -1506,13 +1781,9 @@ static int mock_context_barrier(void *arg)
* a request; useful for retiring old state after loading new.
*/
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = mock_context(i915, "mock");
- if (!ctx) {
- err = -ENOMEM;
- goto unlock;
- }
+ if (!ctx)
+ return -ENOMEM;
counter = 0;
err = context_barrier_task(ctx, 0,
@@ -1585,8 +1856,6 @@ static int mock_context_barrier(void *arg)
out:
mock_context_close(ctx);
-unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
#undef pr_fmt
#define pr_fmt(x) x
@@ -1614,6 +1883,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_switch),
+ SUBTEST(live_parallel_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 1d27babff0ce..29b2077b73d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -10,6 +10,7 @@
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
+#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
struct tile {
@@ -76,18 +77,103 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
static int check_partial_mapping(struct drm_i915_gem_object *obj,
const struct tile *tile,
- unsigned long end_time)
+ struct rnd_state *prng)
{
- const unsigned int nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long page;
+ u32 __iomem *io;
+ struct page *p;
+ unsigned int n;
+ u64 offset;
+ u32 *cpu;
int err;
- if (igt_timeout(end_time,
- "%s: timed out before tiling=%d stride=%d\n",
- __func__, tile->tiling, tile->stride))
- return -EINTR;
+ err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
+ if (err) {
+ pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
+ tile->tiling, tile->stride, err);
+ return err;
+ }
+
+ GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
+ GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err) {
+ pr_err("Failed to flush to GTT write domain; err=%d\n", err);
+ return err;
+ }
+
+ page = i915_prandom_u32_max_state(npages, prng);
+ view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(vma));
+ return PTR_ERR(vma);
+ }
+
+ n = page - view.partial.offset;
+ GEM_BUG_ON(n >= view.partial.size);
+
+ io = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(io)) {
+ pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(io));
+ err = PTR_ERR(io);
+ goto out;
+ }
+
+ iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
+ i915_vma_unpin_iomap(vma);
+
+ offset = tiled_offset(tile, page << PAGE_SHIFT);
+ if (offset >= obj->base.size)
+ goto out;
+
+ intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+
+ p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ cpu = kmap(p) + offset_in_page(offset);
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ if (*cpu != (u32)page) {
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ page, n,
+ view.partial.offset,
+ view.partial.size,
+ vma->size >> PAGE_SHIFT,
+ tile->tiling ? tile_row_pages(obj) : 0,
+ vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
+ offset >> PAGE_SHIFT,
+ (unsigned int)offset_in_page(offset),
+ offset,
+ (u32)page, *cpu);
+ err = -EINVAL;
+ }
+ *cpu = 0;
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ kunmap(p);
+
+out:
+ i915_vma_destroy(vma);
+ return err;
+}
+
+static int check_partial_mappings(struct drm_i915_gem_object *obj,
+ const struct tile *tile,
+ unsigned long end_time)
+{
+ const unsigned int nreal = obj->scratch / PAGE_SIZE;
+ const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_vma *vma;
+ unsigned long page;
+ int err;
err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
if (err) {
@@ -170,11 +256,42 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return err;
i915_vma_destroy(vma);
+
+ if (igt_timeout(end_time,
+ "%s: timed out after tiling=%d stride=%d\n",
+ __func__, tile->tiling, tile->stride))
+ return -EINTR;
}
return 0;
}
+static unsigned int
+setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) <= 2) {
+ tile->height = 16;
+ tile->width = 128;
+ tile->size = 11;
+ } else if (tile->tiling == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(i915)) {
+ tile->height = 32;
+ tile->width = 128;
+ tile->size = 12;
+ } else {
+ tile->height = 8;
+ tile->width = 512;
+ tile->size = 12;
+ }
+
+ if (INTEL_GEN(i915) < 4)
+ return 8192 / tile->width;
+ else if (INTEL_GEN(i915) < 7)
+ return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
+ else
+ return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
+}
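/*
 * For example (derived from the branches above, illustrative only): on a gen3
 * part with X tiling the final else branch selects a 512-byte-wide, 8-row
 * tile, so the maximum fence pitch is 8192 / 512 == 16 tiles, and
 * igt_partial_tiling() below walks pitches 16, 8, 4, 2, 1, i.e. strides of
 * 8192 down to 512 bytes.
 */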
+
static int igt_partial_tiling(void *arg)
{
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
@@ -184,6 +301,9 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
/* We want to check the page mapping and fencing of a large object
* mmapped through the GTT. The object we create is larger than can
* possibly be mmaped as a whole, and so we must use partial GGTT vma.
@@ -205,7 +325,6 @@ static int igt_partial_tiling(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
@@ -219,7 +338,7 @@ static int igt_partial_tiling(void *arg)
tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
tile.tiling = I915_TILING_NONE;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err && err != -EINTR)
goto out_unlock;
}
@@ -241,10 +360,10 @@ static int igt_partial_tiling(void *arg)
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
- tile.swizzle = i915->mm.bit_6_swizzle_x;
+ tile.swizzle = i915->ggtt.bit_6_swizzle_x;
break;
case I915_TILING_Y:
- tile.swizzle = i915->mm.bit_6_swizzle_y;
+ tile.swizzle = i915->ggtt.bit_6_swizzle_y;
break;
}
@@ -253,31 +372,11 @@ static int igt_partial_tiling(void *arg)
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
- if (INTEL_GEN(i915) <= 2) {
- tile.height = 16;
- tile.width = 128;
- tile.size = 11;
- } else if (tile.tiling == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(i915)) {
- tile.height = 32;
- tile.width = 128;
- tile.size = 12;
- } else {
- tile.height = 8;
- tile.width = 512;
- tile.size = 12;
- }
-
- if (INTEL_GEN(i915) < 4)
- max_pitch = 8192 / tile.width;
- else if (INTEL_GEN(i915) < 7)
- max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
- else
- max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;
+ max_pitch = setup_tile_size(&tile, i915);
for (pitch = max_pitch; pitch; pitch >>= 1) {
tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -285,7 +384,7 @@ static int igt_partial_tiling(void *arg)
if (pitch > 2 && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch - 1);
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -294,7 +393,7 @@ static int igt_partial_tiling(void *arg)
if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch + 1);
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -305,7 +404,7 @@ static int igt_partial_tiling(void *arg)
if (INTEL_GEN(i915) >= 4) {
for_each_prime_number(pitch, max_pitch) {
tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -318,7 +417,100 @@ next_tiling: ;
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_smoke_tiling(void *arg)
+{
+ const unsigned int nreal = 1 << 12; /* largest tile row x2 */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ I915_RND_STATE(prng);
+ unsigned long count;
+ IGT_TIMEOUT(end);
+ int err;
+
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
+ /*
+ * igt_partial_tiling() does an exhaustive check of partial tiling
+ * chunking, but will undoubtedly run out of time. Here, we do a
+ * randomised search and hope that over many runs of 1s with different
+ * seeds we will do a thorough check.
+ *
+ * Remember to look at the st_seed if we see a flip-flop in BAT!
+ */
+
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ return 0;
+
+ obj = huge_gem_object(i915,
+ nreal << PAGE_SHIFT,
+ (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ count = 0;
+ do {
+ struct tile tile;
+
+ tile.tiling =
+ i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
+ switch (tile.tiling) {
+ case I915_TILING_NONE:
+ tile.height = 1;
+ tile.width = 1;
+ tile.size = 0;
+ tile.stride = 0;
+ tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
+ break;
+
+ case I915_TILING_X:
+ tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+ break;
+ }
+
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
+ tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
+ continue;
+
+ if (tile.tiling != I915_TILING_NONE) {
+ unsigned int max_pitch = setup_tile_size(&tile, i915);
+
+ tile.stride =
+ i915_prandom_u32_max_state(max_pitch, &prng);
+ tile.stride = (1 + tile.stride) * tile.width;
+ if (INTEL_GEN(i915) < 4)
+ tile.stride = rounddown_pow_of_two(tile.stride);
+ }
+
+ err = check_partial_mapping(obj, &tile, &prng);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end, NULL));
+
+ pr_info("%s: Completed %lu trials\n", __func__, count);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
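/*
 * An illustrative sketch of the stride randomisation used by
 * igt_smoke_tiling() above, assuming i915_prandom_u32_max_state(n, prng)
 * returns a value in [0, n): pick a pitch of 1..max_pitch tiles, convert it
 * to bytes, and round down to a power of two on gen2/3 where the fence
 * registers can only encode power-of-two pitches.
 */
static u32 example_random_stride(const struct tile *tile,
				 unsigned int max_pitch,
				 struct rnd_state *prng,
				 bool pre_gen4)
{
	u32 stride = (1 + i915_prandom_u32_max_state(max_pitch, prng)) * tile->width;

	if (pre_gen4)
		stride = rounddown_pow_of_two(stride);

	return stride;
}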
@@ -329,20 +521,19 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- int err;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- for_each_engine(engine, i915, id) {
- struct i915_request *rq;
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
@@ -358,12 +549,13 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
i915_vma_unlock(vma);
i915_request_add(rq);
+ i915_vma_unpin(vma);
+ if (err)
+ return err;
}
- i915_vma_unpin(vma);
i915_gem_object_put(obj); /* leave it only alive via its active ref */
-
- return err;
+ return 0;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
@@ -386,21 +578,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
-
intel_gt_pm_get(&i915->gt);
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
+ cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
+ igt_flush_test(i915);
intel_gt_pm_put(&i915->gt);
-
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_driver_register__shrinker(i915);
}
@@ -490,9 +675,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
err = make_obj_busy(obj);
- mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
goto err_obj;
@@ -515,6 +698,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
+ SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index c21d747e7d05..e8132aca0bb6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -3,40 +3,241 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/sort.h>
+
#include "gt/intel_gt.h"
+#include "gt/intel_engine_user.h"
#include "i915_selftest.h"
+#include "gem/i915_gem_context.h"
#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"
-static int igt_fill_blt(void *arg)
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static int __perf_fill_blt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ ktime_t t[5];
+ int pass;
+ int err;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct intel_context *ce = engine->kernel_context;
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ err = i915_gem_object_fill_blt(obj, ce, 0);
+ if (err)
+ return err;
+
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
+ engine->name,
+ obj->base.size >> 10,
+ div64_u64(mul_u32_u32(4 * obj->base.size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ } while (1);
+}
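/*
 * The rate reported above discards the fastest and slowest of the five timed
 * passes and weights the remaining samples 1:2:1, so the numerator covers
 * four passes worth of data.  A sketch of the same arithmetic in isolation
 * (helper name illustrative):
 */
static u64 example_blt_rate_MiB_per_s(u32 bytes_per_pass, const ktime_t t[5])
{
	/* t[] is assumed to be sorted ascending, as done with sort() above. */
	u64 ns = t[1] + 2 * t[2] + t[3];

	return div64_u64(mul_u32_u32(4 * bytes_per_pass, NSEC_PER_SEC), ns) >> 20;
}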
+
+static int perf_fill_blt(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
- struct drm_i915_gem_object *obj;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __perf_fill_blt(obj);
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __perf_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst)
+{
+ struct drm_i915_private *i915 = to_i915(src->base.dev);
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ ktime_t t[5];
+ int pass;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct intel_context *ce = engine->kernel_context;
+ ktime_t t0, t1;
+ int err;
+
+ t0 = ktime_get();
+
+ err = i915_gem_object_copy_blt(src, dst, ce);
+ if (err)
+ return err;
+
+ err = i915_gem_object_wait(dst,
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
+ engine->name,
+ src->base.size >> 10,
+ div64_u64(mul_u32_u32(4 * src->base.size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ } while (1);
+}
+
+static int perf_copy_blt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *src, *dst;
+ int err;
+
+ src = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
+ dst = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err_src;
+ }
+
+ err = __perf_copy_blt(src, dst);
+
+ i915_gem_object_put(dst);
+err_src:
+ i915_gem_object_put(src);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct igt_thread_arg {
+ struct drm_i915_private *i915;
struct rnd_state prng;
+ unsigned int n_cpus;
+};
+
+static int igt_fill_blt_thread(void *arg)
+{
+ struct igt_thread_arg *thread = arg;
+ struct drm_i915_private *i915 = thread->i915;
+ struct rnd_state *prng = &thread->prng;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ unsigned int prio;
IGT_TIMEOUT(end);
- u32 *vaddr;
- int err = 0;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
- prandom_seed_state(&prng, i915_selftest.random_seed);
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
- /*
- * XXX: needs some threads to scale all these tests, also maybe throw
- * in submission from higher priority context to see if we are
- * preempted for very large objects...
- */
+ ce = i915_gem_context_get_engine(ctx, BCS0);
+ GEM_BUG_ON(IS_ERR(ce));
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
- u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
- u32 phys_sz = sz % (max_block_size + 1);
- u32 val = prandom_u32_state(&prng);
+ u32 val = prandom_u32_state(prng);
+ u64 total = ce->vm->total;
+ u32 phys_sz;
+ u32 sz;
+ u32 *vaddr;
u32 i;
+ /*
+ * If we have a tiny shared address space, like for the GGTT
+ * then we can't be too greedy.
+ */
+ if (i915_is_ggtt(ce->vm))
+ total = div64_u64(total, thread->n_cpus);
+
+ sz = min_t(u64, total >> 4, prandom_u32_state(prng));
+ phys_sz = sz % (max_block_size + 1);
+
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -65,9 +266,7 @@ static int igt_fill_blt(void *arg)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_fill_blt(obj, ce, val);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@@ -100,28 +299,56 @@ err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_context_put(ce);
+out_file:
+ mock_file_free(i915, file);
return err;
}
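/*
 * A sketch (helper name illustrative) of the size selection in
 * igt_fill_blt_thread() above and igt_copy_blt_thread() below: the GTT size
 * is capped at a sixteenth of the context's address space, while the
 * physical backing handed to huge_gem_object() stays below S16_MAX pages --
 * presumably the largest single blt block -- and both are rounded up to
 * whole pages.
 */
static void example_pick_blt_sizes(u64 total, u32 rnd, u32 *sz, u32 *phys_sz)
{
	const u32 max_block_size = S16_MAX * PAGE_SIZE;

	*sz = min_t(u64, total >> 4, rnd);
	*phys_sz = *sz % (max_block_size + 1);

	*sz = round_up(*sz, PAGE_SIZE);
	*phys_sz = round_up(*phys_sz, PAGE_SIZE);
}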
-static int igt_copy_blt(void *arg)
+static int igt_copy_blt_thread(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+ struct igt_thread_arg *thread = arg;
+ struct drm_i915_private *i915 = thread->i915;
+ struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *src, *dst;
- struct rnd_state prng;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ unsigned int prio;
IGT_TIMEOUT(end);
- u32 *vaddr;
- int err = 0;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- prandom_seed_state(&prng, i915_selftest.random_seed);
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
+
+ ce = i915_gem_context_get_engine(ctx, BCS0);
+ GEM_BUG_ON(IS_ERR(ce));
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
- u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
- u32 phys_sz = sz % (max_block_size + 1);
- u32 val = prandom_u32_state(&prng);
+ u32 val = prandom_u32_state(prng);
+ u64 total = ce->vm->total;
+ u32 phys_sz;
+ u32 sz;
+ u32 *vaddr;
u32 i;
+ if (i915_is_ggtt(ce->vm))
+ total = div64_u64(total, thread->n_cpus);
+
+ sz = min_t(u64, total >> 4, prandom_u32_state(prng));
+ phys_sz = sz % (max_block_size + 1);
+
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -166,9 +393,7 @@ static int igt_copy_blt(void *arg)
if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
dst->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_copy_blt(src, dst, ce);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@@ -205,12 +430,85 @@ err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_context_put(ce);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+static int igt_threaded_blt(struct drm_i915_private *i915,
+ int (*blt_fn)(void *arg))
+{
+ struct igt_thread_arg *thread;
+ struct task_struct **tsk;
+ I915_RND_STATE(prng);
+ unsigned int n_cpus;
+ unsigned int i;
+ int err = 0;
+
+ n_cpus = num_online_cpus() + 1;
+
+ tsk = kcalloc(n_cpus, sizeof(struct task_struct *), GFP_KERNEL);
+ if (!tsk)
+ return 0;
+
+ thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
+ if (!thread) {
+ kfree(tsk);
+ return 0;
+ }
+
+ for (i = 0; i < n_cpus; ++i) {
+ thread[i].i915 = i915;
+ thread[i].n_cpus = n_cpus;
+ thread[i].prng =
+ I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
+
+ tsk[i] = kthread_run(blt_fn, &thread[i], "igt/blt-%d", i);
+ if (IS_ERR(tsk[i])) {
+ err = PTR_ERR(tsk[i]);
+ break;
+ }
+
+ get_task_struct(tsk[i]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ for (i = 0; i < n_cpus; ++i) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[i]))
+ continue;
+
+ status = kthread_stop(tsk[i]);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk[i]);
+ }
+
+ kfree(tsk);
+ kfree(thread);
+
return err;
}
+static int igt_fill_blt(void *arg)
+{
+ return igt_threaded_blt(arg, igt_fill_blt_thread);
+}
+
+static int igt_copy_blt(void *arg)
+{
+ return igt_threaded_blt(arg, igt_copy_blt_thread);
+}
+
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(perf_fill_blt),
+ SUBTEST(perf_copy_blt),
SUBTEST(igt_fill_blt),
SUBTEST(igt_copy_blt),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 94a15e3f6db8..34932871b3a5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -25,9 +25,7 @@ static int mock_phys_object(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
- mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
goto out_obj;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 57ece53c1075..6718da20f35d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"
@@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma,
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_unpin_map(obj);
+ intel_gt_chipset_flush(vma->vm->gt);
+
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -101,40 +104,35 @@ err:
return ERR_PTR(err);
}
-int igt_gpu_fill_dw(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u64 offset,
- unsigned long count,
- u32 val)
+int igt_gpu_fill_dw(struct intel_context *ce,
+ struct i915_vma *vma, u64 offset,
+ unsigned long count, u32 val)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq;
struct i915_vma *batch;
unsigned int flags;
int err;
- GEM_BUG_ON(vma->size > vm->total);
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
GEM_BUG_ON(!i915_vma_is_pinned(vma));
batch = igt_emit_store_dw(vma, offset, count, val);
if (IS_ERR(batch))
return PTR_ERR(batch);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
+ if (INTEL_GEN(ce->vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
if (err)
goto err_request;
@@ -156,9 +154,7 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
i915_request_add(rq);
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
return 0;
@@ -167,7 +163,6 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 361a7ef866b0..4221cf84d175 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -11,9 +11,11 @@
struct i915_request;
struct i915_gem_context;
-struct intel_engine_cs;
struct i915_vma;
+struct intel_context;
+struct intel_engine_cs;
+
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
@@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma,
unsigned long count,
u32 val);
-int igt_gpu_fill_dw(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u64 offset,
- unsigned long count,
- u32 val);
+int igt_gpu_fill_dw(struct intel_context *ce,
+ struct i915_vma *vma, u64 offset,
+ unsigned long count, u32 val);
#endif /* __IGT_GEM_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index be8974ccff24..29b8984f0e47 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -13,7 +13,6 @@ mock_context(struct drm_i915_private *i915,
{
struct i915_gem_context *ctx;
struct i915_gem_engines *e;
- int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -23,6 +22,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ i915_gem_context_set_persistence(ctx);
+
mutex_init(&ctx->engines_mutex);
e = default_engines(ctx);
if (IS_ERR(e))
@@ -30,13 +31,8 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->hw_id_link);
mutex_init(&ctx->mutex);
- ret = i915_gem_context_pin_hw_id(ctx);
- if (ret < 0)
- goto err_engines;
-
if (name) {
struct i915_ppgtt *ppgtt;
@@ -48,14 +44,15 @@ mock_context(struct drm_i915_private *i915,
if (!ppgtt)
goto err_put;
+ mutex_lock(&ctx->mutex);
__set_ppgtt(ctx, &ppgtt->vm);
+ mutex_unlock(&ctx->mutex);
+
i915_vm_put(&ppgtt->vm);
}
return ctx;
-err_engines:
- free_engines(rcu_access_pointer(ctx->engines));
err_free:
kfree(ctx);
return NULL;
@@ -73,7 +70,7 @@ void mock_context_close(struct i915_gem_context *ctx)
void mock_init_contexts(struct drm_i915_private *i915)
{
- init_contexts(i915);
+ init_contexts(&i915->gem.contexts);
}
struct i915_gem_context *
@@ -82,8 +79,6 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
struct i915_gem_context *ctx;
int err;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 09c68dda2098..55317081d48b 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -120,7 +120,6 @@ __dma_fence_signal__notify(struct dma_fence *fence,
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
- lockdep_assert_irqs_disabled();
list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
@@ -134,9 +133,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
+ unsigned long flags;
LIST_HEAD(signal);
- spin_lock(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
@@ -182,30 +182,23 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
}
}
- spin_unlock(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
struct list_head cb_list;
- spin_lock(&rq->lock);
+ spin_lock_irqsave(&rq->lock, flags);
list_replace(&rq->fence.cb_list, &cb_list);
__dma_fence_signal__timestamp(&rq->fence, timestamp);
__dma_fence_signal__notify(&rq->fence, &cb_list);
- spin_unlock(&rq->lock);
+ spin_unlock_irqrestore(&rq->lock, flags);
i915_request_put(rq);
}
}
-void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
-{
- local_irq_disable();
- intel_engine_breadcrumbs_irq(engine);
- local_irq_enable();
-}
-
static void signal_irq_work(struct irq_work *work)
{
struct intel_engine_cs *engine =
@@ -275,7 +268,6 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
lockdep_assert_held(&rq->lock);
- lockdep_assert_irqs_disabled();
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
@@ -325,7 +317,6 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
lockdep_assert_held(&rq->lock);
- lockdep_assert_irqs_disabled();
/*
* We must wait for b->irq_lock so that we know the interrupt handler
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index f55691d151ae..ee9d2bcd2c13 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -13,6 +13,7 @@
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_ring.h"
static struct i915_global_context {
struct i915_global base;
@@ -62,7 +63,7 @@ int __intel_context_do_pin(struct intel_context *ce)
}
err = 0;
- with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
@@ -134,10 +135,11 @@ static int __context_pin_state(struct i915_vma *vma)
static void __context_unpin_state(struct i915_vma *vma)
{
- __i915_vma_unpin(vma);
i915_vma_make_shrinkable(vma);
+ __i915_vma_unpin(vma);
}
+__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -150,6 +152,7 @@ static void __intel_context_retire(struct i915_active *active)
intel_timeline_unpin(ce->timeline);
intel_ring_unpin(ce->ring);
+
intel_context_put(ce);
}
@@ -219,12 +222,20 @@ intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct i915_address_space *vm;
+
GEM_BUG_ON(!engine->cops);
kref_init(&ce->ref);
ce->gem_context = ctx;
- ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm);
+ if (vm)
+ ce->vm = i915_vm_get(vm);
+ else
+ ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
+ rcu_read_unlock();
if (ctx->timeline)
ce->timeline = intel_timeline_get(ctx->timeline);
@@ -238,7 +249,7 @@ intel_context_init(struct intel_context *ce,
mutex_init(&ce->pin_mutex);
- i915_active_init(ctx->i915, &ce->active,
+ i915_active_init(&ce->active,
__intel_context_active, __intel_context_retire);
}
@@ -298,14 +309,14 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
/* Only suitable for use in remotely modifying this context */
GEM_BUG_ON(rq->hw_context == ce);
- if (rq->timeline != tl) { /* beware timeline sharing */
+ if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
err = mutex_lock_interruptible_nested(&tl->mutex,
SINGLE_DEPTH_NESTING);
if (err)
return err;
/* Queue this switch after current activity by this context. */
- err = i915_active_request_set(&tl->last_request, rq);
+ err = i915_active_fence_set(&tl->last_request, rq);
mutex_unlock(&tl->mutex);
if (err)
return err;
@@ -319,7 +330,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
* words transfer the pinned ce object to tracked active request.
*/
GEM_BUG_ON(i915_active_is_idle(&ce->active));
- return i915_active_ref(&ce->active, rq->timeline, rq);
+ return i915_active_add_request(&ce->active, rq);
}
struct i915_request *intel_context_create_request(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index dd742ac2fbdb..68b3d317d959 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
+#include "intel_ring_types.h"
#include "intel_timeline_types.h"
void intel_context_init(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index bf9cedfccbf0..6959b05ae5f8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -58,6 +58,7 @@ struct intel_context {
u32 *lrc_reg_state;
u64 lrc_desc;
+ u32 tag; /* cookie passed to HW to track this context on submission */
unsigned int active_count; /* protected by timeline->mutex */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 22aab8593abf..bc3b72bfa9e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -19,6 +19,7 @@
#include "intel_workarounds.h"
struct drm_printer;
+struct intel_gt;
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -89,38 +90,6 @@ struct drm_printer;
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-enum intel_engine_hangcheck_action {
- ENGINE_IDLE = 0,
- ENGINE_WAIT,
- ENGINE_ACTIVE_SEQNO,
- ENGINE_ACTIVE_HEAD,
- ENGINE_ACTIVE_SUBUNITS,
- ENGINE_WAIT_KICK,
- ENGINE_DEAD,
-};
-
-static inline const char *
-hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
-{
- switch (a) {
- case ENGINE_IDLE:
- return "idle";
- case ENGINE_WAIT:
- return "wait";
- case ENGINE_ACTIVE_SEQNO:
- return "active seqno";
- case ENGINE_ACTIVE_HEAD:
- return "active head";
- case ENGINE_ACTIVE_SUBUNITS:
- return "active subunits";
- case ENGINE_WAIT_KICK:
- return "wait kick";
- case ENGINE_DEAD:
- return "dead";
- }
-
- return "unknown";
-}
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
@@ -206,126 +175,13 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size);
-int intel_ring_pin(struct intel_ring *ring);
-void intel_ring_reset(struct intel_ring *ring, u32 tail);
-unsigned int intel_ring_update_space(struct intel_ring *ring);
-void intel_ring_unpin(struct intel_ring *ring);
-void intel_ring_free(struct kref *ref);
-
-static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
-{
- kref_get(&ring->ref);
- return ring;
-}
-
-static inline void intel_ring_put(struct intel_ring *ring)
-{
- kref_put(&ring->ref, intel_ring_free);
-}
-
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
-int __must_check intel_ring_cacheline_align(struct i915_request *rq);
-
-u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
-
-static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
-{
- /* Dummy function.
- *
- * This serves as a placeholder in the code so that the reader
- * can compare against the preceding intel_ring_begin() and
- * check that the number of dwords emitted matches the space
- * reserved for the command packet (i.e. the value passed to
- * intel_ring_begin()).
- */
- GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
-}
-
-static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
-{
- return pos & (ring->size - 1);
-}
-
-static inline bool
-intel_ring_offset_valid(const struct intel_ring *ring,
- unsigned int pos)
-{
- if (pos & -ring->size) /* must be strictly within the ring */
- return false;
-
- if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
- return false;
-
- return true;
-}
-
-static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
-{
- /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - rq->ring->vaddr;
- GEM_BUG_ON(offset > rq->ring->size);
- return intel_ring_wrap(rq->ring, offset);
-}
-
-static inline void
-assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
-{
- GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
-
- /*
- * "Ring Buffer Use"
- * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
- * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
- * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- *
- * We use ring->head as the last known location of the actual RING_HEAD,
- * it may have advanced but in the worst case it is equally the same
- * as ring->head and so we should never program RING_TAIL to advance
- * into the same cacheline as ring->head.
- */
-#define cacheline(a) round_down(a, CACHELINE_BYTES)
- GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
- tail < ring->head);
-#undef cacheline
-}
-
-static inline unsigned int
-intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
-{
- /* Whilst writes to the tail are strictly order, there is no
- * serialisation between readers and the writers. The tail may be
- * read by i915_request_retire() just as it is being updated
- * by execlists, as although the breadcrumb is complete, the context
- * switch hasn't been seen.
- */
- assert_ring_tail_valid(ring, tail);
- ring->tail = tail;
- return tail;
-}
-
-static inline unsigned int
-__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
-{
- /*
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- */
- GEM_BUG_ON(!is_power_of_2(size));
- return (head - tail - CACHELINE_BYTES) & (size - 1);
-}
-
-int intel_engines_init_mmio(struct drm_i915_private *i915);
-int intel_engines_setup(struct drm_i915_private *i915);
-int intel_engines_init(struct drm_i915_private *i915);
-void intel_engines_cleanup(struct drm_i915_private *i915);
+int intel_engines_init_mmio(struct intel_gt *gt);
+int intel_engines_setup(struct intel_gt *gt);
+int intel_engines_init(struct intel_gt *gt);
+void intel_engines_cleanup(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -349,7 +205,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine);
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
@@ -422,8 +277,9 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
engine->serial++; /* contexts lost */
}
-bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct intel_gt *gt);
+bool intel_engine_is_idle(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
void intel_engines_reset_default_submission(struct intel_gt *gt);
@@ -434,61 +290,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-static inline void intel_engine_context_in(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (READ_ONCE(engine->stats.enabled) == 0)
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- if (engine->stats.active++ == 0)
- engine->stats.start = ktime_get();
- GEM_BUG_ON(engine->stats.active == 0);
- }
-
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
-static inline void intel_engine_context_out(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (READ_ONCE(engine->stats.enabled) == 0)
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- ktime_t last;
-
- if (engine->stats.active && --engine->stats.active == 0) {
- /*
- * Decrement the active context count and in case GPU
- * is now idle add up to the running total.
- */
- last = ktime_sub(ktime_get(), engine->stats.start);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- } else if (engine->stats.active == 0) {
- /*
- * After turning on engine stats, context out might be
- * the first event in which case we account from the
- * time stats gathering was turned on.
- */
- last = ktime_sub(ktime_get(), engine->stats.enabled_at);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- }
- }
-
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);
@@ -525,4 +326,22 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
#define ENGINE_MOCK 1
#define ENGINE_VIRTUAL 2
+static inline bool
+intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return false;
+
+ return intel_engine_has_preemption(engine);
+}
+
+static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return false;
+
+ return intel_engine_has_semaphores(engine);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4ce8626b140e..5ca3ec911e50 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -37,6 +37,7 @@
#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"
+#include "intel_ring.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -277,6 +278,9 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
+ if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
+ return -EINVAL;
+
if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
@@ -293,6 +297,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
engine->id = id;
+ engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
engine->i915 = gt->i915;
engine->gt = gt;
@@ -304,6 +309,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->instance = info->instance;
__sprint_engine_name(engine);
+ engine->props.heartbeat_interval_ms =
+ CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.preempt_timeout_ms =
+ CONFIG_DRM_I915_PREEMPT_TIMEOUT;
+ engine->props.stop_timeout_ms =
+ CONFIG_DRM_I915_STOP_TIMEOUT;
+ engine->props.timeslice_duration_ms =
+ CONFIG_DRM_I915_TIMESLICE_DURATION;
+
/*
* To be overridden by the backend on setup. However to facilitate
* cleanup on error during setup, we always provide the destroy vfunc.
@@ -328,6 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
intel_engine_sanitize_mmio(engine);
gt->engine_class[info->class][info->instance] = engine;
+ gt->engine[id] = engine;
intel_engine_add_user(engine);
gt->i915->engine[id] = engine;
@@ -365,38 +380,40 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
}
}
-static void intel_setup_engine_capabilities(struct drm_i915_private *i915)
+static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
__setup_engine_capabilities(engine);
}
/**
* intel_engines_cleanup() - free the resources allocated for Command Streamers
- * @i915: the i915 devic
+ * @gt: pointer to struct intel_gt
*/
-void intel_engines_cleanup(struct drm_i915_private *i915)
+void intel_engines_cleanup(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
engine->destroy(engine);
- i915->engine[id] = NULL;
+ gt->engine[id] = NULL;
+ gt->i915->engine[id] = NULL;
}
}
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
- * @i915: the i915 device
+ * @gt: pointer to struct intel_gt
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init_mmio(struct drm_i915_private *i915)
+int intel_engines_init_mmio(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_device_info *device_info = mkwrite_device_info(i915);
const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
unsigned int mask = 0;
@@ -414,7 +431,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
if (!HAS_ENGINE(i915, i))
continue;
- err = intel_engine_setup(&i915->gt, i);
+ err = intel_engine_setup(gt, i);
if (err)
goto cleanup;
@@ -431,36 +448,36 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
- intel_gt_check_and_clear_faults(&i915->gt);
+ intel_gt_check_and_clear_faults(gt);
- intel_setup_engine_capabilities(i915);
+ intel_setup_engine_capabilities(gt);
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
/**
* intel_engines_init() - init the Engine Command Streamers
- * @i915: i915 device private
+ * @gt: pointer to struct intel_gt
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_i915_private *i915)
+int intel_engines_init(struct intel_gt *gt)
{
int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(i915))
+ if (HAS_EXECLISTS(gt->i915))
init = intel_execlists_submission_init;
else
init = intel_ring_submission_init;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
err = init(engine);
if (err)
goto cleanup;
@@ -469,7 +486,7 @@ int intel_engines_init(struct drm_i915_private *i915)
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
@@ -513,7 +530,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
unsigned int flags;
flags = PIN_GLOBAL;
- if (!HAS_LLC(engine->i915))
+ if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
@@ -597,7 +614,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_active(engine, ENGINE_PHYSICAL);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
- intel_engine_init_hangcheck(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
@@ -616,26 +632,26 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
/**
* intel_engines_setup- setup engine state not requiring hw access
- * @i915: Device to setup.
+ * @gt: pointer to struct intel_gt
*
* Initializes engine structure members shared between legacy and execlists
* submission modes which do not require hardware access.
*
* Typically done early in the submission mode specific engine setup stage.
*/
-int intel_engines_setup(struct drm_i915_private *i915)
+int intel_engines_setup(struct intel_gt *gt)
{
int (*setup)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(i915))
+ if (HAS_EXECLISTS(gt->i915))
setup = intel_execlists_submission_setup;
else
setup = intel_ring_submission_setup;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
err = intel_engine_setup_common(engine);
if (err)
goto cleanup;
@@ -653,7 +669,7 @@ int intel_engines_setup(struct drm_i915_private *i915)
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
@@ -680,6 +696,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
engine->status_page.vma))
goto out_frame;
+ mutex_lock(&frame->timeline.mutex);
+
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
@@ -688,18 +706,22 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
- frame->rq.timeline = &frame->timeline;
+ rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
dw = intel_timeline_pin(&frame->timeline);
if (dw < 0)
goto out_timeline;
+ spin_lock_irq(&engine->active.lock);
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+ spin_unlock_irq(&engine->active.lock);
+
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
intel_timeline_unpin(&frame->timeline);
out_timeline:
+ mutex_unlock(&frame->timeline.mutex);
intel_timeline_fini(&frame->timeline);
out_frame:
kfree(frame);
@@ -730,6 +752,7 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
+ static struct lock_class_key kernel;
struct intel_context *ce;
int err;
@@ -745,6 +768,14 @@ create_kernel_context(struct intel_engine_cs *engine)
return ERR_PTR(err);
}
+ /*
+ * Give our perma-pinned kernel timelines a separate lockdep class,
+ * so that we can use them from within the normal user timelines
+ * should we need to inject GPU operations during their request
+ * construction.
+ */
+ lockdep_set_class(&ce->timeline->mutex, &kernel);
+
return ce;
}
@@ -814,8 +845,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
+ if (engine->kernel_context) {
+ intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+ }
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
@@ -851,6 +884,21 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
return bbaddr;
}
+static unsigned long stop_timeout(const struct intel_engine_cs *engine)
+{
+ if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
+ return 0;
+
+ /*
+ * If we are doing a normal GPU reset, we can take our time and allow
+ * the engine to quiesce. We've stopped submission to the engine, and
+ * if we wait long enough an innocent context should complete and
+ * leave the engine idle. So they should not be caught unaware by
+ * the forthcoming GPU reset (which usually follows the stop_cs)!
+ */
+ return READ_ONCE(engine->props.stop_timeout_ms);
+}
+
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
@@ -868,7 +916,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
err = 0;
if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
- 1000, 0,
+ 1000, stop_timeout(engine),
NULL)) {
GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
err = -ETIMEDOUT;
@@ -948,6 +996,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@@ -965,7 +1014,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
- for_each_instdone_slice_subslice(i915, slice, subslice) {
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(engine, slice, subslice,
GEN7_SAMPLER_INSTDONE);
@@ -1031,6 +1080,25 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ if (__tasklet_is_scheduled(t)) {
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
+ }
+
+ /* Otherwise flush the tasklet if it was running on another cpu */
+ tasklet_unlock_wait(t);
+}
+
/**
* intel_engine_is_idle() - Report if the engine has finished process all work
* @engine: the intel_engine_cs
@@ -1049,21 +1117,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
/* Waiting to drain ELSP? */
if (execlists_active(&engine->execlists)) {
- struct tasklet_struct *t = &engine->execlists.tasklet;
-
synchronize_hardirq(engine->i915->drm.pdev->irq);
- local_bh_disable();
- if (tasklet_trylock(t)) {
- /* Must wait for any GPU reset in progress. */
- if (__tasklet_is_enabled(t))
- t->func(t->data);
- tasklet_unlock(t);
- }
- local_bh_enable();
-
- /* Otherwise flush the tasklet if it was on another cpu */
- tasklet_unlock_wait(t);
+ intel_engine_flush_submission(engine);
if (execlists_active(&engine->execlists))
return false;
@@ -1093,7 +1149,7 @@ bool intel_engines_are_idle(struct intel_gt *gt)
if (!READ_ONCE(gt->awake))
return true;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1106,7 +1162,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->set_default_submission(engine);
}
@@ -1118,6 +1174,8 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
+ case 4:
+ return !IS_I965G(engine->i915); /* who knows! */
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
@@ -1193,6 +1251,38 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *tl;
+
+ /*
+ * Even though we are holding the engine->active.lock here, there
+ * is no control over the submission queue per-se and we are
+ * inspecting the active state at a random point in time, with an
+ * unknown queue. Play safe and make sure the timeline remains valid.
+ * (Only being used for pretty printing, one extra kref shouldn't
+ * cause a camel stampede!)
+ */
+ rcu_read_lock();
+ tl = rcu_dereference(rq->timeline);
+ if (!kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+
+ return tl;
+}
+
+static const char *repr_timer(const struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return "inactive";
+
+ if (timer_pending(t))
+ return "active";
+
+ return "expired";
+}
+
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
@@ -1254,19 +1344,21 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
unsigned int idx;
u8 read, write;
- drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
- ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
- ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
- num_entries);
+ drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
+ yesno(test_bit(TASKLET_STATE_SCHED,
+ &engine->execlists.tasklet.state)),
+ enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
+ repr_timer(&engine->execlists.preempt),
+ repr_timer(&engine->execlists.timer));
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
- drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
- read, write,
- yesno(test_bit(TASKLET_STATE_SCHED,
- &engine->execlists.tasklet.state)),
- enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
+ drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+ read, write, num_entries);
+
if (read >= num_entries)
read = 0;
if (write >= num_entries)
@@ -1280,33 +1372,45 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
execlists_active_lock_bh(execlists);
+ rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
char hdr[80];
int len;
len = snprintf(hdr, sizeof(hdr),
- "\t\tActive[%d: ",
+ "\t\tActive[%d]: ",
(int)(port - execlists->active));
- if (!i915_request_signaled(rq))
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = get_timeline(rq);
+
len += snprintf(hdr + len, sizeof(hdr) - len,
"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
+ tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq));
+
+ if (tl)
+ intel_timeline_put(tl);
+ }
snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
+ struct intel_timeline *tl = get_timeline(rq);
char hdr[80];
snprintf(hdr, sizeof(hdr),
"\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
(int)(port - execlists->pending),
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
+ tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq));
print_request(m, rq, hdr);
+
+ if (tl)
+ intel_timeline_put(tl);
}
+ rcu_read_unlock();
execlists_active_unlock_bh(execlists);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
@@ -1372,8 +1476,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
- drm_printf(m, "\tHangcheck: %d ms ago\n",
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ drm_printf(m, "\tHeartbeat: %d ms ago\n",
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+ rcu_read_unlock();
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
i915_reset_count(error));
@@ -1383,6 +1492,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
spin_lock_irqsave(&engine->active.lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
+ struct intel_timeline *tl = get_timeline(rq);
+
print_request(m, rq, "\t\tactive ");
drm_printf(m, "\t\tring->start: 0x%08x\n",
@@ -1395,18 +1506,27 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
- drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
- rq->timeline->hwsp_offset);
+
+ if (tl) {
+ drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
+ tl->hwsp_offset);
+ intel_timeline_put(tl);
+ }
print_request_ring(m, rq);
+
+ if (rq->hw_context->lrc_reg_state) {
+ drm_printf(m, "Logical Ring Context:\n");
+ hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
+ }
}
spin_unlock_irqrestore(&engine->active.lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
- wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
new file mode 100644
index 000000000000..06aa14c7aa8c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -0,0 +1,234 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_request.h"
+
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gt.h"
+#include "intel_reset.h"
+
+/*
+ * While the engine is active, we send a periodic pulse along the engine
+ * to check on its health and to flush any idle-barriers. If that request
+ * is stuck, and we fail to preempt it, we declare the engine hung and
+ * issue a reset -- in the hope that it restores progress.
+ */
+
+static bool next_heartbeat(struct intel_engine_cs *engine)
+{
+ long delay;
+
+ delay = READ_ONCE(engine->props.heartbeat_interval_ms);
+ if (!delay)
+ return false;
+
+ delay = msecs_to_jiffies_timeout(delay);
+ if (delay >= HZ)
+ delay = round_jiffies_up_relative(delay);
+ schedule_delayed_work(&engine->heartbeat.work, delay);
+
+ return true;
+}
+
+static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
+ i915_request_add_active_barriers(rq);
+}
+
+static void show_heartbeat(const struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct drm_printer p = drm_debug_printer("heartbeat");
+
+ intel_engine_dump(engine, &p,
+ "%s heartbeat {prio:%d} not ticking\n",
+ engine->name,
+ rq->sched.attr.priority);
+}
+
+static void heartbeat(struct work_struct *wrk)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+ };
+ struct intel_engine_cs *engine =
+ container_of(wrk, typeof(*engine), heartbeat.work.work);
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return;
+
+ rq = engine->heartbeat.systole;
+ if (rq && i915_request_completed(rq)) {
+ i915_request_put(rq);
+ engine->heartbeat.systole = NULL;
+ }
+
+ if (intel_gt_is_wedged(engine->gt))
+ goto out;
+
+ if (engine->heartbeat.systole) {
+ if (engine->schedule &&
+ rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
+ /*
+ * Gradually raise the priority of the heartbeat to
+ * give high priority work [which presumably desires
+ * low latency and no jitter] the chance to naturally
+ * complete before being preempted.
+ */
+ attr.priority = I915_PRIORITY_MASK;
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority = I915_PRIORITY_BARRIER;
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable();
+ } else {
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ show_heartbeat(rq, engine);
+
+ intel_gt_handle_error(engine->gt, engine->mask,
+ I915_ERROR_CAPTURE,
+ "stopped heartbeat on %s",
+ engine->name);
+ }
+ goto out;
+ }
+
+ if (engine->wakeref_serial == engine->serial)
+ goto out;
+
+ mutex_lock(&ce->timeline->mutex);
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq))
+ goto unlock;
+
+ idle_pulse(engine, rq);
+ if (i915_modparams.enable_hangcheck)
+ engine->heartbeat.systole = i915_request_get(rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out:
+ if (!next_heartbeat(engine))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ intel_engine_pm_put(engine);
+}
+
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ return;
+
+ next_heartbeat(engine);
+}
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
+{
+ if (cancel_delayed_work(&engine->heartbeat.work))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+}
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+{
+ INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
+}
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ int err;
+
+ /* Send one last pulse to clean up persistent hogs before disabling */
+ if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
+ err = intel_engine_pulse(engine);
+ if (err)
+ return err;
+ }
+
+ WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
+
+ if (intel_engine_pm_get_if_awake(engine)) {
+ if (delay)
+ intel_engine_unpark_heartbeat(engine);
+ else
+ intel_engine_park_heartbeat(engine);
+ intel_engine_pm_put(engine);
+ }
+
+ return 0;
+}
+
+int intel_engine_pulse(struct intel_engine_cs *engine)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (!intel_engine_has_preemption(engine))
+ return -ENODEV;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+ if (mutex_lock_interruptible(&ce->timeline->mutex))
+ goto out_rpm;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unlock;
+ }
+
+ rq->flags |= I915_REQUEST_SENTINEL;
+ idle_pulse(engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+int intel_engine_flush_barriers(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ if (llist_empty(&engine->barrier_tasks))
+ return 0;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ idle_pulse(engine, rq);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_heartbeat.c"
+#endif
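For illustration, a minimal sketch (not part of this series) of how the new per-engine heartbeat control might be exercised by a caller. The helper name is hypothetical, but it only relies on intel_engine_set_heartbeat() and the gt-based for_each_engine() iterator used throughout this series:

	/*
	 * Hypothetical helper: disable the heartbeat on every engine of a GT.
	 * A delay of 0 turns the heartbeat off; when the preempt-timeout
	 * config is active, intel_engine_set_heartbeat() first sends one
	 * final pulse so persistent work is flushed before monitoring stops.
	 */
	static int __maybe_unused heartbeats_disable(struct intel_gt *gt)
	{
		struct intel_engine_cs *engine;
		enum intel_engine_id id;
		int err;

		for_each_engine(engine, gt, id) {
			err = intel_engine_set_heartbeat(engine, 0);
			if (err)
				return err;
		}

		return 0;
	}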
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
new file mode 100644
index 000000000000..a7b8c0f9e005
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_HEARTBEAT_H
+#define INTEL_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay);
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_pulse(struct intel_engine_cs *engine);
+int intel_engine_flush_barriers(struct intel_engine_cs *engine);
+
+#endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 7f647243b3b9..874d82677179 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -7,10 +7,13 @@
#include "i915_drv.h"
#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_ring.h"
static int __engine_unpark(struct intel_wakeref *wf)
{
@@ -33,7 +36,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
if (engine->unpark)
engine->unpark(engine);
- intel_engine_init_hangcheck(engine);
+ intel_engine_unpark_heartbeat(engine);
return 0;
}
@@ -103,14 +106,14 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
/* Context switch failed, hope for the best! Maybe reset? */
goto out_unlock;
- intel_timeline_enter(rq->timeline);
+ intel_timeline_enter(i915_request_timeline(rq));
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
i915_request_add_active_barriers(rq);
/* Install ourselves as a preemption barrier */
- rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
__i915_request_commit(rq);
/* Release our exclusive hold on the engine */
@@ -123,6 +126,19 @@ out_unlock:
return result;
}
+static void call_idle_barriers(struct intel_engine_cs *engine)
+{
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ struct dma_fence_cb *cb =
+ container_of((struct list_head *)node,
+ typeof(*cb), node);
+
+ cb->func(NULL, cb);
+ }
+}
+
static int __engine_park(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
@@ -142,6 +158,9 @@ static int __engine_park(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
+ call_idle_barriers(engine); /* cleanup after wedging */
+
+ intel_engine_park_heartbeat(engine);
intel_engine_disarm_breadcrumbs(engine);
intel_engine_pool_park(&engine->pool);
@@ -169,9 +188,10 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
+ struct intel_runtime_pm *rpm = engine->uncore->rpm;
intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
+ intel_engine_init_heartbeat(engine);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 379a91780bd4..397186818305 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -61,6 +61,7 @@ static int pool_active(struct i915_active *ref)
return 0;
}
+__i915_active_call
static void pool_retire(struct i915_active *ref)
{
struct intel_engine_pool_node *node =
@@ -94,7 +95,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return ERR_PTR(-ENOMEM);
node->pool = pool;
- i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+ i915_active_init(&node->active, pool_active, pool_retire);
obj = i915_gem_object_create_internal(engine->i915, sz);
if (IS_ERR(obj)) {
@@ -109,9 +110,19 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return node;
}
+static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
+{
+ if (intel_engine_is_virtual(engine))
+ engine = intel_virtual_engine_get_sibling(engine, 0);
+
+ GEM_BUG_ON(!engine);
+ return &engine->pool;
+}
+
struct intel_engine_pool_node *
-intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
+intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
{
+ struct intel_engine_pool *pool = lookup_pool(engine);
struct intel_engine_pool_node *node;
struct list_head *list;
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
index 8d069efd9457..1bd89cadc3b7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -12,13 +12,13 @@
#include "i915_request.h"
struct intel_engine_pool_node *
-intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
+intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
static inline int
intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
struct i915_request *rq)
{
- return i915_active_ref(&node->active, rq->timeline, rq);
+ return i915_active_add_request(&node->active, rq);
}
static inline void
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 9dd8c299cb2d..758f0e8ec672 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -15,6 +15,7 @@
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "i915_gem.h"
#include "i915_pmu.h"
@@ -58,6 +59,7 @@ struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_gt;
+struct intel_ring;
struct intel_uncore;
typedef u8 intel_engine_mask_t;
@@ -76,40 +78,6 @@ struct intel_instdone {
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
-struct intel_engine_hangcheck {
- u64 acthd;
- u32 last_ring;
- u32 last_head;
- unsigned long action_timestamp;
- struct intel_instdone instdone;
-};
-
-struct intel_ring {
- struct kref ref;
- struct i915_vma *vma;
- void *vaddr;
-
- /*
- * As we have two types of rings, one global to the engine used
- * by ringbuffer submission and those that are exclusive to a
- * context used by execlists, we have to play safe and allow
- * atomic updates to the pin_count. However, the actual pinning
- * of the context is either done during initialisation for
- * ringbuffer submission or serialised as part of the context
- * pinning for execlists, and so we do not need a mutex ourselves
- * to serialise intel_ring_pin/intel_ring_unpin.
- */
- atomic_t pin_count;
-
- u32 head;
- u32 tail;
- u32 emit;
-
- u32 space;
- u32 size;
- u32 effective_size;
-};
-
/*
* we use a single page to load ctx workarounds so all of these
* values are referred in terms of dwords
@@ -148,6 +116,7 @@ enum intel_engine_id {
VECS1,
#define _VECS(n) (VECS0 + (n))
I915_NUM_ENGINES
+#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
struct st_preempt_hang {
@@ -174,6 +143,11 @@ struct intel_engine_execlists {
struct timer_list timer;
/**
+ * @preempt: reset the current context if it fails to give way
+ */
+ struct timer_list preempt;
+
+ /**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
*/
struct i915_priolist default_priolist;
@@ -303,10 +277,12 @@ struct intel_engine_cs {
u8 uabi_class;
u8 uabi_instance;
+ u32 uabi_capabilities;
u32 context_size;
u32 mmio_base;
- u32 uabi_capabilities;
+ unsigned int context_tag;
+#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
struct rb_node uabi_node;
@@ -323,6 +299,11 @@ struct intel_engine_cs {
intel_engine_mask_t saturated; /* submitting semaphores too late? */
+ struct {
+ struct delayed_work work;
+ struct i915_request *systole;
+ } heartbeat;
+
unsigned long serial;
unsigned long wakeref_serial;
@@ -473,14 +454,13 @@ struct intel_engine_cs {
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
- struct intel_engine_hangcheck hangcheck;
-
#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
unsigned int flags;
@@ -539,6 +519,13 @@ struct intel_engine_cs {
*/
ktime_t total;
} stats;
+
+ struct {
+ unsigned long heartbeat_interval_ms;
+ unsigned long preempt_timeout_ms;
+ unsigned long stop_timeout_ms;
+ unsigned long timeslice_duration_ms;
+ } props;
};
static inline bool
@@ -583,20 +570,24 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
-#define instdone_slice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+static inline bool
+intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
+{
+ return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
+}
-#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+#define instdone_has_slice(dev_priv___, sseu___, slice___) \
+ ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
- for ((slice__) = 0, (subslice__) = 0; \
- (slice__) < I915_MAX_SLICES; \
- (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
- (slice__) += ((subslice__) == 0)) \
- for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
- (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
+ (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
+ intel_sseu_has_subslice(sseu__, 0, subslice__))
+#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
+ for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
+ (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
+ (slice_) += ((subslice_) == 0)) \
+ for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
+ (instdone_has_subslice(dev_priv_, sseu_, slice_, \
+ subslice_)))
#endif /* __INTEL_ENGINE_TYPES_H__ */
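For clarity, a small hedged example of the reworked iterator signature, which now takes the sseu description explicitly instead of deriving it from dev_priv inside the macro (as intel_engine_get_instdone() does above); the counting helper itself is hypothetical:

	static unsigned int __maybe_unused
	count_instdone_units(struct drm_i915_private *i915)
	{
		const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
		unsigned int count = 0;
		int slice, subslice;

		for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
			count++;

		return count;
	}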
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 77cd5de83930..7f7150a733f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -160,10 +160,10 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
};
if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
- return -1;
+ return INVALID_ENGINE;
if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
- return -1;
+ return INVALID_ENGINE;
return map[ring->class].base + ring->instance;
}
@@ -171,23 +171,15 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
static void add_legacy_ring(struct legacy_ring *ring,
struct intel_engine_cs *engine)
{
- int idx;
-
if (engine->gt != ring->gt || engine->class != ring->class) {
ring->gt = engine->gt;
ring->class = engine->class;
ring->instance = 0;
}
- idx = legacy_ring_idx(ring);
- if (unlikely(idx == -1))
- return;
-
- GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
- ring->gt->engine[idx] = engine;
- ring->instance++;
-
- engine->legacy_idx = idx;
+ engine->legacy_idx = legacy_ring_idx(ring);
+ if (engine->legacy_idx != INVALID_ENGINE)
+ ring->instance++;
}
void intel_engines_driver_register(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 86e00a2db8a4..4294f146f13c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -112,6 +112,7 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
@@ -119,6 +120,8 @@
#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
+#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5)
+#define MI_SEMAPHORE_TOKEN_SHIFT 5
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
@@ -132,7 +135,10 @@
 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
+#define MI_LRI_CS_MMIO (1<<19)
#define MI_LRI_FORCE_POSTED (1<<12)
+#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
@@ -147,6 +153,7 @@
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -156,7 +163,8 @@
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
-#define MI_BATCH_RESOURCE_STREAMER (1<<10)
+#define MI_BATCH_RESOURCE_STREAMER REG_BIT(10)
+#define MI_BATCH_PREDICATE REG_BIT(15) /* HSW+ on RCS only */
/*
* 3D instructions used by the kernel
@@ -217,6 +225,7 @@
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
+#define PIPE_CONTROL_WRITE_TIMESTAMP (3<<14)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -224,7 +233,9 @@
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_L3_RO_CACHE_INVALIDATE REG_BIT(10) /* gen12 */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
@@ -235,6 +246,29 @@
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
+#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
+/* Opcodes for MI_MATH_INSTR */
+#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0)
+#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2)
+#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2)
+#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1)
+#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1)
+#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0)
+#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0)
+#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0)
+#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0)
+#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0)
+#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2)
+#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2)
+/* Registers used as operands in MI_MATH_INSTR */
+#define MI_MATH_REG(x) (x)
+#define MI_MATH_REG_SRCA 0x20
+#define MI_MATH_REG_SRCB 0x21
+#define MI_MATH_REG_ACCU 0x31
+#define MI_MATH_REG_ZF 0x32
+#define MI_MATH_REG_CF 0x33
+
/*
* Commands used only by the command parser
*/
@@ -251,7 +285,6 @@
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
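The MI_MATH helpers above compose into small ALU programs. As a hedged illustration (the emission helper and its 'cs' batch pointer are assumptions about the calling context, not part of this header), adding GPR0 and GPR1 and writing the result back to GPR0 looks like:

	static u32 *emit_add_gpr0_gpr1(u32 *cs)
	{
		*cs++ = MI_MATH(4); /* four ALU instruction dwords follow */
		*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(0));
		*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(1));
		*cs++ = MI_MATH_ADD;
		*cs++ = MI_MATH_STORE(MI_MATH_REG(0), MI_MATH_REG_ACCU);

		return cs;
	}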
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d48ec9a76ed1..4c26daf7ee46 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -6,7 +6,12 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_mocs.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
#include "intel_uncore.h"
+#include "intel_pm.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -18,15 +23,108 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
- intel_gt_init_hangcheck(gt);
intel_gt_init_reset(gt);
+ intel_gt_init_requests(gt);
intel_gt_pm_init_early(gt);
+
+ intel_rps_init_early(&gt->rps);
intel_uc_init_early(&gt->uc);
}
-void intel_gt_init_hw(struct drm_i915_private *i915)
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
+{
+ gt->ggtt = ggtt;
+
+ intel_gt_sanitize(gt, false);
+}
+
+static void init_unused_ring(struct intel_gt *gt, u32 base)
{
- i915->gt.ggtt = &i915->ggtt;
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_write(uncore, RING_CTL(base), 0);
+ intel_uncore_write(uncore, RING_HEAD(base), 0);
+ intel_uncore_write(uncore, RING_TAIL(base), 0);
+ intel_uncore_write(uncore, RING_START(base), 0);
+}
+
+static void init_unused_rings(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_I830(i915)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ init_unused_ring(gt, SRB2_BASE);
+ init_unused_ring(gt, SRB3_BASE);
+ } else if (IS_GEN(i915, 2)) {
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ } else if (IS_GEN(i915, 3)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, PRB2_BASE);
+ }
+}
+
+int intel_gt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ BUG_ON(!i915->kernel_context);
+ ret = intel_gt_terminally_wedged(gt);
+ if (ret)
+ return ret;
+
+ gt->last_init_time = ktime_get();
+
+ /* Double layer security blanket, see i915_gem_init() */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+ intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
+
+ if (IS_HASWELL(i915))
+ intel_uncore_write(uncore,
+ MI_PREDICATE_RESULT_2,
+ IS_HSW_GT3(i915) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(gt);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(gt, "init");
+
+ intel_gt_init_swizzling(gt);
+
+ /*
+ * At least 830 can leave some of the unused rings
+ * "active" (ie. head != tail) after resume which
+ * will prevent c3 entry. Make sure all unused rings
+ * are totally idle.
+ */
+ init_unused_rings(gt);
+
+ ret = i915_ppgtt_init_hw(gt);
+ if (ret) {
+ DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
+ goto out;
+ }
+
+ /* We can't enable contexts until all firmware is loaded */
+ ret = intel_uc_init_hw(&gt->uc);
+ if (ret) {
+ i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
+ goto out;
+ }
+
+ intel_mocs_init(gt);
+
+out:
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ return ret;
}
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
@@ -89,7 +187,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine_masked(engine, i915, engine_mask, id)
+ for_each_engine_masked(engine, gt, engine_mask, id)
gen8_clear_engine_error_register(engine);
}
}
@@ -100,7 +198,7 @@ static void gen6_check_faults(struct intel_gt *gt)
enum intel_engine_id id;
u32 fault;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
@@ -176,7 +274,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
/*
@@ -200,18 +298,18 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
wmb();
- if (INTEL_INFO(i915)->has_coherent_ggtt)
+ if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
return;
intel_gt_chipset_flush(gt);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- struct intel_uncore *uncore = gt->uncore;
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ unsigned long flags;
- spin_lock_irq(&uncore->lock);
+ spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&uncore->lock);
+ spin_unlock_irqrestore(&uncore->lock, flags);
}
}
@@ -222,7 +320,12 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
intel_gtt_chipset_flush();
}
-int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+void intel_gt_driver_register(struct intel_gt *gt)
+{
+ intel_rps_driver_register(&gt->rps);
+}
+
+static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
@@ -230,7 +333,7 @@ int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
int ret;
obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
@@ -256,11 +359,40 @@ err_unref:
return ret;
}
-void intel_gt_fini_scratch(struct intel_gt *gt)
+static void intel_gt_fini_scratch(struct intel_gt *gt)
{
i915_vma_unpin_and_release(&gt->scratch, 0);
}
+int intel_gt_init(struct intel_gt *gt)
+{
+ int err;
+
+ err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ if (err)
+ return err;
+
+ intel_gt_pm_init(gt);
+
+ return 0;
+}
+
+void intel_gt_driver_remove(struct intel_gt *gt)
+{
+ GEM_BUG_ON(gt->awake);
+}
+
+void intel_gt_driver_unregister(struct intel_gt *gt)
+{
+ intel_rps_driver_unregister(&gt->rps);
+}
+
+void intel_gt_driver_release(struct intel_gt *gt)
+{
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+}
+
void intel_gt_driver_late_release(struct intel_gt *gt)
{
intel_uc_driver_late_release(&gt->uc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 4920cb351f10..5436f8c30708 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -28,7 +28,14 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
}
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void intel_gt_init_hw(struct drm_i915_private *i915);
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int __must_check intel_gt_init_hw(struct intel_gt *gt);
+int intel_gt_init(struct intel_gt *gt);
+void intel_gt_driver_register(struct intel_gt *gt);
+
+void intel_gt_driver_unregister(struct intel_gt *gt);
+void intel_gt_driver_remove(struct intel_gt *gt);
+void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release(struct intel_gt *gt);
@@ -39,11 +46,6 @@ void intel_gt_clear_error_registers(struct intel_gt *gt,
void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
void intel_gt_chipset_flush(struct intel_gt *gt);
-void intel_gt_init_hangcheck(struct intel_gt *gt);
-
-int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
-void intel_gt_fini_scratch(struct intel_gt *gt);
-
static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
enum intel_gt_scratch_field field)
{
@@ -55,6 +57,4 @@ static inline bool intel_gt_is_wedged(struct intel_gt *gt)
return __intel_reset_failed(&gt->reset);
}
-void intel_gt_queue_hangcheck(struct intel_gt *gt);
-
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 34a4fb624bf7..973ee7eded64 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -11,6 +11,7 @@
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
+#include "intel_rps.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
@@ -77,7 +78,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
return guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
- return gen11_rps_irq_handler(gt, iir);
+ return gen11_rps_irq_handler(&gt->rps, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
@@ -336,7 +337,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(gt->i915, gt_iir[2]);
+ gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index fac75afed35b..6187cdd06646 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -4,17 +4,38 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/suspend.h>
+
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_params.h"
+#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_llc.h"
#include "intel_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
#include "intel_wakeref.h"
-static void pm_notify(struct drm_i915_private *i915, int state)
+static void user_forcewake(struct intel_gt *gt, bool suspend)
{
- blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
+ int count = atomic_read(&gt->user_wakeref);
+
+ /* Inside suspend/resume so single threaded, no races to worry about. */
+ if (likely(!count))
+ return;
+
+ intel_gt_pm_get(gt);
+ if (suspend) {
+ GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
+ atomic_sub(count, &gt->wakeref.count);
+ } else {
+ atomic_add(count, &gt->wakeref.count);
+ }
+ intel_gt_pm_put(gt);
}
static int __gt_unpark(struct intel_wakeref *wf)
@@ -24,6 +45,8 @@ static int __gt_unpark(struct intel_wakeref *wf)
GEM_TRACE("\n");
+ i915_globals_unpark();
+
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -41,46 +64,41 @@ static int __gt_unpark(struct intel_wakeref *wf)
if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- intel_enable_gt_powersave(i915);
-
- i915_update_gfx_val(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_busy(i915);
-
+ intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(i915);
- intel_gt_queue_hangcheck(gt);
-
- pm_notify(i915, INTEL_GT_UNPARK);
+ intel_gt_unpark_requests(gt);
return 0;
}
static int __gt_park(struct intel_wakeref *wf)
{
- struct drm_i915_private *i915 =
- container_of(wf, typeof(*i915), gt.wakeref);
- intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
+ struct drm_i915_private *i915 = gt->i915;
GEM_TRACE("\n");
- pm_notify(i915, INTEL_GT_PARK);
+ intel_gt_park_requests(gt);
+ i915_vma_parked(gt);
i915_pmu_gt_parked(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_idle(i915);
+ intel_rps_park(&gt->rps);
+
+ /* Everything switched off, flush any residual interrupt just in case */
+ intel_synchronize_irq(i915);
if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
- i915_rc6_ctx_wa_check(i915);
+ intel_rc6_ctx_wa_check(&i915->gt.rc6);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
}
- /* Everything switched off, flush any residual interrupt just in case */
- intel_synchronize_irq(i915);
-
GEM_BUG_ON(!wakeref);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ i915_globals_park();
+
return 0;
}
@@ -92,9 +110,18 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+}
- BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
+void intel_gt_pm_init(struct intel_gt *gt)
+{
+ /*
+ * Enabling power-management should be "self-healing". If we cannot
+ * enable a feature, simply leave it disabled with a notice to the
+ * user.
+ */
+ intel_rc6_init(&gt->rc6);
+ intel_rps_init(&gt->rps);
}
static bool reset_engines(struct intel_gt *gt)
@@ -119,16 +146,47 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
- GEM_TRACE("\n");
+ GEM_TRACE("force:%s\n", yesno(force));
+
+ /* Use a raw wakeref to avoid calling intel_display_power_get early */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in and so worth trying to use the device once more.
+ */
+ if (intel_gt_is_wedged(gt))
+ intel_gt_unset_wedged(gt);
intel_uc_sanitize(&gt->uc);
- if (!reset_engines(gt) && !force)
- return;
+ for_each_engine(engine, gt, id)
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
- for_each_engine(engine, gt->i915, id)
- __intel_engine_reset(engine, false);
+ intel_uc_reset_prepare(&gt->uc);
+
+ if (reset_engines(gt) || force) {
+ for_each_engine(engine, gt, id)
+ __intel_engine_reset(engine, false);
+ }
+
+ for_each_engine(engine, gt, id)
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
+void intel_gt_pm_fini(struct intel_gt *gt)
+{
+ intel_rc6_fini(&gt->rc6);
}
int intel_gt_resume(struct intel_gt *gt)
@@ -137,6 +195,8 @@ int intel_gt_resume(struct intel_gt *gt)
enum intel_engine_id id;
int err = 0;
+ GEM_TRACE("\n");
+
/*
* After resume, we may need to poke into the pinned kernel
* contexts to paper over any damage caused by the sudden suspend.
@@ -144,14 +204,23 @@ int intel_gt_resume(struct intel_gt *gt)
* allowing us to fixup the user contexts on their first pin.
*/
intel_gt_pm_get(gt);
- for_each_engine(engine, gt->i915, id) {
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_rc6_sanitize(&gt->rc6);
+
+ intel_rps_enable(&gt->rps);
+ intel_llc_enable(&gt->llc);
+
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
intel_engine_pm_get(engine);
ce = engine->kernel_context;
- if (ce)
+ if (ce) {
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
ce->ops->reset(ce);
+ }
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
@@ -164,19 +233,99 @@ int intel_gt_resume(struct intel_gt *gt)
break;
}
}
+
+ intel_rc6_enable(&gt->rc6);
+
+ intel_uc_resume(&gt->uc);
+
+ user_forcewake(gt, false);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
return err;
}
+static void wait_for_suspend(struct intel_gt *gt)
+{
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ intel_gt_set_wedged(gt);
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+}
+
+void intel_gt_suspend_prepare(struct intel_gt *gt)
+{
+ user_forcewake(gt, true);
+ wait_for_suspend(gt);
+
+ intel_uc_suspend(&gt->uc);
+}
+
+static suspend_state_t pm_suspend_target(void)
+{
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+ return pm_suspend_target_state;
+#else
+ return PM_SUSPEND_TO_IDLE;
+#endif
+}
+
+void intel_gt_suspend_late(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+
+ /* We expect to be idle already; but also want to be independent */
+ wait_for_suspend(gt);
+
+ /*
+ * On disabling the device, we want to turn off HW access to memory
+ * that we no longer own.
+ *
+ * However, not all suspend-states disable the device. S0 (s2idle)
+ * is effectively runtime-suspend, the device is left powered on
+ * but needs to be put into a low power state. We need to keep
+ * powermanagement enabled, but we also retain system state and so
+ * it remains safe to keep on using our allocated memory.
+ */
+ if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
+ return;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ intel_rps_disable(&gt->rps);
+ intel_rc6_disable(&gt->rc6);
+ intel_llc_disable(&gt->llc);
+ }
+
+ intel_gt_sanitize(gt, false);
+
+ GEM_TRACE("\n");
+}
+
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_uc_runtime_suspend(&gt->uc);
+
+ GEM_TRACE("\n");
}
int intel_gt_runtime_resume(struct intel_gt *gt)
{
+ GEM_TRACE("\n");
+
intel_gt_init_swizzling(gt);
return intel_uc_runtime_resume(&gt->uc);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_gt_pm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index fb39d99cd6ee..b3e17399be9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -12,11 +12,6 @@
#include "intel_gt_types.h"
#include "intel_wakeref.h"
-enum {
- INTEL_GT_UNPARK,
- INTEL_GT_PARK,
-};
-
static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
{
return intel_wakeref_is_active(&gt->wakeref);
@@ -43,10 +38,21 @@ static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
}
void intel_gt_pm_init_early(struct intel_gt *gt);
+void intel_gt_pm_init(struct intel_gt *gt);
+void intel_gt_pm_fini(struct intel_gt *gt);
void intel_gt_sanitize(struct intel_gt *gt, bool force);
+
+void intel_gt_suspend_prepare(struct intel_gt *gt);
+void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);
+
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
+static inline bool is_mock_gt(const struct intel_gt *gt)
+{
+ return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
+}
+
#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
new file mode 100644
index 000000000000..353809ac2754
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -0,0 +1,137 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h" /* for_each_engine() */
+#include "i915_request.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_timeline.h"
+
+static void retire_requests(struct intel_timeline *tl)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (!i915_request_retire(rq))
+ break;
+}
+
+static void flush_submission(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_flush_submission(engine);
+}
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ unsigned long active_count = 0;
+ unsigned long flags;
+ bool interruptible;
+ LIST_HEAD(free);
+
+ interruptible = true;
+ if (unlikely(timeout < 0))
+ timeout = -timeout, interruptible = false;
+
+ flush_submission(gt); /* kick the ksoftirqd tasklets */
+
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++; /* report busy to caller, try again? */
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!tl->active_count);
+ tl->active_count++; /* pin the list element */
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
+ if (timeout > 0) {
+ struct dma_fence *fence;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ timeout = dma_fence_wait_timeout(fence,
+ interruptible,
+ timeout);
+ dma_fence_put(fence);
+ }
+ }
+
+ retire_requests(tl);
+
+ spin_lock_irqsave(&timelines->lock, flags);
+
+ /* Resume iteration after dropping lock */
+ list_safe_reset_next(tl, tn, link);
+ if (!--tl->active_count)
+ list_del(&tl->link);
+ else
+ active_count += !!rcu_access_pointer(tl->last_request.fence);
+
+ mutex_unlock(&tl->mutex);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(tl->active_count);
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+
+ return active_count ? timeout : 0;
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+ /* If the device is asleep, we have no requests outstanding */
+ if (!intel_gt_pm_is_awake(gt))
+ return 0;
+
+ while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return timeout;
+}
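/*
 * A short usage sketch (illustrative only, not taken from this patch): the
 * sign of the timeout selects the wait mode above -- a negative value asks
 * for an uninterruptible wait of -timeout jiffies, a positive value for an
 * interruptible one.  A hypothetical caller willing to be interrupted:
 *
 *	long remaining = intel_gt_wait_for_idle(gt, msecs_to_jiffies(200));
 *	if (remaining < 0)
 *		return remaining;	(interrupted before going idle)
 */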
+
+static void retire_work_handler(struct work_struct *work)
+{
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), requests.retire_work.work);
+
+ intel_gt_retire_requests(gt);
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+void intel_gt_init_requests(struct intel_gt *gt)
+{
+ INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
+}
+
+void intel_gt_park_requests(struct intel_gt *gt)
+{
+ cancel_delayed_work(&gt->requests.retire_work);
+}
+
+void intel_gt_unpark_requests(struct intel_gt *gt)
+{
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+}
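/*
 * Lifecycle, as read from the code above (the call sites are assumed, not
 * shown here): intel_gt_init_requests() prepares the delayed work,
 * intel_gt_unpark_requests() schedules the retire worker roughly once a
 * second via round_jiffies_up_relative(HZ), retire_work_handler() re-arms
 * itself while the GT stays busy, and intel_gt_park_requests() cancels the
 * work again when the GT idles -- presumably from the GT PM park/unpark
 * callbacks.
 */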
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
new file mode 100644
index 000000000000..bd31cbce47e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_REQUESTS_H
+#define INTEL_GT_REQUESTS_H
+
+struct intel_gt;
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
+static inline void intel_gt_retire_requests(struct intel_gt *gt)
+{
+ intel_gt_retire_requests_timeout(gt, 0);
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
+void intel_gt_init_requests(struct intel_gt *gt);
+void intel_gt_park_requests(struct intel_gt *gt);
+void intel_gt_unpark_requests(struct intel_gt *gt);
+
+#endif /* INTEL_GT_REQUESTS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index dc295c196d11..d4e14dbd172e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -17,7 +17,10 @@
#include "i915_vma.h"
#include "intel_engine_types.h"
+#include "intel_llc_types.h"
#include "intel_reset_types.h"
+#include "intel_rc6_types.h"
+#include "intel_rps_types.h"
#include "intel_wakeref.h"
struct drm_i915_private;
@@ -25,14 +28,6 @@ struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;
-struct intel_hangcheck {
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
- struct delayed_work work;
-};
-
struct intel_gt {
struct drm_i915_private *i915;
struct intel_uncore *uncore;
@@ -49,12 +44,23 @@ struct intel_gt {
struct list_head hwsp_free_list;
} timelines;
+ struct intel_gt_requests {
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+ } requests;
+
struct intel_wakeref wakeref;
+ atomic_t user_wakeref;
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
- struct intel_hangcheck hangcheck;
struct intel_reset reset;
/**
@@ -66,7 +72,9 @@ struct intel_gt {
*/
intel_wakeref_t awake;
- struct blocking_notifier_head pm_notifications;
+ struct intel_llc llc;
+ struct intel_rc6 rc6;
+ struct intel_rps rps;
ktime_t last_init_time;
@@ -89,14 +97,16 @@ enum intel_gt_scratch_field {
INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
/* 8 bytes */
- INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128,
-
- /* 8 bytes */
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+ /* 6 * 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,
+
+ /* 4 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
#endif /* __INTEL_GT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
deleted file mode 100644
index 05d042cdefe2..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "intel_engine.h"
-#include "intel_gt.h"
-#include "intel_reset.h"
-
-struct hangcheck {
- u64 acthd;
- u32 ring;
- u32 head;
- enum intel_engine_hangcheck_action action;
- unsigned long action_timestamp;
- int deadlock;
- struct intel_instdone instdone;
- bool wedged:1;
- bool stalled:1;
-};
-
-static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
-{
- u32 tmp = current_instdone | *old_instdone;
- bool unchanged;
-
- unchanged = tmp == *old_instdone;
- *old_instdone |= tmp;
-
- return unchanged;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_instdone instdone;
- struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
- bool stuck;
- int slice;
- int subslice;
-
- intel_engine_get_instdone(engine, &instdone);
-
- /* There might be unstable subunit states even when
- * actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = instdone_unchanged(instdone.instdone,
- &accu_instdone->instdone);
- stuck &= instdone_unchanged(instdone.slice_common,
- &accu_instdone->slice_common);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
- stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
- &accu_instdone->sampler[slice][subslice]);
- stuck &= instdone_unchanged(instdone.row[slice][subslice],
- &accu_instdone->row[slice][subslice]);
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return ENGINE_ACTIVE_HEAD;
- }
-
- if (!subunits_stuck(engine))
- return ENGINE_ACTIVE_SUBUNITS;
-
- return ENGINE_DEAD;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != ENGINE_DEAD)
- return ha;
-
- if (IS_GEN(engine->i915, 2))
- return ENGINE_DEAD;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = ENGINE_READ(engine, RING_CTL);
- if (tmp & RING_WAIT) {
- intel_gt_handle_error(engine->gt, engine->mask, 0,
- "stuck wait on %s", engine->name);
- ENGINE_WRITE(engine, RING_CTL, tmp);
- return ENGINE_WAIT_KICK;
- }
-
- return ENGINE_DEAD;
-}
-
-static void hangcheck_load_sample(struct intel_engine_cs *engine,
- struct hangcheck *hc)
-{
- hc->acthd = intel_engine_get_active_head(engine);
- hc->ring = ENGINE_READ(engine, RING_START);
- hc->head = ENGINE_READ(engine, RING_HEAD);
-}
-
-static void hangcheck_store_sample(struct intel_engine_cs *engine,
- const struct hangcheck *hc)
-{
- engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.last_ring = hc->ring;
- engine->hangcheck.last_head = hc->head;
-}
-
-static enum intel_engine_hangcheck_action
-hangcheck_get_action(struct intel_engine_cs *engine,
- const struct hangcheck *hc)
-{
- if (intel_engine_is_idle(engine))
- return ENGINE_IDLE;
-
- if (engine->hangcheck.last_ring != hc->ring)
- return ENGINE_ACTIVE_SEQNO;
-
- if (engine->hangcheck.last_head != hc->head)
- return ENGINE_ACTIVE_SEQNO;
-
- return engine_stuck(engine, hc->acthd);
-}
-
-static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
- struct hangcheck *hc)
-{
- unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
-
- hc->action = hangcheck_get_action(engine, hc);
-
- /* We always increment the progress
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, make it as a progress as the seqno
- * advancement might ensure and if not, it
- * will catch the hanging engine.
- */
-
- switch (hc->action) {
- case ENGINE_IDLE:
- case ENGINE_ACTIVE_SEQNO:
- /* Clear head and subunit states on seqno movement */
- hc->acthd = 0;
-
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- /* Intentional fall through */
- case ENGINE_WAIT_KICK:
- case ENGINE_WAIT:
- engine->hangcheck.action_timestamp = jiffies;
- break;
-
- case ENGINE_ACTIVE_HEAD:
- case ENGINE_ACTIVE_SUBUNITS:
- /*
- * Seqno stuck with still active engine gets leeway,
- * in hopes that it is just a long shader.
- */
- timeout = I915_SEQNO_DEAD_TIMEOUT;
- break;
-
- case ENGINE_DEAD:
- break;
-
- default:
- MISSING_CASE(hc->action);
- }
-
- hc->stalled = time_after(jiffies,
- engine->hangcheck.action_timestamp + timeout);
- hc->wedged = time_after(jiffies,
- engine->hangcheck.action_timestamp +
- I915_ENGINE_WEDGED_TIMEOUT);
-}
-
-static void hangcheck_declare_hang(struct intel_gt *gt,
- intel_engine_mask_t hung,
- intel_engine_mask_t stuck)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
- char msg[80];
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "no progress" : "hang");
- for_each_engine_masked(engine, gt->i915, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg);
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void hangcheck_elapsed(struct work_struct *work)
-{
- struct intel_gt *gt =
- container_of(work, typeof(*gt), hangcheck.work.work);
- intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
-
- if (!i915_modparams.enable_hangcheck)
- return;
-
- if (!READ_ONCE(gt->awake))
- return;
-
- if (intel_gt_is_wedged(gt))
- return;
-
- wakeref = intel_runtime_pm_get_if_in_use(&gt->i915->runtime_pm);
- if (!wakeref)
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(gt->uncore);
-
- for_each_engine(engine, gt->i915, id) {
- struct hangcheck hc;
-
- intel_engine_signal_breadcrumbs(engine);
-
- hangcheck_load_sample(engine, &hc);
- hangcheck_accumulate_sample(engine, &hc);
- hangcheck_store_sample(engine, &hc);
-
- if (hc.stalled) {
- hung |= engine->mask;
- if (hc.action != ENGINE_DEAD)
- stuck |= engine->mask;
- }
-
- if (hc.wedged)
- wedged |= engine->mask;
- }
-
- if (GEM_SHOW_DEBUG() && (hung | stuck)) {
- struct drm_printer p = drm_debug_printer("hangcheck");
-
- for_each_engine(engine, gt->i915, id) {
- if (intel_engine_is_idle(engine))
- continue;
-
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
- }
-
- if (wedged) {
- dev_err(gt->i915->drm.dev,
- "GPU recovery timed out,"
- " cancelling all in-flight rendering.\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- }
-
- if (hung)
- hangcheck_declare_hang(gt, hung, stuck);
-
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
-
- /* Reset timer in case GPU hangs without another request being added */
- intel_gt_queue_hangcheck(gt);
-}
-
-void intel_gt_queue_hangcheck(struct intel_gt *gt)
-{
- unsigned long delay;
-
- if (unlikely(!i915_modparams.enable_hangcheck))
- return;
-
- /*
- * Don't continually defer the hangcheck so that it is always run at
- * least once after work has been scheduled on any ring. Otherwise,
- * we will ignore a hung ring if a second ring is kept busy.
- */
-
- delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
- queue_delayed_work(system_long_wq, &gt->hangcheck.work, delay);
-}
-
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
- engine->hangcheck.action_timestamp = jiffies;
-}
-
-void intel_gt_init_hangcheck(struct intel_gt *gt)
-{
- INIT_DELAYED_WORK(&gt->hangcheck.work, hangcheck_elapsed);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_hangcheck.c"
-#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
new file mode 100644
index 000000000000..ceb785b75c25
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -0,0 +1,161 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/cpufreq.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_sideband.h"
+
+struct ia_constants {
+ unsigned int min_gpu_freq;
+ unsigned int max_gpu_freq;
+
+ unsigned int min_ring_freq;
+ unsigned int max_ia_freq;
+};
+
+static struct intel_gt *llc_to_gt(struct intel_llc *llc)
+{
+ return container_of(llc, struct intel_gt, llc);
+}
+
+static unsigned int cpu_max_MHz(void)
+{
+ struct cpufreq_policy *policy;
+ unsigned int max_khz;
+
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ max_khz = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ } else {
+ /*
+ * Default to measured freq if none found; the PCU will ensure
+ * we don't go over
+ */
+ max_khz = tsc_khz;
+ }
+
+ return max_khz / 1000;
+}
+
+static bool get_ia_constants(struct intel_llc *llc,
+ struct ia_constants *consts)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ if (rps->max_freq <= rps->min_freq)
+ return false;
+
+ consts->max_ia_freq = cpu_max_MHz();
+
+ consts->min_ring_freq =
+ intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf;
+ /* convert DDR frequency from units of 266.6MHz to bandwidth */
+ consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
+
+ consts->min_gpu_freq = rps->min_freq;
+ consts->max_gpu_freq = rps->max_freq;
+ if (INTEL_GEN(i915) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ consts->min_gpu_freq /= GEN9_FREQ_SCALER;
+ consts->max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ return true;
+}
+
+static void calc_ia_freq(struct intel_llc *llc,
+ unsigned int gpu_freq,
+ const struct ia_constants *consts,
+ unsigned int *out_ia_freq,
+ unsigned int *out_ring_freq)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ const int diff = consts->max_gpu_freq - gpu_freq;
+ unsigned int ia_freq = 0, ring_freq = 0;
+
+ if (INTEL_GEN(i915) >= 9) {
+ /*
+ * ring_freq = 2 * GT. ring_freq is in 100MHz units
+ * No floor required for ring frequency on SKL.
+ */
+ ring_freq = gpu_freq;
+ } else if (INTEL_GEN(i915) >= 8) {
+ /* max(2 * GT, DDR). NB: GT is 50MHz units */
+ ring_freq = max(consts->min_ring_freq, gpu_freq);
+ } else if (IS_HASWELL(i915)) {
+ ring_freq = mult_frac(gpu_freq, 5, 4);
+ ring_freq = max(consts->min_ring_freq, ring_freq);
+ /* leave ia_freq as the default, chosen by cpufreq */
+ } else {
+ const int min_freq = 15;
+ const int scale = 180;
+
+ /*
+ * On older processors, there is no separate ring
+ * clock domain, so in order to boost the bandwidth
+ * of the ring, we need to upclock the CPU (ia_freq).
+ *
+ * For GPU frequencies less than 750MHz,
+ * just use the lowest ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = consts->max_ia_freq - diff * scale / 2;
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ }
+
+ *out_ia_freq = ia_freq;
+ *out_ring_freq = ring_freq;
+}
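/*
 * A worked example with made-up numbers: on a pre-Haswell part with
 * consts->max_ia_freq = 3400 (MHz), max_gpu_freq = 22 and gpu_freq = 20,
 * diff = 2 and ia_freq = 3400 - 2 * 180 / 2 = 3220, which
 * DIV_ROUND_CLOSEST(, 100) turns into 32, i.e. a 3.2 GHz CPU floor in units
 * of 100 MHz.  On Haswell the same gpu_freq yields
 * ring_freq = 20 * 5 / 4 = 25 (subject to the min_ring_freq floor) while the
 * IA frequency is left for cpufreq to pick.
 */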
+
+static void gen6_update_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ unsigned int gpu_freq;
+
+ if (!get_ia_constants(llc, &consts))
+ return;
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = consts.max_gpu_freq;
+ gpu_freq >= consts.min_gpu_freq;
+ gpu_freq--) {
+ unsigned int ia_freq, ring_freq;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+ sandybridge_pcode_write(i915,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+ ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+ gpu_freq);
+ }
+}
+
+void intel_llc_enable(struct intel_llc *llc)
+{
+ if (HAS_LLC(llc_to_gt(llc)->i915))
+ gen6_update_ring_freq(llc);
+}
+
+void intel_llc_disable(struct intel_llc *llc)
+{
+ /* Currently there is no HW configuration to be done to disable. */
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_llc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h
new file mode 100644
index 000000000000..ef09a890d2b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_H
+#define INTEL_LLC_H
+
+struct intel_llc;
+
+void intel_llc_enable(struct intel_llc *llc);
+void intel_llc_disable(struct intel_llc *llc);
+
+#endif /* INTEL_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h
new file mode 100644
index 000000000000..ecad4687b930
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_TYPES_H
+#define INTEL_LLC_TYPES_H
+
+struct intel_llc {
+};
+
+#endif /* INTEL_LLC_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 06a506c29463..0ac3b26674ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -145,6 +145,7 @@
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@@ -230,17 +231,42 @@ static int __execlists_context_alloc(struct intel_context *ce,
struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
- struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_ring *ring);
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool close);
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
static void mark_eio(struct i915_request *rq)
{
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
+ if (i915_request_completed(rq))
+ return;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ dma_fence_set_error(&rq->fence, -EIO);
i915_request_mark_complete(rq);
}
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ struct i915_request *active = rq;
+
+ rcu_read_lock();
+ list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ active = rq;
+ }
+ rcu_read_unlock();
+
+ return active;
+}
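/*
 * In other words: starting from the request caught executing, walk backwards
 * along the timeline and return the oldest request that has not yet
 * completed.  reset_active() below uses this to rewind ce->ring->head so
 * that any replay restarts on a request boundary rather than in the middle
 * of a batch.
 */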
+
static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
@@ -337,10 +363,15 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* However, the priority hint is a mere hint that we may need to
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
*/
- last_prio = effective_prio(rq);
- if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
- last_prio))
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->execlists.queue_priority_hint <= last_prio)
return false;
/*
@@ -429,12 +460,8 @@ assert_priority_queue(const struct i915_request *prev,
static u64
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct i915_gem_context *ctx = ce->gem_context;
u64 desc;
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
-
desc = INTEL_LEGACY_32B_CONTEXT;
if (i915_vm_is_4lvl(ce->vm))
desc = INTEL_LEGACY_64B_CONTEXT;
@@ -444,33 +471,379 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
if (IS_GEN(engine->i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
- desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
- /* bits 12-31 */
+ desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
/*
* The following 32bits are copied into the OA reports (dword 2).
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
* anything below.
*/
if (INTEL_GEN(engine->i915) >= 11) {
- GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
- desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
- /* bits 37-47 */
-
desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
/* bits 48-53 */
- /* TODO: decide what to do with SW counter (bits 55-60) */
-
desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
/* bits 61-63 */
- } else {
- GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
- desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
}
return desc;
}
+static u32 *set_offsets(u32 *regs,
+ const u8 *data,
+ const struct intel_engine_cs *engine)
+#define NOP(x) (BIT(7) | (x))
+#define LRI(count, flags) ((flags) << 6 | (count))
+#define POSTED BIT(0)
+#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
+#define REG16(x) \
+ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
+ (((x) >> 2) & 0x7f)
+#define END() 0
+{
+ const u32 base = engine->mmio_base;
+
+ while (*data) {
+ u8 count, flags;
+
+ if (*data & BIT(7)) { /* skip */
+ regs += *data++ & ~BIT(7);
+ continue;
+ }
+
+ count = *data & 0x3f;
+ flags = *data >> 6;
+ data++;
+
+ *regs = MI_LOAD_REGISTER_IMM(count);
+ if (flags & POSTED)
+ *regs |= MI_LRI_FORCE_POSTED;
+ if (INTEL_GEN(engine->i915) >= 11)
+ *regs |= MI_LRI_CS_MMIO;
+ regs++;
+
+ GEM_BUG_ON(!count);
+ do {
+ u32 offset = 0;
+ u8 v;
+
+ do {
+ v = *data++;
+ offset <<= 7;
+ offset |= v & ~BIT(7);
+ } while (v & BIT(7));
+
+ *regs = base + (offset << 2);
+ regs += 2;
+ } while (--count);
+ }
+
+ return regs;
+}
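/*
 * The tables below are a compact byte encoding of each engine's context
 * register layout: NOP(x) skips x dwords in the image, LRI(count, flags)
 * emits an MI_LOAD_REGISTER_IMM(count) header (POSTED adds
 * MI_LRI_FORCE_POSTED), REG()/REG16() store the register offset as one or
 * two 7-bit groups with bit 7 acting as a continuation flag, and END()
 * terminates the table.  A worked decode of REG16(0x244): the macro expands
 * to the bytes 0x81, 0x11; set_offsets() reads 0x81, keeps the low seven
 * bits (1) and continues, reads 0x11 to give offset = (1 << 7) | 0x11 =
 * 0x91, and finally writes base + (0x91 << 2) = base + 0x244 into the
 * context image.
 */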
+
+static const u8 gen8_xcs_offsets[] = {
+ NOP(1),
+ LRI(11, 0),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+
+ NOP(9),
+ LRI(9, 0),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(2, 0),
+ REG16(0x200),
+ REG(0x028),
+
+ END(),
+};
+
+static const u8 gen9_xcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, POSTED),
+ REG16(0x200),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+
+ END(),
+};
+
+static const u8 gen12_xcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END(),
+};
+
+static const u8 gen8_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+static const u8 gen11_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(1, POSTED),
+ REG(0x1b0),
+
+ NOP(10),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+static const u8 gen12_rcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+#undef END
+#undef REG16
+#undef REG
+#undef LRI
+#undef NOP
+
+static const u8 *reg_offsets(const struct intel_engine_cs *engine)
+{
+ /*
+ * The gen12+ lists only have the registers we program in the basic
+ * default state. We rely on the context image using relative
+ * addressing to automatically fix up the register state between the
+ * physical engines for a virtual engine.
+ */
+ GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
+ !intel_engine_has_relative_mmio(engine));
+
+ if (engine->class == RENDER_CLASS) {
+ if (INTEL_GEN(engine->i915) >= 12)
+ return gen12_rcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return gen11_rcs_offsets;
+ else
+ return gen8_rcs_offsets;
+ } else {
+ if (INTEL_GEN(engine->i915) >= 12)
+ return gen12_xcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return gen9_xcs_offsets;
+ else
+ return gen8_xcs_offsets;
+ }
+}
+
static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -489,7 +862,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe_reverse(rq, rn,
&engine->active.requests,
sched.link) {
- struct intel_engine_cs *owner;
if (i915_request_completed(rq))
continue; /* XXX */
@@ -504,8 +876,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* engine so that it can be moved across onto another physical
* engine as load dictates.
*/
- owner = rq->hw_context->engine;
- if (likely(owner == engine)) {
+ if (likely(rq->execution_mask == engine->mask)) {
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
@@ -516,6 +887,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl);
active = rq;
} else {
+ struct intel_engine_cs *owner = rq->hw_context->engine;
+
/*
* Decouple the virtual breadcrumb before moving it
* back to the virtual engine -- we don't want the
@@ -525,7 +898,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
*/
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags)) {
- spin_lock(&rq->lock);
+ spin_lock_nested(&rq->lock,
+ SINGLE_DEPTH_NESTING);
i915_request_cancel_breadcrumb(rq);
spin_unlock(&rq->lock);
}
@@ -561,6 +935,114 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
+static void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ write_seqlock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ if (engine->stats.active++ == 0)
+ engine->stats.start = ktime_get();
+ GEM_BUG_ON(engine->stats.active == 0);
+ }
+
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ write_seqlock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ ktime_t last;
+
+ if (engine->stats.active && --engine->stats.active == 0) {
+ /*
+ * Decrement the active context count and, if the GPU is
+ * now idle, add the elapsed time to the running total.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.start);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ } else if (engine->stats.active == 0) {
+ /*
+ * After turning on engine stats, a context switch out might
+ * be the first event we see, in which case we account the
+ * busy time from when stats gathering was turned on.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ }
+ }
+
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static void restore_default_state(struct intel_context *ce,
+ struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+
+ if (engine->pinned_default_state)
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+
+ execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->hw_context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
+ __func__, engine->name, rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (i915_request_completed(rq))
+ head = rq->tail;
+ else
+ head = active_request(ce->timeline, rq)->head;
+ ce->ring->head = intel_ring_wrap(ce->ring, head);
+ intel_ring_update_space(ce->ring);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ restore_default_state(ce, engine);
+ __execlists_update_reg_state(ce, engine);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+}
+
static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
@@ -569,6 +1051,21 @@ __execlists_schedule_in(struct i915_request *rq)
intel_context_get(ce);
+ if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+ reset_active(rq, engine);
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ ce->lrc_desc |= (u64)ce->tag << 32;
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ ce->lrc_desc &= ~GENMASK_ULL(47, 37);
+ ce->lrc_desc |=
+ (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
+ GEN11_SW_CTX_ID_SHIFT;
+ BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
intel_gt_pm_get(engine->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -612,6 +1109,12 @@ __execlists_schedule_out(struct i915_request *rq,
{
struct intel_context * const ce = rq->hw_context;
+ /*
+ * NB process_csb() is not under the engine->active.lock and hence
+ * schedule_out can race with schedule_in meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
intel_gt_pm_put(engine->gt);
@@ -654,7 +1157,7 @@ static u64 execlists_update_context(const struct i915_request *rq)
struct intel_context *ce = rq->hw_context;
u64 desc;
- ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+ ce->lrc_reg_state[CTX_RING_TAIL] =
intel_ring_set_tail(rq->ring, rq->tail);
/*
@@ -677,6 +1180,10 @@ static u64 execlists_update_context(const struct i915_request *rq)
desc = ce->lrc_desc;
ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+ /* Wa_1607138340:tgl */
+ if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
+ desc |= CTX_DESC_FORCE_RESTORE;
+
return desc;
}
@@ -699,6 +1206,9 @@ trace_ports(const struct intel_engine_execlists *execlists,
const struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
+ if (!ports[0])
+ return;
+
GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
engine->name, msg,
ports[0]->fence.context,
@@ -719,25 +1229,45 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
trace_ports(execlists, msg, execlists->pending);
- if (!execlists->pending[0])
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("Nothing pending for promotion!\n");
return false;
+ }
- if (execlists->pending[execlists_num_ports(execlists)])
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
+ execlists_num_ports(execlists));
return false;
+ }
for (port = execlists->pending; (rq = *port); port++) {
- if (ce == rq->hw_context)
+ if (ce == rq->hw_context) {
+ GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
ce = rq->hw_context;
if (i915_request_completed(rq))
continue;
- if (i915_active_is_idle(&ce->active))
+ if (i915_active_is_idle(&ce->active)) {
+ GEM_TRACE_ERR("Inactive context in pending[%zd]\n",
+ port - execlists->pending);
+ return false;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
- if (!i915_vma_is_pinned(ce->state))
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
}
return ce;
@@ -814,6 +1344,10 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
+ if (unlikely((prev->flags ^ next->flags) &
+ (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+ return false;
+
if (!can_merge_ctx(prev->hw_context, next->hw_context))
return false;
@@ -823,47 +1357,7 @@ static bool can_merge_rq(const struct i915_request *prev,
static void virtual_update_register_offsets(u32 *regs,
struct intel_engine_cs *engine)
{
- u32 base = engine->mmio_base;
-
- /* Must match execlists_init_reg_state()! */
-
- regs[CTX_CONTEXT_CONTROL] =
- i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base));
- regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base));
- regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base));
- regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base));
- regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base));
-
- regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base));
- regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base));
- regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base));
- regs[CTX_SECOND_BB_HEAD_U] =
- i915_mmio_reg_offset(RING_SBBADDR_UDW(base));
- regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base));
- regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base));
-
- regs[CTX_CTX_TIMESTAMP] =
- i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base));
- regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3));
- regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3));
- regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2));
- regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2));
- regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1));
- regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1));
- regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
- regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
-
- if (engine->class == RENDER_CLASS) {
- regs[CTX_RCS_INDIRECT_CTX] =
- i915_mmio_reg_offset(RING_INDIRECT_CTX(base));
- regs[CTX_RCS_INDIRECT_CTX_OFFSET] =
- i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base));
- regs[CTX_BB_PER_CTX_PTR] =
- i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base));
-
- regs[CTX_R_PWR_CLK_STATE] =
- i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
- }
+ set_offsets(regs, reg_offsets(engine), engine);
}
static bool virtual_matches(const struct virtual_engine *ve,
@@ -978,7 +1472,7 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
{
int hint;
- if (!intel_engine_has_semaphores(engine))
+ if (!intel_engine_has_timeslices(engine))
return false;
if (list_is_last(&rq->sched.link, &engine->active.requests))
@@ -999,15 +1493,32 @@ switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
return rq_prio(list_next_entry(rq, sched.link));
}
-static bool
-enable_timeslice(const struct intel_engine_execlists *execlists)
+static inline unsigned long
+timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static unsigned long
+active_timeslice(const struct intel_engine_cs *engine)
{
- const struct i915_request *rq = *execlists->active;
+ const struct i915_request *rq = *engine->execlists.active;
if (i915_request_completed(rq))
- return false;
+ return 0;
+
+ if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ return 0;
+
+ return timeslice(engine);
+}
+
+static void set_timeslice(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return;
- return execlists->switch_priority_hint >= effective_prio(rq);
+ set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
}
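/*
 * Rough flow as set up by this patch: set_timeslice() runs once a new
 * context set is promoted to the HW (see process_csb() below) and arms
 * execlists.timer with the per-engine timeslice_duration_ms; it disarms the
 * timer (0ms) when the running request has already completed or outranks
 * everything queued behind it.  When the timer fires, the tasklet is kicked
 * and execlists_dequeue() only considers switching once timer_expired()
 * reports the slice as used up.
 */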
static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1015,6 +1526,30 @@ static void record_preemption(struct intel_engine_execlists *execlists)
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = last_active(&engine->execlists);
+ if (!rq)
+ return 0;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(i915_gem_context_is_banned(rq->gem_context)))
+ return 1;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine));
+}
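/*
 * Companion note: set_preempt_timeout() is armed whenever a new set of
 * requests is written to the ELSP (after execlists_submit_ports() further
 * down), using props.preempt_timeout_ms -- or a forced 1ms when the active
 * context has been banned.  If the timer expires while execlists->pending[0]
 * is still outstanding, i.e. the HW never acknowledged the switch, the
 * submission tasklet escalates to preempt_reset() and resets the engine.
 */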
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1111,7 +1646,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
last = NULL;
} else if (need_timeslice(engine, last) &&
- !timer_pending(&engine->execlists.timer)) {
+ timer_expired(&engine->execlists.timer)) {
GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
engine->name,
last->fence.context,
@@ -1147,8 +1682,18 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* submission.
*/
if (!list_is_last(&last->sched.link,
- &engine->active.requests))
+ &engine->active.requests)) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ if (!execlists->timer.expires &&
+ need_timeslice(engine, last))
+ set_timer_ms(&execlists->timer,
+ timeslice(engine));
+
return;
+ }
/*
* WaIdleLiteRestore:bdw,skl
@@ -1216,7 +1761,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
unsigned int n;
GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- virtual_update_register_offsets(regs, engine);
+
+ if (!intel_engine_has_relative_mmio(engine))
+ virtual_update_register_offsets(regs,
+ engine);
if (!list_empty(&ve->context.signals))
virtual_xfer_breadcrumbs(ve, engine);
@@ -1299,6 +1847,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last->hw_context == rq->hw_context)
goto done;
+ if (i915_request_has_sentinel(last))
+ goto done;
+
/*
* If GVT overrides us we only ever submit
* port[0], leaving port[1] empty. Note that we
@@ -1357,11 +1908,28 @@ done:
if (submit) {
*port = execlists_schedule_in(last, port - execlists->pending);
- memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists->switch_priority_hint =
switch_prio(engine, *execlists->pending);
+
+ /*
+ * Skip if we ended up with exactly the same set of requests,
+ * e.g. trying to timeslice a pair of ordered contexts
+ */
+ if (!memcmp(execlists->active, execlists->pending,
+ (port - execlists->pending + 1) * sizeof(*port))) {
+ do
+ execlists_schedule_out(fetch_and_zero(port));
+ while (port-- != execlists->pending);
+
+ goto skip_submit;
+ }
+
+ memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists_submit_ports(engine);
+
+ set_preempt_timeout(engine);
} else {
+skip_submit:
ring_set_paused(engine, 0);
}
}
@@ -1394,13 +1962,6 @@ reset_in_progress(const struct intel_engine_execlists *execlists)
return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}
-enum csb_step {
- CSB_NOP,
- CSB_PROMOTE,
- CSB_PREEMPT,
- CSB_COMPLETE,
-};
-
/*
* Starting with Gen12, the status has a new format:
*
@@ -1427,7 +1988,7 @@ enum csb_step {
* bits 47-57: sw context id of the lrc the GT switched away from
* bits 58-63: sw counter of the lrc the GT switched away from
*/
-static inline enum csb_step
+static inline bool
gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
{
u32 lower_dw = csb[0];
@@ -1436,9 +1997,6 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
- if (!ctx_away_valid && ctx_to_valid)
- return CSB_PROMOTE;
-
/*
* The context switch detail is not guaranteed to be 5 when a preemption
* occurs, so we can't just check for that. The check below works for
@@ -1446,8 +2004,10 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
* instructions and lite-restore. Preempt-to-idle via the CTRL register
* would require some extra handling, but we don't support that.
*/
- if (new_queue && ctx_away_valid)
- return CSB_PREEMPT;
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!ctx_to_valid);
+ return true;
+ }
/*
* switch detail = 5 is covered by the case above and we do not expect a
@@ -1455,30 +2015,13 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
* use polling mode.
*/
GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
-
- if (*execlists->active) {
- GEM_BUG_ON(!ctx_away_valid);
- return CSB_COMPLETE;
- }
-
- return CSB_NOP;
+ return false;
}
-static inline enum csb_step
+static inline bool
gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
{
- unsigned int status = *csb;
-
- if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
- return CSB_PROMOTE;
-
- if (status & GEN8_CTX_STATUS_PREEMPTED)
- return CSB_PREEMPT;
-
- if (*execlists->active)
- return CSB_COMPLETE;
-
- return CSB_NOP;
+ return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
}
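/*
 * The old CSB_NOP/PROMOTE/PREEMPT/COMPLETE steps collapse into a single
 * boolean: "promote" means the event installs a new context set, so
 * process_csb() below schedules out whatever is still marked active and
 * copies pending[] into inflight[]; otherwise the event marks completion of
 * the head of the inflight array and the active pointer simply advances.
 */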
static void process_csb(struct intel_engine_cs *engine)
@@ -1488,7 +2031,14 @@ static void process_csb(struct intel_engine_cs *engine)
const u8 num_entries = execlists->csb_size;
u8 head, tail;
- GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that is only inside the reset paths and so serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
+ GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1517,7 +2067,7 @@ static void process_csb(struct intel_engine_cs *engine)
rmb();
do {
- enum csb_step csb_step;
+ bool promote;
if (++head == num_entries)
head = 0;
@@ -1545,20 +2095,19 @@ static void process_csb(struct intel_engine_cs *engine)
buf[2 * head + 0], buf[2 * head + 1]);
if (INTEL_GEN(engine->i915) >= 12)
- csb_step = gen12_csb_parse(execlists, buf + 2 * head);
+ promote = gen12_csb_parse(execlists, buf + 2 * head);
else
- csb_step = gen8_csb_parse(execlists, buf + 2 * head);
+ promote = gen8_csb_parse(execlists, buf + 2 * head);
+ if (promote) {
+ if (!inject_preempt_hang(execlists))
+ ring_set_paused(engine, 0);
- switch (csb_step) {
- case CSB_PREEMPT: /* cancel old inflight, prepare for switch */
+ /* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", execlists->active);
-
while (*execlists->active)
execlists_schedule_out(*execlists->active++);
- /* fallthrough */
- case CSB_PROMOTE: /* switch pending to inflight */
- GEM_BUG_ON(*execlists->active);
+ /* switch pending to inflight */
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
execlists->active =
memcpy(execlists->inflight,
@@ -1566,16 +2115,13 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_num_ports(execlists) *
sizeof(*execlists->pending));
- if (enable_timeslice(execlists))
- mod_timer(&execlists->timer, jiffies + 1);
-
- if (!inject_preempt_hang(execlists))
- ring_set_paused(engine, 0);
+ set_timeslice(engine);
WRITE_ONCE(execlists->pending[0], NULL);
- break;
+ } else {
+ GEM_BUG_ON(!*execlists->active);
- case CSB_COMPLETE: /* port0 completed, advanced to port1 */
+ /* port0 completed, advanced to port1 */
trace_ports(execlists, "completed", execlists->active);
/*
@@ -1590,10 +2136,6 @@ static void process_csb(struct intel_engine_cs *engine)
GEM_BUG_ON(execlists->active - execlists->inflight >
execlists_num_ports(execlists));
- break;
-
- case CSB_NOP:
- break;
}
} while (head != tail);
@@ -1623,6 +2165,43 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
}
}
+static noinline void preempt_reset(struct intel_engine_cs *engine)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (i915_modparams.reset < 3)
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ GEM_TRACE("%s: preempt timeout %lu+%ums\n",
+ engine->name,
+ READ_ONCE(engine->props.preempt_timeout_ms),
+ jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+ intel_engine_reset(engine, "preemption time out");
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
+ return READ_ONCE(engine->execlists.pending[0]);
+}
+
/*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
@@ -1630,23 +2209,39 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- unsigned long flags;
+ bool timeout = preempt_timeout(engine);
process_csb(engine);
- if (!READ_ONCE(engine->execlists.pending[0])) {
+ if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
+ unsigned long flags;
+
spin_lock_irqsave(&engine->active.lock, flags);
__execlists_submission_tasklet(engine);
spin_unlock_irqrestore(&engine->active.lock, flags);
+
+ /* Recheck after serialising with direct-submission */
+ if (timeout && preempt_timeout(engine))
+ preempt_reset(engine);
}
}
-static void execlists_submission_timer(struct timer_list *timer)
+static void __execlists_kick(struct intel_engine_execlists *execlists)
{
- struct intel_engine_cs *engine =
- from_timer(engine, timer, execlists.timer);
-
/* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
}
static void queue_request(struct intel_engine_cs *engine,
@@ -1726,7 +2321,6 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr += engine->context_size;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
@@ -1738,7 +2332,6 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr += engine->context_size;
if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
@@ -1752,14 +2345,13 @@ static void execlists_context_unpin(struct intel_context *ce)
check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
ce->engine);
- i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
-__execlists_update_reg_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
+__execlists_update_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
@@ -1767,16 +2359,16 @@ __execlists_update_reg_state(struct intel_context *ce,
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
- regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
- regs[CTX_RING_HEAD + 1] = ring->head;
- regs[CTX_RING_TAIL + 1] = ring->tail;
+ regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma);
+ regs[CTX_RING_HEAD] = ring->head;
+ regs[CTX_RING_TAIL] = ring->tail;
/* RPCS */
if (engine->class == RENDER_CLASS) {
- regs[CTX_R_PWR_CLK_STATE + 1] =
+ regs[CTX_R_PWR_CLK_STATE] =
intel_sseu_make_rpcs(engine->i915, &ce->sseu);
- i915_oa_init_reg_state(engine, ce, regs);
+ i915_oa_init_reg_state(ce, engine);
}
}
@@ -1802,18 +2394,12 @@ __execlists_context_pin(struct intel_context *ce,
goto unpin_active;
}
- ret = i915_gem_context_pin_hw_id(ce->gem_context);
- if (ret)
- goto unpin_map;
-
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(ce, engine);
return 0;
-unpin_map:
- i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
intel_context_active_release(ce);
err:
@@ -1869,7 +2455,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
- GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1885,7 +2471,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = MI_NOOP;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = i915_request_timeline(rq)->hwsp_offset;
*cs++ = 0;
*cs++ = rq->fence.seqno - 1;
@@ -1897,60 +2483,6 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
return 0;
}
-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm);
- int err, i;
- u32 *cs;
-
- GEM_BUG_ON(intel_vgpu_active(rq->i915));
-
- /*
- * Beware ye of the dragons, this sequence is magic!
- *
- * Small changes to this sequence can cause anything from
- * GPU hangs to forcewake errors and machine lockups!
- */
-
- /* Flush any residual operations from the context load */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
-
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Ensure the LRI have landed before we invalidate & continue */
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- u32 base = engine->mmio_base;
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
- }
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- /* Be doubly sure the LRI have landed before proceeding */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
-
- /* Re-invalidate the TLB for luck */
- return engine->emit_flush(rq, EMIT_INVALIDATE);
-}
-
static int execlists_request_alloc(struct i915_request *request)
{
int ret;
@@ -1973,10 +2505,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(request->hw_context->vm))
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
- else
- ret = emit_pdps(request);
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
@@ -2028,12 +2557,6 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-static u32 slm_offset(struct intel_engine_cs *engine)
-{
- return intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
-}
-
/*
* Typically we only have one indirect_ctx and per_ctx batch buffer which are
* initialized at the beginning and shared across all contexts but this field
@@ -2062,10 +2585,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* Actual scratch location is at 128 bytes offset */
batch = gen8_emit_pipe_control(batch,
PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- slm_offset(engine));
+ LRC_PPHWSP_SCRATCH_ADDR);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2423,27 +2946,29 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
&execlists->csb_status[reset_value]);
}
-static struct i915_request *active_request(struct i915_request *rq)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- const struct intel_context * const ce = rq->hw_context;
- struct i915_request *active = NULL;
- struct list_head *list;
-
- if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
- return rq;
-
- list = &rq->timeline->requests;
- list_for_each_entry_from_reverse(rq, list, link) {
- if (i915_request_completed(rq))
- break;
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
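Editorial aside, not part of the patch: each LRC entry is an (mmio offset, value) dword pair and MI_MODE is a masked register, which is why the reset helper just below pokes regs[x + 1]. A minimal, hypothetical sketch of that masked-write pattern:

static void clear_stop_ring(u32 *regs, int x)
{
	/*
	 * regs[x] holds the mmio offset of RING_MI_MODE, regs[x + 1] the
	 * value the HW restores; setting STOP_RING in the upper halfword
	 * arms the write mask so clearing the bit actually takes effect.
	 */
	regs[x + 1] &= ~STOP_RING;
	regs[x + 1] |= STOP_RING << 16;
}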
- if (rq->hw_context != ce)
- break;
+static void __execlists_reset_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+ int x;
- active = rq;
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
}
-
- return active;
}
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
@@ -2451,7 +2976,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
struct i915_request *rq;
- u32 *regs;
+
+ mb(); /* paranoia: read the CSB pointers from after the reset */
+ clflush(execlists->csb_write);
+ mb();
process_csb(engine); /* drain preemption events */
@@ -2467,16 +2995,23 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!rq)
goto unwind;
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
ce = rq->hw_context;
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- rq = active_request(rq);
- if (!rq) {
- ce->ring->head = ce->ring->tail;
+
+ if (i915_request_completed(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ ce->ring->head = intel_ring_wrap(ce->ring, rq->tail);
goto out_replay;
}
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ rq = active_request(ce->timeline, rq);
ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(ce->ring->head == ce->ring->tail);
/*
* If this request hasn't started yet, e.g. it is waiting on a
@@ -2516,19 +3051,16 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = ce->lrc_reg_state;
- if (engine->pinned_default_state) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- }
- execlists_init_reg_state(regs, ce, engine, ce->ring);
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ restore_default_state(ce, engine);
out_replay:
- GEM_TRACE("%s replay {head:%04x, tail:%04x\n",
+ GEM_TRACE("%s replay {head:%04x, tail:%04x}\n",
engine->name, ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
+ __execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine);
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
unwind:
/* Push back any incomplete requests for replay after the reset. */
@@ -2749,7 +3281,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
}
*cs++ = cmd;
- *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
intel_ring_advance(request, cs);
@@ -2760,10 +3292,6 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
static int gen8_emit_flush_render(struct i915_request *request,
u32 mode)
{
- struct intel_engine_cs *engine = request->engine;
- u32 scratch_addr =
- intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2785,7 +3313,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
/*
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
@@ -2818,7 +3346,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
if (dc_flush_wa)
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
@@ -2831,11 +3359,6 @@ static int gen8_emit_flush_render(struct i915_request *request,
static int gen11_emit_flush_render(struct i915_request *request,
u32 mode)
{
- struct intel_engine_cs *engine = request->engine;
- const u32 scratch_addr =
- intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
-
if (mode & EMIT_FLUSH) {
u32 *cs;
u32 flags = 0;
@@ -2848,13 +3371,13 @@ static int gen11_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
cs = intel_ring_begin(request, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(request, cs);
}
@@ -2872,14 +3395,106 @@ static int gen11_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static int gen12_emit_flush_render(struct i915_request *request,
+ u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
cs = intel_ring_begin(request, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(request, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ *cs++ = preparser_disable(false);
intel_ring_advance(request, cs);
+
+ /*
+ * Wa_1604544889:tgl
+ */
+ if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ flags = 0;
+ flags |= PIPE_CONTROL_CS_STALL;
+ flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags,
+ LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
}
return 0;
@@ -2933,7 +3548,7 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
- request->timeline->hwsp_offset,
+ i915_request_active_timeline(request)->hwsp_offset,
0);
return gen8_emit_fini_breadcrumb_footer(request, cs);
@@ -2941,28 +3556,28 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- request->timeline->hwsp_offset,
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE);
-
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
return gen8_emit_fini_breadcrumb_footer(request, cs);
}
-static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
- u32 *cs)
+static u32 *
+gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- request->timeline->hwsp_offset,
+ i915_request_active_timeline(request)->hwsp_offset,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -2973,9 +3588,88 @@ static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
return gen8_emit_fini_breadcrumb_footer(request, cs);
}
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ *
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ *
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
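Editorial sketch of the first option described above (bracketing the writes with the new MI_ARB_CHECK flag); the helper name and the GGTT address parameter are hypothetical, and the in-kernel users opt for the separate-context approach instead:

static int emit_patched_dword(struct i915_request *rq, u32 gtt_addr, u32 value)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = preparser_disable(true);	/* gen12+: stall instruction pre-fetch */
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = gtt_addr;			/* lower 32 bits of the GGTT address */
	*cs++ = 0;				/* upper 32 bits */
	*cs++ = value;
	*cs++ = preparser_disable(false);	/* re-enable pre-fetch */

	intel_ring_advance(rq, cs);
	return 0;
}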
+
+static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
+{
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = intel_hws_preempt_address(request->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(request->engine))
+ cs = gen12_emit_preempt_busywait(request, cs);
+
+ request->tail = intel_ring_offset(request, cs);
+ assert_ring_tail_valid(request->ring, request->tail);
+
+ return gen8_emit_wa_tail(request, cs);
+}
+
+static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ 0);
+
+ return gen12_emit_fini_breadcrumb_footer(request, cs);
+}
+
+static u32 *
+gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_HDC_PIPELINE_FLUSH);
+
+ return gen12_emit_fini_breadcrumb_footer(request, cs);
+}
+
static void execlists_park(struct intel_engine_cs *engine)
{
- del_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
@@ -2998,6 +3692,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
}
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
}
static void execlists_destroy(struct intel_engine_cs *engine)
@@ -3025,6 +3722,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen8_emit_flush;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
engine->set_default_submission = intel_execlists_set_default_submission;
@@ -3070,6 +3769,9 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
case 12:
+ engine->emit_flush = gen12_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
case 11:
engine->emit_flush = gen11_emit_flush_render;
engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
@@ -3085,7 +3787,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
- timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
@@ -3142,7 +3845,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
return 0;
}
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
+static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
@@ -3175,86 +3878,50 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
return indirect_ctx_offset;
}
-static void execlists_init_reg_state(u32 *regs,
- struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
- bool rcs = engine->class == RENDER_CLASS;
- u32 base = engine->mmio_base;
- /*
- * A context is actually a big batch buffer with several
- * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
- * values we are setting here are only for the first context restore:
- * on a subsequent save, the GPU will recreate this batchbuffer with new
- * values (including all the missing MI_LOAD_REGISTER_IMM commands that
- * we are not initializing here).
- *
- * Must keep consistent with virtual_update_register_offsets().
- */
- regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
- MI_LRI_FORCE_POSTED;
-
- CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
+static void init_common_reg_state(u32 * const regs,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring)
+{
+ regs[CTX_CONTEXT_CONTROL] =
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
- if (INTEL_GEN(engine->i915) < 11) {
- regs[CTX_CONTEXT_CONTROL + 1] |=
+ _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ if (INTEL_GEN(engine->i915) < 11)
+ regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
- }
- CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
- CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
- CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
- CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
- RING_CTL_SIZE(ring->size) | RING_VALID);
- CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
- CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
- CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
- CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
- CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
- CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
- if (rcs) {
- struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-
- CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
- CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
- RING_INDIRECT_CTX_OFFSET(base), 0);
- if (wa_ctx->indirect_ctx.size) {
- u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- regs[CTX_RCS_INDIRECT_CTX + 1] =
- (ggtt_offset + wa_ctx->indirect_ctx.offset) |
- (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
-
- regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
- intel_lr_indirect_ctx_offset(engine) << 6;
- }
- CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
- if (wa_ctx->per_ctx.size) {
- u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+ regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ regs[CTX_BB_STATE] = RING_BB_PPGTT;
+}
- regs[CTX_BB_PER_CTX_PTR + 1] =
- (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
- }
+static void init_wa_bb_reg_state(u32 * const regs,
+ const struct intel_engine_cs *engine,
+ u32 pos_bb_per_ctx)
+{
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
+
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+
+ regs[pos_bb_per_ctx] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
- regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
+ if (wa_ctx->indirect_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+
+ regs[pos_bb_per_ctx + 2] =
+ (ggtt_offset + wa_ctx->indirect_ctx.offset) |
+ (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
- CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
- /* PDP values well be assigned later if needed */
- CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
- CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
- CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
- CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
- CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
- CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
- CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
- CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
+ regs[pos_bb_per_ctx + 4] =
+ intel_lr_indirect_ctx_offset(engine) << 6;
+ }
+}
+static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
+{
if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
@@ -3267,15 +3934,47 @@ static void execlists_init_reg_state(u32 *regs,
ASSIGN_CTX_PDP(ppgtt, regs, 1);
ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
+}
- if (rcs) {
- regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
+}
+
+static void execlists_init_reg_state(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool close)
+{
+ /*
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
+ *
+ * Must keep consistent with virtual_update_register_offsets().
+ */
+ u32 *bbe = set_offsets(regs, reg_offsets(engine), engine);
+
+ if (close) { /* Close the batch; used mainly by live_lrc_layout() */
+ *bbe = MI_BATCH_BUFFER_END;
+ if (INTEL_GEN(engine->i915) >= 10)
+ *bbe |= BIT(0);
}
- regs[CTX_END] = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(engine->i915) >= 10)
- regs[CTX_END] |= BIT(0);
+ init_common_reg_state(regs, engine, ring);
+ init_ppgtt_reg_state(regs, vm_alias(ce->vm));
+
+ init_wa_bb_reg_state(regs, engine,
+ INTEL_GEN(engine->i915) >= 12 ?
+ GEN12_CTX_BB_PER_CTX_PTR :
+ CTX_BB_PER_CTX_PTR);
}
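Editorial illustration of the image layout the comment above describes, using the gen8 to gen11 offsets from intel_lrc_reg.h further down in this patch; the dword indices come from that header, and writes such as those in __execlists_update_reg_state() earlier in the patch then touch only the value slot:

	/* dword 0x01: MI_LOAD_REGISTER_IMM(n) | MI_LRI_FORCE_POSTED        */
	/* dword 0x02: mmio offset of RING_CONTEXT_CONTROL(base)            */
	/* dword 0x03: its value           -> regs[CTX_CONTEXT_CONTROL]     */
	/* dword 0x04: mmio offset of RING_HEAD(base)                       */
	/* dword 0x05: its value           -> regs[CTX_RING_HEAD]           */
	/* ... further (offset, value) pairs, then MI_BATCH_BUFFER_END      */
	/* when 'close' is set.                                             */
	regs[CTX_RING_HEAD] = ring->head;	/* writes the value slot only */
	regs[CTX_RING_TAIL] = ring->tail;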
static int
@@ -3284,6 +3983,7 @@ populate_lr_context(struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
+ bool inhibit = true;
void *vaddr;
u32 *regs;
int ret;
@@ -3298,12 +3998,6 @@ populate_lr_context(struct intel_context *ce,
set_redzone(vaddr, engine);
if (engine->default_state) {
- /*
- * We only want to copy over the template context state;
- * skipping over the headers reserved for GuC communication,
- * leaving those as zero.
- */
- const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
void *defaults;
defaults = i915_gem_object_pin_map(engine->default_state,
@@ -3313,23 +4007,22 @@ populate_lr_context(struct intel_context *ce,
goto err_unpin_ctx;
}
- memcpy(vaddr + start, defaults + start, engine->context_size);
+ memcpy(vaddr, defaults, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
+ inhibit = false;
}
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ce, engine, ring);
- if (!engine->default_state)
- regs[CTX_CONTEXT_CONTROL + 1] |=
+ execlists_init_reg_state(regs, ce, engine, ring, inhibit);
+ if (inhibit)
+ regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
ret = 0;
err_unpin_ctx:
- __i915_gem_object_flush_map(ctx_obj,
- LRC_HEADER_PAGES * PAGE_SIZE,
- engine->context_size);
+ __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
return ret;
}
@@ -3346,11 +4039,6 @@ static int __execlists_context_alloc(struct intel_context *ce,
GEM_BUG_ON(ce->state);
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
- /*
- * Before the actual start of the context image, we insert a few pages
- * for our own use and for sharing with the GuC.
- */
- context_size += LRC_HEADER_PAGES * PAGE_SIZE;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
@@ -3462,8 +4150,9 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
return;
swap(ve->siblings[swp], ve->siblings[0]);
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- ve->siblings[0]);
+ if (!intel_engine_has_relative_mmio(ve->siblings[0]))
+ virtual_update_register_offsets(ve->context.lrc_reg_state,
+ ve->siblings[0]);
}
static int virtual_context_pin(struct intel_context *ce)
@@ -3714,6 +4403,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.i915 = ctx->i915;
ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
@@ -3737,6 +4427,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+ intel_engine_init_breadcrumbs(&ve->base);
intel_engine_init_execlists(&ve->base);
@@ -3899,6 +4590,18 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
return 0;
}
+struct intel_engine_cs *
+intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
+ unsigned int sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+
+ if (sibling >= ve->num_siblings)
+ return NULL;
+
+ return ve->siblings[sibling];
+}
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -3987,6 +4690,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
u32 head,
bool scrub)
{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
/*
* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
@@ -3995,16 +4700,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- if (scrub) {
- u32 *regs = ce->lrc_reg_state;
-
- if (engine->pinned_default_state) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- }
- execlists_init_reg_state(regs, ce, engine, ce->ring);
- }
+ if (scrub)
+ restore_default_state(ce, engine);
/* Rerun the request; its payload has been neutered (if guilty). */
ce->ring->head = head;
@@ -4013,6 +4710,13 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
__execlists_update_reg_state(ce, engine);
}
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission ==
+ intel_execlists_set_default_submission;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2bba82bcc16..04511d8ebdc1 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -43,6 +43,7 @@ struct intel_engine_cs;
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8)
#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
@@ -66,6 +67,12 @@ struct intel_engine_cs;
#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
INTEL_CONTEXT_SCHEDULE_OUT,
@@ -79,30 +86,15 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
-
-/*
- * We allocate a header at the start of the context image for our own
- * use, therefore the actual location of the logical state is offset
- * from the start of the VMA. The layout is
- *
- * | [guc] | [hwsp] [logical state] |
- * |<- our header ->|<- context image ->|
- *
- */
-/* The first page is used for sharing data with the GuC */
-#define LRC_GUCSHR_PN (0)
-#define LRC_GUCSHR_SZ (1)
/* At the start of the context image is its per-process HWS page */
-#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
+#define LRC_PPHWSP_PN (0)
#define LRC_PPHWSP_SZ (1)
-/* Finally we have the logical state for the context */
+/* After the PPHWSP we have the logical state for the context */
#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
-/*
- * Currently we include the PPHWSP in __intel_engine_context_size() so
- * the size of the header is synonymous with the start of the PPHWSP.
- */
-#define LRC_HEADER_PAGES LRC_PPHWSP_PN
+/* Space within PPHWSP reserved to be used as scratch */
+#define LRC_PPHWSP_SCRATCH 0x34
+#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
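Editorial note: with PIPE_CONTROL_STORE_DATA_INDEX the post-sync address is treated as an offset into the per-process HWSP rather than a GGTT address, so this fixed byte offset (0x34 * 4 = 0xd0) is enough to land the qword write inside the context's own first page. A hedged sketch mirroring the .c changes above (helper name hypothetical):

static u32 *emit_scratch_qw_write(u32 *cs)
{
	/* address is an index into the PPHWSP, not a GGTT address */
	return gen8_emit_pipe_control(cs,
				      PIPE_CONTROL_CS_STALL |
				      PIPE_CONTROL_QW_WRITE |
				      PIPE_CONTROL_STORE_DATA_INDEX,
				      LRC_PPHWSP_SCRATCH_ADDR);
}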
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
@@ -131,4 +123,11 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
const struct intel_engine_cs *sibling);
+struct intel_engine_cs *
+intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
+ unsigned int sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index b8f20ad71169..06ab0276e10e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,55 +9,41 @@
#include <linux/types.h>
-/* GEN8+ Reg State Context */
-#define CTX_LRI_HEADER_0 0x01
-#define CTX_CONTEXT_CONTROL 0x02
-#define CTX_RING_HEAD 0x04
-#define CTX_RING_TAIL 0x06
-#define CTX_RING_BUFFER_START 0x08
-#define CTX_RING_BUFFER_CONTROL 0x0a
-#define CTX_BB_HEAD_U 0x0c
-#define CTX_BB_HEAD_L 0x0e
-#define CTX_BB_STATE 0x10
-#define CTX_SECOND_BB_HEAD_U 0x12
-#define CTX_SECOND_BB_HEAD_L 0x14
-#define CTX_SECOND_BB_STATE 0x16
-#define CTX_BB_PER_CTX_PTR 0x18
-#define CTX_RCS_INDIRECT_CTX 0x1a
-#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
-#define CTX_LRI_HEADER_1 0x21
-#define CTX_CTX_TIMESTAMP 0x22
-#define CTX_PDP3_UDW 0x24
-#define CTX_PDP3_LDW 0x26
-#define CTX_PDP2_UDW 0x28
-#define CTX_PDP2_LDW 0x2a
-#define CTX_PDP1_UDW 0x2c
-#define CTX_PDP1_LDW 0x2e
-#define CTX_PDP0_UDW 0x30
-#define CTX_PDP0_LDW 0x32
-#define CTX_LRI_HEADER_2 0x41
-#define CTX_R_PWR_CLK_STATE 0x42
-#define CTX_END 0x44
-
-#define CTX_REG(reg_state, pos, reg, val) do { \
- u32 *reg_state__ = (reg_state); \
- const u32 pos__ = (pos); \
- (reg_state__)[(pos__) + 0] = i915_mmio_reg_offset(reg); \
- (reg_state__)[(pos__) + 1] = (val); \
-} while (0)
+/* GEN8 to GEN11 Reg State Context */
+#define CTX_CONTEXT_CONTROL (0x02 + 1)
+#define CTX_RING_HEAD (0x04 + 1)
+#define CTX_RING_TAIL (0x06 + 1)
+#define CTX_RING_BUFFER_START (0x08 + 1)
+#define CTX_RING_BUFFER_CONTROL (0x0a + 1)
+#define CTX_BB_STATE (0x10 + 1)
+#define CTX_BB_PER_CTX_PTR (0x18 + 1)
+#define CTX_PDP3_UDW (0x24 + 1)
+#define CTX_PDP3_LDW (0x26 + 1)
+#define CTX_PDP2_UDW (0x28 + 1)
+#define CTX_PDP2_LDW (0x2a + 1)
+#define CTX_PDP1_UDW (0x2c + 1)
+#define CTX_PDP1_LDW (0x2e + 1)
+#define CTX_PDP0_UDW (0x30 + 1)
+#define CTX_PDP0_LDW (0x32 + 1)
+#define CTX_R_PWR_CLK_STATE (0x42 + 1)
+
+#define GEN9_CTX_RING_MI_MODE 0x54
+
+/* GEN12+ Reg State Context */
+#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1)
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
- (reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \
- (reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \
} while (0)
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = px_dma(ppgtt->pd); \
- (reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
- (reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \
} while (0)
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index cea184a7dde9..2b977991b785 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -26,6 +26,7 @@
#include "intel_gt.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
+#include "intel_ring.h"
/* structures required */
struct drm_i915_mocs_entry {
@@ -279,10 +280,9 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
GEN11_MOCS_ENTRIES
};
-static bool get_mocs_settings(struct intel_gt *gt,
+static bool get_mocs_settings(const struct drm_i915_private *i915,
struct drm_i915_mocs_table *table)
{
- struct drm_i915_private *i915 = gt->i915;
bool result = false;
if (INTEL_GEN(i915) >= 12) {
@@ -323,9 +323,9 @@ static bool get_mocs_settings(struct intel_gt *gt,
return result;
}
-static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
+static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
{
- switch (engine_id) {
+ switch (engine->id) {
case RCS0:
return GEN9_GFX_MOCS(index);
case VCS0:
@@ -339,7 +339,7 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
case VCS2:
return GEN11_MFX2_MOCS(index);
default:
- MISSING_CASE(engine_id);
+ MISSING_CASE(engine->id);
return INVALID_MMIO_REG;
}
}
@@ -357,118 +357,25 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table,
return table->table[I915_MOCS_PTE].control_value;
}
-/**
- * intel_mocs_init_engine() - emit the mocs control table
- * @engine: The engine for whom to emit the registers.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address.
- */
-void intel_mocs_init_engine(struct intel_engine_cs *engine)
+static void init_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
{
- struct intel_gt *gt = engine->gt;
- struct intel_uncore *uncore = gt->uncore;
- struct drm_i915_mocs_table table;
- unsigned int index;
- u32 unused_value;
-
- /* Platforms with global MOCS do not need per-engine initialization. */
- if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
- return;
-
- /* Called under a blanket forcewake */
- assert_forcewakes_active(uncore, FORCEWAKE_ALL);
-
- if (!get_mocs_settings(gt, &table))
- return;
-
- /* Set unused values to PTE */
- unused_value = table.table[I915_MOCS_PTE].control_value;
-
- for (index = 0; index < table.size; index++) {
- u32 value = get_entry_control(&table, index);
+ struct intel_uncore *uncore = engine->uncore;
+ u32 unused_value = table->table[I915_MOCS_PTE].control_value;
+ unsigned int i;
+ for (i = 0; i < table->size; i++)
intel_uncore_write_fw(uncore,
- mocs_register(engine->id, index),
- value);
- }
+ mocs_register(engine, i),
+ get_entry_control(table, i));
- /* All remaining entries are also unused */
- for (; index < table.n_entries; index++)
+ /* All remaining entries are unused */
+ for (; i < table->n_entries; i++)
intel_uncore_write_fw(uncore,
- mocs_register(engine->id, index),
+ mocs_register(engine, i),
unused_value);
}
-static void intel_mocs_init_global(struct intel_gt *gt)
-{
- struct intel_uncore *uncore = gt->uncore;
- struct drm_i915_mocs_table table;
- unsigned int index;
-
- GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
-
- if (!get_mocs_settings(gt, &table))
- return;
-
- if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
- return;
-
- for (index = 0; index < table.size; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[index].control_value);
-
- /*
- * Ok, now set the unused entries to the invalid entry (index 0). These
- * entries are officially undefined and no contract for the contents and
- * settings is given for these entries.
- */
- for (; index < table.n_entries; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[0].control_value);
-}
-
-static int emit_mocs_control_table(struct i915_request *rq,
- const struct drm_i915_mocs_table *table)
-{
- enum intel_engine_id engine = rq->engine->id;
- unsigned int index;
- u32 unused_value;
- u32 *cs;
-
- if (GEM_WARN_ON(table->size > table->n_entries))
- return -ENODEV;
-
- /* Set unused values to PTE */
- unused_value = table->table[I915_MOCS_PTE].control_value;
-
- cs = intel_ring_begin(rq, 2 + 2 * table->n_entries);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries);
-
- for (index = 0; index < table->size; index++) {
- u32 value = get_entry_control(table, index);
-
- *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = value;
- }
-
- /* All remaining entries are also unused */
- for (; index < table->n_entries; index++) {
- *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = unused_value;
- }
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
/*
* Get l3cc_value from MOCS entry taking into account when it's not used:
* I915_MOCS_PTE's value is returned in this case.
@@ -486,141 +393,99 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
u16 low,
u16 high)
{
- return low | high << 16;
+ return low | (u32)high << 16;
}
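Editorial worked example of the packing (values illustrative); the added (u32) cast also keeps the shift in unsigned arithmetic, so a high entry of 0x8000 or above cannot shift into the sign bit of an int:

	/* low = 0x0030 (entry 2*i), high = 0x0012 (entry 2*i + 1)        */
	/* l3cc_combine(table, 0x0030, 0x0012) == 0x00120030              */
	/* one dword written to GEN9_LNCFCMOCS(i) covers both entries     */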
-static int emit_mocs_l3cc_table(struct i915_request *rq,
- const struct drm_i915_mocs_table *table)
+static void init_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
{
- u16 unused_value;
+ struct intel_uncore *uncore = engine->uncore;
+ u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value;
unsigned int i;
- u32 *cs;
-
- if (GEM_WARN_ON(table->size > table->n_entries))
- return -ENODEV;
-
- /* Set unused values to PTE */
- unused_value = table->table[I915_MOCS_PTE].l3cc_value;
-
- cs = intel_ring_begin(rq, 2 + table->n_entries);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2);
for (i = 0; i < table->size / 2; i++) {
u16 low = get_entry_l3cc(table, 2 * i);
u16 high = get_entry_l3cc(table, 2 * i + 1);
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, low, high);
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, low, high));
}
/* Odd table size - 1 left over */
- if (table->size & 0x01) {
+ if (table->size & 1) {
u16 low = get_entry_l3cc(table, 2 * i);
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, low, unused_value);
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, low, unused_value));
i++;
}
/* All remaining entries are also unused */
- for (; i < table->n_entries / 2; i++) {
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, unused_value, unused_value);
- }
+ for (; i < table->n_entries / 2; i++)
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, unused_value,
+ unused_value));
+}
+
+void intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_mocs_table table;
+
+ /* Called under a blanket forcewake */
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ if (!get_mocs_settings(engine->i915, &table))
+ return;
+
+ /* Platforms with global MOCS do not need per-engine initialization. */
+ if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ init_mocs_table(engine, &table);
- return 0;
+ if (engine->class == RENDER_CLASS)
+ init_l3cc_table(engine, &table);
}
-static void intel_mocs_init_l3cc_table(struct intel_gt *gt)
+static void intel_mocs_init_global(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
- unsigned int i;
- u16 unused_value;
+ unsigned int index;
- if (!get_mocs_settings(gt, &table))
+ /*
+ * LLC and eDRAM control values are not applicable to dgfx
+ */
+ if (IS_DGFX(gt->i915))
return;
- /* Set unused values to PTE */
- unused_value = table.table[I915_MOCS_PTE].l3cc_value;
-
- for (i = 0; i < table.size / 2; i++) {
- u16 low = get_entry_l3cc(&table, 2 * i);
- u16 high = get_entry_l3cc(&table, 2 * i + 1);
+ GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, high));
- }
+ if (!get_mocs_settings(gt->i915, &table))
+ return;
- /* Odd table size - 1 left over */
- if (table.size & 0x01) {
- u16 low = get_entry_l3cc(&table, 2 * i);
+ if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
+ return;
+ for (index = 0; index < table.size; index++)
intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, unused_value));
- i++;
- }
+ GEN12_GLOBAL_MOCS(index),
+ table.table[index].control_value);
- /* All remaining entries are also unused */
- for (; i < table.n_entries / 2; i++)
+ /*
+ * Ok, now set the unused entries to the invalid entry (index 0). These
+ * entries are officially undefined and no contract is given for their
+ * contents or settings.
+ */
+ for (; index < table.n_entries; index++)
intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, unused_value,
- unused_value));
-}
-
-/**
- * intel_mocs_emit() - program the MOCS register.
- * @rq: Request to use to set up the MOCS tables.
- *
- * This function will emit a batch buffer with the values required for
- * programming the MOCS register values for all the currently supported
- * rings.
- *
- * These registers are partially stored in the RCS context, so they are
- * emitted at the same time so that when a context is created these registers
- * are set up. These registers have to be emitted into the start of the
- * context as setting the ELSP will re-init some of these registers back
- * to the hw values.
- *
- * Return: 0 on success, otherwise the error status.
- */
-int intel_mocs_emit(struct i915_request *rq)
-{
- struct drm_i915_mocs_table t;
- int ret;
-
- if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ||
- rq->engine->class != RENDER_CLASS)
- return 0;
-
- if (get_mocs_settings(rq->engine->gt, &t)) {
- /* Program the RCS control registers */
- ret = emit_mocs_control_table(rq, &t);
- if (ret)
- return ret;
-
- /* Now program the l3cc registers */
- ret = emit_mocs_l3cc_table(rq, &t);
- if (ret)
- return ret;
- }
-
- return 0;
+ GEN12_GLOBAL_MOCS(index),
+ table.table[0].control_value);
}
void intel_mocs_init(struct intel_gt *gt)
{
- intel_mocs_init_l3cc_table(gt);
-
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
intel_mocs_init_global(gt);
}
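Editorial sketch of the resulting initialisation split (function name hypothetical; the engine iteration mirrors the rc6 code later in this series): intel_mocs_init() handles the gen12+ global table, while intel_mocs_init_engine() programs the per-engine control registers and, on the render engine, the L3CC registers:

static void mocs_setup_sketch(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	intel_mocs_init(gt);			/* global MOCS on gen12+ only */
	for_each_engine(engine, gt, id)
		intel_mocs_init_engine(engine);	/* per-engine + render L3CC */
}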
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 2ae816b7ca19..83371f3e6ba1 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,13 +49,10 @@
* context handling keep the MOCS in step.
*/
-struct i915_request;
struct intel_engine_cs;
struct intel_gt;
void intel_mocs_init(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
-int intel_mocs_emit(struct i915_request *rq);
-
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
new file mode 100644
index 000000000000..700104b90163
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -0,0 +1,787 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_sideband.h"
+
+/**
+ * DOC: RC6
+ *
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, drawing down to 0V while in this stage. The
+ * stage is entered automatically when the GPU is idle and RC6 support is
+ * enabled, and as soon as a new workload arises the GPU wakes up
+ * automatically as well.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ from
+ * one another in the latency required to enter and leave RC6 and in the
+ * voltage consumed by the GPU in each state.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up.
+ */
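Editorial sketch of how those flags combine in practice, mirroring gen6_rc6_enable() further down in this file; the deeper states are only requested when the platform advertises them (function name hypothetical):

static void sketch_enable_rc6_states(struct drm_i915_private *i915,
				     struct intel_uncore *uncore)
{
	u32 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;

	if (HAS_RC6p(i915))
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;	/* deep RC6 */
	if (HAS_RC6pp(i915))
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;	/* deepest RC6 */

	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL,
			      rc6_mask |
			      GEN6_RC_CTL_EI_MODE(1) |
			      GEN6_RC_CTL_HW_ENABLE);
}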
+
+static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
+{
+ return container_of(rc6, struct intel_gt, rc6);
+}
+
+static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->uncore;
+}
+
+static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->i915;
+}
+
+static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
+static void gen11_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1));
+
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE);
+}
+
+static void gen9_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6_mode;
+
+ /* 2b: Program RC6 thresholds.*/
+ if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ } else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
+ /*
+ * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+ * when CPG is enabled
+ */
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+ } else {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ }
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* WaRsUseTimeoutMode:cnl (pre-prod) */
+ if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
+ else
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ rc6_mode);
+
+ /*
+ * WaRsDisableCoarsePowerGating:skl,cnl
+ * - Render/Media PG need to be disabled with RC6.
+ */
+ if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
+}
+
+static void gen8_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+
+ /* 3: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_RC6_ENABLE);
+}
+
+static void gen6_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6vids, rc6_mask;
+ int ret;
+
+ set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC1e_THRESHOLD, 1000);
+ if (IS_IVYBRIDGE(i915))
+ set(uncore, GEN6_RC6_THRESHOLD, 125000);
+ else
+ set(uncore, GEN6_RC6_THRESHOLD, 50000);
+ set(uncore, GEN6_RC6p_THRESHOLD, 150000);
+ set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ /* We don't use those on Haswell */
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ if (HAS_RC6p(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ if (HAS_RC6pp(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ set(uncore, GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
+ if (IS_GEN(i915, 6) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN(i915, 6) &&
+ (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+}
+
+/* Check that the pcbr address is not empty. */
+static int chv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ resource_size_t pctx_paddr, paddr;
+ resource_size_t pctx_size = 32 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+ paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+ GEM_BUG_ON(paddr > U32_MAX);
+
+ pctx_paddr = (paddr & ~4095);
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+ }
+
+ return 0;
+}
+
+static int vlv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_gem_object *pctx;
+ resource_size_t pctx_paddr;
+ resource_size_t pctx_size = 24 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if (pcbr) {
+ /* BIOS set it up already, grab the pre-alloc'd space */
+ resource_size_t pcbr_offset;
+
+ pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+ pctx = i915_gem_object_create_stolen_for_preallocated(i915,
+ pcbr_offset,
+ I915_GTT_OFFSET_NONE,
+ pctx_size);
+ if (IS_ERR(pctx))
+ return PTR_ERR(pctx);
+
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
+ /*
+ * From the Gunit register HAS:
+ * The Gfx driver is expected to program this register and ensure
+ * proper allocation within Gfx stolen memory. For example, this
+ * register should be programmed such that the PCBR range does not
+ * overlap with other ranges, such as the frame buffer, protected
+ * memory, or any other relevant ranges.
+ */
+ pctx = i915_gem_object_create_stolen(i915, pctx_size);
+ if (IS_ERR(pctx)) {
+ DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ return PTR_ERR(pctx);
+ }
+
+ GEM_BUG_ON(range_overflows_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
+ pctx_paddr = i915->dsm.start + pctx->stolen->start;
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+
+out:
+ rc6->pctx = pctx;
+ return 0;
+}
+
+static void chv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2a: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /* TO threshold set to 500 us (0x186 * 1.28 us) */
+ set(uncore, GEN6_RC6_THRESHOLD, 0x186);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* 3: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE);
+}
+
+static void vlv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 0x557);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ set(uncore, GEN6_RC_CONTROL,
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
+}
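
The VLV_COUNTER_CONTROL writes above go through _MASKED_BIT_ENABLE(), i.e. the masked-write convention where the upper 16 bits of the written value select which bits to change and the lower 16 bits carry their new state, so unrelated bits are left untouched. A hedged, stand-alone sketch of that convention; the helper names echo the driver's macros but the implementation here is only illustrative:

#include <stdint.h>
#include <stdio.h>

/* Select 'bits' in the high half, set them in the low half. */
static uint32_t masked_bit_enable(uint32_t bits)
{
        return (bits << 16) | bits;
}

/* Select 'bits' in the high half, clear them in the low half. */
static uint32_t masked_bit_disable(uint32_t bits)
{
        return bits << 16;
}

int main(void)
{
        printf("enable 0x1  -> 0x%08x\n", (unsigned int)masked_bit_enable(0x1));  /* 0x00010001 */
        printf("disable 0x1 -> 0x%08x\n", (unsigned int)masked_bit_disable(0x1)); /* 0x00010000 */
        return 0;
}
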
+
+static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ u32 rc6_ctx_base, rc_ctl, rc_sw_target;
+ bool enable_rc6 = true;
+
+ rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
+ rc_sw_target &= RC_SW_TARGET_STATE_MASK;
+ rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
+ DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+ onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+ onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+ rc_sw_target);
+
+ if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+ DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ /*
+ * The exact context size is not known for BXT, so assume a page size
+ * for this check.
+ */
+ rc6_ctx_base =
+ intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+ if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
+ rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+ DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+ enable_rc6 = false;
+ }
+
+ if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
+ DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
+ DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
+ DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
+ DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ return enable_rc6;
+}
+
+static bool rc6_supported(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!HAS_RC6(i915))
+ return false;
+
+ if (intel_vgpu_active(i915))
+ return false;
+
+ if (is_mock_gt(rc6_to_gt(rc6)))
+ return false;
+
+ if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
+ dev_notice(i915->drm.dev,
+ "RC6 and powersaving disabled by BIOS\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void rpm_get(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(rc6->wakeref);
+ pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
+ rc6->wakeref = true;
+}
+
+static void rpm_put(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(!rc6->wakeref);
+ pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
+ rc6->wakeref = false;
+}
+
+static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6)
+{
+ return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO);
+}
+
+static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (intel_rc6_ctx_corrupted(rc6)) {
+ DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+ rc6->ctx_corrupted = true;
+ }
+}
+
+/**
+ * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @rc6: rc6 state
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6)
+{
+ if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) {
+ DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+ rc6->ctx_corrupted = false;
+ }
+}
+
+/**
+ * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @rc6: rc6 state
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ */
+void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (rc6->ctx_corrupted)
+ return;
+
+ if (!intel_rc6_ctx_corrupted(rc6))
+ return;
+
+ DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+
+ intel_rc6_disable(rc6);
+ rc6->ctx_corrupted = true;
+
+ return;
+}
+
+static void __intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (INTEL_GEN(i915) >= 9)
+ set(uncore, GEN9_PG_ENABLE, 0);
+ set(uncore, GEN6_RC_CONTROL, 0);
+ set(uncore, GEN6_RC_STATE, 0);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+}
+
+void intel_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ int err;
+
+ /* Disable runtime-pm until we can save the GPU state with rc6 pctx */
+ rpm_get(rc6);
+
+ if (!rc6_supported(rc6))
+ return;
+
+ intel_rc6_ctx_wa_init(rc6);
+
+ if (IS_CHERRYVIEW(i915))
+ err = chv_rc6_init(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ err = vlv_rc6_init(rc6);
+ else
+ err = 0;
+
+ /* Sanitize rc6, ensure it is disabled before we are ready. */
+ __intel_rc6_disable(rc6);
+
+ rc6->supported = err == 0;
+}
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6)
+{
+ if (rc6->enabled) { /* unbalanced suspend/resume */
+ rpm_get(rc6);
+ rc6->enabled = false;
+ }
+
+ if (rc6->supported)
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->supported)
+ return;
+
+ GEM_BUG_ON(rc6->enabled);
+
+ if (rc6->ctx_corrupted)
+ return;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rc6_enable(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 11)
+ gen11_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 9)
+ gen9_rc6_enable(rc6);
+ else if (IS_BROADWELL(i915))
+ gen8_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_rc6_enable(rc6);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ /* rc6 is ready, runtime-pm is go! */
+ rpm_put(rc6);
+ rc6->enabled = true;
+}
+
+void intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ if (!rc6->enabled)
+ return;
+
+ rpm_get(rc6);
+ rc6->enabled = false;
+
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_fini(struct intel_rc6 *rc6)
+{
+ struct drm_i915_gem_object *pctx;
+
+ intel_rc6_disable(rc6);
+
+ pctx = fetch_and_zero(&rc6->pctx);
+ if (pctx)
+ i915_gem_object_put(pctx);
+
+ if (rc6->wakeref)
+ rpm_put(rc6);
+}
+
+static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
+{
+ u32 lower, upper, tmp;
+ int loop = 2;
+
+ /*
+ * The registers accessed do not need forcewake. We borrow the
+ * uncore lock to prevent concurrent access to the range registers.
+ */
+ lockdep_assert_held(&uncore->lock);
+
+ /*
+ * vlv and chv residency counters are 40 bits in width.
+ * With a control bit, we can select either the upper or lower
+ * 32-bit window into this counter.
+ *
+ * Although we always use the counter in high-range mode elsewhere,
+ * userspace may attempt to read the value before rc6 is initialised,
+ * before we have set the default VLV_COUNTER_CONTROL value. So always
+ * set the high bit to be safe.
+ */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ do {
+ tmp = upper;
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ lower = intel_uncore_read_fw(uncore, reg);
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ } while (upper != tmp && --loop);
+
+ /*
+ * Everywhere else we always use VLV_COUNTER_CONTROL with the
+ * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
+ * now.
+ */
+
+ return lower | (u64)upper << 8;
+}
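
The 40-bit counter is exposed through two overlapping 32-bit windows: the low window returns bits [31:0] and the high window bits [39:8], which is why the stable readings are recombined as lower | upper << 8. A small user-space sketch of that recombination, using an arbitrary 40-bit example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t counter = 0x123456789aULL;             /* example 40-bit value */
        uint32_t lower = counter & 0xffffffffu;         /* low window: bits 31:0 */
        uint32_t upper = (counter >> 8) & 0xffffffffu;  /* high window: bits 39:8 */
        uint64_t raw = lower | (uint64_t)upper << 8;

        printf("recombined %s\n", raw == counter ? "ok" : "mismatch");
        return 0;
}
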
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ u64 time_hw, prev_hw, overflow_hw;
+ unsigned int fw_domains;
+ unsigned long flags;
+ unsigned int i;
+ u32 mul, div;
+
+ if (!rc6->supported)
+ return 0;
+
+ /*
+ * Store previous hw counter values for counter wrap-around handling.
+ *
+ * There are only four interesting registers and they live next to each
+ * other, so we can use the offset relative to the smallest one as the
+ * index into driver storage.
+ */
+ i = (i915_mmio_reg_offset(reg) -
+ i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+ if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+ return 0;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ mul = 1000000;
+ div = i915->czclk_freq;
+ overflow_hw = BIT_ULL(40);
+ time_hw = vlv_residency_raw(uncore, reg);
+ } else {
+ /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+ if (IS_GEN9_LP(i915)) {
+ mul = 10000;
+ div = 12;
+ } else {
+ mul = 1280;
+ div = 1;
+ }
+
+ overflow_hw = BIT_ULL(32);
+ time_hw = intel_uncore_read_fw(uncore, reg);
+ }
+
+ /*
+ * Counter wrap handling.
+ *
+ * Note that this relies on a sufficient frequency of queries,
+ * otherwise the counters can still wrap.
+ */
+ prev_hw = rc6->prev_hw_residency[i];
+ rc6->prev_hw_residency[i] = time_hw;
+
+ /* RC6 delta from last sample. */
+ if (time_hw >= prev_hw)
+ time_hw -= prev_hw;
+ else
+ time_hw += overflow_hw - prev_hw;
+
+ /* Add delta to RC6 extended raw driver copy. */
+ time_hw += rc6->cur_residency[i];
+ rc6->cur_residency[i] = time_hw;
+
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, flags);
+
+ return mul_u64_u32_div(time_hw, mul, div);
+}
+
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
+}
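
intel_rc6_residency_ns() scales the wrap-corrected tick count into nanoseconds with a platform-specific mul/div pair: 1000000/czclk_freq on VLV/CHV (CZ clock units), 10000/12 on Gen9LP (833.33 ns per tick) and 1280/1 elsewhere (1.28 us per tick). A stand-alone sketch of that scaling; residency_to_ns() is an invented name, and the driver itself uses mul_u64_u32_div() so the multiplication cannot overflow:

#include <stdint.h>
#include <stdio.h>

static uint64_t residency_to_ns(uint64_t ticks, uint32_t mul, uint32_t div)
{
        /* Fine for small examples; the driver uses mul_u64_u32_div() instead. */
        return ticks * mul / div;
}

int main(void)
{
        /* 1000 ticks at 1.28 us/tick -> 1,280,000 ns */
        printf("%llu ns\n", (unsigned long long)residency_to_ns(1000, 1280, 1));
        /* 1200 ticks on Gen9LP (833.33 ns/tick) -> 1,000,000 ns */
        printf("%llu ns\n", (unsigned long long)residency_to_ns(1200, 10000, 12));
        return 0;
}
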
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
new file mode 100644
index 000000000000..1370f6834a4c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_H
+#define INTEL_RC6_H
+
+#include "i915_reg.h"
+
+struct intel_engine_cs;
+struct intel_rc6;
+
+void intel_rc6_init(struct intel_rc6 *rc6);
+void intel_rc6_fini(struct intel_rc6 *rc6);
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6);
+void intel_rc6_enable(struct intel_rc6 *rc6);
+void intel_rc6_disable(struct intel_rc6 *rc6);
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
+
+void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6);
+void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6);
+
+#endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
new file mode 100644
index 000000000000..89ad5697a8d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_TYPES_H
+#define INTEL_RC6_TYPES_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "intel_engine_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_rc6 {
+ u64 prev_hw_residency[4];
+ u64 cur_residency[4];
+
+ struct drm_i915_gem_object *pctx;
+
+ bool supported : 1;
+ bool enabled : 1;
+ bool wakeref : 1;
+ bool ctx_corrupted : 1;
+};
+
+#endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 6d05f9c64178..c4edc35e7d89 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
+#include "intel_ring.h"
struct intel_renderstate {
const struct intel_renderstate_rodata *rodata;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8cea42379dd7..f03e000051c1 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -282,14 +282,14 @@ static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct intel_engine_cs *engine;
- const u32 hw_engine_mask[] = {
+ static const u32 hw_engine_mask[] = {
[RCS0] = GEN6_GRDOM_RENDER,
[BCS0] = GEN6_GRDOM_BLT,
[VCS0] = GEN6_GRDOM_MEDIA,
[VCS1] = GEN8_GRDOM_MEDIA2,
[VECS0] = GEN6_GRDOM_VECS,
};
+ struct intel_engine_cs *engine;
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
@@ -298,7 +298,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
}
@@ -307,7 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
return gen6_hw_domain_reset(gt, hw_mask);
}
-static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
+static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
@@ -316,6 +316,7 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
i915_reg_t sfc_usage;
u32 sfc_usage_bit;
u32 sfc_reset_bit;
+ int ret;
switch (engine->class) {
case VIDEO_DECODE_CLASS:
@@ -350,27 +351,33 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
}
/*
- * Tell the engine that a software reset is going to happen. The engine
- * will then try to force lock the SFC (if currently locked, it will
- * remain so until we tell the engine it is safe to unlock; if currently
- * unlocked, it will ignore this and all new lock requests). If SFC
- * ends up being locked to the engine we want to reset, we have to reset
- * it as well (we will unlock it once the reset sequence is completed).
+ * If the engine is using a SFC, tell the engine that a software reset
+ * is going to happen. The engine will then try to force lock the SFC.
+ * If SFC ends up being locked to the engine we want to reset, we have
+ * to reset it as well (we will unlock it once the reset sequence is
+ * completed).
*/
+ if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
+ return 0;
+
rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
- if (__intel_wait_for_register_fw(uncore,
- sfc_forced_lock_ack,
- sfc_forced_lock_ack_bit,
- sfc_forced_lock_ack_bit,
- 1000, 0, NULL)) {
- DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ ret = __intel_wait_for_register_fw(uncore,
+ sfc_forced_lock_ack,
+ sfc_forced_lock_ack_bit,
+ sfc_forced_lock_ack_bit,
+ 1000, 0, NULL);
+
+ /* Was the SFC released while we were trying to lock it? */
+ if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
return 0;
- }
- if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
- return sfc_reset_bit;
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ return ret;
+ }
+ *hw_mask |= sfc_reset_bit;
return 0;
}
@@ -406,7 +413,7 @@ static int gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- const u32 hw_engine_mask[] = {
+ static const u32 hw_engine_mask[] = {
[RCS0] = GEN11_GRDOM_RENDER,
[BCS0] = GEN11_GRDOM_BLT,
[VCS0] = GEN11_GRDOM_MEDIA,
@@ -425,17 +432,26 @@ static int gen11_reset_engines(struct intel_gt *gt,
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
- hw_mask |= gen11_lock_sfc(engine);
+ ret = gen11_lock_sfc(engine, &hw_mask);
+ if (ret)
+ goto sfc_unlock;
}
}
ret = gen6_hw_domain_reset(gt, hw_mask);
+sfc_unlock:
+ /*
+ * We unlock the SFC based on the lock status and not the result of
+ * gen11_lock_sfc to make sure that we clean up properly if something
+ * went wrong during the lock (e.g. lock acquired after timeout
+ * expiration).
+ */
if (engine_mask != ALL_ENGINES)
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
gen11_unlock_sfc(engine);
return ret;
@@ -494,7 +510,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
intel_engine_mask_t tmp;
int ret;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
goto skip_reset;
@@ -520,19 +536,30 @@ static int gen8_reset_engines(struct intel_gt *gt,
ret = gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
return ret;
}
+static int mock_reset(struct intel_gt *gt,
+ intel_engine_mask_t mask,
+ unsigned int retry)
+{
+ return 0;
+}
+
typedef int (*reset_func)(struct intel_gt *,
intel_engine_mask_t engine_mask,
unsigned int retry);
-static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
+static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
- if (INTEL_GEN(i915) >= 8)
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (is_mock_gt(gt))
+ return mock_reset;
+ else if (INTEL_GEN(i915) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(i915) >= 6)
return gen6_reset_engines;
@@ -555,7 +582,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
int ret = -ETIMEDOUT;
int retry;
- reset = intel_get_gpu_reset(gt->i915);
+ reset = intel_get_gpu_reset(gt);
if (!reset)
return -ENODEV;
@@ -575,17 +602,20 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
return ret;
}
-bool intel_has_gpu_reset(struct drm_i915_private *i915)
+bool intel_has_gpu_reset(const struct intel_gt *gt)
{
if (!i915_modparams.reset)
return NULL;
- return intel_get_gpu_reset(i915);
+ return intel_get_gpu_reset(gt);
}
-bool intel_has_reset_engine(struct drm_i915_private *i915)
+bool intel_has_reset_engine(const struct intel_gt *gt)
{
- return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
+ if (i915_modparams.reset < 2)
+ return false;
+
+ return INTEL_INFO(gt->i915)->has_reset_engine;
}
int intel_reset_guc(struct intel_gt *gt)
@@ -652,7 +682,7 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
@@ -682,10 +712,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
if (err)
return err;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(gt->i915);
+ i915_gem_restore_fences(gt->ggtt);
return err;
}
@@ -695,7 +725,7 @@ static void reset_finish_engine(struct intel_engine_cs *engine)
engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
- intel_engine_signal_breadcrumbs(engine);
+ intel_engine_breadcrumbs_irq(engine);
}
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
@@ -703,7 +733,7 @@ static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
reset_finish_engine(engine);
if (awake & engine->mask)
intel_engine_pm_put(engine);
@@ -739,7 +769,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
struct drm_printer p = drm_debug_printer(__func__);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
@@ -756,7 +786,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
__intel_gt_reset(gt, ALL_ENGINES);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->submit_request = nop_submit_request;
/*
@@ -768,7 +798,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->cancel_requests(engine);
reset_finish(gt, awake);
@@ -781,7 +811,7 @@ void intel_gt_set_wedged(struct intel_gt *gt)
intel_wakeref_t wakeref;
mutex_lock(&gt->reset.mutex);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
__intel_gt_set_wedged(gt);
mutex_unlock(&gt->reset.mutex);
}
@@ -791,11 +821,13 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl;
unsigned long flags;
+ bool ok;
if (!test_bit(I915_WEDGED, &gt->reset.flags))
return true;
- if (!gt->scratch) /* Never full initialised, recovery impossible */
+ /* Never fully initialised, recovery impossible */
+ if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
return false;
GEM_TRACE("start\n");
@@ -812,10 +844,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*/
spin_lock_irqsave(&timelines->lock, flags);
list_for_each_entry(tl, &timelines->active_list, link) {
- struct i915_request *rq;
+ struct dma_fence *fence;
- rq = i915_active_request_get_unlocked(&tl->last_request);
- if (!rq)
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
continue;
spin_unlock_irqrestore(&timelines->lock, flags);
@@ -827,8 +859,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* (I915_FENCE_TIMEOUT) so this wait should not be unbounded
* in the worst case.
*/
- dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(fence);
/* Restart iteration after droping lock */
spin_lock_irqsave(&timelines->lock, flags);
@@ -836,7 +868,18 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
}
spin_unlock_irqrestore(&timelines->lock, flags);
- intel_gt_sanitize(gt, false);
+ /* We must reset pending GPU events before restoring our submission */
+ ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ if (!ok) {
+ /*
+ * Warn CI about the unrecoverable wedged condition.
+ * Time for a reboot.
+ */
+ add_taint_for_CI(TAINT_WARN);
+ return false;
+ }
/*
* Undo nop_submit_request. We prevent all new i915 requests from
@@ -891,7 +934,7 @@ static int resume(struct intel_gt *gt)
enum intel_engine_id id;
int ret;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
ret = engine->resume(engine);
if (ret)
return ret;
@@ -941,7 +984,7 @@ void intel_gt_reset(struct intel_gt *gt,
awake = reset_prepare(gt);
- if (!intel_has_gpu_reset(gt->i915)) {
+ if (!intel_has_gpu_reset(gt)) {
if (i915_modparams.reset)
dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
@@ -970,7 +1013,7 @@ void intel_gt_reset(struct intel_gt *gt,
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(gt->i915);
+ ret = intel_gt_init_hw(gt);
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
@@ -981,8 +1024,6 @@ void intel_gt_reset(struct intel_gt *gt,
if (ret)
goto taint;
- intel_gt_queue_hangcheck(gt);
-
finish:
reset_finish(gt, awake);
unlock:
@@ -1149,7 +1190,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
@@ -1162,8 +1203,8 @@ void intel_gt_handle_error(struct intel_gt *gt,
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
@@ -1191,7 +1232,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
synchronize_rcu_expedited();
/* Prevent any other reset-engine attempt. */
- for_each_engine(engine, gt->i915, tmp) {
+ for_each_engine(engine, gt, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
wait_on_bit(&gt->reset.flags,
@@ -1201,7 +1242,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
intel_gt_reset_global(gt, engine_mask, msg);
- for_each_engine(engine, gt->i915, tmp)
+ for_each_engine(engine, gt, tmp)
clear_bit_unlock(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
@@ -1209,7 +1250,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
wake_up_all(&gt->reset.queue);
out:
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
@@ -1251,10 +1292,6 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
return -EIO;
- /* XXX intel_reset_finish() still takes struct_mutex!!! */
- if (mutex_is_locked(&gt->i915->drm.struct_mutex))
- return -EAGAIN;
-
if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
@@ -1263,6 +1300,14 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
return intel_gt_is_wedged(gt) ? -EIO : 0;
}
+void intel_gt_set_wedged_on_init(struct intel_gt *gt)
+{
+ BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
+ I915_WEDGED_ON_INIT);
+ intel_gt_set_wedged(gt);
+ set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+}
+
void intel_gt_init_reset(struct intel_gt *gt)
{
init_waitqueue_head(&gt->reset.queue);
@@ -1306,4 +1351,5 @@ void __intel_fini_wedge(struct intel_wedge_me *w)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
+#include "selftest_hangcheck.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 52c00199e069..8e8d5f761166 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -14,7 +14,6 @@
#include "intel_engine_types.h"
#include "intel_reset_types.h"
-struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
struct intel_gt;
@@ -45,6 +44,12 @@ void intel_gt_set_wedged(struct intel_gt *gt);
bool intel_gt_unset_wedged(struct intel_gt *gt);
int intel_gt_terminally_wedged(struct intel_gt *gt);
+/*
+ * There's no unset_wedged_on_init paired with this one.
+ * Once we're wedged on init, there's no going back.
+ */
+void intel_gt_set_wedged_on_init(struct intel_gt *gt);
+
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
int intel_reset_guc(struct intel_gt *gt);
@@ -68,10 +73,13 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
static inline bool __intel_reset_failed(const struct intel_reset *reset)
{
+ GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
+ !test_bit(I915_WEDGED, &reset->flags) : false);
+
return unlikely(test_bit(I915_WEDGED, &reset->flags));
}
-bool intel_has_gpu_reset(struct drm_i915_private *i915);
-bool intel_has_reset_engine(struct drm_i915_private *i915);
+bool intel_has_gpu_reset(const struct intel_gt *gt);
+bool intel_has_reset_engine(const struct intel_gt *gt);
#endif /* I915_RESET_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 31968356e0c0..f43bc3a0fe4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -29,11 +29,17 @@ struct intel_reset {
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
* i915_request_alloc(), this bit is checked and the sequence
* aborted (with -EIO reported to userspace) if set.
+ *
+ * #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no
+ * longer use the GPU - similar to the #I915_WEDGED bit. The difference is
+ * in the way we handle "forced" unwedging (e.g. through debugfs),
+ * which is not allowed if we failed to initialize.
*/
unsigned long flags;
#define I915_RESET_BACKOFF 0
#define I915_RESET_MODESET 1
#define I915_RESET_ENGINE 2
+#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2)
#define I915_WEDGED (BITS_PER_LONG - 1)
struct mutex mutex; /* serialises wedging/unwedging */
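
The per-engine reset bits grow upwards from I915_RESET_ENGINE while the terminal wedged markers sit in the top two bits of the word; the BUILD_BUG_ON added in intel_gt_set_wedged_on_init() guards against the two regions colliding. A stand-alone sketch of that layout check, with NUM_ENGINES as an assumed example value:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG           (sizeof(long) * CHAR_BIT)
#define RESET_ENGINE_BASE       2                       /* I915_RESET_ENGINE */
#define NUM_ENGINES             8                       /* example value only */
#define WEDGED_ON_INIT_BIT      (BITS_PER_LONG - 2)
#define WEDGED_BIT              (BITS_PER_LONG - 1)

int main(void)
{
        /* The per-engine reset bits must stay below the wedged markers. */
        printf("flag layout %s\n",
               RESET_ENGINE_BASE + NUM_ENGINES <= WEDGED_ON_INIT_BIT ?
               "ok" : "overlaps");
        return 0;
}
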
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
new file mode 100644
index 000000000000..ece20504d240
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -0,0 +1,323 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_engine.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
+
+unsigned int intel_ring_update_space(struct intel_ring *ring)
+{
+ unsigned int space;
+
+ space = __intel_ring_space(ring->head, ring->emit, ring->size);
+
+ ring->space = space;
+ return space;
+}
+
+int intel_ring_pin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+ unsigned int flags;
+ void *addr;
+ int ret;
+
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
+
+ flags = PIN_GLOBAL;
+
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+ if (vma->obj->stolen)
+ flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
+
+ ret = i915_vma_pin(vma, 0, 0, flags);
+ if (unlikely(ret))
+ goto err_unpin;
+
+ if (i915_vma_is_map_and_fenceable(vma))
+ addr = (void __force *)i915_vma_pin_iomap(vma);
+ else
+ addr = i915_gem_object_pin_map(vma->obj,
+ i915_coherent_map_type(vma->vm->i915));
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err_ring;
+ }
+
+ i915_vma_make_unshrinkable(vma);
+
+ GEM_BUG_ON(ring->vaddr);
+ ring->vaddr = addr;
+
+ return 0;
+
+err_ring:
+ i915_vma_unpin(vma);
+err_unpin:
+ atomic_dec(&ring->pin_count);
+ return ret;
+}
+
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ tail = intel_ring_wrap(ring, tail);
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
+void intel_ring_unpin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
+
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->emit);
+
+ i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_is_map_and_fenceable(vma))
+ i915_vma_unpin_iomap(vma);
+ else
+ i915_gem_object_unpin_map(vma->obj);
+
+ GEM_BUG_ON(!ring->vaddr);
+ ring->vaddr = NULL;
+
+ i915_vma_unpin(vma);
+ i915_vma_make_purgeable(vma);
+}
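
intel_ring_pin() only maps the ring on the transition from zero pins (atomic_fetch_inc() returns the previous count) and intel_ring_unpin() only tears it down when the count drops back to zero. A hedged, user-space sketch of that first-pin/last-unpin pattern using C11 atomics, not the driver's own code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pin_count;

static void pin(void)
{
        if (atomic_fetch_add(&pin_count, 1))    /* returns the old value */
                return;                         /* already pinned */
        printf("first pin: map the ring\n");
}

static void unpin(void)
{
        if (atomic_fetch_sub(&pin_count, 1) != 1)
                return;                         /* still pinned elsewhere */
        printf("last unpin: unmap the ring\n");
}

int main(void)
{
        pin(); pin(); unpin(); unpin();
        return 0;
}
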
+
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+{
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = ERR_PTR(-ENODEV);
+ if (i915_ggtt_has_aperture(ggtt))
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ring->ref);
+ ring->size = size;
+
+ /*
+ * Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = size;
+ if (IS_I830(i915) || IS_I845G(i915))
+ ring->effective_size -= 2 * CACHELINE_BYTES;
+
+ intel_ring_update_space(ring);
+
+ vma = create_ring_vma(engine->gt->ggtt, size);
+ if (IS_ERR(vma)) {
+ kfree(ring);
+ return ERR_CAST(vma);
+ }
+ ring->vma = vma;
+
+ return ring;
+}
+
+void intel_ring_free(struct kref *ref)
+{
+ struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
+
+ i915_vma_put(ring->vma);
+ kfree(ring);
+}
+
+static noinline int
+wait_for_space(struct intel_ring *ring,
+ struct intel_timeline *tl,
+ unsigned int bytes)
+{
+ struct i915_request *target;
+ long timeout;
+
+ if (intel_ring_update_space(ring) >= bytes)
+ return 0;
+
+ GEM_BUG_ON(list_empty(&tl->requests));
+ list_for_each_entry(target, &tl->requests, link) {
+ if (target->ring != ring)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ if (bytes <= __intel_ring_space(target->postfix,
+ ring->emit, ring->size))
+ break;
+ }
+
+ if (GEM_WARN_ON(&target->link == &tl->requests))
+ return -ENOSPC;
+
+ timeout = i915_request_wait(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
+
+ i915_request_retire_upto(target);
+
+ intel_ring_update_space(ring);
+ GEM_BUG_ON(ring->space < bytes);
+ return 0;
+}
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
+{
+ struct intel_ring *ring = rq->ring;
+ const unsigned int remain_usable = ring->effective_size - ring->emit;
+ const unsigned int bytes = num_dwords * sizeof(u32);
+ unsigned int need_wrap = 0;
+ unsigned int total_bytes;
+ u32 *cs;
+
+ /* Packets must be qword aligned. */
+ GEM_BUG_ON(num_dwords & 1);
+
+ total_bytes = bytes + rq->reserved_space;
+ GEM_BUG_ON(total_bytes > ring->effective_size);
+
+ if (unlikely(total_bytes > remain_usable)) {
+ const int remain_actual = ring->size - ring->emit;
+
+ if (bytes > remain_usable) {
+ /*
+ * Not enough space for the basic request. So need to
+ * flush out the remainder and then wait for
+ * base + reserved.
+ */
+ total_bytes += remain_actual;
+ need_wrap = remain_actual | 1;
+ } else {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate
+ * wrap and only need to effectively wait for the
+ * reserved size from the start of ringbuffer.
+ */
+ total_bytes = rq->reserved_space + remain_actual;
+ }
+ }
+
+ if (unlikely(total_bytes > ring->space)) {
+ int ret;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the
+ * request, as that cannot be allowed to fail. During request
+ * finalisation, reserved_space is set to 0 to stop the
+ * overallocation and the assumption is that then we never need
+ * to wait (which has the risk of failing with EINTR).
+ *
+ * See also i915_request_alloc() and i915_request_add().
+ */
+ GEM_BUG_ON(!rq->reserved_space);
+
+ ret = wait_for_space(ring,
+ i915_request_timeline(rq),
+ total_bytes);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(need_wrap)) {
+ need_wrap &= ~1;
+ GEM_BUG_ON(need_wrap > ring->space);
+ GEM_BUG_ON(ring->emit + need_wrap > ring->size);
+ GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
+
+ /* Fill the tail with MI_NOOP */
+ memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
+ ring->space -= need_wrap;
+ ring->emit = 0;
+ }
+
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ GEM_BUG_ON(ring->space < bytes);
+ cs = ring->vaddr + ring->emit;
+ GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
+ ring->emit += bytes;
+ ring->space -= bytes;
+
+ return cs;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct i915_request *rq)
+{
+ int num_dwords;
+ void *cs;
+
+ num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
+ if (num_dwords == 0)
+ return 0;
+
+ num_dwords = CACHELINE_DWORDS - num_dwords;
+ GEM_BUG_ON(num_dwords & 1);
+
+ cs = intel_ring_begin(rq, num_dwords);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
+ intel_ring_advance(rq, cs + num_dwords);
+
+ GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
+ return 0;
+}
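
intel_ring_cacheline_align() pads the ring with MI_NOOP dwords up to the next 64-byte cacheline boundary. A small stand-alone sketch of the padding computation; pad_dwords_to_cacheline() is a made-up helper name:

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES         64
#define CACHELINE_DWORDS        (CACHELINE_BYTES / 4)

static unsigned int pad_dwords_to_cacheline(unsigned int emit_bytes)
{
        unsigned int rem = (emit_bytes & (CACHELINE_BYTES - 1)) / 4;

        return rem ? CACHELINE_DWORDS - rem : 0; /* already aligned: no padding */
}

int main(void)
{
        printf("emit=0x68 -> pad %u dwords\n", pad_dwords_to_cacheline(0x68)); /* 6 */
        printf("emit=0x40 -> pad %u dwords\n", pad_dwords_to_cacheline(0x40)); /* 0 */
        return 0;
}
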
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
new file mode 100644
index 000000000000..ea2839d9e044
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -0,0 +1,131 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_H
+#define INTEL_RING_H
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "i915_request.h"
+#include "intel_ring_types.h"
+
+struct intel_engine_cs;
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
+int intel_ring_cacheline_align(struct i915_request *rq);
+
+unsigned int intel_ring_update_space(struct intel_ring *ring);
+
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+ kref_get(&ring->ref);
+ return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+ kref_put(&ring->ref, intel_ring_free);
+}
+
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
+{
+ /* Dummy function.
+ *
+ * This serves as a placeholder in the code so that the reader
+ * can compare against the preceding intel_ring_begin() and
+ * check that the number of dwords emitted matches the space
+ * reserved for the command packet (i.e. the value passed to
+ * intel_ring_begin()).
+ */
+ GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
+}
+
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+ return pos & (ring->size - 1);
+}
+
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+{
+ if (pos & -ring->size) /* must be strictly within the ring */
+ return false;
+
+ if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+ return false;
+
+ return true;
+}
+
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
+{
+ /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
+ u32 offset = addr - rq->ring->vaddr;
+ GEM_BUG_ON(offset > rq->ring->size);
+ return intel_ring_wrap(rq->ring, offset);
+}
+
+static inline void
+assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
+{
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
+ /*
+ * "Ring Buffer Use"
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
+ * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
+ * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ *
+ * We use ring->head as the last known location of the actual RING_HEAD,
+ * it may have advanced but in the worst case it is equal to
+ * ring->head, and so we should never program RING_TAIL to advance
+ * into the same cacheline as ring->head.
+ */
+#define cacheline(a) round_down(a, CACHELINE_BYTES)
+ GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
+ tail < ring->head);
+#undef cacheline
+}
+
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+ /* Whilst writes to the tail are strictly ordered, there is no
+ * serialisation between readers and the writers. The tail may be
+ * read by i915_request_retire() just as it is being updated
+ * by execlists, as although the breadcrumb is complete, the context
+ * switch hasn't been seen.
+ */
+ assert_ring_tail_valid(ring, tail);
+ ring->tail = tail;
+ return tail;
+}
+
+static inline unsigned int
+__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
+{
+ /*
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+ GEM_BUG_ON(!is_power_of_2(size));
+ return (head - tail - CACHELINE_BYTES) & (size - 1);
+}
+
+#endif /* INTEL_RING_H */
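
__intel_ring_space() keeps one full cacheline between head and tail so that RING_TAIL can never land in the same cacheline as RING_HEAD, and relies on the ring size being a power of two so the wrap can be handled with a mask. A user-space sketch of the same arithmetic with example values:

#include <stdio.h>

#define CACHELINE_BYTES 64

static unsigned int ring_space(unsigned int head, unsigned int tail,
                               unsigned int size)
{
        /* size must be a power of two for the mask to work */
        return (head - tail - CACHELINE_BYTES) & (size - 1);
}

int main(void)
{
        /* empty ring: head == tail leaves size - 64 bytes usable */
        printf("%u\n", ring_space(0, 0, 4096));         /* 4032 */
        /* tail one cacheline behind head: effectively full */
        printf("%u\n", ring_space(256, 192, 4096));     /* 0 */
        return 0;
}
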
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index bacaa7bb8c9a..a47d5a7c32c9 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -40,6 +40,7 @@
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
@@ -47,16 +48,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-unsigned int intel_ring_update_space(struct intel_ring *ring)
-{
- unsigned int space;
-
- space = __intel_ring_space(ring->head, ring->emit, ring->size);
-
- ring->space = space;
- return space;
-}
-
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
@@ -322,7 +313,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
- *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -425,7 +417,7 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
- *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -439,8 +431,8 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -459,8 +451,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -930,6 +922,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
static void i9xx_submit_request(struct i915_request *request)
{
i915_request_submit(request);
+ wmb(); /* paranoid flush writes out of the WCB before mmio */
ENGINE_WRITE(request->engine, RING_TAIL,
intel_ring_set_tail(request->ring, request->tail));
@@ -937,8 +930,8 @@ static void i9xx_submit_request(struct i915_request *request)
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
@@ -960,8 +953,8 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
@@ -1184,167 +1177,9 @@ i915_emit_bb_start(struct i915_request *rq,
return 0;
}
-int intel_ring_pin(struct intel_ring *ring)
-{
- struct i915_vma *vma = ring->vma;
- unsigned int flags;
- void *addr;
- int ret;
-
- if (atomic_fetch_inc(&ring->pin_count))
- return 0;
-
- flags = PIN_GLOBAL;
-
- /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- if (vma->obj->stolen)
- flags |= PIN_MAPPABLE;
- else
- flags |= PIN_HIGH;
-
- ret = i915_vma_pin(vma, 0, 0, flags);
- if (unlikely(ret))
- goto err_unpin;
-
- if (i915_vma_is_map_and_fenceable(vma))
- addr = (void __force *)i915_vma_pin_iomap(vma);
- else
- addr = i915_gem_object_pin_map(vma->obj,
- i915_coherent_map_type(vma->vm->i915));
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto err_ring;
- }
-
- i915_vma_make_unshrinkable(vma);
-
- GEM_BUG_ON(ring->vaddr);
- ring->vaddr = addr;
-
- return 0;
-
-err_ring:
- i915_vma_unpin(vma);
-err_unpin:
- atomic_dec(&ring->pin_count);
- return ret;
-}
-
-void intel_ring_reset(struct intel_ring *ring, u32 tail)
-{
- tail = intel_ring_wrap(ring, tail);
- ring->tail = tail;
- ring->head = tail;
- ring->emit = tail;
- intel_ring_update_space(ring);
-}
-
-void intel_ring_unpin(struct intel_ring *ring)
-{
- struct i915_vma *vma = ring->vma;
-
- if (!atomic_dec_and_test(&ring->pin_count))
- return;
-
- /* Discard any unused bytes beyond that submitted to hw. */
- intel_ring_reset(ring, ring->emit);
-
- i915_vma_unset_ggtt_write(vma);
- if (i915_vma_is_map_and_fenceable(vma))
- i915_vma_unpin_iomap(vma);
- else
- i915_gem_object_unpin_map(vma->obj);
-
- GEM_BUG_ON(!ring->vaddr);
- ring->vaddr = NULL;
-
- i915_vma_unpin(vma);
- i915_vma_make_purgeable(vma);
-}
-
-static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
-{
- struct i915_address_space *vm = &ggtt->vm;
- struct drm_i915_private *i915 = vm->i915;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
-
- obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- /*
- * Mark ring buffers as read-only from GPU side (so no stray overwrites)
- * if supported by the platform's GGTT.
- */
- if (vm->has_read_only)
- i915_gem_object_set_readonly(obj);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return vma;
-}
-
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size)
-{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_ring *ring;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!is_power_of_2(size));
- GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&ring->ref);
-
- ring->size = size;
- /* Workaround an erratum on the i830 which causes a hang if
- * the TAIL pointer points to within the last 2 cachelines
- * of the buffer.
- */
- ring->effective_size = size;
- if (IS_I830(i915) || IS_I845G(i915))
- ring->effective_size -= 2 * CACHELINE_BYTES;
-
- intel_ring_update_space(ring);
-
- vma = create_ring_vma(engine->gt->ggtt, size);
- if (IS_ERR(vma)) {
- kfree(ring);
- return ERR_CAST(vma);
- }
- ring->vma = vma;
-
- return ring;
-}
-
-void intel_ring_free(struct kref *ref)
-{
- struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
-
- i915_vma_close(ring->vma);
- i915_vma_put(ring->vma);
-
- kfree(ring);
-}
-
static void __ring_context_fini(struct intel_context *ce)
{
- i915_gem_object_put(ce->state->obj);
+ i915_vma_put(ce->state);
}
static void ring_context_destroy(struct kref *ref)
@@ -1609,7 +1444,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct intel_engine_cs *signaller;
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
- for_each_engine(signaller, i915, id) {
+ for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
@@ -1663,7 +1498,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
i915_reg_t last_reg = {}; /* keep gcc quiet */
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
- for_each_engine(signaller, i915, id) {
+ for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
@@ -1676,7 +1511,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ *cs++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
}
@@ -1741,46 +1576,22 @@ static int remap_l3(struct i915_request *rq)
static int switch_context(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
- struct i915_address_space *vm = vm_alias(rq->hw_context);
- unsigned int unwind_mm = 0;
- u32 hw_flags = 0;
+ struct intel_context *ce = rq->hw_context;
+ struct i915_address_space *vm = vm_alias(ce);
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
if (vm) {
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- int loops;
-
- /*
- * Baytail takes a little more convincing that it really needs
- * to reload the PD between contexts. It is not just a little
- * longer, as adding more stalls after the load_pd_dir (i.e.
- * adding a long loop around flush_pd_dir) is not as effective
- * as reloading the PD umpteen times. 32 is derived from
- * experimentation (gem_exec_parallel/fds) and has no good
- * explanation.
- */
- loops = 1;
- if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
- loops = 32;
-
- do {
- ret = load_pd_dir(rq, ppgtt);
- if (ret)
- goto err;
- } while (--loops);
-
- if (ppgtt->pd_dirty_engines & engine->mask) {
- unwind_mm = engine->mask;
- ppgtt->pd_dirty_engines &= ~unwind_mm;
- hw_flags = MI_FORCE_RESTORE;
- }
+ ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
+ if (ret)
+ return ret;
}
- if (rq->hw_context->state) {
- GEM_BUG_ON(engine->id != RCS0);
+ if (ce->state) {
+ u32 hw_flags;
+
+ GEM_BUG_ON(rq->engine->id != RCS0);
/*
* The kernel context(s) is treated as pure scratch and is not
@@ -1789,22 +1600,25 @@ static int switch_context(struct i915_request *rq)
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
+ hw_flags = 0;
if (i915_gem_context_is_kernel(rq->gem_context))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
if (ret)
- goto err_mm;
+ return ret;
}
if (vm) {
+ struct intel_engine_cs *engine = rq->engine;
+
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
- goto err_mm;
+ return ret;
ret = flush_pd_dir(rq);
if (ret)
- goto err_mm;
+ return ret;
/*
* Not only do we need a full barrier (post-sync write) after
@@ -1816,24 +1630,18 @@ static int switch_context(struct i915_request *rq)
*/
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
- goto err_mm;
+ return ret;
ret = engine->emit_flush(rq, EMIT_FLUSH);
if (ret)
- goto err_mm;
+ return ret;
}
ret = remap_l3(rq);
if (ret)
- goto err_mm;
+ return ret;
return 0;
-
-err_mm:
- if (unwind_mm)
- i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
-err:
- return ret;
}
static int ring_request_alloc(struct i915_request *request)
@@ -1841,7 +1649,7 @@ static int ring_request_alloc(struct i915_request *request)
int ret;
GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
- GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -1863,146 +1671,6 @@ static int ring_request_alloc(struct i915_request *request)
return 0;
}
-static noinline int
-wait_for_space(struct intel_ring *ring,
- struct intel_timeline *tl,
- unsigned int bytes)
-{
- struct i915_request *target;
- long timeout;
-
- if (intel_ring_update_space(ring) >= bytes)
- return 0;
-
- GEM_BUG_ON(list_empty(&tl->requests));
- list_for_each_entry(target, &tl->requests, link) {
- if (target->ring != ring)
- continue;
-
- /* Would completion of this request free enough space? */
- if (bytes <= __intel_ring_space(target->postfix,
- ring->emit, ring->size))
- break;
- }
-
- if (GEM_WARN_ON(&target->link == &tl->requests))
- return -ENOSPC;
-
- timeout = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (timeout < 0)
- return timeout;
-
- i915_request_retire_upto(target);
-
- intel_ring_update_space(ring);
- GEM_BUG_ON(ring->space < bytes);
- return 0;
-}
-
-u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
-{
- struct intel_ring *ring = rq->ring;
- const unsigned int remain_usable = ring->effective_size - ring->emit;
- const unsigned int bytes = num_dwords * sizeof(u32);
- unsigned int need_wrap = 0;
- unsigned int total_bytes;
- u32 *cs;
-
- /* Packets must be qword aligned. */
- GEM_BUG_ON(num_dwords & 1);
-
- total_bytes = bytes + rq->reserved_space;
- GEM_BUG_ON(total_bytes > ring->effective_size);
-
- if (unlikely(total_bytes > remain_usable)) {
- const int remain_actual = ring->size - ring->emit;
-
- if (bytes > remain_usable) {
- /*
- * Not enough space for the basic request. So need to
- * flush out the remainder and then wait for
- * base + reserved.
- */
- total_bytes += remain_actual;
- need_wrap = remain_actual | 1;
- } else {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So we don't need an immediate
- * wrap and only need to effectively wait for the
- * reserved size from the start of ringbuffer.
- */
- total_bytes = rq->reserved_space + remain_actual;
- }
- }
-
- if (unlikely(total_bytes > ring->space)) {
- int ret;
-
- /*
- * Space is reserved in the ringbuffer for finalising the
- * request, as that cannot be allowed to fail. During request
- * finalisation, reserved_space is set to 0 to stop the
- * overallocation and the assumption is that then we never need
- * to wait (which has the risk of failing with EINTR).
- *
- * See also i915_request_alloc() and i915_request_add().
- */
- GEM_BUG_ON(!rq->reserved_space);
-
- ret = wait_for_space(ring, rq->timeline, total_bytes);
- if (unlikely(ret))
- return ERR_PTR(ret);
- }
-
- if (unlikely(need_wrap)) {
- need_wrap &= ~1;
- GEM_BUG_ON(need_wrap > ring->space);
- GEM_BUG_ON(ring->emit + need_wrap > ring->size);
- GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
-
- /* Fill the tail with MI_NOOP */
- memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
- ring->space -= need_wrap;
- ring->emit = 0;
- }
-
- GEM_BUG_ON(ring->emit > ring->size - bytes);
- GEM_BUG_ON(ring->space < bytes);
- cs = ring->vaddr + ring->emit;
- GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
- ring->emit += bytes;
- ring->space -= bytes;
-
- return cs;
-}
-
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct i915_request *rq)
-{
- int num_dwords;
- void *cs;
-
- num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
- if (num_dwords == 0)
- return 0;
-
- num_dwords = CACHELINE_DWORDS - num_dwords;
- GEM_BUG_ON(num_dwords & 1);
-
- cs = intel_ring_begin(rq, num_dwords);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
- intel_ring_advance(rq, cs);
-
- GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
- return 0;
-}
-
static void gen6_bsd_submit_request(struct i915_request *request)
{
struct intel_uncore *uncore = request->engine->uncore;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
new file mode 100644
index 000000000000..d9f17f38e0cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_TYPES_H
+#define INTEL_RING_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+/*
+ * Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication as to the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
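+/* i.e. 16 dwords per assumed 64-byte cacheline. */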
+
+struct i915_vma;
+
+struct intel_ring {
+ struct kref ref;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
+ u32 head;
+ u32 tail;
+ u32 emit;
+
+ u32 space;
+ u32 size;
+ u32 effective_size;
+};
+
+#endif /* INTEL_RING_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
new file mode 100644
index 000000000000..20d6ee148afc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -0,0 +1,1872 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_rps.h"
+#include "intel_sideband.h"
+#include "../../../platform/x86/intel_ips.h"
+
+/*
+ * Lock protecting IPS related data structures
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+static struct intel_gt *rps_to_gt(struct intel_rps *rps)
+{
+ return container_of(rps, struct intel_gt, rps);
+}
+
+static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->i915;
+}
+
+static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->uncore;
+}
+
+static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
+{
+ return mask & ~rps->pm_intrmsk_mbz;
+}
+
+static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
+{
+ u32 mask = 0;
+
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
+ if (val > rps->min_freq_softlimit)
+ mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ if (val < rps->max_freq_softlimit)
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+
+ mask &= rps->pm_events;
+
+ return rps_pm_sanitize_mask(rps, ~mask);
+}
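+/*
+ * The value returned above is written to GEN6_PMINTRMSK, where a set bit
+ * masks an interrupt; only the events collected in the local mask are left
+ * unmasked.
+ */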
+
+static void rps_reset_ei(struct intel_rps *rps)
+{
+ memset(&rps->ei, 0, sizeof(rps->ei));
+}
+
+static void rps_enable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ rps_reset_ei(rps);
+
+ if (IS_VALLEYVIEW(gt->i915))
+ /* WaGsvRC0ResidencyMethod:vlv */
+ rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_enable_irq(gt, rps->pm_events);
+ spin_unlock_irq(&gt->irq_lock);
+
+ intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
+ rps_pm_mask(rps, rps->cur_freq));
+}
+
+static void gen6_rps_reset_interrupts(struct intel_rps *rps)
+{
+ gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
+}
+
+static void gen11_rps_reset_interrupts(struct intel_rps *rps)
+{
+ while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
+ ;
+}
+
+static void rps_reset_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (INTEL_GEN(gt->i915) >= 11)
+ gen11_rps_reset_interrupts(rps);
+ else
+ gen6_rps_reset_interrupts(rps);
+
+ rps->pm_iir = 0;
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void rps_disable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ rps->pm_events = 0;
+
+ intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
+ rps_pm_sanitize_mask(rps, ~0u));
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&gt->irq_lock);
+
+ intel_synchronize_irq(gt->i915);
+
+ /*
+ * Now that we will not be generating any more work, flush any
+ * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&rps->work);
+
+ rps_reset_interrupts(rps);
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+static void gen5_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fmax, fmin, fstart;
+ u32 rgvmodectl;
+ int c_m, i;
+
+ if (i915->fsb_freq <= 3200)
+ c_m = 0;
+ else if (i915->fsb_freq <= 4800)
+ c_m = 1;
+ else
+ c_m = 2;
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
+ rps->ips.m = cparams[i].m;
+ rps->ips.c = cparams[i].c;
+ break;
+ }
+ }
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ rps->min_freq = fmax;
+ rps->max_freq = fmin;
+
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
+}
+
+static unsigned long
+__ips_chipset_val(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ unsigned long now = jiffies_to_msecs(jiffies), dt;
+ unsigned long result;
+ u64 total, delta;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ /*
+ * Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ dt = now - ips->last_time1;
+ if (dt <= 10)
+ return ips->chipset_power;
+
+ /* FIXME: handle per-counter overflow */
+ total = intel_uncore_read(uncore, DMIEC);
+ total += intel_uncore_read(uncore, DDREC);
+ total += intel_uncore_read(uncore, CSIEC);
+
+ delta = total - ips->last_count1;
+
+ result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);
+
+ ips->last_count1 = total;
+ ips->last_time1 = now;
+
+ ips->chipset_power = result;
+
+ return result;
+}
+
+static unsigned long ips_mch_val(struct intel_uncore *uncore)
+{
+ unsigned int m, x, b;
+ u32 tsfs;
+
+ tsfs = intel_uncore_read(uncore, TSFS);
+ x = intel_uncore_read8(uncore, TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+ m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
+
+ return m * x / 127 - b;
+}
+
+static int _pxvid_to_vd(u8 pxvid)
+{
+ if (pxvid == 0)
+ return 0;
+
+ if (pxvid >= 8 && pxvid < 31)
+ pxvid = 31;
+
+ return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
+{
+ const int vd = _pxvid_to_vd(pxvid);
+
+ if (INTEL_INFO(i915)->is_mobile)
+ return max(vd - 1125, 0);
+
+ return vd;
+}
+
+static void __gen5_ips_update(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ u64 now, delta, dt;
+ u32 count;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ now = ktime_get_raw_ns();
+ dt = now - ips->last_time2;
+ do_div(dt, NSEC_PER_MSEC);
+
+ /* Don't divide by 0 */
+ if (dt <= 10)
+ return;
+
+ count = intel_uncore_read(uncore, GFXEC);
+ delta = count - ips->last_count2;
+
+ ips->last_count2 = count;
+ ips->last_time2 = now;
+
+ /* More magic constants... */
+ ips->gfx_power = div_u64(delta * 1181, dt * 10);
+}
+
+static void gen5_rps_update(struct intel_rps *rps)
+{
+ spin_lock_irq(&mchdev_lock);
+ __gen5_ips_update(&rps->ips);
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ /* Invert the frequency bin into an ips delay */
+ val = rps->max_freq - val;
+ val = rps->min_freq + val;
+
+ rgvswctl =
+ (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) |
+ MEMCTL_SFCAVM;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+ intel_uncore_posting_read16(uncore, MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ return div * 133333 / (pre << post);
+}
+
+static unsigned int init_emon(struct intel_uncore *uncore)
+{
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ intel_uncore_write(uncore, ECR, 0);
+ intel_uncore_posting_read(uncore, ECR);
+
+ /* Program energy weights for various events */
+ intel_uncore_write(uncore, SDEW, 0x15040d00);
+ intel_uncore_write(uncore, CSIEW0, 0x007f0000);
+ intel_uncore_write(uncore, CSIEW1, 0x1e220004);
+ intel_uncore_write(uncore, CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ intel_uncore_write(uncore, PEW(i), 0);
+ for (i = 0; i < 3; i++)
+ intel_uncore_write(uncore, DEW(i), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
+ unsigned int freq = intel_pxfreq(pxvidfreq);
+ unsigned int vid =
+ (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+ unsigned int val;
+
+ val = vid * vid * freq / 1000 * 255;
+ val /= 127 * 127 * 900;
+
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ intel_uncore_write(uncore, PXW(i),
+ pxw[i * 4 + 0] << 24 |
+ pxw[i * 4 + 1] << 16 |
+ pxw[i * 4 + 2] << 8 |
+ pxw[i * 4 + 3] << 0);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ intel_uncore_write(uncore, OGW0, 0);
+ intel_uncore_write(uncore, OGW1, 0);
+ intel_uncore_write(uncore, EG0, 0x00007f00);
+ intel_uncore_write(uncore, EG1, 0x0000000e);
+ intel_uncore_write(uncore, EG2, 0x000e0000);
+ intel_uncore_write(uncore, EG3, 0x68000300);
+ intel_uncore_write(uncore, EG4, 0x42000000);
+ intel_uncore_write(uncore, EG5, 0x00140031);
+ intel_uncore_write(uncore, EG6, 0);
+ intel_uncore_write(uncore, EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ intel_uncore_write(uncore, PXWL(i), 0);
+
+ /* Enable PMON + select events */
+ intel_uncore_write(uncore, ECR, 0x80000019);
+
+ return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
+}
+
+static bool gen5_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fstart, vstart;
+ u32 rgvmodectl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Enable temp reporting */
+ intel_uncore_write16(uncore, PMMISC,
+ intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
+ intel_uncore_write16(uncore, TSC1,
+ intel_uncore_read16(uncore, TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ intel_uncore_write(uncore, RCUPEI, 100000);
+ intel_uncore_write(uncore, RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ intel_uncore_write(uncore, RCBMAXAVG, 90000);
+ intel_uncore_write(uncore, RCBMINAVG, 80000);
+
+ intel_uncore_write(uncore, MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
+ PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ intel_uncore_write(uncore, VIDSTART, vstart);
+ intel_uncore_posting_read(uncore, VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
+
+ if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
+ MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ mdelay(1);
+
+ gen5_rps_set(rps, rps->cur_freq);
+
+ rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
+ rps->ips.last_time1 = jiffies_to_msecs(jiffies);
+
+ rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
+ rps->ips.last_time2 = ktime_get_raw_ns();
+
+ spin_unlock_irq(&mchdev_lock);
+
+ rps->ips.corr = init_emon(uncore);
+
+ return true;
+}
+
+static void gen5_rps_disable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ intel_uncore_write(uncore, MEMINTREN,
+ intel_uncore_read(uncore, MEMINTREN) &
+ ~MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ intel_uncore_write(uncore, DEIER,
+ intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIMR,
+ intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ gen5_rps_set(rps, rps->idle_freq);
+ mdelay(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
+ mdelay(1);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static u32 rps_limits(struct intel_rps *rps, u8 val)
+{
+ u32 limits;
+
+ /*
+ * Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency, if the down threshold expires in that window we will not
+ * receive a down interrupt.
+ */
+ if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
+ limits = rps->max_freq_softlimit << 23;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 14;
+ } else {
+ limits = rps->max_freq_softlimit << 24;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 16;
+ }
+
+ return limits;
+}
+
+static void rps_set_power(struct intel_rps *rps, int new_power)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
+
+ lockdep_assert_held(&rps->power.mutex);
+
+ if (new_power == rps->power.mode)
+ return;
+
+ /* Note the units here are not exactly 1us, but 1280ns. */
+ switch (new_power) {
+ case LOW_POWER:
+ /* Upclock if more than 95% busy over 16ms */
+ ei_up = 16000;
+ threshold_up = 95;
+
+ /* Downclock if less than 85% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 85;
+ break;
+
+ case BETWEEN:
+ /* Upclock if more than 90% busy over 13ms */
+ ei_up = 13000;
+ threshold_up = 90;
+
+ /* Downclock if less than 75% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 75;
+ break;
+
+ case HIGH_POWER:
+ /* Upclock if more than 85% busy over 10ms */
+ ei_up = 10000;
+ threshold_up = 85;
+
+ /* Downclock if less than 60% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 60;
+ break;
+ }
+
+ /* Once BYT can survive dynamic sw freq adjustments without a
+ * system hang, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(i915))
+ goto skip_hw_write;
+
+ intel_uncore_write(uncore, GEN6_RP_UP_EI,
+ GT_INTERVAL_FROM_US(i915, ei_up));
+ intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915,
+ ei_up * threshold_up / 100));
+
+ intel_uncore_write(uncore, GEN6_RP_DOWN_EI,
+ GT_INTERVAL_FROM_US(i915, ei_down));
+ intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915,
+ ei_down * threshold_down / 100));
+
+ intel_uncore_write(uncore, GEN6_RP_CONTROL,
+ (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+skip_hw_write:
+ rps->power.mode = new_power;
+ rps->power.up_threshold = threshold_up;
+ rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
+{
+ int new_power;
+
+ new_power = rps->power.mode;
+ switch (rps->power.mode) {
+ case LOW_POWER:
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val <= rps->min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val >= rps->max_freq_softlimit)
+ new_power = HIGH_POWER;
+
+ mutex_lock(&rps->power.mutex);
+ if (rps->power.interactive)
+ new_power = HIGH_POWER;
+ rps_set_power(rps, new_power);
+ mutex_unlock(&rps->power.mutex);
+}
+
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
+{
+ mutex_lock(&rps->power.mutex);
+ if (interactive) {
+ if (!rps->power.interactive++ && rps->active)
+ rps_set_power(rps, HIGH_POWER);
+ } else {
+ GEM_BUG_ON(!rps->power.interactive);
+ rps->power.interactive--;
+ }
+ mutex_unlock(&rps->power.mutex);
+}
+
+static int gen6_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 swreq;
+
+ if (INTEL_GEN(i915) >= 9)
+ swreq = GEN9_FREQUENCY(val);
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ swreq = HSW_FREQUENCY(val);
+ else
+ swreq = (GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq);
+
+ return 0;
+}
+
+static int vlv_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ vlv_punit_get(i915);
+ err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_punit_put(i915);
+
+ return err;
+}
+
+static int rps_set(struct intel_rps *rps, u8 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ if (INTEL_GEN(i915) < 6)
+ return 0;
+
+ if (val == rps->last_freq)
+ return 0;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_rps_set(rps, val);
+ else
+ err = gen6_rps_set(rps, val);
+ if (err)
+ return err;
+
+ gen6_rps_set_thresholds(rps, val);
+ rps->last_freq = val;
+
+ return 0;
+}
+
+void intel_rps_unpark(struct intel_rps *rps)
+{
+ u8 freq;
+
+ if (!rps->enabled)
+ return;
+
+ /*
+ * Use the user's desired frequency as a guide, but for better
+ * performance, jump directly to RPe as our starting frequency.
+ */
+ mutex_lock(&rps->lock);
+ rps->active = true;
+ freq = max(rps->cur_freq, rps->efficient_freq);
+ freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
+ intel_rps_set(rps, freq);
+ rps->last_adj = 0;
+ mutex_unlock(&rps->lock);
+
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps_enable_interrupts(rps);
+
+ if (IS_GEN(rps_to_i915(rps), 5))
+ gen5_rps_update(rps);
+}
+
+void intel_rps_park(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (!rps->enabled)
+ return;
+
+ if (INTEL_GEN(i915) >= 6)
+ rps_disable_interrupts(rps);
+
+ rps->active = false;
+ if (rps->last_freq <= rps->idle_freq)
+ return;
+
+ /*
+ * The punit delays the write of the frequency and voltage until it
+ * determines the GPU is awake. During normal usage we don't want to
+ * waste power changing the frequency if the GPU is sleeping (rc6).
+ * However, the GPU and driver is now idle and we do not want to delay
+ * switching to minimum voltage (reducing power whilst idle) as we do
+ * not expect to be woken in the near future and so must flush the
+ * change by waking the device.
+ *
+ * We choose to take the media powerwell (either would do to trick the
+ * punit into committing the voltage change) as that takes a lot less
+ * power than the render powerwell.
+ */
+ intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+ rps_set(rps, rps->idle_freq);
+ intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+}
+
+void intel_rps_boost(struct i915_request *rq)
+{
+ struct intel_rps *rps = &rq->engine->gt->rps;
+ unsigned long flags;
+
+ if (i915_request_signaled(rq) || !rps->active)
+ return;
+
+ /* Serializes with i915_request_retire() */
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!i915_request_has_waitboost(rq) &&
+ !dma_fence_is_signaled_locked(&rq->fence)) {
+ rq->flags |= I915_REQUEST_WAITBOOST;
+
+ if (!atomic_fetch_inc(&rps->num_waiters) &&
+ READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ schedule_work(&rps->work);
+
+ atomic_inc(&rps->boosts);
+ }
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+int intel_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err = 0;
+
+ lockdep_assert_held(&rps->lock);
+ GEM_BUG_ON(val > rps->max_freq);
+ GEM_BUG_ON(val < rps->min_freq);
+
+ if (rps->active) {
+ err = rps_set(rps, val);
+
+ /*
+ * Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS,
+ rps_limits(rps, val));
+
+ intel_uncore_write(uncore, GEN6_PMINTRMSK,
+ rps_pm_mask(rps, val));
+ }
+ }
+
+ if (err == 0)
+ rps->cur_freq = val;
+
+ return err;
+}
+
+static void gen6_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* All of these values are in units of 50MHz */
+
+ /* static values from HW: RP0 > RP1 > RPn (min_freq) */
+ if (IS_GEN9_LP(i915)) {
+ u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+
+ rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 0) & 0xff;
+ } else {
+ u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+
+ rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 16) & 0xff;
+ }
+
+ /* hw_max = RP0 until we check for overclocking */
+ rps->max_freq = rps->rp0_freq;
+
+ rps->efficient_freq = rps->rp1_freq;
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+ IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ u32 ddcc_status = 0;
+
+ if (sandybridge_pcode_read(i915,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status, NULL) == 0)
+ rps->efficient_freq =
+ clamp_t(u8,
+ (ddcc_status >> 8) & 0xff,
+ rps->min_freq,
+ rps->max_freq);
+ }
+
+ if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ * the natural hardware unit for SKL.
+ */
+ rps->rp0_freq *= GEN9_FREQ_SCALER;
+ rps->rp1_freq *= GEN9_FREQ_SCALER;
+ rps->min_freq *= GEN9_FREQ_SCALER;
+ rps->max_freq *= GEN9_FREQ_SCALER;
+ rps->efficient_freq *= GEN9_FREQ_SCALER;
+ }
+}
+
+static bool rps_reset(struct intel_rps *rps)
+{
+ /* force a reset */
+ rps->power.mode = -1;
+ rps->last_freq = -1;
+
+ if (rps_set(rps, rps->min_freq)) {
+ DRM_ERROR("Failed to reset RPS to initial values\n");
+ return false;
+ }
+
+ rps->cur_freq = rps->min_freq;
+ return true;
+}
+
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
+static bool gen9_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Program defaults and thresholds for RPS */
+ if (IS_GEN(i915, 9))
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(rps->rp1_freq));
+
+ /* 1 second timeout */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+ GT_INTERVAL_FROM_US(i915, 1000000));
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
+
+ return rps_reset(rps);
+}
+
+static bool gen8_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(rps->rp1_freq));
+
+ /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+ 100000000 / 128); /* 1 second timeout */
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ return rps_reset(rps);
+}
+
+static bool gen6_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Power down if completely idle for over 50ms */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ return rps_reset(rps);
+}
+
+static int chv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ switch (RUNTIME_INFO(i915)->sseu.eu_total) {
+ case 8:
+ /* (2 * 4) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
+ break;
+ case 12:
+ /* (2 * 6) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
+ break;
+ case 16:
+ /* (2 * 8) config */
+ default:
+ /* Setting (2 * 8) Min RP0 for any other combination */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
+ break;
+ }
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static int chv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+ val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
+
+ return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+}
+
+static int chv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static u32 chv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+ val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static bool chv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ /* 1: Program defaults and thresholds for RPS*/
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* 2: Enable RPS */
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ /* Setting Fixed Bias */
+ vlv_punit_get(i915);
+
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static int vlv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp1;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
+ rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int vlv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp0;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+ /* Clamp to max */
+ rp0 = min_t(u32, rp0, 0xea);
+
+ return rp0;
+}
+
+static int vlv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rpe;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+
+ return rpe;
+}
+
+static int vlv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+ /*
+ * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
+ * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
+ * a BYT-M B0 the above register contains 0xbf. Moreover, when setting
+ * a frequency, Punit will not allow values below 0xc0. Clamp it to 0xc0
+ * to make sure it matches what Punit accepts.
+ */
+ return max_t(u32, val, 0xc0);
+}
+
+static bool vlv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ vlv_punit_get(i915);
+
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static unsigned long __ips_gfx_val(struct intel_ips *ips)
+{
+ struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);
+
+ state1 = ext_v;
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ t = ips_mch_val(uncore);
+ if (t > 80)
+ corr = t * 2349 + 135940;
+ else if (t >= 50)
+ corr = t * 964 + 29317;
+ else /* < 50 */
+ corr = t * 301 + 1004;
+
+ corr = corr * 150142 * state1 / 10000 - 78642;
+ corr /= 100000;
+ corr2 = corr * ips->corr;
+
+ state2 = corr2 * state1 / 10000;
+ state2 /= 100; /* convert to mW */
+
+ __gen5_ips_update(ips);
+
+ return ips->gfx_power + state2;
+}
+
+void intel_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (IS_CHERRYVIEW(i915))
+ rps->enabled = chv_rps_enable(rps);
+ else if (IS_VALLEYVIEW(i915))
+ rps->enabled = vlv_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 9)
+ rps->enabled = gen9_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 8)
+ rps->enabled = gen8_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ rps->enabled = gen6_rps_enable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ rps->enabled = gen5_rps_enable(rps);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ if (!rps->enabled)
+ return;
+
+ WARN_ON(rps->max_freq < rps->min_freq);
+ WARN_ON(rps->idle_freq > rps->max_freq);
+
+ WARN_ON(rps->efficient_freq < rps->min_freq);
+ WARN_ON(rps->efficient_freq > rps->max_freq);
+}
+
+static void gen6_rps_disable(struct intel_rps *rps)
+{
+ intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+}
+
+void intel_rps_disable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->enabled = false;
+
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_disable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_disable(rps);
+}
+
+static int byt_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val - 0xb7
+ * Slow = Fast = GPLL ref * N
+ */
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
+}
+
+static int byt_freq_opcode(struct intel_rps *rps, int val)
+{
+ return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
+}
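+/*
+ * Illustrative round trip with a hypothetical 5400 kHz GPLL reference:
+ * byt_gpu_freq(rps, 0xc0) = DIV_ROUND_CLOSEST(5400 * 9, 1000) = 49 MHz,
+ * and byt_freq_opcode(rps, 49) = DIV_ROUND_CLOSEST(49000, 5400) + 0xb7 = 0xc0.
+ */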
+
+static int chv_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val / 2
+ * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+ */
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
+}
+
+static int chv_freq_opcode(struct intel_rps *rps, int val)
+{
+ /* CHV needs even values */
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
+}
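+/*
+ * Illustrative round trip with a hypothetical 8000 kHz GPLL reference:
+ * chv_gpu_freq(rps, 50) = DIV_ROUND_CLOSEST(8000 * 50, 4000) = 100 MHz,
+ * and chv_freq_opcode(rps, 100) = DIV_ROUND_CLOSEST(200000, 8000) * 2 = 50.
+ */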
+
+int intel_gpu_freq(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (INTEL_GEN(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+ GEN9_FREQ_SCALER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_gpu_freq(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_gpu_freq(rps, val);
+ else
+ return val * GT_FREQUENCY_MULTIPLIER;
+}
+
+int intel_freq_opcode(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (INTEL_GEN(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+ GT_FREQUENCY_MULTIPLIER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_freq_opcode(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_freq_opcode(rps, val);
+ else
+ return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
+}
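+/*
+ * Assuming the usual GT_FREQUENCY_MULTIPLIER of 50 and GEN9_FREQ_SCALER of 3,
+ * gen9+ therefore reports val * 50 / 3, i.e. the 16.66 MHz units noted in
+ * gen6_rps_init(), while older non-VLV/CHV platforms use plain 50 MHz units.
+ */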
+
+static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->gpll_ref_freq =
+ vlv_get_cck_clock(i915, "GPLL ref",
+ CCK_GPLL_CLOCK_CONTROL,
+ i915->czclk_freq);
+
+ DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+}
+
+static void vlv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = vlv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->max_freq);
+
+ rps->efficient_freq = vlv_rps_rpe_freq(rps);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq),
+ rps->efficient_freq);
+
+ rps->rp1_freq = vlv_rps_guar_freq(rps);
+ DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq),
+ rps->rp1_freq);
+
+ rps->min_freq = vlv_rps_min_freq(rps);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+}
+
+static void chv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = chv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->max_freq);
+
+ rps->efficient_freq = chv_rps_rpe_freq(rps);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq),
+ rps->efficient_freq);
+
+ rps->rp1_freq = chv_rps_guar_freq(rps);
+ DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq),
+ rps->rp1_freq);
+
+ rps->min_freq = chv_rps_min_freq(rps);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+ rps->min_freq) & 1,
+ "Odd GPU freq values\n");
+}
+
+static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
+{
+ ei->ktime = ktime_get_raw();
+ ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
+ ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
+}
+
+static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ const struct intel_rps_ei *prev = &rps->ei;
+ struct intel_rps_ei now;
+ u32 events = 0;
+
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+ return 0;
+
+ vlv_c0_read(uncore, &now);
+
+ if (prev->ktime) {
+ u64 time, c0;
+ u32 render, media;
+
+ time = ktime_us_delta(now.ktime, prev->ktime);
+
+ time *= rps_to_i915(rps)->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ render = now.render_c0 - prev->render_c0;
+ media = now.media_c0 - prev->media_c0;
+ c0 = max(render, media);
+ c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
+ if (c0 > time * rps->power.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * rps->power.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
+ }
+
+ rps->ei = now;
+ return events;
+}
+
+static void rps_work(struct work_struct *work)
+{
+ struct intel_rps *rps = container_of(work, typeof(*rps), work);
+ struct intel_gt *gt = rps_to_gt(rps);
+ bool client_boost = false;
+ int new_freq, adj, min, max;
+ u32 pm_iir = 0;
+
+ spin_lock_irq(&gt->irq_lock);
+ pm_iir = fetch_and_zero(&rps->pm_iir);
+ client_boost = atomic_read(&rps->num_waiters);
+ spin_unlock_irq(&gt->irq_lock);
+
+ /* Make sure we didn't queue anything we're not going to process. */
+ if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ goto out;
+
+ mutex_lock(&rps->lock);
+
+ pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
+
+ adj = rps->last_adj;
+ new_freq = rps->cur_freq;
+ min = rps->min_freq_softlimit;
+ max = rps->max_freq_softlimit;
+ if (client_boost)
+ max = rps->max_freq;
+ if (client_boost && new_freq < rps->boost_freq) {
+ new_freq = rps->boost_freq;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
+
+ if (new_freq >= rps->max_freq_softlimit)
+ adj = 0;
+ } else if (client_boost) {
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+ if (rps->cur_freq > rps->efficient_freq)
+ new_freq = rps->efficient_freq;
+ else if (rps->cur_freq > rps->min_freq_softlimit)
+ new_freq = rps->min_freq_softlimit;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (adj < 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
+
+ if (new_freq <= rps->min_freq_softlimit)
+ adj = 0;
+ } else { /* unknown event */
+ adj = 0;
+ }
+
+ rps->last_adj = adj;
+
+ /*
+ * Limit deboosting and boosting to keep ourselves at the extremes
+ * when in the respective power modes (i.e. slowly decrease frequencies
+ * while in the HIGH_POWER zone and slowly increase frequencies while
+ * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+ * to the next level quickly, and conversely if busy we expect to
+ * hit a waitboost and rapidly switch into max power.
+ */
+ if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+ (adj > 0 && rps->power.mode == LOW_POWER))
+ rps->last_adj = 0;
+
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ new_freq += adj;
+ new_freq = clamp_t(int, new_freq, min, max);
+
+ if (intel_rps_set(rps, new_freq)) {
+ DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+ rps->last_adj = 0;
+ }
+
+ mutex_unlock(&rps->lock);
+
+out:
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_unmask_irq(gt, rps->pm_events);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ const u32 events = rps->pm_events & pm_iir;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ gen6_gt_pm_mask_irq(gt, events);
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ if (pm_iir & rps->pm_events) {
+ spin_lock(&gt->irq_lock);
+ gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
+ rps->pm_iir |= pm_iir & rps->pm_events;
+ schedule_work(&rps->work);
+ spin_unlock(&gt->irq_lock);
+ }
+
+ if (INTEL_GEN(gt->i915) >= 8)
+ return;
+
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine[VECS0]);
+
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
+}
+
+void gen5_rps_irq_handler(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_freq;
+
+ spin_lock(&mchdev_lock);
+
+ intel_uncore_write16(uncore,
+ MEMINTRSTS,
+ intel_uncore_read(uncore, MEMINTRSTS));
+
+ intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
+ busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
+ max_avg = intel_uncore_read(uncore, RCBMAXAVG);
+ min_avg = intel_uncore_read(uncore, RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ new_freq = rps->cur_freq;
+ if (busy_up > max_avg)
+ new_freq++;
+ else if (busy_down < min_avg)
+ new_freq--;
+ new_freq = clamp(new_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
+
+ if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+ rps->cur_freq = new_freq;
+
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_rps_init_early(struct intel_rps *rps)
+{
+ mutex_init(&rps->lock);
+ mutex_init(&rps->power.mutex);
+
+ INIT_WORK(&rps->work, rps_work);
+
+ atomic_set(&rps->num_waiters, 0);
+}
+
+void intel_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rps_init(rps);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rps_init(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_rps_init(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_init(rps);
+
+ /* Derive initial user preferences/limits from the hardware limits */
+ rps->max_freq_softlimit = rps->max_freq;
+ rps->min_freq_softlimit = rps->min_freq;
+
+ /* After setting max-softlimit, find the overclock max freq */
+ if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
+ u32 params = 0;
+
+ sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
+ &params, NULL);
+ if (params & BIT(31)) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (rps->max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
+ rps->max_freq = params & 0xff;
+ }
+ }
+
+ /* Finally allow us to boost to max by default */
+ rps->boost_freq = rps->max_freq;
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
+
+ rps->pm_intrmsk_mbz = 0;
+
+ /*
+ * SNB, IVB and HSW can hang, and VLV and CHV may hard hang, on a looping
+ * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_GEN(i915) <= 7)
+ rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_GEN(i915) >= 8)
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+}
+
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 cagf;
+
+ if (INTEL_GEN(i915) >= 9)
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+
+ return cagf;
+}
+
+/* External interface for intel_ips.ko */
+
+static struct drm_i915_private __rcu *ips_mchdev;
+
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_rps_driver_register(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ /*
+ * We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values.
+ */
+ if (IS_GEN(gt->i915, 5)) {
+ rcu_assign_pointer(ips_mchdev, gt->i915);
+ ips_ping_for_i915_load();
+ }
+}
+
+void intel_rps_driver_unregister(struct intel_rps *rps)
+{
+ rcu_assign_pointer(ips_mchdev, NULL);
+}
+
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+ i915 = rcu_dereference(ips_mchdev);
+ if (!kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ struct intel_ips *ips = &i915->gt.rps.ips;
+
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __ips_chipset_val(ips);
+ graphics_val = __ips_gfx_val(ips);
+ spin_unlock_irq(&mchdev_lock);
+ }
+
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit < rps->max_freq)
+ rps->max_freq_softlimit++;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit > rps->min_freq)
+ rps->max_freq_softlimit--;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *i915;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ ret = i915->gt.awake;
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ rps->max_freq_softlimit = rps->min_freq;
+ ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
new file mode 100644
index 000000000000..9518c66c9792
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_H
+#define INTEL_RPS_H
+
+#include "intel_rps_types.h"
+
+struct i915_request;
+
+void intel_rps_init_early(struct intel_rps *rps);
+void intel_rps_init(struct intel_rps *rps);
+
+void intel_rps_driver_register(struct intel_rps *rps);
+void intel_rps_driver_unregister(struct intel_rps *rps);
+
+void intel_rps_enable(struct intel_rps *rps);
+void intel_rps_disable(struct intel_rps *rps);
+
+void intel_rps_park(struct intel_rps *rps);
+void intel_rps_unpark(struct intel_rps *rps);
+void intel_rps_boost(struct i915_request *rq);
+
+int intel_rps_set(struct intel_rps *rps, u8 val);
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
+
+int intel_gpu_freq(struct intel_rps *rps, int val);
+int intel_freq_opcode(struct intel_rps *rps, int val);
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat1);
+
+void gen5_rps_irq_handler(struct intel_rps *rps);
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+
+#endif /* INTEL_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
new file mode 100644
index 000000000000..c2e279154bd5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_TYPES_H
+#define INTEL_RPS_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_ips {
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ u64 last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c, m;
+};
+
+struct intel_rps_ei {
+ ktime_t ktime;
+ u32 render_c0;
+ u32 media_c0;
+};
+
+struct intel_rps {
+ struct mutex lock; /* protects enabling and the worker */
+
+ /*
+ * work, interrupts_enabled and pm_iir are protected by
+ * dev_priv->irq_lock
+ */
+ struct work_struct work;
+ bool enabled;
+ bool active;
+ u32 pm_iir;
+
+ /* PM interrupt bits that should never be masked */
+ u32 pm_intrmsk_mbz;
+ u32 pm_events;
+
+ /* Frequencies are stored in potentially platform-dependent multiples.
+ * In other words, *_freq needs to be multiplied by X to be interesting.
+ * Soft limits are those which are used for the dynamic reclocking done
+ * by the driver (raise frequencies under heavy loads, and lower for
+ * lighter loads). Hard limits are those imposed by the hardware.
+ *
+ * A distinction is made for overclocking, which is never enabled by
+ * default, and is considered to be above the hard limit if it's
+ * possible at all.
+ */
+ u8 cur_freq; /* Current frequency (cached, may not == HW) */
+ u8 last_freq; /* Last SWREQ frequency */
+ u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+ u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
+ u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 boost_freq; /* Frequency to request when wait boosting */
+ u8 idle_freq; /* Frequency to request when we are idle */
+ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
+ u8 rp1_freq; /* "less than" RP0 power/freqency */
+ u8 rp0_freq; /* Non-overclocked max frequency. */
+ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
+
+ int last_adj;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+ u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
+
+ atomic_t num_waiters;
+ atomic_t boosts;
+
+ /* manual wa residency calculations */
+ struct intel_rps_ei ei;
+ struct intel_ips ips;
+};
+
+#endif /* INTEL_RPS_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 6bf2d87da109..74f793423231 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -8,6 +8,19 @@
#include "intel_lrc_reg.h"
#include "intel_sseu.h"
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice)
+{
+ sseu->max_slices = max_slices;
+ sseu->max_subslices = max_subslices;
+ sseu->max_eus_per_subslice = max_eus_per_subslice;
+
+ sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE);
+ sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE);
+}
+
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
{
@@ -19,10 +32,32 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
return total;
}
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
+{
+ int i, offset = slice * sseu->ss_stride;
+ u32 mask = 0;
+
+ GEM_BUG_ON(slice >= sseu->max_slices);
+
+ for (i = 0; i < sseu->ss_stride; i++)
+ mask |= (u32)sseu->subslice_mask[offset + i] <<
+ i * BITS_PER_BYTE;
+
+ return mask;
+}
+
+void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
+ u32 ss_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride);
+}
+
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
{
- return hweight8(sseu->subslice_mask[slice]);
+ return hweight32(intel_sseu_get_subslices(sseu, slice));
}
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
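
The byte-stride packing used by intel_sseu_get_subslices() and intel_sseu_set_subslices() above can be checked with a stand-alone sketch. Note the set path relies on memcpy() of a u32, so, like the driver, the sketch assumes a little-endian host; all names below are illustrative.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BITS_PER_BYTE 8

/* Store a 32-bit subslice mask into the per-slice byte array (little-endian). */
static void set_subslices(uint8_t *mask, int slice, int ss_stride, uint32_t ss_mask)
{
	memcpy(&mask[slice * ss_stride], &ss_mask, ss_stride);
}

/* Reassemble the 32-bit mask byte by byte, as intel_sseu_get_subslices() does. */
static uint32_t get_subslices(const uint8_t *mask, int slice, int ss_stride)
{
	uint32_t out = 0;
	int i;

	for (i = 0; i < ss_stride; i++)
		out |= (uint32_t)mask[slice * ss_stride + i] << (i * BITS_PER_BYTE);

	return out;
}

int main(void)
{
	uint8_t subslice_mask[3 * 2] = { 0 }; /* 3 slices, 2-byte stride (<= 16 subslices) */

	set_subslices(subslice_mask, 1, 2, 0x01ff);
	assert(get_subslices(subslice_mask, 1, 2) == 0x01ff);
	assert(get_subslices(subslice_mask, 0, 2) == 0);
	return 0;
}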
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index b50d0401a4e2..d1d225204f09 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -10,15 +10,21 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include "i915_gem.h"
+
struct drm_i915_private;
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
+#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES)
+#define GEN_MAX_EUS (16) /* TGL upper bound */
+#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS)
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES];
+ u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -33,11 +39,8 @@ struct sseu_dev_info {
u8 max_subslices;
u8 max_eus_per_subslice;
- /* We don't have more than 8 eus per subslice at the moment and as we
- * store eus enabled using bits, no need to multiply by eus per
- * subslice.
- */
- u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
+ u8 ss_stride;
+ u8 eu_stride;
};
/*
@@ -63,12 +66,34 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
return value;
}
+static inline bool
+intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ u8 mask;
+ int ss_idx = subslice / BITS_PER_BYTE;
+
+ GEM_BUG_ON(ss_idx >= sseu->ss_stride);
+
+ mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx];
+
+ return mask & BIT(subslice % BITS_PER_BYTE);
+}
+
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice);
+
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
+
+void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
+ u32 ss_mask);
+
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 9cb01d9828f1..14ad10acd548 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -4,13 +4,13 @@
* Copyright © 2016-2018 Intel Corporation
*/
-#include "gt/intel_gt_types.h"
-
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_syncmap.h"
-#include "gt/intel_timeline.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
@@ -136,6 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
kfree(cl);
}
+__i915_active_call
static void __cacheline_retire(struct i915_active *active)
{
struct intel_timeline_cacheline *cl =
@@ -177,8 +178,7 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
- i915_active_init(hwsp->gt->i915, &cl->active,
- __cacheline_active, __cacheline_retire);
+ i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
return cl;
}
@@ -254,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -442,7 +442,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
* free it after the current request is retired, which ensures that
* all writes into the cacheline from previous requests are complete.
*/
- err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
+ err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
if (err)
goto err_cacheline;
@@ -493,24 +493,39 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
static int cacheline_ref(struct intel_timeline_cacheline *cl,
struct i915_request *rq)
{
- return i915_active_ref(&cl->active, rq->timeline, rq);
+ return i915_active_add_request(&cl->active, rq);
}
int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
- struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
- struct intel_timeline *tl = from->timeline;
+ struct intel_timeline *tl;
int err;
- GEM_BUG_ON(to->timeline == tl);
+ rcu_read_lock();
+ tl = rcu_dereference(from->timeline);
+ if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+ if (!tl) /* already completed */
+ return 1;
+
+ GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl);
+
+ err = -EBUSY;
+ if (mutex_trylock(&tl->mutex)) {
+ struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
+
+ if (i915_request_completed(from)) {
+ err = 1;
+ goto unlock;
+ }
- mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
- err = i915_request_completed(from);
- if (!err)
err = cacheline_ref(cl, to);
- if (!err) {
+ if (err)
+ goto unlock;
+
if (likely(cl == tl->hwsp_cacheline)) {
*hwsp = tl->hwsp_offset;
} else { /* across a seqno wrap, recover the original offset */
@@ -518,8 +533,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
CACHELINE_BYTES;
}
+
+unlock:
+ mutex_unlock(&tl->mutex);
}
- mutex_unlock(&tl->mutex);
+ intel_timeline_put(tl);
return err;
}
@@ -541,7 +559,7 @@ void __intel_timeline_free(struct kref *kref)
container_of(kref, typeof(*timeline), kref);
intel_timeline_fini(timeline);
- kfree(timeline);
+ kfree_rcu(timeline, rcu);
}
static void timelines_fini(struct intel_gt *gt)
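
The rcu_read_lock()/kref_get_unless_zero() dance added to intel_timeline_read_hwsp() above is the usual lookup-then-try-reference pattern: the RCU read side keeps the memory alive long enough to test whether the object is still referenced, and a reference is only taken if the count has not already hit zero. A reduced user-space model of the try-reference step (RCU itself is not modelled; all names are hypothetical):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refcount;
};

/*
 * Model of kref_get_unless_zero(): acquire a reference only if the object has
 * not already dropped to zero references (i.e. is not being torn down).
 */
static bool get_unless_zero(struct obj *o)
{
	int v = atomic_load(&o->refcount);

	while (v) {
		if (atomic_compare_exchange_weak(&o->refcount, &v, v + 1))
			return true;
	}

	return false;
}

int main(void)
{
	struct obj live = { 1 }, dying = { 0 };

	assert(get_unless_zero(&live));   /* now holds a second reference */
	assert(!get_unless_zero(&dying)); /* too late, the caller must bail out */
	return 0;
}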
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 2b1baf2fcc8e..98d9ee166379 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -58,12 +58,13 @@ struct intel_timeline {
*/
struct list_head requests;
- /* Contains an RCU guarded pointer to the last request. No reference is
+ /*
+ * Contains an RCU guarded pointer to the last request. No reference is
* held to the request, users must carefully acquire a reference to
- * the request using i915_active_request_get_request_rcu(), or hold the
- * struct_mutex.
+ * the request using i915_active_fence_get(), or manage the RCU
+ * protection themselves (cf the i915_active_fence API).
*/
- struct i915_active_request last_request;
+ struct i915_active_fence last_request;
/**
* We track the most recent seqno that we wait on in every context so
@@ -80,6 +81,7 @@ struct intel_timeline {
struct intel_gt *gt;
struct kref kref;
+ struct rcu_head rcu;
};
#endif /* __I915_TIMELINE_TYPES_H__ */
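
The access pattern the updated comment refers to is the one context_sync() uses in selftest_context.c further below: take a temporary reference to the last request's fence with i915_active_fence_get() and drop it with dma_fence_put(). A reduced sketch of that usage (error handling trimmed, tl->mutex assumed held as in the selftest):

/* Sketch only: mirrors the i915_active_fence_get() usage in context_sync(). */
static long wait_for_last_request(struct intel_timeline *tl)
{
	struct dma_fence *fence;
	long timeout;

	fence = i915_active_fence_get(&tl->last_request);
	if (!fence)
		return 0; /* idle: nothing outstanding to wait for */

	timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
	dma_fence_put(fence);

	return timeout < 0 ? timeout : 0;
}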
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5f6ec2fd29a0..e4bccc14602f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
/**
@@ -567,6 +568,9 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ /* Wa_1409142259:tgl */
+ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
}
static void
@@ -796,11 +800,10 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
slice = fls(sseu->slice_mask) - 1;
- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
- subslice = fls(l3_en & sseu->subslice_mask[slice]);
+ subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
if (!subslice) {
DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
- sseu->subslice_mask[slice], l3_en);
+ intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
WARN_ON(!subslice);
}
@@ -890,11 +893,27 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+
+ /* Wa_1607087056:icl */
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
+ /* Wa_1409420604:tgl */
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
+
+ /* Wa_1409180338:tgl */
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
static void
@@ -1197,6 +1216,26 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+ break;
+ default:
+ break;
+ }
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
@@ -1258,6 +1297,26 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ /* Wa_1606700617:tgl */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+
+ /* Wa_1607138336:tgl */
+ wa_write_or(wal,
+ GEN9_CTX_PREEMPT_REG,
+ GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
+
+ /* Wa_1607030317:tgl */
+ /* Wa_1607186500:tgl */
+ /* Wa_1607297627:tgl */
+ wa_masked_en(wal,
+ GEN6_RC_SLEEP_PSMI_CONTROL,
+ GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+ }
+
if (IS_GEN(i915, 11)) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
@@ -1452,7 +1511,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
- if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff))
+ if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
return true;
return false;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 5d43cbc3f345..83f549d203a0 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -23,6 +23,7 @@
*/
#include "gem/i915_gem_context.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "intel_context.h"
@@ -240,6 +241,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
struct mock_engine *engine;
GEM_BUG_ON(id >= I915_NUM_ENGINES);
+ GEM_BUG_ON(!i915->gt.uncore);
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
if (!engine)
@@ -248,9 +250,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
engine->base.gt = &i915->gt;
+ engine->base.uncore = i915->gt.uncore;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
+ engine->base.legacy_idx = INVALID_ENGINE;
engine->base.instance = id;
engine->base.status_page.addr = (void *)(engine + 1);
@@ -265,6 +269,9 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.reset.finish = mock_reset_finish;
engine->base.cancel_requests = mock_cancel_requests;
+ i915->gt.engine[id] = &engine->base;
+ i915->gt.engine_class[0][id] = &engine->base;
+
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 9d1ea26c7a2d..bc720defc6b8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -14,22 +14,28 @@
static int request_sync(struct i915_request *rq)
{
+ struct intel_timeline *tl = i915_request_timeline(rq);
long timeout;
int err = 0;
+ intel_timeline_get(tl);
i915_request_get(rq);
- i915_request_add(rq);
+ /* Opencode i915_request_add() so we can keep the timeline locked. */
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, NULL);
+
timeout = i915_request_wait(rq, 0, HZ / 10);
- if (timeout < 0) {
+ if (timeout < 0)
err = timeout;
- } else {
- mutex_lock(&rq->timeline->mutex);
+ else
i915_request_retire_upto(rq);
- mutex_unlock(&rq->timeline->mutex);
- }
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+ mutex_unlock(&tl->mutex);
i915_request_put(rq);
+ intel_timeline_put(tl);
return err;
}
@@ -41,24 +47,20 @@ static int context_sync(struct intel_context *ce)
mutex_lock(&tl->mutex);
do {
- struct i915_request *rq;
+ struct dma_fence *fence;
long timeout;
- rcu_read_lock();
- rq = rcu_dereference(tl->last_request.request);
- if (rq)
- rq = i915_request_get_rcu(rq);
- rcu_read_unlock();
- if (!rq)
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
break;
- timeout = i915_request_wait(rq, 0, HZ / 10);
+ timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
if (timeout < 0)
err = timeout;
else
- i915_request_retire_upto(rq);
+ i915_request_retire_upto(to_request(fence));
- i915_request_put(rq);
+ dma_fence_put(fence);
} while (!err);
mutex_unlock(&tl->mutex);
@@ -101,9 +103,6 @@ static int __live_context_size(struct intel_engine_cs *engine,
*
* TLDR; this overlaps with the execlists redzone.
*/
- if (HAS_EXECLISTS(engine->i915))
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
-
vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
@@ -153,15 +152,11 @@ static int live_context_size(void *arg)
* HW tries to write past the end of one.
*/
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = kernel_context(gt->i915);
- if (IS_ERR(fixme)) {
- err = PTR_ERR(fixme);
- goto unlock;
- }
+ if (IS_ERR(fixme))
+ return PTR_ERR(fixme);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct {
struct drm_i915_gem_object *state;
void *pinned;
@@ -199,8 +194,6 @@ static int live_context_size(void *arg)
}
kernel_context_close(fixme);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -303,26 +296,23 @@ static int live_active_context(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = live_context(gt->i915, file);
if (IS_ERR(fixme)) {
err = PTR_ERR(fixme);
- goto unlock;
+ goto out_file;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = __live_active_context(engine, fixme);
if (err)
break;
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
mock_file_free(gt->i915, file);
return err;
}
@@ -416,26 +406,23 @@ static int live_remote_context(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = live_context(gt->i915, file);
if (IS_ERR(fixme)) {
err = PTR_ERR(fixme);
- goto unlock;
+ goto out_file;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = __live_remote_context(engine, fixme);
if (err)
break;
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
mock_file_free(gt->i915, file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
new file mode 100644
index 000000000000..e864406bd2d9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -0,0 +1,350 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "i915_drv.h"
+
+#include "intel_gt_requests.h"
+#include "i915_selftest.h"
+
+struct pulse {
+ struct i915_active active;
+ struct kref kref;
+};
+
+static int pulse_active(struct i915_active *active)
+{
+ kref_get(&container_of(active, struct pulse, active)->kref);
+ return 0;
+}
+
+static void pulse_free(struct kref *kref)
+{
+ kfree(container_of(kref, struct pulse, kref));
+}
+
+static void pulse_put(struct pulse *p)
+{
+ kref_put(&p->kref, pulse_free);
+}
+
+static void pulse_retire(struct i915_active *active)
+{
+ pulse_put(container_of(active, struct pulse, active));
+}
+
+static struct pulse *pulse_create(void)
+{
+ struct pulse *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return p;
+
+ kref_init(&p->kref);
+ i915_active_init(&p->active, pulse_active, pulse_retire);
+
+ return p;
+}
+
+static void pulse_unlock_wait(struct pulse *p)
+{
+ mutex_lock(&p->active.mutex);
+ mutex_unlock(&p->active.mutex);
+ flush_work(&p->active.work);
+}
+
+static int __live_idle_pulse(struct intel_engine_cs *engine,
+ int (*fn)(struct intel_engine_cs *cs))
+{
+ struct pulse *p;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ p = pulse_create();
+ if (!p)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&p->active);
+ if (err)
+ goto out;
+
+ err = i915_active_acquire_preallocate_barrier(&p->active, engine);
+ if (err) {
+ i915_active_release(&p->active);
+ goto out;
+ }
+
+ i915_active_acquire_barrier(&p->active);
+ i915_active_release(&p->active);
+
+ GEM_BUG_ON(i915_active_is_idle(&p->active));
+ GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
+
+ err = fn(engine);
+ if (err)
+ goto out;
+
+ GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+
+ if (intel_gt_retire_requests_timeout(engine->gt, HZ / 5)) {
+ err = -ETIME;
+ goto out;
+ }
+
+ GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial);
+
+ pulse_unlock_wait(p); /* synchronize with the retirement callback */
+
+ if (!i915_active_is_idle(&p->active)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: heartbeat pulse did not flush idle tasks\n",
+ engine->name);
+ i915_active_print(&p->active, &m);
+
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ pulse_put(p);
+ return err;
+}
+
+static int live_idle_flush(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_idle_pulse(engine, intel_engine_flush_barriers);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int live_idle_pulse(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that heartbeat pulses flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_idle_pulse(engine, intel_engine_pulse);
+ intel_engine_pm_put(engine);
+ if (err && err != -ENODEV)
+ break;
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int cmp_u32(const void *_a, const void *_b)
+{
+ const u32 *a = _a, *b = _b;
+
+ return *a - *b;
+}
+
+static int __live_heartbeat_fast(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u32 times[5];
+ int err;
+ int i;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ intel_engine_pm_get(engine);
+
+ err = intel_engine_set_heartbeat(engine, 1);
+ if (err)
+ goto err_pm;
+
+ for (i = 0; i < ARRAY_SIZE(times); i++) {
+ /* Manufacture a tick */
+ do {
+ while (READ_ONCE(engine->heartbeat.systole))
+ flush_delayed_work(&engine->heartbeat.work);
+
+ engine->serial++; /* quick, pretend we are not idle! */
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat did not start\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ rq = i915_request_get_rcu(rq);
+ rcu_read_unlock();
+ } while (!rq);
+
+ t0 = ktime_get();
+ while (rq == READ_ONCE(engine->heartbeat.systole))
+ yield(); /* work is on the local cpu! */
+ t1 = ktime_get();
+
+ i915_request_put(rq);
+ times[i] = ktime_us_delta(t1, t0);
+ }
+
+ sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL);
+
+ pr_info("%s: Heartbeat delay: %uus [%u, %u]\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ times[0],
+ times[ARRAY_SIZE(times) - 1]);
+
+ /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
+ if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+ pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ jiffies_to_usecs(6));
+ err = -EINVAL;
+ }
+
+ intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+err_pm:
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_heartbeat_fast(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the heartbeat ticks at the desired rate. */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_heartbeat_fast(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __live_heartbeat_off(struct intel_engine_cs *engine)
+{
+ int err;
+
+ intel_engine_pm_get(engine);
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat not running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ err = intel_engine_set_heartbeat(engine, 0);
+ if (err)
+ goto err_pm;
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat still running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+ if (READ_ONCE(engine->heartbeat.systole)) {
+ pr_err("%s: heartbeat still allocated\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+err_beat:
+ intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+err_pm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+static int live_heartbeat_off(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can turn off heartbeat and not interrupt VIP */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ err = __live_heartbeat_off(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_idle_flush),
+ SUBTEST(live_idle_pulse),
+ SUBTEST(live_heartbeat_fast),
+ SUBTEST(live_heartbeat_off),
+ };
+ int saved_hangcheck;
+ int err;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ saved_hangcheck = i915_modparams.enable_hangcheck;
+ i915_modparams.enable_hangcheck = INT_MAX;
+
+ err = intel_gt_live_subtests(tests, &i915->gt);
+
+ i915_modparams.enable_hangcheck = saved_hangcheck;
+ return err;
+}
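
__live_heartbeat_fast() above judges the heartbeat period by sorting a handful of samples and checking the median, which keeps a single scheduling hiccup from failing the test. A user-space sketch of the same measurement idea (the sample values are made up; the comparator is written in the overflow-safe form for unsigned values):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-safe three-way comparison for unsigned 32-bit samples. */
static int cmp_u32(const void *_a, const void *_b)
{
	const uint32_t a = *(const uint32_t *)_a, b = *(const uint32_t *)_b;

	return (a > b) - (a < b);
}

int main(void)
{
	uint32_t times[5] = { 180, 9500, 210, 195, 205 }; /* fake delays, in us */

	qsort(times, 5, sizeof(times[0]), cmp_u32);

	/* The median shrugs off the single 9500us outlier above. */
	printf("median %uus [min %uus, max %uus]\n",
	       times[5 / 2], times[0], times[5 - 1]);
	return 0;
}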
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 3a1419376912..20b9c83f43ad 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -25,7 +25,7 @@ static int live_engine_pm(void *arg)
}
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
const typeof(*igt_atomic_phases) *p;
for (p = igt_atomic_phases; p->name; p++) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
new file mode 100644
index 000000000000..d1752f15702a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -0,0 +1,60 @@
+
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftest_llc.h"
+
+static int live_gt_resume(void *arg)
+{
+ struct intel_gt *gt = arg;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ /* Do several suspend/resume cycles to check we don't explode! */
+ do {
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ if (gt->rc6.enabled) {
+ pr_err("rc6 still enabled after suspend!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = intel_gt_resume(gt);
+ if (err)
+ break;
+
+ if (gt->rc6.supported && !gt->rc6.enabled) {
+ pr_err("rc6 not enabled upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = st_llc_verify(&gt->llc);
+ if (err) {
+ pr_err("llc state not restored upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
+int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_gt_resume),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index a0098fc35921..85e9ccf5c304 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -131,7 +131,7 @@ static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct intel_gt *gt = h->gt;
- struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
@@ -141,12 +141,15 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
int err;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ i915_vm_put(vm);
return ERR_CAST(obj);
+ }
vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
+ i915_vm_put(vm);
return ERR_CAST(vaddr);
}
@@ -157,16 +160,22 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
h->batch = vaddr;
vma = i915_vma_instance(h->obj, vm, NULL);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_vm_put(vm);
return ERR_CAST(vma);
+ }
hws = i915_vma_instance(h->hws, vm, NULL);
- if (IS_ERR(hws))
+ if (IS_ERR(hws)) {
+ i915_vm_put(vm);
return ERR_CAST(hws);
+ }
err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
+ if (err) {
+ i915_vm_put(vm);
return ERR_PTR(err);
+ }
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
@@ -264,6 +273,7 @@ unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
+ i915_vm_put(vm);
return err ? ERR_PTR(err) : rq;
}
@@ -285,7 +295,7 @@ static void hang_fini(struct hang *h)
kernel_context_close(h->ctx);
- igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
+ igt_flush_test(h->gt->i915);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
@@ -309,12 +319,11 @@ static int igt_hang_sanitycheck(void *arg)
/* Basic check that we can execute our hanging batch */
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_wedge_me w;
long timeout;
@@ -355,8 +364,6 @@ static int igt_hang_sanitycheck(void *arg)
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -383,9 +390,7 @@ static int igt_reset_nop(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
@@ -395,9 +400,7 @@ static int igt_reset_nop(void *arg)
reset_count = i915_reset_count(global);
count = 0;
do {
- mutex_lock(&gt->i915->drm.struct_mutex);
-
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
int i;
for (i = 0; i < 16; i++) {
@@ -417,7 +420,6 @@ static int igt_reset_nop(void *arg)
intel_gt_reset(gt, ALL_ENGINES, NULL);
igt_global_reset_unlock(gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (intel_gt_is_wedged(gt)) {
err = -EIO;
break;
@@ -429,16 +431,13 @@ static int igt_reset_nop(void *arg)
break;
}
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
+ err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
@@ -458,23 +457,21 @@ static int igt_reset_nop_engine(void *arg)
/* Check that we can engine-reset during non-user portions */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
i915_gem_context_clear_bannable(ctx);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
unsigned int count;
IGT_TIMEOUT(end_time);
@@ -494,7 +491,6 @@ static int igt_reset_nop_engine(void *arg)
break;
}
- mutex_lock(&gt->i915->drm.struct_mutex);
for (i = 0; i < 16; i++) {
struct i915_request *rq;
@@ -507,7 +503,6 @@ static int igt_reset_nop_engine(void *arg)
i915_request_add(rq);
}
err = intel_engine_reset(engine, NULL);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
@@ -533,15 +528,12 @@ static int igt_reset_nop_engine(void *arg)
if (err)
break;
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
+ err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
@@ -559,18 +551,16 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
/* Check that we can issue an engine reset on an idle engine (no-op) */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (active) {
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
IGT_TIMEOUT(end_time);
@@ -593,17 +583,14 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (active) {
struct i915_request *rq;
- mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -647,7 +634,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (err)
break;
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -655,11 +642,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (intel_gt_is_wedged(gt))
err = -EIO;
- if (active) {
- mutex_lock(&gt->i915->drm.struct_mutex);
+ if (active)
hang_fini(&h);
- mutex_unlock(&gt->i915->drm.struct_mutex);
- }
return err;
}
@@ -725,9 +709,7 @@ static int active_engine(void *data)
return PTR_ERR(file);
for (count = 0; count < ARRAY_SIZE(ctx); count++) {
- mutex_lock(&engine->i915->drm.struct_mutex);
ctx[count] = live_context(engine->i915, file);
- mutex_unlock(&engine->i915->drm.struct_mutex);
if (IS_ERR(ctx[count])) {
err = PTR_ERR(ctx[count]);
while (--count)
@@ -741,10 +723,8 @@ static int active_engine(void *data)
struct i915_request *old = rq[idx];
struct i915_request *new;
- mutex_lock(&engine->i915->drm.struct_mutex);
new = igt_request_alloc(ctx[idx], engine);
if (IS_ERR(new)) {
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
@@ -755,7 +735,6 @@ static int active_engine(void *data)
rq[idx] = i915_request_get(new);
i915_request_add(new);
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = active_request_put(old);
if (err)
@@ -791,13 +770,11 @@ static int __igt_reset_engines(struct intel_gt *gt,
* with any other engine.
*/
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
@@ -805,7 +782,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
h.ctx->sched.priority = 1024;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
@@ -823,7 +800,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
memset(threads, 0, sizeof(threads));
- for_each_engine(other, gt->i915, tmp) {
+ for_each_engine(other, gt, tmp) {
struct task_struct *tsk;
threads[tmp].resets =
@@ -849,23 +826,22 @@ static int __igt_reset_engines(struct intel_gt *gt,
get_task_struct(tsk);
}
+ yield(); /* start all threads before we begin */
+
intel_engine_pm_get(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -940,7 +916,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
unwind:
- for_each_engine(other, gt->i915, tmp) {
+ for_each_engine(other, gt, tmp) {
int ret;
if (!threads[tmp].task)
@@ -977,9 +953,7 @@ unwind:
if (err)
break;
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -987,11 +961,8 @@ unwind:
if (intel_gt_is_wedged(gt))
err = -EIO;
- if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
+ if (flags & TEST_ACTIVE)
hang_fini(&h);
- mutex_unlock(&gt->i915->drm.struct_mutex);
- }
return err;
}
@@ -1047,7 +1018,7 @@ static int igt_reset_wait(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
@@ -1061,7 +1032,6 @@ static int igt_reset_wait(void *arg)
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
@@ -1109,7 +1079,6 @@ out_rq:
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -1127,15 +1096,14 @@ static int evict_vma(void *data)
{
struct evict_vma *arg = data;
struct i915_address_space *vm = arg->vma->vm;
- struct drm_i915_private *i915 = vm->i915;
struct drm_mm_node evict = arg->vma->node;
int err;
complete(&arg->completion);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&vm->mutex);
err = i915_gem_evict_for_node(vm, &evict, 0);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&vm->mutex);
return err;
}
@@ -1143,39 +1111,33 @@ static int evict_vma(void *data)
static int evict_fence(void *data)
{
struct evict_vma *arg = data;
- struct drm_i915_private *i915 = arg->vma->vm->i915;
int err;
complete(&arg->completion);
- mutex_lock(&i915->drm.struct_mutex);
-
/* Mark the fence register as dirty to force the mmio update. */
err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
if (err) {
pr_err("Invalid Y-tiling settings; err:%d\n", err);
- goto out_unlock;
+ return err;
}
err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
if (err) {
pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
- goto out_unlock;
+ return err;
}
err = i915_vma_pin_fence(arg->vma);
i915_vma_unpin(arg->vma);
if (err) {
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
- goto out_unlock;
+ return err;
}
i915_vma_unpin_fence(arg->vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
+ return 0;
}
static int __igt_reset_evict_vma(struct intel_gt *gt,
@@ -1183,23 +1145,26 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
int (*fn)(void *),
unsigned int flags)
{
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
struct evict_vma arg;
struct hang h;
+ unsigned int pin_flags;
int err;
+ if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
+ return 0;
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
@@ -1227,10 +1192,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
goto out_obj;
}
- err = i915_vma_pin(arg.vma, 0, 0,
- i915_vma_is_ggtt(arg.vma) ?
- PIN_GLOBAL | PIN_MAPPABLE :
- PIN_USER);
+ pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ pin_flags |= PIN_MAPPABLE;
+
+ err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
if (err) {
i915_request_add(rq);
goto out_obj;
@@ -1262,8 +1229,6 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
if (err)
goto out_rq;
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -1312,16 +1277,12 @@ out_reset:
put_task_struct(tsk);
}
- mutex_lock(&gt->i915->drm.struct_mutex);
out_rq:
i915_request_put(rq);
out_obj:
i915_gem_object_put(obj);
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
if (intel_gt_is_wedged(gt))
return -EIO;
@@ -1340,6 +1301,7 @@ static int igt_reset_evict_ppgtt(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
+ struct i915_address_space *vm;
struct drm_file *file;
int err;
@@ -1347,18 +1309,20 @@ static int igt_reset_evict_ppgtt(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
err = 0;
- if (ctx->vm) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(gt, ctx->vm,
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (!i915_is_ggtt(vm)) {
+ /* aliasing == global gtt locking, covered above */
+ err = __igt_reset_evict_vma(gt, vm,
evict_vma, EXEC_OBJECT_WRITE);
+ }
+ i915_vm_put(vm);
out:
mock_file_free(gt->i915, file);
@@ -1379,7 +1343,7 @@ static int wait_for_others(struct intel_gt *gt,
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (engine == exclude)
continue;
@@ -1403,12 +1367,11 @@ static int igt_reset_queue(void *arg)
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
@@ -1518,7 +1481,7 @@ static int igt_reset_queue(void *arg)
i915_request_put(prev);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -1526,7 +1489,6 @@ static int igt_reset_queue(void *arg)
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -1539,7 +1501,7 @@ static int igt_handle_error(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1547,17 +1509,15 @@ static int igt_handle_error(void *arg)
/* Check that we can issue a global GPU and engine reset */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
- mutex_lock(&gt->i915->drm.struct_mutex);
-
err = hang_init(&h, gt);
if (err)
- goto err_unlock;
+ return err;
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
@@ -1581,8 +1541,6 @@ static int igt_handle_error(void *arg)
goto err_request;
}
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
/* Temporarily disable error capture */
error = xchg(&global->first_error, (void *)-1);
@@ -1590,8 +1548,6 @@ static int igt_handle_error(void *arg)
xchg(&global->first_error, error);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
err = -EINVAL;
@@ -1602,8 +1558,6 @@ err_request:
i915_request_put(rq);
err_fini:
hang_fini(&h);
-err_unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -1617,7 +1571,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
- tasklet_disable_nosync(t);
+ tasklet_disable(t);
p->critical_section_begin();
err = intel_engine_reset(engine, NULL);
@@ -1689,14 +1643,13 @@ static int igt_reset_engines_atomic(void *arg)
/* Check that the engines resets are usable from atomic context */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (USES_GUC_SUBMISSION(gt->i915))
return 0;
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
/* Flush any requests before we get started and check basics */
if (!igt_force_reset(gt))
@@ -1706,7 +1659,7 @@ static int igt_reset_engines_atomic(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = igt_atomic_reset_engine(engine, p);
if (err)
goto out;
@@ -1716,9 +1669,7 @@ static int igt_reset_engines_atomic(void *arg)
out:
/* As we poke around the guts, do a full reset before continuing. */
igt_force_reset(gt);
-
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
return err;
@@ -1743,27 +1694,19 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
};
struct intel_gt *gt = &i915->gt;
intel_wakeref_t wakeref;
- bool saved_hangcheck;
int err;
- if (!intel_has_gpu_reset(gt->i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
- drain_delayed_work(&gt->hangcheck.work); /* flush param */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = intel_gt_live_subtests(tests, gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
- igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
- i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
new file mode 100644
index 000000000000..fd3770e48ac7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_pm.h" /* intel_gpu_freq() */
+#include "selftest_llc.h"
+#include "intel_rps.h"
+
+static int gen6_verify_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ intel_wakeref_t wakeref;
+ unsigned int gpu_freq;
+ int err = 0;
+
+ wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
+
+ if (!get_ia_constants(llc, &consts)) {
+ err = -ENODEV;
+ goto out_rpm;
+ }
+
+ for (gpu_freq = consts.min_gpu_freq;
+ gpu_freq <= consts.max_gpu_freq;
+ gpu_freq++) {
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ unsigned int ia_freq, ring_freq, found;
+ u32 val;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+
+ val = gpu_freq;
+ if (sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &val, NULL)) {
+ pr_err("Failed to read freq table[%d], range [%d, %d]\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
+ err = -ENXIO;
+ break;
+ }
+
+ found = (val >> 0) & 0xff;
+ if (found != ia_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ia_freq);
+ err = -EINVAL;
+ break;
+ }
+
+ found = (val >> 8) & 0xff;
+ if (found != ring_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ring_freq);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(llc_to_gt(llc)->uncore->rpm, wakeref);
+ return err;
+}
+
+int st_llc_verify(struct intel_llc *llc)
+{
+ int err = 0;
+
+ if (HAS_LLC(llc_to_gt(llc)->i915))
+ err = gen6_verify_ring_freq(llc);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h
new file mode 100644
index 000000000000..873f896e72f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_LLC_H
+#define SELFTEST_LLC_H
+
+struct intel_llc;
+
+int st_llc_verify(struct intel_llc *llc);
+
+#endif /* SELFTEST_LLC_H */
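
gen6_verify_ring_freq() above reads one entry of the pcode min-frequency table per GPU frequency and checks two packed fields: the expected CPU (IA) frequency in bits 7:0 and the expected ring frequency in bits 15:8. A minimal decode of that layout (the raw value in main() is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Split a min-freq-table entry into its two 8-bit fields, matching the
 * (val >> 0) & 0xff and (val >> 8) & 0xff reads in the selftest. */
static void decode_entry(uint32_t val, unsigned int *ia_freq, unsigned int *ring_freq)
{
	*ia_freq = (val >> 0) & 0xff;
	*ring_freq = (val >> 8) & 0xff;
}

int main(void)
{
	unsigned int ia, ring;

	decode_entry(0x1a0c, &ia, &ring); /* made-up raw table value */
	printf("ia=%u ring=%u\n", ia, ring);
	return 0;
}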
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index d791158988d6..eb71ac2f992c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -19,26 +20,52 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
static int live_sanitycheck(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
struct intel_context *ce;
struct igt_spinner spin;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
goto err_spin;
@@ -55,13 +82,13 @@ static int live_sanitycheck(void *arg)
if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx;
}
igt_spinner_end(&spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_ctx;
}
@@ -73,12 +100,175 @@ err_ctx:
kernel_context_close(ctx);
err_spin:
igt_spinner_fini(&spin);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
+ ctx = kernel_context(gt->i915);
+ if (!ctx)
+ goto err_spin;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(ctx, engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Setup the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
+ GEM_BUG_ON(!ce[1]->ring->size);
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ __execlists_update_reg_state(ce[1], engine);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+ * but it will install a dependency on rq[1] for rq[0]
+ * that will prevent the pair being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ tasklet_kill(&engine->execlists.tasklet); /* flush submission */
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ kernel_context_close(ctx);
+err_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -131,7 +321,13 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
if (IS_ERR(rq))
goto out_ctx;
- err = emit_semaphore_chain(rq, vma, idx);
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
i915_request_add(rq);
if (err)
rq = ERR_PTR(err);
@@ -144,10 +340,10 @@ out_ctx:
static int
release_queue(struct intel_engine_cs *engine,
struct i915_vma *vma,
- int idx)
+ int idx, int prio)
{
struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ .priority = prio,
};
struct i915_request *rq;
u32 *cs;
@@ -168,9 +364,15 @@ release_queue(struct intel_engine_cs *engine,
*cs++ = 1;
intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
i915_request_add(rq);
+ local_bh_disable();
engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
return 0;
}
@@ -189,8 +391,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
if (IS_ERR(head))
return PTR_ERR(head);
- i915_request_get(head);
- for_each_engine(engine, outer->i915, id) {
+ for_each_engine(engine, outer->gt, id) {
for (i = 0; i < count; i++) {
struct i915_request *rq;
@@ -199,15 +400,16 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
err = PTR_ERR(rq);
goto out;
}
+
+ i915_request_put(rq);
}
}
- err = release_queue(outer, vma, n);
+ err = release_queue(outer, vma, n, INT_MAX);
if (err)
goto out;
- if (i915_request_wait(head,
- I915_WAIT_LOCKED,
+ if (i915_request_wait(head, 0,
2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
count, n);
@@ -223,9 +425,8 @@ out:
static int live_timeslice_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
struct i915_vma *vma;
void *vaddr;
int err = 0;
@@ -239,17 +440,14 @@ static int live_timeslice_preempt(void *arg)
* need to preempt the current task and replace it with another
* ready task.
*/
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_unlock;
- }
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -269,7 +467,7 @@ static int live_timeslice_preempt(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_has_preemption(engine))
continue;
@@ -279,7 +477,7 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_pin;
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_pin;
}
@@ -292,22 +490,168 @@ err_map:
i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+}
+
+static void wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (!i915_request_is_active(rq));
+}
+
+static long timeslice_threshold(const struct intel_engine_cs *engine)
+{
+ return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+	 * Make sure that even if ELSP[0] and ELSP[1] are filled, with
+ * timeslicing between them disabled, we *do* enable timeslicing
+ * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq, *nop;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_pin;
+ }
+ engine->schedule(rq, &attr);
+ wait_for_submit(engine, rq);
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ i915_request_put(rq);
+ goto err_pin;
+ }
+ wait_for_submit(engine, nop);
+ i915_request_put(nop);
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+		/* Queue: semaphore signal, at the same priority as the semaphore */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err) {
+ i915_request_put(rq);
+ goto err_pin;
+ }
+
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.timer.expires) &&
+ !i915_request_completed(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EINVAL;
+ }
+
+ /* Timeslice every jiffy, so within 2 we should signal */
+ if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+ if (err)
+ break;
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
return err;
}
static int live_busywait_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
u32 *map;
@@ -316,22 +660,19 @@ static int live_busywait_preempt(void *arg)
* preempt the busywaits used to synchronise between rings.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
- goto err_unlock;
+ return -ENOMEM;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_ctx_lo;
@@ -343,7 +684,7 @@ static int live_busywait_preempt(void *arg)
goto err_obj;
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_map;
@@ -353,7 +694,7 @@ static int live_busywait_preempt(void *arg)
if (err)
goto err_map;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
u32 *cs;
@@ -364,7 +705,7 @@ static int live_busywait_preempt(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_vma;
}
@@ -444,7 +785,7 @@ static int live_busywait_preempt(void *arg)
i915_request_add(hi);
if (i915_request_wait(lo, 0, HZ / 5) < 0) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to preempt semaphore busywait!\n",
engine->name);
@@ -452,7 +793,7 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_vma;
}
@@ -475,9 +816,6 @@ err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -501,49 +839,45 @@ spinner_create_request(struct igt_spinner *spin,
static int live_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
pr_err("Logical preemption supported, but not exposed\n");
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -559,7 +893,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -576,7 +910,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -599,54 +933,47 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int live_late_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
ctx_lo->sched.priority = I915_USER_PRIORITY(1);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -705,15 +1032,12 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -723,14 +1047,13 @@ struct preempt_client {
struct i915_gem_context *ctx;
};
-static int preempt_client_init(struct drm_i915_private *i915,
- struct preempt_client *c)
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
{
- c->ctx = kernel_context(i915);
+ c->ctx = kernel_context(gt->i915);
if (!c->ctx)
return -ENOMEM;
- if (igt_spinner_init(&c->spin, &i915->gt))
+ if (igt_spinner_init(&c->spin, gt))
goto err_ctx;
return 0;
@@ -748,11 +1071,10 @@ static void preempt_client_fini(struct preempt_client *c)
static int live_nopreempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client a, b;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -760,19 +1082,16 @@ static int live_nopreempt(void *arg)
* that may be being observed and not want to be interrupted.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &a))
- goto err_unlock;
- if (preempt_client_init(i915, &b))
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
goto err_client_a;
b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
if (!intel_engine_has_preemption(engine))
@@ -832,7 +1151,7 @@ static int live_nopreempt(void *arg)
goto err_wedged;
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -841,29 +1160,344 @@ err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ i915_gem_context_set_banned(arg->b.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ igt_spinner_end(&arg->a.spin);
+ if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
static int live_suppress_self_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
};
struct preempt_client a, b;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -873,30 +1507,31 @@ static int live_suppress_self_preempt(void *arg)
* completion event.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
		return 0; /* presume black box */
- if (intel_vgpu_active(i915))
+ if (intel_vgpu_active(gt->i915))
return 0; /* GVT forces single port & request submission */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &a))
- goto err_unlock;
- if (preempt_client_init(i915, &b))
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
goto err_client_a;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
int depth;
if (!intel_engine_has_preemption(engine))
continue;
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ intel_engine_pm_get(engine);
engine->execlists.preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
@@ -904,12 +1539,14 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
+ intel_engine_pm_put(engine);
goto err_client_b;
}
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
+ intel_engine_pm_put(engine);
goto err_wedged;
}
@@ -921,6 +1558,7 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
+ intel_engine_pm_put(engine);
goto err_client_b;
}
i915_request_add(rq_b);
@@ -931,6 +1569,7 @@ static int live_suppress_self_preempt(void *arg)
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
+ intel_engine_pm_put(engine);
goto err_wedged;
}
@@ -944,11 +1583,13 @@ static int live_suppress_self_preempt(void *arg)
engine->name,
engine->execlists.preempt_hang.count,
depth);
+ intel_engine_pm_put(engine);
err = -EINVAL;
goto err_client_b;
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -957,15 +1598,12 @@ err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
@@ -984,9 +1622,13 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
if (!rq)
return NULL;
- INIT_LIST_HEAD(&rq->active_list);
rq->engine = engine;
+ spin_lock_init(&rq->lock);
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+ rq->fence.lock = &rq->lock;
+ rq->fence.ops = &i915_fence_ops;
+
i915_sched_node_init(&rq->sched);
/* mark this request as permanently incomplete */
@@ -1021,11 +1663,10 @@ static void dummy_request_free(struct i915_request *dummy)
static int live_suppress_wait_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct preempt_client client[4];
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
int i;
@@ -1035,22 +1676,19 @@ static int live_suppress_wait_preempt(void *arg)
* not needlessly generate preempt-to-idle cycles.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
- goto err_unlock;
- if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+ if (preempt_client_init(gt, &client[0])) /* ELSP[0] */
+ return -ENOMEM;
+ if (preempt_client_init(gt, &client[1])) /* ELSP[1] */
goto err_client_0;
- if (preempt_client_init(i915, &client[2])) /* head of queue */
+ if (preempt_client_init(gt, &client[2])) /* head of queue */
goto err_client_1;
- if (preempt_client_init(i915, &client[3])) /* bystander */
+ if (preempt_client_init(gt, &client[3])) /* bystander */
goto err_client_2;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
int depth;
if (!intel_engine_has_preemption(engine))
@@ -1079,8 +1717,8 @@ static int live_suppress_wait_preempt(void *arg)
}
/* Disable NEWCLIENT promotion */
- __i915_active_request_set(&rq[i]->timeline->last_request,
- dummy);
+ __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+ &dummy->fence);
i915_request_add(rq[i]);
}
@@ -1105,7 +1743,7 @@ static int live_suppress_wait_preempt(void *arg)
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
goto err_wedged;
if (engine->execlists.preempt_hang.count) {
@@ -1128,26 +1766,22 @@ err_client_1:
preempt_client_fini(&client[1]);
err_client_0:
preempt_client_fini(&client[0]);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_3;
}
static int live_chain_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client hi, lo;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -1156,19 +1790,16 @@ static int live_chain_preempt(void *arg)
* the previously submitted spinner in B.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &hi))
- goto err_unlock;
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
- if (preempt_client_init(i915, &lo))
+ if (preempt_client_init(gt, &lo))
goto err_client_hi;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
@@ -1199,7 +1830,7 @@ static int live_chain_preempt(void *arg)
goto err_wedged;
}
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_wedged;
}
@@ -1237,7 +1868,7 @@ static int live_chain_preempt(void *arg)
igt_spinner_end(&hi.spin);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to preempt over chain of %d\n",
count);
@@ -1253,7 +1884,7 @@ static int live_chain_preempt(void *arg)
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to flush low priority chain of %d requests\n",
count);
@@ -1274,57 +1905,50 @@ err_client_lo:
preempt_client_fini(&lo);
err_client_hi:
preempt_client_fini(&hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&hi.spin);
igt_spinner_end(&lo.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_lo;
}
static int live_preempt_hang(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
@@ -1341,7 +1965,7 @@ static int live_preempt_hang(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -1363,28 +1987,28 @@ static int live_preempt_hang(void *arg)
HZ / 10)) {
pr_err("Preemption did not occur within timeout!");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
- set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
intel_engine_reset(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
engine->execlists.preempt_hang.inject_hang = false;
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -1399,9 +2023,105 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ return -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+		engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
return err;
}
@@ -1416,7 +2136,7 @@ static int random_priority(struct rnd_state *rnd)
}
struct preempt_smoke {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch;
@@ -1440,7 +2160,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
int err = 0;
if (batch) {
- vma = i915_vma_instance(batch, ctx->vm, NULL);
+ struct i915_address_space *vm;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1489,11 +2213,9 @@ static int smoke_crescendo_thread(void *arg)
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
- mutex_lock(&smoke->i915->drm.struct_mutex);
err = smoke_submit(smoke,
ctx, count % I915_PRIORITY_MAX,
smoke->batch);
- mutex_unlock(&smoke->i915->drm.struct_mutex);
if (err)
return err;
@@ -1514,9 +2236,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
unsigned long count;
int err = 0;
- mutex_unlock(&smoke->i915->drm.struct_mutex);
-
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(engine, smoke->gt, id) {
arg[id] = *smoke;
arg[id].engine = engine;
if (!(flags & BATCH))
@@ -1532,8 +2252,10 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
get_task_struct(tsk[id]);
}
+ yield(); /* start all threads before we kthread_stop() */
+
count = 0;
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(engine, smoke->gt, id) {
int status;
if (IS_ERR_OR_NULL(tsk[id]))
@@ -1548,11 +2270,9 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
put_task_struct(tsk[id]);
}
- mutex_lock(&smoke->i915->drm.struct_mutex);
-
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
+ RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -1564,7 +2284,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
count = 0;
do {
- for_each_engine(smoke->engine, smoke->i915, id) {
+ for_each_engine(smoke->engine, smoke->gt, id) {
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
@@ -1580,25 +2300,24 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
+ RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
return 0;
}
static int live_preempt_smoke(void *arg)
{
struct preempt_smoke smoke = {
- .i915 = arg,
+ .gt = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 1024,
};
const unsigned int phase[] = { 0, BATCH };
- intel_wakeref_t wakeref;
struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
return 0;
smoke.contexts = kmalloc_array(smoke.ncontext,
@@ -1607,13 +2326,11 @@ static int live_preempt_smoke(void *arg)
if (!smoke.contexts)
return -ENOMEM;
- mutex_lock(&smoke.i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
-
- smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
err = PTR_ERR(smoke.batch);
- goto err_unlock;
+ goto err_free;
}
cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
@@ -1627,13 +2344,13 @@ static int live_preempt_smoke(void *arg)
i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
- if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
err = -EIO;
goto err_batch;
}
for (n = 0; n < smoke.ncontext; n++) {
- smoke.contexts[n] = kernel_context(smoke.i915);
+ smoke.contexts[n] = kernel_context(smoke.gt->i915);
if (!smoke.contexts[n])
goto err_ctx;
}
@@ -1660,15 +2377,13 @@ err_ctx:
err_batch:
i915_gem_object_put(smoke.batch);
-err_unlock:
- intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
- mutex_unlock(&smoke.i915->drm.struct_mutex);
+err_free:
kfree(smoke.contexts);
return err;
}
-static int nop_virtual_engine(struct drm_i915_private *i915,
+static int nop_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling,
unsigned int nctx,
@@ -1687,7 +2402,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
for (n = 0; n < nctx; n++) {
- ctx[n] = kernel_context(i915);
+ ctx[n] = kernel_context(gt->i915);
if (!ctx[n]) {
err = -ENOMEM;
nctx = n;
@@ -1712,7 +2427,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
}
}
- err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
if (err)
goto out;
@@ -1759,7 +2474,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
break;
}
}
@@ -1781,7 +2496,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
prime, div64_u64(ktime_to_ns(times[1]), prime));
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
@@ -1794,25 +2509,22 @@ out:
static int live_virtual_engine(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
- struct intel_gt *gt = &i915->gt;
enum intel_engine_id id;
unsigned int class, inst;
- int err = -ENODEV;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
- for_each_engine(engine, i915, id) {
- err = nop_virtual_engine(i915, &engine, 1, 1, 0);
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
if (err) {
pr_err("Failed to wrap engine %s: err=%d\n",
engine->name, err);
- goto out_unlock;
+ return err;
}
}
@@ -1830,23 +2542,21 @@ static int live_virtual_engine(void *arg)
continue;
for (n = 1; n <= nsibling + 1; n++) {
- err = nop_virtual_engine(i915, siblings, nsibling,
+ err = nop_virtual_engine(gt, siblings, nsibling,
n, 0);
if (err)
- goto out_unlock;
+ return err;
}
- err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
if (err)
- goto out_unlock;
+ return err;
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
-static int mask_virtual_engine(struct drm_i915_private *i915,
+static int mask_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
@@ -1862,7 +2572,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
* restrict it to our desired engine within the virtual engine.
*/
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
return -ENOMEM;
@@ -1876,7 +2586,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
if (err)
goto out_put;
- err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
if (err)
goto out_unpin;
@@ -1907,7 +2617,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
request[n]->fence.context,
request[n]->fence.seqno);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto out;
}
@@ -1922,11 +2632,8 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
}
err = igt_live_test_end(&t);
- if (err)
- goto out;
-
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < nsibling; n++)
@@ -1943,17 +2650,14 @@ out_close:
static int live_virtual_mask(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
- int err = 0;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
unsigned int nsibling;
@@ -1967,17 +2671,166 @@ static int live_virtual_mask(void *arg)
if (nsibling < 2)
continue;
- err = mask_virtual_engine(i915, siblings, nsibling);
+ err = mask_virtual_engine(gt, siblings, nsibling);
if (err)
- goto out_unlock;
+ return err;
+ }
+
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct i915_gem_context *ctx;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ ctx = kernel_context(gt->i915);
+ if (!ctx)
+ return -ENOMEM;
+
+ scratch = create_scratch(siblings[0]->gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(ctx);
return err;
}
-static int bond_virtual_engine(struct drm_i915_private *i915,
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * are preserved.
+ */
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+	/* As we use CS_GPR, we cannot run before they existed on all engines. */
+ if (INTEL_GEN(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
unsigned int class,
struct intel_engine_cs **siblings,
unsigned int nsibling,
@@ -1993,13 +2846,13 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
return -ENOMEM;
err = 0;
rq[0] = ERR_PTR(-ENOMEM);
- for_each_engine(master, i915, id) {
+ for_each_engine(master, gt, id) {
struct i915_sw_fence fence = {};
if (master->class == class)
@@ -2104,7 +2957,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
out:
for (n = 0; !IS_ERR(rq[n]); n++)
i915_request_put(rq[n]);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
kernel_context_close(ctx);
@@ -2121,17 +2974,14 @@ static int live_virtual_bond(void *arg)
{ "schedule", BOND_SCHEDULE },
{ },
};
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
- int err = 0;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
const struct phase *p;
int nsibling;
@@ -2148,38 +2998,42 @@ static int live_virtual_bond(void *arg)
continue;
for (p = phases; p->name; p++) {
- err = bond_virtual_engine(i915,
+ err = bond_virtual_engine(gt,
class, siblings, nsibling,
p->flags);
if (err) {
pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
__func__, p->name, class, nsibling, err);
- goto out_unlock;
+ return err;
}
}
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
+ SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
};
@@ -2189,5 +3043,512 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_live_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
+
+static void hexdump(const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ pr_info("*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ pr_info("[%04zx] %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
+static int live_lrc_layout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 *mem;
+ int err;
+
+ /*
+	 * Check the register offsets we use to create the initial reg state
+ * match the layout saved by HW.
+ */
+
+ mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ u32 *hw, *lrc;
+ int dw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ break;
+ }
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ lrc = memset(mem, 0, PAGE_SIZE);
+ execlists_init_reg_state(lrc,
+ engine->kernel_context,
+ engine,
+ engine->kernel_context->ring,
+ true);
+
+ dw = 0;
+ do {
+ u32 lri = hw[dw];
+
+ if (lri == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ pr_err("%s: Expected LRI command at dword %d, found %08x\n",
+ engine->name, dw, lri);
+ err = -EINVAL;
+ break;
+ }
+
+ if (lrc[dw] != lri) {
+ pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
+ engine->name, dw, lri, lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
+ lri &= 0x7f;
+ lri++;
+ dw++;
+
+ while (lri) {
+ if (hw[dw] != lrc[dw]) {
+ pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
+ engine->name, dw, hw[dw], lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Skip over the actual register value as we
+ * expect that to differ.
+ */
+ dw += 2;
+ lri -= 2;
+ }
+ } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ if (err) {
+ pr_info("%s: HW register image:\n", engine->name);
+ hexdump(hw, PAGE_SIZE);
+
+ pr_info("%s: SW register image:\n", engine->name);
+ hexdump(lrc, PAGE_SIZE);
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
+ if (err)
+ break;
+ }
+
+ kfree(mem);
+ return err;
+}
+
+static int find_offset(const u32 *lri, u32 offset)
+{
+ int i;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ if (lri[i] == offset)
+ return i;
+
+ return -1;
+}
+
+static int live_lrc_fixed(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the assumed register offsets match the actual locations in
+ * the context image.
+ */
+
+ for_each_engine(engine, gt, id) {
+ const struct {
+ u32 reg;
+ u32 offset;
+ const char *name;
+ } tbl[] = {
+ {
+ i915_mmio_reg_offset(RING_START(engine->mmio_base)),
+ CTX_RING_BUFFER_START - 1,
+ "RING_START"
+ },
+ {
+ i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
+ CTX_RING_BUFFER_CONTROL - 1,
+ "RING_CTL"
+ },
+ {
+ i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
+ CTX_RING_HEAD - 1,
+ "RING_HEAD"
+ },
+ {
+ i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
+ CTX_RING_TAIL - 1,
+ "RING_TAIL"
+ },
+ {
+ i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
+ lrc_ring_mi_mode(engine),
+ "RING_MI_MODE"
+ },
+ {
+ engine->mmio_base + 0x110,
+ CTX_BB_STATE - 1,
+ "BB_STATE"
+ },
+ { },
+ }, *t;
+ u32 *hw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ break;
+ }
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ for (t = tbl; t->name; t++) {
+ int dw = find_offset(hw, t->reg);
+
+ if (dw != t->offset) {
+ pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",
+ engine->name,
+ t->name,
+ t->reg,
+ dw,
+ t->offset);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
+ }
+
+ return err;
+}
+
+static int __live_lrc_state(struct i915_gem_context *fixme,
+ struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ enum {
+ RING_START_IDX = 0,
+ RING_TAIL_IDX,
+ MAX_IDX
+ };
+ u32 expected[MAX_IDX];
+ u32 *cs;
+ int err;
+ int n;
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto err_put;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ cs = intel_ring_begin(rq, 4 * MAX_IDX);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_unpin;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ expected[RING_TAIL_IDX] = ce->ring->tail;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < MAX_IDX; n++) {
+ if (cs[n] != expected[n]) {
+ pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
+ engine->name, n, cs[n], expected[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_state(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the live register state matches what we expect for this
+ * intel_context.
+ */
+
+ fixme = kernel_context(gt->i915);
+ if (!fixme)
+ return -ENOMEM;
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
+ }
+
+ for_each_engine(engine, gt, id) {
+ err = __live_lrc_state(fixme, engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(fixme);
+ return err;
+}
+
+static int gpr_make_dirty(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ u32 *cs;
+ int n;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static int __live_gpr_clear(struct i915_gem_context *fixme,
+ struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
+ return 0; /* GPR only on rcs0 for gen8 */
+
+ err = gpr_make_dirty(engine);
+ if (err)
+ return err;
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_put;
+ }
+
+ cs = intel_ring_begin(rq, 4 * NUM_GPR_DW);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_put;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n]) {
+ pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
+ engine->name,
+ n / 2, n & 1 ? "udw" : "ldw",
+ cs[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_gpr_clear(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that GPR registers are cleared in new contexts as we need
+ * to avoid leaking any information from previous contexts.
+ */
+
+ fixme = kernel_context(gt->i915);
+ if (!fixme)
+ return -ENOMEM;
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
+ }
+
+ for_each_engine(engine, gt, id) {
+ err = __live_gpr_clear(fixme, engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(fixme);
+ return err;
+}
+
+int intel_lrc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_lrc_layout),
+ SUBTEST(live_lrc_fixed),
+ SUBTEST(live_lrc_state),
+ SUBTEST(live_gpr_clear),
+ };
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 00a4f60cdfd5..6ad6aca315f6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -17,7 +17,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
igt_global_reset_lock(gt);
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reset_count = i915_reset_count(&gt->i915->gpu_error);
@@ -28,7 +28,7 @@ static int igt_global_reset(void *arg)
err = -EINVAL;
}
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -45,14 +45,14 @@ static int igt_wedged_reset(void *arg)
/* Check that we can recover a wedged device with a GPU reset */
igt_global_reset_lock(gt);
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
intel_gt_set_wedged(gt);
GEM_BUG_ON(!intel_gt_is_wedged(gt));
intel_gt_reset(gt, ALL_ENGINES, NULL);
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
return intel_gt_is_wedged(gt) ? -EIO : 0;
@@ -112,7 +112,7 @@ static int igt_atomic_engine_reset(void *arg)
/* Check that the resets are usable from atomic context */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (USES_GUC_SUBMISSION(gt->i915))
@@ -125,8 +125,8 @@ static int igt_atomic_engine_reset(void *arg)
if (!igt_force_reset(gt))
goto out_unlock;
- for_each_engine(engine, gt->i915, id) {
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ for_each_engine(engine, gt, id) {
+ tasklet_disable(&engine->execlists.tasklet);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
@@ -170,7 +170,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
};
struct intel_gt *gt = &i915->gt;
- if (!intel_has_gpu_reset(gt->i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
if (intel_gt_is_wedged(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 321481403165..f04a59fe5d2c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,8 +6,10 @@
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
+#include "intel_engine_pm.h"
#include "intel_gt.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
#include "../selftests/i915_random.h"
#include "../i915_selftest.h"
@@ -34,7 +36,7 @@ static unsigned long hwsp_cacheline(struct intel_timeline *tl)
#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
struct mock_hwsp_freelist {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct radix_tree_root cachelines;
struct intel_timeline **history;
unsigned long count, max;
@@ -67,7 +69,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
- tl = intel_timeline_create(&state->i915->gt, NULL);
+ tl = intel_timeline_create(state->gt, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -105,6 +107,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
static int mock_hwsp_freelist(void *arg)
{
struct mock_hwsp_freelist state;
+ struct drm_i915_private *i915;
const struct {
const char *name;
unsigned int flags;
@@ -116,12 +119,14 @@ static int mock_hwsp_freelist(void *arg)
unsigned int na;
int err = 0;
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
- state.i915 = mock_gem_device();
- if (!state.i915)
- return -ENOMEM;
+ state.gt = &i915->gt;
/*
* Create a bunch of timelines and check that their HWSP do not overlap.
@@ -136,7 +141,6 @@ static int mock_hwsp_freelist(void *arg)
goto err_put;
}
- mutex_lock(&state.i915->drm.struct_mutex);
for (p = phases; p->name; p++) {
pr_debug("%s(%s)\n", __func__, p->name);
for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
@@ -149,10 +153,9 @@ static int mock_hwsp_freelist(void *arg)
out:
for (na = 0; na < state.max; na++)
__mock_hwsp_record(&state, na, NULL);
- mutex_unlock(&state.i915->drm.struct_mutex);
kfree(state.history);
err_put:
- drm_dev_put(&state.i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -449,8 +452,6 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
struct i915_request *rq;
int err;
- lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
-
err = intel_timeline_pin(tl);
if (err) {
rq = ERR_PTR(err);
@@ -461,10 +462,14 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
if (IS_ERR(rq))
goto out_unpin;
+ i915_request_get(rq);
+
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
i915_request_add(rq);
- if (err)
+ if (err) {
+ i915_request_put(rq);
rq = ERR_PTR(err);
+ }
out_unpin:
intel_timeline_unpin(tl);
@@ -475,11 +480,11 @@ out:
}
static struct intel_timeline *
-checked_intel_timeline_create(struct drm_i915_private *i915)
+checked_intel_timeline_create(struct intel_gt *gt)
{
struct intel_timeline *tl;
- tl = intel_timeline_create(&i915->gt, NULL);
+ tl = intel_timeline_create(gt, NULL);
if (IS_ERR(tl))
return tl;
@@ -496,11 +501,10 @@ checked_intel_timeline_create(struct drm_i915_private *i915)
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@@ -515,37 +519,40 @@ static int live_hwsp_engine(void *arg)
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
for (n = 0; n < NUM_TIMELINES; n++) {
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
+
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
}
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
@@ -559,11 +566,7 @@ out:
intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
@@ -571,11 +574,10 @@ out:
static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@@ -591,25 +593,25 @@ static int live_hwsp_alternate(void *arg)
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_timeline *tl;
struct i915_request *rq;
if (!intel_engine_can_store_dword(engine))
continue;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
+ intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
+ intel_engine_pm_get(engine);
rq = tl_write(tl, engine, count);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
@@ -617,11 +619,12 @@ static int live_hwsp_alternate(void *arg)
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
@@ -635,22 +638,17 @@ out:
intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
static int live_hwsp_wrap(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct intel_timeline *tl;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = 0;
/*
@@ -658,14 +656,10 @@ static int live_hwsp_wrap(void *arg)
* foreign GPU references.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ tl = intel_timeline_create(gt, NULL);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
- tl = intel_timeline_create(&i915->gt, NULL);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out_rpm;
- }
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
@@ -673,7 +667,7 @@ static int live_hwsp_wrap(void *arg)
if (err)
goto out_free;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
const u32 *hwsp_seqno[2];
struct i915_request *rq;
u32 seqno[2];
@@ -681,7 +675,9 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
rq = i915_request_create(engine->kernel_context);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
@@ -743,29 +739,24 @@ static int live_hwsp_wrap(void *arg)
goto out;
}
- i915_retire_requests(i915); /* recycle HWSP */
+ intel_gt_retire_requests(gt); /* recycle HWSP */
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
intel_timeline_unpin(tl);
out_free:
intel_timeline_put(tl);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
static int live_hwsp_recycle(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count;
int err = 0;
@@ -775,38 +766,38 @@ static int live_hwsp_recycle(void *arg)
* want to confuse ourselves or the GPU.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
IGT_TIMEOUT(end_time);
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
do {
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
+ i915_request_put(rq);
intel_timeline_put(tl);
err = -EIO;
- goto out;
+ break;
}
if (*tl->hwsp_seqno != count) {
@@ -815,17 +806,18 @@ static int live_hwsp_recycle(void *arg)
err = -EINVAL;
}
+ i915_request_put(rq);
intel_timeline_put(tl);
count++;
if (err)
- goto out;
+ break;
} while (!__igt_timeout(end_time, NULL));
- }
-out:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
return err;
}
@@ -842,5 +834,5 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_live_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index d06d68ac2a3b..abce6e4ec9c0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -33,8 +33,32 @@ struct wa_lists {
} engine[I915_NUM_ENGINES];
};
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIMEDOUT;
+ i915_request_put(rq);
+
+ return err;
+}
+
static void
-reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -42,10 +66,10 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists));
wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
- gt_init_workarounds(i915, &lists->gt_wa_list);
+ gt_init_workarounds(gt->i915, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list;
wa_init_start(wal, "REF", engine->name);
@@ -59,12 +83,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
}
static void
-reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
intel_wa_list_free(&lists->engine[id].wa_list);
intel_wa_list_free(&lists->gt_wa_list);
@@ -191,10 +215,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
i915_gem_object_lock(results);
- intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
+ intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
- if (intel_gt_is_wedged(&ctx->i915->gt))
+ if (intel_gt_is_wedged(engine->gt))
err = -EIO;
if (err)
goto out_put;
@@ -243,7 +267,6 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
struct i915_gem_context *ctx;
struct intel_context *ce;
struct i915_request *rq;
- intel_wakeref_t wakeref;
int err = 0;
ctx = kernel_context(engine->i915);
@@ -255,12 +278,9 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
- rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
- rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+ rq = igt_spinner_create_request(spin, ce, MI_NOOP);
intel_context_put(ce);
- kernel_context_close(ctx);
if (IS_ERR(rq)) {
spin = NULL;
@@ -268,17 +288,12 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
goto err;
}
- i915_request_add(rq);
-
- if (spin && !igt_wait_for_spinner(spin, rq)) {
- pr_err("Spinner failed to start\n");
- err = -ETIMEDOUT;
- }
-
+ err = request_add_spin(rq, spin);
err:
if (err && spin)
igt_spinner_end(spin);
+ kernel_context_close(ctx);
return err;
}
@@ -313,7 +328,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (err)
goto out_spin;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(engine->uncore->rpm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
@@ -355,6 +370,7 @@ out_ctx:
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
struct i915_vma *vma;
int err;
@@ -362,7 +378,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -463,12 +481,15 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
0xffff00ff,
0xffffffff,
};
+ struct i915_address_space *vm;
struct i915_vma *scratch;
struct i915_vma *batch;
int err = 0, i, v;
u32 *cs, *results;
- scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
+ i915_vm_put(vm);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -492,6 +513,9 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
ro_reg = ro_register(reg);
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
@@ -565,6 +589,14 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
goto err_request;
}
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
err = engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
0);
@@ -572,15 +604,11 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
goto err_request;
err_request:
- i915_request_add(rq);
- if (err)
- goto out_batch;
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = request_add_sync(rq, err);
+ if (err) {
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
- intel_gt_set_wedged(&ctx->i915->gt);
- err = -EIO;
+ intel_gt_set_wedged(engine->gt);
goto out_batch;
}
@@ -668,7 +696,7 @@ out_unpin:
break;
}
- if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(ctx->i915))
err = -EIO;
out_batch:
i915_vma_unpin_and_release(&batch, 0);
@@ -679,36 +707,29 @@ out_scratch:
static int live_dirty_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct drm_file *file;
int err = 0;
/* Can the user write to the whitelisted registers? */
- if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ file = mock_file(gt->i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- mutex_unlock(&i915->drm.struct_mutex);
- file = mock_file(i915);
- mutex_lock(&i915->drm.struct_mutex);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- goto out_rpm;
- }
-
- ctx = live_context(i915, file);
+ ctx = live_context(gt->i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_file;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (engine->whitelist.count == 0)
continue;
@@ -718,45 +739,43 @@ static int live_dirty_whitelist(void *arg)
}
out_file:
- mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(i915, file);
- mutex_lock(&i915->drm.struct_mutex);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mock_file_free(gt->i915, file);
return err;
}
static int live_reset_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
+ igt_global_reset_lock(gt);
- if (!engine || engine->whitelist.count == 0)
- return 0;
-
- igt_global_reset_lock(&i915->gt);
+ for_each_engine(engine, gt, id) {
+ if (engine->whitelist.count == 0)
+ continue;
- if (intel_has_reset_engine(i915)) {
- err = check_whitelist_across_reset(engine,
- do_engine_reset,
- "engine");
- if (err)
- goto out;
- }
+ if (intel_has_reset_engine(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_engine_reset,
+ "engine");
+ if (err)
+ goto out;
+ }
- if (intel_has_gpu_reset(i915)) {
- err = check_whitelist_across_reset(engine,
- do_device_reset,
- "device");
- if (err)
- goto out;
+ if (intel_has_gpu_reset(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_device_reset,
+ "device");
+ if (err)
+ goto out;
+ }
}
out:
- igt_global_reset_unlock(&i915->gt);
+ igt_global_reset_unlock(gt);
return err;
}
@@ -772,6 +791,14 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
if (IS_ERR(rq))
return PTR_ERR(rq);
+ i915_vma_lock(results);
+ err = i915_request_await_object(rq, results->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(results);
+ if (err)
+ goto err_req;
+
srm = MI_STORE_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
srm++;
@@ -786,8 +813,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
u64 offset = results->node.start + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
- /* Clear access permission field */
- reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
*cs++ = srm;
*cs++ = reg;
@@ -797,12 +824,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
intel_ring_advance(rq, cs);
err_req:
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
- err = -EIO;
-
- return err;
+ return request_add_sync(rq, err);
}
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
@@ -830,6 +852,9 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
if (ro_register(reg))
continue;
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
*cs++ = reg;
*cs++ = 0xffffffff;
}
@@ -850,13 +875,19 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
goto err_request;
}
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
/* Perform the writes from an unprivileged "user" batch */
err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
err_request:
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
- err = -EIO;
+ err = request_add_sync(rq, err);
err_unpin:
i915_gem_object_unpin_map(batch->obj);
@@ -973,7 +1004,7 @@ err_a:
static int live_isolated_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct {
struct i915_gem_context *ctx;
struct i915_vma *scratch[2];
@@ -987,40 +1018,46 @@ static int live_isolated_whitelist(void *arg)
* invisible to a second context.
*/
- if (!intel_engines_has_context_isolation(i915))
- return 0;
-
- if (!i915->kernel_context->vm)
+ if (!intel_engines_has_context_isolation(gt->i915))
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_address_space *vm;
struct i915_gem_context *c;
- c = kernel_context(i915);
+ c = kernel_context(gt->i915);
if (IS_ERR(c)) {
err = PTR_ERR(c);
goto err;
}
- client[i].scratch[0] = create_scratch(c->vm, 1024);
+ vm = i915_gem_context_get_vm_rcu(c);
+
+ client[i].scratch[0] = create_scratch(vm, 1024);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
+ i915_vm_put(vm);
kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(c->vm, 1024);
+ client[i].scratch[1] = create_scratch(vm, 1024);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ i915_vm_put(vm);
kernel_context_close(c);
goto err;
}
client[i].ctx = c;
+ i915_vm_put(vm);
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
+ if (!engine->kernel_context->vm)
+ continue;
+
if (!whitelist_writable_count(engine))
continue;
@@ -1074,7 +1111,7 @@ err:
kernel_context_close(client[i].ctx);
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
return err;
@@ -1109,16 +1146,16 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
static int
live_gpu_reset_workarounds(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1126,25 +1163,25 @@ live_gpu_reset_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n");
- igt_global_reset_lock(&i915->gt);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reference_lists_init(i915, &lists);
+ reference_lists_init(gt, &lists);
ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok)
goto out;
- intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");
+ intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after reset");
out:
i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx);
- reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(&i915->gt);
+ reference_lists_fini(gt, &lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
return ok ? 0 : -ESRCH;
}
@@ -1152,7 +1189,7 @@ out:
static int
live_engine_reset_workarounds(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
struct intel_context *ce;
@@ -1162,17 +1199,17 @@ live_engine_reset_workarounds(void *arg)
struct wa_lists lists;
int ret = 0;
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- igt_global_reset_lock(&i915->gt);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reference_lists_init(i915, &lists);
+ reference_lists_init(gt, &lists);
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct intel_engine_cs *engine = ce->engine;
@@ -1205,12 +1242,10 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
pr_err("Spinner failed to start\n");
igt_spinner_fini(&spin);
- ret = -ETIMEDOUT;
goto err;
}
@@ -1227,12 +1262,12 @@ live_engine_reset_workarounds(void *arg)
}
err:
i915_gem_context_unlock_engines(ctx);
- reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(&i915->gt);
+ reference_lists_fini(gt, &lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
kernel_context_close(ctx);
- igt_flush_test(i915, I915_WAIT_LOCKED);
+ igt_flush_test(gt->i915);
return ret;
}
@@ -1246,14 +1281,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_gpu_reset_workarounds),
SUBTEST(live_engine_reset_workarounds),
};
- int err;
if (intel_gt_is_wedged(&i915->gt))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 598170efcaf6..2a77c051f36a 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 249c747e9756..3ee4a4e7689d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -4,11 +4,34 @@
*/
#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
+/**
+ * DOC: GuC
+ *
+ * The GuC is a microcontroller inside the GT HW, introduced in gen9. It is
+ * designed to offload some of the functionality usually performed by the host
+ * driver; currently the main operations it can take care of are:
+ *
+ * - Authentication of the HuC, which is required to fully enable HuC usage.
+ * - Low latency graphics context scheduling (a.k.a. GuC submission).
+ * - GT Power management.
+ *
+ * The enable_guc module parameter can be used to select which of those
+ * operations to enable within GuC. Note that not all the operations are
+ * supported on all gen9+ platforms.
+ *
+ * Enabling the GuC is not mandatory and therefore the firmware is only loaded
+ * if at least one of the operations is selected. However, not loading the GuC
+ * might result in the loss of some features that do require the GuC (currently
+ * just the HuC, but more are expected to land in the future).
+ */
+
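A hedged illustration of the enable_guc selection described in the DOC block above. The bit assignments below are assumed from the usual i915 modparam conventions and are not stated in this patch:

/*
 * Assumed enable_guc values (illustrative only):
 *   i915.enable_guc=0   do not load any GuC firmware
 *   i915.enable_guc=1   enable GuC submission
 *   i915.enable_guc=2   load the GuC only to authenticate the HuC
 *   i915.enable_guc=3   enable both
 */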
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -56,6 +79,93 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
guc->send_regs.fw_domains = fw_domains;
}
+static void gen9_reset_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen9_enable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+ gt->pm_guc_events);
+ guc->interrupts.enabled = true;
+ gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
+ }
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen9_disable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
+
+ gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen9_reset_guc_interrupts(guc);
+}
+
+static void gen11_reset_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen11_enable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_ENABLE, events);
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_MASK, ~events);
+ guc->interrupts.enabled = true;
+ }
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen11_disable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
+
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen11_reset_guc_interrupts(guc);
+}
+
void intel_guc_init_early(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
@@ -82,32 +192,6 @@ void intel_guc_init_early(struct intel_guc *guc)
}
}
-static int guc_shared_data_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- void *vaddr;
-
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
- }
-
- guc->shared_data = vma;
- guc->shared_data_vaddr = vaddr;
-
- return 0;
-}
-
-static void guc_shared_data_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
-}
-
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
u32 level = intel_guc_log_get_level(&guc->log);
@@ -254,14 +338,9 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_fetch;
- ret = guc_shared_data_create(guc);
- if (ret)
- goto err_fw;
- GEM_BUG_ON(!guc->shared_data);
-
ret = intel_guc_log_create(&guc->log);
if (ret)
- goto err_shared;
+ goto err_fw;
ret = intel_guc_ads_create(guc);
if (ret)
@@ -296,8 +375,6 @@ err_ads:
intel_guc_ads_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
-err_shared:
- guc_shared_data_destroy(guc);
err_fw:
intel_uc_fw_fini(&guc->fw);
err_fetch:
@@ -322,7 +399,6 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
- guc_shared_data_destroy(guc);
intel_uc_fw_fini(&guc->fw);
intel_uc_fw_cleanup_fetch(&guc->fw);
}
@@ -478,6 +554,13 @@ int intel_guc_suspend(struct intel_guc *guc)
};
/*
+ * If GuC communication is enabled but submission is not supported,
+ * we do not need to suspend the GuC.
+ */
+ if (!intel_guc_submission_is_enabled(guc))
+ return 0;
+
+ /*
* The ENTER_S_STATE action queues the save/restore operation in GuC FW
* and then returns, so waiting on the H2G is not enough to guarantee
* GuC is done. When all the processing is done, GuC writes
@@ -518,19 +601,9 @@ int intel_guc_suspend(struct intel_guc *guc)
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine)
{
- u32 data[7];
-
- GEM_BUG_ON(!guc->execbuf_client);
-
- data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
- data[1] = engine->guc_id;
- data[2] = 0;
- data[3] = 0;
- data[4] = 0;
- data[5] = guc->execbuf_client->stage_id;
- data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
+ /* XXX: to be implemented with submission interface rework */
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return -ENODEV;
}
/**
@@ -544,13 +617,27 @@ int intel_guc_resume(struct intel_guc *guc)
GUC_POWER_D0,
};
+ /*
+ * If GuC communication is enabled but submission is not supported,
+ * we do not need to resume the GuC but we do need to enable the
+ * GuC communication on resume (above).
+ */
+ if (!intel_guc_submission_is_enabled(guc))
+ return 0;
+
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
- * DOC: GuC Address Space
+ * DOC: GuC Memory Management
*
- * The layout of GuC address space is shown below:
+ * GuC can't allocate any memory for its own usage, so all the allocations must
+ * be handled by the host driver. GuC accesses the memory via the GGTT, with the
+ * exception of the top and bottom parts of the 4GB address space, which are
+ * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
+ * or other parts of the HW. The driver must take care not to place objects that
+ * the GuC is going to access in these reserved ranges. The layout of the GuC
+ * address space is shown below:
*
* ::
*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2b2f046d3cc3..e6400204a2bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -47,8 +47,6 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
- struct i915_vma *shared_data;
- void *shared_data_vaddr;
struct intel_guc_client *execbuf_client;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 1d3cdd67ca2f..a26a85d50209 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -548,6 +548,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
@@ -556,7 +557,6 @@ enum intel_guc_action {
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
- INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 36332064de9c..caed0d57e704 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -226,7 +226,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (WARN_ON(!intel_guc_log_relay_enabled(log)))
+ if (WARN_ON(!intel_guc_log_relay_created(log)))
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
@@ -361,6 +361,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
{
mutex_init(&log->relay.lock);
INIT_WORK(&log->relay.flush_work, capture_logs_work);
+ log->relay.started = false;
}
static int guc_log_relay_create(struct intel_guc_log *log)
@@ -546,7 +547,7 @@ out_unlock:
return ret;
}
-bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
+bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
return log->relay.buf_addr;
}
@@ -560,7 +561,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (intel_guc_log_relay_enabled(log)) {
+ if (intel_guc_log_relay_created(log)) {
ret = -EEXIST;
goto out_unlock;
}
@@ -585,6 +586,21 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
mutex_unlock(&log->relay.lock);
+ return 0;
+
+out_relay:
+ guc_log_relay_destroy(log);
+out_unlock:
+ mutex_unlock(&log->relay.lock);
+
+ return ret;
+}
+
+int intel_guc_log_relay_start(struct intel_guc_log *log)
+{
+ if (log->relay.started)
+ return -EEXIST;
+
guc_log_enable_flush_events(log);
/*
@@ -594,47 +610,59 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
*/
queue_work(system_highpri_wq, &log->relay.flush_work);
- return 0;
+ log->relay.started = true;
-out_relay:
- guc_log_relay_destroy(log);
-out_unlock:
- mutex_unlock(&log->relay.lock);
-
- return ret;
+ return 0;
}
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
+ if (!log->relay.started)
+ return;
+
/*
* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete, otherwise the forceful flush may not actually happen.
*/
flush_work(&log->relay.flush_work);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);
}
-void intel_guc_log_relay_close(struct intel_guc_log *log)
+/*
+ * Stops the relay log. Called from intel_guc_log_relay_close(), so no
+ * possibility of race with start/flush since relay_write cannot race
+ * relay_close.
+ */
+static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ if (!log->relay.started)
+ return;
+
guc_log_disable_flush_events(log);
intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
+ log->relay.started = false;
+}
+
+void intel_guc_log_relay_close(struct intel_guc_log *log)
+{
+ guc_log_relay_stop(log);
+
mutex_lock(&log->relay.lock);
- GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
+ GEM_BUG_ON(!intel_guc_log_relay_created(log));
guc_log_unmap(log);
guc_log_relay_destroy(log);
mutex_unlock(&log->relay.lock);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 6f764879acb1..c252c022c5fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -47,6 +47,7 @@ struct intel_guc_log {
struct i915_vma *vma;
struct {
void *buf_addr;
+ bool started;
struct work_struct flush_work;
struct rchan *channel;
struct mutex lock;
@@ -65,8 +66,9 @@ int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
-bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
+bool intel_guc_log_relay_created(const struct intel_guc_log *log);
int intel_guc_log_relay_open(struct intel_guc_log *log);
+int intel_guc_log_relay_start(struct intel_guc_log *log);
void intel_guc_log_relay_flush(struct intel_guc_log *log);
void intel_guc_log_relay_close(struct intel_guc_log *log);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index edf194d23c6b..1949346e714e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -83,6 +83,9 @@
#define GEN8_GTCR _MMIO(0x4274)
#define GEN8_GTCR_INVALIDATE (1<<0)
+#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8)
+#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0)
+
#define GUC_ARAT_C6DIS _MMIO(0xA178)
#define GUC_SHIM_CONTROL _MMIO(0xc064)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f325d3dd564f..2498c55e0ea5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,12 +6,13 @@
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
-
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
+
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -29,6 +30,12 @@ enum {
/**
* DOC: GuC-based command submission
*
+ * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
+ * firmware is moving to an updated submission interface and we plan to
+ * turn submission back on when that lands. The below documentation (and related
+ * code) matches the old submission model and will be updated as part of the
+ * upgrade to the new flow.
+ *
* GuC client:
* An intel_guc_client refers to a submission path through GuC. Currently, there
* is only one client, which is charged with all submissions to the GuC. This
@@ -1004,7 +1011,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
static void guc_interrupts_capture(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1014,7 +1021,7 @@ static void guc_interrupts_capture(struct intel_gt *gt)
* to GuC
*/
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -1050,7 +1057,7 @@ static void guc_interrupts_capture(struct intel_gt *gt)
static void guc_interrupts_release(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1062,7 +1069,7 @@ static void guc_interrupts_release(struct intel_gt *gt)
*/
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
@@ -1119,7 +1126,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
enum intel_engine_id id;
int err;
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -1145,7 +1152,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(gt);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
engine->set_default_submission = guc_set_default_submission;
engine->set_default_submission(engine);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d4625c97b4f9..32a069841c14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -9,6 +9,34 @@
#include "intel_huc.h"
#include "i915_drv.h"
+/**
+ * DOC: HuC
+ *
+ * The HuC is a dedicated microcontroller for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can directly use the firmware
+ * capabilities by adding HuC-specific commands to batch buffers.
+ *
+ * The kernel driver is only responsible for loading the HuC firmware and
+ * triggering its security authentication, which is performed by the GuC. For
+ * The GuC to correctly perform the authentication, the HuC binary must be
+ * loaded before the GuC one. Loading the HuC is optional; however, not using
+ * the HuC might negatively impact power usage and/or performance of media
+ * workloads, depending on the use-cases.
+ *
+ * See https://github.com/intel/media-driver for the latest details on HuC
+ * functionality.
+ */
+
+/**
+ * DOC: HuC Memory Management
+ *
+ * Similarly to the GuC, the HuC can't do any memory allocations on its own,
+ * with the difference being that the allocations for HuC usage are handled by
+ * the userspace driver instead of the kernel one. The HuC accesses the memory
+ * via the PPGTT belonging to the context loaded on the VCS executing the
+ * HuC-specific commands.
+ */
+
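A minimal userspace-side sketch, assuming libdrm and the existing I915_PARAM_HUC_STATUS getparam (which is backed by intel_huc_check_status()), of how a media driver could confirm that the HuC was authenticated before emitting HuC-specific commands; this is an illustration, not part of the patch:

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns non-zero once the HuC firmware is loaded and authenticated. */
static int huc_is_authenticated(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HUC_STATUS,
		.value = &value,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* query failed or not supported */

	return value;
}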
void intel_huc_init_early(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
@@ -35,7 +63,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
void *vaddr;
int err;
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -118,10 +146,9 @@ void intel_huc_fini(struct intel_huc *huc)
*
* Called after HuC and GuC firmware loading during intel_uc_init_hw().
*
- * This function pins HuC firmware image object into GGTT.
- * Then it invokes GuC action to authenticate passing the offset to RSA
- * signature through intel_guc_auth_huc(). It then waits for 50ms for
- * firmware verification ACK and unpins the object.
+ * This function invokes the GuC action to authenticate the HuC firmware,
+ * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
+ * waits for up to 50ms for firmware verification ACK.
*/
int intel_huc_auth(struct intel_huc *huc)
{
@@ -134,7 +161,7 @@ int intel_huc_auth(struct intel_huc *huc)
if (!intel_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC;
- ret = i915_inject_load_error(gt->i915, -ENXIO);
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
goto fail;
@@ -185,7 +212,7 @@ int intel_huc_check_status(struct intel_huc *huc)
if (!intel_huc_is_supported(huc))
return -ENODEV;
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
status = intel_uncore_read(gt->uncore, huc->status.reg);
return (status & huc->status.mask) == huc->status.value;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 74602487ed67..d654340d4d03 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -8,21 +8,6 @@
#include "i915_drv.h"
/**
- * DOC: HuC Firmware
- *
- * Motivation:
- * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
- * Efficiency Video Coding) operations. Userspace can use the firmware
- * capabilities by adding HuC specific commands to batch buffers.
- *
- * Implementation:
- * The same firmware loader is used as the GuC. However, the actual
- * loading to HW is deferred until GEM initialization is done.
- *
- * Note that HuC firmware loading must be done before GuC loading.
- */
-
-/**
* intel_huc_fw_init_early() - initializes HuC firmware struct
* @huc: intel_huc struct
*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 71ee7ab035cc..629b19377a29 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -20,7 +20,7 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
int ret;
u32 guc_status;
- ret = i915_inject_load_error(gt->i915, -ENXIO);
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
return ret;
@@ -197,7 +197,7 @@ static int guc_enable_communication(struct intel_guc *guc)
GEM_BUG_ON(guc_communication_enabled(guc));
- ret = i915_inject_load_error(i915, -ENXIO);
+ ret = i915_inject_probe_error(i915, -ENXIO);
if (ret)
return ret;
@@ -224,17 +224,7 @@ static int guc_enable_communication(struct intel_guc *guc)
return 0;
}
-static void guc_stop_communication(struct intel_guc *guc)
-{
- intel_guc_ct_stop(&guc->ct);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-
- guc_clear_mmio_msg(guc);
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
+static void __guc_stop_communication(struct intel_guc *guc)
{
/*
* Events generated during or after CT disable are logged by guc in
@@ -247,6 +237,20 @@ static void guc_disable_communication(struct intel_guc *guc)
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
+}
+
+static void guc_stop_communication(struct intel_guc *guc)
+{
+ intel_guc_ct_stop(&guc->ct);
+
+ __guc_stop_communication(guc);
+
+ DRM_INFO("GuC communication stopped\n");
+}
+
+static void guc_disable_communication(struct intel_guc *guc)
+{
+ __guc_stop_communication(guc);
intel_guc_ct_disable(&guc->ct);
@@ -368,7 +372,7 @@ static int uc_init_wopcm(struct intel_uc *uc)
GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -537,7 +541,9 @@ void intel_uc_fini_hw(struct intel_uc *uc)
if (intel_uc_supports_guc_submission(uc))
intel_guc_submission_disable(guc);
- guc_disable_communication(guc);
+ if (guc_communication_enabled(guc))
+ guc_disable_communication(guc);
+
__uc_sanitize(uc);
}
@@ -581,7 +587,7 @@ void intel_uc_suspend(struct intel_uc *uc)
if (!intel_guc_is_running(guc))
return;
- with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
intel_uc_runtime_suspend(uc);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index bd22bf11adad..66a30ab7044a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -37,27 +37,34 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
/*
* List of required GuC and HuC binaries per-platform.
* Must be ordered based on platform + revid, from newer to older.
+ *
+ * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas
+ * between 33.0 and 35.2 are only related to new additions to support new Gen12
+ * features.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
- fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398))
-
-#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
+ fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 2, 0, 0))
+
+#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
"i915/" \
__stringify(prefix_) name_ \
- __stringify(major_) separator_ \
- __stringify(minor_) separator_ \
+ __stringify(major_) "." \
+ __stringify(minor_) "." \
__stringify(patch_) ".bin"
#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_)
+ __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
- __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_)
+ __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
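For reference, the reworked path macros expand as follows for the TGL entries in the table above (derived directly from the definitions):

/*
 *   MAKE_GUC_FW_PATH(tgl, 35, 2, 0)  ->  "i915/tgl_guc_35.2.0.bin"
 *   MAKE_HUC_FW_PATH(tgl, 7, 0, 3)   ->  "i915/tgl_huc_7.0.3.bin"
 */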
/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
@@ -218,29 +225,31 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
{
bool user = e == -EINVAL;
- if (i915_inject_load_error(i915, e)) {
+ if (i915_inject_probe_error(i915, e)) {
/* non-existing blob */
uc_fw->path = "<invalid>";
uc_fw->user_overridden = user;
- } else if (i915_inject_load_error(i915, e)) {
+ } else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
uc_fw->major_ver_wanted += 1;
uc_fw->minor_ver_wanted = 0;
uc_fw->user_overridden = user;
- } else if (i915_inject_load_error(i915, e)) {
+ } else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
uc_fw->minor_ver_wanted += 1;
uc_fw->user_overridden = user;
- } else if (uc_fw->major_ver_wanted && i915_inject_load_error(i915, e)) {
+ } else if (uc_fw->major_ver_wanted &&
+ i915_inject_probe_error(i915, e)) {
/* require prev major version */
uc_fw->major_ver_wanted -= 1;
uc_fw->minor_ver_wanted = 0;
uc_fw->user_overridden = user;
- } else if (uc_fw->minor_ver_wanted && i915_inject_load_error(i915, e)) {
+ } else if (uc_fw->minor_ver_wanted &&
+ i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
uc_fw->minor_ver_wanted -= 1;
uc_fw->user_overridden = user;
- } else if (user && i915_inject_load_error(i915, e)) {
+ } else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
uc_fw->major_ver_wanted = 0;
uc_fw->minor_ver_wanted = 0;
@@ -269,7 +278,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
GEM_BUG_ON(!i915->wopcm.size);
GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
- err = i915_inject_load_error(i915, -ENXIO);
+ err = i915_inject_probe_error(i915, -ENXIO);
if (err)
return err;
@@ -337,25 +346,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
}
/* Get version numbers from the CSS header */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
- css->sw_version);
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
- css->sw_version);
- break;
-
- default:
- MISSING_CASE(uc_fw->type);
- break;
- }
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->sw_version);
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
@@ -400,7 +394,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
{
struct drm_mm_node *node = &ggtt->uc_fw;
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
@@ -445,7 +439,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
u64 offset;
int ret;
- ret = i915_inject_load_error(gt->i915, -ETIMEDOUT);
+ ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
if (ret)
return ret;
@@ -506,7 +500,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
/* make sure the status was cleared the last time we reset the uc */
GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
- err = i915_inject_load_error(gt->i915, -ENOEXEC);
+ err = i915_inject_probe_error(gt->i915, -ENOEXEC);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index ae58e8a8c53b..029214cdedd5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -39,9 +39,6 @@
* 3. Length info of each component can be found in the header, in dwords.
* 4. Modulus and exponent key are not required by the driver. They may not appear
* in the fw, in which case the driver will load a truncated firmware.
- *
- * The only difference between GuC and HuC firmwares is how the version
- * information is saved.
*/
struct uc_css_header {
@@ -69,11 +66,9 @@ struct uc_css_header {
char username[8];
char buildnumber[12];
u32 sw_version;
-#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
-#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
-#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
u32 reserved[14];
u32 header_info;
} __packed;
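With the GuC/HuC distinction removed, both firmwares decode their version the same way. A worked example (value hypothetical, chosen to match the skl GuC 33.0.0 blob above):

    u32 sw_version = 0x00210000;
    u32 major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, sw_version); /* 0x21 = 33 */
    u32 minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, sw_version); /* 0 */
    u32 patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, sw_version); /* 0 */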
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index bba0eafe1cdb..d8a80388bd31 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -108,23 +108,15 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client)
* validating that the doorbells' status expected by the driver matches what the
* GuC/HW have.
*/
-static int igt_guc_clients(void *args)
+static int igt_guc_clients(void *arg)
{
- struct drm_i915_private *dev_priv = args;
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
intel_wakeref_t wakeref;
- struct intel_guc *guc;
int err = 0;
- GEM_BUG_ON(!HAS_GT_UC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->gt.uc.guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = check_all_doorbells(guc);
if (err)
@@ -189,8 +181,7 @@ out:
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
@@ -201,22 +192,14 @@ unlock:
*/
static int igt_guc_doorbells(void *arg)
{
- struct drm_i915_private *dev_priv = arg;
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
intel_wakeref_t wakeref;
- struct intel_guc *guc;
int i, err = 0;
u16 db_id;
- GEM_BUG_ON(!HAS_GT_UC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->gt.uc.guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = check_all_doorbells(guc);
if (err)
@@ -298,20 +281,19 @@ out:
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
-int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
+int intel_guc_live_selftest(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_guc_clients),
SUBTEST(igt_guc_doorbells),
};
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!USES_GUC_SUBMISSION(i915))
return 0;
- return i915_subtests(tests, dev_priv);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 5ff2437b2998..771420453f82 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
mmio_hw_access_pre(dev_priv);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
mmio_hw_access_post(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -98,9 +98,9 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
return ret;
}
@@ -108,10 +108,10 @@ static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
}
/**
@@ -198,7 +198,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
mutex_lock(&dev_priv->ggtt.vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(dev_priv);
+ reg = i915_reserve_fence(&dev_priv->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index e753b1e706e2..6a3ac8cde95d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -35,7 +35,9 @@
*/
#include <linux/slab.h>
+
#include "i915_drv.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 4bfaefdf548d..e451298d11c3 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -152,6 +152,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct intel_vgpu_fb_info *info)
{
+ static struct lock_class_key lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
@@ -161,7 +162,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base,
roundup(info->size, PAGE_SIZE));
- i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+ i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f21b8fb5b37e..d6e7a1189bad 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -534,7 +534,7 @@ static void clean_execlist(struct intel_vgpu *vgpu,
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
s->ring_scan_buffer[engine->id] = NULL;
s->ring_scan_buffer_size[engine->id] = 0;
@@ -548,7 +548,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 25f78196b964..bd12af349123 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -819,13 +819,16 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
enum intel_gvt_event_type event;
- if (reg == _DPA_AUX_CH_CTL)
+ if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
event = AUX_CHANNEL_A;
- else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+ else if (reg == _PCH_DPB_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
event = AUX_CHANNEL_B;
- else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+ else if (reg == _PCH_DPC_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
event = AUX_CHANNEL_C;
- else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+ else if (reg == _PCH_DPD_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
event = AUX_CHANNEL_D;
else {
WARN_ON(true);
@@ -2796,7 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
- MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
+ MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
@@ -2872,11 +2875,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
@@ -3417,6 +3420,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
}
for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+ /* pvinfo data doesn't come from hw mmio */
+ if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
+ continue;
+
for (j = 0; j < block->size; j += 4) {
ret = handler(gvt,
i915_mmio_reg_offset(block->offset) + j,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 343d79c1cb7e..04a5a0d90823 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1564,27 +1564,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "\n");
}
-static ssize_t
-hw_id_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct mdev_device *mdev = mdev_from_dev(dev);
-
- if (mdev) {
- struct intel_vgpu *vgpu = (struct intel_vgpu *)
- mdev_get_drvdata(mdev);
- return sprintf(buf, "%u\n",
- vgpu->submission.shadow[0]->gem_context->hw_id);
- }
- return sprintf(buf, "\n");
-}
-
static DEVICE_ATTR_RO(vgpu_id);
-static DEVICE_ATTR_RO(hw_id);
static struct attribute *intel_vgpu_attrs[] = {
&dev_attr_vgpu_id.attr,
- &dev_attr_hw_id.attr,
NULL
};
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 4208e40445b1..aaf15916d29a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6c79d16b381e..5b2a7d072ec9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -38,6 +38,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "gvt.h"
@@ -194,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EFAULT;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ page = i915_gem_object_get_page(ctx_obj, i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
I915_GTT_PAGE_SIZE);
@@ -365,7 +366,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
+ struct i915_ppgtt *ppgtt =
+ i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -378,6 +380,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
+
+ i915_vm_put(&ppgtt->vm);
}
static int
@@ -385,11 +389,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct i915_request *rq;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (workload->req)
return 0;
@@ -415,10 +416,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&vgpu->vgpu_lock);
if (workload->shadow)
return 0;
@@ -580,8 +580,6 @@ static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_shadow_bb *bb, *pos;
if (list_empty(&workload->shadow_bb))
@@ -590,8 +588,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb = list_first_entry(&workload->shadow_bb,
struct intel_vgpu_shadow_bb, list);
- mutex_lock(&dev_priv->drm.struct_mutex);
-
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
if (bb->accessing)
@@ -609,8 +605,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_del(&bb->list);
kfree(bb);
}
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int prepare_workload(struct intel_vgpu_workload *workload)
@@ -685,7 +679,6 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -694,7 +687,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ring_id, workload);
mutex_lock(&vgpu->vgpu_lock);
- mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
@@ -729,7 +721,6 @@ out:
err_req:
if (ret)
workload->status = ret;
- mutex_unlock(&dev_priv->drm.struct_mutex);
mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -844,7 +835,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ page = i915_gem_object_get_page(ctx_obj, i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
I915_GTT_PAGE_SIZE);
@@ -887,7 +878,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
@@ -1233,20 +1224,18 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ struct i915_ppgtt *ppgtt;
enum intel_engine_id i;
int ret;
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out_unlock;
- }
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
i915_gem_context_set_force_single_submission(ctx);
- i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
+ ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+ i915_context_ppgtt_root_save(s, ppgtt);
for_each_engine(engine, i915, i) {
struct intel_context *ce;
@@ -1291,12 +1280,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
+ i915_vm_put(&ppgtt->vm);
i915_gem_context_put(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
out_shadow_ctx:
- i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
+ i915_context_ppgtt_root_restore(s, ppgtt);
for_each_engine(engine, i915, i) {
if (IS_ERR(s->shadow[i]))
break;
@@ -1304,9 +1293,8 @@ out_shadow_ctx:
intel_context_unpin(s->shadow[i]);
intel_context_put(s->shadow[i]);
}
+ i915_vm_put(&ppgtt->vm);
i915_gem_context_put(ctx);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1597,9 +1585,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
- mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 48e16ad93bbd..3c424cb90702 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,13 +7,12 @@
#include <linux/debugobjects.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
-#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
-
/*
* Active refs memory management
*
@@ -27,35 +26,35 @@ static struct i915_global_active {
} global;
struct active_node {
- struct i915_active_request base;
+ struct i915_active_fence base;
struct i915_active *ref;
struct rb_node node;
u64 timeline;
};
static inline struct active_node *
-node_from_active(struct i915_active_request *active)
+node_from_active(struct i915_active_fence *active)
{
return container_of(active, struct active_node, base);
}
#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
-static inline bool is_barrier(const struct i915_active_request *active)
+static inline bool is_barrier(const struct i915_active_fence *active)
{
- return IS_ERR(rcu_access_pointer(active->request));
+ return IS_ERR(rcu_access_pointer(active->fence));
}
static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
GEM_BUG_ON(!is_barrier(&node->base));
- return (struct llist_node *)&node->base.link;
+ return (struct llist_node *)&node->base.cb.node;
}
static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
- return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
+ return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}
static inline struct intel_engine_cs *
@@ -68,7 +67,7 @@ barrier_to_engine(struct active_node *node)
static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
return container_of((struct list_head *)x,
- struct active_node, base.link);
+ struct active_node, base.cb.node);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
@@ -92,12 +91,17 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
- debug_object_activate(ref, &active_debug_desc);
+ spin_lock_irq(&ref->tree_lock);
+ if (!atomic_read(&ref->count)) /* before the first inc */
+ debug_object_activate(ref, &active_debug_desc);
+ spin_unlock_irq(&ref->tree_lock);
}
static void debug_active_deactivate(struct i915_active *ref)
{
- debug_object_deactivate(ref, &active_debug_desc);
+ lockdep_assert_held(&ref->tree_lock);
+ if (!atomic_read(&ref->count)) /* after the last dec */
+ debug_object_deactivate(ref, &active_debug_desc);
}
static void debug_active_fini(struct i915_active *ref)
@@ -125,31 +129,46 @@ __active_retire(struct i915_active *ref)
{
struct active_node *it, *n;
struct rb_root root;
- bool retire = false;
+ unsigned long flags;
- lockdep_assert_held(&ref->mutex);
+ GEM_BUG_ON(i915_active_is_idle(ref));
/* return the unused nodes to our slabcache -- flushing the allocator */
- if (atomic_dec_and_test(&ref->count)) {
- debug_active_deactivate(ref);
- root = ref->tree;
- ref->tree = RB_ROOT;
- ref->cache = NULL;
- retire = true;
- }
-
- mutex_unlock(&ref->mutex);
- if (!retire)
+ if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
return;
- rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
- GEM_BUG_ON(i915_active_request_isset(&it->base));
- kmem_cache_free(global.slab_cache, it);
- }
+ GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
+ debug_active_deactivate(ref);
+
+ root = ref->tree;
+ ref->tree = RB_ROOT;
+ ref->cache = NULL;
+
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
/* After the final retire, the entire struct may be freed */
if (ref->retire)
ref->retire(ref);
+
+ /* ... except if you wait on it, you must manage your own references! */
+ wake_up_var(ref);
+
+ rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+ GEM_BUG_ON(i915_active_fence_isset(&it->base));
+ kmem_cache_free(global.slab_cache, it);
+ }
+}
+
+static void
+active_work(struct work_struct *wrk)
+{
+ struct i915_active *ref = container_of(wrk, typeof(*ref), work);
+
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ if (atomic_add_unless(&ref->count, -1, 1))
+ return;
+
+ __active_retire(ref);
}
static void
@@ -159,18 +178,29 @@ active_retire(struct i915_active *ref)
if (atomic_add_unless(&ref->count, -1, 1))
return;
- /* One active may be flushed from inside the acquire of another */
- mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
+ queue_work(system_unbound_wq, &ref->work);
+ return;
+ }
+
__active_retire(ref);
}
static void
-node_retire(struct i915_active_request *base, struct i915_request *rq)
+node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- active_retire(node_from_active(base)->ref);
+ i915_active_fence_cb(fence, cb);
+ active_retire(container_of(cb, struct active_node, base.cb)->ref);
}
-static struct i915_active_request *
+static void
+excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ i915_active_fence_cb(fence, cb);
+ active_retire(container_of(cb, struct i915_active, excl.cb));
+}
+
+static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
struct active_node *node, *prealloc;
@@ -193,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
if (!prealloc)
return NULL;
- mutex_lock(&ref->mutex);
+ spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));
parent = NULL;
@@ -214,7 +244,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
}
node = prealloc;
- i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
+ __i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
@@ -223,29 +253,36 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
out:
ref->cache = node;
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
BUILD_BUG_ON(offsetof(typeof(*node), base));
return &node->base;
}
-void __i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
struct lock_class_key *key)
{
+ unsigned long bits;
+
debug_active_init(ref);
- ref->i915 = i915;
ref->flags = 0;
ref->active = active;
- ref->retire = retire;
+ ref->retire = ptr_unpack_bits(retire, &bits, 2);
+ if (bits & I915_ACTIVE_MAY_SLEEP)
+ ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+
+ spin_lock_init(&ref->tree_lock);
ref->tree = RB_ROOT;
ref->cache = NULL;
+
init_llist_head(&ref->preallocated_barriers);
atomic_set(&ref->count, 0);
__mutex_init(&ref->mutex, "i915_active", key);
+ __i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire);
+ INIT_WORK(&ref->work, active_work);
}
static bool ____active_del_barrier(struct i915_active *ref,
@@ -298,9 +335,9 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node)
int i915_active_ref(struct i915_active *ref,
struct intel_timeline *tl,
- struct i915_request *rq)
+ struct dma_fence *fence)
{
- struct i915_active_request *active;
+ struct i915_active_fence *active;
int err;
lockdep_assert_held(&tl->mutex);
@@ -323,26 +360,44 @@ int i915_active_ref(struct i915_active *ref,
* request that we want to emit on the kernel_context.
*/
__active_del_barrier(ref, node_from_active(active));
- RCU_INIT_POINTER(active->request, NULL);
- INIT_LIST_HEAD(&active->link);
- } else {
- if (!i915_active_request_isset(active))
- atomic_inc(&ref->count);
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
}
- GEM_BUG_ON(!atomic_read(&ref->count));
- __i915_active_request_set(active, rq);
+ if (!__i915_active_fence_set(active, fence))
+ atomic_inc(&ref->count);
out:
i915_active_release(ref);
return err;
}
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+{
+ /* We expect the caller to manage the exclusive timeline ordering */
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ /*
+ * As we don't know which mutex the caller is using, we told a small
+ * lie to the debug code that it is using the i915_active.mutex;
+ * and now we must stick to that lie.
+ */
+ mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);
+ if (!__i915_active_fence_set(&ref->excl, f))
+ atomic_inc(&ref->count);
+ mutex_release(&ref->mutex.dep_map, _THIS_IP_);
+}
+
+bool i915_active_acquire_if_busy(struct i915_active *ref)
+{
+ debug_active_assert(ref);
+ return atomic_add_unless(&ref->count, 1, 0);
+}
+
int i915_active_acquire(struct i915_active *ref)
{
int err;
- debug_active_assert(ref);
- if (atomic_add_unless(&ref->count, 1, 0))
+ if (i915_active_acquire_if_busy(ref))
return 0;
err = mutex_lock_interruptible(&ref->mutex);
@@ -367,109 +422,66 @@ void i915_active_release(struct i915_active *ref)
active_retire(ref);
}
-static void __active_ungrab(struct i915_active *ref)
-{
- clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
-}
-
-bool i915_active_trygrab(struct i915_active *ref)
+static void enable_signaling(struct i915_active_fence *active)
{
- debug_active_assert(ref);
-
- if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
- return false;
+ struct dma_fence *fence;
- if (!atomic_add_unless(&ref->count, 1, 0)) {
- __active_ungrab(ref);
- return false;
- }
+ fence = i915_active_fence_get(active);
+ if (!fence)
+ return;
- return true;
-}
-
-void i915_active_ungrab(struct i915_active *ref)
-{
- GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));
-
- active_retire(ref);
- __active_ungrab(ref);
+ dma_fence_enable_sw_signaling(fence);
+ dma_fence_put(fence);
}
int i915_active_wait(struct i915_active *ref)
{
struct active_node *it, *n;
- int err;
+ int err = 0;
might_sleep();
- might_lock(&ref->mutex);
- if (i915_active_is_idle(ref))
+ if (!i915_active_acquire_if_busy(ref))
return 0;
- err = mutex_lock_interruptible(&ref->mutex);
- if (err)
- return err;
-
- if (!atomic_add_unless(&ref->count, 1, 0)) {
- mutex_unlock(&ref->mutex);
- return 0;
- }
-
+ /* Flush lazy signals */
+ enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- if (is_barrier(&it->base)) { /* unconnected idle-barrier */
- err = -EBUSY;
- break;
- }
+ if (is_barrier(&it->base)) /* unconnected idle barrier */
+ continue;
- err = i915_active_request_retire(&it->base, BKL(ref));
- if (err)
- break;
+ enable_signaling(&it->base);
}
+ /* Any fence added after the wait begins will not be auto-signaled */
- __active_retire(ref);
+ i915_active_release(ref);
if (err)
return err;
- if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
+ if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
return -EINTR;
- if (!i915_active_is_idle(ref))
- return -EBUSY;
-
return 0;
}
-int i915_request_await_active_request(struct i915_request *rq,
- struct i915_active_request *active)
-{
- struct i915_request *barrier =
- i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
-
- return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
-}
-
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
- struct active_node *it, *n;
- int err;
-
- if (RB_EMPTY_ROOT(&ref->tree))
- return 0;
+ int err = 0;
- /* await allocates and so we need to avoid hitting the shrinker */
- err = i915_active_acquire(ref);
- if (err)
- return err;
+ if (rcu_access_pointer(ref->excl.fence)) {
+ struct dma_fence *fence;
- mutex_lock(&ref->mutex);
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- err = i915_request_await_active_request(rq, &it->base);
- if (err)
- break;
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&ref->excl.fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
}
- mutex_unlock(&ref->mutex);
- i915_active_release(ref);
+ /* In the future we may choose to await on all fences */
+
return err;
}
@@ -477,15 +489,16 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
void i915_active_fini(struct i915_active *ref)
{
debug_active_fini(ref);
- GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
GEM_BUG_ON(atomic_read(&ref->count));
+ GEM_BUG_ON(work_pending(&ref->work));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
mutex_destroy(&ref->mutex);
}
#endif
static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
- return node->timeline == idx && !i915_active_request_isset(&node->base);
+ return node->timeline == idx && !i915_active_fence_isset(&node->base);
}
static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
@@ -495,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
if (RB_EMPTY_ROOT(&ref->tree))
return NULL;
- mutex_lock(&ref->mutex);
+ spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));
/*
@@ -560,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
goto match;
}
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
return NULL;
@@ -568,7 +581,7 @@ match:
rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
if (p == &ref->cache->node)
ref->cache = NULL;
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
return rb_entry(p, struct active_node, node);
}
@@ -576,11 +589,12 @@ match:
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
intel_engine_mask_t tmp, mask = engine->mask;
+ struct intel_gt *gt = engine->gt;
struct llist_node *pos, *next;
int err;
+ GEM_BUG_ON(i915_active_is_idle(ref));
GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
/*
@@ -589,7 +603,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* We can then use the preallocated nodes in
* i915_active_acquire_barrier()
*/
- for_each_engine_masked(engine, i915, mask, tmp) {
+ for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
struct active_node *node;
@@ -605,13 +619,13 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
node->base.lock =
&engine->kernel_context->timeline->mutex;
#endif
- RCU_INIT_POINTER(node->base.request, NULL);
- node->base.retire = node_retire;
+ RCU_INIT_POINTER(node->base.fence, NULL);
+ node->base.cb.func = node_retire;
node->timeline = idx;
node->ref = ref;
}
- if (!i915_active_request_isset(&node->base)) {
+ if (!i915_active_fence_isset(&node->base)) {
/*
* Mark this as being *our* unconnected proto-node.
*
@@ -621,8 +635,8 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* and then we can use the rb_node and list pointers
* for our tracking of the pending barrier.
*/
- RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
- node->base.link.prev = (void *)engine;
+ RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
+ node->base.cb.node.prev = (void *)engine;
atomic_inc(&ref->count);
}
@@ -648,6 +662,7 @@ unwind:
void i915_active_acquire_barrier(struct i915_active *ref)
{
struct llist_node *pos, *next;
+ unsigned long flags;
GEM_BUG_ON(i915_active_is_idle(ref));
@@ -657,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
* populated by i915_request_add_active_barriers() to point to the
* request that will eventually release them.
*/
- mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
struct active_node *node = barrier_from_ll(pos);
struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -679,54 +694,124 @@ void i915_active_acquire_barrier(struct i915_active *ref)
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
intel_engine_pm_put(engine);
}
- mutex_unlock(&ref->mutex);
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
}
void i915_request_add_active_barriers(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct llist_node *node, *next;
+ unsigned long flags;
GEM_BUG_ON(intel_engine_is_virtual(engine));
- GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
+ GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
+ node = llist_del_all(&engine->barrier_tasks);
+ if (!node)
+ return;
/*
* Attach the list of proto-fences to the in-flight request such
* that the parent i915_active will be released when this request
* is retired.
*/
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
- RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
+ spin_lock_irqsave(&rq->lock, flags);
+ llist_for_each_safe(node, next, node) {
+ RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence);
smp_wmb(); /* serialise with reuse_idle_barrier */
- list_add_tail((struct list_head *)node, &rq->active_list);
+ list_add_tail((struct list_head *)node, &rq->fence.cb_list);
}
+ spin_unlock_irqrestore(&rq->lock, flags);
}
-int i915_active_request_set(struct i915_active_request *active,
- struct i915_request *rq)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#define active_is_held(active) lockdep_is_held((active)->lock)
+#else
+#define active_is_held(active) true
+#endif
+
+/*
+ * __i915_active_fence_set: Update the last active fence along its timeline
+ * @active: the active tracker
+ * @fence: the new fence (under construction)
+ *
+ * Records the new @fence as the last active fence along its timeline in
+ * this active tracker, moving the tracking callbacks from the previous
+ * fence onto this one. Returns the previous fence (if not already completed),
+ * which the caller must ensure is executed before the new fence. To ensure
+ * that the order of fences within the timeline of the i915_active_fence is
+ * maintained, it must be locked by the caller.
+ */
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+ struct dma_fence *fence)
{
- int err;
+ struct dma_fence *prev;
+ unsigned long flags;
+
+ /* NB: must be serialised by an outer timeline mutex (active->lock) */
+ spin_lock_irqsave(fence->lock, flags);
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+
+ prev = rcu_dereference_protected(active->fence, active_is_held(active));
+ if (prev) {
+ GEM_BUG_ON(prev == fence);
+ spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ __list_del_entry(&active->cb.node);
+ spin_unlock(prev->lock); /* serialise with prev->cb_list */
+
+ /*
+ * active->fence is reset by the callback from inside
+ * interrupt context. We need to serialise our list
+ * manipulation with the fence->lock to prevent the prev
+ * being lost inside an interrupt (it can't be replaced as
+ * no other caller is allowed to enter __i915_active_fence_set
+ * as we hold the timeline lock). After serialising with
+ * the callback, we need to double check which ran first,
+ * our list_del() [decoupling prev from the callback] or
+ * the callback...
+ */
+ prev = rcu_access_pointer(active->fence);
+ }
+
+ rcu_assign_pointer(active->fence, fence);
+ list_add_tail(&active->cb.node, &fence->cb_list);
+
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return prev;
+}
+
+int i915_active_fence_set(struct i915_active_fence *active,
+ struct i915_request *rq)
+{
+ struct dma_fence *fence;
+ int err = 0;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
lockdep_assert_held(active->lock);
#endif
- /* Must maintain ordering wrt previous active requests */
- err = i915_request_await_active_request(rq, active);
- if (err)
- return err;
+ /* Must maintain timeline ordering wrt previous active requests */
+ rcu_read_lock();
+ fence = __i915_active_fence_set(active, &rq->fence);
+ if (fence) /* but the previous fence may not belong to that timeline! */
+ fence = dma_fence_get_rcu(fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
- __i915_active_request_set(active, rq);
- return 0;
+ return err;
}
-void i915_active_retire_noop(struct i915_active_request *active,
- struct i915_request *request)
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- /* Space left intentionally blank */
+ i915_active_fence_cb(fence, cb);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index f95058f99057..44859356ce97 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -12,6 +12,10 @@
#include "i915_active_types.h"
#include "i915_request.h"
+struct i915_request;
+struct intel_engine_cs;
+struct intel_timeline;
+
/*
* We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
@@ -28,308 +32,108 @@
* write access so that we can perform concurrent read operations between
* the CPU and GPU engines, as well as waiting for all rendering to
* complete, or waiting for the last GPU user of a "fence register". The
- * object then embeds a #i915_active_request to track the most recent (in
+ * object then embeds a #i915_active_fence to track the most recent (in
* retirement order) request relevant for the desired mode of access.
- * The #i915_active_request is updated with i915_active_request_set() to
+ * The #i915_active_fence is updated with i915_active_fence_set() to
* track the most recent fence request, typically this is done as part of
* i915_vma_move_to_active().
*
- * When the #i915_active_request completes (is retired), it will
+ * When the #i915_active_fence completes (is retired), it will
* signal its completion to the owner through a callback as well as mark
- * itself as idle (i915_active_request.request == NULL). The owner
+ * itself as idle (i915_active_fence.fence == NULL). The owner
* can then perform any action, such as delayed freeing of an active
* resource including itself.
*/
-void i915_active_retire_noop(struct i915_active_request *active,
- struct i915_request *request);
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
/**
- * i915_active_request_init - prepares the activity tracker for use
+ * __i915_active_fence_init - prepares the activity tracker for use
* @active - the active tracker
- * @rq - initial request to track, can be NULL
+ * @fence - initial fence to track, can be NULL
* @func - a callback when the tracker is retired (becomes idle),
* can be NULL
*
- * i915_active_request_init() prepares the embedded @active struct for use as
- * an activity tracker, that is for tracking the last known active request
- * associated with it. When the last request becomes idle, when it is retired
+ * __i915_active_fence_init() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active fence
+ * associated with it. When the last fence becomes idle, when it is retired
* after completion, the optional callback @func is invoked.
*/
static inline void
-i915_active_request_init(struct i915_active_request *active,
+__i915_active_fence_init(struct i915_active_fence *active,
struct mutex *lock,
- struct i915_request *rq,
- i915_active_retire_fn retire)
+ void *fence,
+ dma_fence_func_t fn)
{
- RCU_INIT_POINTER(active->request, rq);
- INIT_LIST_HEAD(&active->link);
- active->retire = retire ?: i915_active_retire_noop;
+ RCU_INIT_POINTER(active->fence, fence);
+ active->cb.func = fn ?: i915_active_noop;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
active->lock = lock;
#endif
}
-#define INIT_ACTIVE_REQUEST(name, lock) \
- i915_active_request_init((name), (lock), NULL, NULL)
-
-/**
- * i915_active_request_set - updates the tracker to watch the current request
- * @active - the active tracker
- * @request - the request to watch
- *
- * __i915_active_request_set() watches the given @request for completion. Whilst
- * that @request is busy, the @active reports busy. When that @request is
- * retired, the @active tracker is updated to report idle.
- */
-static inline void
-__i915_active_request_set(struct i915_active_request *active,
- struct i915_request *request)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- lockdep_assert_held(active->lock);
-#endif
- list_move(&active->link, &request->active_list);
- rcu_assign_pointer(active->request, request);
-}
+#define INIT_ACTIVE_FENCE(A, LOCK) \
+ __i915_active_fence_init((A), (LOCK), NULL, NULL)
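A tracker that starts out idle can be seeded with the helper above, e.g. (names hypothetical):

    INIT_ACTIVE_FENCE(&tl->last_request, &tl->mutex);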
-int __must_check
-i915_active_request_set(struct i915_active_request *active,
- struct i915_request *rq);
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+ struct dma_fence *fence);
/**
- * i915_active_request_raw - return the active request
+ * i915_active_fence_set - updates the tracker to watch the current fence
* @active - the active tracker
+ * @rq - the request to watch
*
- * i915_active_request_raw() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the caller
- * must hold struct_mutex.
+ * i915_active_fence_set() watches the given @rq for completion. While
+ * that @rq is busy, the @active reports busy. When that @rq is signaled
+ * (or else retired) the @active tracker is updated to report idle.
*/
-static inline struct i915_request *
-i915_active_request_raw(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- return rcu_dereference_protected(active->request,
- lockdep_is_held(mutex));
-}
-
-/**
- * i915_active_request_peek - report the active request being monitored
- * @active - the active tracker
- *
- * i915_active_request_peek() returns the current request being tracked if
- * still active, or NULL. It does not obtain a reference on the request
- * for the caller, so the caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_peek(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- struct i915_request *request;
-
- request = i915_active_request_raw(active, mutex);
- if (!request || i915_request_completed(request))
- return NULL;
-
- return request;
-}
-
-/**
- * i915_active_request_get - return a reference to the active request
- * @active - the active tracker
- *
- * i915_active_request_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_get(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- return i915_request_get(i915_active_request_peek(active, mutex));
-}
-
-/**
- * __i915_active_request_get_rcu - return a reference to the active request
- * @active - the active tracker
- *
- * __i915_active_request_get() returns a reference to the active request,
- * or NULL if the active tracker is idle. The caller must hold the RCU read
- * lock, but the returned pointer is safe to use outside of RCU.
- */
-static inline struct i915_request *
-__i915_active_request_get_rcu(const struct i915_active_request *active)
-{
- /*
- * Performing a lockless retrieval of the active request is super
- * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
- * slab of request objects will not be freed whilst we hold the
- * RCU read lock. It does not guarantee that the request itself
- * will not be freed and then *reused*. Viz,
- *
- * Thread A Thread B
- *
- * rq = active.request
- * retire(rq) -> free(rq);
- * (rq is now first on the slab freelist)
- * active.request = NULL
- *
- * rq = new submission on a new object
- * ref(rq)
- *
- * To prevent the request from being reused whilst the caller
- * uses it, we take a reference like normal. Whilst acquiring
- * the reference we check that it is not in a destroyed state
- * (refcnt == 0). That prevents the request being reallocated
- * whilst the caller holds on to it. To check that the request
- * was not reallocated as we acquired the reference we have to
- * check that our request remains the active request across
- * the lookup, in the same manner as a seqlock. The visibility
- * of the pointer versus the reference counting is controlled
- * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
- *
- * In the middle of all that, we inspect whether the request is
- * complete. Retiring is lazy so the request may be completed long
- * before the active tracker is updated. Querying whether the
- * request is complete is far cheaper (as it involves no locked
- * instructions setting cachelines to exclusive) than acquiring
- * the reference, so we do it first. The RCU read lock ensures the
- * pointer dereference is valid, but does not ensure that the
- * seqno nor HWS is the right one! However, if the request was
- * reallocated, that means the active tracker's request was complete.
- * If the new request is also complete, then both are and we can
- * just report the active tracker is idle. If the new request is
- * incomplete, then we acquire a reference on it and check that
- * it remained the active request.
- *
- * It is then imperative that we do not zero the request on
- * reallocation, so that we can chase the dangling pointers!
- * See i915_request_alloc().
- */
- do {
- struct i915_request *request;
-
- request = rcu_dereference(active->request);
- if (!request || i915_request_completed(request))
- return NULL;
-
- /*
- * An especially silly compiler could decide to recompute the
- * result of i915_request_completed, more specifically
- * re-emit the load for request->fence.seqno. A race would catch
- * a later seqno value, which could flip the result from true to
- * false. Which means part of the instructions below might not
- * be executed, while later on instructions are executed. Due to
- * barriers within the refcounting the inconsistency can't reach
- * past the call to i915_request_get_rcu, but not executing
- * that while still executing i915_request_put() creates
- * havoc enough. Prevent this with a compiler barrier.
- */
- barrier();
-
- request = i915_request_get_rcu(request);
-
- /*
- * What stops the following rcu_access_pointer() from occurring
- * before the above i915_request_get_rcu()? If we were
- * to read the value before pausing to get the reference to
- * the request, we may not notice a change in the active
- * tracker.
- *
- * The rcu_access_pointer() is a mere compiler barrier, which
- * means both the CPU and compiler are free to perform the
- * memory read without constraint. The compiler only has to
- * ensure that any operations after the rcu_access_pointer()
- * occur afterwards in program order. This means the read may
- * be performed earlier by an out-of-order CPU, or adventurous
- * compiler.
- *
- * The atomic operation at the heart of
- * i915_request_get_rcu(), see dma_fence_get_rcu(), is
- * atomic_inc_not_zero() which is only a full memory barrier
- * when successful. That is, if i915_request_get_rcu()
- * returns the request (and so with the reference counted
- * incremented) then the following read for rcu_access_pointer()
- * must occur after the atomic operation and so confirm
- * that this request is the one currently being tracked.
- *
- * The corresponding write barrier is part of
- * rcu_assign_pointer().
- */
- if (!request || request == rcu_access_pointer(active->request))
- return rcu_pointer_handoff(request);
-
- i915_request_put(request);
- } while (1);
-}
-
+int __must_check
+i915_active_fence_set(struct i915_active_fence *active,
+ struct i915_request *rq);
/**
- * i915_active_request_get_unlocked - return a reference to the active request
+ * i915_active_fence_get - return a reference to the active fence
* @active - the active tracker
*
- * i915_active_request_get_unlocked() returns a reference to the active request,
+ * i915_active_fence_get() returns a reference to the active fence,
* or NULL if the active tracker is idle. The reference is obtained under RCU,
* so no locking is required by the caller.
*
- * The reference should be freed with i915_request_put().
+ * The reference should be freed with dma_fence_put().
*/
-static inline struct i915_request *
-i915_active_request_get_unlocked(const struct i915_active_request *active)
+static inline struct dma_fence *
+i915_active_fence_get(struct i915_active_fence *active)
{
- struct i915_request *request;
+ struct dma_fence *fence;
rcu_read_lock();
- request = __i915_active_request_get_rcu(active);
+ fence = dma_fence_get_rcu_safe(&active->fence);
rcu_read_unlock();
- return request;
+ return fence;
}
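A minimal caller sketch (the tracker pointer is hypothetical), following the rule that the returned reference is dropped with dma_fence_put():

    struct dma_fence *fence = i915_active_fence_get(active);

    if (fence) {
            dma_fence_wait(fence, true); /* or queue it as a dependency */
            dma_fence_put(fence);
    }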
/**
- * i915_active_request_isset - report whether the active tracker is assigned
+ * i915_active_fence_isset - report whether the active tracker is assigned
* @active - the active tracker
*
- * i915_active_request_isset() returns true if the active tracker is currently
- * assigned to a request. Due to the lazy retiring, that request may be idle
+ * i915_active_fence_isset() returns true if the active tracker is currently
+ * assigned to a fence. Due to the lazy retiring, that fence may be idle
* and this may report stale information.
*/
static inline bool
-i915_active_request_isset(const struct i915_active_request *active)
+i915_active_fence_isset(const struct i915_active_fence *active)
{
- return rcu_access_pointer(active->request);
+ return rcu_access_pointer(active->fence);
}
-/**
- * i915_active_request_retire - waits until the request is retired
- * @active - the active request on which to wait
- *
- * i915_active_request_retire() waits until the request is completed,
- * and then ensures that at least the retirement handler for this
- * @active tracker is called before returning. If the @active
- * tracker is idle, the function returns immediately.
- */
-static inline int __must_check
-i915_active_request_retire(struct i915_active_request *active,
- struct mutex *mutex)
+static inline void
+i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- struct i915_request *request;
- long ret;
-
- request = i915_active_request_raw(active, mutex);
- if (!request)
- return 0;
-
- ret = i915_request_wait(request,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0)
- return ret;
+ struct i915_active_fence *active =
+ container_of(cb, typeof(*active), cb);
- list_del_init(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, request);
-
- return 0;
+ RCU_INIT_POINTER(active->fence, NULL);
}
/*
@@ -358,34 +162,40 @@ i915_active_request_retire(struct i915_active_request *active,
* synchronisation.
*/
-void __i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
struct lock_class_key *key);
-#define i915_active_init(i915, ref, active, retire) do { \
+#define i915_active_init(ref, active, retire) do { \
static struct lock_class_key __key; \
\
- __i915_active_init(i915, ref, active, retire, &__key); \
+ __i915_active_init(ref, active, retire, &__key); \
} while (0)
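A sketch of the updated init pattern, which no longer takes the drm_i915_private pointer (struct and callback names hypothetical). Retire callbacks should be annotated __i915_active_call so their address keeps the low bits free that i915_active_may_sleep() packs flags into:

    static void __i915_active_call my_retire(struct i915_active *ref)
    {
            /* release whatever was kept alive while @ref was busy */
    }

    i915_active_init(&obj->active, NULL, my_retire);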
int i915_active_ref(struct i915_active *ref,
struct intel_timeline *tl,
- struct i915_request *rq);
+ struct dma_fence *fence);
+
+static inline int
+i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+{
+ return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
+}
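Typical use is to tie an object's tracker to the request being built, e.g. (field name hypothetical):

    err = i915_active_add_request(&vma->active, rq);
    if (err)
            return err;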
+
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+
+static inline bool i915_active_has_exclusive(struct i915_active *ref)
+{
+ return rcu_access_pointer(ref->excl.fence);
+}
int i915_active_wait(struct i915_active *ref);
-int i915_request_await_active(struct i915_request *rq,
- struct i915_active *ref);
-int i915_request_await_active_request(struct i915_request *rq,
- struct i915_active_request *active);
+int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
int i915_active_acquire(struct i915_active *ref);
+bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
-void __i915_active_release_nested(struct i915_active *ref, int subclass);
-
-bool i915_active_trygrab(struct i915_active *ref);
-void i915_active_ungrab(struct i915_active *ref);
static inline bool
i915_active_is_idle(const struct i915_active *ref)
@@ -404,4 +214,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);
+void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 1854e7d168c1..96aed0ee700a 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -8,22 +8,18 @@
#define _I915_ACTIVE_TYPES_H_
#include <linux/atomic.h>
+#include <linux/dma-fence.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
-struct drm_i915_private;
-struct i915_active_request;
-struct i915_request;
+#include "i915_utils.h"
-typedef void (*i915_active_retire_fn)(struct i915_active_request *,
- struct i915_request *);
-
-struct i915_active_request {
- struct i915_request __rcu *request;
- struct list_head link;
- i915_active_retire_fn retire;
+struct i915_active_fence {
+ struct dma_fence __rcu *fence;
+ struct dma_fence_cb cb;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
/*
* Incorporeal!
@@ -43,20 +39,30 @@ struct i915_active_request {
struct active_node;
+#define I915_ACTIVE_MAY_SLEEP BIT(0)
+
+#define __i915_active_call __aligned(4)
+#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
+
struct i915_active {
- struct drm_i915_private *i915;
+ atomic_t count;
+ struct mutex mutex;
+ spinlock_t tree_lock;
struct active_node *cache;
struct rb_root tree;
- struct mutex mutex;
- atomic_t count;
+
+ /* Preallocated "exclusive" node */
+ struct i915_active_fence excl;
unsigned long flags;
-#define I915_ACTIVE_GRAB_BIT 0
+#define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
int (*active)(struct i915_active *ref);
void (*retire)(struct i915_active *ref);
+ struct work_struct work;
+
struct llist_head preallocated_barriers;
};
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index fe1871d7c126..e9d4200ce3bc 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -38,6 +38,7 @@ int __init i915_global_buddy_init(void)
if (!global.slab_blocks)
return -ENOMEM;
+ i915_global_register(&global.base);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b0f51591f2e4..8016484ebcd3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -41,7 +41,10 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
@@ -61,11 +64,18 @@ static int i915_capabilities(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_device_info *info = INTEL_INFO(dev_priv);
struct drm_printer p = drm_seq_file_printer(m);
+ const char *msg;
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+ msg = "n/a";
+#ifdef CONFIG_INTEL_IOMMU
+ msg = enableddisabled(intel_iommu_gfx_mapped);
+#endif
+ seq_printf(m, "iommu: %s\n", msg);
+
intel_device_info_dump_flags(info, &p);
intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
intel_driver_caps_print(&dev_priv->caps, &p);
@@ -77,11 +87,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static char get_pin_flag(struct drm_i915_gem_object *obj)
-{
- return obj->pin_global ? 'p' : ' ';
-}
-
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (i915_gem_object_get_tiling(obj)) {
@@ -140,9 +145,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int pin_count = 0;
- seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
+ seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
- get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
get_pin_mapped_flag(obj),
@@ -221,8 +225,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", pin_count);
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_global)
- seq_printf(m, " (global)");
+ if (i915_gem_object_is_framebuffer(obj))
+ seq_printf(m, " (fb)");
engine = i915_gem_object_last_write_engine(obj);
if (engine)
@@ -243,6 +247,9 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ return 0;
+
stats->count++;
stats->total += obj->base.size;
if (!atomic_read(&obj->bind_count))
@@ -290,6 +297,7 @@ static int per_file_stats(int id, void *ptr, void *data)
}
spin_unlock(&obj->vma.lock);
+ i915_gem_object_put(obj);
return 0;
}
@@ -309,34 +317,44 @@ static void print_context_stats(struct seq_file *m,
struct drm_i915_private *i915)
{
struct file_stats kstats = {};
- struct i915_gem_context *ctx;
+ struct i915_gem_context *ctx, *cn;
- list_for_each_entry(ctx, &i915->contexts.list, link) {
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
intel_context_lock_pinned(ce);
if (intel_context_is_pinned(ce)) {
+ rcu_read_lock();
if (ce->state)
per_file_stats(0,
ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
+ rcu_read_unlock();
}
intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = { .vm = ctx->vm, };
+ struct file_stats stats = {
+ .vm = rcu_access_pointer(ctx->vm),
+ };
struct drm_file *file = ctx->file_priv->file;
struct task_struct *task;
char name[80];
- spin_lock(&file->table_lock);
+ rcu_read_lock();
idr_for_each(&file->object_idr, per_file_stats, &stats);
- spin_unlock(&file->table_lock);
+ rcu_read_unlock();
rcu_read_lock();
task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
@@ -346,7 +364,12 @@ static void print_context_stats(struct seq_file *m,
print_file_stats(m, name, stats);
}
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
}
+ spin_unlock(&i915->gem.contexts.lock);
print_file_stats(m, "[k]contexts", kstats);
}
@@ -354,7 +377,6 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- int ret;
seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count,
@@ -363,12 +385,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
print_context_stats(m, i915);
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -376,7 +393,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
static void gen8_display_interrupt_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- int pipe;
+ enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
@@ -527,6 +544,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
gen8_display_interrupt_info(m);
} else if (IS_VALLEYVIEW(dev_priv)) {
+ intel_wakeref_t pref;
+
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
seq_printf(m, "Display IIR:\t%08x\n",
@@ -537,7 +556,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
- intel_wakeref_t pref;
power_domain = POWER_DOMAIN_PIPE(pipe);
pref = intel_display_power_get_if_enabled(dev_priv,
@@ -571,12 +589,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "PM IMR:\t\t%08x\n",
I915_READ(GEN6_PMIMR));
+ pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
} else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
@@ -772,7 +792,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uncore *uncore = &dev_priv->uncore;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
intel_wakeref_t wakeref;
int ret = 0;
@@ -808,23 +828,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
seq_printf(m, "actual GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+ intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
seq_printf(m, "max GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->min_freq));
+ intel_gpu_freq(rps, rps->min_freq));
seq_printf(m, "idle GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->idle_freq));
+ intel_gpu_freq(rps, rps->idle_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
} else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
@@ -858,7 +878,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
else
reqf >>= 25;
}
- reqf = intel_gpu_freq(dev_priv, reqf);
+ reqf = intel_gpu_freq(rps, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -871,8 +891,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- cagf = intel_gpu_freq(dev_priv,
- intel_get_cagf(dev_priv, rpstat));
+ cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -949,37 +968,37 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, "Current freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->idle_freq));
+ intel_gpu_freq(rps, rps->idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->min_freq));
+ intel_gpu_freq(rps, rps->min_freq));
seq_printf(m, "Boost freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->boost_freq));
+ intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Max freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
} else {
seq_puts(m, "no P-state info available\n");
}
@@ -992,91 +1011,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static void i915_instdone_info(struct drm_i915_private *dev_priv,
- struct seq_file *m,
- struct intel_instdone *instdone)
-{
- int slice;
- int subslice;
-
- seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
- instdone->instdone);
-
- if (INTEL_GEN(dev_priv) <= 3)
- return;
-
- seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
- instdone->slice_common);
-
- if (INTEL_GEN(dev_priv) <= 6)
- return;
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->sampler[slice][subslice]);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->row[slice][subslice]);
-}
-
-static int i915_hangcheck_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
- struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
- enum intel_engine_id id;
-
- seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
- if (test_bit(I915_WEDGED, &gt->reset.flags))
- seq_puts(m, "\tWedged\n");
- if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
- seq_puts(m, "\tDevice (global) reset in progress\n");
-
- if (!i915_modparams.enable_hangcheck) {
- seq_puts(m, "Hangcheck disabled\n");
- return 0;
- }
-
- if (timer_pending(&gt->hangcheck.work.timer))
- seq_printf(m, "Hangcheck active, timer fires in %dms\n",
- jiffies_to_msecs(gt->hangcheck.work.timer.expires -
- jiffies));
- else if (delayed_work_pending(&gt->hangcheck.work))
- seq_puts(m, "Hangcheck active, work pending\n");
- else
- seq_puts(m, "Hangcheck inactive\n");
-
- seq_printf(m, "GT active? %s\n", yesno(gt->awake));
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- for_each_engine(engine, i915, id) {
- struct intel_instdone instdone;
-
- seq_printf(m, "%s: %d ms ago\n",
- engine->name,
- jiffies_to_msecs(jiffies -
- engine->hangcheck.action_timestamp));
-
- seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)engine->hangcheck.acthd,
- intel_engine_get_active_head(engine));
-
- intel_engine_get_instdone(engine, &instdone);
-
- seq_puts(m, "\tinstdone read =\n");
- i915_instdone_info(i915, m, &instdone);
-
- seq_puts(m, "\tinstdone accu =\n");
- i915_instdone_info(i915, m,
- &engine->hangcheck.instdone);
- }
- }
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1157,11 +1091,13 @@ static void print_rc6_res(struct seq_file *m,
const char *title,
const i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
- seq_printf(m, "%s %u (%llu us)\n",
- title, I915_READ(reg),
- intel_rc6_residency_us(dev_priv, reg));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ seq_printf(m, "%s %u (%llu us)\n", title,
+ intel_uncore_read(&i915->uncore, reg),
+ intel_rc6_residency_us(&i915->gt.rc6, reg));
}
static int vlv_drpc_info(struct seq_file *m)
@@ -1439,7 +1375,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
@@ -1464,10 +1400,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ?
- GEN9_FREQ_SCALER : 1))),
+ intel_gpu_freq(rps,
+ (gpu_freq *
+ (IS_GEN9_BC(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10 ?
+ GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1478,21 +1415,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_opregion *opregion = &dev_priv->opregion;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
- mutex_unlock(&dev->struct_mutex);
-
-out:
return 0;
}
@@ -1512,11 +1439,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
@@ -1551,7 +1473,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1564,23 +1485,20 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_gem_context *ctx;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct i915_gem_context *ctx, *cn;
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
seq_puts(m, "HW context ");
- if (!list_empty(&ctx->hw_id_link))
- seq_printf(m, "%x [pin %u]", ctx->hw_id,
- atomic_read(&ctx->hw_id_pin_count));
if (ctx->pid) {
struct task_struct *task;
@@ -1614,9 +1532,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
i915_gem_context_unlock_engines(ctx);
seq_putc(m, '\n');
- }
- mutex_unlock(&dev->struct_mutex);
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock(&i915->gem.contexts.lock);
return 0;
}
@@ -1654,9 +1575,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
@@ -1711,7 +1632,7 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
@@ -1723,7 +1644,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
vlv_punit_put(dev_priv);
act_freq = (act_freq >> 8) & 0xff;
} else {
- act_freq = intel_get_cagf(dev_priv,
+ act_freq = intel_get_cagf(rps,
I915_READ(GEN6_RPSTAT1));
}
}
@@ -1734,17 +1655,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d, actual %d\n",
- intel_gpu_freq(dev_priv, rps->cur_freq),
- intel_gpu_freq(dev_priv, act_freq));
+ intel_gpu_freq(rps, rps->cur_freq),
+ intel_gpu_freq(rps, act_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
- intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
- intel_gpu_freq(dev_priv, rps->idle_freq),
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- intel_gpu_freq(dev_priv, rps->boost_freq));
+ intel_gpu_freq(rps, rps->idle_freq),
+ intel_gpu_freq(rps, rps->efficient_freq),
+ intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
@@ -1860,8 +1781,8 @@ static void i915_guc_log_info(struct seq_file *m,
struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
- if (!intel_guc_log_relay_enabled(log)) {
- seq_puts(m, "GuC log relay disabled\n");
+ if (!intel_guc_log_relay_created(log)) {
+ seq_puts(m, "GuC log relay not created\n");
return;
}
@@ -2048,9 +1969,23 @@ i915_guc_log_relay_write(struct file *filp,
loff_t *ppos)
{
struct intel_guc_log *log = filp->private_data;
+ int val;
+ int ret;
- intel_guc_log_relay_flush(log);
- return cnt;
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Enable and start the GuC log relay on a value of 1.
+ * Flush the log relay for any other value.
+ */
+ if (val == 1)
+ ret = intel_guc_log_relay_start(log);
+ else
+ intel_guc_log_relay_flush(log);
+
+ return ret ?: cnt;
}
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
@@ -2133,7 +2068,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = I915_READ(EDP_PSR2_STATUS);
+ val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -2149,7 +2084,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = I915_READ(EDP_PSR_STATUS);
+ val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -2188,14 +2123,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
status = "disabled";
seq_printf(m, "PSR mode: %s\n", status);
- if (!psr->enabled)
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ yesno(psr->sink_not_reliable));
+
goto unlock;
+ }
if (psr->psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = I915_READ(EDP_PSR_CTL);
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
@@ -2208,7 +2147,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 * SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
+ val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
@@ -2226,8 +2166,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 * Reading all 3 registers beforehand to minimize crossing a
* frame boundary between register reads
*/
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
- su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
+ frame));
+ su_frames_val[frame / 3] = val;
+ }
seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
@@ -2360,8 +2303,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
for_each_power_domain(power_domain, power_well->desc->domains)
seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(dev_priv,
- power_domain),
+ intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
}
@@ -2396,6 +2338,13 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
if (INTEL_GEN(dev_priv) >= 12) {
dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ /*
+ * NOTE: DMC_DEBUG3 is a general purpose reg.
+ * According to B.Specs:49196, DMC f/w reuses the DC5/6 counter
+ * reg for DC3CO debugging and validation,
+ * but TGL DMC f/w uses the DMC_DEBUG3 reg as the DC3CO counter.
+ */
+ seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
} else {
dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
SKL_CSR_DC3_DC5_COUNT;
@@ -3110,8 +3059,9 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
if (!intel_dig_port->dp.can_mst)
continue;
- seq_printf(m, "MST Source Port %c\n",
- port_name(intel_dig_port->base.port));
+ seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -3573,6 +3523,37 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
i915_wedged_get, i915_wedged_set,
"%llu\n");
+static int
+i915_perf_noa_delay_set(void *data, u64 val)
+{
+ struct drm_i915_private *i915 = data;
+ const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+
+ /*
+ * This would lead to infinite waits, as we are computing the timestamp
+ * difference on the CS with only 32 bits.
+ */
+ if (val > mul_u32_u32(U32_MAX, clk))
+ return -EINVAL;
+
+ atomic64_set(&i915->perf.noa_programming_delay, val);
+ return 0;
+}
+
+static int
+i915_perf_noa_delay_get(void *data, u64 *val)
+{
+ struct drm_i915_private *i915 = data;
+
+ *val = atomic64_read(&i915->perf.noa_programming_delay);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
+ i915_perf_noa_delay_get,
+ i915_perf_noa_delay_set,
+ "%llu\n");
+
#define DROP_UNBOUND BIT(0)
#define DROP_BOUND BIT(1)
#define DROP_RETIRE BIT(2)
@@ -3582,6 +3563,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
#define DROP_IDLE BIT(6)
#define DROP_RESET_ACTIVE BIT(7)
#define DROP_RESET_SEQNO BIT(8)
+#define DROP_RCU BIT(9)
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
@@ -3590,7 +3572,8 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
DROP_SHRINK_ALL |\
DROP_IDLE | \
DROP_RESET_ACTIVE | \
- DROP_RESET_SEQNO)
+ DROP_RESET_SEQNO | \
+ DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -3598,58 +3581,48 @@ i915_drop_caches_get(void *data, u64 *val)
return 0;
}
-
static int
-i915_drop_caches_set(void *data, u64 val)
+gt_drop_caches(struct intel_gt *gt, u64 val)
{
- struct drm_i915_private *i915 = data;
-
- DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
- val, val & DROP_ALL);
+ int ret;
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(&i915->gt),
- I915_IDLE_ENGINES_TIMEOUT))
- intel_gt_set_wedged(&i915->gt);
+ wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+ intel_gt_set_wedged(gt);
- /* No need to check and wait for gpu resets, only libdrm auto-restarts
- * on ioctls on -EAGAIN. */
- if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
- int ret;
+ if (val & DROP_RETIRE)
+ intel_gt_retire_requests(gt);
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (val & (DROP_IDLE | DROP_ACTIVE)) {
+ ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
+ }
- /*
- * To finish the flush of the idle_worker, we must complete
- * the switch-to-kernel-context, which requires a double
- * pass through wait_for_idle: first queues the switch,
- * second waits for the switch.
- */
- if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ if (val & DROP_IDLE) {
+ ret = intel_gt_pm_wait_for_idle(gt);
+ if (ret)
+ return ret;
+ }
- if (ret == 0 && val & DROP_IDLE)
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
+ intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
- if (val & DROP_RETIRE)
- i915_retire_requests(i915);
+ return 0;
+}
- mutex_unlock(&i915->drm.struct_mutex);
+static int
+i915_drop_caches_set(void *data, u64 val)
+{
+ struct drm_i915_private *i915 = data;
+ int ret;
- if (ret == 0 && val & DROP_IDLE)
- ret = intel_gt_pm_wait_for_idle(&i915->gt);
- }
+ DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
+ val, val & DROP_ALL);
- if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
- intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
+ ret = gt_drop_caches(&i915->gt, val);
+ if (ret)
+ return ret;
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
@@ -3662,10 +3635,8 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(i915);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE) {
- flush_delayed_work(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
- }
+ if (val & DROP_RCU)
+ rcu_barrier();
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
@@ -3721,6 +3692,15 @@ i915_cache_sharing_set(void *data, u64 val)
return 0;
}
+static void
+intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
+ u8 *to_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
+}
+
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
@@ -3794,12 +3774,13 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
continue;
sseu->slice_mask |= BIT(s);
- sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
+ intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
- if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ if (info->sseu.has_subslice_pg &&
+ !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
@@ -3845,18 +3826,21 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(dev_priv))
- sseu->subslice_mask[s] =
- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
+ intel_sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
+ u8 ss_idx = s * info->sseu.ss_stride +
+ ss / BITS_PER_BYTE;
if (IS_GEN9_LP(dev_priv)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
- sseu->subslice_mask[s] |= BIT(ss);
+ sseu->subslice_mask[ss_idx] |=
+ BIT(ss % BITS_PER_BYTE);
}
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
@@ -3873,25 +3857,23 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
int s;
sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
if (sseu->slice_mask) {
- sseu->eu_per_subslice =
- RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- sseu->subslice_mask[s] =
- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
- }
+ sseu->eu_per_subslice = info->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ intel_sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
sseu->eu_total = sseu->eu_per_subslice *
intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
- u8 subslice_7eu =
- RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
+ u8 subslice_7eu = info->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
}
@@ -3938,6 +3920,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
struct sseu_dev_info sseu;
intel_wakeref_t wakeref;
@@ -3945,14 +3928,13 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
+ i915_print_sseu_info(m, true, &info->sseu);
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
- sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
- sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
- sseu.max_eus_per_subslice =
- RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
+ intel_sseu_set_info(&sseu, info->sseu.max_slices,
+ info->sseu.max_subslices,
+ info->sseu.max_eus_per_subslice);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
@@ -3973,13 +3955,12 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
- file->private_data =
- (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
- intel_uncore_forcewake_user_get(&i915->uncore);
+ atomic_inc(&gt->user_wakeref);
+ intel_gt_pm_get(gt);
+ if (INTEL_GEN(i915) >= 6)
+ intel_uncore_forcewake_user_get(gt->uncore);
return 0;
}
@@ -3987,13 +3968,12 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
- intel_uncore_forcewake_user_put(&i915->uncore);
- intel_runtime_pm_put(&i915->runtime_pm,
- (intel_wakeref_t)(uintptr_t)file->private_data);
+ if (INTEL_GEN(i915) >= 6)
+ intel_uncore_forcewake_user_put(&i915->uncore);
+ intel_gt_pm_put(gt);
+ atomic_dec(&gt->user_wakeref);
return 0;
}
@@ -4302,7 +4282,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
@@ -4339,6 +4318,7 @@ static const struct i915_debugfs_files {
const char *name;
const struct file_operations *fops;
} i915_debugfs_files[] = {
+ {"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -4528,7 +4508,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "DSC_Enabled: %s\n",
- yesno(crtc_state->dsc_params.compression_enable));
+ yesno(crtc_state->dsc.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
seq_printf(m, "Force_DSC_Enable: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3d717e282908..3c512c571e60 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -36,7 +36,6 @@
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>
@@ -54,16 +53,17 @@
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
-#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
+#include "display/intel_vga.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_rc6.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -72,10 +72,12 @@
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
+#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
+#include "intel_memory_region.h"
#include "intel_pm.h"
static struct drm_driver driver;
@@ -269,179 +271,97 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-/* true = enable decode, false = disable decoder */
-static unsigned int i915_vga_set_decode(void *cookie, bool state)
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = cookie;
-
- intel_modeset_vga_set_state(dev_priv, state);
- if (state)
- return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- else
- return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-}
-
-static int i915_resume_switcheroo(struct drm_i915_private *i915);
-static int i915_suspend_switcheroo(struct drm_i915_private *i915,
- pm_message_t state);
-
-static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
-{
- struct drm_i915_private *i915 = pdev_to_i915(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
-
- if (!i915) {
- dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
- return;
- }
-
- if (state == VGA_SWITCHEROO_ON) {
- pr_info("switched on\n");
- i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
- /* i915 resume handler doesn't set to D0 */
- pci_set_power_state(pdev, PCI_D0);
- i915_resume_switcheroo(i915);
- i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
- } else {
- pr_info("switched off\n");
- i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend_switcheroo(i915, pmm);
- i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
- }
-}
-
-static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
-{
- struct drm_i915_private *i915 = pdev_to_i915(pdev);
-
- /*
- * FIXME: open_count is protected by drm_global_mutex but that would lead to
- * locking inversion with the driver load path. And the access here is
- * completely racy anyway. So don't bother with locking for now.
- */
- return i915 && i915->drm.open_count == 0;
-}
-
-static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
- .set_gpu_state = i915_switcheroo_set_state,
- .reprobe = NULL,
- .can_switch = i915_switcheroo_can_switch,
-};
-
-static int i915_driver_modeset_probe(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_probe_failure(dev_priv))
+ if (i915_inject_probe_failure(i915))
return -ENODEV;
- if (HAS_DISPLAY(dev_priv)) {
- ret = drm_vblank_init(&dev_priv->drm,
- INTEL_INFO(dev_priv)->num_pipes);
+ if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ ret = drm_vblank_init(&i915->drm,
+ INTEL_NUM_PIPES(i915));
if (ret)
goto out;
}
- intel_bios_init(dev_priv);
+ intel_bios_init(i915);
- /* If we have > 1 VGA cards, then we need to arbitrate access
- * to the common VGA resources.
- *
- * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
- * then we do not take part in VGA arbitration and the
- * vga_client_register() fails with -ENODEV.
- */
- ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
- if (ret && ret != -ENODEV)
+ ret = intel_vga_register(i915);
+ if (ret)
goto out;
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
+ ret = i915_switcheroo_register(i915);
if (ret)
goto cleanup_vga_client;
- intel_power_domains_init_hw(dev_priv, false);
+ intel_power_domains_init_hw(i915, false);
- intel_csr_ucode_init(dev_priv);
+ intel_csr_ucode_init(i915);
- ret = intel_irq_install(dev_priv);
+ ret = intel_irq_install(i915);
if (ret)
goto cleanup_csr;
- intel_gmbus_setup(dev_priv);
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
- ret = intel_modeset_init(dev);
+ ret = intel_modeset_init(i915);
if (ret)
goto cleanup_irq;
- ret = i915_gem_init(dev_priv);
+ ret = i915_gem_init(i915);
if (ret)
goto cleanup_modeset;
- intel_overlay_setup(dev_priv);
+ intel_overlay_setup(i915);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
return 0;
- ret = intel_fbdev_init(dev);
+ ret = intel_fbdev_init(&i915->drm);
if (ret)
goto cleanup_gem;
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(dev_priv);
+ intel_hpd_init(i915);
- intel_init_ipc(dev_priv);
+ intel_init_ipc(i915);
return 0;
cleanup_gem:
- i915_gem_suspend(dev_priv);
- i915_gem_driver_remove(dev_priv);
- i915_gem_driver_release(dev_priv);
+ i915_gem_suspend(i915);
+ i915_gem_driver_remove(i915);
+ i915_gem_driver_release(i915);
cleanup_modeset:
- intel_modeset_driver_remove(dev);
+ intel_modeset_driver_remove(i915);
cleanup_irq:
- intel_irq_uninstall(dev_priv);
- intel_gmbus_teardown(dev_priv);
+ intel_irq_uninstall(i915);
cleanup_csr:
- intel_csr_ucode_fini(dev_priv);
- intel_power_domains_driver_remove(dev_priv);
- vga_switcheroo_unregister_client(pdev);
+ intel_csr_ucode_fini(i915);
+ intel_power_domains_driver_remove(i915);
+ i915_switcheroo_unregister(i915);
cleanup_vga_client:
- vga_client_register(pdev, NULL, NULL, NULL);
+ intel_vga_unregister(i915);
out:
return ret;
}
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
- struct apertures_struct *ap;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool primary;
- int ret;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
+ intel_modeset_driver_remove(i915);
- ap->ranges[0].base = ggtt->gmadr.start;
- ap->ranges[0].size = ggtt->mappable_end;
+ intel_irq_uninstall(i915);
- primary =
- pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+ intel_bios_driver_remove(i915);
- ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ i915_switcheroo_unregister(i915);
- kfree(ap);
+ intel_vga_unregister(i915);
- return ret;
+ intel_csr_ucode_fini(i915);
}
static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -598,9 +518,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_gt_init_early(&dev_priv->gt, dev_priv);
- ret = i915_gem_init_early(dev_priv);
- if (ret < 0)
- goto err_gt;
+ i915_gem_init_early(dev_priv);
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
@@ -622,7 +540,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
-err_gt:
intel_gt_driver_late_release(&dev_priv->gt);
vlv_free_s0ix_state(dev_priv);
err_workqueues:
@@ -680,12 +597,10 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
intel_uc_init_mmio(&dev_priv->gt.uc);
- ret = intel_engines_init_mmio(dev_priv);
+ ret = intel_engines_init_mmio(&dev_priv->gt);
if (ret)
goto err_uncore;
- i915_gem_init_mmio(dev_priv);
-
return 0;
err_uncore:
@@ -703,7 +618,7 @@ err_bridge:
*/
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
- intel_engines_cleanup(dev_priv);
+ intel_engines_cleanup(&dev_priv->gt);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
@@ -1157,8 +1072,8 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
- const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- const unsigned int sets[4] = { 1, 1, 2, 2 };
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
return EDRAM_NUM_BANKS(cap) *
ways[EDRAM_WAYS_IDX(cap)] *
@@ -1246,32 +1161,24 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_perf;
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
- ret = i915_kick_out_firmware_fb(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+ if (ret)
goto err_ggtt;
- }
- ret = vga_remove_vgacon(pdev);
- if (ret) {
- DRM_ERROR("failed to remove conflicting VGA console\n");
+ ret = i915_ggtt_init_hw(dev_priv);
+ if (ret)
goto err_ggtt;
- }
- ret = i915_ggtt_init_hw(dev_priv);
+ ret = intel_memory_regions_hw_probe(dev_priv);
if (ret)
goto err_ggtt;
- intel_gt_init_hw(dev_priv);
+ intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
pci_set_master(pdev);
@@ -1288,7 +1195,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
}
@@ -1306,16 +1213,13 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
}
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_sanitize_gt_powersave(dev_priv);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1361,6 +1265,8 @@ err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
+err_mem_regions:
+ intel_memory_regions_driver_release(dev_priv);
err_ggtt:
i915_ggtt_driver_release(dev_priv);
err_perf:
@@ -1415,14 +1321,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (HAS_DISPLAY(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
}
- if (IS_GEN(dev_priv, 5))
- intel_gpu_ips_init(dev_priv);
+ intel_gt_driver_register(&dev_priv->gt);
intel_audio_init(dev_priv);
@@ -1439,7 +1344,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (HAS_DISPLAY(dev_priv))
+ if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
@@ -1465,7 +1370,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
*/
drm_kms_helper_poll_fini(&dev_priv->drm);
- intel_gpu_ips_teardown();
+ intel_gt_driver_unregister(&dev_priv->gt);
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
@@ -1574,6 +1479,23 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+ /*
+ * Check if we support fake LMEM -- for now we only unleash this for
+ * the live selftests (test-and-exit).
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
+ if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
+ i915_modparams.fake_lmem_start) {
+ mkwrite_device_info(dev_priv)->memory_regions =
+ REGION_SMEM | REGION_LMEM | REGION_STOLEN;
+ mkwrite_device_info(dev_priv)->is_dgfx = true;
+ GEM_BUG_ON(!HAS_LMEM(dev_priv));
+ GEM_BUG_ON(!IS_DGFX(dev_priv));
+ }
+ }
+#endif
+
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
@@ -1594,7 +1516,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_driver_modeset_probe(&dev_priv->drm);
+ ret = i915_driver_modeset_probe(dev_priv);
if (ret < 0)
goto out_cleanup_hw;
@@ -1608,10 +1530,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_cleanup_hw:
i915_driver_hw_remove(dev_priv);
+ intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
-
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_sanitize_gt_powersave(dev_priv);
out_cleanup_mmio:
i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
@@ -1627,8 +1547,6 @@ out_fini:
void i915_driver_remove(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
-
disable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_driver_unregister(i915);
@@ -1649,19 +1567,9 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_gvt_driver_remove(i915);
- intel_modeset_driver_remove(&i915->drm);
-
- intel_bios_driver_remove(i915);
+ i915_driver_modeset_remove(i915);
- vga_switcheroo_unregister_client(pdev);
- vga_client_register(pdev, NULL, NULL, NULL);
-
- intel_csr_ucode_fini(i915);
-
- /* Free error state after interrupts are fully disabled. */
- cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_reset_error_state(i915);
-
i915_gem_driver_remove(i915);
intel_power_domains_driver_remove(i915);
@@ -1680,11 +1588,9 @@ static void i915_driver_release(struct drm_device *dev)
i915_gem_driver_release(dev_priv);
+ intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_sanitize_gt_powersave(dev_priv);
-
i915_driver_mmio_release(dev_priv);
enable_rpm_wakeref_asserts(rpm);
@@ -1728,12 +1634,10 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
i915_gem_context_close(file);
i915_gem_release(dev, file);
- mutex_unlock(&dev->struct_mutex);
- kfree(file_priv);
+ kfree_rcu(file_priv, rcu);
/* Catch up with all the deferred frees from "this" client */
i915_gem_flush_free_objects(to_i915(dev));
@@ -1847,8 +1751,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
- i915_rc6_ctx_wa_suspend(dev_priv);
-
intel_uncore_suspend(&dev_priv->uncore);
intel_power_domains_suspend(dev_priv,
@@ -1890,8 +1792,7 @@ out:
return ret;
}
-static int
-i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
@@ -1915,18 +1816,17 @@ static int i915_drm_resume(struct drm_device *dev)
int ret;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- intel_sanitize_gt_powersave(dev_priv);
- i915_gem_sanitize(dev_priv);
+ intel_rc6_ctx_wa_resume(&dev_priv->gt.rc6);
+
+ intel_gt_sanitize(&dev_priv->gt, true);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1951,7 +1851,7 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -2048,20 +1948,14 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_display_power_resume_early(dev_priv);
- intel_sanitize_gt_powersave(dev_priv);
-
intel_power_domains_resume(dev_priv);
- i915_rc6_ctx_wa_resume(dev_priv);
-
- intel_gt_sanitize(&dev_priv->gt, true);
-
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
-static int i915_resume_switcheroo(struct drm_i915_private *i915)
+int i915_resume_switcheroo(struct drm_i915_private *i915)
{
int ret;
@@ -2594,9 +2488,6 @@ static int intel_runtime_suspend(struct device *kdev)
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
- if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
- return -ENODEV;
-
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -2629,7 +2520,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
enable_rpm_wakeref_asserts(rpm);
@@ -2709,7 +2600,7 @@ static int intel_runtime_resume(struct device *kdev)
* we can do is to hope that things will still work (and disable RPM).
*/
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
/*
* On VLV/CHV display interrupts are part of the display
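A side note on the postclose change above: kfree_rcu(file_priv, rcu) relies on the rcu_head that the i915_drv.h hunk below adds to drm_i915_file_private. The general pattern, condensed (illustrative only, not part of the patch; the example_* names are hypothetical):

#include <linux/rcupdate.h>	/* rcu_head, kfree_rcu() */
#include <linux/slab.h>

struct example_file_private {
	union {
		void *file;		/* valid while the fd is open */
		struct rcu_head rcu;	/* reused to defer the free */
	};
};

static void example_postclose(struct example_file_private *fpriv)
{
	/*
	 * kfree_rcu() postpones the actual kfree() until after an RCU
	 * grace period, so lockless readers that found the object under
	 * rcu_read_lock() never dereference freed memory.
	 */
	kfree_rcu(fpriv, rcu);
}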
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 89b6112bd66b..e29bc137e7ba 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -67,6 +67,7 @@
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
+#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
@@ -84,6 +85,7 @@
#include "intel_device_info.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
+#include "intel_memory_region.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"
@@ -92,12 +94,15 @@
#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
+#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"
+#include "intel_region_lmem.h"
+
#include "intel_gvt.h"
/* General customization:
@@ -105,8 +110,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190822"
-#define DRIVER_TIMESTAMP 1566477988
+#define DRIVER_DATE "20191101"
+#define DRIVER_TIMESTAMP 1572604873
struct drm_i915_gem_object;
@@ -185,7 +190,11 @@ struct i915_mmu_object;
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
- struct drm_file *file;
+
+ union {
+ struct drm_file *file;
+ struct rcu_head rcu;
+ };
struct {
spinlock_t lock;
@@ -272,6 +281,7 @@ struct drm_i915_display_funcs {
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
+ u8 (*calc_voltage_level)(int cdclk);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -284,7 +294,8 @@ struct drm_i915_display_funcs {
struct intel_atomic_state *old_state);
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
struct intel_atomic_state *old_state);
- void (*update_crtcs)(struct intel_atomic_state *state);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+ void (*commit_modeset_disables)(struct intel_atomic_state *state);
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -331,6 +342,7 @@ struct intel_csr {
i915_reg_t mmioaddr[20];
u32 mmiodata[20];
u32 dc_state;
+ u32 target_dc_state;
u32 allowed_dc_mask;
intel_wakeref_t wakeref;
};
@@ -479,6 +491,7 @@ struct i915_psr {
bool enabled;
struct intel_dp *dp;
enum pipe pipe;
+ enum transcoder transcoder;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
@@ -492,6 +505,9 @@ struct i915_psr {
bool sink_not_reliable;
bool irq_aux_error;
u16 su_x_granularity;
+ bool dc3co_enabled;
+ u32 dc3co_exit_delay;
+ struct delayed_work idle_work;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -529,108 +545,6 @@ struct i915_suspend_saved_registers {
struct vlv_s0ix_state;
-struct intel_rps_ei {
- ktime_t ktime;
- u32 render_c0;
- u32 media_c0;
-};
-
-struct intel_rps {
- struct mutex lock; /* protects enabling and the worker */
-
- /*
- * work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
- */
- struct work_struct work;
- bool interrupts_enabled;
- u32 pm_iir;
-
- /* PM interrupt bits that should never be masked */
- u32 pm_intrmsk_mbz;
-
- /* Frequencies are stored in potentially platform dependent multiples.
- * In other words, *_freq needs to be multiplied by X to be interesting.
- * Soft limits are those which are used for the dynamic reclocking done
- * by the driver (raise frequencies under heavy loads, and lower for
- * lighter loads). Hard limits are those imposed by the hardware.
- *
- * A distinction is made for overclocking, which is never enabled by
- * default, and is considered to be above the hard limit if it's
- * possible at all.
- */
- u8 cur_freq; /* Current frequency (cached, may not == HW) */
- u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
- u8 max_freq_softlimit; /* Max frequency permitted by the driver */
- u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
- u8 min_freq; /* AKA RPn. Minimum frequency */
- u8 boost_freq; /* Frequency to request when wait boosting */
- u8 idle_freq; /* Frequency to request when we are idle */
- u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
- u8 rp1_freq; /* "less than" RP0 power/freqency */
- u8 rp0_freq; /* Non-overclocked max frequency. */
- u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
-
- int last_adj;
-
- struct {
- struct mutex mutex;
-
- enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
- unsigned int interactive;
-
- u8 up_threshold; /* Current %busy required to uplock */
- u8 down_threshold; /* Current %busy required to downclock */
- } power;
-
- bool enabled;
- atomic_t num_waiters;
- atomic_t boosts;
-
- /* manual wa residency calculations */
- struct intel_rps_ei ei;
-};
-
-struct intel_rc6 {
- bool enabled;
- bool ctx_corrupted;
- intel_wakeref_t ctx_corrupted_wakeref;
- u64 prev_hw_residency[4];
- u64 cur_residency[4];
-};
-
-struct intel_llc_pstate {
- bool enabled;
-};
-
-struct intel_gen6_power_mgmt {
- struct intel_rps rps;
- struct intel_rc6 rc6;
- struct intel_llc_pstate llc_pstate;
-};
-
-/* defined in intel_pm.c */
-extern spinlock_t mchdev_lock;
-
-struct intel_ilk_power_mgmt {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- u64 last_time2;
- unsigned long gfx_power;
- u8 corr;
-
- int c_m;
- int r_t;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -679,6 +593,8 @@ struct i915_gem_mm {
*/
struct vfsmount *gemfs;
+ struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -690,11 +606,6 @@ struct i915_gem_mm {
*/
struct workqueue_struct *userptr_wq;
- /** Bit 6 swizzling required for X tiling */
- u32 bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- u32 bit_6_swizzle_y;
-
/* shrinker accounting, also useful for userland debugging */
u64 shrink_memory;
u32 shrink_count;
@@ -975,305 +886,6 @@ struct intel_wm_config {
bool sprites_scaled;
};
-struct i915_oa_format {
- u32 format;
- int size;
-};
-
-struct i915_oa_reg {
- i915_reg_t addr;
- u32 value;
-};
-
-struct i915_oa_config {
- char uuid[UUID_STRING_LEN + 1];
- int id;
-
- const struct i915_oa_reg *mux_regs;
- u32 mux_regs_len;
- const struct i915_oa_reg *b_counter_regs;
- u32 b_counter_regs_len;
- const struct i915_oa_reg *flex_regs;
- u32 flex_regs_len;
-
- struct attribute_group sysfs_metric;
- struct attribute *attrs[2];
- struct device_attribute sysfs_metric_id;
-
- atomic_t ref_count;
-};
-
-struct i915_perf_stream;
-
-/**
- * struct i915_perf_stream_ops - the OPs to support a specific stream type
- */
-struct i915_perf_stream_ops {
- /**
- * @enable: Enables the collection of HW samples, either in response to
- * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
- * without `I915_PERF_FLAG_DISABLED`.
- */
- void (*enable)(struct i915_perf_stream *stream);
-
- /**
- * @disable: Disables the collection of HW samples, either in response
- * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
- * the stream.
- */
- void (*disable)(struct i915_perf_stream *stream);
-
- /**
- * @poll_wait: Call poll_wait, passing a wait queue that will be woken
- * once there is something ready to read() for the stream
- */
- void (*poll_wait)(struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait);
-
- /**
- * @wait_unlocked: For handling a blocking read, wait until there is
- * something ready to read() for the stream. E.g. wait on the same
- * wait queue that would be passed to poll_wait().
- */
- int (*wait_unlocked)(struct i915_perf_stream *stream);
-
- /**
- * @read: Copy buffered metrics as records to userspace
- * **buf**: the userspace, destination buffer
- * **count**: the number of bytes to copy, requested by userspace
- * **offset**: zero at the start of the read, updated as the read
- * proceeds; it represents how many bytes have been copied so far and
- * the buffer offset for copying the next record.
- *
- * Copy as many buffered i915 perf samples and records for this stream
- * to userspace as will fit in the given buffer.
- *
- * Only write complete records; return -%ENOSPC if there isn't room
- * for a complete record.
- *
- * Return any error condition that results in a short read such as
- * -%ENOSPC or -%EFAULT, even though these may be squashed before
- * returning to userspace.
- */
- int (*read)(struct i915_perf_stream *stream,
- char __user *buf,
- size_t count,
- size_t *offset);
-
- /**
- * @destroy: Cleanup any stream specific resources.
- *
- * The stream will always be disabled before this is called.
- */
- void (*destroy)(struct i915_perf_stream *stream);
-};
-
-/**
- * struct i915_perf_stream - state for a single open stream FD
- */
-struct i915_perf_stream {
- /**
- * @dev_priv: i915 drm device
- */
- struct drm_i915_private *dev_priv;
-
- /**
- * @link: Links the stream into ``&drm_i915_private->streams``
- */
- struct list_head link;
-
- /**
- * @wakeref: As we keep the device awake while the perf stream is
- * active, we track our runtime pm reference for later release.
- */
- intel_wakeref_t wakeref;
-
- /**
- * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
- * properties given when opening a stream, representing the contents
- * of a single sample as read() by userspace.
- */
- u32 sample_flags;
-
- /**
- * @sample_size: Considering the configured contents of a sample
- * combined with the required header size, this is the total size
- * of a single sample record.
- */
- int sample_size;
-
- /**
- * @ctx: %NULL if measuring system-wide across all contexts or a
- * specific context that is being monitored.
- */
- struct i915_gem_context *ctx;
-
- /**
- * @enabled: Whether the stream is currently enabled, considering
- * whether the stream was opened in a disabled state and based
- * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
- */
- bool enabled;
-
- /**
- * @ops: The callbacks providing the implementation of this specific
- * type of configured stream.
- */
- const struct i915_perf_stream_ops *ops;
-
- /**
- * @oa_config: The OA configuration used by the stream.
- */
- struct i915_oa_config *oa_config;
-
- /**
- * The OA context specific information.
- */
- struct intel_context *pinned_ctx;
- u32 specific_ctx_id;
- u32 specific_ctx_id_mask;
-
- struct hrtimer poll_check_timer;
- wait_queue_head_t poll_wq;
- bool pollin;
-
- bool periodic;
- int period_exponent;
-
- /**
- * State of the OA buffer.
- */
- struct {
- struct i915_vma *vma;
- u8 *vaddr;
- u32 last_ctx_id;
- int format;
- int format_size;
- int size_exponent;
-
- /**
- * Locks reads and writes to all head/tail state
- *
- * Consider: the head and tail pointer state needs to be read
- * consistently from a hrtimer callback (atomic context) and
- * read() fop (user context) with tail pointer updates happening
- * in atomic context and head updates in user context and the
- * (unlikely) possibility of read() errors needing to reset all
- * head/tail state.
- *
- * Note: Contention/performance aren't currently a significant
- * concern here considering the relatively low frequency of
- * hrtimer callbacks (5ms period) and that reads typically only
- * happen in response to a hrtimer event and likely complete
- * before the next callback.
- *
- * Note: This lock is not held *while* reading and copying data
- * to userspace so the value of head observed in hrtimer
- * callbacks won't represent any partial consumption of data.
- */
- spinlock_t ptr_lock;
-
- /**
- * One 'aging' tail pointer and one 'aged' tail pointer ready to
- * be used for reading.
- *
- * Initial values of 0xffffffff are invalid and imply that an
- * update is required (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * Index for the aged tail ready to read() data up to.
- */
- unsigned int aged_tail_idx;
-
- /**
- * A monotonic timestamp for when the current aging tail pointer
- * was read; used to determine when it is old enough to trust.
- */
- u64 aging_timestamp;
-
- /**
- * Although we can always read back the head pointer register,
- * we prefer to avoid trusting the HW state, just to avoid any
- * risk that some hardware condition could somehow bump the
- * head pointer unpredictably and cause us to forward the wrong
- * OA buffer data to userspace.
- */
- u32 head;
- } oa_buffer;
-};
-
-/**
- * struct i915_oa_ops - Gen specific implementation of an OA unit stream
- */
-struct i915_oa_ops {
- /**
- * @is_valid_b_counter_reg: Validates register's address for
- * programming boolean counters for a particular platform.
- */
- bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
- u32 addr);
-
- /**
- * @is_valid_mux_reg: Validates register's address for programming mux
- * for a particular platform.
- */
- bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
- /**
- * @is_valid_flex_reg: Validates register's address for programming
- * flex EU filtering for a particular platform.
- */
- bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
- /**
- * @enable_metric_set: Selects and applies any MUX configuration to set
- * up the Boolean and Custom (B/C) counters that are part of the
- * counter reports being sampled. May apply system constraints such as
- * disabling EU clock gating as required.
- */
- int (*enable_metric_set)(struct i915_perf_stream *stream);
-
- /**
- * @disable_metric_set: Remove system constraints associated with using
- * the OA unit.
- */
- void (*disable_metric_set)(struct i915_perf_stream *stream);
-
- /**
- * @oa_enable: Enable periodic sampling
- */
- void (*oa_enable)(struct i915_perf_stream *stream);
-
- /**
- * @oa_disable: Disable periodic sampling
- */
- void (*oa_disable)(struct i915_perf_stream *stream);
-
- /**
- * @read: Copy data from the circular OA buffer into a given userspace
- * buffer.
- */
- int (*read)(struct i915_perf_stream *stream,
- char __user *buf,
- size_t count,
- size_t *offset);
-
- /**
- * @oa_hw_tail_read: read the OA tail pointer register
- *
- * In particular this enables us to share all the fiddly code for
- * handling the OA unit tail pointer race that affects multiple
- * generations.
- */
- u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
-};
-
struct intel_cdclk_state {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
@@ -1333,11 +945,11 @@ struct drm_i915_private {
*/
u32 gpio_mmio_base;
+ u32 hsw_psr_mmio_adjust;
+
/* MMIO base address for MIPI regs */
u32 mipi_mmio_base;
- u32 psr_mmio_base;
-
u32 pps_mmio_base;
wait_queue_head_t gmbus_wait_queue;
@@ -1369,7 +981,6 @@ struct drm_i915_private {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
- u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1399,13 +1010,14 @@ struct drm_i915_private {
unsigned int fdi_pll_freq;
unsigned int czclk_freq;
+ /*
+ * For reading, holding any crtc lock is sufficient;
+ * for writing, all of them must be held.
+ */
struct {
/*
* The current logical cdclk state.
* See intel_atomic_state.cdclk.logical
- *
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
*/
struct intel_cdclk_state logical;
/*
@@ -1416,6 +1028,9 @@ struct drm_i915_private {
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
+ /* cdclk, divider, and ratio table from bspec */
+ const struct intel_cdclk_vals *table;
+
int force_min_cdclk;
} cdclk;
@@ -1430,6 +1045,8 @@ struct drm_i915_private {
/* ordered wq for modesets */
struct workqueue_struct *modeset_wq;
+ /* unbound hipri wq for page flips/plane updates */
+ struct workqueue_struct *flip_wq;
/* Display functions */
struct drm_i915_display_funcs display;
@@ -1470,7 +1087,11 @@ struct drm_i915_private {
*/
struct mutex dpll_lock;
- unsigned int active_crtcs;
+ /*
+ * For reading active_pipes, min_cdclk and min_voltage_level, holding
+ * any crtc lock is sufficient; for writing, all of them must be held.
+ */
+ u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -1499,13 +1120,6 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- /* gen6+ GT PM state */
- struct intel_gen6_power_mgmt gt_pm;
-
- /* ilk-only ips/rps state. Everything in here is protected by the global
- * mchdev_lock in intel_pm.c */
- struct intel_ilk_power_mgmt ips;
-
struct i915_power_domains power_domains;
struct i915_psr psr;
@@ -1530,25 +1144,7 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
int audio_power_refcount;
-
- struct {
- struct mutex mutex;
- struct list_head list;
- struct llist_head free_list;
- struct work_struct free_work;
-
- /* The hw wants to have a stable context identifier for the
- * lifetime of the context (for OA, PASID, faults, etc).
- * This is limited in execlists to 21 bits.
- */
- struct ida hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-/* in Gen12 ID 0x7FF is reserved to indicate idle */
-#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
- struct list_head hw_id_list;
- } contexts;
+ u32 audio_freq_cntrl;
u32 fdi_rx_config;
@@ -1574,6 +1170,8 @@ struct drm_i915_private {
I915_SAGV_NOT_CONTROLLED
} sagv_status;
+ u32 sagv_block_time_us;
+
struct {
/*
* Raw watermark latency values:
@@ -1644,61 +1242,7 @@ struct drm_i915_private {
struct intel_runtime_pm runtime_pm;
- struct {
- bool initialized;
-
- struct kobject *metrics_kobj;
- struct ctl_table_header *sysctl_header;
-
- /*
- * Lock associated with adding/modifying/removing OA configs
- * in dev_priv->perf.metrics_idr.
- */
- struct mutex metrics_lock;
-
- /*
- * List of dynamic configurations, you need to hold
- * dev_priv->perf.metrics_lock to access it.
- */
- struct idr metrics_idr;
-
- /*
- * Lock associated with anything below within this structure
- * except exclusive_stream.
- */
- struct mutex lock;
- struct list_head streams;
-
- /*
- * The stream currently using the OA unit. If accessed
- * outside a syscall associated to its file
- * descriptor, you need to hold
- * dev_priv->drm.struct_mutex.
- */
- struct i915_perf_stream *exclusive_stream;
-
- /**
- * For rate limiting any notifications of spurious
- * invalid OA reports
- */
- struct ratelimit_state spurious_report_rs;
-
- struct i915_oa_config test_config;
-
- u32 gen7_latched_oastatus1;
- u32 ctx_oactxctrl_offset;
- u32 ctx_flexeu0_offset;
-
- /**
- * The RPT_ID/reason field for Gen8+ includes a bit
- * to determine if the CTX ID in the report is valid
- * but the specific bit differs between Gen 8 and 9
- */
- u32 gen8_valid_ctx_bit;
-
- struct i915_oa_ops ops;
- const struct i915_oa_format *oa_formats;
- } perf;
+ struct i915_perf perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct intel_gt gt;
@@ -1706,34 +1250,19 @@ struct drm_i915_private {
struct {
struct notifier_block pm_notifier;
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * When we detect an idle GPU, we want to turn on
- * powersaving features. So once we see that there
- * are no more requests outstanding and no more
- * arrive within a small period of time, we fire
- * off the idle_work.
- */
- struct work_struct idle_work;
+ struct i915_gem_contexts {
+ spinlock_t lock; /* locks list */
+ struct list_head list;
+
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } contexts;
} gem;
u8 pch_ssc_use;
- /* For i945gm vblank irq vs. C3 workaround */
- struct {
- struct work_struct work;
- struct pm_qos_request pm_qos;
- u8 c3_disable_latency;
- u8 enabled;
- } i945gm_vblank;
+ /* For i915gm/i945gm vblank irq workaround */
+ u8 vblank_enabled;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -1796,10 +1325,10 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
/* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+ for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
(tmp__) ? \
- ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
+ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
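For reference, a minimal sketch of how a caller migrates to the gt-based iterator; the helper function and the mask value are hypothetical, only the macro, the gt->engine[] array and the intel_engine_mask_t type are assumed from the driver.

	/* Hypothetical helper: count the engines selected by @mask on this gt. */
	static unsigned int count_masked_engines(struct intel_gt *gt,
						 intel_engine_mask_t mask)
	{
		struct intel_engine_cs *engine;
		intel_engine_mask_t tmp;
		unsigned int count = 0;

		/* Walks gt->engine[] rather than dev_priv->engine[] after this change. */
		for_each_engine_masked(engine, gt, mask, tmp)
			count++;

		return count;
	}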
#define rb_to_uabi_engine(rb) \
@@ -1855,6 +1384,8 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
INTEL_INFO(dev_priv)->gen == (n))
+#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
+
/*
* Return true if revision is in range [since,until] inclusive.
*
@@ -1926,6 +1457,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
}
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
+#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2060,6 +1592,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_REVID(p, since, until) \
(IS_ICELAKE(p) && IS_REVID(p, since, until))
+#define TGL_REVID_A0 0x0
+
+#define IS_TGL_REVID(p, since, until) \
+ (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+
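As an illustration only, a hedged sketch of the stepping check the new macro enables; the workaround body is a placeholder and not part of this patch.

	/* Hypothetical usage: gate an early-stepping workaround on Tigerlake A0. */
	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) {
		/* apply an A0-only workaround here (placeholder) */
	}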
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
@@ -2166,6 +1703,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
+
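A minimal usage sketch, assuming the REGION_SMEM/REGION_LMEM flags from the same memory-region series; the branch bodies are placeholders.

	/* Hypothetical caller: pick a backing store based on the available regions. */
	if (HAS_LMEM(i915)) {
		/* allocate from device-local memory, REGION_LMEM (placeholder) */
	} else if (HAS_REGION(i915, REGION_SMEM)) {
		/* fall back to shmem-backed system memory (placeholder) */
	}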
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
/* Having GuC is not the same as using GuC */
@@ -2189,7 +1729,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
+
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+
+/* Only valid when HAS_DISPLAY() is true */
+#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display)
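A short sketch of how the three display helpers might combine at probe time; the log messages are illustrative and not taken from this patch.

	/* Hypothetical probe-time check: pipes present in hardware vs. display enabled. */
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
		DRM_DEBUG_KMS("driving %u pipe(s)\n", INTEL_NUM_PIPES(dev_priv));
	else
		DRM_DEBUG_KMS("display not present or disabled by module parameter\n");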
static inline bool intel_vtd_active(void)
{
@@ -2222,7 +1767,9 @@ extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
+int i915_resume_switcheroo(struct drm_i915_private *i915);
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
+
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
@@ -2241,12 +1788,13 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_sanitize(struct drm_i915_private *i915);
-int i915_gem_init_early(struct drm_i915_private *dev_priv);
+void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
+
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
/*
@@ -2331,15 +1879,11 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
-void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
@@ -2379,7 +1923,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
@@ -2395,9 +1939,9 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
i915_gem_object_is_tiled(obj);
}
@@ -2501,4 +2045,10 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
+static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
+{
+ return intel_guc_is_submission_supported(guc) &&
+ intel_guc_is_running(guc);
+}
+
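A hedged usage sketch; the caller and the branch body are hypothetical, only the helper above and the gt->uc.guc member are assumed from the driver.

	/* Hypothetical caller: only touch submission state when GuC actually owns it. */
	struct intel_guc *guc = &dev_priv->gt.uc.guc;

	if (intel_guc_submission_is_enabled(guc)) {
		/* GuC firmware is running and submission goes through it (placeholder) */
	}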
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 98305d987ac1..b9eb6b3149b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -45,13 +45,14 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
-#include "gem/i915_gemfs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_renderstate.h"
+#include "gt/intel_rps.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
@@ -62,20 +63,31 @@
#include "intel_pm.h"
static int
-insert_mappable_node(struct i915_ggtt *ggtt,
- struct drm_mm_node *node, u32 size)
+insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
+ int err;
+
+ err = mutex_lock_interruptible(&ggtt->vm.mutex);
+ if (err)
+ return err;
+
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
- size, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
+ size, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+
+ mutex_unlock(&ggtt->vm.mutex);
+
+ return err;
}
static void
-remove_mappable_node(struct drm_mm_node *node)
+remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(node);
+ mutex_unlock(&ggtt->vm.mutex);
}
int
@@ -87,7 +99,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- mutex_lock(&ggtt->vm.mutex);
+ if (mutex_lock_interruptible(&ggtt->vm.mutex))
+ return -EINTR;
pinned = ggtt->vm.reserved;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
@@ -109,20 +122,24 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
LIST_HEAD(still_in_list);
int ret = 0;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
+ struct i915_address_space *vm = vma->vm;
+
+ ret = -EBUSY;
+ if (!i915_vm_tryopen(vm))
+ break;
+
list_move_tail(&vma->obj_link, &still_in_list);
spin_unlock(&obj->vma.lock);
- ret = -EBUSY;
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
!i915_vma_is_active(vma))
ret = i915_vma_unbind(vma);
+ i915_vm_close(vm);
spin_lock(&obj->vma.lock);
}
list_splice(&still_in_list, &obj->vma.list);
@@ -338,10 +355,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
u64 remain, offset;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = ERR_PTR(-ENODEV);
if (!i915_gem_object_is_tiled(obj))
@@ -351,16 +364,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
+ node.flags = 0;
} else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out_unlock;
- GEM_BUG_ON(!node.allocated);
+ goto out_rpm;
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- mutex_unlock(&i915->drm.struct_mutex);
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
@@ -393,7 +404,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
@@ -414,17 +425,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- mutex_lock(&i915->drm.struct_mutex);
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(&node);
+ remove_mappable_node(ggtt, &node);
} else {
i915_vma_unpin(vma);
}
-out_unlock:
+out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return ret;
}
@@ -531,10 +539,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
void __user *user_data;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
@@ -544,10 +548,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* using the cache bypass of indirect GGTT access.
*/
wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (!wakeref) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ if (!wakeref)
+ return -EFAULT;
} else {
/* No backing pages, no fallback, we must force GGTT access */
wakeref = intel_runtime_pm_get(rpm);
@@ -561,16 +563,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
+ node.flags = 0;
} else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
- GEM_BUG_ON(!node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- mutex_unlock(&i915->drm.struct_mutex);
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
/* flush the write before we modify the GGTT */
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
@@ -634,18 +634,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- mutex_lock(&i915->drm.struct_mutex);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(&node);
+ remove_mappable_node(ggtt, &node);
} else {
i915_vma_unpin(vma);
}
out_rpm:
intel_runtime_pm_put(rpm, wakeref);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -887,74 +884,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
-static long
-wait_for_timelines(struct drm_i915_private *i915,
- unsigned int wait, long timeout)
-{
- struct intel_gt_timelines *timelines = &i915->gt.timelines;
- struct intel_timeline *tl;
- unsigned long flags;
-
- spin_lock_irqsave(&timelines->lock, flags);
- list_for_each_entry(tl, &timelines->active_list, link) {
- struct i915_request *rq;
-
- rq = i915_active_request_get_unlocked(&tl->last_request);
- if (!rq)
- continue;
-
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- /*
- * "Race-to-idle".
- *
- * Switching to the kernel context is often used as a synchronous
- * step prior to idling, e.g. in suspend for flushing all
- * current operations to memory before sleeping. These we
- * want to complete as quickly as possible to avoid prolonged
- * stalls, so allow the gpu to boost to maximum clocks.
- */
- if (wait & I915_WAIT_FOR_IDLE_BOOST)
- gen6_rps_boost(rq);
-
- timeout = i915_request_wait(rq, wait, timeout);
- i915_request_put(rq);
- if (timeout < 0)
- return timeout;
-
- /* restart after reacquiring the lock */
- spin_lock_irqsave(&timelines->lock, flags);
- tl = list_entry(&timelines->active_list, typeof(*tl), link);
- }
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
-{
- /* If the device is asleep, we have no requests outstanding */
- if (!intel_gt_pm_is_awake(&i915->gt))
- return 0;
-
- GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
- flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
-
- timeout = wait_for_timelines(i915, flags, timeout);
- if (timeout < 0)
- return timeout;
-
- if (flags & I915_WAIT_LOCKED) {
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- i915_retire_requests(i915);
- }
-
- return 0;
-}
-
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -981,8 +910,6 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
if (i915_gem_object_never_bind_ggtt(obj))
return ERR_PTR(-ENODEV);
@@ -1032,13 +959,6 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
}
- WARN(i915_vma_is_pinned(vma),
- "bo is already pinned in ggtt with incorrect alignment:"
- " offset=%08x, req.alignment=%llx,"
- " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
- i915_ggtt_offset(vma), alignment,
- !!(flags & PIN_MAPPABLE),
- i915_vma_is_map_and_fenceable(vma));
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
@@ -1133,128 +1053,7 @@ out:
return err;
}
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref;
-
- GEM_TRACE("\n");
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- /*
- * As we have just resumed the machine and woken the device up from
- * deep PCI sleep (presumably D3_cold), assume the HW has been reset
- * back to defaults, recovering from whatever wedged state we left it
- * in and so worth trying to use the device once more.
- */
- if (intel_gt_is_wedged(&i915->gt))
- intel_gt_unset_wedged(&i915->gt);
-
- /*
- * If we inherit context state from the BIOS or earlier occupants
- * of the GPU, the GPU may be in an inconsistent state when we
- * try to take over. The only way to remove the earlier state
- * is by resetting. However, resetting on earlier gen is tricky as
- * it may impact the display and we are uncertain about the stability
- * of the reset, so this could be applied to even earlier gen.
- */
- intel_gt_sanitize(&i915->gt, false);
-
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-}
-
-static void init_unused_ring(struct intel_gt *gt, u32 base)
-{
- struct intel_uncore *uncore = gt->uncore;
-
- intel_uncore_write(uncore, RING_CTL(base), 0);
- intel_uncore_write(uncore, RING_HEAD(base), 0);
- intel_uncore_write(uncore, RING_TAIL(base), 0);
- intel_uncore_write(uncore, RING_START(base), 0);
-}
-
-static void init_unused_rings(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- if (IS_I830(i915)) {
- init_unused_ring(gt, PRB1_BASE);
- init_unused_ring(gt, SRB0_BASE);
- init_unused_ring(gt, SRB1_BASE);
- init_unused_ring(gt, SRB2_BASE);
- init_unused_ring(gt, SRB3_BASE);
- } else if (IS_GEN(i915, 2)) {
- init_unused_ring(gt, SRB0_BASE);
- init_unused_ring(gt, SRB1_BASE);
- } else if (IS_GEN(i915, 3)) {
- init_unused_ring(gt, PRB1_BASE);
- init_unused_ring(gt, PRB2_BASE);
- }
-}
-
-int i915_gem_init_hw(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
- struct intel_gt *gt = &i915->gt;
- int ret;
-
- BUG_ON(!i915->kernel_context);
- ret = intel_gt_terminally_wedged(gt);
- if (ret)
- return ret;
-
- gt->last_init_time = ktime_get();
-
- /* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
- if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
- intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
-
- if (IS_HASWELL(i915))
- intel_uncore_write(uncore,
- MI_PREDICATE_RESULT_2,
- IS_HSW_GT3(i915) ?
- LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
-
- /* Apply the GT workarounds... */
- intel_gt_apply_workarounds(gt);
- /* ...and determine whether they are sticking. */
- intel_gt_verify_workarounds(gt, "init");
-
- intel_gt_init_swizzling(gt);
-
- /*
- * At least 830 can leave some of the unused rings
- * "active" (ie. head != tail) after resume which
- * will prevent c3 entry. Makes sure all unused rings
- * are totally idle.
- */
- init_unused_rings(gt);
-
- ret = i915_ppgtt_init_hw(gt);
- if (ret) {
- DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
- goto out;
- }
-
- /* We can't enable contexts until all firmware is loaded */
- ret = intel_uc_init_hw(&gt->uc);
- if (ret) {
- i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
- goto out;
- }
-
- intel_mocs_init(gt);
-
-out:
- intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
- return ret;
-}
-
-static int __intel_engines_record_defaults(struct drm_i915_private *i915)
+static int __intel_engines_record_defaults(struct intel_gt *gt)
{
struct i915_request *requests[I915_NUM_ENGINES] = {};
struct intel_engine_cs *engine;
@@ -1270,7 +1069,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
* from the same default HW values.
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
struct i915_request *rq;
@@ -1278,7 +1077,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
GEM_BUG_ON(!engine->kernel_context);
engine->serial++; /* force the kernel context switch */
- ce = intel_context_create(i915->kernel_context, engine);
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
@@ -1295,15 +1095,6 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_rq;
- /*
- * Failing to program the MOCS is non-fatal. The system will not
- * run at peak performance. So warn the user and carry on.
- */
- err = intel_mocs_emit(rq);
- if (err)
- dev_notice(i915->drm.dev,
- "Failed to program MOCS registers; expect performance issues.\n");
-
err = intel_renderstate_emit(rq);
if (err)
goto err_rq;
@@ -1316,7 +1107,7 @@ err_rq:
}
/* Flush the default context image to memory, and enable powersaving. */
- if (!i915_gem_load_power_context(i915)) {
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
err = -EIO;
goto out;
}
@@ -1375,7 +1166,7 @@ out:
* this is by declaring ourselves wedged.
*/
if (err)
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct intel_context *ce;
@@ -1392,18 +1183,7 @@ out:
return err;
}
-static int
-i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
-{
- return intel_gt_init_scratch(&i915->gt, size);
-}
-
-static void i915_gem_fini_scratch(struct drm_i915_private *i915)
-{
- intel_gt_fini_scratch(&i915->gt);
-}
-
-static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
+static int intel_engines_verify_workarounds(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1412,7 +1192,7 @@ static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (intel_engine_verify_workarounds(engine, "load"))
err = -EIO;
}
@@ -1444,7 +1224,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* we hold the forcewake during initialisation these problems
* just magically go away.
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = i915_init_ggtt(dev_priv);
@@ -1453,36 +1232,29 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_init_scratch(dev_priv,
- IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_ggtt;
- }
+ intel_gt_init(&dev_priv->gt);
- ret = intel_engines_setup(dev_priv);
+ ret = intel_engines_setup(&dev_priv->gt);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_contexts(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_scratch;
}
- ret = intel_engines_init(dev_priv);
+ ret = intel_engines_init(&dev_priv->gt);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_context;
}
- intel_init_gt_powersave(dev_priv);
-
intel_uc_init(&dev_priv->gt.uc);
- ret = i915_gem_init_hw(dev_priv);
+ ret = intel_gt_init_hw(&dev_priv->gt);
if (ret)
goto err_uc_init;
@@ -1502,24 +1274,23 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_engines_verify_workarounds(dev_priv);
+ ret = intel_engines_verify_workarounds(&dev_priv->gt);
if (ret)
goto err_gt;
- ret = __intel_engines_record_defaults(dev_priv);
+ ret = __intel_engines_record_defaults(&dev_priv->gt);
if (ret)
goto err_gt;
- ret = i915_inject_load_error(dev_priv, -ENODEV);
+ ret = i915_inject_probe_error(dev_priv, -ENODEV);
if (ret)
goto err_gt;
- ret = i915_inject_load_error(dev_priv, -EIO);
+ ret = i915_inject_probe_error(dev_priv, -EIO);
if (ret)
goto err_gt;
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
@@ -1530,32 +1301,25 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* driver doesn't explode during runtime.
*/
err_gt:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged_on_init(&dev_priv->gt);
i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
intel_uc_fini_hw(&dev_priv->gt.uc);
err_uc_init:
if (ret != -EIO) {
intel_uc_fini(&dev_priv->gt.uc);
- intel_cleanup_gt_powersave(dev_priv);
- intel_engines_cleanup(dev_priv);
+ intel_engines_cleanup(&dev_priv->gt);
}
err_context:
if (ret != -EIO)
- i915_gem_contexts_fini(dev_priv);
+ i915_gem_driver_release__contexts(dev_priv);
err_scratch:
- i915_gem_fini_scratch(dev_priv);
-err_ggtt:
+ intel_gt_driver_release(&dev_priv->gt);
err_unlock:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret != -EIO) {
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
@@ -1564,8 +1328,6 @@ err_unlock:
}
if (ret == -EIO) {
- mutex_lock(&dev_priv->drm.struct_mutex);
-
/*
* Allow engines or uC initialisation to fail by marking the GPU
* as wedged. But we only want to do this when the GPU is angry,
@@ -1580,10 +1342,8 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
i915_gem_drain_freed_objects(dev_priv);
@@ -1604,48 +1364,35 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- GEM_BUG_ON(dev_priv->gt.awake);
-
intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
i915_gem_suspend_late(dev_priv);
- intel_disable_gt_powersave(dev_priv);
+ intel_gt_driver_remove(&dev_priv->gt);
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(&dev_priv->gt.uc);
intel_uc_fini(&dev_priv->gt.uc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
}
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_engines_cleanup(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- i915_gem_fini_scratch(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_engines_cleanup(&dev_priv->gt);
+ i915_gem_driver_release__contexts(dev_priv);
+ intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
- intel_cleanup_gt_powersave(dev_priv);
-
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
intel_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
-void i915_gem_init_mmio(struct drm_i915_private *i915)
-{
- i915_gem_sanitize(i915);
+ WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
@@ -1660,20 +1407,11 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
i915_gem_init__objects(i915);
}
-int i915_gem_init_early(struct drm_i915_private *dev_priv)
+void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- int err;
-
i915_gem_init__mm(dev_priv);
- i915_gem_init__pm(dev_priv);
spin_lock_init(&dev_priv->fb_tracking.lock);
-
- err = i915_gemfs_init(dev_priv);
- if (err)
- DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
-
- return 0;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -1682,8 +1420,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.shrink_count);
-
- i915_gemfs_fini(dev_priv);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 6795f1daa3d5..f6f9675848b8 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -37,10 +37,8 @@ struct drm_i915_private;
#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
- pr_err("%s:%d GEM_BUG_ON(%s)\n", \
- __func__, __LINE__, __stringify(condition)); \
- GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
- __func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
+ __func__, __LINE__, __stringify(condition)); \
BUG(); \
} \
} while(0)
@@ -66,11 +64,16 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
+#define GEM_TRACE_ERR(...) do { \
+ pr_err(__VA_ARGS__); \
+ trace_printk(__VA_ARGS__); \
+} while (0)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
#define GEM_TRACE_DUMP_ON(expr) \
do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
+#define GEM_TRACE_ERR(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
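For illustration, a minimal sketch of the reworked assertion path; the sample condition mirrors the drm_mm checks used elsewhere in this patch, and the behaviour noted in the comment assumes CONFIG_DRM_I915_DEBUG_GEM and CONFIG_DRM_I915_TRACE_GEM are both enabled.

	/* With tracing enabled, a failed check is now reported once through
	 * GEM_TRACE_ERR() (pr_err + trace_printk) before BUG() fires. */
	GEM_BUG_ON(!drm_mm_node_allocated(&node));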
@@ -83,6 +86,11 @@ static inline void tasklet_lock(struct tasklet_struct *t)
cpu_relax();
}
+static inline bool tasklet_is_locked(const struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_RUN, &t->state);
+}
+
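A hedged sketch of the kind of assertion the new helper enables; the execlists tasklet named here is an assumption about the caller, not part of this hunk.

	/* Hypothetical assertion: the caller expects to be running from the
	 * engine's execlists tasklet, i.e. TASKLET_STATE_RUN is set. */
	GEM_BUG_ON(!tasklet_is_locked(&engine->execlists.tasklet));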
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 52c86c6e0673..7e62c310290f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -29,6 +29,7 @@
#include <drm/i915_drm.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -37,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
-static int ggtt_flush(struct drm_i915_private *i915)
+static int ggtt_flush(struct intel_gt *gt)
{
/*
* Not everything in the GGTT is tracked via vma (otherwise we
@@ -46,10 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- return i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
static bool
@@ -70,7 +68,7 @@ mark_free(struct drm_mm_scan *scan,
* @vm: address space to evict from
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
- * @cache_level: cache_level for the desired space
+ * @color: color for the desired space
* @start: start (inclusive) of the range from which to evict objects
* @end: end (exclusive) of the range from which to evict objects
* @flags: additional flags to control the eviction algorithm
@@ -91,11 +89,10 @@ mark_free(struct drm_mm_scan *scan,
int
i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags)
{
- struct drm_i915_private *dev_priv = vm->i915;
struct drm_mm_scan scan;
struct list_head eviction_list;
struct i915_vma *vma, *next;
@@ -104,7 +101,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *active;
int ret;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -124,17 +121,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
drm_mm_scan_init_with_range(&scan, &vm->mm,
- min_size, alignment, cache_level,
+ min_size, alignment, color,
start, end, mode);
- /*
- * Retire before we search the active list. Although we have
- * reasonable accuracy in our retirement lists, we may have
- * a stray pin (preventing eviction) that can only be resolved by
- * retiring.
- */
- if (!(flags & PIN_NONBLOCK))
- i915_retire_requests(dev_priv);
+ intel_gt_retire_requests(vm->gt);
search_again:
active = NULL;
@@ -207,7 +197,7 @@ search_again:
if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
return -EBUSY;
- ret = ggtt_flush(dev_priv);
+ ret = ggtt_flush(vm->gt);
if (ret)
return ret;
@@ -235,12 +225,12 @@ found:
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
@@ -266,25 +256,23 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
u64 start = target->start;
u64 end = start + target->size;
struct i915_vma *vma, *next;
- bool check_color;
int ret = 0;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
trace_i915_gem_evict_node(vm, target, flags);
- /* Retire before we search the active list. Although we have
+ /*
+ * Retire before we search the active list. Although we have
* reasonable accuracy in our retirement lists, we may have
* a stray pin (preventing eviction) that can only be resolved by
* retiring.
*/
- if (!(flags & PIN_NONBLOCK))
- i915_retire_requests(vm->i915);
+ intel_gt_retire_requests(vm->gt);
- check_color = vm->mm.color_adjust;
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start)
start -= I915_GTT_PAGE_SIZE;
@@ -301,7 +289,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
/* If we are using coloring to insert guard pages between
@@ -310,7 +298,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* abut and conflict. If they are in conflict, then we evict
* those as well to make room for our guard pages.
*/
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
if (node->start + node->size == target->start) {
if (node->color == target->color)
continue;
@@ -351,7 +339,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
@@ -375,7 +363,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
struct i915_vma *vma, *next;
int ret;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict_vm(vm);
/* Switch back to the default context in order to unpin
@@ -384,13 +372,12 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
* switch otherwise is ineffective.
*/
if (i915_is_ggtt(vm)) {
- ret = ggtt_flush(vm->i915);
+ ret = ggtt_flush(vm->gt);
if (ret)
return ret;
}
INIT_LIST_HEAD(&eviction_list);
- mutex_lock(&vm->mutex);
list_for_each_entry(vma, &vm->bound_list, vm_link) {
if (i915_vma_is_pinned(vma))
continue;
@@ -398,13 +385,12 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
- mutex_unlock(&vm->mutex);
ret = 0;
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 615a9f4ef30c..71efccfde122 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -59,6 +59,16 @@
#define pipelined 0
+static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.i915;
+}
+
+static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.gt->uncore;
+}
+
static void i965_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
@@ -66,7 +76,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
int fence_pitch_shift;
u64 val;
- if (INTEL_GEN(fence->i915) >= 6) {
+ if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -95,7 +105,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
/*
* To w/a incoherency with non-atomic 64-bit register updates,
@@ -132,7 +142,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
- if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
+ if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
stride /= 128;
else
stride /= 512;
@@ -148,7 +158,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
@@ -180,7 +190,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
@@ -191,15 +201,17 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
static void fence_write(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = fence_to_i915(fence);
+
/*
* Previous access through the fence register is marshalled by
* the mb() inside the fault handlers (i915_gem_release_mmaps)
* and explicitly managed for internal users.
*/
- if (IS_GEN(fence->i915, 2))
+ if (IS_GEN(i915, 2))
i830_write_fence_reg(fence, vma);
- else if (IS_GEN(fence->i915, 3))
+ else if (IS_GEN(i915, 3))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
@@ -215,6 +227,8 @@ static void fence_write(struct i915_fence_reg *fence,
static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct i915_ggtt *ggtt = fence->ggtt;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
intel_wakeref_t wakeref;
struct i915_vma *old;
int ret;
@@ -230,14 +244,15 @@ static int fence_update(struct i915_fence_reg *fence,
i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
- ret = i915_active_wait(&vma->active);
+ ret = i915_vma_sync(vma);
if (ret)
return ret;
}
old = xchg(&fence->vma, NULL);
if (old) {
- ret = i915_active_wait(&old->active);
+ /* XXX Ideally we would move the waiting to outside the mutex */
+ ret = i915_vma_sync(old);
if (ret) {
fence->vma = old;
return ret;
@@ -255,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence,
old->fence = NULL;
}
- list_move(&fence->link, &fence->i915->ggtt.fence_list);
+ list_move(&fence->link, &ggtt->fence_list);
}
/*
@@ -268,7 +283,7 @@ static int fence_update(struct i915_fence_reg *fence,
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
- wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
@@ -279,10 +294,10 @@ static int fence_update(struct i915_fence_reg *fence,
if (vma) {
vma->fence = fence;
- list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
+ list_move_tail(&fence->link, &ggtt->fence_list);
}
- intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
}
@@ -311,11 +326,11 @@ int i915_vma_revoke_fence(struct i915_vma *vma)
return fence_update(fence, NULL);
}
-static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
+static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
struct i915_fence_reg *fence;
- list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
+ list_for_each_entry(fence, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (atomic_read(&fence->pin_count))
@@ -325,19 +340,21 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(i915))
+ if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
}
-static int __i915_vma_pin_fence(struct i915_vma *vma)
+int __i915_vma_pin_fence(struct i915_vma *vma)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
+ lockdep_assert_held(&vma->vm->mutex);
+
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
@@ -348,7 +365,7 @@ static int __i915_vma_pin_fence(struct i915_vma *vma)
return 0;
}
} else if (set) {
- fence = fence_find(vma->vm->i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return PTR_ERR(fence);
@@ -399,7 +416,7 @@ int i915_vma_pin_fence(struct i915_vma *vma)
* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
- assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
+ assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -415,14 +432,13 @@ int i915_vma_pin_fence(struct i915_vma *vma)
/**
* i915_reserve_fence - Reserve a fence for vGPU
- * @i915: i915 device private
+ * @ggtt: Global GTT
*
* This function walks the fence regs looking for a free one and removes
* it from the fence_list. It is used to reserve fence for vGPU to use.
*/
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_fence_reg *fence;
int count;
int ret;
@@ -436,7 +452,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
if (count <= 1)
return ERR_PTR(-ENOSPC);
- fence = fence_find(i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return fence;
@@ -460,7 +476,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
*/
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
- struct i915_ggtt *ggtt = &fence->i915->ggtt;
+ struct i915_ggtt *ggtt = fence->ggtt;
lockdep_assert_held(&ggtt->vm.mutex);
@@ -469,19 +485,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
/**
* i915_gem_restore_fences - restore fence state
- * @i915: i915 device private
+ * @ggtt: Global GTT
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_i915_private *i915)
+void i915_gem_restore_fences(struct i915_ggtt *ggtt)
{
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ for (i = 0; i < ggtt->num_fences; i++) {
+ struct i915_fence_reg *reg = &ggtt->fence_regs[i];
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -547,15 +563,16 @@ void i915_gem_restore_fences(struct drm_i915_private *i915)
*/
/**
- * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @i915: i915 device private
+ * detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @ggtt: Global GGTT
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
-static void detect_bit_6_swizzle(struct drm_i915_private *i915)
+static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -717,8 +734,8 @@ static void detect_bit_6_swizzle(struct drm_i915_private *i915)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
- i915->mm.bit_6_swizzle_x = swizzle_x;
- i915->mm.bit_6_swizzle_y = swizzle_y;
+ i915->ggtt.bit_6_swizzle_x = swizzle_x;
+ i915->ggtt.bit_6_swizzle_y = swizzle_y;
}
/*
@@ -819,17 +836,20 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
int num_fences;
int i;
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
- intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);
+ intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
- detect_bit_6_swizzle(i915);
+ detect_bit_6_swizzle(ggtt);
- if (INTEL_GEN(i915) >= 7 &&
- !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+ if (!i915_ggtt_has_aperture(ggtt))
+ num_fences = 0;
+ else if (INTEL_GEN(i915) >= 7 &&
+ !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
else if (INTEL_GEN(i915) >= 4 ||
IS_I945G(i915) || IS_I945GM(i915) ||
@@ -839,20 +859,20 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
num_fences = 8;
if (intel_vgpu_active(i915))
- num_fences = intel_uncore_read(&i915->uncore,
+ num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
/* Initialize fence registers to zero */
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
- fence->i915 = i915;
+ fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
}
ggtt->num_fences = num_fences;
- i915_gem_restore_fences(i915);
+ i915_gem_restore_fences(ggtt);
}
void intel_gt_init_swizzling(struct intel_gt *gt)
@@ -861,7 +881,7 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
if (INTEL_GEN(i915) < 5 ||
- i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
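The hunks above consistently narrow the fence bookkeeping from the device-wide private structure to the i915_ggtt that actually owns the fence registers: callers now pass the GGTT, and each register carries a back-pointer to it. A minimal, self-contained C sketch of that ownership-narrowing pattern, using hypothetical names rather than the driver's real types:

/* Illustrative only: the resource keeps a back-pointer to its owner
 * (the GGTT), so helpers take the owner directly instead of reaching
 * through a device-wide private structure. */
struct ggtt;

struct fence_reg {
	struct ggtt *ggtt;	/* owner; previously a device-wide pointer */
	int id;
};

struct ggtt {
	struct fence_reg regs[32];
	int num_fences;
};

static void ggtt_init_fences(struct ggtt *ggtt, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		ggtt->regs[i].ggtt = ggtt;	/* back-pointer to the owner */
		ggtt->regs[i].id = i;
	}
	ggtt->num_fences = count;
}

With the back-pointer in place, helpers such as the reserve/restore paths only need the GGTT argument, which is what the signature changes above implement.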
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 99866fb9d94f..7bd521cd7cd7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
struct drm_i915_gem_object;
-struct drm_i915_private;
struct i915_ggtt;
struct i915_vma;
struct intel_gt;
@@ -39,7 +38,7 @@ struct sg_table;
struct i915_fence_reg {
struct list_head link;
- struct drm_i915_private *i915;
+ struct i915_ggtt *ggtt;
struct i915_vma *vma;
atomic_t pin_count;
int id;
@@ -55,10 +54,10 @@ struct i915_fence_reg {
};
/* i915_gem_fence_reg.c */
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915);
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
void i915_unreserve_fence(struct i915_fence_reg *fence);
-void i915_gem_restore_fences(struct drm_i915_private *i915);
+void i915_gem_restore_fences(struct i915_ggtt *ggtt);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b1a7a8b9b46a..6239a9adbf14 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -38,6 +38,7 @@
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -132,9 +133,15 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
gen6_ggtt_invalidate(ggtt);
- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+ if (INTEL_GEN(i915) >= 12)
+ intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ else
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -144,16 +151,18 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
u32 pte_flags;
int err;
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ if (flags & I915_VMA_ALLOC) {
err = vma->vm->allocate_va_range(vma->vm,
vma->node.start, vma->size);
if (err)
return err;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}
/* Applicable to VLV, and gen8+ */
@@ -161,14 +170,17 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ wmb();
return 0;
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
static int ppgtt_set_pages(struct i915_vma *vma)
@@ -496,22 +508,26 @@ static void i915_address_space_fini(struct i915_address_space *vm)
mutex_destroy(&vm->mutex);
}
-static void ppgtt_destroy_vma(struct i915_address_space *vm)
+void __i915_vm_close(struct i915_address_space *vm)
{
- struct list_head *phases[] = {
- &vm->bound_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
+ struct i915_vma *vma, *vn;
+
+ mutex_lock(&vm->mutex);
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
- mutex_lock(&vm->i915->drm.struct_mutex);
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
+ /* Keep the obj (and hence the vma) alive as _we_ destroy it */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ i915_vma_destroy(vma);
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- i915_vma_destroy(vma);
+ i915_gem_object_put(obj);
}
- mutex_unlock(&vm->i915->drm.struct_mutex);
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ mutex_unlock(&vm->mutex);
}
static void __i915_vm_release(struct work_struct *work)
@@ -519,11 +535,6 @@ static void __i915_vm_release(struct work_struct *work)
struct i915_address_space *vm =
container_of(work, struct i915_address_space, rcu.work);
- ppgtt_destroy_vma(vm);
-
- GEM_BUG_ON(!list_empty(&vm->bound_list));
- GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
vm->cleanup(vm);
i915_address_space_fini(vm);
@@ -538,7 +549,6 @@ void i915_vm_release(struct kref *kref)
GEM_BUG_ON(i915_is_ggtt(vm));
trace_i915_ppgtt_release(vm);
- vm->closed = true;
queue_rcu_work(vm->i915->wq, &vm->rcu);
}
@@ -546,6 +556,7 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+ atomic_set(&vm->open, 1);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -562,7 +573,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
stash_init(&vm->free_pages);
- INIT_LIST_HEAD(&vm->unbound_list);
INIT_LIST_HEAD(&vm->bound_list);
}
@@ -816,17 +826,6 @@ release_pd_entry(struct i915_page_directory * const pd,
return free;
}
-/*
- * PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
-{
- ppgtt->pd_dirty_engines = ALL_ENGINES;
-}
-
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *dev_priv = ppgtt->vm.i915;
@@ -1367,7 +1366,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (vm->has_read_only &&
vm->i915->kernel_context &&
vm->i915->kernel_context->vm) {
- struct i915_address_space *clone = vm->i915->kernel_context->vm;
+ struct i915_address_space *clone =
+ rcu_dereference_protected(vm->i915->kernel_context->vm,
+ true); /* static */
GEM_BUG_ON(!clone->has_read_only);
@@ -1422,6 +1423,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
set_pd_entry(pd, idx, pde);
atomic_inc(px_used(pde)); /* keep pinned */
}
+ wmb();
return 0;
}
@@ -1489,8 +1491,10 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
*
* Gen11 has HSDES#:1807136187 unresolved. Disable ro support
* for now.
+ *
+ * Gen12 has inherited the same read-only fault issue from gen11.
*/
- ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
+ ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12);
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
@@ -1509,13 +1513,12 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
}
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- if (intel_vgpu_active(i915)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_free_pd;
}
+ ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
@@ -1566,7 +1569,7 @@ static void gen7_ppgtt_enable(struct intel_gt *gt)
}
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
/* GFX_MODE is per-ring on gen7+ */
ENGINE_WRITE(engine,
RING_MODE_GEN7,
@@ -1729,10 +1732,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
}
spin_unlock(&pd->lock);
- if (flush) {
- mark_tlbs_dirty(&ppgtt->base);
+ if (flush)
gen6_ggtt_invalidate(vm->gt->ggtt);
- }
goto out;
@@ -1786,15 +1787,13 @@ static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct drm_i915_private *i915 = vm->i915;
- /* FIXME remove the struct_mutex to bring the locking under control */
- mutex_lock(&i915->drm.struct_mutex);
i915_vma_destroy(ppgtt->vma);
- mutex_unlock(&i915->drm.struct_mutex);
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
+
+ mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt->base.pd);
}
@@ -1827,7 +1826,6 @@ static int pd_vma_bind(struct i915_vma *vma,
gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
gen6_write_pde(ppgtt, pde, pt);
- mark_tlbs_dirty(&ppgtt->base);
gen6_ggtt_invalidate(ggtt);
return 0;
@@ -1866,7 +1864,6 @@ static const struct i915_vma_ops pd_vma_ops = {
static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
- struct drm_i915_private *i915 = ppgtt->base.vm.i915;
struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
struct i915_vma *vma;
@@ -1877,33 +1874,30 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &vma->active, NULL, NULL);
+ i915_active_init(&vma->active, NULL, NULL);
- vma->vm = &ggtt->vm;
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(&ggtt->vm);
vma->ops = &pd_vma_ops;
vma->private = ppgtt;
vma->size = size;
vma->fence_size = size;
- vma->flags = I915_VMA_GGTT;
+ atomic_set(&vma->flags, I915_VMA_GGTT);
vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->closed_link);
- mutex_lock(&vma->vm->mutex);
- list_add(&vma->vm_link, &vma->vm->unbound_list);
- mutex_unlock(&vma->vm->mutex);
-
return vma;
}
int gen6_ppgtt_pin(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- int err;
+ int err = 0;
- GEM_BUG_ON(ppgtt->base.vm.closed);
+ GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -1911,24 +1905,26 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base)
* (When vma->pin_count becomes atomic, I expect we will naturally
* need a larger, unpacked, type and kill this redundancy.)
*/
- if (ppgtt->pin_count++)
+ if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
return 0;
+ if (mutex_lock_interruptible(&ppgtt->pin_mutex))
+ return -EINTR;
+
/*
 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- err = i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
- if (err)
- goto unpin;
-
- return 0;
+ if (!atomic_read(&ppgtt->pin_count)) {
+ err = i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
+ }
+ if (!err)
+ atomic_inc(&ppgtt->pin_count);
+ mutex_unlock(&ppgtt->pin_mutex);
-unpin:
- ppgtt->pin_count = 0;
return err;
}
@@ -1936,22 +1932,20 @@ void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- GEM_BUG_ON(!ppgtt->pin_count);
- if (--ppgtt->pin_count)
- return;
-
- i915_vma_unpin(ppgtt->vma);
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
}
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- if (!ppgtt->pin_count)
+ if (!atomic_read(&ppgtt->pin_count))
return;
- ppgtt->pin_count = 0;
i915_vma_unpin(ppgtt->vma);
+ atomic_set(&ppgtt->pin_count, 0);
}
static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
@@ -1964,9 +1958,12 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
+ mutex_init(&ppgtt->pin_mutex);
+
ppgtt_init(&ppgtt->base, &i915->gt);
ppgtt->base.vm.top = 1;
+ ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
@@ -2023,7 +2020,7 @@ static void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(i915) >= 9)
+ else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
@@ -2202,7 +2199,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_dma(addr, sgt_iter, vma->pages)
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr);
/*
@@ -2243,7 +2240,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
struct sgt_iter iter;
dma_addr_t addr;
- for_each_sgt_dma(addr, iter, vma->pages)
+ for_each_sgt_daddr(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
/*
@@ -2448,7 +2445,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
return 0;
}
@@ -2478,14 +2475,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ if (flags & I915_VMA_ALLOC) {
ret = alias->vm.allocate_va_range(&alias->vm,
vma->node.start,
vma->size);
if (ret)
return ret;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
+ __i915_vma_flags(vma)));
alias->vm.insert_entries(&alias->vm, vma,
cache_level, pte_flags);
}
@@ -2506,7 +2507,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- if (vma->flags & I915_VMA_GLOBAL_BIND) {
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref;
@@ -2514,7 +2515,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
vm->clear_range(vm, vma->node.start, vma->size);
}
- if (vma->flags & I915_VMA_LOCAL_BIND) {
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
struct i915_address_space *vm =
&i915_vm_to_ggtt(vma->vm)->alias->vm;
@@ -2530,7 +2531,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+ /* XXX This does not prevent more requests being submitted! */
+ if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
+ -MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -2555,12 +2558,12 @@ static int ggtt_set_pages(struct i915_vma *vma)
return 0;
}
-static void i915_gtt_color_adjust(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
+static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
{
- if (node->allocated && node->color != color)
+ if (i915_node_color_differs(node, color))
*start += I915_GTT_PAGE_SIZE;
/* Also leave a space between the unallocated reserved node after the
@@ -2598,6 +2601,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
goto err_ppgtt;
ggtt->alias = ppgtt;
+ ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
@@ -2614,22 +2618,16 @@ err_ppgtt:
static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_ppgtt *ppgtt;
- mutex_lock(&i915->drm.struct_mutex);
-
ppgtt = fetch_and_zero(&ggtt->alias);
if (!ppgtt)
- goto out;
+ return;
i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
-out:
- mutex_unlock(&i915->drm.struct_mutex);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -2661,7 +2659,8 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
ggtt_release_guc_top(ggtt);
- drm_mm_remove_node(&ggtt->error_capture);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
}
static int init_ggtt(struct i915_ggtt *ggtt)
@@ -2692,13 +2691,15 @@ static int init_ggtt(struct i915_ggtt *ggtt)
if (ret)
return ret;
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ if (ggtt->mappable_end) {
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (ret)
+ return ret;
+ }
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -2746,35 +2747,33 @@ int i915_init_ggtt(struct drm_i915_private *i915)
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_vma *vma, *vn;
- ggtt->vm.closed = true;
+ atomic_set(&ggtt->vm.open, 0);
rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
- flush_workqueue(i915->wq);
+ flush_workqueue(ggtt->vm.i915->wq);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&ggtt->vm.mutex);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
+ WARN_ON(__i915_vma_unbind(vma));
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
ggtt_release_guc_top(ggtt);
-
- if (drm_mm_initialized(&ggtt->vm.mm)) {
- intel_vgt_deballoon(ggtt);
- i915_address_space_fini(&ggtt->vm);
- }
+ intel_vgt_deballoon(ggtt);
ggtt->vm.cleanup(&ggtt->vm);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&ggtt->vm.mutex);
+ i915_address_space_fini(&ggtt->vm);
arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->iomap);
+
+ if (ggtt->iomap.size)
+ io_mapping_fini(&ggtt->iomap);
}
/**
@@ -2794,8 +2793,6 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
- i915_gem_cleanup_stolen(i915);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2873,35 +2870,51 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
- I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
- I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
- I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
-}
-
-static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
- I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
+static void cnl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(0),
+ GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(1),
+ GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(2),
+ GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(3),
+ GEN8_PPAT_UC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(4),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(5),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(6),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(7),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
u64 pat;
@@ -2914,11 +2927,11 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
- I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
-static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
u64 pat;
@@ -2950,8 +2963,8 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
GEN8_PPAT(7, CHV_PPAT_SNOOP);
- I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -2962,18 +2975,26 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
-static void setup_private_pat(struct drm_i915_private *dev_priv)
+static void setup_private_pat(struct intel_uncore *uncore)
{
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
+ struct drm_i915_private *i915 = uncore->i915;
- if (INTEL_GEN(dev_priv) >= 12)
- tgl_setup_private_ppat(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ if (INTEL_GEN(i915) >= 12)
+ tgl_setup_private_ppat(uncore);
+ else if (INTEL_GEN(i915) >= 10)
+ cnl_setup_private_ppat(uncore);
+ else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
+ chv_setup_private_ppat(uncore);
else
- bdw_setup_private_ppat(dev_priv);
+ bdw_setup_private_ppat(uncore);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
+{
+ return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -2985,10 +3006,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ if (!IS_DGFX(dev_priv)) {
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3029,7 +3050,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.pte_encode = gen8_pte_encode;
- setup_private_pat(dev_priv);
+ setup_private_pat(ggtt->vm.gt->uncore);
return ggtt_probe_common(ggtt, size);
}
@@ -3200,9 +3221,6 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- int ret = 0;
-
- mutex_lock(&i915->drm.struct_mutex);
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
@@ -3212,24 +3230,23 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
- ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
-
- if (!io_mapping_init_wc(&ggtt->iomap,
- ggtt->gmadr.start,
- ggtt->mappable_end)) {
- ggtt->vm.cleanup(&ggtt->vm);
- ret = -EIO;
- goto out;
- }
+ ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
+
+ if (ggtt->mappable_end) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
+ return -EIO;
+ }
- ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+ }
i915_ggtt_init_fences(ggtt);
-out:
- mutex_unlock(&i915->drm.struct_mutex);
-
- return ret;
+ return 0;
}
/**
@@ -3251,19 +3268,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- /*
- * Initialise stolen early so that we may reserve preallocated
- * objects for the BIOS to KMS transition.
- */
- ret = i915_gem_init_stolen(dev_priv);
- if (ret)
- goto out_gtt_cleanup;
-
return 0;
-
-out_gtt_cleanup:
- dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
- return ret;
}
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
@@ -3301,6 +3306,7 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
{
struct i915_vma *vma, *vn;
bool flush = false;
+ int open;
intel_gt_check_and_clear_faults(ggtt->vm.gt);
@@ -3308,33 +3314,31 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
- if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
- mutex_unlock(&ggtt->vm.mutex);
-
- if (!i915_vma_unbind(vma))
- goto lock;
+ if (!__i915_vma_unbind(vma))
+ continue;
+ clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
- PIN_UPDATE));
+ PIN_GLOBAL, NULL));
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
-
-lock:
- mutex_lock(&ggtt->vm.mutex);
}
- ggtt->vm.closed = false;
+ atomic_set(&ggtt->vm.open, open);
ggtt->invalidate(ggtt);
mutex_unlock(&ggtt->vm.mutex);
@@ -3345,10 +3349,12 @@ lock:
void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
{
- ggtt_restore_mappings(&i915->ggtt);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ ggtt_restore_mappings(ggtt);
if (INTEL_GEN(i915) >= 8)
- setup_private_pat(i915);
+ setup_private_pat(ggtt->vm.gt->uncore);
}
static struct scatterlist *
@@ -3726,7 +3732,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 offset;
int err;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
+
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b97a47fc7a68..402283ce2864 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -148,8 +148,8 @@ typedef u64 gen8_pte_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
-#define for_each_sgt_dma(__dmap, __iter, __sgt) \
- __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)
+#define for_each_sgt_daddr(__dp, __iter, __sgt) \
+ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
struct intel_remapped_plane_info {
/* in gtt pages */
@@ -305,7 +305,16 @@ struct i915_address_space {
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
- bool closed;
+ unsigned int bind_async_flags;
+
+ /*
+ * Each active user context has its own address space (in full-ppgtt).
+ * Since the vm may be shared between multiple contexts, we count how
+ * many contexts keep us "open". Once open hits zero, we are closed
+ * and do not allow any new attachments, and proceed to shutdown our
+ * vma and page directories.
+ */
+ atomic_t open;
struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
@@ -320,11 +329,6 @@ struct i915_address_space {
*/
struct list_head bound_list;
- /**
- * List of vma that are not unbound.
- */
- struct list_head unbound_list;
-
struct pagestash free_pages;
/* Global GTT */
@@ -376,6 +380,12 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+ return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -401,6 +411,11 @@ struct i915_ggtt {
int mtrr;
+ /** Bit 6 swizzling required for X tiling */
+ u32 bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ u32 bit_6_swizzle_y;
+
u32 pin_bias;
unsigned int num_fences;
@@ -422,7 +437,6 @@ struct i915_ggtt {
struct i915_ppgtt {
struct i915_address_space vm;
- intel_engine_mask_t pd_dirty_engines;
struct i915_page_directory *pd;
};
@@ -432,7 +446,9 @@ struct gen6_ppgtt {
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
- unsigned int pin_count;
+ atomic_t pin_count;
+ struct mutex pin_mutex;
+
bool scan_for_unused_pt;
};
@@ -559,6 +575,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
+static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
+{
+ return ggtt->mappable_end > 0;
+}
+
int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
@@ -577,6 +598,35 @@ static inline void i915_vm_put(struct i915_address_space *vm)
kref_put(&vm->ref, i915_vm_release);
}
+static inline struct i915_address_space *
+i915_vm_open(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ atomic_inc(&vm->open);
+ return i915_vm_get(vm);
+}
+
+static inline bool
+i915_vm_tryopen(struct i915_address_space *vm)
+{
+ if (atomic_add_unless(&vm->open, 1, 0))
+ return i915_vm_get(vm);
+
+ return false;
+}
+
+void __i915_vm_close(struct i915_address_space *vm);
+
+static inline void
+i915_vm_close(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ if (atomic_dec_and_test(&vm->open))
+ __i915_vm_close(vm);
+
+ i915_vm_put(vm);
+}
+
int gen6_ppgtt_pin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
@@ -609,10 +659,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
-#define PIN_MBZ BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT_ULL(11)
+#define PIN_UPDATE BIT_ULL(9)
+#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
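The new vm->open counter added above replaces the old boolean "closed" flag: each attached user holds an opener, the address space is shut down when the last opener leaves, and the kref keeps the memory alive until the final reference is dropped. A minimal sketch of that two-counter pattern in plain C11 atomics, with hypothetical names independent of the kernel's kref/atomic_t API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct vm {
	atomic_int ref;		/* lifetime: free when it reaches zero */
	atomic_int open;	/* active users: shut down when it reaches zero */
};

static struct vm *vm_get(struct vm *vm)
{
	atomic_fetch_add(&vm->ref, 1);
	return vm;
}

static void vm_put(struct vm *vm)
{
	if (atomic_fetch_sub(&vm->ref, 1) == 1)
		free(vm);	/* last reference gone */
}

static struct vm *vm_open(struct vm *vm)
{
	atomic_fetch_add(&vm->open, 1);
	return vm_get(vm);
}

static bool vm_tryopen(struct vm *vm)
{
	/* Succeed only while at least one opener still exists. */
	int cur = atomic_load(&vm->open);

	while (cur && !atomic_compare_exchange_weak(&vm->open, &cur, cur + 1))
		;
	if (!cur)
		return false;
	vm_get(vm);
	return true;
}

static void vm_close(struct vm *vm)
{
	if (atomic_fetch_sub(&vm->open, 1) == 1) {
		/* last opener: unbind and destroy the vmas here */
	}
	vm_put(vm);
}

This matches how __i915_vm_close() above is only reached once the final opener has gone, so it can unbind the remaining vmas under vm->mutex without racing new attachments.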
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 9f1517af5b7f..cf8a8c3ef047 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -5,6 +5,7 @@
#include "gt/intel_engine_user.h"
#include "i915_drv.h"
+#include "i915_perf.h"
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -79,8 +80,8 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915_modparams.enable_hangcheck &&
- intel_has_gpu_reset(i915);
- if (value && intel_has_reset_engine(i915))
+ intel_has_gpu_reset(&i915->gt);
+ if (value && intel_has_reset_engine(&i915->gt))
value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
@@ -156,6 +157,9 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
break;
+ case I915_PARAM_PERF_REVISION:
+ value = i915_perf_ioctl_version();
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
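The new I915_PARAM_PERF_REVISION case simply reports i915_perf_ioctl_version() through the existing getparam path. A hedged userspace sketch of querying it via the standard i915 GETPARAM ioctl; it assumes a libdrm/uapi header set that already carries the new parameter define, and the device node path and error handling are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* illustrative node */
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PERF_REVISION,
		.value = &value,
	};

	if (fd < 0)
		return 1;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("i915 perf ioctl revision: %d\n", value);
	close(fd);
	return 0;
}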
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index e284bd76fa86..3c85cb0ee99f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -40,6 +40,7 @@
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
@@ -235,6 +236,7 @@ struct compress {
struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
+ bool wc;
};
static bool compress_init(struct compress *c)
@@ -292,7 +294,7 @@ static int compress_page(struct compress *c,
struct z_stream_s *zstream = &c->zstream;
zstream->next_in = src;
- if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+ if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
@@ -367,6 +369,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
struct compress {
struct pagevec pool;
+ bool wc;
};
static bool compress_init(struct compress *c)
@@ -389,7 +392,7 @@ static int compress_page(struct compress *c,
if (!ptr)
return -ENOMEM;
- if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+ if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
@@ -421,6 +424,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
int slice;
int subslice;
@@ -436,12 +440,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) <= 6)
return;
- for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.sampler[slice][subslice]);
- for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
@@ -470,9 +474,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
- header, ctx->comm, ctx->pid, ctx->hw_id,
- ctx->sched_attr.priority, ctx->guilty, ctx->active);
+ err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+ header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
+ ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -533,10 +537,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
}
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
- err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
- jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
- ee->hangcheck_timestamp,
- ee->hangcheck_timestamp == epoch ? "; epoch" : "");
err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
@@ -574,6 +574,9 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
lower_32_bits(obj->gtt_offset));
}
+ if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
+ err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes);
+
err_compression_marker(m);
for (page = 0; page < obj->page_count; page++) {
int i, len;
@@ -675,11 +678,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
ts = ktime_to_timespec64(error->uptime);
err_printf(m, "Uptime: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
- err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
- err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
- error->capture,
- jiffies_to_msecs(jiffies - error->capture),
- jiffies_to_msecs(error->capture - error->epoch));
+ err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
+ error->capture, jiffies_to_msecs(jiffies - error->capture));
for (ee = error->engine; ee; ee = ee->next)
err_printf(m, "Active process (on ring %s): %s [%d]\n",
@@ -734,8 +734,24 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (IS_GEN(m->i915, 7))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+ if (IS_GEN_RANGE(m->i915, 8, 11))
+ err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache);
+
+ if (IS_GEN(m->i915, 12))
+ err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err);
+
+ if (INTEL_GEN(m->i915) >= 12) {
+ int i;
+
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+ err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
+ error->sfc_done[i]);
+
+ err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done);
+ }
+
for (ee = error->engine; ee; ee = ee->next)
- error_print_engine(m, ee, error->epoch);
+ error_print_engine(m, ee, error->capture);
for (ee = error->engine; ee; ee = ee->next) {
const struct drm_i915_error_object *obj;
@@ -763,7 +779,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
&ee->requests[j],
- error->epoch);
+ error->capture);
}
print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
@@ -963,7 +979,6 @@ i915_error_object_create(struct drm_i915_private *i915,
struct drm_i915_error_object *dst;
unsigned long num_pages;
struct sgt_iter iter;
- dma_addr_t dma;
int ret;
might_sleep();
@@ -984,21 +999,59 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->gtt_page_sizes = vma->page_sizes.gtt;
dst->num_pages = num_pages;
dst->page_count = 0;
dst->unused = 0;
+ compress->wc = i915_gem_object_is_lmem(vma->obj) ||
+ drm_mm_node_allocated(&ggtt->error_capture);
+
ret = -EINVAL;
- for_each_sgt_dma(dma, iter, vma->pages) {
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
+ dma_addr_t dma;
- ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+ for_each_sgt_daddr(dma, iter, vma->pages) {
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
- s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
- ret = compress_page(compress, (void __force *)s, dst);
- io_mapping_unmap(s);
- if (ret)
- break;
+ s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
+ ret = compress_page(compress, (void __force *)s, dst);
+ io_mapping_unmap(s);
+ if (ret)
+ break;
+ }
+ } else if (i915_gem_object_is_lmem(vma->obj)) {
+ struct intel_memory_region *mem = vma->obj->mm.region;
+ dma_addr_t dma;
+
+ for_each_sgt_daddr(dma, iter, vma->pages) {
+ void __iomem *s;
+
+ s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
+ ret = compress_page(compress, (void __force *)s, dst);
+ io_mapping_unmap(s);
+ if (ret)
+ break;
+ }
+ } else {
+ struct page *page;
+
+ for_each_sgt_page(page, iter, vma->pages) {
+ void *s;
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap(page);
+ ret = compress_page(compress, s, dst);
+ kunmap(s);
+
+ drm_clflush_pages(&page, 1);
+
+ if (ret)
+ break;
+ }
}
if (ret || compress_flush(compress, dst)) {
@@ -1136,8 +1189,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
ee->idle = intel_engine_is_idle(engine);
- if (!ee->idle)
- ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
@@ -1263,7 +1314,6 @@ static bool record_context(struct drm_i915_error_context *e,
rcu_read_unlock();
}
- e->hw_id = ctx->hw_id;
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
@@ -1291,7 +1341,7 @@ capture_vma(struct capture_vma *next,
if (!c)
return next;
- if (!i915_active_trygrab(&vma->active)) {
+ if (!i915_active_acquire_if_busy(&vma->active)) {
kfree(c);
return next;
}
@@ -1431,7 +1481,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
*this->slot =
i915_error_object_create(i915, vma, compress);
- i915_active_ungrab(&vma->active);
+ i915_active_release(&vma->active);
i915_vma_put(vma);
capture = this->next;
@@ -1553,6 +1603,21 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
+ if (IS_GEN_RANGE(i915, 8, 11))
+ error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
+
+ if (IS_GEN(i915, 12))
+ error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
+
+ if (INTEL_GEN(i915) >= 12) {
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ error->sfc_done[i] =
+ intel_uncore_read(uncore, GEN12_SFC_DONE(i));
+ }
+
+ error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
+ }
+
/* 4: Everything else */
if (INTEL_GEN(i915) >= 11) {
error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
@@ -1647,26 +1712,15 @@ static void capture_params(struct i915_gpu_state *error)
i915_params_copy(&error->params, &i915_modparams);
}
-static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
-{
- const struct drm_i915_error_engine *ee;
- unsigned long epoch = error->capture;
-
- for (ee = error->engine; ee; ee = ee->next) {
- if (ee->hangcheck_timestamp &&
- time_before(ee->hangcheck_timestamp, epoch))
- epoch = ee->hangcheck_timestamp;
- }
-
- return epoch;
-}
-
static void capture_finish(struct i915_gpu_state *error)
{
struct i915_ggtt *ggtt = &error->i915->ggtt;
- const u64 slot = ggtt->error_capture.start;
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
+ const u64 slot = ggtt->error_capture.start;
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ }
}
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
@@ -1712,8 +1766,6 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
error->overlay = intel_overlay_capture_error_state(i915);
error->display = intel_display_capture_error_state(i915);
- error->epoch = capture_find_epoch(error);
-
capture_finish(error);
compress_fini(&compress);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index df9f57766626..5d2c3372ff99 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -34,7 +34,6 @@ struct i915_gpu_state {
ktime_t boottime;
ktime_t uptime;
unsigned long capture;
- unsigned long epoch;
struct drm_i915_private *i915;
@@ -74,6 +73,10 @@ struct i915_gpu_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
+ u32 gtt_cache;
+ u32 aux_err; /* gen12 */
+ u32 sfc_done[GEN12_SFC_DONE_MAX]; /* gen12 */
+ u32 gam_done; /* gen12 */
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
@@ -85,7 +88,6 @@ struct i915_gpu_state {
/* Software tracked state */
bool idle;
- unsigned long hangcheck_timestamp;
int num_requests;
u32 reset_count;
@@ -118,7 +120,6 @@ struct i915_gpu_state {
struct drm_i915_error_context {
char comm[TASK_COMM_LEN];
pid_t pid;
- u32 hw_id;
int active;
int guilty;
struct i915_sched_attr sched_attr;
@@ -127,6 +128,7 @@ struct i915_gpu_state {
struct drm_i915_error_object {
u64 gtt_offset;
u64 gtt_size;
+ u32 gtt_page_sizes;
int num_pages;
int page_count;
int unused;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 37e3dd3c1a9d..dae00f7dd7df 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/circ_buf.h>
-#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
@@ -46,6 +45,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
+#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -149,30 +149,24 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = {
};
static const u32 hpd_icp[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
- [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
- [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
- [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
-};
-
-static const u32 hpd_mcc[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
+ [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};
static const u32 hpd_tgp[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP,
- [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP,
- [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP,
- [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP,
- [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP,
- [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP,
- [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
+ [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
+ [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
+ [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
+ [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
@@ -327,180 +321,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
-static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
-{
- WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
-
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
-}
-
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
-
- spin_lock_irq(&gt->irq_lock);
-
- while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
- ;
-
- dev_priv->gt_pm.rps.pm_iir = 0;
-
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
-
- spin_lock_irq(&gt->irq_lock);
- gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS);
- dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&gt->irq_lock);
- WARN_ON_ONCE(rps->pm_iir);
-
- if (INTEL_GEN(dev_priv) >= 11)
- WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
- else
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-
- rps->interrupts_enabled = true;
- gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events);
-
- spin_unlock_irq(&gt->irq_lock);
-}
-
-u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
-{
- return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
-
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- struct intel_gt *gt = &dev_priv->gt;
-
- if (!READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&gt->irq_lock);
- rps->interrupts_enabled = false;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
- gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(dev_priv);
-
- /* Now that we will not be generating any more work, flush any
- * outstanding tasks. As we are called on the RPS idle path,
- * we will reset the GPU to minimum frequencies, so the current
- * state of the worker can be discarded.
- */
- cancel_work_sync(&rps->work);
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-void gen9_reset_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen9_enable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- if (!guc->interrupts.enabled) {
- WARN_ON_ONCE(intel_uncore_read(gt->uncore,
- gen6_pm_iir(gt->i915)) &
- gt->pm_guc_events);
- guc->interrupts.enabled = true;
- gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
- }
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen9_disable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- guc->interrupts.enabled = false;
-
- gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(gt->i915);
-
- gen9_reset_guc_interrupts(guc);
-}
-
-void gen11_reset_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen11_enable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- if (!guc->interrupts.enabled) {
- u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
-
- WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
- guc->interrupts.enabled = true;
- }
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen11_disable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- guc->interrupts.enabled = false;
-
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(gt->i915);
-
- gen11_reset_guc_interrupts(guc);
-}
-
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -942,14 +762,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
+ struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+ enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
@@ -992,7 +812,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
- position = __intel_get_crtc_scanline(intel_crtc);
+ position = __intel_get_crtc_scanline(crtc);
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
@@ -1072,199 +892,6 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
return position;
}
-static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 busy_up, busy_down, max_avg, min_avg;
- u8 new_delay;
-
- spin_lock(&mchdev_lock);
-
- intel_uncore_write16(uncore,
- MEMINTRSTS,
- intel_uncore_read(uncore, MEMINTRSTS));
-
- new_delay = dev_priv->ips.cur_delay;
-
- intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
- busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
- busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
- max_avg = intel_uncore_read(uncore, RCBMAXAVG);
- min_avg = intel_uncore_read(uncore, RCBMINAVG);
-
- /* Handle RCS change request from hw */
- if (busy_up > max_avg) {
- if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
- new_delay = dev_priv->ips.cur_delay - 1;
- if (new_delay < dev_priv->ips.max_delay)
- new_delay = dev_priv->ips.max_delay;
- } else if (busy_down < min_avg) {
- if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
- new_delay = dev_priv->ips.cur_delay + 1;
- if (new_delay > dev_priv->ips.min_delay)
- new_delay = dev_priv->ips.min_delay;
- }
-
- if (ironlake_set_drps(dev_priv, new_delay))
- dev_priv->ips.cur_delay = new_delay;
-
- spin_unlock(&mchdev_lock);
-
- return;
-}
-
-static void vlv_c0_read(struct drm_i915_private *dev_priv,
- struct intel_rps_ei *ei)
-{
- ei->ktime = ktime_get_raw();
- ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
- ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
-}
-
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
-{
- memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
-}
-
-static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- const struct intel_rps_ei *prev = &rps->ei;
- struct intel_rps_ei now;
- u32 events = 0;
-
- if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
- return 0;
-
- vlv_c0_read(dev_priv, &now);
-
- if (prev->ktime) {
- u64 time, c0;
- u32 render, media;
-
- time = ktime_us_delta(now.ktime, prev->ktime);
-
- time *= dev_priv->czclk_freq;
-
- /* Workload can be split between render + media,
- * e.g. SwapBuffers being blitted in X after being rendered in
- * mesa. To account for this we need to combine both engines
- * into our activity counter.
- */
- render = now.render_c0 - prev->render_c0;
- media = now.media_c0 - prev->media_c0;
- c0 = max(render, media);
- c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
-
- if (c0 > time * rps->power.up_threshold)
- events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * rps->power.down_threshold)
- events = GEN6_PM_RP_DOWN_THRESHOLD;
- }
-
- rps->ei = now;
- return events;
-}
-
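vlv_wa_c0_ei() above is the WaGsvRC0ResidencyMethod:vlv path: instead of trusting the hardware up/down threshold interrupts, it samples render and media C0 residency on each EI-expired interrupt and derives the up/down event in software. A simplified sketch of that comparison, assuming counters that count microseconds of residency and percentage thresholds (the driver's counters tick at czclk, hence the extra scaling it performs):

#include <stdint.h>

enum c0_event { C0_NONE, C0_UP, C0_DOWN };

struct c0_sample { uint64_t time_us, render_c0, media_c0; };

static enum c0_event
classify_c0(const struct c0_sample *prev, const struct c0_sample *now,
	    unsigned int up_pct, unsigned int down_pct)
{
	uint64_t render = now->render_c0 - prev->render_c0;
	uint64_t media = now->media_c0 - prev->media_c0;
	/* work may be split across render and media (render + blit),
	 * so take the busier of the two engines */
	uint64_t busy = render > media ? render : media;
	uint64_t window = now->time_us - prev->time_us;

	if (!window)
		return C0_NONE;
	if (busy * 100 > window * up_pct)
		return C0_UP;		/* busier than the up threshold */
	if (busy * 100 < window * down_pct)
		return C0_DOWN;		/* quieter than the down threshold */
	return C0_NONE;
}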
-static void gen6_pm_rps_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, gt_pm.rps.work);
- struct intel_gt *gt = &dev_priv->gt;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- bool client_boost = false;
- int new_delay, adj, min, max;
- u32 pm_iir = 0;
-
- spin_lock_irq(&gt->irq_lock);
- if (rps->interrupts_enabled) {
- pm_iir = fetch_and_zero(&rps->pm_iir);
- client_boost = atomic_read(&rps->num_waiters);
- }
- spin_unlock_irq(&gt->irq_lock);
-
- /* Make sure we didn't queue anything we're not going to process. */
- WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
- if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
- goto out;
-
- mutex_lock(&rps->lock);
-
- pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
-
- adj = rps->last_adj;
- new_delay = rps->cur_freq;
- min = rps->min_freq_softlimit;
- max = rps->max_freq_softlimit;
- if (client_boost)
- max = rps->max_freq;
- if (client_boost && new_delay < rps->boost_freq) {
- new_delay = rps->boost_freq;
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (adj > 0)
- adj *= 2;
- else /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
-
- if (new_delay >= rps->max_freq_softlimit)
- adj = 0;
- } else if (client_boost) {
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
- if (rps->cur_freq > rps->efficient_freq)
- new_delay = rps->efficient_freq;
- else if (rps->cur_freq > rps->min_freq_softlimit)
- new_delay = rps->min_freq_softlimit;
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
- if (adj < 0)
- adj *= 2;
- else /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
-
- if (new_delay <= rps->min_freq_softlimit)
- adj = 0;
- } else { /* unknown event */
- adj = 0;
- }
-
- rps->last_adj = adj;
-
- /*
- * Limit deboosting and boosting to keep ourselves at the extremes
- * when in the respective power modes (i.e. slowly decrease frequencies
- * while in the HIGH_POWER zone and slowly increase frequencies while
- * in the LOW_POWER zone). On idle, we will hit the timeout and drop
- * to the next level quickly, and conversely if busy we expect to
- * hit a waitboost and rapidly switch into max power.
- */
- if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
- (adj > 0 && rps->power.mode == LOW_POWER))
- rps->last_adj = 0;
-
- /* sysfs frequency interfaces may have snuck in while servicing the
- * interrupt
- */
- new_delay += adj;
- new_delay = clamp_t(int, new_delay, min, max);
-
- if (intel_set_rps(dev_priv, new_delay)) {
- DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
- rps->last_adj = 0;
- }
-
- mutex_unlock(&rps->lock);
-
-out:
- /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- spin_lock_irq(&gt->irq_lock);
- if (rps->interrupts_enabled)
- gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-
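gen6_pm_rps_work() above leaves the interrupt file along with the rest of the RPS bookkeeping; the remaining call sites below now go through &dev_priv->gt.rps. Its core is an adaptive step: double the step while the hardware keeps asking to move in the same direction, jump straight to the boost frequency on a client boost, fall back on a down timeout, and clamp to the soft limits. A user-space sketch of that policy with hypothetical names (the CHV even-value and power-zone refinements are omitted):

#include <stdbool.h>

enum rps_event { EV_NONE, EV_UP_THRESHOLD, EV_DOWN_THRESHOLD, EV_DOWN_TIMEOUT };

struct rps_model {
	int cur_freq, min_soft, max_soft, boost_freq, efficient_freq;
	int last_adj;
};

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static int rps_next_freq(struct rps_model *rps, enum rps_event ev, bool boost)
{
	int adj = rps->last_adj;
	int new_freq = rps->cur_freq;

	if (boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;	/* jump straight to boost */
		adj = 0;
	} else if (ev == EV_UP_THRESHOLD) {
		adj = adj > 0 ? adj * 2 : 1;	/* accelerate upwards */
	} else if (ev == EV_DOWN_TIMEOUT) {
		new_freq = rps->efficient_freq;	/* idle: drop to the efficient point */
		adj = 0;
	} else if (ev == EV_DOWN_THRESHOLD) {
		adj = adj < 0 ? adj * 2 : -1;	/* accelerate downwards */
	} else {
		adj = 0;			/* unknown event: stand still */
	}

	rps->last_adj = adj;
	rps->cur_freq = clampi(new_freq + adj, rps->min_soft, rps->max_soft);
	return rps->cur_freq;
}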
/**
* ivybridge_parity_work - Workqueue called when a parity error interrupt
* occurred.
@@ -1401,11 +1028,11 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_A:
- return val & ICP_DDIA_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
case HPD_PORT_B:
- return val & ICP_DDIB_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
case HPD_PORT_C:
- return val & TGP_DDIC_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
default:
return false;
}
@@ -1427,20 +1054,6 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
-static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
-{
- switch (pin) {
- case HPD_PORT_A:
- return val & ICP_DDIA_HPD_LONG_DETECT;
- case HPD_PORT_B:
- return val & ICP_DDIB_HPD_LONG_DETECT;
- case HPD_PORT_C:
- return val & TGP_DDIC_HPD_LONG_DETECT;
- default:
- return false;
- }
-}
-
static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1652,54 +1265,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
res1, res2);
}
-/* The RPS events need forcewake, so we add them to a work queue and mask their
- * IMR bits until the work is done. Other interrupts can be processed without
- * the work queue. */
-void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_rps *rps = &i915->gt_pm.rps;
- const u32 events = i915->pm_rps_events & pm_iir;
-
- lockdep_assert_held(&gt->irq_lock);
-
- if (unlikely(!events))
- return;
-
- gen6_gt_pm_mask_irq(gt, events);
-
- if (!rps->interrupts_enabled)
- return;
-
- rps->pm_iir |= events;
- schedule_work(&rps->work);
-}
-
-void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- struct intel_gt *gt = &dev_priv->gt;
-
- if (pm_iir & dev_priv->pm_rps_events) {
- spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events);
- if (rps->interrupts_enabled) {
- rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
- schedule_work(&rps->work);
- }
- spin_unlock(&gt->irq_lock);
- }
-
- if (INTEL_GEN(dev_priv) >= 8)
- return;
-
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
-
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
-}
-
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -1716,7 +1281,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- int pipe;
+ enum pipe pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1741,6 +1306,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
status_mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
+ default:
case PIPE_A:
iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
@@ -2009,7 +1575,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (gt_iir)
gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2136,7 +1702,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- int pipe;
+ enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
@@ -2222,7 +1788,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- int pipe;
+ enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
@@ -2256,19 +1822,35 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
-static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
- const u32 *pins)
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- u32 ddi_hotplug_trigger;
- u32 tc_hotplug_trigger;
+ u32 ddi_hotplug_trigger, tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
+ bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
+ const u32 *pins;
- if (HAS_PCH_MCC(dev_priv)) {
+ if (HAS_PCH_TGP(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
+ tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
+ pins = hpd_tgp;
+ } else if (HAS_PCH_JSP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = 0;
+ pins = hpd_tgp;
+ } else if (HAS_PCH_MCC(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+ tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
+ pins = hpd_icp;
} else {
+ WARN(!HAS_PCH_ICP(dev_priv),
+ "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
+
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
+ pins = hpd_icp;
}
if (ddi_hotplug_trigger) {
@@ -2292,44 +1874,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger,
dig_hotplug_reg, pins,
- icp_tc_port_hotplug_long_detect);
- }
-
- if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
-
- if (pch_iir & SDE_GMBUS_ICP)
- gmbus_irq_handler(dev_priv);
-}
-
-static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
-{
- u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
- u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
- u32 pin_mask = 0, long_mask = 0;
-
- if (ddi_hotplug_trigger) {
- u32 dig_hotplug_reg;
-
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
- I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
-
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- ddi_hotplug_trigger,
- dig_hotplug_reg, hpd_tgp,
- tgp_ddi_port_hotplug_long_detect);
- }
-
- if (tc_hotplug_trigger) {
- u32 dig_hotplug_reg;
-
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
- I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
-
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- tc_hotplug_trigger,
- dig_hotplug_reg, hpd_tgp,
- tgp_tc_port_hotplug_long_detect);
+ tc_port_hotplug_long_detect);
}
if (pin_mask)
@@ -2434,7 +1979,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
}
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
- ironlake_rps_change_irq_handler(dev_priv);
+ gen5_rps_irq_handler(&dev_priv->gt.rps);
}
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
@@ -2539,7 +2084,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (pm_iir) {
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
}
}
@@ -2616,10 +2161,16 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
u32 mask;
if (INTEL_GEN(dev_priv) >= 12)
- /* TODO: Add AUX entries for USBC */
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
- TGL_DE_PORT_AUX_DDIC;
+ TGL_DE_PORT_AUX_DDIC |
+ TGL_DE_PORT_AUX_USBC1 |
+ TGL_DE_PORT_AUX_USBC2 |
+ TGL_DE_PORT_AUX_USBC3 |
+ TGL_DE_PORT_AUX_USBC4 |
+ TGL_DE_PORT_AUX_USBC5 |
+ TGL_DE_PORT_AUX_USBC6;
+
mask = GEN8_AUX_CHANNEL_A;
if (INTEL_GEN(dev_priv) >= 9)
@@ -2638,7 +2189,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(dev_priv) >= 11)
+ return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
+ else if (INTEL_GEN(dev_priv) >= 9)
return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2655,11 +2208,21 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (iir & GEN8_DE_EDP_PSR) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
+ u32 psr_iir;
+ i915_reg_t iir_reg;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
+ else
+ iir_reg = EDP_PSR_IIR;
+
+ psr_iir = I915_READ(iir_reg);
+ I915_WRITE(iir_reg, psr_iir);
+
+ if (psr_iir)
+ found = true;
intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
- found = true;
}
if (!found)
@@ -2780,12 +2343,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
- tgp_irq_handler(dev_priv, iir);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
- icp_irq_handler(dev_priv, iir, hpd_mcc);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir, hpd_icp);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_handler(dev_priv, iir);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
@@ -2894,9 +2453,11 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
-static irqreturn_t gen11_irq_handler(int irq, void *arg)
+static __always_inline irqreturn_t
+__gen11_irq_handler(struct drm_i915_private * const i915,
+ u32 (*intr_disable)(void __iomem * const regs),
+ void (*intr_enable)(void __iomem * const regs))
{
- struct drm_i915_private * const i915 = arg;
void __iomem * const regs = i915->uncore.regs;
struct intel_gt *gt = &i915->gt;
u32 master_ctl;
@@ -2905,9 +2466,9 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
- master_ctl = gen11_master_intr_disable(regs);
+ master_ctl = intr_disable(regs);
if (!master_ctl) {
- gen11_master_intr_enable(regs);
+ intr_enable(regs);
return IRQ_NONE;
}
@@ -2929,13 +2490,20 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
- gen11_master_intr_enable(regs);
+ intr_enable(regs);
gen11_gu_misc_irq_handler(gt, gu_misc_iir);
return IRQ_HANDLED;
}
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
+{
+ return __gen11_irq_handler(arg,
+ gen11_master_intr_disable,
+ gen11_master_intr_enable);
+}
+
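Splitting the body into __gen11_irq_handler(), which takes the master-interrupt disable/enable routines as parameters and is __always_inline, lets each thin wrapper compile into straight-line code with direct calls rather than indirect branches in the interrupt path. A compilable sketch of the idiom, with stand-in names:

#include <stdint.h>

static uint32_t master_disable_a(void) { return 0x1; }
static void     master_enable_a(void)  { }

static __attribute__((always_inline)) inline int
irq_handler_body(uint32_t (*disable)(void), void (*enable)(void))
{
	uint32_t master_ctl = disable();	/* ack + mask at the top level */

	if (!master_ctl) {
		enable();
		return 0;			/* IRQ_NONE */
	}

	/* ... dispatch per-domain handlers based on master_ctl ... */

	enable();				/* re-enable on the way out */
	return 1;				/* IRQ_HANDLED */
}

/* Thin wrapper: the compiler specialises the body for this pair of callbacks. */
static int irq_handler_a(void)
{
	return irq_handler_body(master_disable_a, master_enable_a);
}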
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -2952,12 +2520,18 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-int i945gm_enable_vblank(struct drm_crtc *crtc)
+int i915gm_enable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- if (dev_priv->i945gm_vblank.enabled++ == 0)
- schedule_work(&dev_priv->i945gm_vblank.work);
+ /*
+ * Vblank interrupts fail to wake the device up from C2+.
+ * Disabling render clock gating during C-states avoids
+ * the problem. There is a small power cost so we do this
+ * only when vblank interrupts are actually enabled.
+ */
+ if (dev_priv->vblank_enabled++ == 0)
+ I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -3030,14 +2604,14 @@ void i8xx_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void i945gm_disable_vblank(struct drm_crtc *crtc)
+void i915gm_disable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
i8xx_disable_vblank(crtc);
- if (--dev_priv->i945gm_vblank.enabled == 0)
- schedule_work(&dev_priv->i945gm_vblank.work);
+ if (--dev_priv->vblank_enabled == 0)
+ I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
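The i945gm work-queue and pm_qos machinery (removed further down) is replaced by a counted toggle: the render clock-gating workaround is applied on the first vblank enable and undone on the last disable, inline in the enable/disable paths. The counting pattern, with hypothetical apply/remove helpers:

/* Illustrative only: apply a costly workaround only while at least one
 * vblank consumer is active, mirroring i915gm_enable/disable_vblank(). */
struct vblank_model {
	unsigned int enabled;	/* number of active vblank users */
};

static void apply_workaround(void)  { /* e.g. disable render clock gating */ }
static void remove_workaround(void) { /* restore the power-saving default */ }

static void model_enable_vblank(struct vblank_model *m)
{
	if (m->enabled++ == 0)		/* 0 -> 1 transition */
		apply_workaround();
}

static void model_disable_vblank(struct vblank_model *m)
{
	if (--m->enabled == 0)		/* 1 -> 0 transition */
		remove_workaround();
}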
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -3076,60 +2650,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void i945gm_vblank_work_func(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, i945gm_vblank.work);
-
- /*
- * Vblank interrupts fail to wake up the device from C3,
- * hence we want to prevent C3 usage while vblank interrupts
- * are enabled.
- */
- pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
- READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
- dev_priv->i945gm_vblank.c3_disable_latency :
- PM_QOS_DEFAULT_VALUE);
-}
-
-static int cstate_disable_latency(const char *name)
-{
- const struct cpuidle_driver *drv;
- int i;
-
- drv = cpuidle_get_driver();
- if (!drv)
- return 0;
-
- for (i = 0; i < drv->state_count; i++) {
- const struct cpuidle_state *state = &drv->states[i];
-
- if (!strcmp(state->name, name))
- return state->exit_latency ?
- state->exit_latency - 1 : 0;
- }
-
- return 0;
-}
-
-static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
-{
- INIT_WORK(&dev_priv->i945gm_vblank.work,
- i945gm_vblank_work_func);
-
- dev_priv->i945gm_vblank.c3_disable_latency =
- cstate_disable_latency("C3");
- pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-}
-
-static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
-{
- cancel_work_sync(&dev_priv->i945gm_vblank.work);
- pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
-}
-
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3246,7 +2766,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- int pipe;
+ enum pipe pipe;
gen8_master_intr_disable(dev_priv->uncore.regs);
@@ -3271,7 +2791,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- int pipe;
+ enum pipe pipe;
gen11_master_intr_disable(dev_priv->uncore.regs);
@@ -3279,8 +2799,23 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder trans;
+
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ enum intel_display_power_domain domain;
+
+ domain = POWER_DOMAIN_TRANSCODER(trans);
+ if (!intel_display_power_is_enabled(dev_priv, domain))
+ continue;
+
+ intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
+ intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
+ }
+ } else {
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ }
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3431,42 +2966,44 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
}
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
+ u32 sde_ddi_mask, u32 sde_tc_mask,
+ u32 ddi_enable_mask, u32 tc_enable_mask,
+ const u32 *pins)
{
u32 hotplug_irqs, enabled_irqs;
- hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+ hotplug_irqs = sde_ddi_mask | sde_tc_mask;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
- icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
- ICP_TC_HPD_ENABLE_MASK);
+ icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
}
+/*
+ * EHL doesn't need most of gen11_hpd_irq_setup; it handles only the
+ * equivalent of SDE.
+ */
static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, enabled_irqs;
-
- hotplug_irqs = SDE_DDI_MASK_TGP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
-
- icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ icp_hpd_irq_setup(dev_priv,
+ SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
+ hpd_icp);
}
-static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+/*
+ * JSP behaves exactly the same as MCC above except that port C is mapped to
+ * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's
+ * masks & tables rather than ICP's masks & tables.
+ */
+static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, enabled_irqs;
-
- hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
-
- icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
- TGP_TC_HPD_ENABLE_MASK);
+ icp_hpd_irq_setup(dev_priv,
+ SDE_DDI_MASK_TGP, 0,
+ TGP_DDI_HPD_ENABLE_MASK, 0,
+ hpd_tgp);
}
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3506,9 +3043,13 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
- tgp_hpd_irq_setup(dev_priv);
+ icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
+ TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
+ ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3684,7 +3225,6 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
if (IS_HASWELL(dev_priv)) {
gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -3794,8 +3334,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder trans;
+
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ enum intel_display_power_domain domain;
+
+ domain = POWER_DOMAIN_TRANSCODER(trans);
+ if (!intel_display_power_is_enabled(dev_priv, domain))
+ continue;
+
+ gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
+ }
+ } else {
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ }
for_each_pipe(dev_priv, pipe) {
dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
@@ -3853,8 +3406,11 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_PCH_TGP(dev_priv))
icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
TGP_TC_HPD_ENABLE_MASK);
- else if (HAS_PCH_MCC(dev_priv))
+ else if (HAS_PCH_JSP(dev_priv))
icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ else if (HAS_PCH_MCC(dev_priv))
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE(PORT_TC1));
else
icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
ICP_TC_HPD_ENABLE_MASK);
@@ -4317,16 +3873,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
- if (IS_I945GM(dev_priv))
- i945gm_vblank_work_init(dev_priv);
-
intel_hpd_init_work(dev_priv);
- INIT_WORK(&rps->work, gen6_pm_rps_work);
-
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
@@ -4335,33 +3885,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
- /* Let's track the enabled rps events */
- if (IS_VALLEYVIEW(dev_priv))
- /* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
- else
- dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
-
- /* We share the register with other engine */
- if (INTEL_GEN(dev_priv) > 9)
- GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
-
- rps->pm_intrmsk_mbz = 0;
-
- /*
- * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- *
- * TODO: verify if this can be reproduced on VLV,CHV.
- */
- if (INTEL_GEN(dev_priv) <= 7)
- rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (INTEL_GEN(dev_priv) >= 8)
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
@@ -4387,8 +3910,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
- if (HAS_PCH_MCC(dev_priv))
- /* EHL doesn't need most of gen11_hpd_irq_setup */
+ if (HAS_PCH_JSP(dev_priv))
+ dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
+ else if (HAS_PCH_MCC(dev_priv))
dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
else if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
@@ -4411,9 +3935,6 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
- if (IS_I945GM(i915))
- i945gm_vblank_work_fini(i915);
-
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}
@@ -4538,10 +4059,10 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
int irq = dev_priv->drm.pdev->irq;
/*
- * FIXME we can get called twice during driver load
- * error handling due to intel_modeset_cleanup()
- * calling us out of sequence. Would be nice if
- * it didn't do that...
+ * FIXME we can get called twice during driver probe
+ * error handling as well as during driver remove due to
+ * intel_modeset_driver_remove() calling us out of sequence.
+ * Would be nice if it didn't do that...
*/
if (!dev_priv->drm.irq_enabled)
return;
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 8e7e6071777e..812c47a9c2d6 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -17,14 +17,8 @@ struct drm_device;
struct drm_display_mode;
struct drm_i915_private;
struct intel_crtc;
-struct intel_crtc;
-struct intel_gt;
-struct intel_guc;
struct intel_uncore;
-void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir);
-void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-
void intel_irq_init(struct drm_i915_private *dev_priv);
void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
@@ -106,12 +100,6 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct intel_guc *guc);
-void gen9_enable_guc_interrupts(struct intel_guc *guc);
-void gen9_disable_guc_interrupts(struct intel_guc *guc);
-void gen11_reset_guc_interrupts(struct intel_guc *guc);
-void gen11_enable_guc_interrupts(struct intel_guc *guc);
-void gen11_disable_guc_interrupts(struct intel_guc *guc);
bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -122,12 +110,12 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
int i8xx_enable_vblank(struct drm_crtc *crtc);
-int i945gm_enable_vblank(struct drm_crtc *crtc);
+int i915gm_enable_vblank(struct drm_crtc *crtc);
int i965_enable_vblank(struct drm_crtc *crtc);
int ilk_enable_vblank(struct drm_crtc *crtc);
int bdw_enable_vblank(struct drm_crtc *crtc);
void i8xx_disable_vblank(struct drm_crtc *crtc);
-void i945gm_disable_vblank(struct drm_crtc *crtc);
+void i915gm_disable_vblank(struct drm_crtc *crtc);
void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 296452f9efe4..1dd1f3652795 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,8 @@ i915_param_named(modeset, int, 0400,
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
- "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
+ "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
+ "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
i915_param_named_unsafe(enable_fbc, int, 0600,
"Enable frame buffer compression for power savings "
@@ -165,7 +166,7 @@ i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-i915_param_named_unsafe(inject_load_failure, uint, 0400,
+i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
@@ -178,6 +179,11 @@ i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
#endif
+#if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)
+i915_param_named_unsafe(fake_lmem_start, ulong, 0600,
+ "Fake LMEM start offset (default: 0)");
+#endif
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
@@ -189,6 +195,8 @@ static __always_inline void _print_param(struct drm_printer *p,
drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
else if (!__builtin_strcmp(type, "unsigned int"))
drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "unsigned long"))
+ drm_printf(p, "i915.%s=%lu\n", name, *(const unsigned long *)x);
else if (!__builtin_strcmp(type, "char *"))
drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
else
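_print_param() is __always_inline and keys on the stringified parameter type with __builtin_strcmp; with literal strings the comparisons fold at compile time and only the matching drm_printf() survives, which is why adding the "unsigned long" branch above is all fake_lmem_start needs. A standalone sketch of the idiom (the names and macro layer here are illustrative, not the driver's):

#include <stdio.h>

static __attribute__((always_inline)) inline void
print_typed(const char *name, const char *type, const void *x)
{
	if (!__builtin_strcmp(type, "int"))
		printf("%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned long"))
		printf("%s=%lu\n", name, *(const unsigned long *)x);
	else if (!__builtin_strcmp(type, "char *"))
		printf("%s=%s\n", name, *(const char *const *)x);
	else
		printf("%s=?\n", name);
}

/* #type turns the declared type into the literal the dispatch keys on */
#define PRINT_PARAM(type, var) print_typed(#var, #type, &(var))

int main(void)
{
	unsigned long fake_lmem_start = 0;	/* hypothetical value */
	int reset = 3;

	PRINT_PARAM(unsigned long, fake_lmem_start);
	PRINT_PARAM(int, reset);
	return 0;
}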
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index d29ade3b7de6..31b88f297fbc 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -61,11 +61,12 @@ struct drm_printer;
param(char *, dmc_firmware_path, NULL) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
param(int, edp_vswing, 0) \
- param(int, reset, 2) \
- param(unsigned int, inject_load_failure, 0) \
+ param(int, reset, 3) \
+ param(unsigned int, inject_probe_failure, 0) \
param(int, fastboot, -1) \
param(int, enable_dpcd_backlight, 0) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
+ param(unsigned long, fake_lmem_start, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
param(bool, enable_hangcheck, true) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1974e4c78a43..1bb701d32a5d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/console.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
@@ -118,6 +117,14 @@
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}
+#define TGL_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = IVB_CURSOR_B_OFFSET, \
+ [PIPE_C] = IVB_CURSOR_C_OFFSET, \
+ [PIPE_D] = TGL_CURSOR_D_OFFSET, \
+ }
+
#define I9XX_COLORS \
.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
@@ -144,10 +151,13 @@
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
+#define GEN_DEFAULT_REGIONS \
+ .memory_regions = REGION_SMEM | REGION_STOLEN
+
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -161,11 +171,12 @@
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define I845_FEATURES \
GEN(2), \
- .num_pipes = 1, \
+ .pipe_mask = BIT(PIPE_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -178,7 +189,8 @@
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i830_info = {
I830_FEATURES,
@@ -203,7 +215,7 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
@@ -212,7 +224,8 @@ static const struct intel_device_info intel_i865g_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
@@ -287,7 +300,7 @@ static const struct intel_device_info intel_pineview_m_info = {
#define GEN4_FEATURES \
GEN(4), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
@@ -297,7 +310,8 @@ static const struct intel_device_info intel_pineview_m_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I965_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
@@ -337,7 +351,7 @@ static const struct intel_device_info intel_gm45_info = {
#define GEN5_FEATURES \
GEN(5), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
@@ -347,7 +361,8 @@ static const struct intel_device_info intel_gm45_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
@@ -363,7 +378,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
#define GEN6_FEATURES \
GEN(6), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -377,7 +392,8 @@ static const struct intel_device_info intel_ironlake_m_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
@@ -411,7 +427,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
- .num_pipes = 3, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -420,12 +436,13 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
@@ -462,7 +479,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
- .num_pipes = 0, /* legal, last one wins */
+ .pipe_mask = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
@@ -470,13 +487,13 @@ static const struct intel_device_info intel_valleyview_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .num_pipes = 2,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -486,6 +503,7 @@ static const struct intel_device_info intel_valleyview_info = {
I9XX_CURSOR_OFFSETS,
I965_COLORS,
GEN_DEFAULT_PAGE_SIZES,
+ GEN_DEFAULT_REGIONS,
};
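Throughout these i915_pci.c hunks the device-info tables switch from a num_pipes count to a pipe_mask built from BIT(PIPE_x), which also copes with non-contiguous pipe populations. The pipe count becomes a popcount and per-pipe iteration a walk over the set bits; a small standalone sketch:

/*
 * Illustrative only: deriving the old num_pipes and a per-pipe loop from
 * a pipe_mask, as the device-info tables now describe displays.
 */
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D, MAX_PIPES };
#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D);
	int num_pipes = __builtin_popcount(pipe_mask);	/* replaces .num_pipes */

	printf("%d pipes\n", num_pipes);
	for (enum pipe p = PIPE_A; p < MAX_PIPES; p++)
		if (pipe_mask & BIT(p))
			printf("pipe %c present\n", 'A' + p);
	return 0;
}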
#define G75_FEATURES \
@@ -560,7 +578,7 @@ static const struct intel_device_info intel_broadwell_gt3_info = {
static const struct intel_device_info intel_cherryview_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .num_pipes = 3,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.display.has_hotplug = 1,
.is_lp = 1,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -570,7 +588,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_rps = true,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
@@ -580,6 +598,7 @@ static const struct intel_device_info intel_cherryview_info = {
CHV_CURSOR_OFFSETS,
CHV_COLORS,
GEN_DEFAULT_PAGE_SIZES,
+ GEN_DEFAULT_REGIONS,
};
#define GEN9_DEFAULT_PAGE_SIZES \
@@ -593,6 +612,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
.has_gt_uc = 1, \
+ .display.has_hdcp = 1, \
.display.has_ipc = 1, \
.ddb_size = 896
@@ -631,11 +651,12 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.is_lp = 1, \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .num_pipes = 3, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_fbc = 1, \
+ .display.has_hdcp = 1, \
.display.has_psr = 1, \
.has_runtime_pm = 1, \
.display.has_csr = 1, \
@@ -654,7 +675,8 @@ static const struct intel_device_info intel_skylake_gt4_info = {
HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
- GEN9_DEFAULT_PAGE_SIZES
+ GEN9_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
@@ -715,6 +737,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
GEN9_FEATURES, \
GEN(10), \
.ddb_size = 1024, \
+ .display.has_dsc = 1, \
.has_coherent_ggtt = false, \
GLK_COLORS
@@ -787,18 +810,25 @@ static const struct intel_device_info intel_elkhartlake_info = {
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
- .has_global_mocs = 1
+ TGL_CURSOR_OFFSETS, \
+ .has_global_mocs = 1, \
+ .display.has_dsb = 1
static const struct intel_device_info intel_tigerlake_12_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
- .num_pipes = 4,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ .has_rps = false, /* XXX disabled for debugging */
};
+#define GEN12_DGFX_FEATURES \
+ GEN12_FEATURES, \
+ .is_dgfx = 1
+
#undef GEN
#undef PLATFORM
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e42b86827d6b..65d7c2e599de 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -196,8 +196,11 @@
#include <linux/uuid.h>
#include "gem/i915_gem_context.h"
-#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_perf.h"
@@ -215,6 +218,7 @@
#include "oa/i915_oa_cflgt3.h"
#include "oa/i915_oa_cnl.h"
#include "oa/i915_oa_icl.h"
+#include "oa/i915_oa_tgl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -291,6 +295,7 @@ static u32 i915_perf_stream_paranoid = true;
/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK 0x3f
+#define OAREPORT_REASON_MASK_EXTENDED 0x7f
#define OAREPORT_REASON_SHIFT 19
#define OAREPORT_REASON_TIMER (1<<0)
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
@@ -336,17 +341,24 @@ static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
};
+static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
+ [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
+};
+
#define SAMPLE_OA_REPORT (1<<0)
/**
* struct perf_open_properties - for validated properties given to open a stream
* @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
* @single_context: Whether a single or all gpu contexts should be monitored
+ * @hold_preemption: Whether preemption is disabled for the filtered
+ * context
* @ctx_handle: A gem ctx handle for use with @single_context
* @metrics_set: An ID for an OA unit metric set advertised via sysfs
* @oa_format: An OA unit HW report format
* @oa_periodic: Whether to enable periodic OA unit sampling
* @oa_period_exponent: The OA unit sampling period is derived from this
+ * @engine: The engine (typically rcs0) being monitored by the OA unit
*
* As read_properties_unlocked() enumerates and validates the properties given
* to open a stream of metrics the configuration is built up in the structure
@@ -356,6 +368,7 @@ struct perf_open_properties {
u32 sample_flags;
u64 single_context:1;
+ u64 hold_preemption:1;
u64 ctx_handle;
/* OA sampling state */
@@ -363,69 +376,74 @@ struct perf_open_properties {
int oa_format;
bool oa_periodic;
int oa_period_exponent;
+
+ struct intel_engine_cs *engine;
+};
+
+struct i915_oa_config_bo {
+ struct llist_node node;
+
+ struct i915_oa_config *oa_config;
+ struct i915_vma *vma;
};
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
-static void free_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
+void i915_oa_config_release(struct kref *ref)
{
- if (!PTR_ERR(oa_config->flex_regs))
- kfree(oa_config->flex_regs);
- if (!PTR_ERR(oa_config->b_counter_regs))
- kfree(oa_config->b_counter_regs);
- if (!PTR_ERR(oa_config->mux_regs))
- kfree(oa_config->mux_regs);
- kfree(oa_config);
-}
+ struct i915_oa_config *oa_config =
+ container_of(ref, typeof(*oa_config), ref);
-static void put_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
-{
- if (!atomic_dec_and_test(&oa_config->ref_count))
- return;
+ kfree(oa_config->flex_regs);
+ kfree(oa_config->b_counter_regs);
+ kfree(oa_config->mux_regs);
- free_oa_config(dev_priv, oa_config);
+ kfree_rcu(oa_config, rcu);
}
-static int get_oa_config(struct drm_i915_private *dev_priv,
- int metrics_set,
- struct i915_oa_config **out_config)
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
- int ret;
+ struct i915_oa_config *oa_config;
- if (metrics_set == 1) {
- *out_config = &dev_priv->perf.test_config;
- atomic_inc(&dev_priv->perf.test_config.ref_count);
- return 0;
- }
+ rcu_read_lock();
+ if (metrics_set == 1)
+ oa_config = &perf->test_config;
+ else
+ oa_config = idr_find(&perf->metrics_idr, metrics_set);
+ if (oa_config)
+ oa_config = i915_oa_config_get(oa_config);
+ rcu_read_unlock();
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
- if (ret)
- return ret;
+ return oa_config;
+}
- *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
- if (!*out_config)
- ret = -EINVAL;
- else
- atomic_inc(&(*out_config)->ref_count);
+static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
+{
+ i915_oa_config_put(oa_bo->oa_config);
+ i915_vma_put(oa_bo->vma);
+ kfree(oa_bo);
+}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
- return ret;
+ return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
+ GEN12_OAG_OATAILPTR_MASK;
}
static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
- return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
+ return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}
static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
+ struct intel_uncore *uncore = stream->uncore;
+ u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}
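In the hunk above the OA config lifetime moves from a bare atomic ref_count guarded by metrics_lock to a kref released through i915_oa_config_release(), and i915_perf_get_oa_config() now looks the set up under rcu_read_lock() and takes a reference before returning. A simplified user-space model of that lookup-then-pin pattern (a mutex and a plain counter stand in for RCU and the kref; all names are local):

#include <pthread.h>
#include <stdlib.h>

struct oa_config_model {
	int refcount;		/* the kref in the driver */
	int id;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct oa_config_model *table[16];	/* stand-in for the metrics idr */

static struct oa_config_model *get_oa_config(int metrics_set)
{
	struct oa_config_model *cfg = NULL;

	pthread_mutex_lock(&table_lock);	/* rcu_read_lock() in the driver */
	if (metrics_set >= 0 && metrics_set < 16)
		cfg = table[metrics_set];
	if (cfg)
		cfg->refcount++;		/* pin it before the lock drops */
	pthread_mutex_unlock(&table_lock);

	return cfg;				/* caller owns one reference */
}

static void put_oa_config(struct oa_config_model *cfg)
{
	pthread_mutex_lock(&table_lock);
	if (cfg && --cfg->refcount == 0)
		free(cfg);			/* kfree_rcu() in the driver */
	pthread_mutex_unlock(&table_lock);
}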
@@ -456,7 +474,6 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
*/
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
int report_size = stream->oa_buffer.format_size;
unsigned long flags;
unsigned int aged_idx;
@@ -479,7 +496,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aged_tail = stream->oa_buffer.tails[aged_idx].offset;
aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
- hw_tail = dev_priv->perf.ops.oa_hw_tail_read(stream);
+ hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
* not in report_size steps...
@@ -536,7 +553,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aging_tail = hw_tail;
stream->oa_buffer.aging_timestamp = now;
} else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
+ DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
hw_tail);
}
}
@@ -655,7 +672,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
int report_size = stream->oa_buffer.format_size;
u8 *oa_buf_base = stream->oa_buffer.vaddr;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
@@ -738,9 +755,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* it to userspace...
*/
reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
- OAREPORT_REASON_MASK);
+ (IS_GEN(stream->perf->i915, 12) ?
+ OAREPORT_REASON_MASK_EXTENDED :
+ OAREPORT_REASON_MASK));
if (reason == 0) {
- if (__ratelimit(&dev_priv->perf.spurious_report_rs))
+ if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -755,7 +774,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* Note: that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
*/
- if (!(report32[0] & dev_priv->perf.gen8_valid_ctx_bit))
+ if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
+ INTEL_GEN(stream->perf->i915) <= 11)
ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -789,7 +809,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* switches since it's not-uncommon for periodic samples to
* identify a switch before any 'context switch' report.
*/
- if (!dev_priv->perf.exclusive_stream->ctx ||
+ if (!stream->perf->exclusive_stream->ctx ||
stream->specific_ctx_id == ctx_id ||
stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
reason & OAREPORT_REASON_CTX_SWITCH) {
@@ -798,7 +818,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* While filtering for a single context we avoid
* leaking the IDs of other contexts.
*/
- if (dev_priv->perf.exclusive_stream->ctx &&
+ if (stream->perf->exclusive_stream->ctx &&
stream->specific_ctx_id != ctx_id) {
report32[2] = INVALID_CTX_ID;
}
@@ -822,6 +842,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
+ i915_reg_t oaheadptr;
+
+ oaheadptr = IS_GEN(stream->perf->i915, 12) ?
+ GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
+
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/*
@@ -829,8 +854,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* relative to oa_buf_base so put back here...
*/
head += gtt_offset;
-
- I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
+ intel_uncore_write(uncore, oaheadptr,
+ head & GEN12_OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -864,14 +889,18 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus;
+ i915_reg_t oastatus_reg;
int ret;
if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
+ GEN12_OAG_OASTATUS : GEN8_OASTATUS;
+
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
/*
* We treat OABUFFER_OVERFLOW as a significant error:
@@ -896,14 +925,14 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
stream->period_exponent);
- dev_priv->perf.ops.oa_disable(stream);
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
* reset GEN8_OASTATUS for us
*/
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
}
if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
@@ -911,8 +940,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- I915_WRITE(GEN8_OASTATUS,
- oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+ intel_uncore_write(uncore, oastatus_reg,
+ oastatus & ~GEN8_OASTATUS_REPORT_LOST);
}
return gen8_append_oa_reports(stream, buf, count, offset);
@@ -943,7 +972,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
int report_size = stream->oa_buffer.format_size;
u8 *oa_buf_base = stream->oa_buffer.vaddr;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
@@ -1017,7 +1046,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* copying it to userspace...
*/
if (report32[0] == 0) {
- if (__ratelimit(&dev_priv->perf.spurious_report_rs))
+ if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -1043,9 +1072,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
*/
head += gtt_offset;
- I915_WRITE(GEN7_OASTATUS2,
- ((head & GEN7_OASTATUS2_HEAD_MASK) |
- GEN7_OASTATUS2_MEM_SELECT_GGTT));
+ intel_uncore_write(uncore, GEN7_OASTATUS2,
+ (head & GEN7_OASTATUS2_HEAD_MASK) |
+ GEN7_OASTATUS2_MEM_SELECT_GGTT);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -1075,21 +1104,21 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus1;
int ret;
if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
/* XXX: On Haswell we don't have a safe way to clear oastatus1
* bits while the OA unit is enabled (while the tail pointer
* may be updated asynchronously) so we ignore status bits
* that have already been reported to userspace.
*/
- oastatus1 &= ~dev_priv->perf.gen7_latched_oastatus1;
+ oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
/* We treat OABUFFER_OVERFLOW as a significant error:
*
@@ -1120,10 +1149,10 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
stream->period_exponent);
- dev_priv->perf.ops.oa_disable(stream);
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
}
if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
@@ -1131,7 +1160,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- dev_priv->perf.gen7_latched_oastatus1 |=
+ stream->perf->gen7_latched_oastatus1 |=
GEN7_OASTATUS1_REPORT_LOST;
}
@@ -1196,25 +1225,18 @@ static int i915_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- return dev_priv->perf.ops.read(stream, buf, count, offset);
+ return stream->perf->ops.read(stream, buf, count, offset);
}
static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
struct i915_gem_engines_iter it;
- struct drm_i915_private *i915 = stream->dev_priv;
struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
int err;
- err = i915_mutex_lock_interruptible(&i915->drm);
- if (err)
- return ERR_PTR(err);
-
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- if (ce->engine->class != RENDER_CLASS)
+ if (ce->engine != stream->engine) /* first match! */
continue;
/*
@@ -1229,10 +1251,6 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
}
i915_gem_context_unlock_engines(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err)
- return ERR_PTR(err);
-
return stream->pinned_ctx;
}
@@ -1248,14 +1266,13 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = stream->dev_priv;
struct intel_context *ce;
ce = oa_pin_context(stream);
if (IS_ERR(ce))
return PTR_ERR(ce);
- switch (INTEL_GEN(i915)) {
+ switch (INTEL_GEN(ce->engine->i915)) {
case 7: {
/*
* On Haswell we don't do any post processing of the reports
@@ -1269,7 +1286,11 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 8:
case 9:
case 10:
- if (USES_GUC_SUBMISSION(i915)) {
+ if (intel_engine_in_execlists_submission_mode(ce->engine)) {
+ stream->specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
+ } else {
/*
* When using GuC, the context descriptor we write in
* i915 is read by GuC and rewritten before it's
@@ -1289,31 +1310,23 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
- } else {
- stream->specific_ctx_id_mask =
- (1U << GEN8_CTX_ID_WIDTH) - 1;
- stream->specific_ctx_id =
- upper_32_bits(ce->lrc_desc);
- stream->specific_ctx_id &=
- stream->specific_ctx_id_mask;
}
break;
- case 11: {
+ case 11:
+ case 12: {
stream->specific_ctx_id_mask =
- ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
- ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
- ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
- stream->specific_ctx_id = upper_32_bits(ce->lrc_desc);
- stream->specific_ctx_id &=
- stream->specific_ctx_id_mask;
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
break;
}
default:
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(INTEL_GEN(ce->engine->i915));
}
+ ce->tag = stream->specific_ctx_id_mask;
+
DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
stream->specific_ctx_id,
stream->specific_ctx_id_mask);
@@ -1330,69 +1343,76 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_context *ce;
- stream->specific_ctx_id = INVALID_CTX_ID;
- stream->specific_ctx_id_mask = 0;
-
ce = fetch_and_zero(&stream->pinned_ctx);
if (ce) {
- mutex_lock(&dev_priv->drm.struct_mutex);
+ ce->tag = 0; /* recomputed on next submission after parking */
intel_context_unpin(ce);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
+
+ stream->specific_ctx_id = INVALID_CTX_ID;
+ stream->specific_ctx_id_mask = 0;
}
static void
free_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = stream->dev_priv;
-
- mutex_lock(&i915->drm.struct_mutex);
-
i915_vma_unpin_and_release(&stream->oa_buffer.vma,
I915_VMA_RELEASE_MAP);
- mutex_unlock(&i915->drm.struct_mutex);
-
stream->oa_buffer.vaddr = NULL;
}
+static void
+free_oa_configs(struct i915_perf_stream *stream)
+{
+ struct i915_oa_config_bo *oa_bo, *tmp;
+
+ i915_oa_config_put(stream->oa_config);
+ llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
+ free_oa_config_bo(oa_bo);
+}
+
+static void
+free_noa_wait(struct i915_perf_stream *stream)
+{
+ i915_vma_unpin_and_release(&stream->noa_wait, 0);
+}
+
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
- BUG_ON(stream != dev_priv->perf.exclusive_stream);
+ BUG_ON(stream != perf->exclusive_stream);
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
- dev_priv->perf.exclusive_stream = NULL;
- dev_priv->perf.ops.disable_metric_set(stream);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ perf->exclusive_stream = NULL;
+ perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
if (stream->ctx)
oa_put_render_ctx_id(stream);
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
+ free_noa_wait(stream);
- if (dev_priv->perf.spurious_report_rs.missed) {
+ if (perf->spurious_report_rs.missed) {
DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
- dev_priv->perf.spurious_report_rs.missed);
+ perf->spurious_report_rs.missed);
}
}
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
@@ -1401,13 +1421,14 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
- I915_WRITE(GEN7_OASTATUS2,
- gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
+ intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
+ gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
stream->oa_buffer.head = gtt_offset;
- I915_WRITE(GEN7_OABUFFER, gtt_offset);
+ intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
- I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
+ intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
+ gtt_offset | OABUFFER_SIZE_16M);
/* Mark that we need updated tail pointers to read from... */
stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
@@ -1419,7 +1440,7 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
* already seen since they can't be cleared while periodic
* sampling is enabled.
*/
- dev_priv->perf.gen7_latched_oastatus1 = 0;
+ stream->perf->gen7_latched_oastatus1 = 0;
/* NB: although the OA buffer will initially be allocated
* zeroed via shmfs (and so this memset is redundant when
@@ -1434,25 +1455,22 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
- /* Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
- */
stream->pollin = false;
}
static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- I915_WRITE(GEN8_OASTATUS, 0);
- I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
+ intel_uncore_write(uncore, GEN8_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
stream->oa_buffer.head = gtt_offset;
- I915_WRITE(GEN8_OABUFFER_UDW, 0);
+ intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
/*
* PRM says:
@@ -1462,9 +1480,9 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
* to enable proper functionality of the overflow
* bit."
*/
- I915_WRITE(GEN8_OABUFFER, gtt_offset |
+ intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
- I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
+ intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
@@ -1493,35 +1511,82 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ stream->pollin = false;
+}
+
+static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
+
+ intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
+ gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
+ stream->oa_buffer.head = gtt_offset;
+
+ /*
+ * PRM says:
+ *
+ * "This MMIO must be set before the OATAILPTR
+ * register and after the OAHEADPTR register. This is
+ * to enable proper functionality of the overflow
+ * bit."
+ */
+ intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
+ OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
+ intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
+ gtt_offset & GEN12_OAG_OATAILPTR_MASK);
+
+ /* Mark that we need updated tail pointers to read from... */
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+
+ /*
+ * Reset state used to recognise context switches, affecting which
+ * reports we will forward to userspace while filtering for a single
+ * context.
+ */
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
+
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
+
/*
- * Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
+ * NB: although the OA buffer will initially be allocated
+ * zeroed via shmfs (and so this memset is redundant when
+ * first allocating), we may re-init the OA buffer, either
+ * when re-enabling a stream or in error/reset paths.
+ *
+ * The reason we clear the buffer for each re-init is for the
+ * sanity check in gen8_append_oa_reports() that looks at the
+ * reason field to make sure it's non-zero which relies on
+ * the assumption that new reports are being written to zeroed
+ * memory...
*/
+ memset(stream->oa_buffer.vaddr, 0,
+ stream->oa_buffer.vma->size);
+
stream->pollin = false;
}
static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
struct drm_i915_gem_object *bo;
- struct drm_i915_private *dev_priv = stream->dev_priv;
struct i915_vma *vma;
int ret;
if (WARN_ON(stream->oa_buffer.vma))
return -ENODEV;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
-
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
- bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE);
+ bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
DRM_ERROR("Failed to allocate OA buffer\n");
- ret = PTR_ERR(bo);
- goto unlock;
+ return PTR_ERR(bo);
}
i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
@@ -1541,11 +1606,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
goto err_unpin;
}
- DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
- i915_ggtt_offset(stream->oa_buffer.vma),
- stream->oa_buffer.vaddr);
-
- goto unlock;
+ return 0;
err_unpin:
__i915_vma_unpin(vma);
@@ -1556,55 +1617,389 @@ err_unref:
stream->oa_buffer.vaddr = NULL;
stream->oa_buffer.vma = NULL;
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
-static void config_oa_regs(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg *regs,
- u32 n_regs)
+static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
+ bool save, i915_reg_t reg, u32 offset,
+ u32 dword_count)
+{
+ u32 cmd;
+ u32 d;
+
+ cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
+ if (INTEL_GEN(stream->perf->i915) >= 8)
+ cmd++;
+
+ for (d = 0; d < dword_count; d++) {
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(reg) + 4 * d;
+ *cs++ = intel_gt_scratch_offset(stream->engine->gt,
+ offset) + 4 * d;
+ *cs++ = 0;
+ }
+
+ return cs;
+}
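A note on the cmd++ above, sketched against the i915 command definitions as an assumption rather than quoted from this patch: in the MI_INSTR() encoding the low bits of these opcode constants form the DWORD length field, and the gen8+ forms of the register load/store commands are one dword longer so they can carry a 64-bit address.

/*
 * Illustrative sketch of why "cmd++" selects the gen8 variant (constant
 * values as defined in the i915 headers at the time of this patch):
 *
 *   MI_STORE_REGISTER_MEM      = MI_INSTR(0x24, 1)
 *   MI_STORE_REGISTER_MEM_GEN8 = MI_INSTR(0x24, 2) = MI_STORE_REGISTER_MEM + 1
 *   MI_LOAD_REGISTER_MEM       = MI_INSTR(0x29, 1)
 *   MI_LOAD_REGISTER_MEM_GEN8  = MI_INSTR(0x29, 2) = MI_LOAD_REGISTER_MEM + 1
 *
 * The "+ 1" only bumps the length field, turning the 32-bit-address form
 * of the command into the 64-bit-address form used on gen8 and later.
 */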
+
+static int alloc_noa_wait(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct drm_i915_gem_object *bo;
+ struct i915_vma *vma;
+ const u64 delay_ticks = 0xffffffffffffffff -
+ DIV64_U64_ROUND_UP(
+ atomic64_read(&stream->perf->noa_programming_delay) *
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
+ 1000000ull);
+ const u32 base = stream->engine->mmio_base;
+#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
+ u32 *batch, *ts0, *cs, *jump;
+ int ret, i;
+ enum {
+ START_TS,
+ NOW_TS,
+ DELTA_TS,
+ JUMP_PREDICATE,
+ DELTA_TARGET,
+ N_CS_GPR
+ };
+
+ bo = i915_gem_object_create_internal(i915, 4096);
+ if (IS_ERR(bo)) {
+ DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+ return PTR_ERR(bo);
+ }
+
+ /*
+ * We pin in GGTT because we jump into this buffer, and since multiple
+ * OA config BOs will contain a jump to this address, it needs to stay
+ * fixed for the lifetime of the i915/perf stream.
+ */
+ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
+ if (IS_ERR(batch)) {
+ ret = PTR_ERR(batch);
+ goto err_unpin;
+ }
+
+ /* Save registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, true /* save */, CS_GPR(i),
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+
+ /* First timestamp snapshot location. */
+ ts0 = cs;
+
+ /*
+ * Initial snapshot of the timestamp register to implement the wait.
+ * We work with 32-bit values, so clear out the top 32 bits of the
+ * register because the ALU operates on 64-bit values.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
+
+ /*
+ * This is the location we're going to jump back into until the
+ * required amount of time has passed.
+ */
+ jump = cs;
+
+ /*
+ * Take another snapshot of the timestamp register. Take care to clear
+ * the top 32 bits of CS_GPR(NOW_TS) as we're using it for other
+ * operations below.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
+
+ /*
+ * Do a diff between the two timestamps and store the result back into
+ * CS_GPR(DELTA_TS).
+ */
+ *cs++ = MI_MATH(5);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
+ *cs++ = MI_MATH_SUB;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ /*
+ * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
+ * timestamp has rolled over the 32 bits) into the predicate register
+ * to be used for the predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
+
+ /* Restart from the beginning if we had timestamps roll over. */
+ *cs++ = (INTEL_GEN(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
+ *cs++ = 0;
+
+ /*
+ * Now take the diff between the two previous timestamps and add it to:
+ * ((1 << 64) - 1) - delay (expressed in timestamp ticks)
+ *
+ * When the Carry Flag contains 1 this means the elapsed time is
+ * longer than the expected delay, and we can exit the wait loop.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
+ *cs++ = lower_32_bits(delay_ticks);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
+ *cs++ = upper_32_bits(delay_ticks);
+
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ /*
+ * Transfer the result into the predicate register to be used for the
+ * predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
+
+ /* Predicate the jump. */
+ *cs++ = (INTEL_GEN(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
+ *cs++ = 0;
+
+ /* Restore registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, false /* restore */, CS_GPR(i),
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+
+ /* And return to the ring. */
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
+
+ i915_gem_object_flush_map(bo);
+ i915_gem_object_unpin_map(bo);
+
+ stream->noa_wait = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin_and_release(&vma, 0);
+err_unref:
+ i915_gem_object_put(bo);
+ return ret;
+}
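The loop above relies on a two's-complement trick: DELTA_TARGET is loaded with (2^64 - 1 - delay-in-ticks), so adding the elapsed timestamp delta to it sets the ALU carry flag exactly once the delay has expired, and the inverted carry stored in the predicate stops the jump back. A minimal CPU-side sketch of that arithmetic (not part of the patch; the function and parameter names are invented for illustration):

#include <stdbool.h>
#include <stdint.h>

static bool noa_delay_elapsed(uint64_t start_ts, uint64_t now_ts,
			      uint64_t delay_ns, uint64_t ts_freq_khz)
{
	/* ns * kHz / 1e6 -> timestamp ticks (DIV64_U64_ROUND_UP equivalent) */
	uint64_t ticks = (delay_ns * ts_freq_khz + 999999) / 1000000;
	uint64_t delta_target = UINT64_MAX - ticks;	/* value loaded into CS_GPR(DELTA_TARGET) */
	uint64_t delta = now_ts - start_ts;		/* value computed into CS_GPR(DELTA_TS) */

	/* The 64-bit add wraps (carry flag set) exactly once delta > ticks. */
	return delta + delta_target < delta;
}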
+
+static u32 *write_cs_mi_lri(u32 *cs,
+ const struct i915_oa_reg *reg_data,
+ u32 n_regs)
{
u32 i;
for (i = 0; i < n_regs; i++) {
- const struct i915_oa_reg *reg = regs + i;
+ if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
+ u32 n_lri = min_t(u32,
+ n_regs - i,
+ MI_LOAD_REGISTER_IMM_MAX_REGS);
- I915_WRITE(reg->addr, reg->value);
+ *cs++ = MI_LOAD_REGISTER_IMM(n_lri);
+ }
+ *cs++ = i915_mmio_reg_offset(reg_data[i].addr);
+ *cs++ = reg_data[i].value;
}
+
+ return cs;
}
-static void delay_after_mux(void)
+static int num_lri_dwords(int num_regs)
{
+ int count = 0;
+
+ if (num_regs > 0) {
+ count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
+ count += num_regs * 2;
+ }
+
+ return count;
+}
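A worked example of the sizing helpers above, assuming MI_LOAD_REGISTER_IMM_MAX_REGS is 126 (the per-command limit i915 uses): a 200-register mux config needs two LRI headers plus two payload dwords per register.

/* Worked example for num_lri_dwords(); the 126 limit is an assumption
 * taken from i915's MI_LOAD_REGISTER_IMM_MAX_REGS definition.
 */
static int example_config_dwords(void)
{
	int num_regs = 200;				/* e.g. a large mux config */
	int headers = (num_regs + 126 - 1) / 126;	/* 2 MI_LOAD_REGISTER_IMM headers */
	int payload = num_regs * 2;			/* offset + value per register */

	return headers + payload;			/* 402 dwords before page alignment */
}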
+
+static struct i915_oa_config_bo *
+alloc_oa_config_buffer(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_oa_config_bo *oa_bo;
+ size_t config_length = 0;
+ u32 *cs;
+ int err;
+
+ oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
+ if (!oa_bo)
+ return ERR_PTR(-ENOMEM);
+
+ config_length += num_lri_dwords(oa_config->mux_regs_len);
+ config_length += num_lri_dwords(oa_config->b_counter_regs_len);
+ config_length += num_lri_dwords(oa_config->flex_regs_len);
+ config_length += 3; /* MI_BATCH_BUFFER_START */
+ config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_oa_bo;
+ }
+
+ cs = write_cs_mi_lri(cs,
+ oa_config->mux_regs,
+ oa_config->mux_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->flex_regs,
+ oa_config->flex_regs_len);
+
+ /* Jump into the active wait. */
+ *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8);
+ *cs++ = i915_ggtt_offset(stream->noa_wait);
+ *cs++ = 0;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ oa_bo->vma = i915_vma_instance(obj,
+ &stream->engine->gt->ggtt->vm,
+ NULL);
+ if (IS_ERR(oa_bo->vma)) {
+ err = PTR_ERR(oa_bo->vma);
+ goto err_oa_bo;
+ }
+
+ oa_bo->oa_config = i915_oa_config_get(oa_config);
+ llist_add(&oa_bo->node, &stream->oa_config_bos);
+
+ return oa_bo;
+
+err_oa_bo:
+ i915_gem_object_put(obj);
+err_free:
+ kfree(oa_bo);
+ return ERR_PTR(err);
+}
+
+static struct i915_vma *
+get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
+{
+ struct i915_oa_config_bo *oa_bo;
+
/*
- * It apparently takes a fairly long time for a new MUX
- * configuration to be be applied after these register writes.
- * This delay duration was derived empirically based on the
- * render_basic config but hopefully it covers the maximum
- * configuration latency.
- *
- * As a fallback, the checks in _append_oa_reports() to skip
- * invalid OA reports do also seem to work to discard reports
- * generated before this config has completed - albeit not
- * silently.
- *
- * Unfortunately this is essentially a magic number, since we
- * don't currently know of a reliable mechanism for predicting
- * how long the MUX config will take to apply and besides
- * seeing invalid reports we don't know of a reliable way to
- * explicitly check that the MUX config has landed.
- *
- * It's even possible we've miss characterized the underlying
- * problem - it just seems like the simplest explanation why
- * a delay at this location would mitigate any invalid reports.
+ * Look for the buffer in the already allocated BOs attached
+ * to the stream.
*/
- usleep_range(15000, 20000);
+ llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
+ if (oa_bo->oa_config == oa_config &&
+ memcmp(oa_bo->oa_config->uuid,
+ oa_config->uuid,
+ sizeof(oa_config->uuid)) == 0)
+ goto out;
+ }
+
+ oa_bo = alloc_oa_config_buffer(stream, oa_config);
+ if (IS_ERR(oa_bo))
+ return ERR_CAST(oa_bo);
+
+out:
+ return i915_vma_get(oa_bo->vma);
+}
+
+static int emit_oa_config(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config,
+ struct intel_context *ce)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ vma = get_oa_vma(stream, oa_config);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err_vma_put;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma_unpin;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+ if (err)
+ goto err_add_request;
+
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start, 0,
+ I915_DISPATCH_SECURE);
+err_add_request:
+ i915_request_add(rq);
+err_vma_unpin:
+ i915_vma_unpin(vma);
+err_vma_put:
+ i915_vma_put(vma);
+ return err;
+}
+
+static struct intel_context *oa_context(struct i915_perf_stream *stream)
+{
+ return stream->pinned_ctx ?: stream->engine->kernel_context;
}
static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ struct intel_uncore *uncore = stream->uncore;
/*
* PRM:
@@ -1616,31 +2011,24 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
* count the events from non-render domain. Unit level clock
* gating for RCS should also be disabled.
*/
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN7_DOP_CLOCK_GATE_ENABLE));
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
-
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ GEN7_DOP_CLOCK_GATE_ENABLE, 0);
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
- return 0;
+ return emit_oa_config(stream, stream->oa_config, oa_context(stream));
}
static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
- ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
- GEN7_DOP_CLOCK_GATE_ENABLE));
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN7_DOP_CLOCK_GATE_ENABLE);
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
@@ -1672,14 +2060,11 @@ static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
* in the case that the OA unit has been disabled.
*/
static void
-gen8_update_reg_state_unlocked(struct i915_perf_stream *stream,
- struct intel_context *ce,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
-{
- struct drm_i915_private *i915 = ce->engine->i915;
- u32 ctx_oactxctrl = i915->perf.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
+gen8_update_reg_state_unlocked(const struct intel_context *ce,
+ const struct i915_perf_stream *stream)
+{
+ u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1690,21 +2075,28 @@ gen8_update_reg_state_unlocked(struct i915_perf_stream *stream,
EU_PERF_CNTL5,
EU_PERF_CNTL6,
};
+ u32 *reg_state = ce->lrc_reg_state;
int i;
- CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME);
+ if (IS_GEN(stream->perf->i915, 12)) {
+ u32 format = stream->oa_buffer.format;
- for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
- CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i],
- oa_config_flex_reg(oa_config, flex_regs[i]));
+ reg_state[ctx_oactxctrl + 1] =
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (stream->oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
+ } else {
+ reg_state[ctx_oactxctrl + 1] =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
}
- CTX_REG(reg_state,
- CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- intel_sseu_make_rpcs(i915, &ce->sseu));
+ for (i = 0; !!ctx_flexeu0 && i < ARRAY_SIZE(flex_regs); i++)
+ reg_state[ctx_flexeu0 + i * 2 + 1] =
+ oa_config_flex_reg(stream->oa_config, flex_regs[i]);
+
+ reg_state[CTX_R_PWR_CLK_STATE] =
+ intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu);
}
struct flex {
@@ -1728,7 +2120,7 @@ gen8_store_flex(struct i915_request *rq,
offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
do {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = offset + (flex->offset + 1) * sizeof(u32);
+ *cs++ = offset + flex->offset * sizeof(u32);
*cs++ = 0;
*cs++ = flex->value;
} while (flex++, --count);
@@ -1832,6 +2224,36 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
return err;
}
+static int gen12_emit_oar_config(struct intel_context *ce, bool enable)
+{
+ struct i915_request *rq;
+ u32 *cs;
+ int err = 0;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(ce->engine->mmio_base));
+ *cs++ = _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
+ enable ? GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+out:
+ i915_request_add(rq);
+
+ return err;
+}
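The _MASKED_FIELD() write above works because RING_CONTEXT_CONTROL is a masked register: the upper 16 bits of the payload select which of the lower 16 bits the write may modify, so toggling OAR_CONTEXT_ENABLE leaves the other control bits untouched. A minimal sketch of that encoding, mirroring the shape of i915's helper purely for illustration:

#include <stdint.h>

/* Same shape as i915's _MASKED_FIELD(mask, value). */
static inline uint32_t masked_field(uint32_t mask, uint32_t value)
{
	return (mask << 16) | value;
}

/*
 * enable:  masked_field(BIT, BIT) -> write-enable the bit and set it
 * disable: masked_field(BIT, 0)   -> write-enable the bit and clear it
 * Bits whose mask half is 0 are left untouched by the hardware.
 */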
+
/*
* Manages updating the per-context aspects of the OA stream
* configuration across all contexts.
@@ -1856,24 +2278,22 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
*
* Note: it's only the RCS/Render context that has any OA state.
*/
-static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *i915 = stream->dev_priv;
+ struct drm_i915_private *i915 = stream->perf->i915;
/* The MMIO offsets for Flex EU registers aren't contiguous */
- const u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
-#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N))
+ const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
struct flex regs[] = {
{
GEN8_R_PWR_CLK_STATE,
CTX_R_PWR_CLK_STATE,
},
{
- GEN8_OACTXCONTROL,
- i915->perf.ctx_oactxctrl_offset,
- ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME)
+ IS_GEN(i915, 12) ?
+ GEN12_OAR_OACONTROL : GEN8_OACTXCONTROL,
+ stream->perf->ctx_oactxctrl_offset + 1,
},
{ EU_PERF_CNTL0, ctx_flexeuN(0) },
{ EU_PERF_CNTL1, ctx_flexeuN(1) },
@@ -1885,13 +2305,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
};
#undef ctx_flexeuN
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- int i;
+ struct i915_gem_context *ctx, *cn;
+ size_t array_size = IS_GEN(i915, 12) ? 2 : ARRAY_SIZE(regs);
+ int i, err;
- for (i = 2; i < ARRAY_SIZE(regs); i++)
+ if (IS_GEN(i915, 12)) {
+ u32 format = stream->oa_buffer.format;
+
+ regs[1].value =
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
+ } else {
+ regs[1].value =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
+ }
+
+ for (i = 2; !!ctx_flexeu0 && i < array_size; i++)
regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ lockdep_assert_held(&stream->perf->lock);
/*
* The OA register config is setup through the context image. This image
@@ -1909,16 +2343,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
* context. Contexts idle at the time of reconfiguration are not
* trapped behind the barrier.
*/
- list_for_each_entry(ctx, &i915->contexts.list, link) {
- int err;
-
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
if (ctx == i915->kernel_context)
continue;
- err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
- if (err)
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
+ err = gen8_configure_context(ctx, regs, array_size);
+ if (err) {
+ i915_gem_context_put(ctx);
return err;
+ }
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
}
+ spin_unlock(&i915->gem.contexts.lock);
/*
* After updating all other contexts, we need to modify ourselves.
@@ -1927,14 +2372,13 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
*/
for_each_uabi_engine(engine, i915) {
struct intel_context *ce = engine->kernel_context;
- int err;
if (engine->class != RENDER_CLASS)
continue;
regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
- err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs));
+ err = gen8_modify_self(ce, regs, array_size);
if (err)
return err;
}
@@ -1944,8 +2388,8 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@@ -1971,10 +2415,10 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN_RANGE(dev_priv, 9, 11)) {
- I915_WRITE(GEN8_OA_DEBUG,
- _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
+ if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
+ intel_uncore_write(uncore, GEN8_OA_DEBUG,
+ _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
}
/*
@@ -1982,45 +2426,102 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(stream, oa_config);
+ ret = lrc_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
+ return emit_oa_config(stream, oa_config, oa_context(stream));
+}
+
+static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ int ret;
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+ intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
+ /* Disable clk ratio reports, like previous Gens. */
+ _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
+ /*
+ * If the user didn't require OA reports, instruct the
+ * hardware not to emit ctx switch reports.
+ */
+ (!(stream->sample_flags & SAMPLE_OA_REPORT) ?
+ _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS) :
+ _MASKED_BIT_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS)));
+
+ intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
+ (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
+ (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
+ : 0);
- return 0;
+ /*
+ * Update all contexts prior writing the mux configurations as we need
+ * to make sure all slices/subslices are ON before writing to NOA
+ * registers.
+ */
+ ret = lrc_configure_all_contexts(stream, oa_config);
+ if (ret)
+ return ret;
+
+ /*
+ * For Gen12, performance counters are context
+ * saved/restored. Only enable it for the context that
+ * requested this.
+ */
+ if (stream->ctx) {
+ ret = gen12_emit_oar_config(stream->pinned_ctx,
+ oa_config != NULL);
+ if (ret)
+ return ret;
+ }
+
+ return emit_oa_config(stream, oa_config, oa_context(stream));
}
static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL);
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
static void gen10_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
+
+ /* Reset all contexts' slices/subslices configurations. */
+ lrc_configure_all_contexts(stream, NULL);
+
+ /* Make sure we disable noa to save power. */
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
+}
+
+static void gen12_disable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL);
+
+ /* disable the context save/restore of the OAR counters */
+ if (stream->ctx)
+ gen12_emit_oar_config(stream->pinned_ctx, false);
/* Make sure we disable noa to save power. */
- I915_WRITE(RPM_CONFIG1,
- I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}
static void gen7_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
struct i915_gem_context *ctx = stream->ctx;
u32 ctx_id = stream->specific_ctx_id;
bool periodic = stream->periodic;
@@ -2038,19 +2539,19 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
*/
gen7_init_oa_buffer(stream);
- I915_WRITE(GEN7_OACONTROL,
- (ctx_id & GEN7_OACONTROL_CTX_MASK) |
- (period_exponent <<
- GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
- (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
- (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
- (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
- GEN7_OACONTROL_ENABLE);
+ intel_uncore_write(uncore, GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
}
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 report_format = stream->oa_buffer.format;
/*
@@ -2069,9 +2570,28 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
* filtering and instead filter on the cpu based on the context-id
* field of reports
*/
- I915_WRITE(GEN8_OACONTROL, (report_format <<
- GEN8_OA_REPORT_FORMAT_SHIFT) |
- GEN8_OA_COUNTER_ENABLE);
+ intel_uncore_write(uncore, GEN8_OACONTROL,
+ (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
+ GEN8_OA_COUNTER_ENABLE);
+}
+
+static void gen12_oa_enable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 report_format = stream->oa_buffer.format;
+
+ /*
+ * If we don't want OA reports from the OA buffer, then we don't even
+ * need to program the OAG unit.
+ */
+ if (!(stream->sample_flags & SAMPLE_OA_REPORT))
+ return;
+
+ gen12_init_oa_buffer(stream);
+
+ intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
+ (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
}
/**
@@ -2085,9 +2605,7 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_enable(stream);
if (stream->periodic)
hrtimer_start(&stream->poll_check_timer,
@@ -2097,7 +2615,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct intel_uncore *uncore = &stream->dev_priv->uncore;
+ struct intel_uncore *uncore = stream->uncore;
intel_uncore_write(uncore, GEN7_OACONTROL, 0);
if (intel_wait_for_register(uncore,
@@ -2108,7 +2626,7 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct intel_uncore *uncore = &stream->dev_priv->uncore;
+ struct intel_uncore *uncore = stream->uncore;
intel_uncore_write(uncore, GEN8_OACONTROL, 0);
if (intel_wait_for_register(uncore,
@@ -2117,6 +2635,18 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
DRM_ERROR("wait for OA to be disabled timed out\n");
}
+static void gen12_oa_disable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+
+ intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
+ GEN12_OAG_OACONTROL,
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
+ 50))
+ DRM_ERROR("wait for OA to be disabled timed out\n");
+}
+
/**
* i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
* @stream: An i915 perf stream opened for OA metrics
@@ -2127,9 +2657,7 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- dev_priv->perf.ops.oa_disable(stream);
+ stream->perf->ops.oa_disable(stream);
if (stream->periodic)
hrtimer_cancel(&stream->poll_check_timer);
@@ -2166,15 +2694,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
int format_size;
int ret;
- /* If the sysfs metrics/ directory wasn't registered for some
+ if (!props->engine) {
+ DRM_DEBUG("OA engine not specified\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If the sysfs metrics/ directory wasn't registered for some
* reason then don't let userspace try their luck with config
* IDs
*/
- if (!dev_priv->perf.metrics_kobj) {
+ if (!perf->metrics_kobj) {
DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
@@ -2184,16 +2718,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!dev_priv->perf.ops.enable_metric_set) {
+ if (!perf->ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
- /* To avoid the complexity of having to accurately filter
+ /*
+ * To avoid the complexity of having to accurately filter
* counter reports and marshal to the appropriate client
* we currently only allow exclusive access
*/
- if (dev_priv->perf.exclusive_stream) {
+ if (perf->exclusive_stream) {
DRM_DEBUG("OA unit already in use\n");
return -EBUSY;
}
@@ -2203,9 +2738,12 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
+ stream->engine = props->engine;
+ stream->uncore = stream->engine->gt->uncore;
+
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
- format_size = dev_priv->perf.oa_formats[props->oa_format].size;
+ format_size = perf->oa_formats[props->oa_format].size;
stream->sample_flags |= SAMPLE_OA_REPORT;
stream->sample_size += format_size;
@@ -2214,8 +2752,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (WARN_ON(stream->oa_buffer.format_size == 0))
return -EINVAL;
+ stream->hold_preemption = props->hold_preemption;
+
stream->oa_buffer.format =
- dev_priv->perf.oa_formats[props->oa_format].format;
+ perf->oa_formats[props->oa_format].format;
stream->periodic = props->oa_periodic;
if (stream->periodic)
@@ -2229,9 +2769,16 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
}
}
- ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
+ ret = alloc_noa_wait(stream);
if (ret) {
+ DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
+ goto err_noa_wait_alloc;
+ }
+
+ stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
+ if (!stream->oa_config) {
DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
+ ret = -EINVAL;
goto err_config;
}
@@ -2247,27 +2794,24 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_get(stream->engine);
+ intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(stream);
if (ret)
goto err_oa_buf_alloc;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- goto err_lock;
-
stream->ops = &i915_oa_stream_ops;
- dev_priv->perf.exclusive_stream = stream;
+ perf->exclusive_stream = stream;
- ret = dev_priv->perf.ops.enable_metric_set(stream);
+ ret = perf->ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ DRM_DEBUG("opening stream oa config uuid=%s\n",
+ stream->oa_config->uuid);
hrtimer_init(&stream->poll_check_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -2278,38 +2822,40 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- dev_priv->perf.exclusive_stream = NULL;
- dev_priv->perf.ops.disable_metric_set(stream);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ perf->exclusive_stream = NULL;
+ perf->ops.disable_metric_set(stream);
-err_lock:
free_oa_buffer(stream);
err_oa_buf_alloc:
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
err_config:
+ free_noa_wait(stream);
+
+err_noa_wait_alloc:
if (stream->ctx)
oa_put_render_ctx_id(stream);
return ret;
}
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *regs)
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct i915_perf_stream *stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+
if (engine->class != RENDER_CLASS)
return;
stream = engine->i915->perf.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(stream, ce, regs, stream->oa_config);
+ gen8_update_reg_state_unlocked(ce, stream);
}
/**
@@ -2379,7 +2925,7 @@ static ssize_t i915_perf_read(struct file *file,
loff_t *ppos)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
ssize_t ret;
/* To ensure it's handled consistently we simply treat all reads of a
@@ -2402,15 +2948,15 @@ static ssize_t i915_perf_read(struct file *file,
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file,
buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
} while (ret == -EAGAIN);
} else {
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file, buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
}
/* We allow the poll checking to sometimes report false positive EPOLLIN
@@ -2448,7 +2994,6 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
/**
* i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
- * @dev_priv: i915 device instance
* @stream: An i915 perf stream
* @file: An i915 perf stream file
* @wait: poll() state table
@@ -2457,15 +3002,14 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
* &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
* will be woken for new stream data.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*
* Returns: any poll events that are ready without sleeping
*/
-static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
- struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait)
+static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
{
__poll_t events = 0;
@@ -2499,12 +3043,12 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
__poll_t ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
+ ret = i915_perf_poll_locked(stream, file, wait);
+ mutex_unlock(&perf->lock);
return ret;
}
@@ -2529,6 +3073,9 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
if (stream->ops->enable)
stream->ops->enable(stream);
+
+ if (stream->hold_preemption)
+ i915_gem_context_set_nopreempt(stream->ctx);
}
/**
@@ -2553,17 +3100,54 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
/* Allow stream->ops->disable() to refer to this */
stream->enabled = false;
+ if (stream->hold_preemption)
+ i915_gem_context_clear_nopreempt(stream->ctx);
+
if (stream->ops->disable)
stream->ops->disable(stream);
}
+static long i915_perf_config_locked(struct i915_perf_stream *stream,
+ unsigned long metrics_set)
+{
+ struct i915_oa_config *config;
+ long ret = stream->oa_config->id;
+
+ config = i915_perf_get_oa_config(stream->perf, metrics_set);
+ if (!config)
+ return -EINVAL;
+
+ if (config != stream->oa_config) {
+ int err;
+
+ /*
+ * If OA is bound to a specific context, emit the
+ * reconfiguration inline from that context. The update
+ * will then be ordered with respect to submission on that
+ * context.
+ *
+ * When set globally, we use a low priority kernel context,
+ * so it will effectively take effect when idle.
+ */
+ err = emit_oa_config(stream, config, oa_context(stream));
+ if (err == 0)
+ config = xchg(&stream->oa_config, config);
+ else
+ ret = err;
+ }
+
+ i915_oa_config_put(config);
+
+ return ret;
+}
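For context, a hedged userspace sketch of driving the new I915_PERF_IOCTL_CONFIG request handled above: the ioctl argument is the metrics-set ID previously registered (e.g. via DRM_IOCTL_I915_PERF_ADD_CONFIG). The wrapper below is illustrative only; the include paths and error handling are assumptions, not part of this patch.

#include <errno.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Switch an already-open, possibly enabled perf stream to another metrics set. */
static int perf_stream_set_config(int stream_fd, unsigned long metrics_set_id)
{
	if (ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, metrics_set_id) < 0)
		return -errno;

	return 0;	/* reconfiguration is emitted inline on the stream's context */
}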
+
/**
* i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
* @stream: An i915 perf stream
* @cmd: the ioctl request
* @arg: the ioctl data
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*
* Returns: zero on success or a negative error code. Returns -EINVAL for
@@ -2580,6 +3164,8 @@ static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
case I915_PERF_IOCTL_DISABLE:
i915_perf_disable_locked(stream);
return 0;
+ case I915_PERF_IOCTL_CONFIG:
+ return i915_perf_config_locked(stream, arg);
}
return -EINVAL;
@@ -2601,12 +3187,12 @@ static long i915_perf_ioctl(struct file *file,
unsigned long arg)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
long ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_ioctl_locked(stream, cmd, arg);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
return ret;
}
@@ -2618,7 +3204,7 @@ static long i915_perf_ioctl(struct file *file,
* Frees all resources associated with the given i915 perf @stream, disabling
* any associated data capture in the process.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*/
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
@@ -2629,8 +3215,6 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
if (stream->ops->destroy)
stream->ops->destroy(stream);
- list_del(&stream->link);
-
if (stream->ctx)
i915_gem_context_put(stream->ctx);
@@ -2651,14 +3235,14 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
static int i915_perf_release(struct inode *inode, struct file *file)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
i915_perf_destroy_locked(stream);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
/* Release the reference the perf stream kept on the driver. */
- drm_dev_put(&dev_priv->drm);
+ drm_dev_put(&perf->i915->drm);
return 0;
}
@@ -2680,7 +3264,7 @@ static const struct file_operations fops = {
/**
* i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
* @props: individually validated u64 property value pairs
* @file: drm file
@@ -2688,7 +3272,7 @@ static const struct file_operations fops = {
* See i915_perf_ioctl_open() for interface details.
*
* Implements further stream config validation and stream initialization on
- * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
+ * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
* taken to serialize with any non-file-operation driver hooks.
*
* Note: at this point the @props have only been validated in isolation and
@@ -2703,7 +3287,7 @@ static const struct file_operations fops = {
* Returns: zero on success or a negative error code.
*/
static int
-i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
+i915_perf_open_ioctl_locked(struct i915_perf *perf,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props,
struct drm_file *file)
@@ -2734,17 +3318,34 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
* rest of the system, which we consider acceptable for a
* non-privileged client.
*
- * For Gen8+ the OA unit no longer supports clock gating off for a
+ * For Gen8->11 the OA unit no longer supports clock gating off for a
* specific context and the kernel can't securely stop the counters
* from updating as system-wide / global values. Even though we can
* filter reports based on the included context ID we can't block
* clients from seeing the raw / global counter values via
* MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
* enable the OA unit by default.
+ *
+ * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
+ * per context basis. So we can relax requirements there if the user
+ * doesn't request global stream access (i.e. query based sampling
+ * using MI_REPORT_PERF_COUNT).
*/
- if (IS_HASWELL(dev_priv) && specific_ctx)
+ if (IS_HASWELL(perf->i915) && specific_ctx)
+ privileged_op = false;
+ else if (IS_GEN(perf->i915, 12) && specific_ctx &&
+ (props->sample_flags & SAMPLE_OA_REPORT) == 0)
privileged_op = false;
+ if (props->hold_preemption) {
+ if (!props->single_context) {
+ DRM_DEBUG("preemption disable with no context\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ privileged_op = true;
+ }
+
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
@@ -2752,7 +3353,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
*/
if (privileged_op &&
i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
+ DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
}
@@ -2763,7 +3364,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_ctx;
}
- stream->dev_priv = dev_priv;
+ stream->perf = perf;
stream->ctx = specific_ctx;
ret = i915_oa_stream_init(stream, param, props);
@@ -2779,8 +3380,6 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_flags;
}
- list_add(&stream->link, &dev_priv->perf.streams);
-
if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
f_flags |= O_CLOEXEC;
if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
@@ -2789,7 +3388,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
if (stream_fd < 0) {
ret = stream_fd;
- goto err_open;
+ goto err_flags;
}
if (!(param->flags & I915_PERF_FLAG_DISABLED))
@@ -2798,12 +3397,10 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
/* Take a reference on the driver that will be kept with stream_fd
* until its release.
*/
- drm_dev_get(&dev_priv->drm);
+ drm_dev_get(&perf->i915->drm);
return stream_fd;
-err_open:
- list_del(&stream->link);
err_flags:
if (stream->ops->destroy)
stream->ops->destroy(stream);
@@ -2816,15 +3413,15 @@ err:
return ret;
}
-static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
+static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
}
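A worked example of the conversion above, assuming a 12 MHz (12000 kHz) command streamer timestamp frequency purely for illustration; the period doubles with each increment of the exponent.

/*
 * period_ns = 1e9 * 2^(exponent + 1) / (1000 * cs_timestamp_frequency_khz)
 *
 * exponent = 16, freq = 12000 kHz (assumed for the example):
 *   2^(16 + 1) = 131072 ticks
 *   131072 ticks / 12 MHz ~= 10922666 ns ~= 10.9 ms between periodic reports
 */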
/**
* read_properties_unlocked - validate + copy userspace stream open properties
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @uprops: The array of u64 key value pairs given by userspace
* @n_props: The number of key value pairs expected in @uprops
* @props: The stream configuration built up while validating properties
@@ -2837,7 +3434,7 @@ static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
* we shouldn't validate or assume anything about ordering here. This doesn't
* rule out defining new properties with ordering requirements in the future.
*/
-static int read_properties_unlocked(struct drm_i915_private *dev_priv,
+static int read_properties_unlocked(struct i915_perf *perf,
u64 __user *uprops,
u32 n_props,
struct perf_open_properties *props)
@@ -2852,6 +3449,15 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
return -EINVAL;
}
+ /* At the moment we only support using i915-perf on the RCS. */
+ props->engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0);
+ if (!props->engine) {
+ DRM_DEBUG("No RENDER-capable engines\n");
+ return -EINVAL;
+ }
+
/* Considering that ID = 0 is reserved and assuming that we don't
* (currently) expect any configurations to ever specify duplicate
* values for a particular property ID then the last _PROP_MAX value is
@@ -2903,7 +3509,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
value);
return -EINVAL;
}
- if (!dev_priv->perf.oa_formats[value].size) {
+ if (!perf->oa_formats[value].size) {
DRM_DEBUG("Unsupported OA report format %llu\n",
value);
return -EINVAL;
@@ -2924,7 +3530,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
*/
BUILD_BUG_ON(sizeof(oa_period) != 8);
- oa_period = oa_exponent_to_ns(dev_priv, value);
+ oa_period = oa_exponent_to_ns(perf, value);
/* This check is primarily to ensure that oa_period <=
* UINT32_MAX (before passing to do_div which only
@@ -2949,6 +3555,9 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->oa_periodic = true;
props->oa_period_exponent = value;
break;
+ case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
+ props->hold_preemption = !!value;
+ break;
case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
return -EINVAL;
@@ -2978,7 +3587,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
* mutex to avoid an awkward lockdep with mmap_sem.
*
* Most of the implementation details are handled by
- * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
+ * i915_perf_open_ioctl_locked() after taking the &perf->lock
* mutex for serializing with any non-file-operation driver hooks.
*
* Return: A newly opened i915 Perf stream file descriptor or negative
@@ -2987,13 +3596,13 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_open_param *param = data;
struct perf_open_properties props;
u32 known_open_flags;
int ret;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -3006,124 +3615,130 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = read_properties_unlocked(dev_priv,
+ ret = read_properties_unlocked(perf,
u64_to_user_ptr(param->properties_ptr),
param->num_properties,
&props);
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
+ ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
+ mutex_unlock(&perf->lock);
return ret;
}
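As a reading aid, here is a minimal userspace sketch (not part of the patch) of the open path exercised above: properties are passed as u64 key/value pairs, which is exactly what read_properties_unlocked() parses. The metrics set id and OA exponent below are placeholders.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int open_oa_stream(int drm_fd)
{
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* placeholder: sysfs metrics id */
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,	/* placeholder sampling period */
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	/* On success the ioctl returns a new stream fd to read() OA data from. */
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}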
/**
* i915_perf_register - exposes i915-perf to userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* In particular OA metric sets are advertised under a sysfs metrics/
* directory allowing userspace to enumerate valid IDs that can be
* used to open an i915-perf stream.
*/
-void i915_perf_register(struct drm_i915_private *dev_priv)
+void i915_perf_register(struct drm_i915_private *i915)
{
+ struct i915_perf *perf = &i915->perf;
int ret;
- if (!dev_priv->perf.initialized)
+ if (!perf->i915)
return;
/* To be sure we're synchronized with an attempted
* i915_perf_open_ioctl(); considering that we register after
* being exposed to userspace.
*/
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
- dev_priv->perf.metrics_kobj =
+ perf->metrics_kobj =
kobject_create_and_add("metrics",
- &dev_priv->drm.primary->kdev->kobj);
- if (!dev_priv->perf.metrics_kobj)
+ &i915->drm.primary->kdev->kobj);
+ if (!perf->metrics_kobj)
goto exit;
- sysfs_attr_init(&dev_priv->perf.test_config.sysfs_metric_id.attr);
-
- if (INTEL_GEN(dev_priv) >= 11) {
- i915_perf_load_test_config_icl(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_SKYLAKE(dev_priv)) {
- if (IS_SKL_GT2(dev_priv))
- i915_perf_load_test_config_sklgt2(dev_priv);
- else if (IS_SKL_GT3(dev_priv))
- i915_perf_load_test_config_sklgt3(dev_priv);
- else if (IS_SKL_GT4(dev_priv))
- i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
-}
-
- if (dev_priv->perf.test_config.id == 0)
+ sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);
+
+ if (IS_TIGERLAKE(i915)) {
+ i915_perf_load_test_config_tgl(i915);
+ } else if (INTEL_GEN(i915) >= 11) {
+ i915_perf_load_test_config_icl(i915);
+ } else if (IS_CANNONLAKE(i915)) {
+ i915_perf_load_test_config_cnl(i915);
+ } else if (IS_COFFEELAKE(i915)) {
+ if (IS_CFL_GT2(i915))
+ i915_perf_load_test_config_cflgt2(i915);
+ if (IS_CFL_GT3(i915))
+ i915_perf_load_test_config_cflgt3(i915);
+ } else if (IS_GEMINILAKE(i915)) {
+ i915_perf_load_test_config_glk(i915);
+ } else if (IS_KABYLAKE(i915)) {
+ if (IS_KBL_GT2(i915))
+ i915_perf_load_test_config_kblgt2(i915);
+ else if (IS_KBL_GT3(i915))
+ i915_perf_load_test_config_kblgt3(i915);
+ } else if (IS_BROXTON(i915)) {
+ i915_perf_load_test_config_bxt(i915);
+ } else if (IS_SKYLAKE(i915)) {
+ if (IS_SKL_GT2(i915))
+ i915_perf_load_test_config_sklgt2(i915);
+ else if (IS_SKL_GT3(i915))
+ i915_perf_load_test_config_sklgt3(i915);
+ else if (IS_SKL_GT4(i915))
+ i915_perf_load_test_config_sklgt4(i915);
+ } else if (IS_CHERRYVIEW(i915)) {
+ i915_perf_load_test_config_chv(i915);
+ } else if (IS_BROADWELL(i915)) {
+ i915_perf_load_test_config_bdw(i915);
+ } else if (IS_HASWELL(i915)) {
+ i915_perf_load_test_config_hsw(i915);
+ }
+
+ if (perf->test_config.id == 0)
goto sysfs_error;
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.test_config.sysfs_metric);
+ ret = sysfs_create_group(perf->metrics_kobj,
+ &perf->test_config.sysfs_metric);
if (ret)
goto sysfs_error;
- atomic_set(&dev_priv->perf.test_config.ref_count, 1);
+ perf->test_config.perf = perf;
+ kref_init(&perf->test_config.ref);
goto exit;
sysfs_error:
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
exit:
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
}
/**
* i915_perf_unregister - hide i915-perf from userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* i915-perf state cleanup is split up into an 'unregister' and
* 'deinit' phase where the interface is first hidden from
* userspace by i915_perf_unregister() before cleaning up
* remaining state in i915_perf_fini().
*/
-void i915_perf_unregister(struct drm_i915_private *dev_priv)
+void i915_perf_unregister(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.metrics_kobj)
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->metrics_kobj)
return;
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.test_config.sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj,
+ &perf->test_config.sysfs_metric);
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
}
-static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
static const i915_reg_t flex_eu_regs[] = {
EU_PERF_CNTL0,
@@ -3143,56 +3758,80 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
return false;
}
-static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
+#define ADDR_IN_RANGE(addr, start, end) \
+ ((addr) >= (start) && \
+ (addr) <= (end))
+
+#define REG_IN_RANGE(addr, start, end) \
+ ((addr) >= i915_mmio_reg_offset(start) && \
+ (addr) <= i915_mmio_reg_offset(end))
+
+#define REG_EQUAL(addr, mmio) \
+ ((addr) == i915_mmio_reg_offset(mmio))
+
+static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
+{
+ return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
+ REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
+ REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
+}
+
+static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
+{
+ return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
+ REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
+ REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
+ REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
+}
+
+static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
- addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
- addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OACEC0_0) &&
- addr <= i915_mmio_reg_offset(OACEC7_1));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
+ REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
}
-static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
- (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
- addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
+ return gen8_is_valid_mux_addr(perf, addr) ||
+ REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
+ REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
}
-static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
- (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
- addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
+ REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
+ REG_EQUAL(addr, HSW_MBVID2_MISR0);
}
-static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen8_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
}
-static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x25100 && addr <= 0x2FF90) ||
- (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
- addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
- addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
+ return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
+ REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
+ REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
+ REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
+ REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
+ REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
+ REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
}
-static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x182300 && addr <= 0x1823A4);
+ return REG_EQUAL(addr, NOA_WRITE) ||
+ REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
+ REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
+ REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
+ REG_EQUAL(addr, RPM_CONFIG0) ||
+ REG_EQUAL(addr, RPM_CONFIG1) ||
+ REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
}
static u32 mask_reg_value(u32 reg, u32 val)
@@ -3201,21 +3840,21 @@ static u32 mask_reg_value(u32 reg, u32 val)
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
- if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
+ if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
 /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
- if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
+ if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
}
-static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
- bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
+static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
+ bool (*is_valid)(struct i915_perf *perf, u32 addr),
u32 __user *regs,
u32 n_regs)
{
@@ -3245,7 +3884,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
if (err)
goto addr_err;
- if (!is_valid(dev_priv, addr)) {
+ if (!is_valid(perf, addr)) {
DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
err = -EINVAL;
goto addr_err;
@@ -3278,7 +3917,7 @@ static ssize_t show_dynamic_id(struct device *dev,
return sprintf(buf, "%d\n", oa_config->id);
}
-static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
+static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
struct i915_oa_config *oa_config)
{
sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
@@ -3293,7 +3932,7 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
oa_config->sysfs_metric.name = oa_config->uuid;
oa_config->sysfs_metric.attrs = oa_config->attrs;
- return sysfs_create_group(dev_priv->perf.metrics_kobj,
+ return sysfs_create_group(perf->metrics_kobj,
&oa_config->sysfs_metric);
}
@@ -3313,17 +3952,18 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_oa_config *args = data;
struct i915_oa_config *oa_config, *tmp;
+ struct i915_oa_reg *regs;
int err, id;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
- if (!dev_priv->perf.metrics_kobj) {
+ if (!perf->metrics_kobj) {
DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
@@ -3346,7 +3986,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
- atomic_set(&oa_config->ref_count, 1);
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
if (!uuid_is_valid(args->uuid)) {
DRM_DEBUG("Invalid uuid format for OA config\n");
@@ -3360,59 +4001,59 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
oa_config->mux_regs_len = args->n_mux_regs;
- oa_config->mux_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_mux_reg,
- u64_to_user_ptr(args->mux_regs_ptr),
- args->n_mux_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_mux_reg,
+ u64_to_user_ptr(args->mux_regs_ptr),
+ args->n_mux_regs);
- if (IS_ERR(oa_config->mux_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for mux_regs\n");
- err = PTR_ERR(oa_config->mux_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->mux_regs = regs;
oa_config->b_counter_regs_len = args->n_boolean_regs;
- oa_config->b_counter_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_b_counter_reg,
- u64_to_user_ptr(args->boolean_regs_ptr),
- args->n_boolean_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_b_counter_reg,
+ u64_to_user_ptr(args->boolean_regs_ptr),
+ args->n_boolean_regs);
- if (IS_ERR(oa_config->b_counter_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
- err = PTR_ERR(oa_config->b_counter_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->b_counter_regs = regs;
- if (INTEL_GEN(dev_priv) < 8) {
+ if (INTEL_GEN(perf->i915) < 8) {
if (args->n_flex_regs != 0) {
err = -EINVAL;
goto reg_err;
}
} else {
oa_config->flex_regs_len = args->n_flex_regs;
- oa_config->flex_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_flex_reg,
- u64_to_user_ptr(args->flex_regs_ptr),
- args->n_flex_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_flex_reg,
+ u64_to_user_ptr(args->flex_regs_ptr),
+ args->n_flex_regs);
- if (IS_ERR(oa_config->flex_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for flex_regs\n");
- err = PTR_ERR(oa_config->flex_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->flex_regs = regs;
}
- err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ err = mutex_lock_interruptible(&perf->metrics_lock);
if (err)
goto reg_err;
/* We shouldn't have too many configs, so this iteration shouldn't be
* too costly.
*/
- idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
if (!strcmp(tmp->uuid, oa_config->uuid)) {
DRM_DEBUG("OA config already exists with this uuid\n");
err = -EADDRINUSE;
@@ -3420,14 +4061,14 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
}
}
- err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
+ err = create_dynamic_oa_sysfs_entry(perf, oa_config);
if (err) {
DRM_DEBUG("Failed to create sysfs entry for OA config\n");
goto sysfs_err;
}
/* Config id 0 is invalid, id 1 for kernel stored test config. */
- oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
+ oa_config->id = idr_alloc(&perf->metrics_idr,
oa_config, 2,
0, GFP_KERNEL);
if (oa_config->id < 0) {
@@ -3436,16 +4077,16 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
goto sysfs_err;
}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
return oa_config->id;
sysfs_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
reg_err:
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
DRM_DEBUG("Failed to add new OA config\n");
return err;
}
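A hedged userspace sketch of the add-config path above (not from the patch): each register entry is an (offset, value) pair of u32s, and the uuid and register values below are placeholders only.

static int add_oa_config(int drm_fd)
{
	uint32_t mux_regs[] = { 0x9888, 0x10800000 };	/* placeholder (offset, value) pair */
	struct drm_i915_perf_oa_config config = {
		.uuid = "01234567-0123-0123-0123-0123456789ab",
		.n_mux_regs = 1,
		.mux_regs_ptr = (uintptr_t)mux_regs,
	};

	/* Returns the new config id (>= 2) on success, negative errno on failure. */
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
}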
@@ -3464,12 +4105,12 @@ reg_err:
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
u64 *arg = data;
struct i915_oa_config *oa_config;
int ret;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -3479,31 +4120,33 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
return -EACCES;
}
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ ret = mutex_lock_interruptible(&perf->metrics_lock);
if (ret)
- goto lock_err;
+ return ret;
- oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
+ oa_config = idr_find(&perf->metrics_idr, *arg);
if (!oa_config) {
DRM_DEBUG("Failed to remove unknown OA config\n");
ret = -ENOENT;
- goto config_err;
+ goto err_unlock;
}
GEM_BUG_ON(*arg != oa_config->id);
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &oa_config->sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
+
+ idr_remove(&perf->metrics_idr, *arg);
- idr_remove(&dev_priv->perf.metrics_idr, *arg);
+ mutex_unlock(&perf->metrics_lock);
DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
-config_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
-lock_err:
+ return 0;
+
+err_unlock:
+ mutex_unlock(&perf->metrics_lock);
return ret;
}
@@ -3551,103 +4194,126 @@ static struct ctl_table dev_root[] = {
/**
* i915_perf_init - initialize i915-perf state on module load
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Initializes i915-perf state without exposing anything to userspace.
*
* Note: i915-perf initialization is split into an 'init' and 'register'
* phase with the i915_perf_register() exposing state to userspace.
*/
-void i915_perf_init(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
- gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
- hsw_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.ops.enable_metric_set = hsw_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = hsw_disable_metric_set;
- dev_priv->perf.ops.oa_enable = gen7_oa_enable;
- dev_priv->perf.ops.oa_disable = gen7_oa_disable;
- dev_priv->perf.ops.read = gen7_oa_read;
- dev_priv->perf.ops.oa_hw_tail_read =
- gen7_oa_hw_tail_read;
-
- dev_priv->perf.oa_formats = hsw_oa_formats;
- } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
+void i915_perf_init(struct drm_i915_private *i915)
+{
+ struct i915_perf *perf = &i915->perf;
+
+ /* XXX const struct i915_perf_ops! */
+
+ if (IS_HASWELL(i915)) {
+ perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg = NULL;
+ perf->ops.enable_metric_set = hsw_enable_metric_set;
+ perf->ops.disable_metric_set = hsw_disable_metric_set;
+ perf->ops.oa_enable = gen7_oa_enable;
+ perf->ops.oa_disable = gen7_oa_disable;
+ perf->ops.read = gen7_oa_read;
+ perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
+
+ perf->oa_formats = hsw_oa_formats;
+ } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
/* Note: that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa_formats = gen8_plus_oa_formats;
+ perf->ops.read = gen8_oa_read;
- dev_priv->perf.ops.oa_enable = gen8_oa_enable;
- dev_priv->perf.ops.oa_disable = gen8_oa_disable;
- dev_priv->perf.ops.read = gen8_oa_read;
- dev_priv->perf.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ if (IS_GEN_RANGE(i915, 8, 9)) {
+ perf->oa_formats = gen8_plus_oa_formats;
- if (IS_GEN_RANGE(dev_priv, 8, 9)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->perf.ops.is_valid_mux_reg =
+ if (IS_CHERRYVIEW(i915)) {
+ perf->ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(dev_priv, 8)) {
- dev_priv->perf.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.ctx_flexeu0_offset = 0x2ce;
+ if (IS_GEN(i915, 8)) {
+ perf->ctx_oactxctrl_offset = 0x120;
+ perf->ctx_flexeu0_offset = 0x2ce;
- dev_priv->perf.gen8_valid_ctx_bit = BIT(25);
+ perf->gen8_valid_ctx_bit = BIT(25);
} else {
- dev_priv->perf.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.ctx_flexeu0_offset = 0x3de;
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
+ perf->gen8_valid_ctx_bit = BIT(16);
}
- } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
+ } else if (IS_GEN_RANGE(i915, 10, 11)) {
+ perf->oa_formats = gen8_plus_oa_formats;
+
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen10_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = gen10_disable_metric_set;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen10_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(dev_priv, 10)) {
- dev_priv->perf.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.ctx_flexeu0_offset = 0x3de;
+ if (IS_GEN(i915, 10)) {
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
} else {
- dev_priv->perf.ctx_oactxctrl_offset = 0x124;
- dev_priv->perf.ctx_flexeu0_offset = 0x78e;
+ perf->ctx_oactxctrl_offset = 0x124;
+ perf->ctx_flexeu0_offset = 0x78e;
}
- dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
+ perf->gen8_valid_ctx_bit = BIT(16);
+ } else if (IS_GEN(i915, 12)) {
+ perf->oa_formats = gen12_oa_formats;
+
+ perf->ops.is_valid_b_counter_reg =
+ gen12_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg =
+ gen12_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
+
+ perf->ops.oa_enable = gen12_oa_enable;
+ perf->ops.oa_disable = gen12_oa_disable;
+ perf->ops.enable_metric_set = gen12_enable_metric_set;
+ perf->ops.disable_metric_set = gen12_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
+
+ perf->ctx_flexeu0_offset = 0;
+ perf->ctx_oactxctrl_offset = 0x144;
}
}
- if (dev_priv->perf.ops.enable_metric_set) {
- INIT_LIST_HEAD(&dev_priv->perf.streams);
- mutex_init(&dev_priv->perf.lock);
+ if (perf->ops.enable_metric_set) {
+ mutex_init(&perf->lock);
oa_sample_rate_hard_limit = 1000 *
- (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
- dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+ (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
+ perf->sysctl_header = register_sysctl_table(dev_root);
- mutex_init(&dev_priv->perf.metrics_lock);
- idr_init(&dev_priv->perf.metrics_idr);
+ mutex_init(&perf->metrics_lock);
+ idr_init(&perf->metrics_idr);
/* We set up some ratelimit state to potentially throttle any
* _NOTES about spurious, invalid OA reports which we don't
@@ -3659,44 +4325,70 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
*
* Using the same limiting factors as printk_ratelimit()
*/
- ratelimit_state_init(&dev_priv->perf.spurious_report_rs,
- 5 * HZ, 10);
+ ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
/* Since we use a DRM_NOTE for spurious reports it would be
* inconsistent to let __ratelimit() automatically print a
* warning for throttling.
*/
- ratelimit_set_flags(&dev_priv->perf.spurious_report_rs,
+ ratelimit_set_flags(&perf->spurious_report_rs,
RATELIMIT_MSG_ON_RELEASE);
- dev_priv->perf.initialized = true;
+ atomic64_set(&perf->noa_programming_delay,
+ 500 * 1000 /* 500us */);
+
+ perf->i915 = i915;
}
}
static int destroy_config(int id, void *p, void *data)
{
- struct drm_i915_private *dev_priv = data;
- struct i915_oa_config *oa_config = p;
-
- put_oa_config(dev_priv, oa_config);
-
+ i915_oa_config_put(p);
return 0;
}
/**
* i915_perf_fini - Counter part to i915_perf_init()
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*/
-void i915_perf_fini(struct drm_i915_private *dev_priv)
+void i915_perf_fini(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.initialized)
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->i915)
return;
- idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
- idr_destroy(&dev_priv->perf.metrics_idr);
+ idr_for_each(&perf->metrics_idr, destroy_config, perf);
+ idr_destroy(&perf->metrics_idr);
- unregister_sysctl_table(dev_priv->perf.sysctl_header);
+ unregister_sysctl_table(perf->sysctl_header);
- memset(&dev_priv->perf.ops, 0, sizeof(dev_priv->perf.ops));
+ memset(&perf->ops, 0, sizeof(perf->ops));
+ perf->i915 = NULL;
+}
- dev_priv->perf.initialized = false;
+/**
+ * i915_perf_ioctl_version - Version of the i915-perf subsystem
+ *
+ * This version number is used by userspace to detect available features.
+ */
+int i915_perf_ioctl_version(void)
+{
+ /*
+ * 1: Initial version
+ * I915_PERF_IOCTL_ENABLE
+ * I915_PERF_IOCTL_DISABLE
+ *
+ * 2: Added runtime modification of OA config.
+ * I915_PERF_IOCTL_CONFIG
+ *
+ * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
+ * preemption on a particular context so that performance data is
+ * accessible from a delta of MI_RPC reports without looking at the
+ * OA buffer.
+ */
+ return 3;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_perf.c"
+#endif
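A hedged userspace sketch of how the version number introduced above might be consumed, assuming the revision is exposed through a getparam such as I915_PARAM_PERF_REVISION; per the comment in i915_perf_ioctl_version(), version 3 is the one adding DRM_I915_PERF_PROP_HOLD_PREEMPTION.

static bool supports_hold_preemption(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PERF_REVISION,	/* assumed getparam name */
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return false;	/* older kernel: parameter unknown */

	return value >= 3;
}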
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
index a412b16d9ffc..4ceebce72060 100644
--- a/drivers/gpu/drm/i915/i915_perf.h
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -6,11 +6,15 @@
#ifndef __I915_PERF_H__
#define __I915_PERF_H__
+#include <linux/kref.h>
#include <linux/types.h>
+#include "i915_perf_types.h"
+
struct drm_device;
struct drm_file;
struct drm_i915_private;
+struct i915_oa_config;
struct intel_context;
struct intel_engine_cs;
@@ -18,6 +22,7 @@ void i915_perf_init(struct drm_i915_private *i915);
void i915_perf_fini(struct drm_i915_private *i915);
void i915_perf_register(struct drm_i915_private *i915);
void i915_perf_unregister(struct drm_i915_private *i915);
+int i915_perf_ioctl_version(void);
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -25,8 +30,29 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *reg_state);
+
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set);
+
+static inline struct i915_oa_config *
+i915_oa_config_get(struct i915_oa_config *oa_config)
+{
+ if (kref_get_unless_zero(&oa_config->ref))
+ return oa_config;
+ else
+ return NULL;
+}
+
+void i915_oa_config_release(struct kref *ref);
+static inline void i915_oa_config_put(struct i915_oa_config *oa_config)
+{
+ if (!oa_config)
+ return;
+
+ kref_put(&oa_config->ref, i915_oa_config_release);
+}
#endif /* __I915_PERF_H__ */
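A minimal sketch (not from the patch) of the lookup pattern these kref helpers are meant to pair with, mirroring the query code later in this series; the built-in test config (id 1) is ignored for brevity.

static struct i915_oa_config *
lookup_oa_config(struct i915_perf *perf, int id)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, id);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config); /* NULL if already dying */
	rcu_read_unlock();

	return oa_config;	/* caller releases with i915_oa_config_put() */
}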
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
new file mode 100644
index 000000000000..74ddc20a0d37
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -0,0 +1,435 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_PERF_TYPES_H_
+#define _I915_PERF_TYPES_H_
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/llist.h>
+#include <linux/poll.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/wait.h>
+
+#include "i915_reg.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct file;
+struct i915_gem_context;
+struct i915_perf;
+struct i915_vma;
+struct intel_context;
+struct intel_engine_cs;
+
+struct i915_oa_format {
+ u32 format;
+ int size;
+};
+
+struct i915_oa_reg {
+ i915_reg_t addr;
+ u32 value;
+};
+
+struct i915_oa_config {
+ struct i915_perf *perf;
+
+ char uuid[UUID_STRING_LEN + 1];
+ int id;
+
+ const struct i915_oa_reg *mux_regs;
+ u32 mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ u32 b_counter_regs_len;
+ const struct i915_oa_reg *flex_regs;
+ u32 flex_regs_len;
+
+ struct attribute_group sysfs_metric;
+ struct attribute *attrs[2];
+ struct device_attribute sysfs_metric_id;
+
+ struct kref ref;
+ struct rcu_head rcu;
+};
+
+struct i915_perf_stream;
+
+/**
+ * struct i915_perf_stream_ops - the OPs to support a specific stream type
+ */
+struct i915_perf_stream_ops {
+ /**
+ * @enable: Enables the collection of HW samples, either in response to
+ * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
+ * without `I915_PERF_FLAG_DISABLED`.
+ */
+ void (*enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable: Disables the collection of HW samples, either in response
+ * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
+ * the stream.
+ */
+ void (*disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @poll_wait: Call poll_wait, passing a wait queue that will be woken
+ * once there is something ready to read() for the stream
+ */
+ void (*poll_wait)(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait);
+
+ /**
+ * @wait_unlocked: For handling a blocking read, wait until there is
+ * something ready to read() for the stream. E.g. wait on the same
+ * wait queue that would be passed to poll_wait().
+ */
+ int (*wait_unlocked)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy buffered metrics as records to userspace
+ * **buf**: the userspace, destination buffer
+ * **count**: the number of bytes to copy, requested by userspace
+ * **offset**: zero at the start of the read, updated as the read
+ * proceeds, it represents how many bytes have been copied so far and
+ * the buffer offset for copying the next record.
+ *
+ * Copy as many buffered i915 perf samples and records for this stream
+ * to userspace as will fit in the given buffer.
+ *
+ * Only write complete records; returning -%ENOSPC if there isn't room
+ * for a complete record.
+ *
+ * Return any error condition that results in a short read such as
+ * -%ENOSPC or -%EFAULT, even though these may be squashed before
+ * returning to userspace.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @destroy: Cleanup any stream specific resources.
+ *
+ * The stream will always be disabled before this is called.
+ */
+ void (*destroy)(struct i915_perf_stream *stream);
+};
+
+/**
+ * struct i915_perf_stream - state for a single open stream FD
+ */
+struct i915_perf_stream {
+ /**
+ * @perf: i915_perf backpointer
+ */
+ struct i915_perf *perf;
+
+ /**
+ * @uncore: mmio access path
+ */
+ struct intel_uncore *uncore;
+
+ /**
+ * @engine: Engine associated with this performance stream.
+ */
+ struct intel_engine_cs *engine;
+
+ /**
+ * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
+ * properties given when opening a stream, representing the contents
+ * of a single sample as read() by userspace.
+ */
+ u32 sample_flags;
+
+ /**
+ * @sample_size: Considering the configured contents of a sample
+ * combined with the required header size, this is the total size
+ * of a single sample record.
+ */
+ int sample_size;
+
+ /**
+ * @ctx: %NULL if measuring system-wide across all contexts or a
+ * specific context that is being monitored.
+ */
+ struct i915_gem_context *ctx;
+
+ /**
+ * @enabled: Whether the stream is currently enabled, considering
+ * whether the stream was opened in a disabled state and based
+ * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
+ */
+ bool enabled;
+
+ /**
+ * @hold_preemption: Whether preemption is put on hold for command
+ * submissions done on the @ctx. This is useful for some drivers that
+ * cannot easily post process the OA buffer context to subtract delta
+ * of performance counters not associated with @ctx.
+ */
+ bool hold_preemption;
+
+ /**
+ * @ops: The callbacks providing the implementation of this specific
+ * type of configured stream.
+ */
+ const struct i915_perf_stream_ops *ops;
+
+ /**
+ * @oa_config: The OA configuration used by the stream.
+ */
+ struct i915_oa_config *oa_config;
+
+ /**
+ * @oa_config_bos: A list of struct i915_oa_config_bo allocated lazily
+ * each time @oa_config changes.
+ */
+ struct llist_head oa_config_bos;
+
+ /**
+ * @pinned_ctx: The OA context specific information.
+ */
+ struct intel_context *pinned_ctx;
+
+ /**
+ * @specific_ctx_id: The id of the specific context.
+ */
+ u32 specific_ctx_id;
+
+ /**
+ * @specific_ctx_id_mask: The mask used to mask the specific_ctx_id bits.
+ */
+ u32 specific_ctx_id_mask;
+
+ /**
+ * @poll_check_timer: High resolution timer that will periodically
+ * check for data in the circular OA buffer for notifying userspace
+ * (e.g. during a read() or poll()).
+ */
+ struct hrtimer poll_check_timer;
+
+ /**
+ * @poll_wq: The wait queue that hrtimer callback wakes when it
+ * sees data ready to read in the circular OA buffer.
+ */
+ wait_queue_head_t poll_wq;
+
+ /**
+ * @pollin: Whether there is data available to read.
+ */
+ bool pollin;
+
+ /**
+ * @periodic: Whether periodic sampling is currently enabled.
+ */
+ bool periodic;
+
+ /**
+ * @period_exponent: The OA unit sampling frequency is derived from this.
+ */
+ int period_exponent;
+
+ /**
+ * @oa_buffer: State of the OA buffer.
+ */
+ struct {
+ struct i915_vma *vma;
+ u8 *vaddr;
+ u32 last_ctx_id;
+ int format;
+ int format_size;
+ int size_exponent;
+
+ /**
+ * @ptr_lock: Locks reads and writes to all head/tail state
+ *
+ * Consider: the head and tail pointer state needs to be read
+ * consistently from a hrtimer callback (atomic context) and
+ * read() fop (user context) with tail pointer updates happening
+ * in atomic context and head updates in user context and the
+ * (unlikely) possibility of read() errors needing to reset all
+ * head/tail state.
+ *
+ * Note: Contention/performance aren't currently a significant
+ * concern here considering the relatively low frequency of
+ * hrtimer callbacks (5ms period) and that reads typically only
+ * happen in response to a hrtimer event and likely complete
+ * before the next callback.
+ *
+ * Note: This lock is not held *while* reading and copying data
+ * to userspace so the value of head observed in hrtimer
+ * callbacks won't represent any partial consumption of data.
+ */
+ spinlock_t ptr_lock;
+
+ /**
+ * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to
+ * be used for reading.
+ *
+ * Initial values of 0xffffffff are invalid and imply that an
+ * update is required (and should be ignored by an attempted
+ * read)
+ */
+ struct {
+ u32 offset;
+ } tails[2];
+
+ /**
+ * @aged_tail_idx: Index for the aged tail ready to read() data up to.
+ */
+ unsigned int aged_tail_idx;
+
+ /**
+ * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer
+ * was read; used to determine when it is old enough to trust.
+ */
+ u64 aging_timestamp;
+
+ /**
+ * @head: Although we can always read back the head pointer register,
+ * we prefer to avoid trusting the HW state, just to avoid any
+ * risk that some hardware condition could somehow bump the
+ * head pointer unpredictably and cause us to forward the wrong
+ * OA buffer data to userspace.
+ */
+ u32 head;
+ } oa_buffer;
+
+ /**
+ * @noa_wait: A batch buffer doing a wait on the GPU for the NOA logic to be
+ * reprogrammed.
+ */
+ struct i915_vma *noa_wait;
+};
+
+/**
+ * struct i915_oa_ops - Gen specific implementation of an OA unit stream
+ */
+struct i915_oa_ops {
+ /**
+ * @is_valid_b_counter_reg: Validates register's address for
+ * programming boolean counters for a particular platform.
+ */
+ bool (*is_valid_b_counter_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @is_valid_mux_reg: Validates register's address for programming mux
+ * for a particular platform.
+ */
+ bool (*is_valid_mux_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @is_valid_flex_reg: Validates register's address for programming
+ * flex EU filtering for a particular platform.
+ */
+ bool (*is_valid_flex_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @enable_metric_set: Selects and applies any MUX configuration to set
+ * up the Boolean and Custom (B/C) counters that are part of the
+ * counter reports being sampled. May apply system constraints such as
+ * disabling EU clock gating as required.
+ */
+ int (*enable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable_metric_set: Remove system constraints associated with using
+ * the OA unit.
+ */
+ void (*disable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @oa_enable: Enable periodic sampling
+ */
+ void (*oa_enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @oa_disable: Disable periodic sampling
+ */
+ void (*oa_disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy data from the circular OA buffer into a given userspace
+ * buffer.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @oa_hw_tail_read: read the OA tail pointer register
+ *
+ * In particular this enables us to share all the fiddly code for
+ * handling the OA unit tail pointer race that affects multiple
+ * generations.
+ */
+ u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
+};
+
+struct i915_perf {
+ struct drm_i915_private *i915;
+
+ struct kobject *metrics_kobj;
+ struct ctl_table_header *sysctl_header;
+
+ /*
+ * Lock associated with adding/modifying/removing OA configs
+ * in perf->metrics_idr.
+ */
+ struct mutex metrics_lock;
+
+ /*
+ * List of dynamic configurations (struct i915_oa_config), you
+ * need to hold perf->metrics_lock to access it.
+ */
+ struct idr metrics_idr;
+
+ /*
+ * Lock associated with anything below within this structure
+ * except exclusive_stream.
+ */
+ struct mutex lock;
+
+ /*
+ * The stream currently using the OA unit. If accessed
+ * outside a syscall associated to its file
+ * descriptor, additional locking is required.
+ */
+ struct i915_perf_stream *exclusive_stream;
+
+ /**
+ * For rate limiting any notifications of spurious
+ * invalid OA reports
+ */
+ struct ratelimit_state spurious_report_rs;
+
+ struct i915_oa_config test_config;
+
+ u32 gen7_latched_oastatus1;
+ u32 ctx_oactxctrl_offset;
+ u32 ctx_flexeu0_offset;
+
+ /**
+ * The RPT_ID/reason field for Gen8+ includes a bit
+ * to determine if the CTX ID in the report is valid
+ * but the specific bit differs between Gen 8 and 9
+ */
+ u32 gen8_valid_ctx_bit;
+
+ struct i915_oa_ops ops;
+ const struct i915_oa_format *oa_formats;
+
+ atomic64_t noa_programming_delay;
+};
+
+#endif /* _I915_PERF_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 212acaef581e..0d40dccd1409 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -11,6 +11,8 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_pmu.h"
@@ -116,22 +118,124 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
return enable;
}
-void i915_pmu_gt_parked(struct drm_i915_private *i915)
+static u64 __get_rc6(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct drm_i915_private *i915 = gt->i915;
+ u64 val;
- if (!pmu->base.event_init)
- return;
+ val = intel_rc6_residency_ns(&gt->rc6,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);
+
+ return val;
+}
+
+#if IS_ENABLED(CONFIG_PM)
+
+static inline s64 ktime_since(const ktime_t kt)
+{
+ return ktime_to_ns(ktime_sub(ktime_get(), kt));
+}
+
+static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
+{
+ u64 val;
- spin_lock_irq(&pmu->lock);
/*
- * Signal sampling timer to stop if only engine events are enabled and
- * GPU went idle.
+ * We think we are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
*/
- pmu->timer_enabled = pmu_needs_timer(pmu, false);
- spin_unlock_irq(&pmu->lock);
+ val = ktime_since(pmu->sleep_last);
+ val += pmu->sample[__I915_SAMPLE_RC6].cur;
+
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+ return val;
}
+static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
+{
+ /*
+ * If we are coming back from being runtime suspended we must
+ * be careful not to report a larger value than returned
+ * previously.
+ */
+ if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ }
+
+ return val;
+}
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct i915_pmu *pmu = &i915->pmu;
+ unsigned long flags;
+ u64 val;
+
+ val = 0;
+ if (intel_gt_pm_get_if_awake(gt)) {
+ val = __get_rc6(gt);
+ intel_gt_pm_put(gt);
+ }
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ if (val)
+ val = __pmu_update_rc6(pmu, val);
+ else
+ val = __pmu_estimate_rc6(pmu);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return val;
+}
+
+static void park_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ __pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+
+ pmu->sleep_last = ktime_get();
+}
+
+static void unpark_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ /* Estimate how long we slept and accumulate that into rc6 counters */
+ if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ __pmu_estimate_rc6(pmu);
+}
+
+#else
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ return __get_rc6(gt);
+}
+
+static void park_rc6(struct drm_i915_private *i915) {}
+static void unpark_rc6(struct drm_i915_private *i915) {}
+
+#endif
+
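A worked example of the estimation scheme above (not from the patch): suppose the last real reading stored in __I915_SAMPLE_RC6 was 10 ms and the GT has been parked for 4 ms; while suspended, __pmu_estimate_rc6() reports roughly 14 ms. If the next real reading then comes back as 13 ms, __pmu_update_rc6() keeps returning the 14 ms estimate until the real counter catches up, so the exported RC6 value never appears to run backwards.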
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
@@ -143,6 +247,26 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
}
}
+void i915_pmu_gt_parked(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
+ return;
+
+ spin_lock_irq(&pmu->lock);
+
+ park_rc6(i915);
+
+ /*
+ * Signal sampling timer to stop if only engine events are enabled and
+ * GPU went idle.
+ */
+ pmu->timer_enabled = pmu_needs_timer(pmu, false);
+
+ spin_unlock_irq(&pmu->lock);
+}
+
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
@@ -151,10 +275,14 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
return;
spin_lock_irq(&pmu->lock);
+
/*
* Re-enable sampling timer when GPU goes active.
*/
__i915_pmu_maybe_start_timer(pmu);
+
+ unpark_rc6(i915);
+
spin_unlock_irq(&pmu->lock);
}
@@ -174,7 +302,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_engine_pmu *pmu = &engine->pmu;
unsigned long flags;
bool busy;
@@ -194,6 +322,10 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if (val & RING_WAIT_SEMAPHORE)
add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+ /* No need to sample when busy stats are supported. */
+ if (intel_engine_supports_stats(engine))
+ goto skip;
+
/*
* While waiting on a semaphore or event, MI_MODE reports the
* ring as idle. However, previously using the seqno, and with
@@ -227,25 +359,26 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct i915_pmu *pmu = &i915->pmu;
+ struct intel_rps *rps = &gt->rps;
if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
- val = i915->gt_pm.rps.cur_freq;
+ val = rps->cur_freq;
if (intel_gt_pm_get_if_awake(gt)) {
val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
- val = intel_get_cagf(i915, val);
+ val = intel_get_cagf(rps, val);
intel_gt_pm_put(gt);
}
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
- intel_gpu_freq(i915, val),
+ intel_gpu_freq(rps, val),
period_ns / 1000);
}
if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
- intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq),
+ intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
}
}
@@ -426,104 +559,6 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __get_rc6(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- u64 val;
-
- val = intel_rc6_residency_ns(i915,
- IS_VALLEYVIEW(i915) ?
- VLV_GT_RENDER_RC6 :
- GEN6_GT_GFX_RC6);
-
- if (HAS_RC6p(i915))
- val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
-
- if (HAS_RC6pp(i915))
- val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
-
- return val;
-}
-
-static u64 get_rc6(struct intel_gt *gt)
-{
-#if IS_ENABLED(CONFIG_PM)
- struct drm_i915_private *i915 = gt->i915;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- struct i915_pmu *pmu = &i915->pmu;
- intel_wakeref_t wakeref;
- unsigned long flags;
- u64 val;
-
- wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (wakeref) {
- val = __get_rc6(gt);
- intel_runtime_pm_put(rpm, wakeref);
-
- /*
- * If we are coming back from being runtime suspended we must
- * be careful not to report a larger value than returned
- * previously.
- */
-
- spin_lock_irqsave(&pmu->lock, flags);
-
- if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = val;
- } else {
- val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- }
-
- spin_unlock_irqrestore(&pmu->lock, flags);
- } else {
- struct device *kdev = rpm->kdev;
-
- /*
- * We are runtime suspended.
- *
- * Report the delta from when the device was suspended to now,
- * on top of the last known real value, as the approximated RC6
- * counter value.
- */
- spin_lock_irqsave(&pmu->lock, flags);
-
- /*
- * After the above branch intel_runtime_pm_get_if_in_use failed
- * to get the runtime PM reference we cannot assume we are in
- * runtime suspend since we can either: a) race with coming out
- * of it before we took the power.lock, or b) there are other
- * states than suspended which can bring us here.
- *
- * We need to double-check that we are indeed currently runtime
- * suspended and if not we cannot do better than report the last
- * known RC6 value.
- */
- if (pm_runtime_status_suspended(kdev)) {
- val = pm_runtime_suspended_time(kdev);
-
- if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- pmu->suspended_time_last = val;
-
- val -= pmu->suspended_time_last;
- val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
- } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- } else {
- val = pmu->sample[__I915_SAMPLE_RC6].cur;
- }
-
- spin_unlock_irqrestore(&pmu->lock, flags);
- }
-
- return val;
-#else
- return __get_rc6(gt);
-#endif
-}
-
static u64 __i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -1047,21 +1082,43 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
cpuhp_remove_multi_state(cpuhp_slot);
}
+static bool is_igp(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ /* IGP is 0000:00:02.0 */
+ return pci_domain_nr(pdev->bus) == 0 &&
+ pdev->bus->number == 0 &&
+ PCI_SLOT(pdev->devfn) == 2 &&
+ PCI_FUNC(pdev->devfn) == 0;
+}
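For example (an inference from the code, not text from the patch): with this check a discrete or add-in GPU at PCI address 0000:03:00.0 registers its PMU as "i915-0000:03:00.0" via the kasprintf() below, while the integrated GPU at 0000:00:02.0 keeps the bare "i915" name that existing tools already expect.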
+
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
- int ret;
+ int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
dev_info(i915->drm.dev, "PMU not supported for this GPU.");
return;
}
- i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
- if (!i915_pmu_events_attr_group.attrs) {
- ret = -ENOMEM;
+ spin_lock_init(&pmu->lock);
+ hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pmu->timer.function = i915_sample;
+
+ if (!is_igp(i915))
+ pmu->name = kasprintf(GFP_KERNEL,
+ "i915-%s",
+ dev_name(i915->drm.dev));
+ else
+ pmu->name = "i915";
+ if (!pmu->name)
goto err;
- }
+
+ i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
+ if (!i915_pmu_events_attr_group.attrs)
+ goto err_name;
pmu->base.attr_groups = i915_pmu_attr_groups;
pmu->base.task_ctx_nr = perf_invalid_context;
@@ -1073,13 +1130,9 @@ void i915_pmu_register(struct drm_i915_private *i915)
pmu->base.read = i915_pmu_event_read;
pmu->base.event_idx = i915_pmu_event_event_idx;
- spin_lock_init(&pmu->lock);
- hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pmu->timer.function = i915_sample;
-
- ret = perf_pmu_register(&pmu->base, "i915", -1);
+ ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
- goto err;
+ goto err_attr;
ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
@@ -1089,10 +1142,14 @@ void i915_pmu_register(struct drm_i915_private *i915)
err_unreg:
perf_pmu_unregister(&pmu->base);
-err:
+err_attr:
pmu->base.event_init = NULL;
free_event_attributes(pmu);
- DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
+err_name:
+ if (!is_igp(i915))
+ kfree(pmu->name);
+err:
+ dev_notice(i915->drm.dev, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
@@ -1110,5 +1167,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
perf_pmu_unregister(&pmu->base);
pmu->base.event_init = NULL;
+ if (!is_igp(i915))
+ kfree(pmu->name);
free_event_attributes(pmu);
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 4fc4f2478301..bf52e3983631 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -47,6 +47,10 @@ struct i915_pmu {
*/
struct pmu base;
/**
+ * @name: Name as registered with perf core.
+ */
+ const char *name;
+ /**
* @lock: Lock protecting enable mask and ref count handling.
*/
spinlock_t lock;
@@ -97,9 +101,9 @@ struct i915_pmu {
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
/**
- * @suspended_time_last: Cached suspend time from PM core.
+ * @sleep_last: Last time GT parked for RC6 estimation.
*/
- u64 suspended_time_last;
+ ktime_t sleep_last;
/**
* @i915_attr: Memory block holding device attributes.
*/
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 21037a2e2038..732aad148881 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -16,6 +16,12 @@ enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ /* A preemptive pulse used to monitor the health of each engine */
+ I915_PRIORITY_HEARTBEAT,
+
+ /* Interactive workload, scheduled for immediate pageflipping */
+ I915_PRIORITY_DISPLAY,
};
#define I915_USER_PRIORITY_SHIFT 2
@@ -39,6 +45,7 @@ enum {
* active request.
*/
#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
+#define I915_PRIORITY_BARRIER INT_MAX
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index ad9240a0817a..c27cfef9281c 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -7,6 +7,7 @@
#include <linux/nospec.h>
#include "i915_drv.h"
+#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
@@ -37,8 +38,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
- u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
- u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
if (query_item->flags != 0)
@@ -50,8 +49,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices * subslice_stride;
- eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
+ subslice_length = sseu->max_slices * sseu->ss_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
@@ -69,9 +68,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = subslice_stride;
+ topo.subslice_stride = sseu->ss_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride = eu_stride;
+ topo.eu_stride = sseu->eu_stride;
if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
@@ -142,10 +141,305 @@ query_engine_info(struct drm_i915_private *i915,
return len;
}
+static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
+ u64 user_regs_ptr,
+ u32 kernel_n_regs)
+{
+ /*
+ * We'll just put the number of registers, and won't copy the
+ * register.
+ */
+ if (user_n_regs == 0)
+ return 0;
+
+ if (user_n_regs < kernel_n_regs)
+ return -EINVAL;
+
+ if (!access_ok(u64_to_user_ptr(user_regs_ptr),
+ 2 * sizeof(u32) * kernel_n_regs))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
+ u32 kernel_n_regs,
+ u64 user_regs_ptr,
+ u32 *user_n_regs)
+{
+ u32 r;
+
+ if (*user_n_regs == 0) {
+ *user_n_regs = kernel_n_regs;
+ return 0;
+ }
+
+ *user_n_regs = kernel_n_regs;
+
+ for (r = 0; r < kernel_n_regs; r++) {
+ u32 __user *user_reg_ptr =
+ u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
+ u32 __user *user_val_ptr =
+ u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
+ sizeof(u32));
+ int ret;
+
+ ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+ user_reg_ptr);
+ if (ret)
+ return -EFAULT;
+
+ ret = __put_user(kernel_regs[r].value, user_val_ptr);
+ if (ret)
+ return -EFAULT;
+ }
+
+ return 0;
+}
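For clarity, the blob copied above is laid out as consecutive (offset, value) u32 pairs, one pair per register. A hypothetical userspace view of a single entry (this struct is illustrative only and not part of the uapi):

#include <stdint.h>

struct i915_perf_reg_pair {       /* hypothetical view of one entry */
        uint32_t mmio_offset;     /* from i915_mmio_reg_offset(kernel_regs[r].addr) */
        uint32_t value;           /* from kernel_regs[r].value */
};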
+
+static int query_perf_config_data(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item,
+ bool use_uuid)
+{
+ struct drm_i915_query_perf_config __user *user_query_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct drm_i915_perf_oa_config __user *user_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr +
+ sizeof(struct drm_i915_query_perf_config));
+ struct drm_i915_perf_oa_config user_config;
+ struct i915_perf *perf = &i915->perf;
+ struct i915_oa_config *oa_config;
+ char uuid[UUID_STRING_LEN + 1];
+ u64 config_id;
+ u32 flags, total_size;
+ int ret;
+
+ if (!perf->i915)
+ return -ENODEV;
+
+ total_size =
+ sizeof(struct drm_i915_query_perf_config) +
+ sizeof(struct drm_i915_perf_oa_config);
+
+ if (query_item->length == 0)
+ return total_size;
+
+ if (query_item->length < total_size) {
+ DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
+ query_item->length, total_size);
+ return -EINVAL;
+ }
+
+ if (!access_ok(user_query_config_ptr, total_size))
+ return -EFAULT;
+
+ if (__get_user(flags, &user_query_config_ptr->flags))
+ return -EFAULT;
+
+ if (flags != 0)
+ return -EINVAL;
+
+ if (use_uuid) {
+ struct i915_oa_config *tmp;
+ int id;
+
+ BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
+
+ memset(&uuid, 0, sizeof(uuid));
+ if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+ sizeof(user_query_config_ptr->uuid)))
+ return -EFAULT;
+
+ oa_config = NULL;
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, uuid)) {
+ oa_config = i915_oa_config_get(tmp);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ } else {
+ if (__get_user(config_id, &user_query_config_ptr->config))
+ return -EFAULT;
+
+ oa_config = i915_perf_get_oa_config(perf, config_id);
+ }
+ if (!oa_config)
+ return -ENOENT;
+
+ if (__copy_from_user(&user_config, user_config_ptr,
+ sizeof(user_config))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
+ user_config.boolean_regs_ptr,
+ oa_config->b_counter_regs_len);
+ if (ret)
+ goto out;
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
+ user_config.flex_regs_ptr,
+ oa_config->flex_regs_len);
+ if (ret)
+ goto out;
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
+ user_config.mux_regs_ptr,
+ oa_config->mux_regs_len);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len,
+ user_config.boolean_regs_ptr,
+ &user_config.n_boolean_regs);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
+ oa_config->flex_regs_len,
+ user_config.flex_regs_ptr,
+ &user_config.n_flex_regs);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
+ oa_config->mux_regs_len,
+ user_config.mux_regs_ptr,
+ &user_config.n_mux_regs);
+ if (ret)
+ goto out;
+
+ memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
+
+ if (__copy_to_user(user_config_ptr, &user_config,
+ sizeof(user_config))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = total_size;
+
+out:
+ i915_oa_config_put(oa_config);
+ return ret;
+}
+
+static size_t sizeof_perf_config_list(size_t count)
+{
+ return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
+}
+
+static size_t sizeof_perf_metrics(struct i915_perf *perf)
+{
+ struct i915_oa_config *tmp;
+ size_t i;
+ int id;
+
+ i = 1;
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id)
+ i++;
+ rcu_read_unlock();
+
+ return sizeof_perf_config_list(i);
+}
+
+static int query_perf_config_list(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct drm_i915_query_perf_config __user *user_query_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct i915_perf *perf = &i915->perf;
+ u64 *oa_config_ids = NULL;
+ int alloc, n_configs;
+ u32 flags;
+ int ret;
+
+ if (!perf->i915)
+ return -ENODEV;
+
+ if (query_item->length == 0)
+ return sizeof_perf_metrics(perf);
+
+ if (get_user(flags, &user_query_config_ptr->flags))
+ return -EFAULT;
+
+ if (flags != 0)
+ return -EINVAL;
+
+ n_configs = 1;
+ do {
+ struct i915_oa_config *tmp;
+ u64 *ids;
+ int id;
+
+ ids = krealloc(oa_config_ids,
+ n_configs * sizeof(*oa_config_ids),
+ GFP_KERNEL);
+ if (!ids)
+ return -ENOMEM;
+
+ alloc = fetch_and_zero(&n_configs);
+
+ ids[n_configs++] = 1ull; /* reserved for test_config */
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (n_configs < alloc)
+ ids[n_configs] = id;
+ n_configs++;
+ }
+ rcu_read_unlock();
+
+ oa_config_ids = ids;
+ } while (n_configs > alloc);
+
+ if (query_item->length < sizeof_perf_config_list(n_configs)) {
+ DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
+ query_item->length,
+ sizeof_perf_config_list(n_configs));
+ kfree(oa_config_ids);
+ return -EINVAL;
+ }
+
+ if (put_user(n_configs, &user_query_config_ptr->config)) {
+ kfree(oa_config_ids);
+ return -EFAULT;
+ }
+
+ ret = copy_to_user(user_query_config_ptr + 1,
+ oa_config_ids,
+ n_configs * sizeof(*oa_config_ids));
+ kfree(oa_config_ids);
+ if (ret)
+ return -EFAULT;
+
+ return sizeof_perf_config_list(n_configs);
+}
+
+static int query_perf_config(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ switch (query_item->flags) {
+ case DRM_I915_QUERY_PERF_CONFIG_LIST:
+ return query_perf_config_list(i915, query_item);
+ case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
+ return query_perf_config_data(i915, query_item, true);
+ case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
+ return query_perf_config_data(i915, query_item, false);
+ default:
+ return -EINVAL;
+ }
+}
+
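A hypothetical userspace sketch of the two-pass protocol handled by query_perf_config_list() above: query once with length == 0 to learn the required size, then again with a buffer to receive the header followed by the u64 config IDs. The DRM_I915_QUERY_PERF_CONFIG item id and the uapi struct layout are assumptions based on the flag names and field accesses in the kernel code above; error handling is omitted.

/*
 * Illustrative sketch only: list the OA config IDs exposed through the
 * i915 query ioctl.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void list_oa_configs(int drm_fd)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_PERF_CONFIG,
                .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
        };
        struct drm_i915_query query = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_perf_config *hdr;
        uint64_t *ids;
        uint64_t i;

        ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query); /* pass 1: fills item.length */

        hdr = calloc(1, item.length);                /* header flags must stay zero */
        item.data_ptr = (uintptr_t)hdr;
        ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query); /* pass 2: header + IDs copied back */

        ids = (uint64_t *)(hdr + 1);                 /* IDs follow the header */
        for (i = 0; i < hdr->config; i++)            /* ->config holds the count */
                printf("OA config id %llu\n", (unsigned long long)ids[i]);
        free(hdr);
}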
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
query_engine_info,
+ query_perf_config,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f8ee9aba3955..73079b503724 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -413,6 +413,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE_MAX 4
+
#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
@@ -547,7 +550,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4)
#define MI_PREDICATE_SRC1 _MMIO(0x2408)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
-
+#define MI_PREDICATE_DATA _MMIO(0x2410)
+#define MI_PREDICATE_RESULT _MMIO(0x2418)
+#define MI_PREDICATE_RESULT_1 _MMIO(0x241c)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
#define LOWER_SLICE_ENABLED (1 << 0)
#define LOWER_SLICE_DISABLED (0 << 0)
@@ -688,6 +693,45 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6 << 3)
#define OABUFFER_SIZE_16M (7 << 3)
+/* Gen12 OAR unit */
+#define GEN12_OAR_OACONTROL _MMIO(0x2960)
+#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
+#define GEN12_OAR_OACONTROL_COUNTER_ENABLE (1 << 0)
+
+#define GEN12_OACTXCONTROL _MMIO(0x2360)
+#define GEN12_OAR_OASTATUS _MMIO(0x2968)
+
+/* Gen12 OAG unit */
+#define GEN12_OAG_OAHEADPTR _MMIO(0xdb00)
+#define GEN12_OAG_OAHEADPTR_MASK 0xffffffc0
+#define GEN12_OAG_OATAILPTR _MMIO(0xdb04)
+#define GEN12_OAG_OATAILPTR_MASK 0xffffffc0
+
+#define GEN12_OAG_OABUFFER _MMIO(0xdb08)
+#define GEN12_OAG_OABUFFER_BUFFER_SIZE_MASK (0x7)
+#define GEN12_OAG_OABUFFER_BUFFER_SIZE_SHIFT (3)
+#define GEN12_OAG_OABUFFER_MEMORY_SELECT (1 << 0) /* 0: PPGTT, 1: GGTT */
+
+#define GEN12_OAG_OAGLBCTXCTRL _MMIO(0x2b28)
+#define GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT 2
+#define GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE (1 << 1)
+#define GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME (1 << 0)
+
+#define GEN12_OAG_OACONTROL _MMIO(0xdaf4)
+#define GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT 2
+#define GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE (1 << 0)
+
+#define GEN12_OAG_OA_DEBUG _MMIO(0xdaf8)
+#define GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6)
+#define GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5)
+#define GEN12_OAG_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2)
+#define GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1)
+
+#define GEN12_OAG_OASTATUS _MMIO(0xdafc)
+#define GEN12_OAG_OASTATUS_COUNTER_OVERFLOW (1 << 2)
+#define GEN12_OAG_OASTATUS_BUFFER_OVERFLOW (1 << 1)
+#define GEN12_OAG_OASTATUS_REPORT_LOST (1 << 0)
+
/*
* Flexible, Aggregate EU Counter Registers.
* Note: these aren't contiguous
@@ -924,6 +968,26 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
+/* Same layout as OASTARTTRIGX */
+#define GEN12_OAG_OASTARTTRIG1 _MMIO(0xd900)
+#define GEN12_OAG_OASTARTTRIG2 _MMIO(0xd904)
+#define GEN12_OAG_OASTARTTRIG3 _MMIO(0xd908)
+#define GEN12_OAG_OASTARTTRIG4 _MMIO(0xd90c)
+#define GEN12_OAG_OASTARTTRIG5 _MMIO(0xd910)
+#define GEN12_OAG_OASTARTTRIG6 _MMIO(0xd914)
+#define GEN12_OAG_OASTARTTRIG7 _MMIO(0xd918)
+#define GEN12_OAG_OASTARTTRIG8 _MMIO(0xd91c)
+
+/* Same layout as OAREPORTTRIGX */
+#define GEN12_OAG_OAREPORTTRIG1 _MMIO(0xd920)
+#define GEN12_OAG_OAREPORTTRIG2 _MMIO(0xd924)
+#define GEN12_OAG_OAREPORTTRIG3 _MMIO(0xd928)
+#define GEN12_OAG_OAREPORTTRIG4 _MMIO(0xd92c)
+#define GEN12_OAG_OAREPORTTRIG5 _MMIO(0xd930)
+#define GEN12_OAG_OAREPORTTRIG6 _MMIO(0xd934)
+#define GEN12_OAG_OAREPORTTRIG7 _MMIO(0xd938)
+#define GEN12_OAG_OAREPORTTRIG8 _MMIO(0xd93c)
+
/* CECX_0 */
#define OACEC_COMPARE_LESS_OR_EQUAL 6
#define OACEC_COMPARE_NOT_EQUAL 5
@@ -940,6 +1004,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC_SELECT_PREV (1 << 19)
#define OACEC_SELECT_BOOLEAN (2 << 19)
+/* 11-bit array 0: pass-through, 1: negated */
+#define GEN12_OASCEC_NEGATE_MASK 0x7ff
+#define GEN12_OASCEC_NEGATE_SHIFT 21
+
/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
#define OACEC_CONSIDERATIONS_MASK 0xffff
@@ -962,6 +1030,42 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC7_0 _MMIO(0x27a8)
#define OACEC7_1 _MMIO(0x27ac)
+/* Same layout as CECX_Y */
+#define GEN12_OAG_CEC0_0 _MMIO(0xd940)
+#define GEN12_OAG_CEC0_1 _MMIO(0xd944)
+#define GEN12_OAG_CEC1_0 _MMIO(0xd948)
+#define GEN12_OAG_CEC1_1 _MMIO(0xd94c)
+#define GEN12_OAG_CEC2_0 _MMIO(0xd950)
+#define GEN12_OAG_CEC2_1 _MMIO(0xd954)
+#define GEN12_OAG_CEC3_0 _MMIO(0xd958)
+#define GEN12_OAG_CEC3_1 _MMIO(0xd95c)
+#define GEN12_OAG_CEC4_0 _MMIO(0xd960)
+#define GEN12_OAG_CEC4_1 _MMIO(0xd964)
+#define GEN12_OAG_CEC5_0 _MMIO(0xd968)
+#define GEN12_OAG_CEC5_1 _MMIO(0xd96c)
+#define GEN12_OAG_CEC6_0 _MMIO(0xd970)
+#define GEN12_OAG_CEC6_1 _MMIO(0xd974)
+#define GEN12_OAG_CEC7_0 _MMIO(0xd978)
+#define GEN12_OAG_CEC7_1 _MMIO(0xd97c)
+
+/* Same layout as CECX_Y + negate 11-bit array */
+#define GEN12_OAG_SCEC0_0 _MMIO(0xdc00)
+#define GEN12_OAG_SCEC0_1 _MMIO(0xdc04)
+#define GEN12_OAG_SCEC1_0 _MMIO(0xdc08)
+#define GEN12_OAG_SCEC1_1 _MMIO(0xdc0c)
+#define GEN12_OAG_SCEC2_0 _MMIO(0xdc10)
+#define GEN12_OAG_SCEC2_1 _MMIO(0xdc14)
+#define GEN12_OAG_SCEC3_0 _MMIO(0xdc18)
+#define GEN12_OAG_SCEC3_1 _MMIO(0xdc1c)
+#define GEN12_OAG_SCEC4_0 _MMIO(0xdc20)
+#define GEN12_OAG_SCEC4_1 _MMIO(0xdc24)
+#define GEN12_OAG_SCEC5_0 _MMIO(0xdc28)
+#define GEN12_OAG_SCEC5_1 _MMIO(0xdc2c)
+#define GEN12_OAG_SCEC6_0 _MMIO(0xdc30)
+#define GEN12_OAG_SCEC6_1 _MMIO(0xdc34)
+#define GEN12_OAG_SCEC7_0 _MMIO(0xdc38)
+#define GEN12_OAG_SCEC7_1 _MMIO(0xdc3c)
+
/* OA perf counters */
#define OA_PERFCNT1_LO _MMIO(0x91B8)
#define OA_PERFCNT1_HI _MMIO(0x91BC)
@@ -1042,6 +1146,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
#define MICRO_BP_FIRED_ARMED _MMIO(0x983C)
+#define GEN12_OAA_DBG_REG _MMIO(0xdc44)
+#define GEN12_OAG_OA_PESS _MMIO(0x2b2c)
+#define GEN12_OAG_SPCTR_CNF _MMIO(0xdc40)
+
#define GDT_CHICKEN_BITS _MMIO(0x9840)
#define GT_NOA_ENABLE 0x00000080
@@ -1962,8 +2070,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
-#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
- _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
+#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \
+ _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C
@@ -1973,10 +2081,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
-#define MG_TX1_LINK_PARAMS(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
- MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
- MG_TX_LINK_PARAMS_TX1LN1_PORT1)
+#define MG_TX1_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX1LN1_PORT1)
#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC
@@ -1986,10 +2094,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
-#define MG_TX2_LINK_PARAMS(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
- MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
- MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define MG_TX2_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX2LN1_PORT1)
#define CRI_USE_FS32 (1 << 5)
#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C
@@ -2000,10 +2108,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
-#define MG_TX1_PISO_READLOAD(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
- MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
- MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+#define MG_TX1_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX1LN1_PORT1)
#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC
@@ -2013,10 +2121,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
-#define MG_TX2_PISO_READLOAD(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
- MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
- MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define MG_TX2_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX2LN1_PORT1)
#define CRI_CALCINIT (1 << 1)
#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148
@@ -2027,10 +2135,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
-#define MG_TX1_SWINGCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
- MG_TX_SWINGCTRL_TX1LN0_PORT2, \
- MG_TX_SWINGCTRL_TX1LN1_PORT1)
+#define MG_TX1_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX1LN1_PORT1)
#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8
@@ -2040,10 +2148,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
-#define MG_TX2_SWINGCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
- MG_TX_SWINGCTRL_TX2LN0_PORT2, \
- MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define MG_TX2_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0)
@@ -2055,10 +2163,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
-#define MG_TX1_DRVCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
- MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
- MG_TX_DRVCTRL_TX1LN1_TXPORT1)
+#define MG_TX1_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+ MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
+ MG_TX_DRVCTRL_TX1LN1_TXPORT1)
#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4
#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4
@@ -2068,10 +2176,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
-#define MG_TX2_DRVCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
- MG_TX_DRVCTRL_TX2LN0_PORT2, \
- MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define MG_TX2_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+ MG_TX_DRVCTRL_TX2LN0_PORT2, \
+ MG_TX_DRVCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24)
#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22)
@@ -2088,10 +2196,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_CLKHUB_LN1_PORT3 0x16A79C
#define MG_CLKHUB_LN0_PORT4 0x16B39C
#define MG_CLKHUB_LN1_PORT4 0x16B79C
-#define MG_CLKHUB(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
- MG_CLKHUB_LN0_PORT2, \
- MG_CLKHUB_LN1_PORT1)
+#define MG_CLKHUB(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \
+ MG_CLKHUB_LN0_PORT2, \
+ MG_CLKHUB_LN1_PORT1)
#define CFG_LOW_RATE_LKREN_EN (1 << 11)
#define MG_TX_DCC_TX1LN0_PORT1 0x168110
@@ -2102,10 +2210,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
-#define MG_TX1_DCC(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
- MG_TX_DCC_TX1LN0_PORT2, \
- MG_TX_DCC_TX1LN1_PORT1)
+#define MG_TX1_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \
+ MG_TX_DCC_TX1LN0_PORT2, \
+ MG_TX_DCC_TX1LN1_PORT1)
#define MG_TX_DCC_TX2LN0_PORT1 0x168090
#define MG_TX_DCC_TX2LN1_PORT1 0x168490
#define MG_TX_DCC_TX2LN0_PORT2 0x169090
@@ -2114,10 +2222,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
-#define MG_TX2_DCC(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
- MG_TX_DCC_TX2LN0_PORT2, \
- MG_TX_DCC_TX2LN1_PORT1)
+#define MG_TX2_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \
+ MG_TX_DCC_TX2LN0_PORT2, \
+ MG_TX_DCC_TX2LN1_PORT1)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25)
#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24)
@@ -2130,10 +2238,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
-#define MG_DP_MODE(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
- MG_DP_MODE_LN0_ACU_PORT2, \
- MG_DP_MODE_LN1_ACU_PORT1)
+#define MG_DP_MODE(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \
+ MG_DP_MODE_LN0_ACU_PORT2, \
+ MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6)
#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5)
@@ -2172,13 +2280,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
+#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx)))
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
@@ -2459,6 +2567,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
#define RING_FAULT_VALID (1 << 0)
#define DONE_REG _MMIO(0x40b0)
+#define GEN12_GAM_DONE _MMIO(0xcf68)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
@@ -2489,7 +2598,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
+#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
+#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
+
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28)
@@ -2602,6 +2716,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
+#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1 << 31)
@@ -2711,6 +2827,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
#define GEN2_IMR _MMIO(0x20a8)
@@ -2884,6 +3001,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10)
@@ -2962,6 +3080,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
+#define GEN12_GT_DSS_ENABLE _MMIO(0x913C)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -3564,6 +3684,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _PALETTE_A 0xa000
#define _PALETTE_B 0xa800
#define _CHV_PALETTE_C 0xc000
+#define PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
_PICK((pipe), _PALETTE_A, \
_PALETTE_B, _CHV_PALETTE_C) + \
@@ -4044,10 +4167,15 @@ enum {
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define L3_CLKGATE_DIS REG_BIT(16)
+#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
#define GWUNIT_CLKGATE_DIS (1 << 16)
+#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
+#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS (1 << 20)
@@ -4141,6 +4269,7 @@ enum {
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
+#define _EXITLINE_A 0x60018
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
@@ -4192,10 +4321,22 @@ enum {
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-/* HSW+ eDP PSR registers */
-#define HSW_EDP_PSR_BASE 0x64800
-#define BDW_EDP_PSR_BASE 0x6f800
-#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
+#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A)
+#define EXITLINE_ENABLE REG_BIT(31)
+#define EXITLINE_MASK REG_GENMASK(12, 0)
+#define EXITLINE_SHIFT 0
+
+/*
+ * HSW+ eDP PSR registers
+ *
+ * HSW PSR registers are relative to DDIA (_DDI_BUF_CTL_A + 0x800), with just
+ * one instance of them.
+ */
+#define _HSW_EDP_PSR_BASE 0x64800
+#define _SRD_CTL_A 0x60800
+#define _SRD_CTL_EDP 0x6f800
+#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust)
+#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A))
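As a worked example of the _PSR_ADJ() translation: _TRANS2(TRANSCODER_EDP, _SRD_CTL_A) resolves to 0x6f800 (_SRD_CTL_EDP). On HSW/BDW, where the single PSR instance sits at 0x648xx, hsw_psr_mmio_adjust is presumably set elsewhere in this series to _SRD_CTL_EDP - _HSW_EDP_PSR_BASE = 0xb000, so EDP_PSR_CTL(TRANSCODER_EDP) lands back on 0x64800; on platforms with per-transcoder PSR the adjustment is 0 and the transcoder-relative offsets are used directly.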
#define EDP_PSR_ENABLE (1 << 31)
#define BDW_PSR_SINGLE_FRAME (1 << 30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
@@ -4221,27 +4362,40 @@ enum {
#define EDP_PSR_TP1_TIME_0us (3 << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
-/* Bspec claims those aren't shifted but stay at 0x64800 */
+/*
+ * Until TGL, IMR/IIR are fixed at 0x648xx. On TGL+ those registers are relative
+ * to the transcoder, with the bits defined for each one as if no shift were
+ * applied (i.e. as if it were for TRANSCODER_EDP).
+ */
#define EDP_PSR_IMR _MMIO(0x64834)
#define EDP_PSR_IIR _MMIO(0x64838)
-#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2))
-#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1))
-#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift))
-#define EDP_PSR_TRANSCODER_C_SHIFT 24
-#define EDP_PSR_TRANSCODER_B_SHIFT 16
-#define EDP_PSR_TRANSCODER_A_SHIFT 8
-#define EDP_PSR_TRANSCODER_EDP_SHIFT 0
-
-#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
+#define _PSR_IMR_A 0x60814
+#define _PSR_IIR_A 0x60818
+#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A)
+#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
+#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
+ 0 : ((trans) - TRANSCODER_A + 1) * 8)
+#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+
+#define _SRD_AUX_CTL_A 0x60810
+#define _SRD_AUX_CTL_EDP 0x6f810
+#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A))
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16)
#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11)
#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff)
-#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
+#define _SRD_AUX_DATA_A 0x60814
+#define _SRD_AUX_DATA_EDP 0x6f814
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) * 4) /* 5 registers */
-#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
+#define _SRD_STATUS_A 0x60840
+#define _SRD_STATUS_EDP 0x6f840
+#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A))
#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
@@ -4266,10 +4420,15 @@ enum {
#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
-#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
+#define _SRD_PERF_CNT_A 0x60844
+#define _SRD_PERF_CNT_EDP 0x6f844
+#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A))
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
+/* PSR_MASK on SKL+ */
+#define _SRD_DEBUG_A 0x60860
+#define _SRD_DEBUG_EDP 0x6f860
+#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A))
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
@@ -4277,7 +4436,9 @@ enum {
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
-#define EDP_PSR2_CTL _MMIO(0x6f900)
+#define _PSR2_CTL_A 0x60900
+#define _PSR2_CTL_EDP 0x6f900
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
#define EDP_PSR2_ENABLE (1 << 31)
#define EDP_SU_TRACK_ENABLE (1 << 30)
#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
@@ -4299,8 +4460,8 @@ enum {
#define _PSR_EVENT_TRANS_B 0x61848
#define _PSR_EVENT_TRANS_C 0x62848
#define _PSR_EVENT_TRANS_D 0x63848
-#define _PSR_EVENT_TRANS_EDP 0x6F848
-#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A)
+#define _PSR_EVENT_TRANS_EDP 0x6f848
+#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17)
#define PSR_EVENT_PSR2_DISABLED (1 << 16)
#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15)
@@ -4318,15 +4479,16 @@ enum {
#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
#define PSR_EVENT_PSR_DISABLE (1 << 0)
-#define EDP_PSR2_STATUS _MMIO(0x6f940)
+#define _PSR2_STATUS_A 0x60940
+#define _PSR2_STATUS_EDP 0x6f940
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
-#define _PSR2_SU_STATUS_0 0x6F914
-#define _PSR2_SU_STATUS_1 0x6F918
-#define _PSR2_SU_STATUS_2 0x6F91C
-#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1))
-#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3))
+#define _PSR2_SU_STATUS_A 0x60914
+#define _PSR2_SU_STATUS_EDP 0x6f914
+#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4)
+#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
#define PSR2_SU_STATUS_FRAMES 8
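As a worked example of the selective-update status lookup above: frame 4 maps to _PSR2_SU_STATUS(tran, 1), i.e. the second dword at offset +4, and PSR2_SU_STATUS_SHIFT(4) = 10, so its 10-bit block count occupies bits 19:10 of that register.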
@@ -4652,6 +4814,7 @@ enum {
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_GMP_DATA_SIZE 36
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
@@ -5488,45 +5651,9 @@ enum {
*/
#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
-#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
-#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
-#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
-#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
-#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
-#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
-#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
-#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
-
-#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
-#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
-#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
-#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
-#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
-#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
-
-#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
-#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
-#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
-#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
-#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
-#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
-
-#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
-#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
-#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
-#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
-#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
-#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
-
-#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
-#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
-#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
-#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
-#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
-#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
@@ -5658,6 +5785,11 @@ enum {
#define PIPECONF_CXSR_DOWNCLOCK (1 << 16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_OUTPUT_COLORSPACE_MASK (3 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_RGB (0 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV601 (1 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV709 (2 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0 << 5)
#define PIPECONF_10BPC (1 << 5)
@@ -5745,12 +5877,13 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
+#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE (1 << 27)
-#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
@@ -6207,6 +6340,7 @@ enum {
#define CHV_CURSOR_C_OFFSET 0x700e0
#define IVB_CURSOR_B_OFFSET 0x71080
#define IVB_CURSOR_C_OFFSET 0x72080
+#define TGL_CURSOR_D_OFFSET 0x73080
/* Display A control */
#define _DSPACNTR 0x70180
@@ -7177,11 +7311,17 @@ enum {
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define LGC_PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define LGC_PALETTE_BLUE_MASK REG_GENMASK(7, 0)
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
/* ilk/snb precision palette */
#define _PREC_PALETTE_A 0x4b000
#define _PREC_PALETTE_B 0x4c000
+#define PREC_PALETTE_RED_MASK REG_GENMASK(29, 20)
+#define PREC_PALETTE_GREEN_MASK REG_GENMASK(19, 10)
+#define PREC_PALETTE_BLUE_MASK REG_GENMASK(9, 0)
#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)
#define _PREC_PIPEAGCMAX 0x4d000
@@ -7217,6 +7357,8 @@ enum {
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
+#define DMC_DEBUG3 _MMIO(0x101090)
+
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
#define MMIO_TIMEOUT_US(us) ((us) << 0)
@@ -7332,6 +7474,9 @@ enum {
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
+#define GEN11_PIPE_PLANE7_FAULT (1 << 22)
+#define GEN11_PIPE_PLANE6_FAULT (1 << 21)
+#define GEN11_PIPE_PLANE5_FAULT (1 << 20)
#define GEN9_PIPE_PLANE4_FAULT (1 << 10)
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
@@ -7351,6 +7496,11 @@ enum {
GEN9_PIPE_PLANE3_FAULT | \
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
+#define GEN11_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
+ GEN11_PIPE_PLANE7_FAULT | \
+ GEN11_PIPE_PLANE6_FAULT | \
+ GEN11_PIPE_PLANE5_FAULT)
#define GEN8_DE_PORT_ISR _MMIO(0x44440)
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
@@ -7370,6 +7520,12 @@ enum {
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 (1 << 13)
+#define TGL_DE_PORT_AUX_USBC5 (1 << 12)
+#define TGL_DE_PORT_AUX_USBC4 (1 << 11)
+#define TGL_DE_PORT_AUX_USBC3 (1 << 10)
+#define TGL_DE_PORT_AUX_USBC2 (1 << 9)
+#define TGL_DE_PORT_AUX_USBC1 (1 << 8)
#define TGL_DE_PORT_AUX_DDIC (1 << 2)
#define TGL_DE_PORT_AUX_DDIB (1 << 1)
#define TGL_DE_PORT_AUX_DDIA (1 << 0)
@@ -7558,10 +7714,17 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define CHICKEN_TRANS_A _MMIO(0x420c0)
-#define CHICKEN_TRANS_B _MMIO(0x420c4)
-#define CHICKEN_TRANS_C _MMIO(0x420c8)
-#define CHICKEN_TRANS_EDP _MMIO(0x420cc)
+#define _CHICKEN_TRANS_A 0x420c0
+#define _CHICKEN_TRANS_B 0x420c4
+#define _CHICKEN_TRANS_C 0x420c8
+#define _CHICKEN_TRANS_EDP 0x420cc
+#define _CHICKEN_TRANS_D 0x420d8
+#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+ [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
+ [TRANSCODER_A] = _CHICKEN_TRANS_A, \
+ [TRANSCODER_B] = _CHICKEN_TRANS_B, \
+ [TRANSCODER_C] = _CHICKEN_TRANS_C, \
+ [TRANSCODER_D] = _CHICKEN_TRANS_D))
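CHICKEN_TRANS() above relies on _PICK(), which elsewhere in i915_reg.h indexes a compound-literal u32 array, so the designated initializers map each transcoder enum value to its offset regardless of enum ordering. A minimal standalone sketch of the same idiom, with hypothetical names:

/* Standalone illustration of the _PICK()-style compound-literal lookup. */
#define PICK_U32(idx, ...) (((const unsigned int []){ __VA_ARGS__ })[idx])

enum example_trans { EX_TRANS_A, EX_TRANS_B, EX_TRANS_C, EX_TRANS_D, EX_TRANS_EDP };

#define EX_CHICKEN_TRANS(t) PICK_U32((t), \
        [EX_TRANS_EDP] = 0x420cc, \
        [EX_TRANS_A] = 0x420c0, \
        [EX_TRANS_B] = 0x420c4, \
        [EX_TRANS_C] = 0x420c8, \
        [EX_TRANS_D] = 0x420d8)
/* EX_CHICKEN_TRANS(EX_TRANS_B) evaluates to 0x420c4 */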
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7594,15 +7757,19 @@ enum {
#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
#define SKL_DFSM _MMIO(0x51000)
-#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
-#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
-#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
-#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
-#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
+#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define ICL_DFSM_DMC_DISABLE (1 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define CNL_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
#define SKL_DSSM _MMIO(0x51004)
#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
@@ -7619,7 +7786,10 @@ enum {
#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
+#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
@@ -7644,6 +7814,7 @@ enum {
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
+ #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE (1 << 9)
#define HIZ_CHICKEN _MMIO(0x7018)
# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
@@ -7828,29 +7999,24 @@ enum {
SDE_FDI_RXA_CPT)
/* south display engine interrupt: ICP/TGP */
-#define SDE_TC6_HOTPLUG_TGP (1 << 29)
-#define SDE_TC5_HOTPLUG_TGP (1 << 28)
-#define SDE_TC4_HOTPLUG_ICP (1 << 27)
-#define SDE_TC3_HOTPLUG_ICP (1 << 26)
-#define SDE_TC2_HOTPLUG_ICP (1 << 25)
-#define SDE_TC1_HOTPLUG_ICP (1 << 24)
#define SDE_GMBUS_ICP (1 << 23)
-#define SDE_DDIC_HOTPLUG_TGP (1 << 18)
-#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
-#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24))
#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16))
-#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
- SDE_DDIA_HOTPLUG_ICP)
-#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
- SDE_TC3_HOTPLUG_ICP | \
- SDE_TC2_HOTPLUG_ICP | \
- SDE_TC1_HOTPLUG_ICP)
-#define SDE_DDI_MASK_TGP (SDE_DDIC_HOTPLUG_TGP | \
- SDE_DDI_MASK_ICP)
-#define SDE_TC_MASK_TGP (SDE_TC6_HOTPLUG_TGP | \
- SDE_TC5_HOTPLUG_TGP | \
- SDE_TC_MASK_ICP)
+#define SDE_DDI_MASK_ICP (SDE_DDI_HOTPLUG_ICP(PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_A))
+#define SDE_TC_MASK_ICP (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC1))
+#define SDE_DDI_MASK_TGP (SDE_DDI_HOTPLUG_ICP(PORT_C) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_A))
+#define SDE_TC_MASK_TGP (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC5) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC1))
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
@@ -7917,26 +8083,13 @@ enum {
* SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
*/
-#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
-#define TGP_DDIC_HPD_ENABLE (1 << 11)
-#define TGP_DDIC_HPD_STATUS_MASK (3 << 8)
-#define TGP_DDIC_HPD_NO_DETECT (0 << 8)
-#define TGP_DDIC_HPD_SHORT_DETECT (1 << 8)
-#define TGP_DDIC_HPD_LONG_DETECT (2 << 8)
-#define TGP_DDIC_HPD_SHORT_LONG_DETECT (3 << 8)
-#define ICP_DDIB_HPD_ENABLE (1 << 7)
-#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
-#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
-#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4)
-#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
-#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
-#define ICP_DDIA_HPD_ENABLE (1 << 3)
-#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
-#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
-#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
-#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
-#define ICP_DDIA_HPD_LONG_DETECT (2 << 0)
-#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0)
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define SHOTPLUG_CTL_DDI_HPD_ENABLE(port) (0x8 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port) (0x3 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port) (0x0 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port) (0x1 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port) (0x2 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port) (0x3 << (4 * (port)))
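These parameterized forms match the fixed bits they replace: for instance SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) expands to 0x8 << (4 * 1) == (1 << 7), the old ICP_DDIB_HPD_ENABLE value, and SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) to 0x8 << 8 == (1 << 11), the old TGP_DDIC_HPD_ENABLE value.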
#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
@@ -8047,14 +8200,15 @@ enum {
#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
-#define ICP_DDI_HPD_ENABLE_MASK (ICP_DDIB_HPD_ENABLE | \
- ICP_DDIA_HPD_ENABLE)
+#define ICP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \
ICP_TC_HPD_ENABLE(PORT_TC3) | \
ICP_TC_HPD_ENABLE(PORT_TC2) | \
ICP_TC_HPD_ENABLE(PORT_TC1))
-#define TGP_DDI_HPD_ENABLE_MASK (TGP_DDIC_HPD_ENABLE | \
- ICP_DDI_HPD_ENABLE_MASK)
+#define TGP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
#define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \
ICP_TC_HPD_ENABLE(PORT_TC5) | \
ICP_TC_HPD_ENABLE_MASK)
@@ -8604,6 +8758,10 @@ enum {
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
+#define POWERGATE_ENABLE _MMIO(0xa210)
+#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3)
+#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4)
+
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
@@ -8841,6 +8999,7 @@ enum {
#define GEN9_SAGV_DISABLE 0x0
#define GEN9_SAGV_IS_DISABLED 0x1
#define GEN9_SAGV_ENABLE 0x3
+#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -9104,6 +9263,10 @@ enum {
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
+#define AUD_FREQ_CNTRL _MMIO(0x65900)
+#define AUD_PIN_BUF_CTL _MMIO(0x48414)
+#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+
/*
* HSW - ICL power wells
*
@@ -9266,12 +9429,20 @@ enum skl_power_gate {
/* HDCP Repeater Registers */
#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_TRANSA_REP_PRESENT BIT(31)
+#define HDCP_TRANSB_REP_PRESENT BIT(30)
+#define HDCP_TRANSC_REP_PRESENT BIT(29)
+#define HDCP_TRANSD_REP_PRESENT BIT(28)
#define HDCP_DDIB_REP_PRESENT BIT(30)
#define HDCP_DDIA_REP_PRESENT BIT(29)
#define HDCP_DDIC_REP_PRESENT BIT(28)
#define HDCP_DDID_REP_PRESENT BIT(27)
#define HDCP_DDIF_REP_PRESENT BIT(26)
#define HDCP_DDIE_REP_PRESENT BIT(25)
+#define HDCP_TRANSA_SHA1_M0 (1 << 20)
+#define HDCP_TRANSB_SHA1_M0 (2 << 20)
+#define HDCP_TRANSC_SHA1_M0 (3 << 20)
+#define HDCP_TRANSD_SHA1_M0 (4 << 20)
#define HDCP_DDIB_SHA1_M0 (1 << 20)
#define HDCP_DDIA_SHA1_M0 (2 << 20)
#define HDCP_DDIC_SHA1_M0 (3 << 20)
@@ -9311,15 +9482,92 @@ enum skl_power_gate {
_PORTE_HDCP_AUTHENC, \
_PORTF_HDCP_AUTHENC) + (x))
#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define _TRANSA_HDCP_CONF 0x66400
+#define _TRANSB_HDCP_CONF 0x66500
+#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
+ _TRANSB_HDCP_CONF)
+#define HDCP_CONF(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_CONF(trans) : \
+ PORT_HDCP_CONF(port))
+
#define HDCP_CONF_CAPTURE_AN BIT(0)
#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define _TRANSA_HDCP_ANINIT 0x66404
+#define _TRANSB_HDCP_ANINIT 0x66504
+#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_ANINIT, \
+ _TRANSB_HDCP_ANINIT)
+#define HDCP_ANINIT(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANINIT(trans) : \
+ PORT_HDCP_ANINIT(port))
+
#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define _TRANSA_HDCP_ANLO 0x66408
+#define _TRANSB_HDCP_ANLO 0x66508
+#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
+ _TRANSB_HDCP_ANLO)
+#define HDCP_ANLO(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANLO(trans) : \
+ PORT_HDCP_ANLO(port))
+
#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define _TRANSA_HDCP_ANHI 0x6640C
+#define _TRANSB_HDCP_ANHI 0x6650C
+#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
+ _TRANSB_HDCP_ANHI)
+#define HDCP_ANHI(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANHI(trans) : \
+ PORT_HDCP_ANHI(port))
+
#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define _TRANSA_HDCP_BKSVLO 0x66410
+#define _TRANSB_HDCP_BKSVLO 0x66510
+#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVLO, \
+ _TRANSB_HDCP_BKSVLO)
+#define HDCP_BKSVLO(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVLO(trans) : \
+ PORT_HDCP_BKSVLO(port))
+
#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define _TRANSA_HDCP_BKSVHI 0x66414
+#define _TRANSB_HDCP_BKSVHI 0x66514
+#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVHI, \
+ _TRANSB_HDCP_BKSVHI)
+#define HDCP_BKSVHI(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVHI(trans) : \
+ PORT_HDCP_BKSVHI(port))
+
#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define _TRANSA_HDCP_RPRIME 0x66418
+#define _TRANSB_HDCP_RPRIME 0x66518
+#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_RPRIME, \
+ _TRANSB_HDCP_RPRIME)
+#define HDCP_RPRIME(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_RPRIME(trans) : \
+ PORT_HDCP_RPRIME(port))
+
#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define _TRANSA_HDCP_STATUS 0x6641C
+#define _TRANSB_HDCP_STATUS 0x6651C
+#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_STATUS, \
+ _TRANSB_HDCP_STATUS)
+#define HDCP_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_STATUS(trans) : \
+ PORT_HDCP_STATUS(port))
+
#define HDCP_STATUS_STREAM_A_ENC BIT(31)
#define HDCP_STATUS_STREAM_B_ENC BIT(30)
#define HDCP_STATUS_STREAM_C_ENC BIT(29)
@@ -9346,23 +9594,44 @@ enum skl_power_gate {
_PORTD_HDCP2_BASE, \
_PORTE_HDCP2_BASE, \
_PORTF_HDCP2_BASE) + (x))
-
-#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
+#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
+#define _TRANSA_HDCP2_AUTH 0x66498
+#define _TRANSB_HDCP2_AUTH 0x66598
+#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
+ _TRANSB_HDCP2_AUTH)
#define AUTH_LINK_AUTHENTICATED BIT(31)
#define AUTH_LINK_TYPE BIT(30)
#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
#define AUTH_CLR_KEYS BIT(18)
-
-#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define HDCP2_AUTH(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH(trans) : \
+ PORT_HDCP2_AUTH(port))
+
+#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define _TRANSA_HDCP2_CTL 0x664B0
+#define _TRANSB_HDCP2_CTL 0x665B0
+#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
+ _TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ BIT(31)
-
-#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
-#define STREAM_ENCRYPTION_STATUS_A BIT(31)
-#define STREAM_ENCRYPTION_STATUS_B BIT(30)
-#define STREAM_ENCRYPTION_STATUS_C BIT(29)
+#define HDCP2_CTL(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_CTL(trans) : \
+ PORT_HDCP2_CTL(port))
+
+#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define _TRANSA_HDCP2_STATUS 0x664B4
+#define _TRANSB_HDCP2_STATUS 0x665B4
+#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STATUS, \
+ _TRANSB_HDCP2_STATUS)
#define LINK_TYPE_STATUS BIT(22)
#define LINK_AUTH_STATUS BIT(21)
#define LINK_ENCRYPTION_STATUS BIT(20)
+#define HDCP2_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STATUS(trans) : \
+ PORT_HDCP2_STATUS(port))
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
@@ -9402,6 +9671,9 @@ enum skl_power_gate {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
+#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
+ REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
@@ -9429,7 +9701,9 @@ enum skl_power_gate {
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
+#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
+#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A)
#define DP_TP_CTL_ENABLE (1 << 31)
#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
@@ -9449,7 +9723,9 @@ enum skl_power_gate {
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
+#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A)
#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
@@ -9604,17 +9880,7 @@ enum skl_power_gate {
#define _TRANSC_MSA_MISC 0x62410
#define _TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
-
-#define TRANS_MSA_SYNC_CLK (1 << 0)
-#define TRANS_MSA_SAMPLING_444 (2 << 1)
-#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
-#define TRANS_MSA_6_BPC (0 << 5)
-#define TRANS_MSA_8_BPC (1 << 5)
-#define TRANS_MSA_10_BPC (2 << 5)
-#define TRANS_MSA_12_BPC (3 << 5)
-#define TRANS_MSA_16_BPC (4 << 5)
-#define TRANS_MSA_CEA_RANGE (1 << 3)
-#define TRANS_MSA_USE_VSC_SDP (1 << 14)
+/* See DP_MSA_MISC_* for the bit definitions */
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
@@ -9655,7 +9921,10 @@ enum skl_power_gate {
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
+#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
@@ -9976,6 +10245,166 @@ enum skl_power_gate {
_TGL_DPLL1_CFGCR1, \
_TGL_TBTPLL_CFGCR1)
+#define _DKL_PHY1_BASE 0x168000
+#define _DKL_PHY2_BASE 0x169000
+#define _DKL_PHY3_BASE 0x16A000
+#define _DKL_PHY4_BASE 0x16B000
+#define _DKL_PHY5_BASE 0x16C000
+#define _DKL_PHY6_BASE 0x16D000
+
+/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
+#define _DKL_PLL_DIV0 0x200
+#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16)
+#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16)
+#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12)
+#define DKL_PLL_DIV0_PROP_COEFF_MASK (0xF << 12)
+#define DKL_PLL_DIV0_FBPREDIV_SHIFT (8)
+#define DKL_PLL_DIV0_FBPREDIV(x) ((x) << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
+#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0)
+#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_DIV0)
+
+#define _DKL_PLL_DIV1 0x204
+#define DKL_PLL_DIV1_IREF_TRIM(x) ((x) << 16)
+#define DKL_PLL_DIV1_IREF_TRIM_MASK (0x1F << 16)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT(x) ((x) << 0)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT_MASK (0xFF << 0)
+#define DKL_PLL_DIV1(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_DIV1)
+
+#define _DKL_PLL_SSC 0x210
+#define DKL_PLL_SSC_IREF_NDIV_RATIO(x) ((x) << 29)
+#define DKL_PLL_SSC_IREF_NDIV_RATIO_MASK (0x7 << 29)
+#define DKL_PLL_SSC_STEP_LEN(x) ((x) << 16)
+#define DKL_PLL_SSC_STEP_LEN_MASK (0xFF << 16)
+#define DKL_PLL_SSC_STEP_NUM(x) ((x) << 11)
+#define DKL_PLL_SSC_STEP_NUM_MASK (0x7 << 11)
+#define DKL_PLL_SSC_EN (1 << 9)
+#define DKL_PLL_SSC(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_SSC)
+
+#define _DKL_PLL_BIAS 0x214
+#define DKL_PLL_BIAS_FRAC_EN_H (1 << 30)
+#define DKL_PLL_BIAS_FBDIV_SHIFT (8)
+#define DKL_PLL_BIAS_FBDIV_FRAC(x) ((x) << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS_FBDIV_FRAC_MASK (0x3FFFFF << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_BIAS)
+
+#define _DKL_PLL_TDC_COLDST_BIAS 0x218
+#define DKL_PLL_TDC_SSC_STEP_SIZE(x) ((x) << 8)
+#define DKL_PLL_TDC_SSC_STEP_SIZE_MASK (0xFF << 8)
+#define DKL_PLL_TDC_FEED_FWD_GAIN(x) ((x) << 0)
+#define DKL_PLL_TDC_FEED_FWD_GAIN_MASK (0xFF << 0)
+#define DKL_PLL_TDC_COLDST_BIAS(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_TDC_COLDST_BIAS)
+
+#define _DKL_REFCLKIN_CTL 0x12C
+/* Bits are the same as MG_REFCLKIN_CTL */
+#define DKL_REFCLKIN_CTL(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_REFCLKIN_CTL)
+
+#define _DKL_CLKTOP2_HSCLKCTL 0xD4
+/* Bits are the same as MG_CLKTOP2_HSCLKCTL */
+#define DKL_CLKTOP2_HSCLKCTL(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CLKTOP2_HSCLKCTL)
+
+#define _DKL_CLKTOP2_CORECLKCTL1 0xD8
+/* Bits are the same as MG_CLKTOP2_CORECLKCTL1 */
+#define DKL_CLKTOP2_CORECLKCTL1(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CLKTOP2_CORECLKCTL1)
+
+#define _DKL_TX_DPCNTL0 0x2C0
+#define DKL_TX_PRESHOOT_COEFF(x) ((x) << 13)
+#define DKL_TX_PRESHOOT_COEFF_MASK (0x1f << 13)
+#define DKL_TX_DE_EMPHASIS_COEFF(x) ((x) << 8)
+#define DKL_TX_DE_EMPAHSIS_COEFF_MASK (0x1f << 8)
+#define DKL_TX_VSWING_CONTROL(x) ((x) << 0)
+#define DKL_TX_VSWING_CONTROL_MASK (0x7 << 0)
+#define DKL_TX_DPCNTL0(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL0)
+
+#define _DKL_TX_DPCNTL1 0x2C4
+/* Bits are the same as DKL_TX_DPCNTL0 */
+#define DKL_TX_DPCNTL1(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL1)
+
+#define _DKL_TX_DPCNTL2 0x2C8
+#define DKL_TX_DP20BITMODE (1 << 2)
+#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL2)
+
+#define _DKL_TX_FW_CALIB 0x2F8
+#define DKL_TX_CFG_DISABLE_WAIT_INIT (1 << 7)
+#define DKL_TX_FW_CALIB(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_FW_CALIB)
+
+#define _DKL_TX_PMD_LANE_SUS 0xD00
+#define DKL_TX_PMD_LANE_SUS(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_PMD_LANE_SUS)
+
+#define _DKL_TX_DW17 0xDC4
+#define DKL_TX_DW17(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DW17)
+
+#define _DKL_TX_DW18 0xDC8
+#define DKL_TX_DW18(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DW18)
+
+#define _DKL_DP_MODE 0xA0
+#define DKL_DP_MODE(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_DP_MODE)
+
+#define _DKL_CMN_UC_DW27 0x36C
+#define DKL_CMN_UC_DW27_UC_HEALTH (0x1 << 15)
+#define DKL_CMN_UC_DW_27(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CMN_UC_DW27)
+
+/*
+ * Each Dekel PHY is addressed through a 4KB aperture. Each PHY has more than
+ * 4KB of register space, so a separate index is programmed in HIP_INDEX_REG0
+ * or HIP_INDEX_REG1, based on the port number, to set the upper 2 address
+ * bits that point the 4KB window into the full PHY register space.
+ */
+#define _HIP_INDEX_REG0 0x1010A0
+#define _HIP_INDEX_REG1 0x1010A4
+#define HIP_INDEX_REG(tc_port) _MMIO((tc_port) < 4 ? _HIP_INDEX_REG0 \
+ : _HIP_INDEX_REG1)
+#define _HIP_INDEX_SHIFT(tc_port) (8 * ((tc_port) % 4))
+#define HIP_INDEX_VAL(tc_port, val) ((val) << _HIP_INDEX_SHIFT(tc_port))
+
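As a rough illustration of the paged access scheme described in the comment above, a caller programs the window index for the target register block before touching the per-PHY registers. The sketch below is not part of the patch; it assumes window index 2 covers the 0x200 PLL block and uses the intel_uncore accessors purely for illustration.

	/*
	 * Illustrative sketch only: read a Dekel PLL register through the
	 * paged 4KB window.  The index value (0x2) is an assumption for the
	 * 0x200 PLL block; the low bits of the define are then the offset
	 * inside the aperture.
	 */
	static u32 dkl_pll_div0_read(struct drm_i915_private *i915,
				     enum tc_port tc_port)
	{
		struct intel_uncore *uncore = &i915->uncore;

		/* Point this PHY's 4KB window at the PLL register block. */
		intel_uncore_write(uncore, HIP_INDEX_REG(tc_port),
				   HIP_INDEX_VAL(tc_port, 0x2));

		return intel_uncore_read(uncore, DKL_PLL_DIV0(tc_port));
	}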
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -9990,6 +10419,8 @@ enum skl_power_gate {
/* GEN9 DC */
#define DC_STATE_EN _MMIO(0x45504)
#define DC_STATE_DISABLE 0
+#define DC_STATE_EN_DC3CO REG_BIT(30)
+#define DC_STATE_DC3CO_STATUS REG_BIT(29)
#define DC_STATE_EN_UPTO_DC5 (1 << 0)
#define DC_STATE_EN_DC9 (1 << 3)
#define DC_STATE_EN_UPTO_DC6 (2 << 0)
@@ -10118,11 +10549,11 @@ enum skl_power_gate {
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
-#define ICL_CSC_ENABLE (1 << 31)
-#define ICL_OUTPUT_CSC_ENABLE (1 << 30)
-#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
-#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
-#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define ICL_CSC_ENABLE (1 << 31) /* icl+ */
+#define ICL_OUTPUT_CSC_ENABLE (1 << 30) /* icl+ */
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2) /* ilk/snb */
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1) /* pre-glk */
+#define CSC_MODE_YUV_TO_RGB (1 << 0) /* ilk/snb */
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
@@ -10238,6 +10669,9 @@ enum skl_power_gate {
#define _PAL_PREC_GC_MAX_A 0x4A410
#define _PAL_PREC_GC_MAX_B 0x4AC10
#define _PAL_PREC_GC_MAX_C 0x4B410
+#define PREC_PAL_DATA_RED_MASK REG_GENMASK(29, 20)
+#define PREC_PAL_DATA_GREEN_MASK REG_GENMASK(19, 10)
+#define PREC_PAL_DATA_BLUE_MASK REG_GENMASK(9, 0)
#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
@@ -10290,6 +10724,9 @@ enum skl_power_gate {
#define CGM_PIPE_MODE_GAMMA (1 << 2)
#define CGM_PIPE_MODE_CSC (1 << 1)
#define CGM_PIPE_MODE_DEGAMMA (1 << 0)
+#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0)
+#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16)
+#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0)
#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900)
#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904)
@@ -11537,16 +11974,31 @@ enum skl_power_gate {
#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
#define MODULAR_FIA_MASK (1 << 4)
-#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
-#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
-#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
-#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
-#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
+#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
+#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
+#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
-#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
+#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
-#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
+#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
+#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
+#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
+#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+
+/* This register controls the Display State Buffer (DSB) engines. */
+#define _DSBSL_INSTANCE_BASE 0x70B00
+#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
+						 (pipe) * 0x1000 + (id) * 0x100)
+#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
+#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
+#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
+#define DSB_ENABLE (1 << 31)
+#define DSB_STATUS (1 << 0)
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index bc828a9ace84..bbd71af00a91 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -31,6 +31,8 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
+#include "gt/intel_rps.h"
#include "i915_active.h"
#include "i915_drv.h"
@@ -169,16 +171,17 @@ remove_from_client(struct i915_request *request)
{
struct drm_i915_file_private *file_priv;
- file_priv = READ_ONCE(request->file_priv);
- if (!file_priv)
+ if (!READ_ONCE(request->file_priv))
return;
- spin_lock(&file_priv->mm.lock);
- if (request->file_priv) {
+ rcu_read_lock();
+ file_priv = xchg(&request->file_priv, NULL);
+ if (file_priv) {
+ spin_lock(&file_priv->mm.lock);
list_del(&request->client_link);
- request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
}
- spin_unlock(&file_priv->mm.lock);
+ rcu_read_unlock();
}
static void free_capture_list(struct i915_request *request)
@@ -205,21 +208,18 @@ static void remove_from_engine(struct i915_request *rq)
* check that the rq still belongs to the newly locked engine.
*/
locked = READ_ONCE(rq->engine);
- spin_lock(&locked->active.lock);
+ spin_lock_irq(&locked->active.lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
spin_unlock(&locked->active.lock);
spin_lock(&engine->active.lock);
locked = engine;
}
list_del(&rq->sched.link);
- spin_unlock(&locked->active.lock);
+ spin_unlock_irq(&locked->active.lock);
}
-static bool i915_request_retire(struct i915_request *rq)
+bool i915_request_retire(struct i915_request *rq)
{
- struct i915_active_request *active, *next;
-
- lockdep_assert_held(&rq->timeline->mutex);
if (!i915_request_completed(rq))
return false;
@@ -240,41 +240,11 @@ static bool i915_request_retire(struct i915_request *rq)
* Note this requires that we are always called in request
* completion order.
*/
- GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
+ GEM_BUG_ON(!list_is_first(&rq->link,
+ &i915_request_timeline(rq)->requests));
rq->ring->head = rq->postfix;
/*
- * Walk through the active list, calling retire on each. This allows
- * objects to track their GPU activity and mark themselves as idle
- * when their *last* active request is completed (updating state
- * tracking lists for eviction, active references for GEM, etc).
- *
- * As the ->retire() may free the node, we decouple it first and
- * pass along the auxiliary information (to avoid dereferencing
- * the node after the callback).
- */
- list_for_each_entry_safe(active, next, &rq->active_list, link) {
- /*
- * In microbenchmarks or focusing upon time inside the kernel,
- * we may spend an inordinate amount of time simply handling
- * the retirement of requests and processing their callbacks.
- * Of which, this loop itself is particularly hot due to the
- * cache misses when jumping around the list of
- * i915_active_request. So we try to keep this loop as
- * streamlined as possible and also prefetch the next
- * i915_active_request to try and hide the likely cache miss.
- */
- prefetchw(next);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, rq);
- }
-
- local_irq_disable();
-
- /*
* We only loosely track inflight requests across preemption,
* and so we may find ourselves attempting to retire a _completed_
* request that we have removed from the HW and put back on a run
@@ -282,24 +252,22 @@ static bool i915_request_retire(struct i915_request *rq)
*/
remove_from_engine(rq);
- spin_lock(&rq->lock);
+ spin_lock_irq(&rq->lock);
i915_request_mark_complete(rq);
if (!i915_request_signaled(rq))
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
i915_request_cancel_breadcrumb(rq);
if (i915_request_has_waitboost(rq)) {
- GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
- atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+ GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ atomic_dec(&rq->engine->gt->rps.num_waiters);
}
if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
__notify_execute_cb(rq);
}
GEM_BUG_ON(!list_empty(&rq->execute_cb));
- spin_unlock(&rq->lock);
-
- local_irq_enable();
+ spin_unlock_irq(&rq->lock);
remove_from_client(rq);
list_del(&rq->link);
@@ -316,7 +284,7 @@ static bool i915_request_retire(struct i915_request *rq)
void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_timeline * const tl = rq->timeline;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *tmp;
GEM_TRACE("%s fence %llx:%lld, current %d\n",
@@ -324,7 +292,6 @@ void i915_request_retire_upto(struct i915_request *rq)
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq));
- lockdep_assert_held(&tl->mutex);
GEM_BUG_ON(!i915_request_completed(rq));
do {
@@ -680,9 +647,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->gem_context = ce->gem_context;
rq->engine = ce->engine;
rq->ring = ce->ring;
- rq->timeline = tl;
+ rq->execution_mask = ce->engine->mask;
+
+ rcu_assign_pointer(rq->timeline, tl);
rq->hwsp_seqno = tl->hwsp_seqno;
rq->hwsp_cacheline = tl->hwsp_cacheline;
+
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
spin_lock_init(&rq->lock);
@@ -700,9 +670,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->batch = NULL;
rq->capture_list = NULL;
rq->flags = 0;
- rq->execution_mask = ALL_ENGINES;
- INIT_LIST_HEAD(&rq->active_list);
INIT_LIST_HEAD(&rq->execute_cb);
/*
@@ -741,7 +709,6 @@ err_unwind:
ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
- GEM_BUG_ON(!list_empty(&rq->active_list));
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
@@ -786,16 +753,43 @@ err_unlock:
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- if (list_is_first(&signal->link, &signal->timeline->requests))
- return 0;
+ struct intel_timeline *tl;
+ struct dma_fence *fence;
+ int err;
- signal = list_prev_entry(signal, link);
- if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
+ GEM_BUG_ON(i915_request_timeline(rq) ==
+ rcu_access_pointer(signal->timeline));
+
+ rcu_read_lock();
+ tl = rcu_dereference(signal->timeline);
+ if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+ if (!tl) /* already started or maybe even completed */
return 0;
- return i915_sw_fence_await_dma_fence(&rq->submit,
- &signal->fence, 0,
- I915_FENCE_GFP);
+ fence = ERR_PTR(-EBUSY);
+ if (mutex_trylock(&tl->mutex)) {
+ fence = NULL;
+ if (!i915_request_started(signal) &&
+ !list_is_first(&signal->link, &tl->requests)) {
+ signal = list_prev_entry(signal, link);
+ fence = dma_fence_get(&signal->fence);
+ }
+ mutex_unlock(&tl->mutex);
+ }
+ intel_timeline_put(tl);
+ if (IS_ERR_OR_NULL(fence))
+ return PTR_ERR_OR_ZERO(fence);
+
+ err = 0;
+ if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ err = i915_sw_fence_await_dma_fence(&rq->submit,
+ fence, 0,
+ I915_FENCE_GFP);
+ dma_fence_put(fence);
+
+ return err;
}
static intel_engine_mask_t
@@ -821,34 +815,33 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
+ const int has_token = INTEL_GEN(to->i915) >= 12;
u32 hwsp_offset;
+ int len;
u32 *cs;
- int err;
- GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
/* Just emit the first semaphore we see as request space is limited. */
if (already_busywaiting(to) & from->engine->mask)
- return i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
+ goto await_fence;
- err = i915_request_await_start(to, from);
- if (err < 0)
- return err;
+ if (i915_request_await_start(to, from) < 0)
+ goto await_fence;
/* Only submit our spinner after the signaler is running! */
- err = __i915_request_await_execution(to, from, NULL, gfp);
- if (err)
- return err;
+ if (__i915_request_await_execution(to, from, NULL, gfp))
+ goto await_fence;
/* We need to pin the signaler's HWSP until we are finished reading. */
- err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
- if (err)
- return err;
+ if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
+ goto await_fence;
+
+ len = 4;
+ if (has_token)
+ len += 2;
- cs = intel_ring_begin(to, 4);
+ cs = intel_ring_begin(to, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -860,18 +853,28 @@ emit_semaphore_wait(struct i915_request *to,
* (post-wrap) values than they were expecting (and so wait
* forever).
*/
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD) +
+ has_token;
*cs++ = from->fence.seqno;
*cs++ = hwsp_offset;
*cs++ = 0;
+ if (has_token) {
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ }
intel_ring_advance(to, cs);
to->sched.semaphores |= from->engine->mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
+
+await_fence:
+ return i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
}
static int
@@ -955,21 +958,23 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
/* Squash repeated waits to the same timelines */
if (fence->context &&
- intel_timeline_sync_is_later(rq->timeline, fence))
+ intel_timeline_sync_is_later(i915_request_timeline(rq),
+ fence))
continue;
if (dma_fence_is_i915(fence))
ret = i915_request_await_request(rq, to_request(fence));
else
ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- I915_FENCE_TIMEOUT,
+ fence->context ? I915_FENCE_TIMEOUT : 0,
I915_FENCE_GFP);
if (ret < 0)
return ret;
/* Record the latest fence used against each timeline */
if (fence->context)
- intel_timeline_sync_set(rq->timeline, fence);
+ intel_timeline_sync_set(i915_request_timeline(rq),
+ fence);
} while (--nchild);
return 0;
@@ -1111,7 +1116,7 @@ void i915_request_skip(struct i915_request *rq, int error)
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
- struct intel_timeline *timeline = rq->timeline;
+ struct intel_timeline *timeline = i915_request_timeline(rq);
struct i915_request *prev;
/*
@@ -1134,8 +1139,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
- prev = rcu_dereference_protected(timeline->last_request.request,
- lockdep_is_held(&timeline->mutex));
+ prev = to_request(__i915_active_fence_set(&timeline->last_request,
+ &rq->fence));
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1160,7 +1165,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
- __i915_active_request_set(&timeline->last_request, rq);
return prev;
}
@@ -1224,7 +1228,7 @@ void __i915_request_queue(struct i915_request *rq,
void i915_request_add(struct i915_request *rq)
{
struct i915_sched_attr attr = rq->gem_context->sched;
- struct intel_timeline * const tl = rq->timeline;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *prev;
lockdep_assert_held(&tl->mutex);
@@ -1279,7 +1283,9 @@ void i915_request_add(struct i915_request *rq)
* work on behalf of others -- but instead we should benefit from
* improved resource management. (Well, that's the theory at least.)
*/
- if (prev && i915_request_completed(prev) && prev->timeline == tl)
+ if (prev &&
+ i915_request_completed(prev) &&
+ rcu_access_pointer(prev->timeline) == tl)
i915_request_retire_upto(prev);
mutex_unlock(&tl->mutex);
@@ -1442,7 +1448,7 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (CONFIG_DRM_I915_SPIN_REQUEST &&
+ if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
__i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
dma_fence_signal(&rq->fence);
goto out;
@@ -1462,7 +1468,7 @@ long i915_request_wait(struct i915_request *rq,
*/
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq);
+ intel_rps_boost(rq);
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
}
@@ -1488,6 +1494,7 @@ long i915_request_wait(struct i915_request *rq,
break;
}
+ intel_engine_flush_submission(rq->engine);
timeout = io_schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
@@ -1500,48 +1507,6 @@ out:
return timeout;
}
-bool i915_retire_requests(struct drm_i915_private *i915)
-{
- struct intel_gt_timelines *timelines = &i915->gt.timelines;
- struct intel_timeline *tl, *tn;
- unsigned long flags;
- LIST_HEAD(free);
-
- spin_lock_irqsave(&timelines->lock, flags);
- list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
- if (!mutex_trylock(&tl->mutex))
- continue;
-
- intel_timeline_get(tl);
- GEM_BUG_ON(!tl->active_count);
- tl->active_count++; /* pin the list element */
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- retire_requests(tl);
-
- spin_lock_irqsave(&timelines->lock, flags);
-
- /* Resume iteration after dropping lock */
- list_safe_reset_next(tl, tn, link);
- if (!--tl->active_count)
- list_del(&tl->link);
-
- mutex_unlock(&tl->mutex);
-
- /* Defer the final release to after the spinlock */
- if (refcount_dec_and_test(&tl->kref.refcount)) {
- GEM_BUG_ON(tl->active_count);
- list_add(&tl->link, &free);
- }
- }
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- list_for_each_entry_safe(tl, tn, &free, link)
- __intel_timeline_free(&tl->kref);
-
- return !list_empty(&timelines->active_list);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index e4dd013761e8..96991d64759c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -113,7 +113,7 @@ struct i915_request {
struct intel_engine_cs *engine;
struct intel_context *hw_context;
struct intel_ring *ring;
- struct intel_timeline *timeline;
+ struct intel_timeline __rcu *timeline;
struct list_head signal_link;
/*
@@ -211,14 +211,14 @@ struct i915_request {
* on the active_list (of their final request).
*/
struct i915_capture_list *capture_list;
- struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
unsigned long flags;
-#define I915_REQUEST_WAITBOOST BIT(0)
-#define I915_REQUEST_NOPREEMPT BIT(1)
+#define I915_REQUEST_WAITBOOST BIT(0)
+#define I915_REQUEST_NOPREEMPT BIT(1)
+#define I915_REQUEST_SENTINEL BIT(2)
/** timeline->request entry for this request */
struct list_head link;
@@ -251,6 +251,7 @@ struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
+bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
static inline struct i915_request *
@@ -309,10 +310,8 @@ long i915_request_wait(struct i915_request *rq,
long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
-#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
-#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline bool i915_request_signaled(const struct i915_request *rq)
{
@@ -442,6 +441,29 @@ static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
}
-bool i915_retire_requests(struct drm_i915_private *i915);
+static inline bool i915_request_has_sentinel(const struct i915_request *rq)
+{
+ return unlikely(rq->flags & I915_REQUEST_SENTINEL);
+}
+
+static inline struct intel_timeline *
+i915_request_timeline(struct i915_request *rq)
+{
+ /* Valid only while the request is being constructed (or retired). */
+ return rcu_dereference_protected(rq->timeline,
+ lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
+}
+
+static inline struct intel_timeline *
+i915_request_active_timeline(struct i915_request *rq)
+{
+ /*
+ * When in use during submission, we are protected by a guarantee that
+ * the context/timeline is pinned and must remain pinned until after
+ * this submission.
+ */
+ return rcu_dereference_protected(rq->timeline,
+ lockdep_is_held(&rq->engine->active.lock));
+}
#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index 6617963df9ed..b7b59328cb76 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -67,15 +67,15 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
}
/**
- * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
- * @__dmap: DMA address (output)
+ * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
+ * @__dp: Device address (output)
* @__iter: 'struct sgt_iter' (iterator state, internal)
* @__sgt: sg_table to iterate over (input)
* @__step: step size
*/
-#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \
+#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
- ((__dmap) = (__iter).dma + (__iter).curr); \
+ ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \
(((__iter).curr += (__step)) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 3eba8a2b39c2..010d67f48ad9 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -211,10 +211,7 @@ static void kick_submission(struct intel_engine_cs *engine,
/*
* If we are already the currently executing context, don't
- * bother evaluating if we should preempt ourselves, or if
- * we expect nothing to change as a result of running the
- * tasklet, i.e. we have not change the priority queue
- * sufficiently to oust the running context.
+ * bother evaluating if we should preempt ourselves.
*/
if (inflight->hw_context == rq->hw_context)
goto unlock;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 7eefccff39bf..07d243acf553 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -52,22 +52,4 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
-static inline bool i915_scheduler_need_preempt(int prio, int active)
-{
- /*
- * Allow preemption of low -> normal -> high, but we do
- * not allow low priority tasks to preempt other low priority
- * tasks under the impression that latency for low priority
- * tasks does not matter (as much as background throughput),
- * so kiss.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- return prio > max(I915_PRIORITY_NORMAL - 1, active);
-}
-
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index aad81acba9dc..d18e70550054 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -49,6 +49,15 @@ struct i915_sched_attr {
* DAG of each request, we are able to insert it into a sorted queue when it
* is ready, and are able to reorder its portion of the graph to accommodate
* dynamic priority changes.
+ *
+ * Ok, there is now one active element to the "scheduler" in the backends.
+ * We let a new context run for a small amount of time before re-evaluating
+ * the run order. As we re-evaluate, we maintain the strict ordering of
+ * dependencies, but attempt to rotate the active contexts (the current context
+ * is put to the back of its priority queue, and its dependents are reshuffled).
+ * This provides minimal timeslicing and prevents a userspace hog (e.g.
+ * something waiting on a user semaphore [VkEvent]) from denying service to
+ * others.
*/
struct i915_sched_node {
struct list_head signalers_list; /* those before us, we depend upon */
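The comment added in this hunk describes minimal timeslicing: on expiry the running context is requeued behind its equal-priority peers while dependency ordering is preserved. The sketch below shows only the shape of that rotation policy; it is not the execlists backend, and "slice_node" is a hypothetical stand-in for a runnable context.

	#include <linux/list.h>

	/* Hypothetical runnable element at a given priority. */
	struct slice_node {
		struct list_head link;
		int prio;
	};

	/*
	 * On timeslice expiry, move the running node behind its equal-priority
	 * peers so the next node of the same (or higher) priority runs next,
	 * without ever jumping ahead of anything it depends on.
	 */
	static void rotate_on_expiry(struct list_head *queue,
				     struct slice_node *running)
	{
		struct slice_node *node, *insert_before = NULL;

		list_del(&running->link);

		list_for_each_entry(node, queue, link) {
			if (node->prio < running->prio) {
				insert_before = node;
				break;
			}
		}

		if (insert_before)
			list_add_tail(&running->link, &insert_before->link);
		else
			list_add_tail(&running->link, queue);
	}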
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8508a01ad8b9..8812cdd9007f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -28,6 +28,7 @@
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
+#include "display/intel_vga.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -57,7 +58,7 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev_priv);
+ intel_vga_redisable(dev_priv);
}
int i915_save_state(struct drm_i915_private *dev_priv)
@@ -65,8 +66,6 @@ int i915_save_state(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
i915_save_display(dev_priv);
if (IS_GEN(dev_priv, 4))
@@ -100,8 +99,6 @@ int i915_save_state(struct drm_i915_private *dev_priv)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
return 0;
}
@@ -110,8 +107,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
if (IS_GEN(dev_priv, 4))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
@@ -145,8 +140,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
intel_gmbus_reset(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
new file mode 100644
index 000000000000..39c79e1c5b52
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+#include "i915_switcheroo.h"
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+ if (!i915) {
+ dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
+ return;
+ }
+
+ if (state == VGA_SWITCHEROO_ON) {
+ pr_info("switched on\n");
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(pdev, PCI_D0);
+ i915_resume_switcheroo(i915);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
+ } else {
+ pr_info("switched off\n");
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend_switcheroo(i915, pmm);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return i915 && i915->drm.open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
+
+int i915_switcheroo_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
+}
+
+void i915_switcheroo_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ vga_switcheroo_unregister_client(pdev);
+}
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.h b/drivers/gpu/drm/i915/i915_switcheroo.h
new file mode 100644
index 000000000000..59b6c1e07d75
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_switcheroo.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SWITCHEROO__
+#define __I915_SWITCHEROO__
+
+struct drm_i915_private;
+
+int i915_switcheroo_register(struct drm_i915_private *i915);
+void i915_switcheroo_unregister(struct drm_i915_private *i915);
+
+#endif /* __I915_SWITCHEROO__ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d8a3b180c084..65476909d1bf 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -30,6 +30,9 @@
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
@@ -49,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
u64 res = 0;
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- res = intel_rc6_residency_us(dev_priv, reg);
+ res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@@ -142,12 +145,12 @@ static const struct attribute_group media_rc6_attr_group = {
};
#endif
-static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
- if (!HAS_L3_DPF(dev_priv))
+ if (!HAS_L3_DPF(i915))
return -EPERM;
- if (offset % 4 != 0)
+ if (!IS_ALIGNED(offset, sizeof(u32)))
return -EINVAL;
if (offset >= GEN7_L3LOG_SIZE)
@@ -162,31 +165,24 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
int ret;
- count = round_down(count, 4);
-
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
+ count = round_down(count, sizeof(u32));
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+ memset(buf, 0, count);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- if (dev_priv->l3_parity.remap_info[slice])
+ spin_lock(&i915->gem.contexts.lock);
+ if (i915->l3_parity.remap_info[slice])
memcpy(buf,
- dev_priv->l3_parity.remap_info[slice] + (offset/4),
+ i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
count);
- else
- memset(buf, 0, count);
-
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock(&i915->gem.contexts.lock);
return count;
}
@@ -197,46 +193,49 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_gem_context *ctx;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
- u32 **remap_info;
+ u32 *remap_info, *freeme = NULL;
+ struct i915_gem_context *ctx;
int ret;
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ if (count < sizeof(u32))
+ return -EINVAL;
- remap_info = &dev_priv->l3_parity.remap_info[slice];
- if (!*remap_info) {
- *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
- if (!*remap_info) {
- ret = -ENOMEM;
- goto out;
- }
+ remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+ if (!remap_info)
+ return -ENOMEM;
+
+ spin_lock(&i915->gem.contexts.lock);
+
+ if (i915->l3_parity.remap_info[slice]) {
+ freeme = remap_info;
+ remap_info = i915->l3_parity.remap_info[slice];
+ } else {
+ i915->l3_parity.remap_info[slice] = remap_info;
}
- /* TODO: Ideally we really want a GPU reset here to make sure errors
- * aren't propagated. Since I cannot find a stable way to reset the GPU
- * at this point it is left as a TODO.
- */
- memcpy(*remap_info + (offset/4), buf, count);
+ count = round_down(count, sizeof(u32));
+ memcpy(remap_info + offset / sizeof(u32), buf, count);
/* NB: We defer the remapping until we switch to the context */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link)
- ctx->remap_slice |= (1<<slice);
+ list_for_each_entry(ctx, &i915->gem.contexts.list, link)
+ ctx->remap_slice |= BIT(slice);
- ret = count;
+ spin_unlock(&i915->gem.contexts.lock);
+ kfree(freeme);
-out:
- mutex_unlock(&dev->struct_mutex);
+ /*
+ * TODO: Ideally we really want a GPU reset here to make sure errors
+ * aren't propagated. Since I cannot find a stable way to reset the GPU
+ * at this point it is left as a TODO.
+ */
- return ret;
+ return count;
}
static const struct bin_attribute dpf_attrs = {
@@ -261,6 +260,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
intel_wakeref_t wakeref;
u32 freq;
@@ -273,31 +273,31 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
freq = (freq >> 8) & 0xff;
} else {
- freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
+ freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1));
}
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
+ return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
}
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.boost_freq));
+ intel_gpu_freq(rps, rps->boost_freq));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -305,7 +305,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
bool boost = false;
ssize_t ret;
u32 val;
@@ -315,7 +315,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
return ret;
/* Validate against (static) hardware limits */
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
@@ -335,19 +335,19 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.max_freq_softlimit));
+ intel_gpu_freq(rps, rps->max_freq_softlimit));
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -355,19 +355,17 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_wakeref_t wakeref;
- u32 val;
+ struct intel_rps *rps = &dev_priv->gt.rps;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
@@ -377,7 +375,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > rps->rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
- intel_gpu_freq(dev_priv, val));
+ intel_gpu_freq(rps, val));
rps->max_freq_softlimit = val;
@@ -385,14 +383,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- /* We still need *_set_rps to process the new max_delay and
+ /*
+ * We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- ret = intel_set_rps(dev_priv, val);
+ * frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -400,10 +399,10 @@ unlock:
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.min_freq_softlimit));
+ intel_gpu_freq(rps, rps->min_freq_softlimit));
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -411,19 +410,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_wakeref_t wakeref;
- u32 val;
+ struct intel_rps *rps = &dev_priv->gt.rps;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
@@ -437,14 +434,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- /* We still need *_set_rps to process the new min_delay and
+ /*
+ * We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- ret = intel_set_rps(dev_priv, val);
+ * frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -466,15 +464,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->rp0_freq);
+ val = intel_gpu_freq(rps, rps->rp0_freq);
else if (attr == &dev_attr_gt_RP1_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->rp1_freq);
+ val = intel_gpu_freq(rps, rps->rp1_freq);
else if (attr == &dev_attr_gt_RPn_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->min_freq);
+ val = intel_gpu_freq(rps, rps->min_freq);
else
BUG();
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 24f2944da09d..7ef7a1e1664c 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -665,7 +665,6 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -675,7 +674,6 @@ TRACE_EVENT(i915_request_queue,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -683,10 +681,9 @@ TRACE_EVENT(i915_request_queue,
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->flags)
+ __entry->ctx, __entry->seqno, __entry->flags)
);
DECLARE_EVENT_CLASS(i915_request,
@@ -695,7 +692,6 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -704,16 +700,15 @@ DECLARE_EVENT_CLASS(i915_request,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno)
+ __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -738,7 +733,6 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -749,7 +743,6 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -758,9 +751,9 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->ctx, __entry->seqno,
__entry->prio, __entry->port)
);
@@ -770,7 +763,6 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -780,7 +772,6 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -788,10 +779,9 @@ TRACE_EVENT(i915_request_out,
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->completed)
+ __entry->ctx, __entry->seqno, __entry->completed)
);
#else
@@ -829,7 +819,6 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -845,7 +834,6 @@ TRACE_EVENT(i915_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -853,9 +841,9 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->ctx, __entry->seqno,
__entry->flags)
);
@@ -958,19 +946,17 @@ DECLARE_EVENT_CLASS(i915_context,
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_gem_context *, ctx)
- __field(u32, hw_id)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
- __entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->vm;
+ __entry->vm = rcu_access_pointer(ctx->vm);
),
- TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
- __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
+ TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ctx, __entry->vm)
)
DEFINE_EVENT(i915_context, i915_context_create,
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 16acdf7bdbe6..0348c6d0ef5f 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -54,25 +54,54 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_probe_fail_count;
-int __i915_inject_load_error(struct drm_i915_private *i915, int err,
- const char *func, int line)
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line)
{
- if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
+ if (i915_probe_fail_count >= i915_modparams.inject_probe_failure)
return 0;
- if (++i915_probe_fail_count < i915_modparams.inject_load_failure)
+ if (++i915_probe_fail_count < i915_modparams.inject_probe_failure)
return 0;
__i915_printk(i915, KERN_INFO,
"Injecting failure %d at checkpoint %u [%s:%d]\n",
- err, i915_modparams.inject_load_failure, func, line);
- i915_modparams.inject_load_failure = 0;
+ err, i915_modparams.inject_probe_failure, func, line);
+ i915_modparams.inject_probe_failure = 0;
return err;
}
bool i915_error_injected(void)
{
- return i915_probe_fail_count && !i915_modparams.inject_load_failure;
+ return i915_probe_fail_count && !i915_modparams.inject_probe_failure;
}
#endif
+
+void cancel_timer(struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return;
+
+ del_timer(t);
+ WRITE_ONCE(t->expires, 0);
+}
+
+void set_timer_ms(struct timer_list *t, unsigned long timeout)
+{
+ if (!timeout) {
+ cancel_timer(t);
+ return;
+ }
+
+ timeout = msecs_to_jiffies_timeout(timeout);
+
+ /*
+ * Paranoia to make sure the compiler computes the timeout before
+ * loading 'jiffies' as jiffies is volatile and may be updated in
+ * the background by a timer tick. All to reduce the complexity
+ * of the addition and reduce the risk of losing a jiffie.
+ */
+ barrier();
+
+ mod_timer(t, jiffies + timeout);
+}
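A brief usage sketch for the two helpers above, paired with timer_expired() added to i915_utils.h further down. The watchdog structure and callback are hypothetical, for illustration only.

	#include <linux/timer.h>

	struct example_watchdog {
		struct timer_list timer;	/* hypothetical user of the helpers */
	};

	static void example_watchdog_fire(struct timer_list *t)
	{
		/* after firing, timer_expired(&wd->timer) reports true until re-armed */
	}

	static void example_watchdog_init(struct example_watchdog *wd)
	{
		timer_setup(&wd->timer, example_watchdog_fire, 0);
	}

	static void example_watchdog_arm(struct example_watchdog *wd, unsigned long ms)
	{
		/* ms == 0 cancels via cancel_timer(); otherwise arm for >= ms */
		set_timer_ms(&wd->timer, ms);
	}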
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 562f756da421..04139ba1191e 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -32,6 +32,7 @@
#include <linux/workqueue.h>
struct drm_i915_private;
+struct timer_list;
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -60,20 +61,20 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-int __i915_inject_load_error(struct drm_i915_private *i915, int err,
- const char *func, int line);
-#define i915_inject_load_error(_i915, _err) \
- __i915_inject_load_error((_i915), (_err), __func__, __LINE__)
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line);
+#define i915_inject_probe_error(_i915, _err) \
+ __i915_inject_probe_error((_i915), (_err), __func__, __LINE__)
bool i915_error_injected(void);
#else
-#define i915_inject_load_error(_i915, _err) 0
+#define i915_inject_probe_error(_i915, _err) 0
#define i915_error_injected() false
#endif
-#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV)
+#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)
#define i915_probe_error(i915, fmt, ...) \
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
@@ -421,4 +422,25 @@ static inline void add_taint_for_CI(unsigned int taint)
add_taint(taint, LOCKDEP_STILL_OK);
}
+void cancel_timer(struct timer_list *t);
+void set_timer_ms(struct timer_list *t, unsigned long timeout);
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires) && !timer_pending(t);
+}
+
+/*
+ * This is a lookalike for IS_ENABLED() that takes a kconfig value,
+ * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero
+ * i.e. whether the configuration is active. Wrapping up the config inside
+ * a boolean context prevents clang and smatch from complaining about potential
+ * issues in confusing logical-&& with bitwise-& for constants.
+ *
+ * Sadly IS_ENABLED() itself does not work with kconfig values.
+ *
+ * Returns 0 if @config is 0, 1 if set to any value.
+ */
+#define IS_ACTIVE(config) ((config) != 0)
+
#endif /* !__I915_UTILS_H */
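The intended use of IS_ACTIVE() is the one already visible in the i915_request_wait() hunk earlier in this diff: guard a code path on a kconfig *value* without tripping clang/smatch warnings about mixing logical and bitwise operators on constants.

	/* As used in i915_request_wait() above: */
	if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
	    __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST))
		dma_fence_signal(&rq->fence);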
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e0e677b2a3a9..e5512f26e20a 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
@@ -90,6 +91,7 @@ static int __i915_vma_active(struct i915_active *ref)
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
+__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
@@ -104,21 +106,21 @@ vma_create(struct drm_i915_gem_object *obj,
struct rb_node *rb, **p;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- vma->vm = vm;
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- i915_active_init(vm->i915, &vma->active,
- __i915_vma_active, __i915_vma_retire);
+ i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
/* Declare ourselves safe for use inside shrinkers */
if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -171,7 +173,7 @@ vma_create(struct drm_i915_gem_object *obj,
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
- vma->flags |= I915_VMA_GGTT;
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
spin_lock(&obj->vma.lock);
@@ -218,10 +220,6 @@ vma_create(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
- mutex_lock(&vm->mutex);
- list_add(&vma->vm_link, &vm->unbound_list);
- mutex_unlock(&vm->mutex);
-
return vma;
err_vma:
@@ -265,8 +263,6 @@ vma_lookup(struct drm_i915_gem_object *obj,
* Once created, the VMA is kept until either the object is freed, or the
* address space is closed.
*
- * Must be called with struct_mutex held.
- *
* Returns the vma, or an error pointer.
*/
struct i915_vma *
@@ -277,7 +273,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(vm->closed);
+ GEM_BUG_ON(!atomic_read(&vm->open));
spin_lock(&obj->vma.lock);
vma = vma_lookup(obj, vm, view);
@@ -291,18 +287,63 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
return vma;
}
+struct i915_vma_work {
+ struct dma_fence_work base;
+ struct i915_vma *vma;
+ enum i915_cache_level cache_level;
+ unsigned int flags;
+};
+
+static int __vma_bind(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+ struct i915_vma *vma = vw->vma;
+ int err;
+
+ err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+ if (err)
+ atomic_or(I915_VMA_ERROR, &vma->flags);
+
+ if (vma->obj)
+ __i915_gem_object_unpin_pages(vma->obj);
+
+ return err;
+}
+
+static const struct dma_fence_work_ops bind_ops = {
+ .name = "bind",
+ .work = __vma_bind,
+};
+
+struct i915_vma_work *i915_vma_work(void)
+{
+ struct i915_vma_work *vw;
+
+ vw = kzalloc(sizeof(*vw), GFP_KERNEL);
+ if (!vw)
+ return NULL;
+
+ dma_fence_work_init(&vw->base, &bind_ops);
+ vw->base.dma.error = -EAGAIN; /* disable the worker by default */
+
+ return vw;
+}
+
/**
* i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
+ * @work: preallocated worker for allocating and binding the PTE
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and PTE entries set up.
* Note that DMA addresses are also the only part of the SG table we care about.
*/
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
+int i915_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags,
+ struct i915_vma_work *work)
{
u32 bind_flags;
u32 vma_flags;
@@ -319,13 +360,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (GEM_DEBUG_WARN_ON(!flags))
return -EINVAL;
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
+ bind_flags = flags;
+ bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ vma_flags = atomic_read(&vma->flags);
+ vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
if (flags & PIN_UPDATE)
bind_flags |= vma_flags;
else
@@ -336,11 +375,34 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
+ if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ work->vma = vma;
+ work->cache_level = cache_level;
+ work->flags = bind_flags | I915_VMA_ALLOC;
- vma->flags |= bind_flags;
+ /*
+ * Note we only want to chain up to the migration fence on
+ * the pages (not the object itself). As we don't track that yet,
+ * we have to use the exclusive fence instead.
+ *
+ * Also note that we do not want to track the async vma as
+ * part of the obj->resv->excl_fence as it only affects
+ * execution and not content or object's backing store lifetime.
+ */
+ GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
+ i915_active_set_exclusive(&vma->active, &work->base.dma);
+ work->base.dma.error = 0; /* enable the queue_work() */
+
+ if (vma->obj)
+ __i915_gem_object_pin_pages(vma->obj);
+ } else {
+ GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
+ ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+ }
+
+ atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -350,18 +412,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
int err;
/* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
-
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+ assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
+ if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
err = -ENODEV;
goto err;
}
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+ GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
- ptr = vma->iomap;
+ ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
@@ -371,7 +431,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
goto err;
}
- vma->iomap = ptr;
+ if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
+ io_mapping_unmap(ptr);
+ ptr = vma->iomap;
+ }
}
__i915_vma_pin(vma);
@@ -391,18 +454,12 @@ err:
void i915_vma_flush_writes(struct i915_vma *vma)
{
- if (!i915_vma_has_ggtt_write(vma))
- return;
-
- intel_gt_flush_ggtt_writes(vma->vm->gt);
-
- i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
}
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_flush_writes(vma);
@@ -438,6 +495,9 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
if (!drm_mm_node_allocated(&vma->node))
return false;
+ if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
+ return true;
+
if (vma->node.size < size)
return true;
@@ -472,17 +532,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
- vma->flags |= I915_VMA_CAN_FENCE;
+ set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-static bool color_differs(struct drm_mm_node *node, unsigned long color)
-{
- return node->allocated && node->color != color;
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
@@ -494,7 +549,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
* these constraints apply and set the drm_mm.color_adjust
* appropriately.
*/
- if (vma->vm->mm.color_adjust == NULL)
+ if (!i915_vm_has_cache_coloring(vma->vm))
return true;
/* Only valid to be called on an already inserted vma */
@@ -502,11 +557,13 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
GEM_BUG_ON(list_empty(&node->node_list));
other = list_prev_entry(node, node_list);
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
+ if (i915_node_color_differs(other, color) &&
+ !drm_mm_hole_follows(other))
return false;
other = list_next_entry(node, node_list);
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
+ if (i915_node_color_differs(other, color) &&
+ !drm_mm_hole_follows(node))
return false;
return true;
@@ -541,13 +598,12 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_private *dev_priv = vma->vm->i915;
- unsigned int cache_level;
+ unsigned long color;
u64 start, end;
int ret;
GEM_BUG_ON(i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
@@ -567,7 +623,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
@@ -583,35 +639,21 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
return -ENOSPC;
}
- if (vma->obj) {
- ret = i915_gem_object_pin_pages(vma->obj);
- if (ret)
- return ret;
-
- cache_level = vma->obj->cache_level;
- } else {
- cache_level = 0;
- }
-
- GEM_BUG_ON(vma->pages);
-
- ret = vma->ops->set_pages(vma);
- if (ret)
- goto err_unpin;
+ color = 0;
+ if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+ color = vma->obj->cache_level;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) ||
- range_overflows(offset, size, end)) {
- ret = -EINVAL;
- goto err_clear;
- }
+ range_overflows(offset, size, end))
+ return -EINVAL;
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, cache_level,
+ size, offset, color,
flags);
if (ret)
- goto err_clear;
+ return ret;
} else {
/*
* We only support huge gtt pages through the 48b PPGTT,
@@ -647,116 +689,259 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
- size, alignment, cache_level,
+ size, alignment, color,
start, end, flags);
if (ret)
- goto err_clear;
+ return ret;
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
-
- mutex_lock(&vma->vm->mutex);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
if (vma->obj) {
- atomic_inc(&vma->obj->bind_count);
- assert_bind_count(vma->obj);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ atomic_inc(&obj->bind_count);
+ assert_bind_count(obj);
}
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
-
-err_clear:
- vma->ops->clear_pages(vma);
-err_unpin:
- if (vma->obj)
- i915_gem_object_unpin_pages(vma->obj);
- return ret;
}
static void
-i915_vma_remove(struct i915_vma *vma)
+i915_vma_detach(struct i915_vma *vma)
{
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
-
- vma->ops->clear_pages(vma);
-
- mutex_lock(&vma->vm->mutex);
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
/*
- * Since the unbound list is global, only move to that list if
- * no more VMAs exist.
+ * And finally now the object is completely decoupled from this
+ * vma, we can drop its hold on the backing storage and allow
+ * it to be reaped by the shrinker.
*/
+ list_del(&vma->vm_link);
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
+ assert_bind_count(obj);
atomic_dec(&obj->bind_count);
+ }
+}
- /*
- * And finally now the object is completely decoupled from this
- * vma, we can drop its hold on the backing storage and allow
- * it to be reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- assert_bind_count(obj);
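+/*
+ * Try to take an extra pin on an already-bound vma without touching
+ * vm->mutex. Only if the pin count is currently zero do we fall back to
+ * re-checking under the lock, to avoid racing a concurrent
+ * i915_vma_unbind().
+ */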
+static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
+{
+ unsigned int bound;
+ bool pinned = true;
+
+ bound = atomic_read(&vma->flags);
+ do {
+ if (unlikely(flags & ~bound))
+ return false;
+
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
+ return false;
+
+ if (!(bound & I915_VMA_PIN_MASK))
+ goto unpinned;
+
+ GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
+
+ return true;
+
+unpinned:
+ /*
+ * If pin_count==0, but we are bound, check under the lock to avoid
+ * racing with a concurrent i915_vma_unbind().
+ */
+ mutex_lock(&vma->vm->mutex);
+ do {
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
+ pinned = false;
+ break;
+ }
+
+ if (unlikely(flags & ~bound)) {
+ pinned = false;
+ break;
+ }
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
+ mutex_unlock(&vma->vm->mutex);
+
+ return pinned;
+}
+
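+/*
+ * vma->pages_count is a split reference count: the low bits are a plain
+ * usage count for the backing pages (each live binding also holds one),
+ * while everything above I915_VMA_PAGES_BIAS counts the bindings
+ * themselves (see vma_unbind_pages()). The pages are only acquired and
+ * released on the 0 <-> 1 transitions, under pages_mutex.
+ */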
+static int vma_get_pages(struct i915_vma *vma)
+{
+ int err = 0;
+
+ if (atomic_add_unless(&vma->pages_count, 1, 0))
+ return 0;
+
+ /* Allocations ahoy! */
+ if (mutex_lock_interruptible(&vma->pages_mutex))
+ return -EINTR;
+
+ if (!atomic_read(&vma->pages_count)) {
+ if (vma->obj) {
+ err = i915_gem_object_pin_pages(vma->obj);
+ if (err)
+ goto unlock;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err) {
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
+ goto unlock;
+ }
}
+ atomic_inc(&vma->pages_count);
+
+unlock:
+ mutex_unlock(&vma->pages_mutex);
+
+ return err;
}
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
+static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
- const unsigned int bound = vma->flags;
- int ret;
+ /* We allocate under vma_get_pages, so beware the shrinker */
+ mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
+ GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
+ if (atomic_sub_return(count, &vma->pages_count) == 0) {
+ vma->ops->clear_pages(vma);
+ GEM_BUG_ON(vma->pages);
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
+ }
+ mutex_unlock(&vma->pages_mutex);
+}
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+static void vma_put_pages(struct i915_vma *vma)
+{
+ if (atomic_add_unless(&vma->pages_count, -1, 1))
+ return;
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err_unpin;
+ __vma_put_pages(vma, 1);
+}
+
+static void vma_unbind_pages(struct i915_vma *vma)
+{
+ unsigned int count;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ /* The upper portion of pages_count is the number of bindings */
+ count = atomic_read(&vma->pages_count);
+ count >>= I915_VMA_PAGES_BIAS;
+ GEM_BUG_ON(!count);
+
+ __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
+}
+
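+/*
+ * Pin (and, if need be, insert and bind) a vma. The fast path is the
+ * lockless try_qad_pin() on an already-bound vma; otherwise the backing
+ * pages are acquired, an async bind worker is allocated if the vm wants
+ * one, and the insert/bind is done under vm->mutex with no further
+ * allocations.
+ */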
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct i915_vma_work *work = NULL;
+ unsigned int bound;
+ int err;
+
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ GEM_BUG_ON(flags & PIN_UPDATE);
+ GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
+
+ /* First try and grab the pin without rebinding the vma */
+ if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
+ return 0;
+
+ err = vma_get_pages(vma);
+ if (err)
+ return err;
+
+ if (flags & vma->vm->bind_async_flags) {
+ work = i915_vma_work();
+ if (!work) {
+ err = -ENOMEM;
+ goto err_pages;
+ }
}
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err_unpin;
+ /* No more allocations allowed once we hold vm->mutex */
+ err = mutex_lock_interruptible(&vma->vm->mutex);
+ if (err)
+ goto err_fence;
+
+ bound = atomic_read(&vma->flags);
+ if (unlikely(bound & I915_VMA_ERROR)) {
+ err = -ENOMEM;
+ goto err_unlock;
}
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
- if (ret)
- goto err_remove;
+ if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
+ err = -EAGAIN; /* pins are meant to be fairly temporary */
+ goto err_unlock;
+ }
- GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+ if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
+ __i915_vma_pin(vma);
+ goto err_unlock;
+ }
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unlock;
+
+ if (!(bound & I915_VMA_BIND_MASK)) {
+ err = i915_vma_insert(vma, size, alignment, flags);
+ if (err)
+ goto err_active;
+
+ if (i915_is_ggtt(vma->vm))
+ __i915_vma_set_map_and_fenceable(vma);
+ }
+
+ GEM_BUG_ON(!vma->pages);
+ err = i915_vma_bind(vma,
+ vma->obj ? vma->obj->cache_level : 0,
+ flags, work);
+ if (err)
+ goto err_remove;
+
+ /* There should only be at most 2 active bindings (user, global) */
+ GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
+ atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ __i915_vma_pin(vma);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
err_remove:
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- i915_vma_remove(vma);
- GEM_BUG_ON(vma->pages);
- GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
+ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
+ i915_vma_detach(vma);
+ drm_mm_remove_node(&vma->node);
}
-err_unpin:
- __i915_vma_unpin(vma);
- return ret;
+err_active:
+ i915_active_release(&vma->active);
+err_unlock:
+ mutex_unlock(&vma->vm->mutex);
+err_fence:
+ if (work)
+ dma_fence_work_commit(&work->base);
+err_pages:
+ vma_put_pages(vma);
+ return err;
}
void i915_vma_close(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
+ struct intel_gt *gt = vma->vm->gt;
unsigned long flags;
GEM_BUG_ON(i915_vma_is_closed(vma));
@@ -773,79 +958,87 @@ void i915_vma_close(struct i915_vma *vma)
* causing us to rebind the VMA once more. This ends up being a lot
* of wasted work for the steady state.
*/
- spin_lock_irqsave(&i915->gt.closed_lock, flags);
- list_add(&vma->closed_link, &i915->gt.closed_vma);
- spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
+ spin_lock_irqsave(&gt->closed_lock, flags);
+ list_add(&vma->closed_link, &gt->closed_vma);
+ spin_unlock_irqrestore(&gt->closed_lock, flags);
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
- if (!i915_vma_is_closed(vma))
- return;
+ struct intel_gt *gt = vma->vm->gt;
- spin_lock_irq(&i915->gt.closed_lock);
+ spin_lock_irq(&gt->closed_lock);
list_del_init(&vma->closed_link);
- spin_unlock_irq(&i915->gt.closed_lock);
+ spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_reopen(struct i915_vma *vma)
{
- __i915_vma_remove_closed(vma);
+ if (i915_vma_is_closed(vma))
+ __i915_vma_remove_closed(vma);
}
-static void __i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_destroy(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(vma->fence);
-
- mutex_lock(&vma->vm->mutex);
- list_del(&vma->vm_link);
- mutex_unlock(&vma->vm->mutex);
+ if (drm_mm_node_allocated(&vma->node)) {
+ mutex_lock(&vma->vm->mutex);
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+ }
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
spin_lock(&obj->vma.lock);
list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &vma->obj->vma.tree);
+ rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
}
- i915_active_fini(&vma->active);
+ __i915_vma_remove_closed(vma);
+ i915_vm_put(vma->vm);
+ i915_active_fini(&vma->active);
i915_vma_free(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
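+/*
+ * Called when the GT parks: flush the list of closed vma, destroying each
+ * one under temporary object/vm references so that closed_lock can be
+ * dropped while doing so, and restarting the walk after every iteration.
+ */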
+void i915_vma_parked(struct intel_gt *gt)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ struct i915_vma *vma, *next;
- GEM_BUG_ON(i915_vma_is_pinned(vma));
+ spin_lock_irq(&gt->closed_lock);
+ list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
- __i915_vma_remove_closed(vma);
+ /* XXX All to avoid keeping a reference on i915_vma itself */
- WARN_ON(i915_vma_unbind(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
- __i915_vma_destroy(vma);
-}
+ if (!i915_vm_tryopen(vm)) {
+ i915_gem_object_put(obj);
+ obj = NULL;
+ }
-void i915_vma_parked(struct drm_i915_private *i915)
-{
- struct i915_vma *vma, *next;
+ spin_unlock_irq(&gt->closed_lock);
- spin_lock_irq(&i915->gt.closed_lock);
- list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
- list_del_init(&vma->closed_link);
- spin_unlock_irq(&i915->gt.closed_lock);
+ if (obj) {
+ i915_vma_destroy(vma);
+ i915_gem_object_put(obj);
+ }
- i915_vma_destroy(vma);
+ i915_vm_close(vm);
- spin_lock_irq(&i915->gt.closed_lock);
+ /* Restart after dropping lock */
+ spin_lock_irq(&gt->closed_lock);
+ next = list_first_entry(&gt->closed_vma,
+ typeof(*next), closed_link);
}
- spin_unlock_irq(&i915->gt.closed_lock);
+ spin_unlock_irq(&gt->closed_lock);
}
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -883,6 +1076,20 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
+int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
+{
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+ /* Wait for the vma to be bound before we start! */
+ err = i915_request_await_active(rq, &vma->active);
+ if (err)
+ return err;
+
+ return i915_active_add_request(&vma->active, rq);
+}
+
int i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags)
@@ -890,27 +1097,15 @@ int i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
int err;
- assert_vma_held(vma);
assert_object_held(obj);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- /*
- * Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
- * vital here: mark_active adds to the start of the callback list,
- * such that subsequent callbacks are called first. Therefore we
- * add the active reference first and queue for it to be dropped
- * *last*.
- */
- err = i915_active_ref(&vma->active, rq->timeline, rq);
+ err = __i915_vma_move_to_active(vma, rq);
if (unlikely(err))
return err;
if (flags & EXEC_OBJECT_WRITE) {
if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
- i915_active_ref(&obj->frontbuffer->write,
- rq->timeline,
- rq);
+ i915_active_add_request(&obj->frontbuffer->write, rq);
dma_resv_add_excl_fence(vma->resv, &rq->fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
@@ -930,44 +1125,31 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-int i915_vma_unbind(struct i915_vma *vma)
+int __i915_vma_unbind(struct i915_vma *vma)
{
int ret;
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vma->vm->mutex);
/*
* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
+ *
+ * XXX Actually waiting under the vm->mutex is a hindrance and
+ * should be pipelined wherever possible. In cases where that is
+ * unavoidable, we should lift the wait to before the mutex.
*/
- might_sleep();
- if (i915_vma_is_active(vma)) {
- /*
- * When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- *
- * Even more scary is that the retire callback may free
- * the object (last active vma). To prevent the explosion
- * we defer the actual object free to a worker that can
- * only proceed once it acquires the struct_mutex (which
- * we currently hold, therefore it cannot free this object
- * before we are finished).
- */
- __i915_vma_pin(vma);
- ret = i915_active_wait(&vma->active);
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
- }
- GEM_BUG_ON(i915_vma_is_active(vma));
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EBUSY;
}
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (!drm_mm_node_allocated(&vma->node))
return 0;
@@ -982,34 +1164,47 @@ int i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
- mutex_lock(&vma->vm->mutex);
ret = i915_vma_revoke_fence(vma);
- mutex_unlock(&vma->vm->mutex);
if (ret)
return ret;
/* Force a pagefault for domain tracking on next user access */
- mutex_lock(&vma->vm->mutex);
i915_vma_revoke_mmap(vma);
- mutex_unlock(&vma->vm->mutex);
__i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
- if (likely(!vma->vm->closed)) {
+ if (likely(atomic_read(&vma->vm->open))) {
trace_i915_vma_unbind(vma);
vma->ops->unbind_vma(vma);
}
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
- i915_vma_remove(vma);
+ i915_vma_detach(vma);
+ vma_unbind_pages(vma);
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
return 0;
}
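+/* Locked wrapper: takes vm->mutex around __i915_vma_unbind(). */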
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct i915_address_space *vm = vma->vm;
+ int err;
+
+ err = mutex_lock_interruptible(&vm->mutex);
+ if (err)
+ return err;
+
+ err = __i915_vma_unbind(vma);
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
i915_gem_object_make_unshrinkable(vma->obj);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 889fc7cb910a..465932813bc5 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -72,7 +72,7 @@ struct i915_vma {
* that exist in the ctx->handle_vmas LUT for this vma.
*/
atomic_t open_count;
- unsigned long flags;
+ atomic_t flags;
/**
* How many users have pinned this object in GTT space.
*
@@ -96,22 +96,41 @@ struct i915_vma {
* exclusive cachelines of a single page, so a maximum of 64 possible
* users.
*/
-#define I915_VMA_PIN_MASK 0xff
-#define I915_VMA_PIN_OVERFLOW BIT(8)
+#define I915_VMA_PIN_MASK 0x3ff
+#define I915_VMA_OVERFLOW 0x200
/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(9)
-#define I915_VMA_LOCAL_BIND BIT(10)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_GLOBAL_BIND_BIT 10
+#define I915_VMA_LOCAL_BIND_BIT 11
-#define I915_VMA_GGTT BIT(11)
-#define I915_VMA_CAN_FENCE BIT(12)
-#define I915_VMA_USERFAULT_BIT 13
-#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE BIT(14)
+#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
+#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))
+
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
+
+#define I915_VMA_ALLOC_BIT 12
+#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
+
+#define I915_VMA_ERROR_BIT 13
+#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
+
+#define I915_VMA_GGTT_BIT 14
+#define I915_VMA_CAN_FENCE_BIT 15
+#define I915_VMA_USERFAULT_BIT 16
+#define I915_VMA_GGTT_WRITE_BIT 17
+
+#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
+#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
+#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
+#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
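+
+/*
+ * Layout of vma->flags: bits 0-9 form the pin count field
+ * (I915_VMA_PIN_MASK, whose top bit doubles as I915_VMA_OVERFLOW),
+ * bits 10-11 hold the GLOBAL/LOCAL bind state, followed by the single-bit
+ * ALLOC, ERROR, GGTT, CAN_FENCE, USERFAULT and GGTT_WRITE flags, all
+ * updated atomically.
+ */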
struct i915_active active;
+#define I915_VMA_PAGES_BIAS 24
+#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
+ atomic_t pages_count; /* number of active binds to the pages */
+ struct mutex pages_mutex; /* protect acquire/release of backing pages */
+
/**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
@@ -158,52 +177,57 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
return !i915_active_is_idle(&vma->active);
}
+int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq);
int __must_check i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags);
+#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
+
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT;
+ return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT_WRITE;
+ return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}
static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- vma->flags |= I915_VMA_GGTT_WRITE;
+ set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}
-static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
- vma->flags &= ~I915_VMA_GGTT_WRITE;
+ return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
+ __i915_vma_flags(vma));
}
void i915_vma_flush_writes(struct i915_vma *vma);
static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_CAN_FENCE;
+ return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
- return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
- return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_is_closed(const struct i915_vma *vma)
@@ -214,7 +238,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(upper_32_bits(vma->node.start));
GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
return lower_32_bits(vma->node.start);
@@ -293,13 +317,18 @@ i915_vma_compare(struct i915_vma *vma,
return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+struct i915_vma_work *i915_vma_work(void);
+int i915_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags,
+ struct i915_vma_work *work);
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
@@ -318,30 +347,12 @@ static inline void i915_vma_unlock(struct i915_vma *vma)
dma_resv_unlock(vma->resv);
}
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
- }
-
- return __i915_vma_do_pin(vma, size, alignment, flags);
-}
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_PIN_MASK;
+ return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}
static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -351,18 +362,18 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
static inline void __i915_vma_pin(struct i915_vma *vma)
{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+ atomic_inc(&vma->flags);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
}
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- vma->flags--;
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ atomic_dec(&vma->flags);
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
@@ -370,7 +381,13 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
static inline bool i915_vma_is_bound(const struct i915_vma *vma,
unsigned int where)
{
- return vma->flags & where;
+ return atomic_read(&vma->flags) & where;
+}
+
+static inline bool i915_node_color_differs(const struct drm_mm_node *node,
+ unsigned long color)
+{
+ return drm_mm_node_allocated(node) && node->color != color;
}
/**
@@ -382,8 +399,6 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma,
* the caller must call i915_vma_unpin_iomap to relinquish the pinning
* after the iomapping is no longer required.
*
- * Callers must hold the struct_mutex.
- *
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
@@ -395,8 +410,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*
* Unpins the previously iomapped VMA from i915_vma_pin_iomap().
*
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ * This function is only valid to be called on a VMA previously
+ * iomapped by the caller with i915_vma_pin_iomap().
*/
void i915_vma_unpin_iomap(struct i915_vma *vma);
@@ -424,6 +439,8 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
+int __i915_vma_pin_fence(struct i915_vma *vma);
+
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
@@ -441,12 +458,11 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
if (vma->fence)
__i915_vma_unpin_fence(vma);
}
-void i915_vma_parked(struct drm_i915_private *i915);
+void i915_vma_parked(struct intel_gt *gt);
#define for_each_until(cond) if (cond) break; else
@@ -470,4 +486,10 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
+static inline int i915_vma_sync(struct i915_vma *vma)
+{
+ /* Wait for the asynchronous bindings and pending GPU reads */
+ return i915_active_wait(&vma->active);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 546577e39b4e..09870a31b4f0 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -44,8 +44,8 @@
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
-#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin"
+#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 9)
#define ICL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(ICL_CSR_PATH);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d0ed44d33484..a5b571364cf6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -93,9 +93,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
hweight8(sseu->slice_mask), sseu->slice_mask);
drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- sseu->subslice_mask[s]);
+ intel_sseu_get_subslices(sseu, s));
}
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
@@ -118,10 +118,9 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
int subslice)
{
- int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
- int slice_stride = sseu->max_subslices * subslice_stride;
+ int slice_stride = sseu->max_subslices * sseu->eu_stride;
- return slice * slice_stride + subslice * subslice_stride;
+ return slice * slice_stride + subslice * sseu->eu_stride;
}
static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
@@ -130,7 +129,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
int i, offset = sseu_eu_idx(sseu, slice, subslice);
u16 eu_mask = 0;
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ for (i = 0; i < sseu->eu_stride; i++) {
eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
(i * BITS_PER_BYTE);
}
@@ -143,7 +142,7 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
{
int i, offset = sseu_eu_idx(sseu, slice, subslice);
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ for (i = 0; i < sseu->eu_stride; i++) {
sseu->eu_mask[offset + i] =
(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
}
@@ -160,9 +159,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
}
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
+ drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- sseu->subslice_mask[s]);
+ intel_sseu_get_subslices(sseu, s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
u16 enabled_eus = sseu_get_eus(sseu, s, ss);
@@ -183,44 +182,80 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
return total;
}
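+/*
+ * Shared gen11/gen12 helper: expand the slice/subslice/EU enable masks
+ * read from the fuse registers into the sseu_dev_info bitmaps, then derive
+ * eu_per_subslice and eu_total from the per-subslice EU mask.
+ */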
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u8 s_en, u32 ss_en, u16 eu_en)
+{
+ int s, ss;
+
+ /* ss_en represents entire subslice mask across all slices */
+ GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
+ sizeof(ss_en) * BITS_PER_BYTE);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ if ((s_en & BIT(s)) == 0)
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ intel_sseu_set_subslices(sseu, s, ss_en);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, s, ss))
+ sseu_set_eus(sseu, s, ss, eu_en);
+ }
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ u8 s_en;
+ u32 dss_en;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ int eu;
+
+ /*
+ * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+ * Instead of splitting these, provide userspace with an array
+ * of DSS to more closely represent the hardware resource.
+ */
+ intel_sseu_set_info(sseu, 1, 6, 16);
+
+ s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
+
+ dss_en = I915_READ(GEN12_GT_DSS_ENABLE);
+
+ /* one bit per pair of EUs */
+ eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+
+ /* TGL only supports slice-level power gating */
+ sseu->has_slice_pg = 1;
+}
+
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u8 s_en;
- u32 ss_en, ss_en_mask;
+ u32 ss_en;
u8 eu_en;
- int s;
- if (IS_ELKHARTLAKE(dev_priv)) {
- sseu->max_slices = 1;
- sseu->max_subslices = 4;
- sseu->max_eus_per_subslice = 8;
- } else {
- sseu->max_slices = 1;
- sseu->max_subslices = 8;
- sseu->max_eus_per_subslice = 8;
- }
+ if (IS_ELKHARTLAKE(dev_priv))
+ intel_sseu_set_info(sseu, 1, 4, 8);
+ else
+ intel_sseu_set_info(sseu, 1, 8, 8);
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
- ss_en_mask = BIT(sseu->max_subslices) - 1;
eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
- for (s = 0; s < sseu->max_slices; s++) {
- if (s_en & BIT(s)) {
- int ss_idx = sseu->max_subslices * s;
- int ss;
-
- sseu->slice_mask |= BIT(s);
- sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- if (sseu->subslice_mask[s] & BIT(ss))
- sseu_set_eus(sseu, s, ss, eu_en);
- }
- }
- }
- sseu->eu_per_subslice = hweight8(eu_en);
- sseu->eu_total = compute_eu_total(sseu);
+ gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
/* ICL has no power gating restrictions. */
sseu->has_slice_pg = 1;
@@ -236,23 +271,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
const int eu_mask = 0xff;
u32 subslice_mask, eu_en;
+ intel_sseu_set_info(sseu, 6, 4, 8);
+
sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
GEN10_F2_S_ENA_SHIFT;
- sseu->max_slices = 6;
- sseu->max_subslices = 4;
- sseu->max_eus_per_subslice = 8;
-
- subslice_mask = (1 << 4) - 1;
- subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
-
- /*
- * Slice0 can have up to 3 subslices, but there are only 2 in
- * slice1/2.
- */
- sseu->subslice_mask[0] = subslice_mask;
- for (s = 1; s < sseu->max_slices; s++)
- sseu->subslice_mask[s] = subslice_mask & 0x3;
/* Slice0 */
eu_en = ~I915_READ(GEN8_EU_DISABLE0);
@@ -277,14 +299,25 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
eu_en = ~I915_READ(GEN10_EU_DISABLE3);
sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
- /* Do a second pass where we mark the subslices disabled if all their
- * eus are off.
- */
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
for (s = 0; s < sseu->max_slices; s++) {
+ u32 subslice_mask_with_eus = subslice_mask;
+
for (ss = 0; ss < sseu->max_subslices; ss++) {
if (sseu_get_eus(sseu, s, ss) == 0)
- sseu->subslice_mask[s] &= ~BIT(ss);
+ subslice_mask_with_eus &= ~BIT(ss);
}
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ intel_sseu_set_subslices(sseu, s, s == 0 ?
+ subslice_mask_with_eus :
+ subslice_mask_with_eus & 0x3);
}
sseu->eu_total = compute_eu_total(sseu);
@@ -310,13 +343,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse;
+ u8 subslice_mask = 0;
fuse = I915_READ(CHV_FUSE_GT);
sseu->slice_mask = BIT(0);
- sseu->max_slices = 1;
- sseu->max_subslices = 2;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, 1, 2, 8);
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
u8 disabled_mask =
@@ -325,7 +357,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
- sseu->subslice_mask[0] |= BIT(0);
+ subslice_mask |= BIT(0);
sseu_set_eus(sseu, 0, 0, ~disabled_mask);
}
@@ -336,10 +368,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
- sseu->subslice_mask[0] |= BIT(1);
+ subslice_mask |= BIT(1);
sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
+ intel_sseu_set_subslices(sseu, 0, subslice_mask);
+
sseu->eu_total = compute_eu_total(sseu);
/*
@@ -372,9 +406,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
/* BXT has a single slice and at most 3 subslices. */
- sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
- sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
+ IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
/*
* The subslice disable field is global, i.e. it applies
@@ -393,14 +426,14 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
/* skip disabled slice */
continue;
- sseu->subslice_mask[s] = subslice_mask;
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
int eu_per_ss;
u8 eu_disabled_mask;
- if (!(sseu->subslice_mask[s] & BIT(ss)))
+ if (!intel_sseu_has_subslice(sseu, s, ss))
/* skip disabled subslice */
continue;
@@ -473,9 +506,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- sseu->max_slices = 3;
- sseu->max_subslices = 3;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, 3, 3, 8);
/*
* The subslice disable field is global, i.e. it applies
@@ -502,13 +533,13 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
/* skip disabled slice */
continue;
- sseu->subslice_mask[s] = subslice_mask;
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
u32 n_disabled;
- if (!(sseu->subslice_mask[s] & BIT(ss)))
+ if (!intel_sseu_has_subslice(sseu, s, ss))
/* skip disabled subslice */
continue;
@@ -552,6 +583,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
+ u8 subslice_mask = 0;
int s, ss;
/*
@@ -564,22 +596,18 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
/* fall through */
case 1:
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] = BIT(0);
+ subslice_mask = BIT(0);
break;
case 2:
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
break;
case 3:
sseu->slice_mask = BIT(0) | BIT(1);
- sseu->subslice_mask[0] = BIT(0) | BIT(1);
- sseu->subslice_mask[1] = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
break;
}
- sseu->max_slices = hweight8(sseu->slice_mask);
- sseu->max_subslices = hweight8(sseu->subslice_mask[0]);
-
fuse1 = I915_READ(HSW_PAVP_FUSE1);
switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
default:
@@ -596,9 +624,14 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->eu_per_subslice = 6;
break;
}
- sseu->max_eus_per_subslice = sseu->eu_per_subslice;
+
+ intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+ hweight8(subslice_mask),
+ sseu->eu_per_subslice);
for (s = 0; s < sseu->max_slices; s++) {
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
(1UL << sseu->eu_per_subslice) - 1);
@@ -900,12 +933,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_sprites[pipe] = 1;
}
- if (i915_modparams.disable_display) {
- DRM_INFO("Display disabled (module parameter)\n");
- info->num_pipes = 0;
- } else if (HAS_DISPLAY(dev_priv) &&
- (IS_GEN_RANGE(dev_priv, 7, 8)) &&
- HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
+ HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -923,14 +952,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
(HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
- info->num_pipes = 0;
+ info->pipe_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
DRM_INFO("PipeC fused off\n");
- info->num_pipes -= 1;
+ info->pipe_mask &= ~BIT(PIPE_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 enabled_mask = BIT(info->num_pipes) - 1;
+ u8 enabled_mask = info->pipe_mask;
if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
enabled_mask &= ~BIT(PIPE_A);
@@ -951,7 +980,20 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
enabled_mask);
else
- info->num_pipes = hweight8(enabled_mask);
+ info->pipe_mask = enabled_mask;
+
+ if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
+ info->display.has_hdcp = 0;
+
+ if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
+ info->display.has_fbc = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ info->display.has_csr = 0;
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
+ info->display.has_dsc = 0;
}
/* Initialize slice/subslice/EU info */
@@ -965,8 +1007,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
gen9_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 10))
gen10_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (IS_GEN(dev_priv, 11))
gen11_sseu_info_init(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 12)
+ gen12_sseu_info_init(dev_priv);
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
@@ -1010,8 +1054,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
GEN11_GT_VEBOX_DISABLE_SHIFT;
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ if (!HAS_ENGINE(dev_priv, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
continue;
+ }
if (!(BIT(i) & vdbox_mask)) {
info->engine_mask &= ~BIT(_VCS(i));
@@ -1032,8 +1078,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ if (!HAS_ENGINE(dev_priv, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
continue;
+ }
if (!(BIT(i) & vebox_mask)) {
info->engine_mask &= ~BIT(_VECS(i));
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 92e0c2e0954c..4bdf8a6cfb47 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -107,6 +107,7 @@ enum intel_ppgtt_type {
func(is_mobile); \
func(is_lp); \
func(require_force_probe); \
+ func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
@@ -135,8 +136,11 @@ enum intel_ppgtt_type {
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
+ func(has_dsb); \
+ func(has_dsc); \
func(has_fbc); \
func(has_gmch); \
+ func(has_hdcp); \
func(has_hotplug); \
func(has_ipc); \
func(has_modular_fia); \
@@ -159,9 +163,11 @@ struct intel_device_info {
unsigned int page_sizes; /* page sizes supported by the HW */
+ u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
- u8 num_pipes;
+ u8 pipe_mask;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
new file mode 100644
index 000000000000..baaeaecc64af
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_drv.h"
+
+/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
+#define REGION_MAP(type, inst) \
+ BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
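+/*
+ * e.g. REGION_MAP(INTEL_MEMORY_LOCAL, 0) == BIT(1 + 16) | BIT(0), from which
+ * MEMORY_TYPE_FROM_REGION() and MEMORY_INSTANCE_FROM_REGION() recover the
+ * type (1) and instance (0) again.
+ */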
+
+const u32 intel_region_map[] = {
+ [INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
+ [INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
+ [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
+};
+
+static u64
+intel_memory_region_free_pages(struct intel_memory_region *mem,
+ struct list_head *blocks)
+{
+ struct i915_buddy_block *block, *on;
+ u64 size = 0;
+
+ list_for_each_entry_safe(block, on, blocks, link) {
+ size += i915_buddy_block_size(&mem->mm, block);
+ i915_buddy_free(&mem->mm, block);
+ }
+ INIT_LIST_HEAD(blocks);
+
+ return size;
+}
+
+void
+__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+ struct list_head *blocks)
+{
+ mutex_lock(&mem->mm_lock);
+ intel_memory_region_free_pages(mem, blocks);
+ mutex_unlock(&mem->mm_lock);
+}
+
+void
+__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
+{
+ struct list_head blocks;
+
+ INIT_LIST_HEAD(&blocks);
+ list_add(&block->link, &blocks);
+ __intel_memory_region_put_pages_buddy(block->private, &blocks);
+}
+
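+/*
+ * Fill the request from the region's buddy allocator: greedily take the
+ * largest power-of-two block that still fits the remaining size, dropping
+ * to smaller orders (down to min_order) on failure until the request is
+ * satisfied or the region is exhausted.
+ */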
+int
+__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags,
+ struct list_head *blocks)
+{
+ unsigned int min_order = 0;
+ unsigned long n_pages;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
+ GEM_BUG_ON(!list_empty(blocks));
+
+ if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
+ min_order = ilog2(mem->min_page_size) -
+ ilog2(mem->mm.chunk_size);
+ }
+
+ if (flags & I915_ALLOC_CONTIGUOUS) {
+ size = roundup_pow_of_two(size);
+ min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
+ }
+
+ n_pages = size >> ilog2(mem->mm.chunk_size);
+
+ mutex_lock(&mem->mm_lock);
+
+ do {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ order = fls(n_pages) - 1;
+ GEM_BUG_ON(order > mem->mm.max_order);
+ GEM_BUG_ON(order < min_order);
+
+ do {
+ block = i915_buddy_alloc(&mem->mm, order);
+ if (!IS_ERR(block))
+ break;
+
+ if (order-- == min_order)
+ goto err_free_blocks;
+ } while (1);
+
+ n_pages -= BIT(order);
+
+ block->private = mem;
+ list_add(&block->link, blocks);
+
+ if (!n_pages)
+ break;
+ } while (1);
+
+ mutex_unlock(&mem->mm_lock);
+ return 0;
+
+err_free_blocks:
+ intel_memory_region_free_pages(mem, blocks);
+ mutex_unlock(&mem->mm_lock);
+ return -ENXIO;
+}
+
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct i915_buddy_block *block;
+ LIST_HEAD(blocks);
+ int ret;
+
+ ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
+ if (ret)
+ return ERR_PTR(ret);
+
+ block = list_first_entry(&blocks, typeof(*block), link);
+ list_del_init(&block->link);
+ return block;
+}
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem)
+{
+ return i915_buddy_init(&mem->mm, resource_size(&mem->region),
+ PAGE_SIZE);
+}
+
+void intel_memory_region_release_buddy(struct intel_memory_region *mem)
+{
+ i915_buddy_fini(&mem->mm);
+}
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops)
+{
+ struct intel_memory_region *mem;
+ int err;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ mem->i915 = i915;
+ mem->region = (struct resource)DEFINE_RES_MEM(start, size);
+ mem->io_start = io_start;
+ mem->min_page_size = min_page_size;
+ mem->ops = ops;
+
+ mutex_init(&mem->objects.lock);
+ INIT_LIST_HEAD(&mem->objects.list);
+ INIT_LIST_HEAD(&mem->objects.purgeable);
+
+ mutex_init(&mem->mm_lock);
+
+ if (ops->init) {
+ err = ops->init(mem);
+ if (err)
+ goto err_free;
+ }
+
+ kref_init(&mem->kref);
+ return mem;
+
+err_free:
+ kfree(mem);
+ return ERR_PTR(err);
+}
+
+static void __intel_memory_region_destroy(struct kref *kref)
+{
+ struct intel_memory_region *mem =
+ container_of(kref, typeof(*mem), kref);
+
+ if (mem->ops->release)
+ mem->ops->release(mem);
+
+ mutex_destroy(&mem->mm_lock);
+ mutex_destroy(&mem->objects.lock);
+ kfree(mem);
+}
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem)
+{
+ kref_get(&mem->kref);
+ return mem;
+}
+
+void intel_memory_region_put(struct intel_memory_region *mem)
+{
+ kref_put(&mem->kref, __intel_memory_region_destroy);
+}
+
+/* Global memory region registration -- only slight layer inversions! */
+
+int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *mem = ERR_PTR(-ENODEV);
+ u32 type;
+
+ if (!HAS_REGION(i915, BIT(i)))
+ continue;
+
+ type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
+ switch (type) {
+ case INTEL_MEMORY_SYSTEM:
+ mem = i915_gem_shmem_setup(i915);
+ break;
+ case INTEL_MEMORY_STOLEN:
+ mem = i915_gem_stolen_setup(i915);
+ break;
+ case INTEL_MEMORY_LOCAL:
+ mem = intel_setup_fake_lmem(i915);
+ break;
+ }
+
+ if (IS_ERR(mem)) {
+ err = PTR_ERR(mem);
+ DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
+ goto out_cleanup;
+ }
+
+ mem->id = intel_region_map[i];
+ mem->type = type;
+ mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
+
+ i915->mm.regions[i] = mem;
+ }
+
+ return 0;
+
+out_cleanup:
+ intel_memory_regions_driver_release(i915);
+ return err;
+}
+
+void intel_memory_regions_driver_release(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *region =
+ fetch_and_zero(&i915->mm.regions[i]);
+
+ if (region)
+ intel_memory_region_put(region);
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_memory_region.c"
+#include "selftests/mock_region.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
new file mode 100644
index 000000000000..238722009677
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_MEMORY_REGION_H__
+#define __INTEL_MEMORY_REGION_H__
+
+#include <linux/kref.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/io-mapping.h>
+#include <drm/drm_mm.h>
+
+#include "i915_buddy.h"
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+struct sg_table;
+
+/**
+ * Base memory type
+ */
+enum intel_memory_type {
+ INTEL_MEMORY_SYSTEM = 0,
+ INTEL_MEMORY_LOCAL,
+ INTEL_MEMORY_STOLEN,
+};
+
+enum intel_region_id {
+ INTEL_REGION_SMEM = 0,
+ INTEL_REGION_LMEM,
+ INTEL_REGION_STOLEN,
+ INTEL_REGION_UNKNOWN, /* Should be last */
+};
+
+#define REGION_SMEM BIT(INTEL_REGION_SMEM)
+#define REGION_LMEM BIT(INTEL_REGION_LMEM)
+#define REGION_STOLEN BIT(INTEL_REGION_STOLEN)
+
+#define INTEL_MEMORY_TYPE_SHIFT 16
+
+#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> INTEL_MEMORY_TYPE_SHIFT))
+#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff))
+
+#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
+#define I915_ALLOC_CONTIGUOUS BIT(1)
+
+/**
+ * Memory regions encoded as type | instance
+ */
+extern const u32 intel_region_map[];
+
+struct intel_memory_region_ops {
+ unsigned int flags;
+
+ int (*init)(struct intel_memory_region *mem);
+ void (*release)(struct intel_memory_region *mem);
+
+ struct drm_i915_gem_object *
+ (*create_object)(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+};
+
+struct intel_memory_region {
+ struct drm_i915_private *i915;
+
+ const struct intel_memory_region_ops *ops;
+
+ struct io_mapping iomap;
+ struct resource region;
+
+ /* For fake LMEM */
+ struct drm_mm_node fake_mappable;
+
+ struct i915_buddy_mm mm;
+ struct mutex mm_lock;
+
+ struct kref kref;
+
+ resource_size_t io_start;
+ resource_size_t min_page_size;
+
+ unsigned int type;
+ unsigned int instance;
+ unsigned int id;
+
+ dma_addr_t remap_addr;
+
+ struct {
+ struct mutex lock; /* Protects access to objects */
+ struct list_head list;
+ struct list_head purgeable;
+ } objects;
+};
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem);
+void intel_memory_region_release_buddy(struct intel_memory_region *mem);
+
+int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags,
+ struct list_head *blocks);
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+ struct list_head *blocks);
+void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops);
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem);
+void intel_memory_region_put(struct intel_memory_region *mem);
+
+int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
+void intel_memory_regions_driver_release(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 15f8bff141f9..8fd92b9130a7 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -52,7 +52,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
return PCH_SPT;
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
@@ -74,12 +75,16 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* Comet Lake V PCH is based on KBP, which is SPT compatible */
+ return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
@@ -87,6 +92,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
WARN_ON(!IS_TIGERLAKE(dev_priv));
return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Jasper Lake PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_JSP;
default:
return PCH_NONE;
}
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index c29c81ec7971..d26c25dd8d54 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -23,6 +23,7 @@ enum intel_pch {
PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
+ PCH_JSP, /* Jasper Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
PCH_TGP, /* Tiger Lake PCH */
};
@@ -42,16 +43,19 @@ enum intel_pch {
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2efe1d12d5a9..809bff955b5a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,7 +25,6 @@
*
*/
-#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -38,6 +37,8 @@
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
+#include "gt/intel_llc.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
@@ -45,26 +46,6 @@
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
-/**
- * DOC: RC6
- *
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage. This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -224,8 +205,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
break;
}
- dev_priv->ips.r_t = dev_priv->mem_freq;
-
switch (csipll & 0x3ff) {
case 0x00c:
dev_priv->fsb_freq = 3200;
@@ -254,14 +233,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq = 0;
break;
}
-
- if (dev_priv->fsb_freq == 3200) {
- dev_priv->ips.c_m = 0;
- } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->ips.c_m = 1;
- } else {
- dev_priv->ips.c_m = 2;
- }
}
static const struct cxsr_latency cxsr_latency_table[] = {
@@ -1145,10 +1116,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- if (plane->id == PLANE_CURSOR)
- width = plane_state->base.crtc_w;
- else
- width = drm_rect_width(&plane_state->base.dst);
+ width = drm_rect_width(&plane_state->base.dst);
if (plane->id == PLANE_CURSOR) {
wm = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1335,8 +1303,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->base.state);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- int num_active_planes = hweight32(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ int num_active_planes = hweight8(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
const struct g4x_pipe_wm *raw;
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1498,7 +1466,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
- int num_active_crtcs = 0;
+ int num_active_pipes = 0;
wm->cxsr = true;
wm->hpll_en = true;
@@ -1517,10 +1485,10 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
if (!wm_state->fbc_en)
wm->fbc_en = false;
- num_active_crtcs++;
+ num_active_pipes++;
}
- if (num_active_crtcs != 1) {
+ if (num_active_pipes != 1) {
wm->cxsr = false;
wm->hpll_en = false;
wm->fbc_en = false;
@@ -1667,7 +1635,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight32(active_planes);
+ int num_active_planes = hweight8(active_planes);
const int fifo_size = 511;
int fifo_extra, fifo_left = fifo_size;
int sprite0_fifo_extra = 0;
@@ -1856,8 +1824,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
- int num_active_planes = hweight32(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ int num_active_planes = hweight8(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1917,7 +1885,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -2106,7 +2074,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
- int num_active_crtcs = 0;
+ int num_active_pipes = 0;
wm->level = dev_priv->wm.max_level;
wm->cxsr = true;
@@ -2120,14 +2088,14 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (!wm_state->cxsr)
wm->cxsr = false;
- num_active_crtcs++;
+ num_active_pipes++;
wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
}
- if (num_active_crtcs != 1)
+ if (num_active_pipes != 1)
wm->cxsr = false;
- if (num_active_crtcs > 1)
+ if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2577,7 +2545,8 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
return ilk_wm_method2(crtc_state->pixel_rate,
crtc_state->base.adjusted_mode.crtc_htotal,
- plane_state->base.crtc_w, cpp, mem_value);
+ drm_rect_width(&plane_state->base.dst),
+ cpp, mem_value);
}
/* Only for WM_LP. */
@@ -2656,7 +2625,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
+ fifo_size /= INTEL_NUM_PIPES(dev_priv);
/*
* For some reason the non self refresh
@@ -3117,8 +3086,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane *plane;
- const struct drm_plane_state *plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
const struct intel_plane_state *pristate = NULL;
const struct intel_plane_state *sprstate = NULL;
const struct intel_plane_state *curstate = NULL;
@@ -3127,15 +3096,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
pipe_wm = &crtc_state->wm.ilk.optimal;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) {
- const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
-
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- pristate = ps;
- else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = ps;
- else if (plane->type == DRM_PLANE_TYPE_CURSOR)
- curstate = ps;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ curstate = plane_state;
}
pipe_wm->pipe_enabled = crtc_state->base.active;
@@ -3662,10 +3629,47 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
+ /* HACK! */
+ if (IS_GEN(dev_priv, 12))
+ return false;
+
return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
+static void
+skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 12) {
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
+ if (!ret) {
+ dev_priv->sagv_block_time_us = val;
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n");
+ } else if (IS_GEN(dev_priv, 11)) {
+ dev_priv->sagv_block_time_us = 10;
+ return;
+ } else if (IS_GEN(dev_priv, 10)) {
+ dev_priv->sagv_block_time_us = 20;
+ return;
+ } else if (IS_GEN(dev_priv, 9)) {
+ dev_priv->sagv_block_time_us = 30;
+ return;
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ }
+
+ /* Default to an unusable block time */
+ dev_priv->sagv_block_time_us = -1;
+}
+
/*
* SAGV dynamically adjusts the system agent voltage and clock frequencies
* depending on power and performance requirements. The display engine access
@@ -3754,33 +3758,25 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
struct intel_crtc_state *crtc_state;
enum pipe pipe;
int level, latency;
- int sagv_block_time_us;
if (!intel_has_sagv(dev_priv))
return false;
- if (IS_GEN(dev_priv, 9))
- sagv_block_time_us = 30;
- else if (IS_GEN(dev_priv, 10))
- sagv_block_time_us = 20;
- else
- sagv_block_time_us = 10;
-
/*
* If there are no active CRTCs, no additional checks need be performed
*/
- if (hweight32(state->active_crtcs) == 0)
+ if (hweight8(state->active_pipes) == 0)
return true;
/*
* SKL+ workaround: bspec recommends we disable SAGV when we have
* more then one pipe enabled
*/
- if (hweight32(state->active_crtcs) > 1)
+ if (hweight8(state->active_pipes) > 1)
return false;
/* Since we're now guaranteed to only have one active CRTC... */
- pipe = ffs(state->active_crtcs) - 1;
+ pipe = ffs(state->active_pipes) - 1;
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
@@ -3812,7 +3808,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
* incur memory latencies higher than sagv_block_time_us we
* can't enable SAGV.
*/
- if (latency < sagv_block_time_us)
+ if (latency < dev_priv->sagv_block_time_us)
return false;
}
@@ -3875,14 +3871,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
if (WARN_ON(!state) || !crtc_state->base.active) {
alloc->start = 0;
alloc->end = 0;
- *num_active = hweight32(dev_priv->active_crtcs);
+ *num_active = hweight8(dev_priv->active_pipes);
return;
}
if (intel_state->active_pipe_changes)
- *num_active = hweight32(intel_state->active_crtcs);
+ *num_active = hweight8(intel_state->active_pipes);
else
- *num_active = hweight32(dev_priv->active_crtcs);
+ *num_active = hweight8(dev_priv->active_pipes);
ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
*num_active, ddb);
@@ -4013,7 +4009,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (is_planar_yuv_format(fourcc))
+ if (fourcc &&
+ drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
swap(val, val2);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
@@ -4071,7 +4068,6 @@ static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4079,27 +4075,17 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return u32_to_fixed16(0);
- /* n.b., src is 16.16 fixed point, dst is whole integer */
- if (plane->id == PLANE_CURSOR) {
- /*
- * Cursors only support 0/180 degree rotation,
- * hence no need to account for rotation here.
- */
- src_w = plane_state->base.src_w >> 16;
- src_h = plane_state->base.src_h >> 16;
- dst_w = plane_state->base.crtc_w;
- dst_h = plane_state->base.crtc_h;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
- dst_h = drm_rect_height(&plane_state->base.dst);
- }
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ *
+ * n.b., src is 16.16 fixed point, dst is whole integer.
+ */
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+ dst_h = drm_rect_height(&plane_state->base.dst);
fp_w_ratio = div_fixed16(src_w, dst_w);
fp_h_ratio = div_fixed16(src_h, dst_h);
@@ -4109,117 +4095,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
return mul_fixed16(downscale_w, downscale_h);
}
-static uint_fixed_16_16_t
-skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
-{
- uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
-
- if (!crtc_state->base.enable)
- return pipe_downscale;
-
- if (crtc_state->pch_pfit.enabled) {
- u32 src_w, src_h, dst_w, dst_h;
- u32 pfit_size = crtc_state->pch_pfit.size;
- uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
- uint_fixed_16_16_t downscale_h, downscale_w;
-
- src_w = crtc_state->pipe_src_w;
- src_h = crtc_state->pipe_src_h;
- dst_w = pfit_size >> 16;
- dst_h = pfit_size & 0xffff;
-
- if (!dst_w || !dst_h)
- return pipe_downscale;
-
- fp_w_ratio = div_fixed16(src_w, dst_w);
- fp_h_ratio = div_fixed16(src_h, dst_h);
- downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
- downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
-
- pipe_downscale = mul_fixed16(downscale_w, downscale_h);
- }
-
- return pipe_downscale;
-}
-
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
- int crtc_clock, dotclk;
- u32 pipe_max_pixel_rate;
- uint_fixed_16_16_t pipe_downscale;
- uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
-
- if (!crtc_state->base.enable)
- return 0;
-
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- uint_fixed_16_16_t plane_downscale;
- uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
- int bpp;
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- continue;
-
- if (WARN_ON(!plane_state->base.fb))
- return -EINVAL;
-
- plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state);
- bpp = plane_state->base.fb->format->cpp[0] * 8;
- if (bpp == 64)
- plane_downscale = mul_fixed16(plane_downscale,
- fp_9_div_8);
-
- max_downscale = max_fixed16(plane_downscale, max_downscale);
- }
- pipe_downscale = skl_pipe_downscale_amount(crtc_state);
-
- pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
-
- crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- dotclk *= 2;
-
- pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
-
- if (pipe_max_pixel_rate < crtc_clock) {
- DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
- const int plane)
+ int color_plane)
{
- struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
u32 data_rate;
u32 width = 0, height = 0;
- struct drm_framebuffer *fb;
- u32 format;
uint_fixed_16_16_t down_scale_amount;
u64 rate;
if (!plane_state->base.visible)
return 0;
- fb = plane_state->base.fb;
- format = fb->format->format;
-
- if (intel_plane->id == PLANE_CURSOR)
+ if (plane->id == PLANE_CURSOR)
return 0;
- if (plane == 1 && !is_planar_yuv_format(format))
+
+ if (color_plane == 1 &&
+ !drm_format_info_is_yuv_semiplanar(fb->format))
return 0;
/*
@@ -4231,7 +4126,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height = drm_rect_height(&plane_state->base.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
- if (plane == 1 && is_planar_yuv_format(format)) {
+ if (color_plane == 1) {
width /= 2;
height /= 2;
}
@@ -4242,7 +4137,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
- rate *= fb->format->cpp[plane];
+ rate *= fb->format->cpp[color_plane];
return rate;
}
@@ -4252,18 +4147,16 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *uv_plane_data_rate)
{
struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ enum plane_id plane_id = plane->id;
u64 rate;
/* packed/y */
@@ -4284,21 +4177,19 @@ static u64
icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate)
{
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!crtc_state->base.state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ enum plane_id plane_id = plane->id;
u64 rate;
- if (!plane_state->linked_plane) {
+ if (!plane_state->planar_linked_plane) {
rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
@@ -4307,17 +4198,17 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
/*
* The slave plane might not iterate in
- * drm_atomic_crtc_state_for_each_plane_state(),
+ * intel_atomic_crtc_state_for_each_plane_state(),
* and needs the master plane state which may be
* NULL if we try get_new_plane_state(), so we
* always calculate from the master.
*/
- if (plane_state->slave)
+ if (plane_state->planar_slave)
continue;
/* Y plane rate is calculated on the slave */
rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
- y_plane_id = plane_state->linked_plane->id;
+ y_plane_id = plane_state->planar_linked_plane->id;
plane_data_rate[y_plane_id] = rate;
total_data_rate += rate;
@@ -4647,7 +4538,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 interm_pbpl;
/* only planar format has two planes */
- if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
+ if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) {
DRM_DEBUG_KMS("Non planar format have single plane\n");
return -EINVAL;
}
@@ -4659,7 +4550,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = is_planar_yuv_format(format->format);
+ wp->is_planar = drm_format_info_is_yuv_semiplanar(format);
wp->width = width;
if (color_plane == 1 && wp->is_planar)
@@ -4731,20 +4622,15 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct skl_wm_params *wp, int color_plane)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
const struct drm_framebuffer *fb = plane_state->base.fb;
int width;
- if (plane->id == PLANE_CURSOR) {
- width = plane_state->base.crtc_w;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->base.src) >> 16;
- }
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->base.src) >> 16;
return skl_compute_wm_params(crtc_state, width,
fb->format, fb->modifier,
@@ -5056,12 +4942,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
int ret;
/* Watermarks calculated in master */
- if (plane_state->slave)
+ if (plane_state->planar_slave)
return 0;
- if (plane_state->linked_plane) {
+ if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane_id y_plane_id = plane_state->linked_plane->id;
+ enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
WARN_ON(!fb->format->is_yuv ||
@@ -5090,8 +4976,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
int ret;
/*
@@ -5100,10 +4986,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
*/
memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state,
- &crtc_state->base) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
+ crtc_state) {
if (INTEL_GEN(dev_priv) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
@@ -5263,19 +5147,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static u32
-pipes_modified(struct intel_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- u32 i, ret = 0;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- ret |= drm_crtc_mask(&crtc->base);
-
- return ret;
-}
-
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
@@ -5451,36 +5322,27 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
+static int intel_add_all_pipes(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- u32 realloc_pipes = pipes_modified(state);
- int ret, i;
- /*
- * When we distrust bios wm we always need to recompute to set the
- * expected DDB allocations for each CRTC.
- */
- if (dev_priv->wm.distrust_bios_wm)
- (*changed) = true;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
- /*
- * If this transaction isn't actually touching any CRTC's, don't
- * bother with watermark calculation. Note that if we pass this
- * test, we're guaranteed to hold at least one CRTC state mutex,
- * which means we can safely use values like dev_priv->active_crtcs
- * since any racing commits that want to update them would need to
- * hold _all_ CRTC state mutexes.
- */
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- (*changed) = true;
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
- if (!*changed)
- return 0;
+ return 0;
+}
+
+static int
+skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
/*
* If this is our first atomic update following hardware readout,
@@ -5489,7 +5351,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
* ensure a full DDB recompute.
*/
if (dev_priv->wm.distrust_bios_wm) {
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
state->base.acquire_ctx);
if (ret)
return ret;
@@ -5497,13 +5359,13 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
state->active_pipe_changes = ~0;
/*
- * We usually only initialize state->active_crtcs if we
+ * We usually only initialize state->active_pipes if we
* we're doing a modeset; make sure this field is always
* initialized during the sanitization process that happens
* on the first commit too.
*/
if (!state->modeset)
- state->active_crtcs = dev_priv->active_crtcs;
+ state->active_pipes = dev_priv->active_pipes;
}
/*
@@ -5520,18 +5382,11 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- realloc_pipes = ~0;
state->wm_results.dirty_pipes = ~0;
- }
- /*
- * We're not recomputing for the pipes not included in the commit, so
- * make sure we start with the current state.
- */
- for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ ret = intel_add_all_pipes(state);
+ if (ret)
+ return ret;
}
return 0;
@@ -5604,14 +5459,13 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
struct skl_ddb_values *results = &state->wm_results;
- bool changed = false;
int ret, i;
/* Clear all dirty flags */
results->dirty_pipes = 0;
- ret = skl_ddb_add_affected_pipes(state, &changed);
- if (ret || !changed)
+ ret = skl_ddb_add_affected_pipes(state);
+ if (ret)
return ret;
/*
@@ -5633,7 +5487,7 @@ skl_compute_wm(struct intel_atomic_state *state)
if (!skl_pipe_wm_equals(crtc,
&old_crtc_state->wm.skl.optimal,
&new_crtc_state->wm.skl.optimal))
- results->dirty_pipes |= drm_crtc_mask(&crtc->base);
+ results->dirty_pipes |= BIT(crtc->pipe);
}
ret = skl_compute_ddb(state);
@@ -5653,7 +5507,7 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
enum pipe pipe = crtc->pipe;
- if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
+ if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
@@ -5662,12 +5516,11 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
static void skl_initial_wm(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_values *results = &state->wm_results;
- if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
+ if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
@@ -5816,10 +5669,10 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
if (crtc->active)
- hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
+ hw->dirty_pipes |= BIT(crtc->pipe);
}
- if (dev_priv->active_crtcs) {
+ if (dev_priv->active_pipes) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
}
@@ -6397,2488 +6250,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
intel_enable_ipc(dev_priv);
}
-/*
- * Lock protecting IPS related data structures
- */
-DEFINE_SPINLOCK(mchdev_lock);
-
-bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 rgvswctl;
-
- lockdep_assert_held(&mchdev_lock);
-
- rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
- }
-
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
- intel_uncore_posting_read16(uncore, MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
-
- return true;
-}
-
-static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 rgvmodectl;
- u8 fmax, fmin, fstart, vstart;
-
- spin_lock_irq(&mchdev_lock);
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
-
- /* Enable temp reporting */
- intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE);
-
- /* 100ms RC evaluation intervals */
- intel_uncore_write(uncore, RCUPEI, 100000);
- intel_uncore_write(uncore, RCDNEI, 100000);
-
- /* Set max/min thresholds to 90ms and 80ms respectively */
- intel_uncore_write(uncore, RCBMAXAVG, 90000);
- intel_uncore_write(uncore, RCBMINAVG, 80000);
-
- intel_uncore_write(uncore, MEMIHYST, 1);
-
- /* Set up min, max, and cur for interrupt handling */
- fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
- fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
-
- vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
- PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
-
- dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
- dev_priv->ips.fstart = fstart;
-
- dev_priv->ips.max_delay = fstart;
- dev_priv->ips.min_delay = fmin;
- dev_priv->ips.cur_delay = fstart;
-
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
-
- intel_uncore_write(uncore,
- MEMINTREN,
- MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
- /*
- * Interrupts will be enabled in ironlake_irq_postinstall
- */
-
- intel_uncore_write(uncore, VIDSTART, vstart);
- intel_uncore_posting_read(uncore, VIDSTART);
-
- rgvmodectl |= MEMMODE_SWMODE_EN;
- intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
-
- if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
- MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
- mdelay(1);
-
- ironlake_set_drps(dev_priv, fstart);
-
- dev_priv->ips.last_count1 =
- intel_uncore_read(uncore, DMIEC) +
- intel_uncore_read(uncore, DDREC) +
- intel_uncore_read(uncore, CSIEC);
- dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
- dev_priv->ips.last_time2 = ktime_get_raw_ns();
-
- spin_unlock_irq(&mchdev_lock);
-}
-
-static void ironlake_disable_drps(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 rgvswctl;
-
- spin_lock_irq(&mchdev_lock);
-
- rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
-
- /* Ack interrupts, disable EFC interrupt */
- intel_uncore_write(uncore,
- MEMINTREN,
- intel_uncore_read(uncore, MEMINTREN) &
- ~MEMINT_EVAL_CHG_EN);
- intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
- intel_uncore_write(uncore,
- DEIER,
- intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
- intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
- intel_uncore_write(uncore,
- DEIMR,
- intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
-
- /* Go back to the starting frequency */
- ironlake_set_drps(i915, i915->ips.fstart);
- mdelay(1);
- rgvswctl |= MEMCTL_CMD_STS;
- intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
- mdelay(1);
-
- spin_unlock_irq(&mchdev_lock);
-}
-
-/* There's a funny hw issue where the hw returns all 0 when reading from
- * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
- * ourselves, instead of doing a rmw cycle (which might result in us clearing
- * all limits and the gpu stuck at whatever frequency it is at atm).
- */
-static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 limits;
-
- /* Only set the down limit when we've reached the lowest level to avoid
- * getting more interrupts, otherwise leave this clear. This prevents a
- * race in the hw when coming out of rc6: There's a tiny window where
- * the hw runs at the minimal clock before selecting the desired
- * frequency, if the down threshold expires in that window we will not
- * receive a down interrupt. */
- if (INTEL_GEN(dev_priv) >= 9) {
- limits = (rps->max_freq_softlimit) << 23;
- if (val <= rps->min_freq_softlimit)
- limits |= (rps->min_freq_softlimit) << 14;
- } else {
- limits = rps->max_freq_softlimit << 24;
- if (val <= rps->min_freq_softlimit)
- limits |= rps->min_freq_softlimit << 16;
- }
-
- return limits;
-}
-
-static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 threshold_up = 0, threshold_down = 0; /* in % */
- u32 ei_up = 0, ei_down = 0;
-
- lockdep_assert_held(&rps->power.mutex);
-
- if (new_power == rps->power.mode)
- return;
-
- /* Note the units here are not exactly 1us, but 1280ns. */
- switch (new_power) {
- case LOW_POWER:
- /* Upclock if more than 95% busy over 16ms */
- ei_up = 16000;
- threshold_up = 95;
-
- /* Downclock if less than 85% busy over 32ms */
- ei_down = 32000;
- threshold_down = 85;
- break;
-
- case BETWEEN:
- /* Upclock if more than 90% busy over 13ms */
- ei_up = 13000;
- threshold_up = 90;
-
- /* Downclock if less than 75% busy over 32ms */
- ei_down = 32000;
- threshold_down = 75;
- break;
-
- case HIGH_POWER:
- /* Upclock if more than 85% busy over 10ms */
- ei_up = 10000;
- threshold_up = 85;
-
- /* Downclock if less than 60% busy over 32ms */
- ei_down = 32000;
- threshold_down = 60;
- break;
- }
-
- /* When byt can survive without system hang with dynamic
- * sw freq adjustments, this restriction can be lifted.
- */
- if (IS_VALLEYVIEW(dev_priv))
- goto skip_hw_write;
-
- I915_WRITE(GEN6_RP_UP_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_up));
- I915_WRITE(GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv,
- ei_up * threshold_up / 100));
-
- I915_WRITE(GEN6_RP_DOWN_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_down));
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv,
- ei_down * threshold_down / 100));
-
- I915_WRITE(GEN6_RP_CONTROL,
- (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
-skip_hw_write:
- rps->power.mode = new_power;
- rps->power.up_threshold = threshold_up;
- rps->power.down_threshold = threshold_down;
-}
-
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int new_power;
-
- new_power = rps->power.mode;
- switch (rps->power.mode) {
- case LOW_POWER:
- if (val > rps->efficient_freq + 1 &&
- val > rps->cur_freq)
- new_power = BETWEEN;
- break;
-
- case BETWEEN:
- if (val <= rps->efficient_freq &&
- val < rps->cur_freq)
- new_power = LOW_POWER;
- else if (val >= rps->rp0_freq &&
- val > rps->cur_freq)
- new_power = HIGH_POWER;
- break;
-
- case HIGH_POWER:
- if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
- val < rps->cur_freq)
- new_power = BETWEEN;
- break;
- }
- /* Max/min bins are special */
- if (val <= rps->min_freq_softlimit)
- new_power = LOW_POWER;
- if (val >= rps->max_freq_softlimit)
- new_power = HIGH_POWER;
-
- mutex_lock(&rps->power.mutex);
- if (rps->power.interactive)
- new_power = HIGH_POWER;
- rps_set_power(dev_priv, new_power);
- mutex_unlock(&rps->power.mutex);
-}
-
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
-{
- struct intel_rps *rps = &i915->gt_pm.rps;
-
- if (INTEL_GEN(i915) < 6)
- return;
-
- mutex_lock(&rps->power.mutex);
- if (interactive) {
- if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
- rps_set_power(i915, HIGH_POWER);
- } else {
- GEM_BUG_ON(!rps->power.interactive);
- rps->power.interactive--;
- }
- mutex_unlock(&rps->power.mutex);
-}
-
-static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 mask = 0;
-
- /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
- if (val > rps->min_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
- if (val < rps->max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
-
- mask &= dev_priv->pm_rps_events;
-
- return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
-}
-
-/* gen6_set_rps is called to update the frequency request, but should also be
- * called when the range (min_delay and max_delay) is modified so that we can
- * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* min/max delay may still have been modified so be sure to
- * write the limits value.
- */
- if (val != rps->cur_freq) {
- gen6_set_rps_thresholds(dev_priv, val);
-
- if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(GEN6_RPNSWREQ,
- GEN9_FREQUENCY(val));
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(val));
- else
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(val) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- }
-
- /* Make sure we continue to get interrupts
- * until we hit the minimum or maximum frequencies.
- */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- rps->cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
- return 0;
-}
-
-static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- int err;
-
- if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
- "Odd GPU freq value\n"))
- val &= ~1;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- if (val != dev_priv->gt_pm.rps.cur_freq) {
- vlv_punit_get(dev_priv);
- err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- vlv_punit_put(dev_priv);
- if (err)
- return err;
-
- gen6_set_rps_thresholds(dev_priv, val);
- }
-
- dev_priv->gt_pm.rps.cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
- return 0;
-}
-
-/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
- *
- * * If Gfx is Idle, then
- * 1. Forcewake Media well.
- * 2. Request idle freq.
- * 3. Release Forcewake of Media well.
-*/
-static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val = rps->idle_freq;
- int err;
-
- if (rps->cur_freq <= val)
- return;
-
- /* The punit delays the write of the frequency and voltage until it
- * determines the GPU is awake. During normal usage we don't want to
- * waste power changing the frequency if the GPU is sleeping (rc6).
- * However, the GPU and driver is now idle and we do not want to delay
- * switching to minimum voltage (reducing power whilst idle) as we do
- * not expect to be woken in the near future and so must flush the
- * change by waking the device.
- *
- * We choose to take the media powerwell (either would do to trick the
- * punit into committing the voltage change) as that takes a lot less
- * power than the render powerwell.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
- err = valleyview_set_rps(dev_priv, val);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
-
- if (err)
- DRM_ERROR("Failed to set RPS for idle\n");
-}
-
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- u8 freq;
-
- if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
- gen6_rps_reset_ei(dev_priv);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, rps->cur_freq));
-
- gen6_enable_rps_interrupts(dev_priv);
-
- /* Use the user's desired frequency as a guide, but for better
- * performance, jump directly to RPe as our starting frequency.
- */
- freq = max(rps->cur_freq,
- rps->efficient_freq);
-
- if (intel_set_rps(dev_priv,
- clamp(freq,
- rps->min_freq_softlimit,
- rps->max_freq_softlimit)))
- DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
- }
- mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* Flush our bottom-half so that it does not race with us
- * setting the idle frequency and so that it is bounded by
- * our rpm wakeref. And then disable the interrupts to stop any
- * futher RPS reclocking whilst we are asleep.
- */
- gen6_disable_rps_interrupts(dev_priv);
-
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_set_rps_idle(dev_priv);
- else
- gen6_set_rps(dev_priv, rps->idle_freq);
- rps->last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_sanitize_rps_pm_mask(dev_priv, ~0));
- }
- mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_boost(struct i915_request *rq)
-{
- struct intel_rps *rps = &rq->i915->gt_pm.rps;
- unsigned long flags;
- bool boost;
-
- /* This is intentionally racy! We peek at the state here, then
- * validate inside the RPS worker.
- */
- if (!rps->enabled)
- return;
-
- if (i915_request_signaled(rq))
- return;
-
- /* Serializes with i915_request_retire() */
- boost = false;
- spin_lock_irqsave(&rq->lock, flags);
- if (!i915_request_has_waitboost(rq) &&
- !dma_fence_is_signaled_locked(&rq->fence)) {
- boost = !atomic_fetch_inc(&rps->num_waiters);
- rq->flags |= I915_REQUEST_WAITBOOST;
- }
- spin_unlock_irqrestore(&rq->lock, flags);
- if (!boost)
- return;
-
- if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
- schedule_work(&rps->work);
-
- atomic_inc(&rps->boosts);
-}
-
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int err;
-
- lockdep_assert_held(&rps->lock);
- GEM_BUG_ON(val > rps->max_freq);
- GEM_BUG_ON(val < rps->min_freq);
-
- if (!rps->enabled) {
- rps->cur_freq = val;
- return 0;
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = valleyview_set_rps(dev_priv, val);
- else
- err = gen6_set_rps(dev_priv, val);
-
- return err;
-}
-
-static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN9_PG_ENABLE, 0);
-}
-
-static void gen9_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
-{
- /* We're doing forcewake before Disabling RC6,
- * This what the BIOS expects when going into suspend */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
-{
- bool enable_rc6 = true;
- unsigned long rc6_ctx_base;
- u32 rc_ctl;
- int rc_sw_target;
-
- rc_ctl = I915_READ(GEN6_RC_CONTROL);
- rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
- RC_SW_TARGET_STATE_SHIFT;
- DRM_DEBUG_DRIVER("BIOS enabled RC states: "
- "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
- onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
- onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
- rc_sw_target);
-
- if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
- enable_rc6 = false;
- }
-
- /*
- * The exact context size is not known for BXT, so assume a page size
- * for this check.
- */
- rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
- (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
- DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
- enable_rc6 = false;
- }
-
- if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
- DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
- !I915_READ(GEN8_PUSHBUS_ENABLE) ||
- !I915_READ(GEN8_PUSHBUS_SHIFT)) {
- DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN6_GFXPAUSE)) {
- DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN8_MISC_CTRL0)) {
- DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
- enable_rc6 = false;
- }
-
- return enable_rc6;
-}
-
-static bool sanitize_rc6(struct drm_i915_private *i915)
-{
- struct intel_device_info *info = mkwrite_device_info(i915);
-
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(i915)) {
- info->has_rc6 = 0;
- info->has_rps = false;
- }
-
- if (info->has_rc6 &&
- IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
- DRM_INFO("RC6 disabled by BIOS\n");
- info->has_rc6 = 0;
- }
-
- /*
- * We assume that we do not have any deep rc6 levels if we don't have
- * have the previous rc6 level supported, i.e. we use HAS_RC6()
- * as the initial coarse check for rc6 in general, moving on to
- * progressively finer/deeper levels.
- */
- if (!info->has_rc6 && info->has_rc6p)
- info->has_rc6p = 0;
-
- return info->has_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* All of these values are in units of 50MHz */
-
- /* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_GEN9_LP(dev_priv)) {
- u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 0) & 0xff;
- } else {
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 16) & 0xff;
- }
- /* hw_max = RP0 until we check for overclocking */
- rps->max_freq = rps->rp0_freq;
-
- rps->efficient_freq = rps->rp1_freq;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- u32 ddcc_status = 0;
-
- if (sandybridge_pcode_read(dev_priv,
- HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
- &ddcc_status, NULL) == 0)
- rps->efficient_freq =
- clamp_t(u8,
- ((ddcc_status >> 8) & 0xff),
- rps->min_freq,
- rps->max_freq);
- }
-
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Store the frequency values in 16.66 MHZ units, which is
- * the natural hardware unit for SKL
- */
- rps->rp0_freq *= GEN9_FREQ_SCALER;
- rps->rp1_freq *= GEN9_FREQ_SCALER;
- rps->min_freq *= GEN9_FREQ_SCALER;
- rps->max_freq *= GEN9_FREQ_SCALER;
- rps->efficient_freq *= GEN9_FREQ_SCALER;
- }
-}
-
-static void reset_rps(struct drm_i915_private *dev_priv,
- int (*set)(struct drm_i915_private *, u8))
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u8 freq = rps->cur_freq;
-
- /* force a reset */
- rps->power.mode = -1;
- rps->cur_freq = -1;
-
- if (set(dev_priv, freq))
- DRM_ERROR("Failed to reset RPS to initial values\n");
-}
-
-/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_i915_private *dev_priv)
-{
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Program defaults and thresholds for RPS */
- if (IS_GEN(dev_priv, 9))
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
-
- /* 1 second timeout*/
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
- GT_INTERVAL_FROM_US(dev_priv, 1000000));
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
-
- /* Leaning on the below call to gen6_set_rps to program/setup the
- * Up/Down EI & threshold registers, as well as the RP_CONTROL,
- * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /*
- * 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
-
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- if (HAS_GT_UC(dev_priv))
- I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
-
- /*
- * 2c: Program Coarse Power Gating Policies.
- *
- * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
- * use instead is a more conservative estimate for the maximum time
- * it takes us to service a CS interrupt and submit a new ELSP - that
- * is the time which the GPU is idle waiting for the CPU to select the
- * next request to execute. If the idle hysteresis is less than that
- * interrupt service latency, the hardware will automatically gate
- * the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from intel_pstate is that we
- * do not want the enable hysteresis to less than the wakeup latency.
- *
- * igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
- */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
- /* 3a: Enable RC6 */
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- GEN6_RC_CTL_EI_MODE(1));
-
- /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
- I915_WRITE(GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 rc6_mode;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- if (INTEL_GEN(dev_priv) >= 10) {
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
- } else if (IS_SKYLAKE(dev_priv)) {
- /*
- * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
- * when CPG is enabled
- */
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
- } else {
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
- }
-
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- if (HAS_GT_UC(dev_priv))
- I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- /*
- * 2c: Program Coarse Power Gating Policies.
- *
- * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
- * use instead is a more conservative estimate for the maximum time
- * it takes us to service a CS interrupt and submit a new ELSP - that
- * is the time which the GPU is idle waiting for the CPU to select the
- * next request to execute. If the idle hysteresis is less than that
- * interrupt service latency, the hardware will automatically gate
- * the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from plane_state is that we
-	 * do not want the enable hysteresis to be less than the wakeup latency.
- *
- * igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
-	 * big cores) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
- */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
- /* 3a: Enable RC6 */
- I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
-
- /* WaRsUseTimeoutMode:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- else
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- rc6_mode);
-
- /*
- * 3b: Enable Coarse Power Gating only when RC6 is enabled.
- * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
- */
- if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- I915_WRITE(GEN9_PG_ENABLE, 0);
- else
- I915_WRITE(GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
-
- /* 3: Enable RC6 */
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_RC6_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 1 Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(rps->rp1_freq));
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- HSW_FREQUENCY(rps->rp1_freq));
- /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
-
- /* Docs recommend 900MHz, and 300 MHz respectively */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- rps->max_freq_softlimit << 24 |
- rps->min_freq_softlimit << 16);
-
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
- I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
- I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- /* 2: Enable RPS */
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 rc6vids, rc6_mask;
- u32 gtfifodbg;
- int ret;
-
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* Clear the DBG now so we don't confuse earlier errors */
- gtfifodbg = I915_READ(GTFIFODBG);
- if (gtfifodbg) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* disable the counters and set deterministic thresholds */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
- else
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
- I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
- /* We don't use those on Haswell */
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- if (HAS_RC6p(dev_priv))
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
- if (HAS_RC6pp(dev_priv))
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
- I915_WRITE(GEN6_RC_CONTROL,
- rc6_mask |
- GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
-
- rc6vids = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
- if (IS_GEN(dev_priv, 6) && ret) {
- DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
- DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
- GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
- rc6vids &= 0xffff00;
- rc6vids |= GEN6_ENCODE_RC6_VID(450);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
- if (ret)
- DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
- }
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there might be some value in exposing these to
- * userspace...
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Power down if completely idle for over 50ms */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- const int min_freq = 15;
- const int scaling_factor = 180;
- unsigned int gpu_freq;
- unsigned int max_ia_freq, min_ring_freq;
- unsigned int max_gpu_freq, min_gpu_freq;
- struct cpufreq_policy *policy;
-
- lockdep_assert_held(&rps->lock);
-
- if (rps->max_freq <= rps->min_freq)
- return;
-
- policy = cpufreq_cpu_get(0);
- if (policy) {
- max_ia_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- } else {
- /*
- * Default to measured freq if none found, PCU will ensure we
- * don't go over
- */
- max_ia_freq = tsc_khz;
- }
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- min_ring_freq = I915_READ(DCLK) & 0xf;
- /* convert DDR frequency from units of 266.6MHz to bandwidth */
- min_ring_freq = mult_frac(min_ring_freq, 8, 3);
-
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-		/* Convert GT frequency to 50 MHz units */
- min_gpu_freq /= GEN9_FREQ_SCALER;
- max_gpu_freq /= GEN9_FREQ_SCALER;
- }
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
- const int diff = max_gpu_freq - gpu_freq;
- unsigned int ia_freq = 0, ring_freq = 0;
-
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /*
- * ring_freq = 2 * GT. ring_freq is in 100MHz units
- * No floor required for ring frequency on SKL.
- */
- ring_freq = gpu_freq;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- /* max(2 * GT, DDR). NB: GT is 50MHz units */
- ring_freq = max(min_ring_freq, gpu_freq);
- } else if (IS_HASWELL(dev_priv)) {
- ring_freq = mult_frac(gpu_freq, 5, 4);
- ring_freq = max(min_ring_freq, ring_freq);
- /* leave ia_freq as the default, chosen by cpufreq */
- } else {
- /* On older processors, there is no separate ring
- * clock domain, so in order to boost the bandwidth
- * of the ring, we need to upclock the CPU (ia_freq).
- *
- * For GPU frequencies less than 750MHz,
- * just use the lowest ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
- }
-
- sandybridge_pcode_write(dev_priv,
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
- ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
- ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
- gpu_freq);
- }
-}
-
-static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp0;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
-
- switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
- case 8:
- /* (2 * 4) config */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
- break;
- case 12:
- /* (2 * 6) config */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
- break;
- case 16:
- /* (2 * 8) config */
- default:
- /* Setting (2 * 8) Min RP0 for any other combination */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
- break;
- }
-
- rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
-
- return rp0;
-}
-
-static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpe;
-
- val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
- rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
-
- return rpe;
-}
-
-static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp1;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
-
- return rp1;
-}
-
-static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpn;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
- rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
- FB_GFX_FREQ_FUSE_MASK);
-
- return rpn;
-}
-
-static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp1;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
- rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
-
- return rp1;
-}
-
-static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp0;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
- rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
- /* Clamp to max */
- rp0 = min_t(u32, rp0, 0xea);
-
- return rp0;
-}
-
-static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpe;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
- rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
- rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
-
- return rpe;
-}
-
-static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
- /*
- * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
- * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
- * a BYT-M B0 the above register contains 0xbf. Moreover when setting
-	 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
- * to make sure it matches what Punit accepts.
- */
- return max_t(u32, val, 0xc0);
-}
-
-/* Check that the pctx buffer wasn't moved under us. */
-static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
-{
- unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
- WARN_ON(pctx_addr != dev_priv->dsm.start +
- dev_priv->vlv_pctx->stolen->start);
-}
-
-
-/* Check that the pcbr address is not empty. */
-static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
-{
- unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
- WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
-}
-
-static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
-{
- resource_size_t pctx_paddr, paddr;
- resource_size_t pctx_size = 32*1024;
- u32 pcbr;
-
- pcbr = I915_READ(VLV_PCBR);
- if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = dev_priv->dsm.end + 1 - pctx_size;
- GEM_BUG_ON(paddr > U32_MAX);
-
- pctx_paddr = (paddr & (~4095));
- I915_WRITE(VLV_PCBR, pctx_paddr);
- }
-
- DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
-}
-
-static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *pctx;
- resource_size_t pctx_paddr;
- resource_size_t pctx_size = 24*1024;
- u32 pcbr;
-
- pcbr = I915_READ(VLV_PCBR);
- if (pcbr) {
- /* BIOS set it up already, grab the pre-alloc'd space */
- resource_size_t pcbr_offset;
-
- pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
- pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
- pcbr_offset,
- I915_GTT_OFFSET_NONE,
- pctx_size);
- goto out;
- }
-
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-
- /*
- * From the Gunit register HAS:
- * The Gfx driver is expected to program this register and ensure
- * proper allocation within Gfx stolen memory. For example, this
-	 * register should be programmed such that the PCBR range does not
- * overlap with other ranges, such as the frame buffer, protected
- * memory, or any other relevant ranges.
- */
- pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
- if (!pctx) {
- DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
- goto out;
- }
-
- GEM_BUG_ON(range_overflows_t(u64,
- dev_priv->dsm.start,
- pctx->stolen->start,
- U32_MAX));
- pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
- I915_WRITE(VLV_PCBR, pctx_paddr);
-
-out:
- DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
- dev_priv->vlv_pctx = pctx;
-}
-
-static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *pctx;
-
- pctx = fetch_and_zero(&dev_priv->vlv_pctx);
- if (pctx)
- i915_gem_object_put(pctx);
-}
-
-static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
-{
- dev_priv->gt_pm.rps.gpll_ref_freq =
- vlv_get_cck_clock(dev_priv, "GPLL ref",
- CCK_GPLL_CLOCK_CONTROL,
- dev_priv->czclk_freq);
-
- DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
- dev_priv->gt_pm.rps.gpll_ref_freq);
-}
-
-static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
-
- valleyview_setup_pctx(dev_priv);
-
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- vlv_init_gpll_ref_freq(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- dev_priv->mem_freq = 800;
- break;
- case 2:
- dev_priv->mem_freq = 1066;
- break;
- case 3:
- dev_priv->mem_freq = 1333;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
- rps->max_freq = valleyview_rps_max_freq(dev_priv);
- rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->max_freq),
- rps->max_freq);
-
- rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- rps->efficient_freq);
-
- rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
- DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->rp1_freq),
- rps->rp1_freq);
-
- rps->min_freq = valleyview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- rps->min_freq);
-
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-}
-
-static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
-
- cherryview_setup_pctx(dev_priv);
-
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- vlv_init_gpll_ref_freq(dev_priv);
-
- val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
-
- switch ((val >> 2) & 0x7) {
- case 3:
- dev_priv->mem_freq = 2000;
- break;
- default:
- dev_priv->mem_freq = 1600;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
- rps->max_freq = cherryview_rps_max_freq(dev_priv);
- rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->max_freq),
- rps->max_freq);
-
- rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- rps->efficient_freq);
-
- rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
- DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->rp1_freq),
- rps->rp1_freq);
-
- rps->min_freq = cherryview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- rps->min_freq);
-
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
- rps->min_freq) & 1,
- "Odd GPU freq values\n");
-}
-
-static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
- valleyview_cleanup_pctx(dev_priv);
-}
-
-static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 gtfifodbg, rc6_mode, pcbr;
-
- gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
- GT_FIFO_FREE_ENTRIES_CHV);
- if (gtfifodbg) {
- DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
- gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- cherryview_check_pctx(dev_priv);
-
- /* 1a & 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2a: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
-
- /* Allows RC6 residency counter to work */
- I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
-
- /* For now we assume BIOS is allocating and populating the PCBR */
- pcbr = I915_READ(VLV_PCBR);
-
- /* 3: Enable RC6 */
- rc6_mode = 0;
- if (pcbr >> VLV_PCBR_ADDR_SHIFT)
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 1: Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- /* 2: Enable RPS */
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
- /* Setting Fixed Bias */
- vlv_punit_get(dev_priv);
-
- val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
- vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
- vlv_punit_put(dev_priv);
-
- /* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
- reset_rps(dev_priv, valleyview_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 gtfifodbg;
-
- valleyview_check_pctx(dev_priv);
-
- gtfifodbg = I915_READ(GTFIFODBG);
- if (gtfifodbg) {
- DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
- gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
-
- /* Allows RC6 residency counter to work */
- I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC0_COUNT_EN |
- VLV_RENDER_RC0_COUNT_EN |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- vlv_punit_get(dev_priv);
-
- /* Setting Fixed Bias */
- val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
- vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
- vlv_punit_put(dev_priv);
-
- /* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
- reset_rps(dev_priv, valleyview_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
- unsigned long freq;
- int div = (vidfreq & 0x3f0000) >> 16;
- int post = (vidfreq & 0x3000) >> 12;
- int pre = (vidfreq & 0x7);
-
- if (!pre)
- return 0;
-
- freq = ((div * 133333) / ((1<<post) * pre));
-
- return freq;
-}
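
For reference, the PXVFREQ decode above can be checked by hand with a small standalone sketch. The field layout mirrors the helper (div in bits 21:16, post in bits 13:12, pre in bits 2:0), the sample value is hypothetical, and the 133333 base suggests the result is in kHz against a 133.333 MHz reference clock.

#include <stdio.h>

/* Same decode as intel_pxfreq() above; returns 0 when the pre divider is 0. */
static unsigned long pxfreq_decode(unsigned int vidfreq)
{
	unsigned int div = (vidfreq & 0x3f0000) >> 16;
	unsigned int post = (vidfreq & 0x3000) >> 12;
	unsigned int pre = vidfreq & 0x7;

	if (!pre)
		return 0;

	return (div * 133333) / ((1 << post) * pre);
}

int main(void)
{
	/* hypothetical sample: div = 6, post = 0, pre = 1 -> 799998 kHz (~800 MHz) */
	printf("%lu kHz\n", pxfreq_decode((6 << 16) | 1));
	return 0;
}
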
-
-static const struct cparams {
- u16 i;
- u16 t;
- u16 m;
- u16 c;
-} cparams[] = {
- { 1, 1333, 301, 28664 },
- { 1, 1066, 294, 24460 },
- { 1, 800, 294, 25192 },
- { 0, 1333, 276, 27605 },
- { 0, 1066, 276, 27605 },
- { 0, 800, 231, 23784 },
-};
-
-static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- u64 total_count, diff, ret;
- u32 count1, count2, count3, m = 0, c = 0;
- unsigned long now = jiffies_to_msecs(jiffies), diff1;
- int i;
-
- lockdep_assert_held(&mchdev_lock);
-
- diff1 = now - dev_priv->ips.last_time1;
-
- /* Prevent division-by-zero if we are asking too fast.
- * Also, we don't get interesting results if we are polling
- * faster than once in 10ms, so just return the saved value
- * in such cases.
- */
- if (diff1 <= 10)
- return dev_priv->ips.chipset_power;
-
- count1 = I915_READ(DMIEC);
- count2 = I915_READ(DDREC);
- count3 = I915_READ(CSIEC);
-
- total_count = count1 + count2 + count3;
-
- /* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->ips.last_count1) {
- diff = ~0UL - dev_priv->ips.last_count1;
- diff += total_count;
- } else {
- diff = total_count - dev_priv->ips.last_count1;
- }
-
- for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->ips.c_m &&
- cparams[i].t == dev_priv->ips.r_t) {
- m = cparams[i].m;
- c = cparams[i].c;
- break;
- }
- }
-
- diff = div_u64(diff, diff1);
- ret = ((m * diff) + c);
- ret = div_u64(ret, 10);
-
- dev_priv->ips.last_count1 = total_count;
- dev_priv->ips.last_time1 = now;
-
- dev_priv->ips.chipset_power = ret;
-
- return ret;
-}
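
As a rough worked example of the arithmetic above (hypothetical counter numbers, result in the driver-internal units IPS consumes): with the cparams row { 1, 1333, 301, 28664 } selected, a combined counter delta of 1000 over 100 ms gives a rate of 10, so the returned value would be (301 * 10 + 28664) / 10 = 3167.
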
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
- unsigned long val = 0;
-
- if (!IS_GEN(dev_priv, 5))
- return 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- val = __i915_chipset_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-
- return val;
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *i915)
-{
- unsigned long m, x, b;
- u32 tsfs;
-
- tsfs = intel_uncore_read(&i915->uncore, TSFS);
-
- m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = intel_uncore_read8(&i915->uncore, TR1);
-
- b = tsfs & TSFS_INTR_MASK;
-
- return ((m * x) / 127) - b;
-}
-
-static int _pxvid_to_vd(u8 pxvid)
-{
- if (pxvid == 0)
- return 0;
-
- if (pxvid >= 8 && pxvid < 31)
- pxvid = 31;
-
- return (pxvid + 2) * 125;
-}
-
-static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
- const int vd = _pxvid_to_vd(pxvid);
- const int vm = vd - 1125;
-
- if (INTEL_INFO(dev_priv)->is_mobile)
- return vm > 0 ? vm : 0;
-
- return vd;
-}
-
-static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- u64 now, diff, diffms;
- u32 count;
-
- lockdep_assert_held(&mchdev_lock);
-
- now = ktime_get_raw_ns();
- diffms = now - dev_priv->ips.last_time2;
- do_div(diffms, NSEC_PER_MSEC);
-
- /* Don't divide by 0 */
- if (!diffms)
- return;
-
- count = I915_READ(GFXEC);
-
- if (count < dev_priv->ips.last_count2) {
- diff = ~0UL - dev_priv->ips.last_count2;
- diff += count;
- } else {
- diff = count - dev_priv->ips.last_count2;
- }
-
- dev_priv->ips.last_count2 = count;
- dev_priv->ips.last_time2 = now;
-
- /* More magic constants... */
- diff = diff * 1181;
- diff = div_u64(diff, diffms * 10);
- dev_priv->ips.gfx_power = diff;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- if (!IS_GEN(dev_priv, 5))
- return;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- __i915_update_gfx_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-}
-
-static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- unsigned long t, corr, state1, corr2, state2;
- u32 pxvid, ext_v;
-
- lockdep_assert_held(&mchdev_lock);
-
- pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
- pxvid = (pxvid >> 24) & 0x7f;
- ext_v = pvid_to_extvid(dev_priv, pxvid);
-
- state1 = ext_v;
-
- t = i915_mch_val(dev_priv);
-
- /* Revel in the empirically derived constants */
-
- /* Correction factor in 1/100000 units */
- if (t > 80)
- corr = ((t * 2349) + 135940);
- else if (t >= 50)
- corr = ((t * 964) + 29317);
- else /* < 50 */
- corr = ((t * 301) + 1004);
-
- corr = corr * ((150142 * state1) / 10000 - 78642);
- corr /= 100000;
- corr2 = (corr * dev_priv->ips.corr);
-
- state2 = (corr2 * state1) / 10000;
- state2 /= 100; /* convert to mW */
-
- __i915_update_gfx_val(dev_priv);
-
- return dev_priv->ips.gfx_power + state2;
-}
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
- unsigned long val = 0;
-
- if (!IS_GEN(dev_priv, 5))
- return 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- val = __i915_gfx_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-
- return val;
-}
-
-static struct drm_i915_private __rcu *i915_mch_dev;
-
-static struct drm_i915_private *mchdev_get(void)
-{
- struct drm_i915_private *i915;
-
- rcu_read_lock();
- i915 = rcu_dereference(i915_mch_dev);
- if (!kref_get_unless_zero(&i915->drm.ref))
- i915 = NULL;
- rcu_read_unlock();
-
- return i915;
-}
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
- struct drm_i915_private *i915;
- unsigned long chipset_val = 0;
- unsigned long graphics_val = 0;
- intel_wakeref_t wakeref;
-
- i915 = mchdev_get();
- if (!i915)
- return 0;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- chipset_val = __i915_chipset_val(i915);
- graphics_val = __i915_gfx_val(i915);
- spin_unlock_irq(&mchdev_lock);
- }
-
- drm_dev_put(&i915->drm);
- return chipset_val + graphics_val;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
- struct drm_i915_private *i915;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- if (i915->ips.max_delay > i915->ips.fmax)
- i915->ips.max_delay--;
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
- struct drm_i915_private *i915;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- if (i915->ips.max_delay < i915->ips.min_delay)
- i915->ips.max_delay++;
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
 * i915_gpu_busy - indicate GPU busyness to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
- struct drm_i915_private *i915;
- bool ret;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- ret = i915->gt.awake;
-
- drm_dev_put(&i915->drm);
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
- struct drm_i915_private *i915;
- bool ret;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- i915->ips.max_delay = i915->ips.fstart;
- ret = ironlake_set_drps(i915, i915->ips.fstart);
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
- void (*link)(void);
-
- link = symbol_get(ips_link_to_i915_driver);
- if (link) {
- link();
- symbol_put(ips_link_to_i915_driver);
- }
-}
-
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
-{
- /* We only register the i915 ips part with intel-ips once everything is
- * set up, to avoid intel-ips sneaking in and reading bogus values. */
- rcu_assign_pointer(i915_mch_dev, dev_priv);
-
- ips_ping_for_i915_load();
-}
-
-void intel_gpu_ips_teardown(void)
-{
- rcu_assign_pointer(i915_mch_dev, NULL);
-}
-
-static void intel_init_emon(struct drm_i915_private *dev_priv)
-{
- u32 lcfuse;
- u8 pxw[16];
- int i;
-
- /* Disable to program */
- I915_WRITE(ECR, 0);
- POSTING_READ(ECR);
-
- /* Program energy weights for various events */
- I915_WRITE(SDEW, 0x15040d00);
- I915_WRITE(CSIEW0, 0x007f0000);
- I915_WRITE(CSIEW1, 0x1e220004);
- I915_WRITE(CSIEW2, 0x04000004);
-
- for (i = 0; i < 5; i++)
- I915_WRITE(PEW(i), 0);
- for (i = 0; i < 3; i++)
- I915_WRITE(DEW(i), 0);
-
- /* Program P-state weights to account for frequency power adjustment */
- for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ(i));
- unsigned long freq = intel_pxfreq(pxvidfreq);
- unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
- unsigned long val;
-
- val = vid * vid;
- val *= (freq / 1000);
- val *= 255;
- val /= (127*127*900);
- if (val > 0xff)
- DRM_ERROR("bad pxval: %ld\n", val);
- pxw[i] = val;
- }
- /* Render standby states get 0 weight */
- pxw[14] = 0;
- pxw[15] = 0;
-
- for (i = 0; i < 4; i++) {
- u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
- (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW(i), val);
- }
-
- /* Adjust magic regs to magic values (more experimental results) */
- I915_WRITE(OGW0, 0);
- I915_WRITE(OGW1, 0);
- I915_WRITE(EG0, 0x00007f00);
- I915_WRITE(EG1, 0x0000000e);
- I915_WRITE(EG2, 0x000e0000);
- I915_WRITE(EG3, 0x68000300);
- I915_WRITE(EG4, 0x42000000);
- I915_WRITE(EG5, 0x00140031);
- I915_WRITE(EG6, 0);
- I915_WRITE(EG7, 0);
-
- for (i = 0; i < 8; i++)
- I915_WRITE(PXWL(i), 0);
-
- /* Enable PMON + select events */
- I915_WRITE(ECR, 0x80000019);
-
- lcfuse = I915_READ(LCFUSE02);
-
- dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
-{
- return !I915_READ(GEN8_RC6_CTX_INFO);
-}
-
-static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
-{
- if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- return;
-
- if (i915_rc6_ctx_corrupted(i915)) {
- DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
- i915->gt_pm.rc6.ctx_corrupted = true;
- i915->gt_pm.rc6.ctx_corrupted_wakeref =
- intel_runtime_pm_get(&i915->runtime_pm);
- }
-}
-
-static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
-{
- if (i915->gt_pm.rc6.ctx_corrupted) {
- intel_runtime_pm_put(&i915->runtime_pm,
- i915->gt_pm.rc6.ctx_corrupted_wakeref);
- i915->gt_pm.rc6.ctx_corrupted = false;
- }
-}
-
-/**
- * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
- * @i915: i915 device
- *
- * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
- */
-void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
-{
- if (i915->gt_pm.rc6.ctx_corrupted)
- intel_runtime_pm_put(&i915->runtime_pm,
- i915->gt_pm.rc6.ctx_corrupted_wakeref);
-}
-
-/**
- * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
- * @i915: i915 device
- *
- * Perform any steps needed to re-init the RC6 CTX WA after system resume.
- */
-void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
-{
- if (!i915->gt_pm.rc6.ctx_corrupted)
- return;
-
- if (i915_rc6_ctx_corrupted(i915)) {
- i915->gt_pm.rc6.ctx_corrupted_wakeref =
- intel_runtime_pm_get(&i915->runtime_pm);
- return;
- }
-
- DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
- i915->gt_pm.rc6.ctx_corrupted = false;
-}
-
-static void intel_disable_rc6(struct drm_i915_private *dev_priv);
-
-/**
- * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
- * @i915: i915 device
- *
- * Check if an RC6 CTX corruption has happened since the last check and if so
- * disable RC6 and runtime power management.
- *
- * Return false if no context corruption has happened since the last call of
- * this function, true otherwise.
- */
-bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
-{
- if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- return false;
-
- if (i915->gt_pm.rc6.ctx_corrupted)
- return false;
-
- if (!i915_rc6_ctx_corrupted(i915))
- return false;
-
- DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
-
- intel_disable_rc6(i915);
- i915->gt_pm.rc6.ctx_corrupted = true;
- i915->gt_pm.rc6.ctx_corrupted_wakeref =
- intel_runtime_pm_get_noresume(&i915->runtime_pm);
-
- return true;
-}
-
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
-	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
- * requirement.
- */
- if (!sanitize_rc6(dev_priv)) {
- DRM_INFO("RC6 disabled, disabling runtime PM support\n");
- pm_runtime_get(&dev_priv->drm.pdev->dev);
- }
-
- i915_rc6_ctx_wa_init(dev_priv);
-
- /* Initialize RPS limits (for userspace) */
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_init_gt_powersave(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_init_gt_powersave(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_init_rps_frequencies(dev_priv);
-
- /* Derive initial user preferences/limits from the hardware limits */
- rps->max_freq_softlimit = rps->max_freq;
- rps->min_freq_softlimit = rps->min_freq;
-
- /* After setting max-softlimit, find the overclock max freq */
- if (IS_GEN(dev_priv, 6) ||
- IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
- u32 params = 0;
-
- sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
- &params, NULL);
- if (params & BIT(31)) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (rps->max_freq & 0xff) * 50,
- (params & 0xff) * 50);
- rps->max_freq = params & 0xff;
- }
- }
-
- /* Finally allow us to boost to max by default */
- rps->boost_freq = rps->max_freq;
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
-}
-
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv))
- valleyview_cleanup_gt_powersave(dev_priv);
-
- i915_rc6_ctx_wa_cleanup(dev_priv);
-
- if (!HAS_RC6(dev_priv))
- pm_runtime_put(&dev_priv->drm.pdev->dev);
-}
-
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
-{
- dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
- dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
- intel_disable_gt_powersave(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (!i915->gt_pm.llc_pstate.enabled)
- return;
-
- /* Currently there is no HW configuration to be done to disable. */
-
- i915->gt_pm.llc_pstate.enabled = false;
-}
-
-static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (!dev_priv->gt_pm.rc6.enabled)
- return;
-
- if (INTEL_GEN(dev_priv) >= 9)
- gen9_disable_rc6(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rc6(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_disable_rc6(dev_priv);
-
- dev_priv->gt_pm.rc6.enabled = false;
-}
-
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- mutex_lock(&rps->lock);
- __intel_disable_rc6(dev_priv);
- mutex_unlock(&rps->lock);
-}
-
-static void intel_disable_rps(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (!dev_priv->gt_pm.rps.enabled)
- return;
-
- if (INTEL_GEN(dev_priv) >= 9)
- gen9_disable_rps(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rps(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rps(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_disable_rps(dev_priv);
- else if (IS_IRONLAKE_M(dev_priv))
- ironlake_disable_drps(dev_priv);
-
- dev_priv->gt_pm.rps.enabled = false;
-}
-
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- mutex_lock(&dev_priv->gt_pm.rps.lock);
-
- __intel_disable_rc6(dev_priv);
- intel_disable_rps(dev_priv);
- if (HAS_LLC(dev_priv))
- intel_disable_llc_pstate(dev_priv);
-
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
-static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (i915->gt_pm.llc_pstate.enabled)
- return;
-
- gen6_update_ring_freq(i915);
-
- i915->gt_pm.llc_pstate.enabled = true;
-}
-
-static void intel_enable_rc6(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (dev_priv->gt_pm.rc6.enabled)
- return;
-
- if (dev_priv->gt_pm.rc6.ctx_corrupted)
- return;
-
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_enable_rc6(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 11)
- gen11_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 9)
- gen9_enable_rc6(dev_priv);
- else if (IS_BROADWELL(dev_priv))
- gen8_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_enable_rc6(dev_priv);
-
- dev_priv->gt_pm.rc6.enabled = true;
-}
-
-static void intel_enable_rps(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- lockdep_assert_held(&rps->lock);
-
- if (rps->enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- cherryview_enable_rps(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- valleyview_enable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 9) {
- gen9_enable_rps(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- gen8_enable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- gen6_enable_rps(dev_priv);
- } else if (IS_IRONLAKE_M(dev_priv)) {
- ironlake_enable_drps(dev_priv);
- intel_init_emon(dev_priv);
- }
-
- WARN_ON(rps->max_freq < rps->min_freq);
- WARN_ON(rps->idle_freq > rps->max_freq);
-
- WARN_ON(rps->efficient_freq < rps->min_freq);
- WARN_ON(rps->efficient_freq > rps->max_freq);
-
- rps->enabled = true;
-}
-
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev_priv))
- return;
-
- mutex_lock(&dev_priv->gt_pm.rps.lock);
-
- if (HAS_RC6(dev_priv))
- intel_enable_rc6(dev_priv);
- if (HAS_RPS(dev_priv))
- intel_enable_rps(dev_priv);
- if (HAS_LLC(dev_priv))
- intel_enable_llc_pstate(dev_priv);
-
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -8976,7 +6347,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- int pipe;
+ enum pipe pipe;
u32 val;
/*
@@ -9196,6 +6567,22 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
_MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
}
+static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 vd_pg_enable = 0;
+ unsigned int i;
+
+ /* This is not a WA. Enable VD HCP & MFX_ENC powergate */
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (HAS_ENGINE(dev_priv, _VCS(i)))
+ vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
+ VDN_MFX_POWERGATE_ENABLE(i);
+ }
+
+ I915_WRITE(POWERGATE_ENABLE,
+ I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
+}
+
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
@@ -9716,7 +7103,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_GEN(dev_priv, 12))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = tgl_init_clock_gating;
else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
@@ -9772,6 +7159,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
else if (IS_GEN(dev_priv, 5))
i915_ironlake_get_mem_freq(dev_priv);
+ if (intel_has_sagv(dev_priv))
+ skl_setup_sagv_block_time(dev_priv);
+
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
@@ -9830,7 +7220,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else if (IS_GEN(dev_priv, 2)) {
- if (INTEL_INFO(dev_priv)->num_pipes == 1) {
+ if (INTEL_NUM_PIPES(dev_priv) == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
@@ -9842,217 +7232,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
}
}
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * N = val - 0xb7
- * Slow = Fast = GPLL ref * N
- */
- return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
-}
-
-static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
-}
-
-static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * N = val / 2
- * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
- */
- return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
-}
-
-static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* CHV needs even values */
- return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
-}
-
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
- GEN9_FREQ_SCALER);
- else if (IS_CHERRYVIEW(dev_priv))
- return chv_gpu_freq(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv))
- return byt_gpu_freq(dev_priv, val);
- else
- return val * GT_FREQUENCY_MULTIPLIER;
-}
-
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
- GT_FREQUENCY_MULTIPLIER);
- else if (IS_CHERRYVIEW(dev_priv))
- return chv_freq_opcode(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv))
- return byt_freq_opcode(dev_priv, val);
- else
- return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
-}
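
A minimal sketch of the Gen9+ conversion above, assuming GT_FREQUENCY_MULTIPLIER is 50 and GEN9_FREQ_SCALER is 3 as defined elsewhere in the driver; the 66 <-> 1100 MHz round trip illustrates the 16.66 MHz ratio units.

/* Positive-argument stand-in for the kernel's DIV_ROUND_CLOSEST(). */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int gen9_gpu_freq_mhz(int opcode)
{
	/* 16.66 MHz ratio units -> MHz */
	return DIV_ROUND_CLOSEST(opcode * 50, 3);
}

static int gen9_freq_opcode(int mhz)
{
	/* MHz -> 16.66 MHz ratio units */
	return DIV_ROUND_CLOSEST(mhz * 3, 50);
}

/* gen9_gpu_freq_mhz(66) == 1100, gen9_freq_opcode(1100) == 66 */
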
-
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->gt_pm.rps.lock);
- mutex_init(&dev_priv->gt_pm.rps.power.mutex);
-
- atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
-
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
-
-static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- u32 lower, upper, tmp;
- int loop = 2;
-
- /*
-	 * The registers accessed do not need forcewake. We borrow
- * uncore lock to prevent concurrent access to range reg.
- */
- lockdep_assert_held(&dev_priv->uncore.lock);
-
- /*
- * vlv and chv residency counters are 40 bits in width.
- * With a control bit, we can choose between upper or lower
- * 32bit window into this counter.
- *
- * Although we always use the counter in high-range mode elsewhere,
- * userspace may attempt to read the value before rc6 is initialised,
- * before we have set the default VLV_COUNTER_CONTROL value. So always
- * set the high bit to be safe.
- */
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
- upper = I915_READ_FW(reg);
- do {
- tmp = upper;
-
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
- lower = I915_READ_FW(reg);
-
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
- upper = I915_READ_FW(reg);
- } while (upper != tmp && --loop);
-
- /*
- * Everywhere else we always use VLV_COUNTER_CONTROL with the
- * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
- * now.
- */
-
- return lower | (u64)upper << 8;
-}
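
To make the 40-bit combine concrete (hypothetical window values): if the high window reads 0x12345678 (counter bits 39:8) and the low window reads 0x345678ab (bits 31:0), then lower | (u64)upper << 8 yields 0x12345678ab; the overlapping bits 31:8 agree between the two reads, which is exactly what the re-read of the high window around the low read is there to guarantee before the result is trusted.
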
-
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u64 time_hw, prev_hw, overflow_hw;
- unsigned int fw_domains;
- unsigned long flags;
- unsigned int i;
- u32 mul, div;
-
- if (!HAS_RC6(dev_priv))
- return 0;
-
- /*
- * Store previous hw counter values for counter wrap-around handling.
- *
- * There are only four interesting registers and they live next to each
-	 * other, so we can use each register's offset relative to the smallest
-	 * one as the index into driver storage.
- */
- i = (i915_mmio_reg_offset(reg) -
- i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
- if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
- return 0;
-
- fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
-
- spin_lock_irqsave(&uncore->lock, flags);
- intel_uncore_forcewake_get__locked(uncore, fw_domains);
-
- /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- mul = 1000000;
- div = dev_priv->czclk_freq;
- overflow_hw = BIT_ULL(40);
- time_hw = vlv_residency_raw(dev_priv, reg);
- } else {
- /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
- if (IS_GEN9_LP(dev_priv)) {
- mul = 10000;
- div = 12;
- } else {
- mul = 1280;
- div = 1;
- }
-
- overflow_hw = BIT_ULL(32);
- time_hw = intel_uncore_read_fw(uncore, reg);
- }
-
- /*
- * Counter wrap handling.
- *
-	 * This relies on a sufficient frequency of queries, otherwise the
-	 * counters can still wrap.
- */
- prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
- dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
-
- /* RC6 delta from last sample. */
- if (time_hw >= prev_hw)
- time_hw -= prev_hw;
- else
- time_hw += overflow_hw - prev_hw;
-
- /* Add delta to RC6 extended raw driver copy. */
- time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
- dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
-
- intel_uncore_forcewake_put__locked(uncore, fw_domains);
- spin_unlock_irqrestore(&uncore->lock, flags);
-
- return mul_u64_u32_div(time_hw, mul, div);
-}
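
As a quick check of the unit conversion above (hypothetical tick count): on Gen9 LP a counter tick is 833.33ns (mul = 10000, div = 12), so a delta of 12 ticks converts to 12 * 10000 / 12 = 10000 ns; outside VLV/CHV a tick is simply 1.28us (mul = 1280, div = 1).
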
-
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
-}
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
-{
- u32 cagf;
-
- if (INTEL_GEN(dev_priv) >= 9)
- cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else
- cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-
- return cagf;
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 0f7390c850ec..b579c724b915 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -29,19 +29,6 @@ void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
-void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
-void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
@@ -68,26 +55,9 @@ void skl_write_plane_wm(struct intel_plane *plane,
void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
new file mode 100644
index 000000000000..583118095635
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "intel_region_lmem.h"
+
+static int init_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ unsigned long n;
+ int ret;
+
+ /* We want to 1:1 map the mappable aperture to our reserved region */
+
+ mem->fake_mappable.start = 0;
+ mem->fake_mappable.size = resource_size(&mem->region);
+ mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;
+
+ ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
+ if (ret)
+ return ret;
+
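+	/* DMA-map the reserved range so its pages can be inserted into the GGTT below (descriptive comment) */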
+ mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
+ mem->region.start,
+ mem->fake_mappable.size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
+ drm_mm_remove_node(&mem->fake_mappable);
+ return -EINVAL;
+ }
+
+ for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
+ ggtt->vm.insert_page(&ggtt->vm,
+ mem->remap_addr + (n << PAGE_SHIFT),
+ n << PAGE_SHIFT,
+ I915_CACHE_NONE, 0);
+ }
+
+ mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
+ mem->fake_mappable.size);
+
+ return 0;
+}
+
+static void release_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ if (drm_mm_node_allocated(&mem->fake_mappable))
+ drm_mm_remove_node(&mem->fake_mappable);
+
+ dma_unmap_resource(&mem->i915->drm.pdev->dev,
+ mem->remap_addr,
+ mem->fake_mappable.size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+}
+
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
+ release_fake_lmem_bar(mem);
+ io_mapping_fini(&mem->iomap);
+ intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+ int ret;
+
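+	/* Optionally alias a reserved memory range through the mappable aperture to stand in for LMEM (descriptive comment) */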
+ if (i915_modparams.fake_lmem_start) {
+ ret = init_fake_lmem_bar(mem);
+ GEM_BUG_ON(ret);
+ }
+
+ if (!io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ resource_size(&mem->region)))
+ return -EIO;
+
+ ret = intel_memory_region_init_buddy(mem);
+ if (ret)
+ io_mapping_fini(&mem->iomap);
+
+ return ret;
+}
+
+const struct intel_memory_region_ops intel_region_lmem_ops = {
+ .init = region_lmem_init,
+ .release = region_lmem_release,
+ .create_object = __i915_gem_lmem_object_create,
+};
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_memory_region *mem;
+ resource_size_t mappable_end;
+ resource_size_t io_start;
+ resource_size_t start;
+
+ GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
+ GEM_BUG_ON(!i915_modparams.fake_lmem_start);
+
+ /* Your mappable aperture belongs to me now! */
+ mappable_end = pci_resource_len(pdev, 2);
+ io_start = pci_resource_start(pdev, 2);
+ start = i915_modparams.fake_lmem_start;
+
+ mem = intel_memory_region_create(i915,
+ start,
+ mappable_end,
+ PAGE_SIZE,
+ io_start,
+ &intel_region_lmem_ops);
+ if (!IS_ERR(mem)) {
+ DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
+ DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
+ (u64)mem->io_start);
+ DRM_INFO("Intel graphics fake LMEM size: %llx\n",
+ (u64)resource_size(&mem->region));
+ }
+
+ return mem;
+}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/intel_region_lmem.h
new file mode 100644
index 000000000000..213def7c7b8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_REGION_LMEM_H
+#define __INTEL_REGION_LMEM_H
+
+struct drm_i915_private;
+
+extern const struct intel_memory_region_ops intel_region_lmem_ops;
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915);
+
+#endif /* !__INTEL_REGION_LMEM_H */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 2fd3c097e1f5..ad719c9602af 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -27,7 +27,6 @@
*/
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9e583f13a9e4..94a97bf8c021 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -805,9 +805,6 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
-#define GEN11_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
-
#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
@@ -903,12 +900,10 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
})
#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (GEN11_NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(uncore, offset); \
- __fwd; \
-})
+ find_fw_domain(uncore, offset)
+
+#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
+ find_fw_domain(uncore, offset)
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
@@ -935,6 +930,20 @@ static const i915_reg_t gen11_shadowed_regs[] = {
/* TODO: Other registers are not yet used */
};
+static const i915_reg_t gen12_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
+ RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
+ RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
+ RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
+ RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
+ RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
+ RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
+ /* TODO: Other registers are not yet used */
+};
+
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
@@ -957,6 +966,7 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
+__is_genX_shadowed(12)
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
@@ -1005,8 +1015,18 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
- __fwd = find_fw_domain(uncore, offset); \
+ const u32 __offset = (offset); \
+ if (!is_gen11_shadowed(__offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
+ __fwd; \
+})
+
+#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ const u32 __offset = (offset); \
+ if (!is_gen12_shadowed(__offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
__fwd; \
})
@@ -1065,9 +1085,51 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+ GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
+ GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
+ GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+};
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen12_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
@@ -1228,6 +1290,7 @@ __gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)
+__gen_reg_read_funcs(gen12_fwtable);
__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);
@@ -1319,6 +1382,7 @@ __gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)
+__gen_reg_write_funcs(gen12_fwtable);
__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);
@@ -1690,10 +1754,14 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
- } else {
+ } else if (IS_GEN(i915, 11)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ } else {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
}
uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 414fc2cb0459..dcfa243892c6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -378,23 +378,23 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
static inline void intel_uncore_rmw(struct intel_uncore *uncore,
i915_reg_t reg, u32 clear, u32 set)
{
- u32 val;
+ u32 old, val;
- val = intel_uncore_read(uncore, reg);
- val &= ~clear;
- val |= set;
- intel_uncore_write(uncore, reg, val);
+ old = intel_uncore_read(uncore, reg);
+ val = (old & ~clear) | set;
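+ /* Skip the MMIO write when the value is unchanged (descriptive comment) */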
+ if (val != old)
+ intel_uncore_write(uncore, reg, val);
}
static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
i915_reg_t reg, u32 clear, u32 set)
{
- u32 val;
+ u32 old, val;
- val = intel_uncore_read_fw(uncore, reg);
- val &= ~clear;
- val |= set;
- intel_uncore_write_fw(uncore, reg, val);
+ old = intel_uncore_read_fw(uncore, reg);
+ val = (old & ~clear) | set;
+ if (val != old)
+ intel_uncore_write_fw(uncore, reg, val);
}
static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
new file mode 100644
index 000000000000..a29d93707345
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_tgl.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0xD920), 0x00000000 },
+ { _MMIO(0xD900), 0x00000000 },
+ { _MMIO(0xD904), 0xF0800000 },
+ { _MMIO(0xD910), 0x00000000 },
+ { _MMIO(0xD914), 0xF0800000 },
+ { _MMIO(0xDC40), 0x00FF0000 },
+ { _MMIO(0xD940), 0x00000004 },
+ { _MMIO(0xD944), 0x0000FFFF },
+ { _MMIO(0xDC00), 0x00000004 },
+ { _MMIO(0xDC04), 0x0000FFFF },
+ { _MMIO(0xD948), 0x00000003 },
+ { _MMIO(0xD94C), 0x0000FFFF },
+ { _MMIO(0xDC08), 0x00000003 },
+ { _MMIO(0xDC0C), 0x0000FFFF },
+ { _MMIO(0xD950), 0x00000007 },
+ { _MMIO(0xD954), 0x0000FFFF },
+ { _MMIO(0xDC10), 0x00000007 },
+ { _MMIO(0xDC14), 0x0000FFFF },
+ { _MMIO(0xD958), 0x00100002 },
+ { _MMIO(0xD95C), 0x0000FFF7 },
+ { _MMIO(0xDC18), 0x00100002 },
+ { _MMIO(0xDC1C), 0x0000FFF7 },
+ { _MMIO(0xD960), 0x00100002 },
+ { _MMIO(0xD964), 0x0000FFCF },
+ { _MMIO(0xDC20), 0x00100002 },
+ { _MMIO(0xDC24), 0x0000FFCF },
+ { _MMIO(0xD968), 0x00100082 },
+ { _MMIO(0xD96C), 0x0000FFEF },
+ { _MMIO(0xDC28), 0x00100082 },
+ { _MMIO(0xDC2C), 0x0000FFEF },
+ { _MMIO(0xD970), 0x001000C2 },
+ { _MMIO(0xD974), 0x0000FFE7 },
+ { _MMIO(0xDC30), 0x001000C2 },
+ { _MMIO(0xDC34), 0x0000FFE7 },
+ { _MMIO(0xD978), 0x00100001 },
+ { _MMIO(0xD97C), 0x0000FFE7 },
+ { _MMIO(0xDC38), 0x00100001 },
+ { _MMIO(0xDC3C), 0x0000FFE7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x0D04), 0x00000200 },
+ { _MMIO(0x9840), 0x00000000 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x280E0000 },
+ { _MMIO(0x9888), 0x1E0E0147 },
+ { _MMIO(0x9888), 0x180E0000 },
+ { _MMIO(0x9888), 0x160E0000 },
+ { _MMIO(0x9888), 0x1E0F1000 },
+ { _MMIO(0x9888), 0x1E104000 },
+ { _MMIO(0x9888), 0x2E020100 },
+ { _MMIO(0x9888), 0x2C030004 },
+ { _MMIO(0x9888), 0x38003000 },
+ { _MMIO(0x9888), 0x1E0A8000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x49110000 },
+ { _MMIO(0x9888), 0x5D101400 },
+ { _MMIO(0x9888), 0x1D140020 },
+ { _MMIO(0x9888), 0x1D1103A3 },
+ { _MMIO(0x9888), 0x01110000 },
+ { _MMIO(0x9888), 0x61111000 },
+ { _MMIO(0x9888), 0x1F128000 },
+ { _MMIO(0x9888), 0x17100000 },
+ { _MMIO(0x9888), 0x55100630 },
+ { _MMIO(0x9888), 0x57100000 },
+ { _MMIO(0x9888), 0x31100000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x65100002 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x42000001 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv)
+{
+ strlcpy(dev_priv->perf.test_config.uuid,
+ "80a833f0-2504-4321-8894-e9277844ce7b",
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
+
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
+
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
new file mode 100644
index 000000000000..4c25f0be825c
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_TGL_H__
+#define __I915_OA_TGL_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 77d844ac8b71..260b0ee5d1e3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915)
return NULL;
kref_init(&active->ref);
- i915_active_init(i915, &active->base, __live_active, __live_retire);
+ i915_active_init(&active->base, __live_active, __live_retire);
return active;
}
@@ -79,7 +79,6 @@ __live_active_setup(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
struct i915_sw_fence *submit;
struct live_active *active;
- enum intel_engine_id id;
unsigned int count = 0;
int err = 0;
@@ -97,7 +96,7 @@ __live_active_setup(struct drm_i915_private *i915)
if (err)
goto out;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
rq = i915_request_create(engine->kernel_context);
@@ -110,7 +109,7 @@ __live_active_setup(struct drm_i915_private *i915)
submit,
GFP_KERNEL);
if (err >= 0)
- err = i915_active_ref(&active->base, rq->timeline, rq);
+ err = i915_active_add_request(&active->base, rq);
i915_request_add(rq);
if (err) {
pr_err("Failed to track active ref!\n");
@@ -121,7 +120,7 @@ __live_active_setup(struct drm_i915_private *i915)
}
i915_active_release(&active->base);
- if (active->retired && count) {
+ if (READ_ONCE(active->retired) && count) {
pr_err("i915_active retired before submission!\n");
err = -EINVAL;
}
@@ -146,35 +145,25 @@ static int live_active_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
struct live_active *active;
- intel_wakeref_t wakeref;
int err = 0;
/* Check that we get a callback when requests retire upon waiting */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
active = __live_active_setup(i915);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
- goto err;
- }
+ if (IS_ERR(active))
+ return PTR_ERR(active);
i915_active_wait(&active->base);
- if (!active->retired) {
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after waiting!\n");
err = -EINVAL;
}
__live_put(active);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
-err:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
@@ -182,35 +171,25 @@ static int live_active_retire(void *arg)
{
struct drm_i915_private *i915 = arg;
struct live_active *active;
- intel_wakeref_t wakeref;
int err = 0;
/* Check that we get a callback when requests are indirectly retired */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
active = __live_active_setup(i915);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
- goto err;
- }
+ if (IS_ERR(active))
+ return PTR_ERR(active);
/* waits for & retires all requests */
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- if (!active->retired) {
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after flushing!\n");
err = -EINVAL;
}
__live_put(active);
-err:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
@@ -226,3 +205,48 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
return i915_subtests(tests, i915);
}
+
+static struct intel_engine_cs *node_to_barrier(struct active_node *it)
+{
+ struct intel_engine_cs *engine;
+
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ return engine;
+}
+
+void i915_active_print(struct i915_active *ref, struct drm_printer *m)
+{
+ drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+ drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
+ drm_printf(m, "\tpreallocated barriers? %s\n",
+ yesno(!llist_empty(&ref->preallocated_barriers)));
+
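+ /* Only walk the node tree while we can keep the i915_active from retiring (descriptive comment) */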
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ struct intel_engine_cs *engine;
+
+ engine = node_to_barrier(it);
+ if (engine) {
+ drm_printf(m, "\tbarrier: %s\n", engine->name);
+ continue;
+ }
+
+ if (i915_active_fence_isset(&it->base)) {
+ drm_printf(m,
+ "\ttimeline: %llx\n", it->timeline);
+ continue;
+ }
+ }
+
+ i915_active_release(ref);
+ }
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 23f784eae1e7..1b856bae67b5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -375,6 +375,8 @@ retry:
if (err)
break;
+
+ cond_resched();
}
if (err == -ENOMEM)
@@ -687,6 +689,8 @@ static int igt_buddy_alloc_range(void *arg)
rem -= size;
if (!rem)
break;
+
+ cond_resched();
}
if (err == -ENOMEM)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 37593831b539..d83f6bf6d9d4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -15,23 +15,26 @@
#include "igt_flush_test.h"
#include "mock_drm.h"
-static int switch_to_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx)
+static int switch_to_context(struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
i915_request_add(rq);
}
+ i915_gem_context_unlock_engines(ctx);
- return 0;
+ return err;
}
static void trash_stolen(struct drm_i915_private *i915)
@@ -42,6 +45,10 @@ static void trash_stolen(struct drm_i915_private *i915)
unsigned long page;
u32 prng = 0x12345678;
+ /* XXX: fsck. needs some more thought... */
+ if (!i915_ggtt_has_aperture(ggtt))
+ return;
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.start + page;
u32 __iomem *s;
@@ -117,12 +124,9 @@ static void pm_resume(struct drm_i915_private *i915)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
intel_gt_sanitize(&i915->gt, false);
- i915_gem_sanitize(i915);
- mutex_lock(&i915->drm.struct_mutex);
i915_gem_restore_gtt_mappings(i915);
- i915_gem_restore_fences(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_restore_fences(&i915->ggtt);
i915_gem_resume(i915);
}
@@ -140,11 +144,9 @@ static int igt_gem_suspend(void *arg)
return PTR_ERR(file);
err = -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (!IS_ERR(ctx))
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
if (err)
goto out;
@@ -159,9 +161,7 @@ static int igt_gem_suspend(void *arg)
pm_resume(i915);
- mutex_lock(&i915->drm.struct_mutex);
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
out:
mock_file_free(i915, file);
return err;
@@ -179,11 +179,9 @@ static int igt_gem_hibernate(void *arg)
return PTR_ERR(file);
err = -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (!IS_ERR(ctx))
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
if (err)
goto out;
@@ -198,9 +196,7 @@ static int igt_gem_hibernate(void *arg)
pm_resume(i915);
- mutex_lock(&i915->drm.struct_mutex);
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
out:
mock_file_free(i915, file);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index cb30c669b1b7..42e948144f1b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -43,8 +43,7 @@ static void quirk_add(struct drm_i915_gem_object *obj,
list_add(&obj->st_link, objects);
}
-static int populate_ggtt(struct drm_i915_private *i915,
- struct list_head *objects)
+static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
@@ -53,7 +52,8 @@ static int populate_ggtt(struct drm_i915_private *i915,
do {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -70,7 +70,7 @@ static int populate_ggtt(struct drm_i915_private *i915,
count++;
} while (1);
pr_debug("Filled GGTT with %lu pages [%llu total]\n",
- count, i915->ggtt.vm.total / PAGE_SIZE);
+ count, ggtt->vm.total / PAGE_SIZE);
bound = 0;
unbound = 0;
@@ -96,7 +96,7 @@ static int populate_ggtt(struct drm_i915_private *i915,
return -EINVAL;
}
- if (list_empty(&i915->ggtt.vm.bound_list)) {
+ if (list_empty(&ggtt->vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -104,20 +104,16 @@ static int populate_ggtt(struct drm_i915_private *i915,
return 0;
}
-static void unpin_ggtt(struct drm_i915_private *i915)
+static void unpin_ggtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
- mutex_lock(&ggtt->vm.mutex);
- list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
if (vma->obj->mm.quirked)
i915_vma_unpin(vma);
- mutex_unlock(&ggtt->vm.mutex);
}
-static void cleanup_objects(struct drm_i915_private *i915,
- struct list_head *list)
+static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
struct drm_i915_gem_object *obj, *on;
@@ -127,44 +123,44 @@ static void cleanup_objects(struct drm_i915_private *i915,
i915_gem_object_put(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
-
- i915_gem_drain_freed_objects(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(ggtt->vm.i915);
}
static int igt_evict_something(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict one. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
/* Everything is unpinned, we should be able to evict something */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
err);
@@ -172,13 +168,14 @@ static int igt_evict_something(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_overcommit(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
LIST_HEAD(objects);
@@ -188,11 +185,11 @@ static int igt_overcommit(void *arg)
* We expect it to fail.
*/
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -208,14 +205,14 @@ static int igt_overcommit(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_evict_for_vma(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_mm_node target = {
.start = 0,
.size = 4096,
@@ -225,22 +222,26 @@ static int igt_evict_for_vma(void *arg)
/* Fill the GGTT with pinned objects and try to evict a range. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
/* Everything is unpinned, we should be able to evict the node */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
@@ -248,7 +249,7 @@ static int igt_evict_for_vma(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
@@ -261,8 +262,8 @@ static void mock_color_adjust(const struct drm_mm_node *node,
static int igt_evict_for_cache_color(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
const unsigned long flags = PIN_OFFSET_FIXED;
struct drm_mm_node target = {
.start = I915_GTT_PAGE_SIZE * 2,
@@ -274,14 +275,16 @@ static int igt_evict_for_cache_color(void *arg)
LIST_HEAD(objects);
int err;
- /* Currently the use of color_adjust is limited to cache domains within
- * the ggtt, and so the presence of mm.color_adjust is assumed to be
- * i915_gtt_color_adjust throughout our driver, so using a mock color
- * adjust will work just fine for our purposes.
+ /*
+ * Currently the use of color_adjust for the GGTT is limited to cache
+ * coloring and guard pages, and so the presence of mm.color_adjust for
+ * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
+ * color adjust will work just fine for our purposes.
*/
ggtt->vm.mm.color_adjust = mock_color_adjust;
+ GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -297,7 +300,7 @@ static int igt_evict_for_cache_color(void *arg)
goto cleanup;
}
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -317,7 +320,9 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
@@ -328,7 +333,9 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
@@ -338,36 +345,40 @@ static int igt_evict_for_cache_color(void *arg)
err = 0;
cleanup:
- unpin_ggtt(i915);
- cleanup_objects(i915, &objects);
+ unpin_ggtt(ggtt);
+ cleanup_objects(ggtt, &objects);
ggtt->vm.mm.color_adjust = NULL;
return err;
}
static int igt_evict_vm(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict everything. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_vm(&ggtt->vm);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_vm(&ggtt->vm);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -375,14 +386,16 @@ static int igt_evict_vm(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_evict_contexts(void *arg)
{
const u64 PRETEND_GGTT_SIZE = 16ull << 20;
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct reserved {
@@ -408,14 +421,14 @@ static int igt_evict_contexts(void *arg)
if (!HAS_FULL_PPGTT(i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_gtt_insert(&ggtt->vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
@@ -425,15 +438,17 @@ static int igt_evict_contexts(void *arg)
do {
struct reserved *r;
+ mutex_unlock(&ggtt->vm.mutex);
r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+ mutex_lock(&ggtt->vm.mutex);
if (!r) {
err = -ENOMEM;
goto out_locked;
}
- if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
+ if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
@@ -445,11 +460,11 @@ static int igt_evict_contexts(void *arg)
count++;
} while (1);
drm_mm_remove_node(&hole);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&ggtt->vm.mutex);
pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
/* Overfill the GGTT with context objects and so try to evict one. */
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
struct drm_file *file;
@@ -460,7 +475,6 @@ static int igt_evict_contexts(void *arg)
}
count = 0;
- mutex_lock(&i915->drm.struct_mutex);
onstack_fence_init(&fence);
do {
struct i915_request *rq;
@@ -478,8 +492,8 @@ static int igt_evict_contexts(void *arg)
if (IS_ERR(rq)) {
/* When full, fail_if_busy will trigger EBUSY */
if (PTR_ERR(rq) != -EBUSY) {
- pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
- ctx->hw_id, engine->name,
+ pr_err("Unexpected error from request alloc (on %s): %d\n",
+ engine->name,
(int)PTR_ERR(rq));
err = PTR_ERR(rq);
}
@@ -497,8 +511,6 @@ static int igt_evict_contexts(void *arg)
count++;
err = 0;
} while(1);
- mutex_unlock(&i915->drm.struct_mutex);
-
onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
@@ -508,9 +520,9 @@ static int igt_evict_contexts(void *arg)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&ggtt->vm.mutex);
out_locked:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;
@@ -522,8 +534,8 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
+ mutex_unlock(&ggtt->vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -545,11 +557,8 @@ int i915_gem_evict_mock_selftests(void)
if (!i915)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, i915);
-
- mutex_unlock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, &i915->gt);
drm_dev_put(&i915->drm);
return err;
@@ -564,5 +573,5 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 31a51ca1ddcb..3f7e80fb3bbd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,26 +25,20 @@
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_context.h"
#include "i915_random.h"
#include "i915_selftest.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
+#include "igt_flush_test.h"
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
- /*
- * As we may hold onto the struct_mutex for inordinate lengths of
- * time, the NMI khungtaskd detector may fire for the free objects
- * worker.
- */
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_drain_freed_objects(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
}
static void fake_free_pages(struct drm_i915_gem_object *obj,
@@ -88,8 +82,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
}
GEM_BUG_ON(rem);
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
return 0;
@@ -101,7 +93,6 @@ static void fake_put_pages(struct drm_i915_gem_object *obj,
{
fake_free_pages(obj, pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -113,6 +104,7 @@ static const struct drm_i915_gem_object_ops fake_ops = {
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -126,7 +118,9 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
goto err;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &fake_ops);
+ i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -293,18 +287,20 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vm->insert_entries(vm, &mock_vma,
+ I915_CACHE_NONE, 0);
}
count = n;
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
u64 addr = hole_start + order[n] * BIT_ULL(size);
+ intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
- vm->clear_range(vm, addr, BIT_ULL(size));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vm->clear_range(vm, addr, BIT_ULL(size));
}
i915_gem_object_unpin_pages(obj);
@@ -875,6 +871,15 @@ static int __shrink_hole(struct drm_i915_private *i915,
i915_vma_unpin(vma);
addr += size;
+ /*
+ * Since we are injecting allocation faults at random intervals,
+ * wait for this allocation to complete before we change the
+ * fault injection.
+ */
+ err = i915_vma_sync(vma);
+ if (err)
+ break;
+
if (igt_timeout(end_time,
"%s timed out at ofset %llx [%llx - %llx]\n",
__func__, addr, hole_start, hole_end)) {
@@ -1008,21 +1013,19 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
- goto out_unlock;
+ goto out_free;
}
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
- GEM_BUG_ON(ppgtt->vm.closed);
+ GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_vm_put(&ppgtt->vm);
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+out_free:
mock_file_free(dev_priv, file);
return err;
}
@@ -1085,7 +1088,6 @@ static int exercise_ggtt(struct drm_i915_private *i915,
IGT_TIMEOUT(end_time);
int err = 0;
- mutex_lock(&i915->drm.struct_mutex);
restart:
list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
@@ -1106,7 +1108,6 @@ restart:
last = hole_end;
goto restart;
}
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1148,13 +1149,12 @@ static int igt_ggtt_page(void *arg)
unsigned int *order, n;
int err;
- mutex_lock(&i915->drm.struct_mutex);
+ if (!i915_ggtt_has_aperture(ggtt))
+ return 0;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_unlock;
- }
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
err = i915_gem_object_pin_pages(obj);
if (err)
@@ -1222,8 +1222,6 @@ out_unpin:
i915_gem_object_unpin_pages(obj);
out_free:
i915_gem_object_put(obj);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1234,10 +1232,13 @@ static void track_vma_bind(struct i915_vma *vma)
atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
+ GEM_BUG_ON(vma->pages);
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+ __i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
mutex_lock(&vma->vm->mutex);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
mutex_unlock(&vma->vm->mutex);
}
@@ -1248,6 +1249,7 @@ static int exercise_mock(struct drm_i915_private *i915,
unsigned long end_time))
{
const u64 limit = totalram_pages() << PAGE_SHIFT;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
IGT_TIMEOUT(end_time);
int err;
@@ -1256,7 +1258,9 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ err = func(i915, vm, 0, min(vm->total, limit), end_time);
+ i915_vm_put(vm);
mock_context_close(ctx);
return err;
@@ -1294,6 +1298,7 @@ static int igt_gtt_reserve(void *arg)
{
struct i915_ggtt *ggtt = arg;
struct drm_i915_gem_object *obj, *on;
+ I915_RND_STATE(prng);
LIST_HEAD(objects);
u64 total;
int err = -ENODEV;
@@ -1330,11 +1335,13 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1380,11 +1387,13 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1420,15 +1429,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, ggtt->vm.total,
- 2*I915_GTT_PAGE_SIZE,
- I915_GTT_MIN_ALIGNMENT);
+ offset = igt_random_offset(&prng,
+ 0, ggtt->vm.total,
+ 2 * I915_GTT_PAGE_SIZE,
+ I915_GTT_MIN_ALIGNMENT);
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1497,11 +1509,13 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
ii->size, ii->alignment, ii->start, ii->end,
@@ -1537,10 +1551,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
i915_gem_object_put(obj);
@@ -1595,10 +1611,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1642,10 +1660,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1689,13 +1709,10 @@ int i915_gem_gtt_mock_selftests(void)
}
mock_init_ggtt(i915, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, ggtt);
- mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@@ -1703,6 +1720,312 @@ out_put:
return err;
}
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ long timeout;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 5);
+ i915_request_put(rq);
+
+ return timeout < 0 ? -EIO : 0;
+}
+
+static struct i915_request *
+submit_batch(struct intel_context *ce, u64 addr)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb) /* detect a hang */
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = rq->engine->emit_bb_start(rq, addr, 0, 0);
+
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return err ? ERR_PTR(err) : rq;
+}
+
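+/* Helpers to locate and terminate the spin loop embedded in each 64-byte batch slot (descriptive comment) */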
+static u32 *spinner(u32 *batch, int i)
+{
+ return batch + i * 64 / sizeof(*batch) + 4;
+}
+
+static void end_spin(u32 *batch, int i)
+{
+ *spinner(batch, i) = MI_BATCH_BUFFER_END;
+ wmb();
+}
+
+static int igt_cs_tlb(void *arg)
+{
+ const unsigned int count = PAGE_SIZE / 64;
+ const unsigned int chunk_size = count * PAGE_SIZE;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *bbe, *act, *out;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ struct i915_vma *vma;
+ I915_RND_STATE(prng);
+ unsigned int i;
+ u32 *result;
+ u32 *batch;
+ int err = 0;
+
+ /*
+ * Our mission here is to fool the hardware into executing something
+ * from scratch even though it has not seen the batch move (due to the
+ * missing TLB invalidate).
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (i915_is_ggtt(vm))
+ goto out_vm;
+
+ /* Create two pages: a dummy with which we prefill the TLB, and the intended target */
+ bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(bbe)) {
+ err = PTR_ERR(bbe);
+ goto out_vm;
+ }
+
+ batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_bbe;
+ }
+ memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
+ i915_gem_object_flush_map(bbe);
+ i915_gem_object_unpin_map(bbe);
+
+ act = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(act)) {
+ err = PTR_ERR(act);
+ goto out_put_bbe;
+ }
+
+ /* Track the execution of each request by writing into a different slot */
+ batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_act;
+ }
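+ /* Each 64-byte slot stores its index into the result page at the top of the vm, then spins via MI_BATCH_BUFFER_START (descriptive comment) */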
+ for (i = 0; i < count; i++) {
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 6);
+ cs[0] = MI_STORE_DWORD_IMM_GEN4;
+ if (INTEL_GEN(i915) >= 8) {
+ cs[1] = lower_32_bits(addr);
+ cs[2] = upper_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START_GEN8;
+ } else {
+ cs[1] = 0;
+ cs[2] = lower_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START;
+ }
+ }
+
+ out = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(out)) {
+ err = PTR_ERR(out);
+ goto out_put_batch;
+ }
+ i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(out, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put_batch;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ PIN_USER |
+ PIN_OFFSET_FIXED |
+ (vm->total - PAGE_SIZE));
+ if (err)
+ goto out_put_out;
+ GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
+
+ result = i915_gem_object_pin_map(out, I915_MAP_WB);
+ if (IS_ERR(result)) {
+ err = PTR_ERR(result);
+ goto out_put_out;
+ }
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ IGT_TIMEOUT(end_time);
+ unsigned long pass = 0;
+
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ while (!__igt_timeout(end_time, NULL)) {
+ struct i915_request *rq;
+ u64 offset;
+
+ offset = igt_random_offset(&prng,
+ 0, vm->total - PAGE_SIZE,
+ chunk_size, PAGE_SIZE);
+
+ err = vm->allocate_va_range(vm, offset, chunk_size);
+ if (err)
+ goto end;
+
+ memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ vma = i915_vma_instance(bbe, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err)
+ goto end;
+
+ /* Prime the TLB with the dummy pages */
+ for (i = 0; i < count; i++) {
+ vma->node.start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+ rq = submit_batch(ce, vma->node.start);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+ i915_request_put(rq);
+ }
+
+ vma->ops->clear_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: dummy setup timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ vma = i915_vma_instance(act, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err)
+ goto end;
+
+ /* Replace the TLB with target batches */
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr;
+
+ vma->node.start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+ addr = vma->node.start + i * 64;
+ cs[4] = MI_NOOP;
+ cs[6] = lower_32_bits(addr);
+ cs[7] = upper_32_bits(addr);
+ wmb();
+
+ rq = submit_batch(ce, addr);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+
+ /* Wait until the context chain has started */
+ if (i == 0) {
+ while (READ_ONCE(result[i]) &&
+ !i915_request_completed(rq))
+ cond_resched();
+ } else {
+ end_spin(batch, i - 1);
+ }
+
+ i915_request_put(rq);
+ }
+ end_spin(batch, count - 1);
+
+ vma->ops->clear_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: writes timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (result[i] != i) {
+ pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
+ ce->engine->name, pass,
+ offset, i, result[i], i);
+ err = -EINVAL;
+ goto end;
+ }
+ }
+
+ vm->clear_range(vm, offset, chunk_size);
+ pass++;
+ }
+ }
+end:
+ if (igt_flush_test(i915))
+ err = -EIO;
+ i915_gem_context_unlock_engines(ctx);
+ i915_gem_object_unpin_map(out);
+out_put_out:
+ i915_gem_object_put(out);
+out_put_batch:
+ i915_gem_object_unpin_map(act);
+out_put_act:
+ i915_gem_object_put(act);
+out_put_bbe:
+ i915_gem_object_put(bbe);
+out_vm:
+ i915_vm_put(vm);
+out_unlock:
+ mock_file_free(i915, file);
+ return err;
+}
+
int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -1720,6 +2043,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_pot),
SUBTEST(igt_ggtt_fill),
SUBTEST(igt_ggtt_page),
+ SUBTEST(igt_cs_tlb),
};
GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 1ccf0f731ac0..4b3cac73e291 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -15,6 +15,9 @@ selftest(workarounds, intel_workarounds_live_selftests)
selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
+selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_pm, intel_gt_pm_live_selftests)
+selftest(gt_heartbeat, intel_heartbeat_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
@@ -30,6 +33,8 @@ selftest(gem_contexts, i915_gem_context_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
+selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
selftest(guc, intel_guc_live_selftest)
+selftest(perf, i915_perf_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index b88084fe3269..aa5a0e7f5d9e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -26,3 +26,4 @@ selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
selftest(buddy, i915_buddy_mock_selftests)
+selftest(memory_region, intel_memory_region_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
new file mode 100644
index 000000000000..aabd07f67e49
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -0,0 +1,217 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kref.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+
+static struct i915_perf_stream *
+test_stream(struct i915_perf *perf)
+{
+ struct drm_i915_perf_open_param param = {};
+ struct perf_open_properties props = {
+ .engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0),
+ .sample_flags = SAMPLE_OA_REPORT,
+ .oa_format = IS_GEN(perf->i915, 12) ?
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
+ .metrics_set = 1,
+ };
+ struct i915_perf_stream *stream;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return NULL;
+
+ stream->perf = perf;
+
+ mutex_lock(&perf->lock);
+ if (i915_oa_stream_init(stream, &param, &props)) {
+ kfree(stream);
+ stream = NULL;
+ }
+ mutex_unlock(&perf->lock);
+
+ return stream;
+}
+
+static void stream_destroy(struct i915_perf_stream *stream)
+{
+ struct i915_perf *perf = stream->perf;
+
+ mutex_lock(&perf->lock);
+ i915_perf_destroy_locked(stream);
+ mutex_unlock(&perf->lock);
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+
+ /* Quick check we can create a perf stream */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -EINVAL;
+
+ stream_destroy(stream);
+ return 0;
+}
+
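+/* Emit a PIPE_CONTROL that writes the CS timestamp into the given dword of the status page (descriptive comment) */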
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ u32 *cs;
+ int len;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ len = 5;
+ if (INTEL_GEN(rq->i915) >= 8)
+ len++;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(len);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_WRITE_TIMESTAMP;
+ *cs++ = slot * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static ktime_t poll_status(struct i915_request *rq, int slot)
+{
+ while (!intel_read_status_page(rq->engine, slot) &&
+ !i915_request_completed(rq))
+ cpu_relax();
+
+ return ktime_get();
+}
+
+static int live_noa_delay(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u64 expected;
+ u32 delay;
+ int err;
+ int i;
+
+ /* Check that the GPU delay matches expectations */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -ENOMEM;
+
+ expected = atomic64_read(&stream->perf->noa_programming_delay);
+
+ if (stream->engine->class != RENDER_CLASS) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < 4; i++)
+ intel_write_status_page(stream->engine, 0x100 + i, 0);
+
+ rq = i915_request_create(stream->engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb &&
+ i915_request_timeline(rq)->has_initial_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ err = write_timestamp(rq, 0x100);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ i915_ggtt_offset(stream->noa_wait), 0,
+ I915_DISPATCH_SECURE);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = write_timestamp(rq, 0x102);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ preempt_disable();
+ t0 = poll_status(rq, 0x100);
+ t1 = poll_status(rq, 0x102);
+ preempt_enable();
+
+ pr_info("CPU delay: %lluns, expected %lluns\n",
+ ktime_sub(t1, t0), expected);
+
+ delay = intel_read_status_page(stream->engine, 0x102);
+ delay -= intel_read_status_page(stream->engine, 0x100);
+ delay = div_u64(mul_u32_u32(delay, 1000 * 1000),
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
+ pr_info("GPU delay: %uns, expected %lluns\n",
+ delay, expected);
+
+ if (4 * delay < 3 * expected || 2 * delay > 3 * expected) {
+ pr_err("GPU delay [%uus] outside of expected threshold! [%lluus, %lluus]\n",
+ delay / 1000,
+ div_u64(3 * expected, 4000),
+ div_u64(3 * expected, 2000));
+ err = -EINVAL;
+ }
+
+ i915_request_put(rq);
+out:
+ stream_destroy(stream);
+ return err;
+}
+
+int i915_perf_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_noa_delay),
+ };
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
+ return 0;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
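A note on the timing check in live_noa_delay() above: the measured tick delta is converted to nanoseconds via ticks * 1000000 / cs_timestamp_frequency_khz, and the result must land within [3/4, 3/2] of the programmed NOA delay for the test to pass. A minimal userspace sketch of that arithmetic, where the 12000 kHz tick rate and the sample values are purely illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

/* ticks -> ns, mirroring the div_u64(mul_u32_u32(...)) conversion above */
static uint64_t ticks_to_ns(uint32_t ticks, uint64_t cs_freq_khz)
{
	return (uint64_t)ticks * 1000 * 1000 / cs_freq_khz;
}

/* Pass window used by the selftest: 3/4 * expected <= delay <= 3/2 * expected */
static int within_tolerance(uint64_t delay_ns, uint64_t expected_ns)
{
	return !(4 * delay_ns < 3 * expected_ns || 2 * delay_ns > 3 * expected_ns);
}

int main(void)
{
	uint64_t cs_freq_khz = 12000;      /* assumed 12 MHz CS timestamp clock */
	uint64_t expected_ns = 500 * 1000; /* hypothetical 500us NOA delay */
	uint32_t ticks = 6300;             /* hypothetical measured tick delta */
	uint64_t delay_ns = ticks_to_ns(ticks, cs_freq_khz);

	printf("GPU delay %lluns vs expected %lluns: %s\n",
	       (unsigned long long)delay_ns, (unsigned long long)expected_ns,
	       within_tolerance(delay_ns, expected_ns) ? "ok" : "outside window");
	return 0;
}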
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 716a3f19f030..abdfadcf626b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "i915_random.h"
+#include "i915_utils.h"
u64 i915_prandom_u64_state(struct rnd_state *rnd)
{
@@ -87,3 +88,22 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
i915_random_reorder(order, count, state);
return order;
}
+
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align)
+{
+ u64 range, addr;
+
+ BUG_ON(range_overflows(start, len, end));
+ BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+ range = round_down(end - len, align) - round_up(start, align);
+ if (range) {
+ addr = i915_prandom_u64_state(state);
+ div64_u64_rem(addr, range, &addr);
+ start += addr;
+ }
+
+ return round_up(start, align);
+}
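For reference, igt_random_offset() above returns a uniformly chosen, aligned offset such that [offset, offset + len) fits inside [start, end); igt_lmem_write_cpu() further down relies on it to pick random write windows. A self-contained sketch of the same arithmetic, with the kernel's rnd_state PRNG replaced by rand() and the power-of-two rounding helpers simplified, both purely for illustration:

#include <stdint.h>
#include <stdlib.h>

#define round_up(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define round_down(x, a) (((x) / (a)) * (a))

/* Same structure as igt_random_offset(); the overflow BUG_ONs are omitted. */
static uint64_t random_offset(uint64_t start, uint64_t end,
			      uint64_t len, uint64_t align)
{
	uint64_t range = round_down(end - len, align) - round_up(start, align);

	if (range)
		start += (uint64_t)rand() % range;

	return round_up(start, align);
}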
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 8e1ff9c105b6..35cc69a3a1b9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -57,4 +57,8 @@ void i915_random_reorder(unsigned int *order,
void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
struct rnd_state *state);
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align);
+
#endif /* !__I915_SELFTESTS_RANDOM_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index b3688543ed7d..8618a4dc0701 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -37,25 +37,32 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
+static unsigned int num_uabi_engines(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ return count;
+}
+
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *request;
- int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
- goto out_unlock;
+ return -ENOMEM;
i915_request_add(request);
- err = 0;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
static int igt_wait_request(void *arg)
@@ -67,12 +74,10 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_unlock;
- }
+ if (!request)
+ return -ENOMEM;
+
i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
@@ -125,9 +130,7 @@ static int igt_wait_request(void *arg)
err = 0;
out_request:
i915_request_put(request);
-out_unlock:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -140,52 +143,45 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_locked;
- }
+ if (!request)
+ return -ENOMEM;
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_locked;
+ goto out;
}
i915_request_add(request);
- mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
pr_err("fence signaled immediately!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
pr_err("fence wait success after submit (expected timeout)!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out (expected success)!\n");
- goto out_device;
+ goto out;
}
if (!dma_fence_is_signaled(&request->fence)) {
pr_err("fence unsignaled after waiting!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out when complete (expected success)!\n");
- goto out_device;
+ goto out;
}
err = 0;
-out_device:
- mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -197,8 +193,8 @@ static int igt_request_rewind(void *arg)
struct intel_context *ce;
int err = -EINVAL;
- mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
+
ce = i915_gem_context_get_engine(ctx[0], RCS0);
GEM_BUG_ON(IS_ERR(ce));
request = mock_request(ce, 2 * HZ);
@@ -212,6 +208,7 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
+
ce = i915_gem_context_get_engine(ctx[1], RCS0);
GEM_BUG_ON(IS_ERR(ce));
vip = mock_request(ce, 0);
@@ -233,7 +230,6 @@ static int igt_request_rewind(void *arg)
request->engine->submit_request(request);
rcu_read_unlock();
- mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request\n");
@@ -248,14 +244,12 @@ static int igt_request_rewind(void *arg)
err = 0;
err:
i915_request_put(vip);
- mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -282,7 +276,6 @@ __live_request_alloc(struct intel_context *ce)
static int __igt_breadcrumbs_smoketest(void *arg)
{
struct smoketest *t = arg;
- struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
const unsigned int total = 4 * t->ncontexts + 1;
unsigned int num_waits = 0, num_fences = 0;
@@ -300,7 +293,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
* that the fences were marked as signaled.
*/
- requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL);
+ requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
if (!requests)
return -ENOMEM;
@@ -337,14 +330,11 @@ static int __igt_breadcrumbs_smoketest(void *arg)
struct i915_request *rq;
struct intel_context *ce;
- mutex_lock(BKL);
-
ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
rq = t->request_alloc(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
- mutex_unlock(BKL);
err = PTR_ERR(rq);
count = n;
break;
@@ -357,8 +347,6 @@ static int __igt_breadcrumbs_smoketest(void *arg)
requests[n] = i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(BKL);
-
if (err >= 0)
err = i915_sw_fence_await_dma_fence(wait,
&rq->fence,
@@ -446,18 +434,16 @@ static int mock_breadcrumbs_smoketest(void *arg)
* See __igt_breadcrumbs_smoketest();
*/
- threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+ threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
if (!threads)
return -ENOMEM;
- t.contexts =
- kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
+ t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
if (!t.contexts) {
ret = -ENOMEM;
goto out_threads;
}
- mutex_lock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < t.ncontexts; n++) {
t.contexts[n] = mock_context(t.engine->i915, "mock");
if (!t.contexts[n]) {
@@ -465,7 +451,6 @@ static int mock_breadcrumbs_smoketest(void *arg)
goto out_contexts;
}
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < ncpus; n++) {
threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
@@ -479,6 +464,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
get_task_struct(threads[n]);
}
+ yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
for (n = 0; n < ncpus; n++) {
@@ -495,18 +481,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
atomic_long_read(&t.num_fences),
ncpus);
- mutex_lock(&t.engine->i915->drm.struct_mutex);
out_contexts:
for (n = 0; n < t.ncontexts; n++) {
if (!t.contexts[n])
break;
mock_context_close(t.contexts[n]);
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
kfree(t.contexts);
out_threads:
kfree(threads);
-
return ret;
}
@@ -539,40 +522,37 @@ static int live_nop_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- unsigned int id;
int err = -ENODEV;
- /* Submit various sized batches of empty requests, to each engine
+ /*
+ * Submit various sized batches of empty requests, to each engine
* (individually), and wait for the batch to complete. We can check
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- for_each_engine(engine, i915, id) {
- struct i915_request *request = NULL;
+ for_each_uabi_engine(engine, i915) {
unsigned long n, prime;
IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ return err;
for_each_prime_number_from(prime, 1, 8192) {
+ struct i915_request *request = NULL;
+
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = i915_request_create(engine->kernel_context);
- if (IS_ERR(request)) {
- err = PTR_ERR(request);
- goto out_unlock;
- }
+ if (IS_ERR(request))
+ return PTR_ERR(request);
- /* This space is left intentionally blank.
+ /*
+ * This space is left intentionally blank.
*
* We do not actually want to perform any
* action with this request, we just want
@@ -585,9 +565,11 @@ static int live_nop_request(void *arg)
* for latency.
*/
+ i915_request_get(request);
i915_request_add(request);
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -599,7 +581,7 @@ static int live_nop_request(void *arg)
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ return err;
pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -607,9 +589,6 @@ static int live_nop_request(void *arg)
prime, div64_u64(ktime_to_ns(times[1]), prime));
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -647,8 +626,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
+ /* Force the wait now to avoid including it in the benchmark */
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
return vma;
+err_pin:
+ i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ERR_PTR(err);
@@ -672,6 +658,7 @@ empty_request(struct intel_engine_cs *engine,
if (err)
goto out_request;
+ i915_request_get(request);
out_request:
i915_request_add(request);
return err ? ERR_PTR(err) : request;
@@ -681,27 +668,21 @@ static int live_empty_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct i915_vma *batch;
- unsigned int id;
int err = 0;
- /* Submit various sized batches of empty requests, to each engine
+ /*
+ * Submit various sized batches of empty requests, to each engine
* (individually), and wait for the batch to complete. We can check
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
batch = empty_batch(i915);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_unlock;
- }
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
struct i915_request *request;
unsigned long n, prime;
@@ -723,6 +704,7 @@ static int live_empty_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
@@ -738,6 +720,7 @@ static int live_empty_request(void *arg)
if (__igt_timeout(end_time, NULL))
break;
}
+ i915_request_put(request);
err = igt_live_test_end(&t);
if (err)
@@ -752,18 +735,15 @@ static int live_empty_request(void *arg)
out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
+ struct i915_address_space *vm;
struct i915_vma *vma;
u32 *cmd;
int err;
@@ -772,7 +752,9 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
+ vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -832,67 +814,73 @@ static int recursive_batch_resolve(struct i915_vma *batch)
static int live_all_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
+ const unsigned int nengines = num_uabi_engines(i915);
struct intel_engine_cs *engine;
- struct i915_request *request[I915_NUM_ENGINES];
- intel_wakeref_t wakeref;
+ struct i915_request **request;
struct igt_live_test t;
struct i915_vma *batch;
- unsigned int id;
+ unsigned int idx;
int err;
- /* Check we can submit requests to all engines simultaneously. We
+ /*
+ * Check we can submit requests to all engines simultaneously. We
* send a recursive batch to each engine - checking that we don't
* block doing so, and that they don't complete too soon.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_free;
batch = recursive_batch(i915);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
- goto out_unlock;
+ goto out_free;
}
- for_each_engine(engine, i915, id) {
- request[id] = i915_request_create(engine->kernel_context);
- if (IS_ERR(request[id])) {
- err = PTR_ERR(request[id]);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ request[idx] = i915_request_create(engine->kernel_context);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
__func__, err);
goto out_request;
}
- err = engine->emit_bb_start(request[id],
+ err = engine->emit_bb_start(request[idx],
batch->node.start,
batch->node.size,
0);
GEM_BUG_ON(err);
- request[id]->batch = batch;
+ request[idx]->batch = batch;
i915_vma_lock(batch);
- err = i915_request_await_object(request[id], batch->obj, 0);
+ err = i915_request_await_object(request[idx], batch->obj, 0);
if (err == 0)
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[idx], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_request_get(request[id]);
- i915_request_add(request[id]);
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
+ idx++;
}
- for_each_engine(engine, i915, id) {
- if (i915_request_completed(request[id])) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (i915_request_completed(request[idx])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
+ idx++;
}
err = recursive_batch_resolve(batch);
@@ -901,10 +889,11 @@ static int live_all_engines(void *arg)
goto out_request;
}
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
long timeout;
- timeout = i915_request_wait(request[id], 0,
+ timeout = i915_request_wait(request[idx], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -913,50 +902,56 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[id]));
- i915_request_put(request[id]);
- request[id] = NULL;
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ i915_request_put(request[idx]);
+ request[idx] = NULL;
+ idx++;
}
err = igt_live_test_end(&t);
out_request:
- for_each_engine(engine, i915, id)
- if (request[id])
- i915_request_put(request[id]);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (request[idx])
+ i915_request_put(request[idx]);
+ idx++;
+ }
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_free:
+ kfree(request);
return err;
}
static int live_sequential_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_request *request[I915_NUM_ENGINES] = {};
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct i915_request **request;
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- unsigned int id;
+ unsigned int idx;
int err;
- /* Check we can submit requests to all engines sequentially, such
+ /*
+ * Check we can submit requests to all engines sequentially, such
* that each successive request waits for the earlier ones. This
* tests that we don't execute requests out of order, even though
* they are running on independent engines.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_free;
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
struct i915_vma *batch;
batch = recursive_batch(i915);
@@ -964,66 +959,69 @@ static int live_sequential_engines(void *arg)
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
__func__, engine->name, err);
- goto out_unlock;
+ goto out_free;
}
- request[id] = i915_request_create(engine->kernel_context);
- if (IS_ERR(request[id])) {
- err = PTR_ERR(request[id]);
+ request[idx] = i915_request_create(engine->kernel_context);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
if (prev) {
- err = i915_request_await_dma_fence(request[id],
+ err = i915_request_await_dma_fence(request[idx],
&prev->fence);
if (err) {
- i915_request_add(request[id]);
+ i915_request_add(request[idx]);
pr_err("%s: Request await failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
}
- err = engine->emit_bb_start(request[id],
+ err = engine->emit_bb_start(request[idx],
batch->node.start,
batch->node.size,
0);
GEM_BUG_ON(err);
- request[id]->batch = batch;
+ request[idx]->batch = batch;
i915_vma_lock(batch);
- err = i915_request_await_object(request[id], batch->obj, false);
+ err = i915_request_await_object(request[idx],
+ batch->obj, false);
if (err == 0)
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[idx], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_request_get(request[id]);
- i915_request_add(request[id]);
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
- prev = request[id];
+ prev = request[idx];
+ idx++;
}
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
long timeout;
- if (i915_request_completed(request[id])) {
+ if (i915_request_completed(request[idx])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
- err = recursive_batch_resolve(request[id]->batch);
+ err = recursive_batch_resolve(request[idx]->batch);
if (err) {
pr_err("%s: failed to resolve batch, err=%d\n",
__func__, err);
goto out_request;
}
- timeout = i915_request_wait(request[id], 0,
+ timeout = i915_request_wait(request[idx], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1032,33 +1030,156 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[id]));
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ idx++;
}
err = igt_live_test_end(&t);
out_request:
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
u32 *cmd;
- if (!request[id])
+ if (!request[idx])
break;
- cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+ cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
- i915_gem_object_unpin_map(request[id]->batch->obj);
+ i915_gem_object_unpin_map(request[idx]->batch->obj);
}
- i915_vma_put(request[id]->batch);
- i915_request_put(request[id]);
+ i915_vma_put(request[idx]->batch);
+ i915_request_put(request[idx]);
+ idx++;
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_free:
+ kfree(request);
+ return err;
+}
+
+static int __live_parallel_engine1(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu request + sync\n", engine->name, count);
+ return 0;
+}
+
+static int __live_parallel_engineN(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu requests\n", engine->name, count);
+ return 0;
+}
+
+static int live_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ __live_parallel_engine1,
+ __live_parallel_engineN,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct task_struct **tsk;
+ int err = 0;
+
+ /*
+ * Check we can submit requests to all engines concurrently. This
+ * tests that we load up the system maximally.
+ */
+
+ tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
+ if (!tsk)
+ return -ENOMEM;
+
+ for (fn = func; !err && *fn; fn++) {
+ struct igt_live_test t;
+ unsigned int idx;
+
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ break;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ tsk[idx] = kthread_run(*fn, engine,
+ "igt/parallel:%s",
+ engine->name);
+ if (IS_ERR(tsk[idx])) {
+ err = PTR_ERR(tsk[idx]);
+ break;
+ }
+ get_task_struct(tsk[idx++]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (IS_ERR(tsk[idx]))
+ break;
+
+ status = kthread_stop(tsk[idx]);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk[idx++]);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+ kfree(tsk);
return err;
}
@@ -1102,16 +1223,16 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
static int live_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct smoketest t[I915_NUM_ENGINES];
- unsigned int ncpus = num_online_cpus();
+ const unsigned int nengines = num_uabi_engines(i915);
+ const unsigned int ncpus = num_online_cpus();
unsigned long num_waits, num_fences;
struct intel_engine_cs *engine;
struct task_struct **threads;
struct igt_live_test live;
- enum intel_engine_id id;
intel_wakeref_t wakeref;
struct drm_file *file;
- unsigned int n;
+ struct smoketest *smoke;
+ unsigned int n, idx;
int ret = 0;
/*
@@ -1130,29 +1251,31 @@ static int live_breadcrumbs_smoketest(void *arg)
goto out_rpm;
}
- threads = kcalloc(ncpus * I915_NUM_ENGINES,
- sizeof(*threads),
- GFP_KERNEL);
- if (!threads) {
+ smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
+ if (!smoke) {
ret = -ENOMEM;
goto out_file;
}
- memset(&t[0], 0, sizeof(t[0]));
- t[0].request_alloc = __live_request_alloc;
- t[0].ncontexts = 64;
- t[0].contexts = kmalloc_array(t[0].ncontexts,
- sizeof(*t[0].contexts),
- GFP_KERNEL);
- if (!t[0].contexts) {
+ threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
+ if (!threads) {
+ ret = -ENOMEM;
+ goto out_smoke;
+ }
+
+ smoke[0].request_alloc = __live_request_alloc;
+ smoke[0].ncontexts = 64;
+ smoke[0].contexts = kcalloc(smoke[0].ncontexts,
+ sizeof(*smoke[0].contexts),
+ GFP_KERNEL);
+ if (!smoke[0].contexts) {
ret = -ENOMEM;
goto out_threads;
}
- mutex_lock(&i915->drm.struct_mutex);
- for (n = 0; n < t[0].ncontexts; n++) {
- t[0].contexts[n] = live_context(i915, file);
- if (!t[0].contexts[n]) {
+ for (n = 0; n < smoke[0].ncontexts; n++) {
+ smoke[0].contexts[n] = live_context(i915, file);
+ if (!smoke[0].contexts[n]) {
ret = -ENOMEM;
goto out_contexts;
}
@@ -1162,45 +1285,48 @@ static int live_breadcrumbs_smoketest(void *arg)
if (ret)
goto out_contexts;
- for_each_engine(engine, i915, id) {
- t[id] = t[0];
- t[id].engine = engine;
- t[id].max_batch = max_batches(t[0].contexts[0], engine);
- if (t[id].max_batch < 0) {
- ret = t[id].max_batch;
- mutex_unlock(&i915->drm.struct_mutex);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ smoke[idx] = smoke[0];
+ smoke[idx].engine = engine;
+ smoke[idx].max_batch =
+ max_batches(smoke[0].contexts[0], engine);
+ if (smoke[idx].max_batch < 0) {
+ ret = smoke[idx].max_batch;
goto out_flush;
}
/* One ring interleaved between requests from all cpus */
- t[id].max_batch /= num_online_cpus() + 1;
+ smoke[idx].max_batch /= num_online_cpus() + 1;
pr_debug("Limiting batches to %d requests on %s\n",
- t[id].max_batch, engine->name);
+ smoke[idx].max_batch, engine->name);
for (n = 0; n < ncpus; n++) {
struct task_struct *tsk;
tsk = kthread_run(__igt_breadcrumbs_smoketest,
- &t[id], "igt/%d.%d", id, n);
+ &smoke[idx], "igt/%d.%d", idx, n);
if (IS_ERR(tsk)) {
ret = PTR_ERR(tsk);
- mutex_unlock(&i915->drm.struct_mutex);
goto out_flush;
}
get_task_struct(tsk);
- threads[id * ncpus + n] = tsk;
+ threads[idx * ncpus + n] = tsk;
}
+
+ idx++;
}
- mutex_unlock(&i915->drm.struct_mutex);
+ yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
out_flush:
+ idx = 0;
num_waits = 0;
num_fences = 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (n = 0; n < ncpus; n++) {
- struct task_struct *tsk = threads[id * ncpus + n];
+ struct task_struct *tsk = threads[idx * ncpus + n];
int err;
if (!tsk)
@@ -1213,19 +1339,20 @@ out_flush:
put_task_struct(tsk);
}
- num_waits += atomic_long_read(&t[id].num_waits);
- num_fences += atomic_long_read(&t[id].num_fences);
+ num_waits += atomic_long_read(&smoke[idx].num_waits);
+ num_fences += atomic_long_read(&smoke[idx].num_fences);
+ idx++;
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
- mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
out_contexts:
- mutex_unlock(&i915->drm.struct_mutex);
- kfree(t[0].contexts);
+ kfree(smoke[0].contexts);
out_threads:
kfree(threads);
+out_smoke:
+ kfree(smoke);
out_file:
mock_file_free(i915, file);
out_rpm:
@@ -1240,6 +1367,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_nop_request),
SUBTEST(live_all_engines),
SUBTEST(live_sequential_engines),
+ SUBTEST(live_parallel_engines),
SUBTEST(live_empty_request),
SUBTEST(live_breadcrumbs_smoketest),
};
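Worth noting about the live_breadcrumbs_smoketest() rework above: the per-engine state and the worker threads are now sized from the runtime uabi engine count rather than I915_NUM_ENGINES, with the threads kept in one flat kcalloc'd array addressed as engine index * ncpus + cpu. A trivial sketch of that layout, with the engine and CPU counts invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int nengines = 4; /* stand-in for num_uabi_engines() */
	unsigned int ncpus = 8;    /* stand-in for num_online_cpus() */
	unsigned int idx, n;

	/* threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL) */
	printf("%u thread slots in total\n", ncpus * nengines);

	for (idx = 0; idx < nengines; idx++)
		for (n = 0; n < ncpus; n++)
			printf("engine %u, cpu %u -> threads[%u]\n",
			       idx, n, idx * ncpus + n);
	return 0;
}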
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 438ea0eaa416..a6cca4ad96f6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -23,13 +23,14 @@
#include <linux/random.h>
-#include "../i915_drv.h"
-#include "../i915_selftest.h"
+#include "gt/intel_gt_pm.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
#include "igt_flush_test.h"
struct i915_selftest i915_selftest __read_mostly = {
- .timeout_ms = 1000,
+ .timeout_ms = 500,
};
int i915_mock_sanitycheck(void)
@@ -256,6 +257,10 @@ int __i915_live_setup(void *data)
{
struct drm_i915_private *i915 = data;
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(&i915->gt))
+ return -EIO;
+
return intel_gt_terminally_wedged(&i915->gt);
}
@@ -263,10 +268,8 @@ int __i915_live_teardown(int err, void *data)
{
struct drm_i915_private *i915 = data;
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
@@ -277,6 +280,10 @@ int __intel_gt_live_setup(void *data)
{
struct intel_gt *gt = data;
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(gt))
+ return -EIO;
+
return intel_gt_terminally_wedged(gt);
}
@@ -284,10 +291,8 @@ int __intel_gt_live_teardown(int err, void *data)
{
struct intel_gt *gt = data;
- mutex_lock(&gt->i915->drm.struct_mutex);
- if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
- mutex_unlock(&gt->i915->drm.struct_mutex);
i915_gem_drain_freed_objects(gt->i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index a5bec0a4cdcc..58b5f40a07dd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -24,6 +24,7 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "i915_scatterlist.h"
@@ -38,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != ctx->vm) {
+ if (vma->vm != rcu_access_pointer(ctx->vm)) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -113,11 +114,13 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm = ctx->vm;
+ struct i915_address_space *vm;
struct i915_vma *vma;
int err;
+ vm = i915_gem_context_get_vm_rcu(ctx);
vma = checked_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -170,7 +173,7 @@ static int igt_vma_create(void *arg)
}
nc = 0;
- for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
+ for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) {
for (; nc < num_ctx; nc++) {
ctx = mock_context(i915, "mock");
if (!ctx)
@@ -623,7 +626,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj,
struct sgt_iter sgt;
dma_addr_t dma;
- for_each_sgt_dma(dma, sgt, vma->pages) {
+ for_each_sgt_daddr(dma, sgt, vma->pages) {
dma_addr_t src;
if (!size) {
@@ -831,13 +834,10 @@ int i915_vma_mock_selftests(void)
}
mock_init_ggtt(i915, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, ggtt);
- mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@@ -879,8 +879,6 @@ static int igt_vma_remapped_gtt(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (t = types; *t; t++) {
@@ -976,7 +974,6 @@ static int igt_vma_remapped_gtt(void *arg)
out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_put(obj);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index d3b5eb402d33..7b0939e3f007 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -4,39 +4,32 @@
* Copyright © 2018 Intel Corporation
*/
-#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_selftest.h"
#include "igt_flush_test.h"
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
+int igt_flush_test(struct drm_i915_private *i915)
{
- int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
- int repeat = !!(flags & I915_WAIT_LOCKED);
+ struct intel_gt *gt = &i915->gt;
+ int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
cond_resched();
- do {
- if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
- pr_err("%pS timed out, cancelling all further testing.\n",
- __builtin_return_address(0));
+ if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
- GEM_TRACE("%pS timed out.\n",
- __builtin_return_address(0));
- GEM_TRACE_DUMP();
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
- repeat = 0;
- ret = -EIO;
- }
-
- /* Ensure we also flush after wedging. */
- if (flags & I915_WAIT_LOCKED)
- i915_retire_requests(i915);
- } while (repeat--);
+ intel_gt_set_wedged(gt);
+ ret = -EIO;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
index 63e009927c43..7541fa74e641 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
@@ -9,6 +9,6 @@
struct drm_i915_private;
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
+int igt_flush_test(struct drm_i915_private *i915);
#endif /* IGT_FLUSH_TEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 3e902761cd16..c130010a7033 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -4,7 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_drv.h"
+#include "i915_drv.h"
+#include "gt/intel_gt_requests.h"
#include "../i915_selftest.h"
#include "igt_flush_test.h"
@@ -15,20 +16,16 @@ int igt_live_test_begin(struct igt_live_test *t,
const char *func,
const char *name)
{
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
t->i915 = i915;
t->func = func;
t->name = name;
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err);
@@ -37,7 +34,7 @@ int igt_live_test_begin(struct igt_live_test *t,
t->reset_global = i915_reset_count(&i915->gpu_error);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
t->reset_engine[id] =
i915_reset_engine_count(&i915->gpu_error, engine);
@@ -50,9 +47,7 @@ int igt_live_test_end(struct igt_live_test *t)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
return -EIO;
if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
@@ -62,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t)
return -EIO;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, &i915->gt, id) {
if (t->reset_engine[id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 7ec8f8b049c6..9f8590b868a9 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -22,7 +22,7 @@ void igt_global_reset_lock(struct intel_gt *gt)
wait_event(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
while (test_and_set_bit(I915_RESET_ENGINE + id,
&gt->reset.flags))
wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id,
@@ -35,7 +35,7 @@ void igt_global_reset_unlock(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 11f04ad48e68..ee8450b871da 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -147,7 +147,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
intel_gt_chipset_flush(engine->gt);
if (engine->emit_init_breadcrumb &&
- rq->timeline->has_initial_breadcrumb) {
+ i915_request_timeline(rq)->has_initial_breadcrumb) {
err = engine->emit_init_breadcrumb(rq);
if (err)
goto cancel_rq;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
new file mode 100644
index 000000000000..19e1cca8f143
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+#include "mock_region.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void close_objects(struct intel_memory_region *mem,
+ struct list_head *objects)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+ /* Don't pollute the memory region between tests */
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+ }
+
+ cond_resched();
+
+ i915_gem_drain_freed_objects(i915);
+}
+
+static int igt_mock_fill(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ resource_size_t total = resource_size(&mem->region);
+ resource_size_t page_size;
+ resource_size_t rem;
+ unsigned long max_pages;
+ unsigned long page_num;
+ LIST_HEAD(objects);
+ int err = 0;
+
+ page_size = mem->mm.chunk_size;
+ max_pages = div64_u64(total, page_size);
+ rem = total;
+
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ resource_size_t size = page_num * page_size;
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_region(mem, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+ rem -= size;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+ if (err == -ENXIO) {
+ if (page_num * page_size <= rem) {
+ pr_err("%s failed, space still left in region\n",
+ __func__);
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
+ }
+
+ close_objects(mem, &objects);
+
+ return err;
+}
+
+static struct drm_i915_gem_object *
+igt_object_create(struct intel_memory_region *mem,
+ struct list_head *objects,
+ u64 size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mem, size, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto put;
+
+ list_add(&obj->st_link, objects);
+ return obj;
+
+put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static void igt_object_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+}
+
+static int igt_mock_contiguous(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned long n_objects;
+ LIST_HEAD(objects);
+ LIST_HEAD(holes);
+ I915_RND_STATE(prng);
+ resource_size_t total;
+ resource_size_t min;
+ u64 target;
+ int err = 0;
+
+ total = resource_size(&mem->region);
+
+ /* Min size */
+ obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s min object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Max size */
+ obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s max object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Internal fragmentation should not bleed into the object size */
+ target = i915_prandom_u64_state(&prng);
+ div64_u64_rem(target, total, &target);
+ target = round_up(target, PAGE_SIZE);
+ target = max_t(u64, PAGE_SIZE, target);
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->base.size != target) {
+ pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
+ obj->base.size, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Try to fragment the address space, such that half of it is free, but
+ * the max contiguous block size is SZ_64K.
+ */
+
+ target = SZ_64K;
+ n_objects = div64_u64(total, target);
+
+ while (n_objects--) {
+ struct list_head *list;
+
+ if (n_objects % 2)
+ list = &holes;
+ else
+ list = &objects;
+
+ obj = igt_object_create(mem, list, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+ }
+
+ close_objects(mem, &holes);
+
+ min = target;
+ target = total >> 1;
+
+ /* Make sure we can still allocate all the fragmented space */
+ obj = igt_object_create(mem, &objects, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Even though we have enough free space, we don't have a big enough
+ * contiguous block. Make sure that holds true.
+ */
+
+ do {
+ bool should_fail = target > min;
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (should_fail != IS_ERR(obj)) {
+ pr_err("%s target allocation(%llx) mismatch\n",
+ __func__, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ target >>= 1;
+ } while (target >= mem->mm.chunk_size);
+
+err_close_objects:
+ list_splice_tail(&holes, &objects);
+ close_objects(mem, &objects);
+ return err;
+}
+
+static int igt_gpu_write_dw(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 dword,
+ u32 value)
+{
+ return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+ vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n;
+ int err;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 __iomem *base;
+ u32 read_val;
+
+ base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+ read_val = ioread32(base + dword);
+ io_mapping_unmap_atomic(base);
+ if (read_val != val) {
+ pr_err("n=%lu base[%u]=%u, val=%u\n",
+ n, dword, read_val, val);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+ struct i915_vma *vma;
+ int *order;
+ int i, n;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ n = 0;
+ count = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ vm = ce->vm;
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
+ if (!n)
+ return 0;
+
+ order = i915_random_order(count * count, &prng);
+ if (!order)
+ return -ENOMEM;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_free;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_free;
+
+ i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
+ do {
+ u32 rng = prandom_u32_state(&prng);
+ u32 dword = offset_in_page(rng) / 4;
+
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ err = igt_gpu_write_dw(ce, vma, dword, rng);
+ if (err)
+ break;
+
+ err = igt_cpu_check(obj, dword, rng);
+ if (err)
+ break;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_gem_context_unlock_engines(ctx);
+
+out_free:
+ kfree(order);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+static int igt_lmem_create(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_lmem_write_gpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ u32 sz;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+ obj = i915_gem_object_create_lmem(i915, sz, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_file;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ err = igt_gpu_write(ctx, obj);
+ if (err)
+ pr_err("igt_gpu_write failed(%d)\n", err);
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+static struct intel_engine_cs *
+random_engine_class(struct drm_i915_private *i915,
+ unsigned int class,
+ struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for (engine = intel_engine_lookup_user(i915, class, 0);
+ engine && engine->uabi_class == class;
+ engine = rb_entry_safe(rb_next(&engine->uabi_node),
+ typeof(*engine), uabi_node))
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ return intel_engine_lookup_user(i915, class, count);
+}
+
+static int igt_lmem_write_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 bytes[] = {
+ 0, /* rng placeholder */
+ sizeof(u32),
+ sizeof(u64),
+ 64, /* cl */
+ PAGE_SIZE,
+ PAGE_SIZE - sizeof(u32),
+ PAGE_SIZE - sizeof(u64),
+ PAGE_SIZE - 64,
+ };
+ struct intel_engine_cs *engine;
+ u32 *vaddr;
+ u32 sz;
+ u32 i;
+ int *order;
+ int count;
+ int err;
+
+ engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
+ if (!engine)
+ return 0;
+
+ pr_info("%s: using %s\n", __func__, engine->name);
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+ sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+ obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ /* Put the pages into a known state -- from the gpu for added fun */
+ err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out_unpin;
+
+ count = ARRAY_SIZE(bytes);
+ order = i915_random_order(count * count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_unpin;
+ }
+
+ /* We want to throw in a random width/align */
+ bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
+ sizeof(u32));
+
+ i = 0;
+ do {
+ u32 offset;
+ u32 align;
+ u32 dword;
+ u32 size;
+ u32 val;
+
+ size = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+ offset = igt_random_offset(&prng, 0, obj->base.size,
+ size, align);
+
+ val = prandom_u32_state(&prng);
+ memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+ size / sizeof(u32));
+
+ /*
+ * Sample random dw -- don't waste precious time reading every
+ * single dw.
+ */
+ dword = igt_random_offset(&prng, offset,
+ offset + size,
+ sizeof(u32), sizeof(u32));
+ dword /= sizeof(u32);
+ if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+ pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+ __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+ size, align, offset);
+ err = -EINVAL;
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+out_unpin:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int intel_memory_region_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_fill),
+ SUBTEST(igt_mock_contiguous),
+ };
+ struct intel_memory_region *mem;
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ err = PTR_ERR(mem);
+ goto out_unref;
+ }
+
+ err = i915_subtests(tests, mem);
+
+ intel_memory_region_put(mem);
+out_unref:
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
+int intel_memory_region_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_write_cpu),
+ SUBTEST(igt_lmem_write_gpu),
+ };
+
+ if (!HAS_LMEM(i915)) {
+ pr_info("device lacks LMEM support, skipping\n");
+ return 0;
+ }
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
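A quick note on the fragmentation phase of igt_mock_contiguous() above: the region is carved into SZ_64K contiguous objects, every other one is released, so about half the space ends up free yet no contiguous run larger than 64K survives. The test then halves the request size from half the region down to the chunk size and expects I915_BO_ALLOC_CONTIGUOUS allocations to fail exactly while the size still exceeds 64K. A small sketch of that expectation, using the 2G/4K mock region created in intel_memory_region_mock_selftests():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total = 2ull << 30; /* SZ_2G mock region */
	uint64_t chunk = 4096;       /* I915_GTT_PAGE_SIZE_4K chunk size */
	uint64_t min = 64 << 10;     /* SZ_64K: largest contiguous block left */
	uint64_t target = total >> 1;

	do {
		printf("contiguous alloc of %llu bytes: expect %s\n",
		       (unsigned long long)target,
		       target > min ? "failure" : "success");
		target >>= 1;
	} while (target >= chunk);
	return 0;
}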
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 86815c6072a1..0e4e6be0101d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -67,6 +67,7 @@ static int intel_shadow_table_check(void)
} reg_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
+ { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
};
const i915_reg_t *reg;
unsigned int i, j;
@@ -101,6 +102,7 @@ int intel_uncore_mock_selftests(void)
{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
+ { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
};
int err, i;
@@ -138,19 +140,19 @@ static int live_forcewake_ops(void *arg)
}
};
const struct reg *r;
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_uncore_forcewake_domain *domain;
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
unsigned int tmp;
int err = 0;
- GEM_BUG_ON(i915->gt.awake);
+ GEM_BUG_ON(gt->awake);
/* vlv/chv with their pcu behave differently wrt reads */
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
pr_debug("PCU fakes forcewake badly; skipping\n");
return 0;
}
@@ -168,15 +170,15 @@ static int live_forcewake_ops(void *arg)
/* We have to pick carefully to get the exact behaviour we need */
for (r = registers; r->name; r++)
- if (r->platforms & INTEL_INFO(i915)->gen_mask)
+ if (r->platforms & INTEL_INFO(gt->i915)->gen_mask)
break;
if (!r->name) {
pr_debug("Forcewaked register not known for %s; skipping\n",
- intel_platform_name(INTEL_INFO(i915)->platform));
+ intel_platform_name(INTEL_INFO(gt->i915)->platform));
return 0;
}
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
@@ -186,7 +188,7 @@ static int live_forcewake_ops(void *arg)
intel_uncore_fw_release_timer(&domain->timer);
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
enum forcewake_domains fw_domains;
@@ -247,22 +249,22 @@ static int live_forcewake_ops(void *arg)
}
out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return err;
}
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
- struct drm_i915_private *dev_priv = arg;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = arg;
+ struct intel_uncore *uncore = gt->uncore;
unsigned long *valid;
u32 offset;
int err;
- if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv))
+ if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
+ !IS_VALLEYVIEW(gt->i915) &&
+ !IS_CHERRYVIEW(gt->i915))
return 0;
/*
@@ -281,7 +283,7 @@ static int live_forcewake_domains(void *arg)
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
- (void)I915_READ_FW(reg);
+ intel_uncore_posting_read_fw(uncore, reg);
if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
@@ -298,7 +300,7 @@ static int live_forcewake_domains(void *arg)
check_for_unclaimed_mmio(uncore);
- (void)I915_READ(reg);
+ intel_uncore_posting_read_fw(uncore, reg);
if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
@@ -310,21 +312,23 @@ static int live_forcewake_domains(void *arg)
return err;
}
+static int live_fw_table(void *arg)
+{
+ struct intel_gt *gt = arg;
+
+ /* Confirm the table we load is still valid */
+ return intel_fw_table_check(gt->uncore->fw_domains_table,
+ gt->uncore->fw_domains_table_entries,
+ INTEL_GEN(gt->i915) >= 9);
+}
+
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_fw_table),
SUBTEST(live_forcewake_ops),
SUBTEST(live_forcewake_domains),
};
- int err;
-
- /* Confirm the table we load is still valid */
- err = intel_fw_table_check(i915->uncore.fw_domains_table,
- i915->uncore.fw_domains_table_entries,
- INTEL_GEN(i915) >= 9);
- if (err)
- return err;
-
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 01a89c071bf5..27ed3cee6a9b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -26,27 +26,29 @@
#include <linux/pm_runtime.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
+#include "intel_memory_region.h"
#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
+#include "mock_region.h"
#include "gem/selftests/mock_context.h"
#include "gem/selftests/mock_gem_object.h"
void mock_device_flush(struct drm_i915_private *i915)
{
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
do {
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
mock_engine_flush(engine);
- } while (i915_retire_requests(i915));
+ } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT));
}
static void mock_device_release(struct drm_device *dev)
@@ -55,31 +57,23 @@ static void mock_device_release(struct drm_device *dev)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- flush_work(&i915->gem.idle_work);
i915_gem_drain_workqueue(i915);
- mutex_lock(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, &i915->gt, id)
mock_engine_free(engine);
- i915_gem_contexts_fini(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_driver_release__contexts(i915);
intel_timelines_fini(i915);
drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
mock_fini_ggtt(&i915->ggtt);
- mutex_unlock(&i915->drm.struct_mutex);
-
destroy_workqueue(i915->wq);
- i915_gemfs_fini(i915);
+ intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -103,14 +97,6 @@ static void release_dev(struct device *dev)
kfree(pdev);
}
-static void mock_retire_work_handler(struct work_struct *work)
-{
-}
-
-static void mock_idle_work_handler(struct work_struct *work)
-{
-}
-
static int pm_domain_resume(struct device *dev)
{
return pm_generic_runtime_resume(dev);
@@ -178,10 +164,15 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mock_uncore_init(&i915->uncore);
+ mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
+ intel_memory_regions_hw_probe(i915);
+
+ mock_uncore_init(&i915->uncore, i915);
+
i915_gem_init__mm(i915);
intel_gt_init_early(&i915->gt, i915);
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
+ i915->gt.awake = -ENODEV;
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
@@ -189,15 +180,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
- INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler);
-
- i915->gt.awake = true;
-
intel_timelines_init(i915);
- mutex_lock(&i915->drm.struct_mutex);
-
mock_init_ggtt(i915, &i915->ggtt);
mkwrite_device_info(i915)->engine_mask = BIT(0);
@@ -214,21 +198,18 @@ struct drm_i915_private *mock_gem_device(void)
goto err_context;
intel_engines_driver_register(i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- WARN_ON(i915_gemfs_init(i915));
return i915;
err_context:
- i915_gem_contexts_fini(i915);
+ i915_gem_driver_release__contexts(i915);
err_engine:
mock_engine_free(i915->engine[RCS0]);
err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
+ intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
drm_dev_fini(&i915->drm);
put_device:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index e62a67e0f79c..20ac3844edec 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- vma->flags |= I915_VMA_LOCAL_BIND;
+ set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));
return 0;
}
@@ -63,6 +63,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
if (!ppgtt)
return NULL;
+ ppgtt->vm.gt = &i915->gt;
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
@@ -86,7 +87,7 @@ static int mock_bind_ggtt(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
return 0;
}
@@ -117,8 +118,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
- intel_gt_init_hw(i915);
+ i915->gt.ggtt = ggtt;
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
new file mode 100644
index 000000000000..b2ad41c27e67
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_region.h"
+#include "intel_memory_region.h"
+
+#include "mock_region.h"
+
+static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+ .get_pages = i915_gem_object_get_pages_buddy,
+ .put_pages = i915_gem_object_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
+};
+
+static struct drm_i915_gem_object *
+mock_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
+
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem, flags);
+
+ return obj;
+}
+
+static const struct intel_memory_region_ops mock_region_ops = {
+ .init = intel_memory_region_init_buddy,
+ .release = intel_memory_region_release_buddy,
+ .create_object = mock_object_create,
+};
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start)
+{
+ return intel_memory_region_create(i915, start, size, min_page_size,
+ io_start, &mock_region_ops);
+}
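A note on the size check in mock_object_create() above: the largest object a region can back is one maximum-order buddy block, BIT(mem->mm.max_order) * mem->mm.chunk_size. For the SZ_2G region with I915_GTT_PAGE_SIZE_4K minimum pages created by the mock selftest earlier in this patch, and assuming the buddy allocator ends up with max_order = ilog2(size / chunk_size) = ilog2(2 GiB / 4 KiB) = 19, that cap is 2^19 * 4 KiB = 2 GiB, i.e. the whole region, so only requests exceeding the entire region fail with -E2BIG.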
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
new file mode 100644
index 000000000000..24608089d833
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __MOCK_REGION_H
+#define __MOCK_REGION_H
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start);
+
+#endif /* !__MOCK_REGION_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 49585f16d4a2..ca57e4008701 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -39,8 +39,11 @@ __nop_read(16)
__nop_read(32)
__nop_read(64)
-void mock_uncore_init(struct intel_uncore *uncore)
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915)
{
+ intel_uncore_init_early(uncore, i915);
+
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index dacb36b5ffcd..8a2cc553f466 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,7 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
-void mock_uncore_init(struct intel_uncore *uncore);
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915);
#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 695f307f36b2..208069faf183 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index e7ce17503ae1..35518e5de356 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -13,6 +13,7 @@
#include <video/of_display_timing.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 2e2ed653e9c6..ec32e1c67335 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -676,8 +677,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
}
if (panel)
- bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
&priv->dma_hwdesc_phys,
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index bb4ddc6bb0a6..571dc369a7e9 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -9,5 +9,6 @@ config DRM_LIMA
depends on COMMON_CLK
depends on OF
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
index 38cc70281ba5..a85444b0a1d4 100644
--- a/drivers/gpu/drm/lima/Makefile
+++ b/drivers/gpu/drm/lima/Makefile
@@ -13,9 +13,7 @@ lima-y := \
lima_vm.o \
lima_sched.o \
lima_ctx.o \
- lima_gem_prime.o \
lima_dlbu.o \
- lima_bcast.o \
- lima_object.o
+ lima_bcast.o
obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index d86b8d81a483..19829b543024 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -105,7 +105,8 @@ static int lima_clk_init(struct lima_device *dev)
if (err)
goto error_out0;
- dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
+ dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+
if (IS_ERR(dev->reset)) {
err = PTR_ERR(dev->reset);
if (err != -EPROBE_DEFER)
@@ -313,7 +314,7 @@ int lima_device_init(struct lima_device *ldev)
ldev->va_end = LIMA_VA_RESERVE_START;
ldev->dlbu_cpu = dma_alloc_wc(
ldev->dev, LIMA_PAGE_SIZE,
- &ldev->dlbu_dma, GFP_KERNEL);
+ &ldev->dlbu_dma, GFP_KERNEL | __GFP_NOWARN);
if (!ldev->dlbu_cpu) {
err = -ENOMEM;
goto err_out2;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 75ec703d22e0..124efe4fa97b 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -12,7 +12,6 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
@@ -240,16 +239,7 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW),
};
-static const struct file_operations lima_drm_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .mmap = lima_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -258,10 +248,6 @@ static struct drm_driver lima_drm_driver = {
.ioctls = lima_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
.fops = &lima_drm_driver_fops,
- .gem_free_object_unlocked = lima_gem_free_object,
- .gem_open_object = lima_gem_object_open,
- .gem_close_object = lima_gem_object_close,
- .gem_vm_ops = &lima_gem_vm_ops,
.name = "lima",
.desc = "lima DRM",
.date = "20190217",
@@ -269,11 +255,11 @@ static struct drm_driver lima_drm_driver = {
.minor = 0,
.patchlevel = 0,
+ .gem_create_object = lima_gem_create_object,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
- .gem_prime_mmap = lima_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
};
static int lima_pdev_probe(struct platform_device *pdev)
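The lima_drm_driver changes above drop the hand-rolled file_operations and prime callbacks in favour of the core GEM helpers. Below is a minimal sketch (not part of this patch) of how DEFINE_DRM_GEM_FOPS() and the shmem-backed prime helpers are typically wired together; the foo_* names are hypothetical placeholders, and foo_gem_create_object() would allocate an object embedding struct drm_gem_shmem_object and install its drm_gem_object_funcs, much like lima_gem_create_object() further down.

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>

/* Generic open/release/ioctl/mmap handlers provided by the DRM core */
DEFINE_DRM_GEM_FOPS(foo_fops);

static struct drm_gem_object *foo_gem_create_object(struct drm_device *dev,
						    size_t size);

static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_RENDER,
	.fops			= &foo_fops,
	/* per-object callbacks now come from drm_gem_object_funcs,
	 * installed by .gem_create_object, not from drm_driver */
	.gem_create_object	= foo_gem_create_object,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_prime_mmap		= drm_gem_prime_mmap,
};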
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 4da21353c3a2..d0059d8c97d8 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -3,7 +3,7 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
-#include <linux/pfn_t.h>
+#include <linux/pagemap.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -13,40 +13,55 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
-#include "lima_object.h"
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
int err;
- struct lima_bo *bo;
- struct lima_device *ldev = to_lima_dev(dev);
+ gfp_t mask;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ struct sg_table *sgt;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ obj = &shmem->base;
- bo = lima_bo_create(ldev, size, flags, NULL);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
+ /* Mali Utgard GPU can only support 32bit address space */
+ mask = mapping_gfp_mask(obj->filp->f_mapping);
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- err = drm_gem_handle_create(file, &bo->gem, handle);
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
+
+ err = drm_gem_handle_create(file, obj, handle);
+out:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(obj);
return err;
}
-void lima_gem_free_object(struct drm_gem_object *obj)
+static void lima_gem_free_object(struct drm_gem_object *obj)
{
struct lima_bo *bo = to_lima_bo(obj);
if (!list_empty(&bo->va))
dev_err(obj->dev->dev, "lima gem free bo still has va\n");
- lima_bo_destroy(bo);
+ drm_gem_shmem_free_object(obj);
}
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -55,7 +70,7 @@ int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
return lima_vm_bo_add(vm, bo, true);
}
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -64,13 +79,41 @@ void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
lima_vm_bo_del(vm, bo);
}
+static const struct drm_gem_object_funcs lima_gem_funcs = {
+ .free = lima_gem_free_object,
+ .open = lima_gem_object_open,
+ .close = lima_gem_object_close,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct lima_bo *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+
+ mutex_init(&bo->lock);
+ INIT_LIST_HEAD(&bo->va);
+
+ bo->base.base.funcs = &lima_gem_funcs;
+
+ return &bo->base.base;
+}
+
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
struct drm_gem_object *obj;
struct lima_bo *bo;
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
- int err;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
@@ -80,53 +123,9 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
*va = lima_vm_get_va(vm, bo);
- err = drm_gem_create_mmap_offset(obj);
- if (!err)
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put_unlocked(obj);
- return err;
-}
-
-static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct lima_bo *bo = to_lima_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
-
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
-
- return vmf_insert_mixed(vma, vmf->address, pfn);
-}
-
-const struct vm_operations_struct lima_gem_vm_ops = {
- .fault = lima_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-void lima_set_vma_flags(struct vm_area_struct *vma)
-{
- pgprot_t prot = vm_get_page_prot(vma->vm_flags);
-
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_page_prot = pgprot_writecombine(prot);
-}
-
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
return 0;
}
@@ -136,7 +135,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
int err = 0;
if (!write) {
- err = dma_resv_reserve_shared(bo->gem.resv, 1);
+ err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
if (err)
return err;
}
@@ -145,62 +144,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
- return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
-}
-
-static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i, ret = 0, contended, slow_locked = -1;
-
- ww_acquire_init(ctx, &reservation_ww_class);
-
-retry:
- for (i = 0; i < nr_bos; i++) {
- if (i == slow_locked) {
- slow_locked = -1;
- continue;
- }
-
- ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
- if (ret < 0) {
- contended = i;
- goto err;
- }
- }
-
- ww_acquire_done(ctx);
- return 0;
-
-err:
- for (i--; i >= 0; i--)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
-
- if (slow_locked >= 0)
- ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
-
- if (ret == -EDEADLK) {
- /* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(
- &bos[contended]->gem.resv->lock, ctx);
- if (!ret) {
- slow_locked = contended;
- goto retry;
- }
- }
- ww_acquire_fini(ctx);
-
- return ret;
-}
-
-static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < nr_bos; i++)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
- ww_acquire_fini(ctx);
+ return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -268,7 +212,8 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
bos[i] = bo;
}
- err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
+ err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
if (err)
goto err_out0;
@@ -296,15 +241,16 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
else
- dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
}
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
for (i = 0; i < submit->nr_bos; i++)
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
if (out_sync) {
drm_syncobj_replace_fence(out_sync, fence);
@@ -318,13 +264,14 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err_out2:
lima_sched_task_fini(submit->task);
err_out1:
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
err_out0:
for (i = 0; i < submit->nr_bos; i++) {
if (!bos[i])
break;
lima_vm_bo_del(vm, bos[i]);
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
}
if (out_sync)
drm_syncobj_put(out_sync);
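The submit path above now relies on the core drm_gem_lock_reservations()/drm_gem_unlock_reservations() helpers instead of the deleted open-coded ww_mutex loop. A minimal sketch of that pattern follows; objs[], count, fence and the per-BO write[] flags are assumed to be set up by the caller, and shared-fence slots are assumed to have been reserved beforehand with dma_resv_reserve_shared(), as lima_gem_sync_bo() does.

struct ww_acquire_ctx ctx;
int i, err;

/* Locks every reservation, handling -EDEADLK backoff internally */
err = drm_gem_lock_reservations(objs, count, &ctx);
if (err)
	return err;

for (i = 0; i < count; i++) {
	if (write[i])
		dma_resv_add_excl_fence(objs[i]->resv, fence);
	else
		dma_resv_add_shared_fence(objs[i]->resv, fence);
}

drm_gem_unlock_reservations(objs, count, &ctx);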
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 556111a01135..1800feb3e47f 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -4,19 +4,37 @@
#ifndef __LIMA_GEM_H__
#define __LIMA_GEM_H__
-struct lima_bo;
+#include <drm/drm_gem_shmem_helper.h>
+
struct lima_submit;
-extern const struct vm_operations_struct lima_gem_vm_ops;
+struct lima_bo {
+ struct drm_gem_shmem_object base;
+
+ struct mutex lock;
+ struct list_head va;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct lima_bo, base);
+}
+
+static inline size_t lima_bo_size(struct lima_bo *bo)
+{
+ return bo->base.base.size;
+}
+
+static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
+{
+ return bo->base.base.resv;
+}
-struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
-void lima_gem_free_object(struct drm_gem_object *obj);
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
deleted file mode 100644
index e3eb251e0a12..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <linux/dma-buf.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-
-#include "lima_device.h"
-#include "lima_object.h"
-#include "lima_gem.h"
-#include "lima_gem_prime.h"
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct lima_device *ldev = to_lima_dev(dev);
- struct lima_bo *bo;
-
- bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
-
- return &bo->gem;
-}
-
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct lima_bo *bo = to_lima_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
-
- return drm_prime_pages_to_sg(bo->pages, npages);
-}
-
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
- return 0;
-}
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
deleted file mode 100644
index 34b4d35c21e3..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_GEM_PRIME_H__
-#define __LIMA_GEM_PRIME_H__
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 8e1651d6a61f..97ec09dee572 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -8,7 +8,6 @@
#include "lima_device.h"
#include "lima_mmu.h"
#include "lima_vm.h"
-#include "lima_object.h"
#include "lima_regs.h"
#define mmu_write(reg, data) writel(data, ip->iomem + reg)
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
deleted file mode 100644
index 87123b1d083c..000000000000
--- a/drivers/gpu/drm/lima/lima_object.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <drm/drm_prime.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-
-#include "lima_object.h"
-
-void lima_bo_destroy(struct lima_bo *bo)
-{
- if (bo->sgt) {
- kfree(bo->pages);
- drm_prime_gem_destroy(&bo->gem, bo->sgt);
- } else {
- if (bo->pages_dma_addr) {
- int i, npages = bo->gem.size >> PAGE_SHIFT;
-
- for (i = 0; i < npages; i++) {
- if (bo->pages_dma_addr[i])
- dma_unmap_page(bo->gem.dev->dev,
- bo->pages_dma_addr[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
- }
-
- if (bo->pages)
- drm_gem_put_pages(&bo->gem, bo->pages, true, true);
- }
-
- kfree(bo->pages_dma_addr);
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags)
-{
- struct lima_bo *bo;
- int err;
-
- size = PAGE_ALIGN(size);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&bo->lock);
- INIT_LIST_HEAD(&bo->va);
-
- err = drm_gem_object_init(dev->ddev, &bo->gem, size);
- if (err) {
- kfree(bo);
- return ERR_PTR(err);
- }
-
- return bo;
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt)
-{
- int i, err;
- size_t npages;
- struct lima_bo *bo, *ret;
-
- bo = lima_bo_create_struct(dev, size, flags);
- if (IS_ERR(bo))
- return bo;
-
- npages = bo->gem.size >> PAGE_SHIFT;
-
- bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
- if (!bo->pages_dma_addr) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- if (sgt) {
- bo->sgt = sgt;
-
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- err = drm_prime_sg_to_page_addr_arrays(
- sgt, bo->pages, bo->pages_dma_addr, npages);
- if (err) {
- ret = ERR_PTR(err);
- goto err_out;
- }
- } else {
- mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
- bo->pages = drm_gem_get_pages(&bo->gem);
- if (IS_ERR(bo->pages)) {
- ret = ERR_CAST(bo->pages);
- bo->pages = NULL;
- goto err_out;
- }
-
- for (i = 0; i < npages; i++) {
- dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = ERR_PTR(-EFAULT);
- goto err_out;
- }
- bo->pages_dma_addr[i] = addr;
- }
-
- }
-
- return bo;
-
-err_out:
- lima_bo_destroy(bo);
- return ret;
-}
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
deleted file mode 100644
index 31ca2d8dc0a1..000000000000
--- a/drivers/gpu/drm/lima/lima_object.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_OBJECT_H__
-#define __LIMA_OBJECT_H__
-
-#include <drm/drm_gem.h>
-
-#include "lima_device.h"
-
-struct lima_bo {
- struct drm_gem_object gem;
-
- struct page **pages;
- dma_addr_t *pages_dma_addr;
- struct sg_table *sgt;
- void *vaddr;
-
- struct mutex lock;
- struct list_head va;
-};
-
-static inline struct lima_bo *
-to_lima_bo(struct drm_gem_object *obj)
-{
- return container_of(obj, struct lima_bo, gem);
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt);
-void lima_bo_destroy(struct lima_bo *bo);
-void *lima_bo_vmap(struct lima_bo *bo);
-void lima_bo_vunmap(struct lima_bo *bo);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 4127cacac454..f522c5f99729 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -10,7 +10,7 @@
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
-#include "lima_object.h"
+#include "lima_gem.h"
struct lima_fence {
struct dma_fence base;
@@ -117,7 +117,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
return -ENOMEM;
for (i = 0; i < num_bos; i++)
- drm_gem_object_get(&bos[i]->gem);
+ drm_gem_object_get(&bos[i]->base.base);
err = drm_sched_job_init(&task->base, &context->base, vm);
if (err) {
@@ -148,7 +148,7 @@ void lima_sched_task_fini(struct lima_sched_task *task)
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
- drm_gem_object_put_unlocked(&task->bos[i]->gem);
+ drm_gem_object_put_unlocked(&task->bos[i]->base.base);
kfree(task->bos);
}
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 19e88ca16527..840e2350d872 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -6,7 +6,7 @@
#include "lima_device.h"
#include "lima_vm.h"
-#include "lima_object.h"
+#include "lima_gem.h"
#include "lima_regs.h"
struct lima_bo_va {
@@ -32,7 +32,7 @@ struct lima_bo_va {
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
-static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
u32 addr;
@@ -44,41 +44,32 @@ static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
}
}
-static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
- u32 start, u32 end)
+static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
- u64 addr;
- int i = 0;
-
- for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
- u32 pbe = LIMA_PBE(addr);
- u32 bte = LIMA_BTE(addr);
-
- if (!vm->bts[pbe].cpu) {
- dma_addr_t pts;
- u32 *pd;
- int j;
-
- vm->bts[pbe].cpu = dma_alloc_wc(
- vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
- &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
- if (!vm->bts[pbe].cpu) {
- if (addr != start)
- lima_vm_unmap_page_table(vm, start, addr - 1);
- return -ENOMEM;
- }
-
- pts = vm->bts[pbe].dma;
- pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
- for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
- pd[j] = pts | LIMA_VM_FLAG_PRESENT;
- pts += LIMA_PAGE_SIZE;
- }
+ u32 pbe = LIMA_PBE(va);
+ u32 bte = LIMA_BTE(va);
+
+ if (!vm->bts[pbe].cpu) {
+ dma_addr_t pts;
+ u32 *pd;
+ int j;
+
+ vm->bts[pbe].cpu = dma_alloc_wc(
+ vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ if (!vm->bts[pbe].cpu)
+ return -ENOMEM;
+
+ pts = vm->bts[pbe].dma;
+ pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ pd[j] = pts | LIMA_VM_FLAG_PRESENT;
+ pts += LIMA_PAGE_SIZE;
}
-
- vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
}
+ vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;
+
return 0;
}
@@ -100,7 +91,8 @@ lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
struct lima_bo_va *bo_va;
- int err;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
mutex_lock(&bo->lock);
@@ -128,14 +120,18 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
mutex_lock(&vm->lock);
- err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
+ err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
if (err)
goto err_out1;
- err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
- if (err)
- goto err_out2;
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ bo_va->node.start + offset);
+ if (err)
+ goto err_out2;
+
+ offset += PAGE_SIZE;
+ }
mutex_unlock(&vm->lock);
@@ -145,6 +141,8 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
return 0;
err_out2:
+ if (offset)
+ lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
drm_mm_remove_node(&bo_va->node);
err_out1:
mutex_unlock(&vm->lock);
@@ -168,8 +166,8 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
- lima_vm_unmap_page_table(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ lima_vm_unmap_range(vm, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -210,14 +208,13 @@ struct lima_vm *lima_vm_create(struct lima_device *dev)
kref_init(&vm->refcount);
vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!vm->pd.cpu)
goto err_out0;
if (dev->dlbu_cpu) {
- int err = lima_vm_map_page_table(
- vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
- LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+ int err = lima_vm_map_page(
+ vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
if (err)
goto err_out1;
}
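lima_vm_bo_add() above now walks the shmem object's scatter-gather table one DMA page at a time rather than indexing a flat pages_dma_addr[] array. A sketch of the for_each_sg_dma_page() idiom it uses, where sgt is any mapped sg_table and map_one() is a hypothetical stand-in for lima_vm_map_page():

struct sg_dma_page_iter sg_iter;
unsigned int offset = 0;
int err = 0;

for_each_sg_dma_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
	dma_addr_t pa = sg_page_iter_dma_address(&sg_iter);

	err = map_one(vm, pa, base_va + offset);
	if (err)
		break;	/* caller then unmaps [base_va, base_va + offset) */

	offset += PAGE_SIZE;
}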
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9a09eba53182..5649887d2b90 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev)
}
if (!match) {
dev_err(dev, "no matching components\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto clk_disable;
}
if (IS_ERR(match)) {
dev_err(dev, "could not create component match\n");
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index f9c9e32b299c..d6214d3c8b33 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -946,8 +946,8 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
}
}
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge)) {
dev_err(dev, "error adding panel bridge\n");
return PTR_ERR(bridge);
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 82ae49c64221..8067a4be8311 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -12,6 +12,8 @@ mediatek-drm-y := mtk_disp_color.o \
mtk_drm_plane.o \
mtk_dsi.o \
mtk_mipi_tx.o \
+ mtk_mt8173_mipi_tx.o \
+ mtk_mt8183_mipi_tx.o \
mtk_dpi.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 21851756c579..4a55bb6e2213 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -3,6 +3,8 @@
* Copyright (c) 2015 MediaTek Inc.
*/
+#include <drm/drm_fourcc.h>
+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
@@ -19,6 +21,8 @@
#define DISP_REG_OVL_EN 0x000c
#define DISP_REG_OVL_RST 0x0014
#define DISP_REG_OVL_ROI_SIZE 0x0020
+#define DISP_REG_OVL_DATAPATH_CON 0x0024
+#define OVL_BGCLR_SEL_IN BIT(2)
#define DISP_REG_OVL_ROI_BGCLR 0x0028
#define DISP_REG_OVL_SRC_CON 0x002c
#define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
@@ -31,7 +35,9 @@
#define DISP_REG_OVL_ADDR_MT8173 0x0f40
#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
-#define OVL_RDMA_MEM_GMC 0x40402020
+#define GMC_THRESHOLD_BITS 16
+#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
+#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
#define OVL_CON_BYTE_SWAP BIT(24)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@@ -46,9 +52,13 @@
OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_AEN BIT(8)
#define OVL_CON_ALPHA 0xff
+#define OVL_CON_VIRT_FLIP BIT(9)
+#define OVL_CON_HORZ_FLIP BIT(10)
struct mtk_disp_ovl_data {
unsigned int addr;
+ unsigned int gmc_bits;
+ unsigned int layer_nr;
bool fmt_rgb565_is_0;
};
@@ -126,15 +136,65 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
{
- return 4;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+
+ return ovl->data->layer_nr;
+}
+
+static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp)
+{
+ return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+}
+
+static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *mtk_state)
+{
+ struct drm_plane_state *state = &mtk_state->base;
+ unsigned int rotation = 0;
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ rotation &= ~DRM_MODE_ROTATE_0;
+
+ /* We can only do reflection, not rotation */
+ if ((rotation & DRM_MODE_ROTATE_MASK) != 0)
+ return -EINVAL;
+
+ /*
+ * TODO: Rotating/reflecting YUV buffers is not supported at this time.
+ * Only RGB[AX] variants are supported.
+ */
+ if (state->fb->format->is_yuv && rotation != 0)
+ return -EINVAL;
+
+ state->rotation = rotation;
+
+ return 0;
}
static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
{
unsigned int reg;
+ unsigned int gmc_thrshd_l;
+ unsigned int gmc_thrshd_h;
+ unsigned int gmc_value;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
- writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
+
+ gmc_thrshd_l = GMC_THRESHOLD_LOW >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ if (ovl->data->gmc_bits == 10)
+ gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
+ else
+ gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
+ gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
+ writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
reg = reg | BIT(idx);
@@ -207,6 +267,16 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
if (idx != 0)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
+ if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ con |= OVL_CON_VIRT_FLIP;
+ addr += (pending->height - 1) * pending->pitch;
+ }
+
+ if (pending->rotation & DRM_MODE_REFLECT_X) {
+ con |= OVL_CON_HORZ_FLIP;
+ addr += pending->pitch - 1;
+ }
+
writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
@@ -217,16 +287,38 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
mtk_ovl_layer_on(comp, idx);
}
+static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg | OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
+static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg & ~OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.config = mtk_ovl_config,
.start = mtk_ovl_start,
.stop = mtk_ovl_stop,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
+ .supported_rotations = mtk_ovl_supported_rotations,
.layer_nr = mtk_ovl_layer_nr,
.layer_on = mtk_ovl_layer_on,
.layer_off = mtk_ovl_layer_off,
+ .layer_check = mtk_ovl_layer_check,
.layer_config = mtk_ovl_layer_config,
+ .bgclr_in_on = mtk_ovl_bgclr_in_on,
+ .bgclr_in_off = mtk_ovl_bgclr_in_off,
};
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
@@ -276,7 +368,12 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+ priv->data = of_device_get_match_data(dev);
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node,
+ priv->data->layer_nr == 4 ?
+ MTK_DISP_OVL :
+ MTK_DISP_OVL_2L);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
return comp_id;
@@ -289,8 +386,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
- priv->data = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -316,11 +411,15 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev)
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT2701,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = false,
};
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = true,
};
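The GMC threshold rework above is value-preserving for the existing SoCs: with GMC_THRESHOLD_BITS = 16, GMC_THRESHOLD_HIGH = 0x4000 and GMC_THRESHOLD_LOW = 0x2000, so for gmc_bits = 8 (MT2701 and MT8173) the code computes gmc_thrshd_h = 0x4000 >> 8 = 0x40 and gmc_thrshd_l = 0x2000 >> 8 = 0x20, giving gmc_value = 0x20 | 0x20 << 8 | 0x40 << 16 | 0x40 << 24 = 0x40402020, which is exactly the hard-coded OVL_RDMA_MEM_GMC constant this patch removes. A SoC declaring gmc_bits = 10 instead packs the high threshold into the two 16-bit halves of the register.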
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index be6d95c5ff25..01fa8b8d763d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -17,6 +17,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 34a731755791..f80a8ba75977 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -207,6 +207,28 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
+static
+struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ unsigned int *local_layer)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *comp;
+ int i, count = 0;
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ comp = mtk_crtc->ddp_comp[i];
+ if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) {
+ *local_layer = plane->index - count;
+ return comp;
+ }
+ count += mtk_ddp_comp_layer_nr(comp);
+ }
+
+ WARN(1, "Failed to find component for plane %d\n", plane->index);
+ return NULL;
+}
+
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -272,6 +294,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_on(comp);
+
mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
mtk_ddp_comp_start(comp);
}
@@ -280,10 +305,12 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
+ struct mtk_ddp_comp *comp;
+ unsigned int local_layer;
plane_state = to_mtk_plane_state(plane->state);
- mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
- plane_state);
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
}
return 0;
@@ -301,8 +328,12 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
+ }
+
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
@@ -327,6 +358,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
+ unsigned int local_layer;
/*
* TODO: instead of updating the registers here, we should prepare
@@ -348,15 +380,30 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(comp, i, plane_state);
- plane_state->pending.config = false;
- }
+ if (!plane_state->pending.config)
+ continue;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+ &local_layer);
+
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state);
+ plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
}
}
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state)
+{
+ unsigned int local_layer;
+ struct mtk_ddp_comp *comp;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ return mtk_ddp_comp_layer_check(comp, local_layer, state);
+}
+
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -518,14 +565,65 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
mtk_drm_finish_page_flip(mtk_crtc);
}
+static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
+ int comp_idx)
+{
+ struct mtk_ddp_comp *comp;
+
+ if (comp_idx > 1)
+ return 0;
+
+ comp = mtk_crtc->ddp_comp[comp_idx];
+ if (!comp->funcs)
+ return 0;
+
+ if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
+ return 0;
+
+ return mtk_ddp_comp_layer_nr(comp);
+}
+
+static inline
+enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
+{
+ if (plane_idx == 0)
+ return DRM_PLANE_TYPE_PRIMARY;
+ else if (plane_idx == 1)
+ return DRM_PLANE_TYPE_CURSOR;
+ else
+ return DRM_PLANE_TYPE_OVERLAY;
+
+}
+
+static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
+ struct mtk_drm_crtc *mtk_crtc,
+ int comp_idx, int pipe)
+{
+ int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
+ int i, ret;
+
+ for (i = 0; i < num_planes; i++) {
+ ret = mtk_plane_init(drm_dev,
+ &mtk_crtc->planes[mtk_crtc->layer_nr],
+ BIT(pipe),
+ mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
+ mtk_ddp_comp_supported_rotations(comp));
+ if (ret)
+ return ret;
+
+ mtk_crtc->layer_nr++;
+ }
+ return 0;
+}
+
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
struct mtk_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
struct mtk_drm_crtc *mtk_crtc;
- enum drm_plane_type type;
- unsigned int zpos;
+ unsigned int num_comp_planes = 0;
int pipe = priv->num_pipes;
int ret;
int i;
@@ -581,17 +679,15 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp[i] = comp;
}
- mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
- mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
- sizeof(struct drm_plane),
- GFP_KERNEL);
-
- for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
- type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
- (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
- DRM_PLANE_TYPE_OVERLAY;
- ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
- BIT(pipe), type);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
+
+ mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
+ sizeof(struct drm_plane), GFP_KERNEL);
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
+ pipe);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index fcc134eb00c9..6afe1c19557a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -19,5 +19,7 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state);
#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 8106a71a7404..13035c906035 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -33,12 +33,15 @@
#define DISP_REG_CONFIG_DSI_SEL 0x050
#define DISP_REG_CONFIG_DPI_SEL 0x064
-#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
-#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
-#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
-#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
+#define MT2701_DISP_MUTEX0_MOD0 0x2c
+#define MT2701_DISP_MUTEX0_SOF0 0x30
+
+#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
@@ -139,12 +142,30 @@ struct mtk_disp_mutex {
bool claimed;
};
+enum mtk_ddp_mutex_sof_id {
+ DDP_MUTEX_SOF_SINGLE_MODE,
+ DDP_MUTEX_SOF_DSI0,
+ DDP_MUTEX_SOF_DSI1,
+ DDP_MUTEX_SOF_DPI0,
+ DDP_MUTEX_SOF_DPI1,
+ DDP_MUTEX_SOF_DSI2,
+ DDP_MUTEX_SOF_DSI3,
+};
+
+struct mtk_ddp_data {
+ const unsigned int *mutex_mod;
+ const unsigned int *mutex_sof;
+ const unsigned int mutex_mod_reg;
+ const unsigned int mutex_sof_reg;
+ const bool no_clk;
+};
+
struct mtk_ddp {
struct device *dev;
struct clk *clk;
void __iomem *regs;
struct mtk_disp_mutex mutex[10];
- const unsigned int *mutex_mod;
+ const struct mtk_ddp_data *data;
};
static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -194,6 +215,37 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
+static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
+ [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+ [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+ [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+ [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+};
+
+static const struct mtk_ddp_data mt2701_ddp_driver_data = {
+ .mutex_mod = mt2701_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt2712_ddp_driver_data = {
+ .mutex_mod = mt2712_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt8173_ddp_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next,
unsigned int *addr)
@@ -432,45 +484,49 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int sof_id;
unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI1:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI2:
- reg = MUTEX_SOF_DSI2;
+ sof_id = DDP_MUTEX_SOF_DSI2;
break;
case DDP_COMPONENT_DSI3:
- reg = MUTEX_SOF_DSI3;
+ sof_id = DDP_MUTEX_SOF_DSI3;
break;
case DDP_COMPONENT_DPI0:
- reg = MUTEX_SOF_DPI0;
+ sof_id = DDP_MUTEX_SOF_DPI0;
break;
case DDP_COMPONENT_DPI1:
- reg = MUTEX_SOF_DPI1;
+ sof_id = DDP_MUTEX_SOF_DPI1;
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << ddp->mutex_mod[id];
+ reg |= 1 << ddp->data->mutex_mod[id];
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << (ddp->mutex_mod[id] - 32);
+ reg |= 1 << (ddp->data->mutex_mod[id] - 32);
writel_relaxed(reg, ddp->regs + offset);
}
return;
}
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ writel_relaxed(ddp->data->mutex_sof[sof_id],
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id));
}
void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
@@ -491,18 +547,21 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DPI0:
case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
- ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg,
+ mutex->id));
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << ddp->mutex_mod[id]);
+ reg &= ~(1 << ddp->data->mutex_mod[id]);
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+ reg &= ~(1 << (ddp->data->mutex_mod[id] - 32));
writel_relaxed(reg, ddp->regs + offset);
}
break;
@@ -564,10 +623,14 @@ static int mtk_ddp_probe(struct platform_device *pdev)
for (i = 0; i < 10; i++)
ddp->mutex[i].id = i;
- ddp->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddp->clk);
+ ddp->data = of_device_get_match_data(dev);
+
+ if (!ddp->data->no_clk) {
+ ddp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(ddp->clk);
+ }
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -577,8 +640,6 @@ static int mtk_ddp_probe(struct platform_device *pdev)
return PTR_ERR(ddp->regs);
}
- ddp->mutex_mod = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, ddp);
return 0;
@@ -590,9 +651,12 @@ static int mtk_ddp_remove(struct platform_device *pdev)
}
static const struct of_device_id ddp_driver_dt_match[] = {
- { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
- { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
- { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
+ { .compatible = "mediatek,mt2701-disp-mutex",
+ .data = &mt2701_ddp_driver_data},
+ { .compatible = "mediatek,mt2712-disp-mutex",
+ .data = &mt2712_ddp_driver_data},
+ { .compatible = "mediatek,mt8173-disp-mutex",
+ .data = &mt8173_ddp_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
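The mtk_drm_ddp.c hunks above replace the bare mutex_mod table hung off of_device_id .data with a per-SoC struct mtk_ddp_data (mod table, SOF table, register offsets) that probe fetches once through of_device_get_match_data(). Below is a minimal standalone C sketch of that match-data pattern; the variant names, register values and compatible strings are editorial stand-ins, not taken from the patch.

#include <stdio.h>
#include <string.h>

/* Per-variant configuration, analogous to struct mtk_ddp_data above. */
struct variant_data {
	const unsigned int *mod_table;
	unsigned int mod_reg;
	unsigned int sof_reg;
};

static const unsigned int variant_a_mod[] = { 1, 2, 3 };
static const unsigned int variant_b_mod[] = { 4, 5, 6 };

static const struct variant_data variant_a = {
	.mod_table = variant_a_mod, .mod_reg = 0x2c, .sof_reg = 0x30,
};
static const struct variant_data variant_b = {
	.mod_table = variant_b_mod, .mod_reg = 0x30, .sof_reg = 0x2c,
};

/* Stand-in for the of_device_id table: compatible string -> data pointer. */
struct match { const char *compatible; const struct variant_data *data; };
static const struct match matches[] = {
	{ "vendor,variant-a", &variant_a },
	{ "vendor,variant-b", &variant_b },
	{ NULL, NULL },
};

/* Stand-in for of_device_get_match_data(): resolve the variant once at probe. */
static const struct variant_data *get_match_data(const char *compatible)
{
	const struct match *m;

	for (m = matches; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct variant_data *data = get_match_data("vendor,variant-b");

	if (data)
		printf("mod_reg=0x%x sof_reg=0x%x first mod bit=%u\n",
		       data->mod_reg, data->sof_reg, data->mod_table[0]);
	return 0;
}

The same indirection lets every register access in the driver go through ddp->data, so adding another SoC only means adding another const table.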
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index efa85973e46b..7f21307cda75 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -33,6 +33,18 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
+#define DISP_CCORR_EN 0x0000
+#define CCORR_EN BIT(0)
+#define DISP_CCORR_CFG 0x0020
+#define CCORR_RELAY_MODE BIT(0)
+#define DISP_CCORR_SIZE 0x0030
+
+#define DISP_DITHER_EN 0x0000
+#define DITHER_EN BIT(0)
+#define DISP_DITHER_CFG 0x0020
+#define DITHER_RELAY_MODE BIT(0)
+#define DISP_DITHER_SIZE 0x0030
+
#define DISP_GAMMA_EN 0x0000
#define DISP_GAMMA_CFG 0x0020
#define DISP_GAMMA_SIZE 0x0030
@@ -123,6 +135,42 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp)
writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
}
+static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE);
+ writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG);
+}
+
+static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
+{
+ writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE);
+ writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG);
+}
+
+static void mtk_dither_start(struct mtk_ddp_comp *comp)
+{
+ writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
+}
+
+static void mtk_dither_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+}
+
static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
@@ -171,6 +219,18 @@ static const struct mtk_ddp_comp_funcs ddp_aal = {
.stop = mtk_aal_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+ .config = mtk_ccorr_config,
+ .start = mtk_ccorr_start,
+ .stop = mtk_ccorr_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dither = {
+ .config = mtk_dither_config,
+ .start = mtk_dither_start,
+ .stop = mtk_dither_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_gamma = {
.gamma_set = mtk_gamma_set,
.config = mtk_gamma_config,
@@ -189,11 +249,14 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_OVL] = "ovl",
+ [MTK_DISP_OVL_2L] = "ovl_2l",
[MTK_DISP_RDMA] = "rdma",
[MTK_DISP_WDMA] = "wdma",
[MTK_DISP_COLOR] = "color",
+ [MTK_DISP_CCORR] = "ccorr",
[MTK_DISP_AAL] = "aal",
[MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_DITHER] = "dither",
[MTK_DISP_UFOE] = "ufoe",
[MTK_DSI] = "dsi",
[MTK_DPI] = "dpi",
@@ -213,8 +276,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
+ [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
+ [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
@@ -226,6 +291,8 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
[DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
[DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 0ad287f427cc..2f1e9e75b8da 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -17,9 +17,12 @@ struct drm_crtc_state;
enum mtk_ddp_comp_type {
MTK_DISP_OVL,
+ MTK_DISP_OVL_2L,
MTK_DISP_RDMA,
MTK_DISP_WDMA,
MTK_DISP_COLOR,
+ MTK_DISP_CCORR,
+ MTK_DISP_DITHER,
MTK_DISP_AAL,
MTK_DISP_GAMMA,
MTK_DISP_UFOE,
@@ -36,8 +39,10 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
+ DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_DITHER,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
@@ -48,6 +53,8 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OD0,
DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
@@ -70,13 +77,19 @@ struct mtk_ddp_comp_funcs {
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
+ int (*layer_check)(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
struct mtk_plane_state *state);
void (*gamma_set)(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state);
+ void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
+ void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
};
struct mtk_ddp_comp {
@@ -121,6 +134,15 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
comp->funcs->disable_vblank(comp);
}
+static inline
+unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->supported_rotations)
+ return comp->funcs->supported_rotations(comp);
+
+ return 0;
+}
+
static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->layer_nr)
@@ -143,6 +165,15 @@ static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
comp->funcs->layer_off(comp, idx);
}
+static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ if (comp->funcs && comp->funcs->layer_check)
+ return comp->funcs->layer_check(comp, idx, state);
+ return 0;
+}
+
static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
unsigned int idx,
struct mtk_plane_state *state)
@@ -158,6 +189,18 @@ static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
comp->funcs->gamma_set(comp, state);
}
+static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_on)
+ comp->funcs->bgclr_in_on(comp);
+}
+
+static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_off)
+ comp->funcs->bgclr_in_off(comp);
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
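The header hunks above add several optional hooks (supported_rotations, layer_check, bgclr_in_on/off) and wrap each in a static inline helper that checks the pointer before calling, so components that do not implement a hook need no stub. A small standalone sketch of that guarded optional-callback pattern; the component and hook names are illustrative only, not part of the patch.

#include <stdio.h>
#include <stddef.h>

struct comp;

/* Optional per-component hooks, as in struct mtk_ddp_comp_funcs above. */
struct comp_funcs {
	void (*start)(struct comp *c);
	int (*layer_check)(struct comp *c, unsigned int idx);
};

struct comp {
	const char *name;
	const struct comp_funcs *funcs;
};

/* NULL-guarded wrappers: a missing hook degrades to a no-op or "accepted". */
static inline void comp_start(struct comp *c)
{
	if (c->funcs && c->funcs->start)
		c->funcs->start(c);
}

static inline int comp_layer_check(struct comp *c, unsigned int idx)
{
	if (c->funcs && c->funcs->layer_check)
		return c->funcs->layer_check(c, idx);
	return 0;	/* components without the hook accept any layer */
}

static void demo_start(struct comp *c) { printf("%s started\n", c->name); }

static const struct comp_funcs demo_funcs = { .start = demo_start };

int main(void)
{
	struct comp with_hooks = { "ccorr", &demo_funcs };
	struct comp without_hooks = { "color", NULL };

	comp_start(&with_hooks);
	comp_start(&without_hooks);		/* silently skipped */
	printf("check=%d\n", comp_layer_check(&without_hooks, 0));
	return 0;
}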
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 352b81a7a670..84d14213d992 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -547,6 +547,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
*/
if (comp_type == MTK_DISP_COLOR ||
comp_type == MTK_DISP_OVL ||
+ comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DSI ||
comp_type == MTK_DPI) {
@@ -669,8 +670,8 @@ static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
&mtk_drm_platform_driver,
- &mtk_dsi_driver,
&mtk_mipi_tx_driver,
+ &mtk_dsi_driver,
};
static int __init mtk_drm_init(void)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index ca672f1d140d..b04a3c2b111e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -271,7 +271,7 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
pgprot_writecombine(PAGE_KERNEL));
out:
- kfree((void *)sgt);
+ kfree(sgt);
return mtk_gem->kvaddr;
}
@@ -285,5 +285,5 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vunmap(vaddr);
mtk_gem->kvaddr = 0;
- kfree((void *)mtk_gem->pages);
+ kfree(mtk_gem->pages);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 584a9ecadce6..3b0cc91c7023 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -20,6 +20,12 @@
static const u32 formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
@@ -84,6 +90,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
{
struct drm_framebuffer *fb = state->fb;
struct drm_crtc_state *crtc_state;
+ int ret;
if (!fb)
return 0;
@@ -91,6 +98,11 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!state->crtc)
return 0;
+ ret = mtk_drm_crtc_plane_check(state->crtc, plane,
+ to_mtk_plane_state(state));
+ if (ret)
+ return ret;
+
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -132,6 +144,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
state->pending.y = plane->state->dst.y1;
state->pending.width = drm_rect_width(&plane->state->dst);
state->pending.height = drm_rect_height(&plane->state->dst);
+ state->pending.rotation = plane->state->rotation;
wmb(); /* Make sure the above parameters are set before update */
state->pending.dirty = true;
}
@@ -154,7 +167,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
};
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs, enum drm_plane_type type)
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int supported_rotations)
{
int err;
@@ -166,6 +180,14 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
return err;
}
+ if (supported_rotations & ~DRM_MODE_ROTATE_0) {
+ err = drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+ if (err)
+ DRM_INFO("Create rotation property failed\n");
+ }
+
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 6f842df722c7..760885e35b27 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -20,6 +20,7 @@ struct mtk_plane_pending_state {
unsigned int y;
unsigned int width;
unsigned int height;
+ unsigned int rotation;
bool dirty;
};
@@ -35,6 +36,7 @@ to_mtk_plane_state(struct drm_plane_state *state)
}
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs, enum drm_plane_type type);
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int supported_rotations);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 224afb666881..e9931bbbe846 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -16,6 +16,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -39,6 +40,7 @@
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
+#define DPHY_RESET BIT(2)
#define DSI_MODE_CTRL 0x14
#define MODE (3)
@@ -72,6 +74,7 @@
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
+#define DSI_SIZE_CON 0x38
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
@@ -125,7 +128,10 @@
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_CMDQ0 0x180
+#define DSI_SHADOW_DEBUG 0x190U
+#define FORCE_COMMIT BIT(0)
+#define BYPASS_SHADOW BIT(1)
+
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
@@ -134,12 +140,6 @@
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
-#define T_LPX 5
-#define T_HS_PREP 6
-#define T_HS_TRAIL 8
-#define T_HS_EXIT 7
-#define T_HS_ZERO 10
-
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
#define MTK_DSI_HOST_IS_READ(type) \
@@ -148,8 +148,33 @@
(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
(type == MIPI_DSI_DCS_READ))
+struct mtk_phy_timing {
+ u32 lpx;
+ u32 da_hs_prepare;
+ u32 da_hs_zero;
+ u32 da_hs_trail;
+
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+ u32 da_hs_exit;
+
+ u32 clk_hs_zero;
+ u32 clk_hs_trail;
+
+ u32 clk_hs_prepare;
+ u32 clk_hs_post;
+ u32 clk_hs_exit;
+};
+
struct phy;
+struct mtk_dsi_driver_data {
+ const u32 reg_cmdq_off;
+ bool has_shadow_ctl;
+ bool has_size_ctl;
+};
+
struct mtk_dsi {
struct mtk_ddp_comp ddp_comp;
struct device *dev;
@@ -172,10 +197,12 @@ struct mtk_dsi {
enum mipi_dsi_pixel_format format;
unsigned int lanes;
struct videomode vm;
+ struct mtk_phy_timing phy_timing;
int refcount;
bool enabled;
u32 irq_data;
wait_queue_head_t irq_wait_queue;
+ const struct mtk_dsi_driver_data *driver_data;
};
static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -204,17 +231,36 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 ui, cycle_time;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
+
+ ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
+ cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+
+ timing->lpx = NS_TO_CYCLE(60, cycle_time);
+ timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
+ timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
+ timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+
+ timing->ta_go = 4 * timing->lpx;
+ timing->ta_sure = 3 * timing->lpx / 2;
+ timing->ta_get = 5 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx;
- ui = 1000 / dsi->data_rate + 0x01;
- cycle_time = 8000 / dsi->data_rate + 0x01;
+ timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
+ timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
- timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
- timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
- T_HS_EXIT << 24;
- timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
- (NS_TO_CYCLE(0x150, cycle_time) << 16);
- timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
- NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
+ timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
+ timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
+ timing->clk_hs_exit = 2 * timing->lpx;
+
+ timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
+ timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
+ timcon1 = timing->ta_go | timing->ta_sure << 8 |
+ timing->ta_get << 16 | timing->da_hs_exit << 24;
+ timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
+ timing->clk_hs_trail << 24;
+ timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
+ timing->clk_hs_exit << 16;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -238,6 +284,12 @@ static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
+static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
+}
+
static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
@@ -401,7 +453,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
u32 horizontal_frontporch_byte;
- u32 dsi_tmp_buf_bpp;
+ u32 dsi_tmp_buf_bpp, data_phy_cycles;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
struct videomode *vm = &dsi->vm;
@@ -415,6 +468,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+ if (dsi->driver_data->has_size_ctl)
+ writel(vm->vactive << 16 | vm->hactive,
+ dsi->regs + DSI_SIZE_CON);
+
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -424,7 +481,34 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
dsi_tmp_buf_bpp - 10);
- horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);
+ data_phy_cycles = timing->lpx + timing->da_hs_prepare +
+ timing->da_hs_zero + timing->da_hs_exit + 2;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 18) {
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp -
+ data_phy_cycles *
+ dsi->lanes - 18;
+ } else {
+ DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ } else {
+ if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 12) {
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp -
+ data_phy_cycles *
+ dsi->lanes - 12;
+ } else {
+ DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ }
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
@@ -522,10 +606,9 @@ static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
- struct device *dev = dsi->dev;
+ struct device *dev = dsi->host.dev;
int ret;
- u64 pixel_clock, total_bits;
- u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+ u32 bit_per_pixel;
if (++dsi->refcount != 1)
return 0;
@@ -544,24 +627,8 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
break;
}
- /**
- * htotal_time = htotal * byte_per_pixel / num_lanes
- * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
- * mipi_ratio = (htotal_time + overhead_time) / htotal_time
- * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
- */
- pixel_clock = dsi->vm.pixelclock;
- htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
- dsi->vm.hsync_len;
- htotal_bits = htotal * bit_per_pixel;
-
- overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
- T_HS_EXIT;
- overhead_bits = overhead_cycles * dsi->lanes * 8;
- total_bits = htotal_bits + overhead_bits;
-
- dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
- htotal * dsi->lanes);
+ dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
+ dsi->lanes);
ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
@@ -584,10 +651,17 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
}
mtk_dsi_enable(dsi);
+
+ if (dsi->driver_data->has_shadow_ctl)
+ writel(FORCE_COMMIT | BYPASS_SHADOW,
+ dsi->regs + DSI_SHADOW_DEBUG);
+
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
mtk_dsi_rxtx_control(dsi);
+ usleep_range(30, 100);
+ mtk_dsi_reset_dphy(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
@@ -938,6 +1012,7 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
const char *tx_buf = msg->tx_buf;
u8 config, cmdq_size, cmdq_off, type = msg->type;
u32 reg_val, cmdq_mask, i;
+ u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;
if (MTK_DSI_HOST_IS_READ(type))
config = BTA;
@@ -957,9 +1032,11 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
}
for (i = 0; i < msg->tx_len; i++)
- writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+ mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
+ (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
+ tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));
- mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+ mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}
@@ -1049,12 +1126,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = mipi_dsi_host_register(&dsi->host);
- if (ret < 0) {
- dev_err(dev, "failed to register DSI host: %d\n", ret);
- goto err_ddp_comp_unregister;
- }
-
ret = mtk_dsi_create_conn_enc(drm, dsi);
if (ret) {
DRM_ERROR("Encoder create failed with %d\n", ret);
@@ -1064,8 +1135,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_unregister:
- mipi_dsi_host_unregister(&dsi->host);
-err_ddp_comp_unregister:
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
return ret;
}
@@ -1077,7 +1146,6 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master,
struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_destroy_conn_enc(dsi);
- mipi_dsi_host_unregister(&dsi->host);
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
@@ -1101,31 +1169,38 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0) {
+ dev_err(dev, "failed to register DSI host: %d\n", ret);
+ return ret;
+ }
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&dsi->panel, &dsi->bridge);
if (ret)
- return ret;
+ goto err_unregister_host;
+
+ dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
dev_err(dev, "Failed to get engine clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
dev_err(dev, "Failed to get digital clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->hs_clk = devm_clk_get(dev, "hs");
if (IS_ERR(dsi->hs_clk)) {
ret = PTR_ERR(dsi->hs_clk);
dev_err(dev, "Failed to get hs clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1133,33 +1208,35 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->regs)) {
ret = PTR_ERR(dsi->regs);
dev_err(dev, "Failed to ioremap memory: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->phy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->phy)) {
ret = PTR_ERR(dsi->phy);
dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ ret = comp_id;
+ goto err_unregister_host;
}
ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
&mtk_dsi_funcs);
if (ret) {
dev_err(dev, "Failed to initialize component: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
- dev_err(&pdev->dev, "failed to request dsi irq resource\n");
- return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
+ ret = irq_num;
+ goto err_unregister_host;
}
irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
@@ -1167,14 +1244,24 @@ static int mtk_dsi_probe(struct platform_device *pdev)
IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
- return -EPROBE_DEFER;
+ goto err_unregister_host;
}
init_waitqueue_head(&dsi->irq_wait_queue);
platform_set_drvdata(pdev, dsi);
- return component_add(&pdev->dev, &mtk_dsi_component_ops);
+ ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add component: %d\n", ret);
+ goto err_unregister_host;
+ }
+
+ return 0;
+
+err_unregister_host:
+ mipi_dsi_host_unregister(&dsi->host);
+ return ret;
}
static int mtk_dsi_remove(struct platform_device *pdev)
@@ -1183,13 +1270,32 @@ static int mtk_dsi_remove(struct platform_device *pdev)
mtk_output_dsi_disable(dsi);
component_del(&pdev->dev, &mtk_dsi_component_ops);
+ mipi_dsi_host_unregister(&dsi->host);
return 0;
}
+static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+};
+
+static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
+ .reg_cmdq_off = 0x180,
+};
+
+static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+ .has_shadow_ctl = true,
+ .has_size_ctl = true,
+};
+
static const struct of_device_id mtk_dsi_of_match[] = {
- { .compatible = "mediatek,mt2701-dsi" },
- { .compatible = "mediatek,mt8173-dsi" },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = &mt2701_dsi_driver_data },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = &mt8173_dsi_driver_data },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = &mt8183_dsi_driver_data },
{ },
};
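The reworked mtk_dsi_phy_timconfig() above derives the D-PHY timing fields from the link rate instead of the old fixed T_* constants. The standalone sketch below redoes that arithmetic for one sample rate, assuming data_rate is the per-lane bit rate in bit/s; NS_TO_CYCLE is copied from the driver, and the 1 Gbit/s figure is an editorial assumption.

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* From the driver: round a duration in ns up to whole byte-clock cycles. */
#define NS_TO_CYCLE(n, c)	((n) / (c) + (((n) % (c)) ? 1 : 0))

int main(void)
{
	uint64_t data_rate = 1000000000ULL;	/* assumed: 1 Gbit/s per lane */
	uint32_t ui = DIV_ROUND_UP(1000000000ULL, data_rate);	/* ns per unit interval */
	uint32_t cycle_time = 8000000000ULL / data_rate;	/* ns per byte clock */

	uint32_t lpx = NS_TO_CYCLE(60, cycle_time);
	uint32_t da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
	uint32_t da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
	uint32_t da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
	uint32_t da_hs_exit = 2 * lpx;

	/* These four bytes end up packed into DSI_PHY_TIMECON0, low to high. */
	uint32_t timcon0 = lpx | da_hs_prepare << 8 |
			   da_hs_zero << 16 | da_hs_trail << 24;

	printf("lpx=%u prepare=%u zero=%u trail=%u exit=%u timcon0=0x%08x\n",
	       lpx, da_hs_prepare, da_hs_zero, da_hs_trail, da_hs_exit, timcon0);
	return 0;
}

The same lpx/prepare/zero/exit sum feeds data_phy_cycles in mtk_dsi_config_vdo_timing(), which is why the front porch is trimmed by that amount in the hunks above.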
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ce91b61364eb..c79b1f855d89 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -23,6 +23,7 @@
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 1842dc2caae9..e4d34484ecc8 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -3,292 +3,39 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
-
-#define MIPITX_DSI_CON 0x00
-#define RG_DSI_LDOCORE_EN BIT(0)
-#define RG_DSI_CKG_LDOOUT_EN BIT(1)
-#define RG_DSI_BCLK_SEL (3 << 2)
-#define RG_DSI_LD_IDX_SEL (7 << 4)
-#define RG_DSI_PHYCLK_SEL (2 << 8)
-#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
-#define RG_DSI_LPTX_CLMP_EN BIT(11)
-
-#define MIPITX_DSI_CLOCK_LANE 0x04
-#define MIPITX_DSI_DATA_LANE0 0x08
-#define MIPITX_DSI_DATA_LANE1 0x0c
-#define MIPITX_DSI_DATA_LANE2 0x10
-#define MIPITX_DSI_DATA_LANE3 0x14
-#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
-#define RG_DSI_LNTx_CKLANE_EN BIT(1)
-#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
-#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
-#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
-#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
-#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
-#define RG_DSI_LNTx_RT_CODE (0xf << 8)
-
-#define MIPITX_DSI_TOP_CON 0x40
-#define RG_DSI_LNT_INTR_EN BIT(0)
-#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
-#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
-#define RG_DSI_LNT_TESTMODE_EN BIT(3)
-#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
-#define RG_DSI_LNT_AIO_SEL (7 << 8)
-#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
-#define RG_DSI_DEBUG_INPUT_EN BIT(12)
-#define RG_DSI_PRESERVE (7 << 13)
-
-#define MIPITX_DSI_BG_CON 0x44
-#define RG_DSI_BG_CORE_EN BIT(0)
-#define RG_DSI_BG_CKEN BIT(1)
-#define RG_DSI_BG_DIV (0x3 << 2)
-#define RG_DSI_BG_FAST_CHARGE BIT(4)
-#define RG_DSI_VOUT_MSK (0x3ffff << 5)
-#define RG_DSI_V12_SEL (7 << 5)
-#define RG_DSI_V10_SEL (7 << 8)
-#define RG_DSI_V072_SEL (7 << 11)
-#define RG_DSI_V04_SEL (7 << 14)
-#define RG_DSI_V032_SEL (7 << 17)
-#define RG_DSI_V02_SEL (7 << 20)
-#define RG_DSI_BG_R1_TRIM (0xf << 24)
-#define RG_DSI_BG_R2_TRIM (0xf << 28)
-
-#define MIPITX_DSI_PLL_CON0 0x50
-#define RG_DSI_MPPLL_PLL_EN BIT(0)
-#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
-#define RG_DSI_MPPLL_PREDIV (3 << 1)
-#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
-#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
-#define RG_DSI_MPPLL_POSDIV (7 << 7)
-#define RG_DSI_MPPLL_MONVC_EN BIT(10)
-#define RG_DSI_MPPLL_MONREF_EN BIT(11)
-#define RG_DSI_MPPLL_VOD_EN BIT(12)
-
-#define MIPITX_DSI_PLL_CON1 0x54
-#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
-#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
-#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
-#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
-
-#define MIPITX_DSI_PLL_CON2 0x58
-
-#define MIPITX_DSI_PLL_TOP 0x64
-#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
-
-#define MIPITX_DSI_PLL_PWR 0x68
-#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
-#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
-#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
-
-#define MIPITX_DSI_SW_CTRL 0x80
-#define SW_CTRL_EN BIT(0)
-
-#define MIPITX_DSI_SW_CTRL_CON0 0x84
-#define SW_LNTC_LPTX_PRE_OE BIT(0)
-#define SW_LNTC_LPTX_OE BIT(1)
-#define SW_LNTC_LPTX_P BIT(2)
-#define SW_LNTC_LPTX_N BIT(3)
-#define SW_LNTC_HSTX_PRE_OE BIT(4)
-#define SW_LNTC_HSTX_OE BIT(5)
-#define SW_LNTC_HSTX_ZEROCLK BIT(6)
-#define SW_LNT0_LPTX_PRE_OE BIT(7)
-#define SW_LNT0_LPTX_OE BIT(8)
-#define SW_LNT0_LPTX_P BIT(9)
-#define SW_LNT0_LPTX_N BIT(10)
-#define SW_LNT0_HSTX_PRE_OE BIT(11)
-#define SW_LNT0_HSTX_OE BIT(12)
-#define SW_LNT0_LPRX_EN BIT(13)
-#define SW_LNT1_LPTX_PRE_OE BIT(14)
-#define SW_LNT1_LPTX_OE BIT(15)
-#define SW_LNT1_LPTX_P BIT(16)
-#define SW_LNT1_LPTX_N BIT(17)
-#define SW_LNT1_HSTX_PRE_OE BIT(18)
-#define SW_LNT1_HSTX_OE BIT(19)
-#define SW_LNT2_LPTX_PRE_OE BIT(20)
-#define SW_LNT2_LPTX_OE BIT(21)
-#define SW_LNT2_LPTX_P BIT(22)
-#define SW_LNT2_LPTX_N BIT(23)
-#define SW_LNT2_HSTX_PRE_OE BIT(24)
-#define SW_LNT2_HSTX_OE BIT(25)
-
-struct mtk_mipitx_data {
- const u32 mppll_preserve;
-};
-
-struct mtk_mipi_tx {
- struct device *dev;
- void __iomem *regs;
- u32 data_rate;
- const struct mtk_mipitx_data *driver_data;
- struct clk_hw pll_hw;
- struct clk *pll;
-};
+#include "mtk_mipi_tx.h"
-static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
+inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
{
return container_of(hw, struct mtk_mipi_tx, pll_hw);
}
-static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp & ~bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp | bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 mask, u32 data)
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 mask, u32 data)
{
u32 temp = readl(mipi_tx->regs + offset);
writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
}
-static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
- u8 txdiv, txdiv0, txdiv1;
- u64 pcw;
-
- dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
-
- if (mipi_tx->data_rate >= 500000000) {
- txdiv = 1;
- txdiv0 = 0;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 250000000) {
- txdiv = 2;
- txdiv0 = 1;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 125000000) {
- txdiv = 4;
- txdiv0 = 2;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate > 62000000) {
- txdiv = 8;
- txdiv0 = 2;
- txdiv1 = 1;
- } else if (mipi_tx->data_rate >= 50000000) {
- txdiv = 16;
- txdiv0 = 2;
- txdiv1 = 2;
- } else {
- return -EINVAL;
- }
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_VOUT_MSK |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
- (4 << 20) | (4 << 17) | (4 << 14) |
- (4 << 11) | (4 << 8) | (4 << 5) |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- usleep_range(30, 100);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
- (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_PWR_ON |
- RG_DSI_MPPLL_SDM_ISO_EN,
- RG_DSI_MPPLL_SDM_PWR_ON);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
- RG_DSI_MPPLL_PREDIV,
- (txdiv0 << 3) | (txdiv1 << 5));
-
- /*
- * PLL PCW config
- * PCW bit 24~30 = integer part of pcw
- * PCW bit 0~23 = fractional part of pcw
- * pcw = data_Rate*4*txdiv/(Ref_clk*2);
- * Post DIV =4, so need data_Rate*4
- * Ref_clk is 26MHz
- */
- pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
- 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_FRA_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
-
- usleep_range(20, 100);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_SSC_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE,
- mipi_tx->driver_data->mppll_preserve);
-
- return 0;
-}
-
-static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
-
- dev_dbg(mipi_tx->dev, "unprepare\n");
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE, 0);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_ISO_EN |
- RG_DSI_MPPLL_SDM_PWR_ON,
- RG_DSI_MPPLL_SDM_ISO_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_DIV_MSK);
-}
-
-static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- return clamp_val(rate, 50000000, 1250000000);
-}
-
-static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
@@ -299,37 +46,14 @@ static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
return mipi_tx->data_rate;
}
-static const struct clk_ops mtk_mipi_tx_pll_ops = {
- .prepare = mtk_mipi_tx_pll_prepare,
- .unprepare = mtk_mipi_tx_pll_unprepare,
- .round_rate = mtk_mipi_tx_pll_round_rate,
- .set_rate = mtk_mipi_tx_pll_set_rate,
- .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
-};
-
-static int mtk_mipi_tx_power_on_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- return 0;
-}
-
static int mtk_mipi_tx_power_on(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -341,30 +65,16 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
return ret;
/* Enable DSI Lane LDO outputs, disable pad tie low */
- mtk_mipi_tx_power_on_signal(phy);
-
+ mipi_tx->driver_data->mipi_tx_enable_signal(phy);
return 0;
}
-static void mtk_mipi_tx_power_off_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-}
-
static int mtk_mipi_tx_power_off(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
/* Enable pad tie low, disable DSI Lane LDO outputs */
- mtk_mipi_tx_power_off_signal(phy);
+ mipi_tx->driver_data->mipi_tx_disable_signal(phy);
/* Disable PLL and power down core */
clk_disable_unprepare(mipi_tx->pll);
@@ -383,10 +93,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_mipi_tx *mipi_tx;
struct resource *mem;
- struct clk *ref_clk;
const char *ref_clk_name;
+ struct clk *ref_clk;
struct clk_init_data clk_init = {
- .ops = &mtk_mipi_tx_pll_ops,
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
.flags = CLK_SET_RATE_GATE,
@@ -400,6 +109,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return -ENOMEM;
mipi_tx->driver_data = of_device_get_match_data(dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
@@ -414,6 +124,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get reference clock: %d\n", ret);
return ret;
}
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -423,6 +134,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return ret;
}
+ clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
+
mipi_tx->pll_hw.init = &clk_init;
mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
if (IS_ERR(mipi_tx->pll)) {
@@ -457,20 +170,14 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev)
return 0;
}
-static const struct mtk_mipitx_data mt2701_mipitx_data = {
- .mppll_preserve = (3 << 8)
-};
-
-static const struct mtk_mipitx_data mt8173_mipitx_data = {
- .mppll_preserve = (0 << 8)
-};
-
static const struct of_device_id mtk_mipi_tx_match[] = {
{ .compatible = "mediatek,mt2701-mipi-tx",
.data = &mt2701_mipitx_data },
{ .compatible = "mediatek,mt8173-mipi-tx",
.data = &mt8173_mipitx_data },
- {},
+ { .compatible = "mediatek,mt8183-mipi-tx",
+ .data = &mt8183_mipitx_data },
+ { },
};
struct platform_driver mtk_mipi_tx_driver = {
@@ -481,3 +188,4 @@ struct platform_driver mtk_mipi_tx_driver = {
.of_match_table = mtk_mipi_tx_match,
},
};
+
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
new file mode 100644
index 000000000000..413f35d86219
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#ifndef _MTK_MIPI_TX_H
+#define _MTK_MIPI_TX_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+struct mtk_mipitx_data {
+ const u32 mppll_preserve;
+ const struct clk_ops *mipi_tx_clk_ops;
+ void (*mipi_tx_enable_signal)(struct phy *phy);
+ void (*mipi_tx_disable_signal)(struct phy *phy);
+};
+
+struct mtk_mipi_tx {
+ struct device *dev;
+ void __iomem *regs;
+ u32 data_rate;
+ const struct mtk_mipitx_data *driver_data;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+};
+
+struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask,
+ u32 data);
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+extern const struct mtk_mipitx_data mt2701_mipitx_data;
+extern const struct mtk_mipitx_data mt8173_mipitx_data;
+extern const struct mtk_mipitx_data mt8183_mipitx_data;
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
new file mode 100644
index 000000000000..f18db14d8b63
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_DSI_CON 0x00
+#define RG_DSI_LDOCORE_EN BIT(0)
+#define RG_DSI_CKG_LDOOUT_EN BIT(1)
+#define RG_DSI_BCLK_SEL (3 << 2)
+#define RG_DSI_LD_IDX_SEL (7 << 4)
+#define RG_DSI_PHYCLK_SEL (2 << 8)
+#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
+#define RG_DSI_LPTX_CLMP_EN BIT(11)
+
+#define MIPITX_DSI_CLOCK_LANE 0x04
+#define MIPITX_DSI_DATA_LANE0 0x08
+#define MIPITX_DSI_DATA_LANE1 0x0c
+#define MIPITX_DSI_DATA_LANE2 0x10
+#define MIPITX_DSI_DATA_LANE3 0x14
+#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
+#define RG_DSI_LNTx_CKLANE_EN BIT(1)
+#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
+#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
+#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
+#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
+#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
+#define RG_DSI_LNTx_RT_CODE (0xf << 8)
+
+#define MIPITX_DSI_TOP_CON 0x40
+#define RG_DSI_LNT_INTR_EN BIT(0)
+#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
+#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
+#define RG_DSI_LNT_TESTMODE_EN BIT(3)
+#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
+#define RG_DSI_LNT_AIO_SEL (7 << 8)
+#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
+#define RG_DSI_DEBUG_INPUT_EN BIT(12)
+#define RG_DSI_PRESERVE (7 << 13)
+
+#define MIPITX_DSI_BG_CON 0x44
+#define RG_DSI_BG_CORE_EN BIT(0)
+#define RG_DSI_BG_CKEN BIT(1)
+#define RG_DSI_BG_DIV (0x3 << 2)
+#define RG_DSI_BG_FAST_CHARGE BIT(4)
+#define RG_DSI_VOUT_MSK (0x3ffff << 5)
+#define RG_DSI_V12_SEL (7 << 5)
+#define RG_DSI_V10_SEL (7 << 8)
+#define RG_DSI_V072_SEL (7 << 11)
+#define RG_DSI_V04_SEL (7 << 14)
+#define RG_DSI_V032_SEL (7 << 17)
+#define RG_DSI_V02_SEL (7 << 20)
+#define RG_DSI_BG_R1_TRIM (0xf << 24)
+#define RG_DSI_BG_R2_TRIM (0xf << 28)
+
+#define MIPITX_DSI_PLL_CON0 0x50
+#define RG_DSI_MPPLL_PLL_EN BIT(0)
+#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
+#define RG_DSI_MPPLL_PREDIV (3 << 1)
+#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
+#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
+#define RG_DSI_MPPLL_POSDIV (7 << 7)
+#define RG_DSI_MPPLL_MONVC_EN BIT(10)
+#define RG_DSI_MPPLL_MONREF_EN BIT(11)
+#define RG_DSI_MPPLL_VOD_EN BIT(12)
+
+#define MIPITX_DSI_PLL_CON1 0x54
+#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
+#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
+#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
+#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
+
+#define MIPITX_DSI_PLL_CON2 0x58
+
+#define MIPITX_DSI_PLL_TOP 0x64
+#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+
+#define MIPITX_DSI_PLL_PWR 0x68
+#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
+#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
+#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
+
+#define MIPITX_DSI_SW_CTRL 0x80
+#define SW_CTRL_EN BIT(0)
+
+#define MIPITX_DSI_SW_CTRL_CON0 0x84
+#define SW_LNTC_LPTX_PRE_OE BIT(0)
+#define SW_LNTC_LPTX_OE BIT(1)
+#define SW_LNTC_LPTX_P BIT(2)
+#define SW_LNTC_LPTX_N BIT(3)
+#define SW_LNTC_HSTX_PRE_OE BIT(4)
+#define SW_LNTC_HSTX_OE BIT(5)
+#define SW_LNTC_HSTX_ZEROCLK BIT(6)
+#define SW_LNT0_LPTX_PRE_OE BIT(7)
+#define SW_LNT0_LPTX_OE BIT(8)
+#define SW_LNT0_LPTX_P BIT(9)
+#define SW_LNT0_LPTX_N BIT(10)
+#define SW_LNT0_HSTX_PRE_OE BIT(11)
+#define SW_LNT0_HSTX_OE BIT(12)
+#define SW_LNT0_LPRX_EN BIT(13)
+#define SW_LNT1_LPTX_PRE_OE BIT(14)
+#define SW_LNT1_LPTX_OE BIT(15)
+#define SW_LNT1_LPTX_P BIT(16)
+#define SW_LNT1_LPTX_N BIT(17)
+#define SW_LNT1_HSTX_PRE_OE BIT(18)
+#define SW_LNT1_HSTX_OE BIT(19)
+#define SW_LNT2_LPTX_PRE_OE BIT(20)
+#define SW_LNT2_LPTX_OE BIT(21)
+#define SW_LNT2_LPTX_P BIT(22)
+#define SW_LNT2_LPTX_N BIT(23)
+#define SW_LNT2_HSTX_PRE_OE BIT(24)
+#define SW_LNT2_HSTX_OE BIT(25)
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ u8 txdiv, txdiv0, txdiv1;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 250000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate > 62000000) {
+ txdiv = 8;
+ txdiv0 = 2;
+ txdiv1 = 1;
+ } else if (mipi_tx->data_rate >= 50000000) {
+ txdiv = 16;
+ txdiv0 = 2;
+ txdiv1 = 2;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_VOUT_MSK |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
+ (4 << 20) | (4 << 17) | (4 << 14) |
+ (4 << 11) | (4 << 8) | (4 << 5) |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ usleep_range(30, 100);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+ (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_PWR_ON |
+ RG_DSI_MPPLL_SDM_ISO_EN,
+ RG_DSI_MPPLL_SDM_PWR_ON);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+ RG_DSI_MPPLL_PREDIV,
+ (txdiv0 << 3) | (txdiv1 << 5));
+
+ /*
+ * PLL PCW config
+ * PCW bit 24~30 = integer part of pcw
+ * PCW bit 0~23 = fractional part of pcw
+ * pcw = data_Rate*4*txdiv/(Ref_clk*2);
+ * Post DIV =4, so need data_Rate*4
+ * Ref_clk is 26MHz
+ */
+ pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
+ 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_FRA_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+
+ usleep_range(20, 100);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_SSC_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
+
+ return 0;
+}
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ dev_dbg(mipi_tx->dev, "unprepare\n");
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE, 0);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_ISO_EN |
+ RG_DSI_MPPLL_SDM_PWR_ON,
+ RG_DSI_MPPLL_SDM_ISO_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_DIV_MSK);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1250000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .prepare = mtk_mipi_tx_pll_prepare,
+ .unprepare = mtk_mipi_tx_pll_unprepare,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+}
+
+const struct mtk_mipitx_data mt2701_mipitx_data = {
+ .mppll_preserve = (3 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
+
+const struct mtk_mipitx_data mt8173_mipitx_data = {
+ .mppll_preserve = (0 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
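The PLL setup above programs the feedback word (PCW) as a 7.24 fixed-point value: data_rate * 2 * txdiv / 26 MHz, scaled by 2^24, with the integer part in bits 24..30 and the fraction in bits 0..23. The standalone check below reproduces that arithmetic for one illustrative rate; div_u64 is replaced by a plain 64-bit division and the 500 Mbit/s figure is an assumption, not taken from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t data_rate = 500000000ULL;	/* assumed: 500 Mbit/s, so txdiv = 1 */
	unsigned int txdiv = 1;

	/*
	 * pcw = data_rate * 2 * txdiv / 26 MHz, expressed in 7.24 fixed point
	 * (the 26 MHz reference and post-divider of 4 come from the comment
	 * in mtk_mipi_tx_pll_prepare() above).
	 */
	uint64_t pcw = ((data_rate * 2 * txdiv) << 24) / 26000000ULL;

	printf("pcw = 0x%llx (integer part %llu)\n",
	       (unsigned long long)pcw, (unsigned long long)(pcw >> 24));
	return 0;
}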
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
new file mode 100644
index 000000000000..91f08a351fd0
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_LANE_CON 0x000c
+#define RG_DSI_CPHY_T1DRV_EN BIT(0)
+#define RG_DSI_ANA_CK_SEL BIT(1)
+#define RG_DSI_PHY_CK_SEL BIT(2)
+#define RG_DSI_CPHY_EN BIT(3)
+#define RG_DSI_PHYCK_INV_EN BIT(4)
+#define RG_DSI_PWR04_EN BIT(5)
+#define RG_DSI_BG_LPF_EN BIT(6)
+#define RG_DSI_BG_CORE_EN BIT(7)
+#define RG_DSI_PAD_TIEL_SEL BIT(8)
+
+#define MIPITX_PLL_PWR 0x0028
+#define MIPITX_PLL_CON0 0x002c
+#define MIPITX_PLL_CON1 0x0030
+#define MIPITX_PLL_CON2 0x0034
+#define MIPITX_PLL_CON3 0x0038
+#define MIPITX_PLL_CON4 0x003c
+#define RG_DSI_PLL_IBIAS (3 << 10)
+
+#define MIPITX_D2_SW_CTL_EN 0x0144
+#define MIPITX_D0_SW_CTL_EN 0x0244
+#define MIPITX_CK_CKMODE_EN 0x0328
+#define DSI_CK_CKMODE_EN BIT(0)
+#define MIPITX_CK_SW_CTL_EN 0x0344
+#define MIPITX_D1_SW_CTL_EN 0x0444
+#define MIPITX_D3_SW_CTL_EN 0x0544
+#define DSI_SW_CTL_EN BIT(0)
+#define AD_DSI_PLL_SDM_PWR_ON BIT(0)
+#define AD_DSI_PLL_SDM_ISO_EN BIT(1)
+
+#define RG_DSI_PLL_EN BIT(4)
+#define RG_DSI_PLL_POSDIV (0x7 << 8)
+
+static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ unsigned int txdiv, txdiv0;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 2000000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ } else if (mipi_tx->data_rate >= 1000000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ } else if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ } else if (mipi_tx->data_rate > 250000000) {
+ txdiv = 8;
+ txdiv0 = 3;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 16;
+ txdiv0 = 4;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ udelay(1);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV,
+ txdiv0 << 8);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ return 0;
+}
+
+static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1600000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .enable = mtk_mipi_tx_pll_enable,
+ .disable = mtk_mipi_tx_pll_disable,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* BG_LPF_EN / BG_CORE_EN */
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ usleep_range(30, 100);
+ writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+
+ /* Switch OFF each Lane */
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* Switch ON each Lane */
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON);
+}
+
+const struct mtk_mipitx_data mt8183_mipitx_data = {
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index a24f8dec5adc..397c33182f4f 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -372,6 +372,33 @@ static const struct component_master_ops meson_drv_master_ops = {
.unbind = meson_drv_unbind,
};
+static int __maybe_unused meson_drv_pm_suspend(struct device *dev)
+{
+ struct meson_drm *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int __maybe_unused meson_drv_pm_resume(struct device *dev)
+{
+ struct meson_drm *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+
+ meson_vpu_init(priv);
+ meson_venc_init(priv);
+ meson_vpp_init(priv);
+ meson_viu_init(priv);
+
+ drm_mode_config_helper_resume(priv->drm);
+
+ return 0;
+}
+
static int compare_of(struct device *dev, void *data)
{
DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -467,11 +494,16 @@ static const struct of_device_id dt_match[] = {
};
MODULE_DEVICE_TABLE(of, dt_match);
+static const struct dev_pm_ops meson_drv_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_drv_pm_suspend, meson_drv_pm_resume)
+};
+
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
+ .pm = &meson_drv_pm_ops,
},
};
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 68bbd987147b..3bb7ffe5fc39 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -802,6 +802,47 @@ static bool meson_hdmi_connector_is_available(struct device *dev)
return false;
}
+static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
+{
+ struct meson_drm *priv = meson_dw_hdmi->priv;
+
+ /* Enable clocks */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
+
+ /* Bring HDMITX MEM output of power down */
+ regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
+
+ /* Reset HDMITX APB & TX & PHY */
+ reset_control_reset(meson_dw_hdmi->hdmitx_apb);
+ reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
+ reset_control_reset(meson_dw_hdmi->hdmitx_phy);
+
+ /* Enable APB3 fail on error */
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ }
+
+ /* Bring out of reset */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
+
+ msleep(20);
+
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_CLK_CNTL, 0xff);
+
+ /* Enable HDMI-TX Interrupt */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
+
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_CORE);
+
+}
+
static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -925,40 +966,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
DRM_DEBUG_DRIVER("encoder initialized\n");
- /* Enable clocks */
- regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
-
- /* Bring HDMITX MEM output of power down */
- regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
-
- /* Reset HDMITX APB & TX & PHY */
- reset_control_reset(meson_dw_hdmi->hdmitx_apb);
- reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
- reset_control_reset(meson_dw_hdmi->hdmitx_phy);
-
- /* Enable APB3 fail on error */
- if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
- }
-
- /* Bring out of reset */
- meson_dw_hdmi->data->top_write(meson_dw_hdmi,
- HDMITX_TOP_SW_RESET, 0);
-
- msleep(20);
-
- meson_dw_hdmi->data->top_write(meson_dw_hdmi,
- HDMITX_TOP_CLK_CNTL, 0xff);
-
- /* Enable HDMI-TX Interrupt */
- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
- HDMITX_TOP_INTR_CORE);
-
- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi_init(meson_dw_hdmi);
/* Bridge / Connector */
@@ -969,6 +977,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
+ dw_plat_data->use_drm_infoframe = true;
+
platform_set_drvdata(pdev, meson_dw_hdmi);
meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
@@ -994,6 +1007,34 @@ static const struct component_ops meson_dw_hdmi_ops = {
.unbind = meson_dw_hdmi_unbind,
};
+static int __maybe_unused meson_dw_hdmi_pm_suspend(struct device *dev)
+{
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ if (!meson_dw_hdmi)
+ return 0;
+
+ /* Reset TOP */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
+
+ return 0;
+}
+
+static int __maybe_unused meson_dw_hdmi_pm_resume(struct device *dev)
+{
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ if (!meson_dw_hdmi)
+ return 0;
+
+ meson_dw_hdmi_init(meson_dw_hdmi);
+
+ dw_hdmi_resume(meson_dw_hdmi->hdmi);
+
+ return 0;
+}
+
static int meson_dw_hdmi_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &meson_dw_hdmi_ops);
@@ -1006,6 +1047,11 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_dw_hdmi_pm_suspend,
+ meson_dw_hdmi_pm_resume)
+};
+
static const struct of_device_id meson_dw_hdmi_of_table[] = {
{ .compatible = "amlogic,meson-gxbb-dw-hdmi",
.data = &meson_dw_hdmi_gx_data },
@@ -1025,6 +1071,7 @@ static struct platform_driver meson_dw_hdmi_platform_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_hdmi_of_table,
+ .pm = &meson_dw_hdmi_pm_ops,
},
};
module_platform_driver(meson_dw_hdmi_platform_driver);
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index ac491a781952..f690793ae2d5 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -638,13 +638,18 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) ||
- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
if (frac >= HDMI_FRAC_MAX_GXL)
return false;
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ /* Empiric supported min/max dividers */
+ if (m < 106 || m > 247)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_G12A)
+ return false;
}
return true;
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 76fee0fbdcae..aed11f4f4c55 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -4,6 +4,8 @@ config DRM_MGAG200
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
This is a KMS driver for the MGA G200 server chips, it
does not support the original MGA G200 or any of the desktop
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 289ce3e29032..79711dbb5b03 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -12,35 +12,10 @@
static bool warn_transparent = true;
static bool warn_palette = true;
-/*
- Hide the cursor off screen. We can't disable the cursor hardware because it
- takes too long to re-activate and causes momentary corruption
-*/
-static void mga_hide_cursor(struct mga_device *mdev)
-{
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- if (mdev->cursor.pixels_current)
- drm_gem_vram_unpin(mdev->cursor.pixels_current);
- mdev->cursor.pixels_current = NULL;
-}
-
-int mga_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height)
+static int mgag200_cursor_update(struct mga_device *mdev, void *dst, void *src,
+ unsigned int width, unsigned int height)
{
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = (struct mga_device *)dev->dev_private;
- struct drm_gem_vram_object *pixels_1 = mdev->cursor.pixels_1;
- struct drm_gem_vram_object *pixels_2 = mdev->cursor.pixels_2;
- struct drm_gem_vram_object *pixels_current = mdev->cursor.pixels_current;
- struct drm_gem_vram_object *pixels_next;
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo = NULL;
- int ret = 0;
- u8 *src, *dst;
+ struct drm_device *dev = mdev->dev;
unsigned int i, row, col;
uint32_t colour_set[16];
uint32_t *next_space = &colour_set[0];
@@ -48,79 +23,9 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t this_colour;
bool found = false;
int colour_count = 0;
- s64 gpu_addr;
- u64 dst_gpu;
u8 reg_index;
u8 this_row[48];
- if (!pixels_1 || !pixels_2) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -ENOTSUPP; /* Didn't allocate space for cursors */
- }
-
- if (WARN_ON(pixels_current &&
- pixels_1 != pixels_current &&
- pixels_2 != pixels_current)) {
- return -ENOTSUPP; /* inconsistent state */
- }
-
- if (!handle || !file_priv) {
- mga_hide_cursor(mdev);
- return 0;
- }
-
- if (width != 64 || height != 64) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -EINVAL;
- }
-
- if (pixels_current == pixels_1)
- pixels_next = pixels_2;
- else
- pixels_next = pixels_1;
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj)
- return -ENOENT;
- gbo = drm_gem_vram_of_gem(obj);
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to lock user bo\n");
- goto err_drm_gem_object_put_unlocked;
- }
- src = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(src)) {
- ret = PTR_ERR(src);
- dev_err(&dev->pdev->dev,
- "failed to kmap user buffer updates\n");
- goto err_drm_gem_vram_unpin_src;
- }
-
- /* Pin and map up-coming buffer to write colour indices */
- ret = drm_gem_vram_pin(pixels_next, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- dev_err(&dev->pdev->dev,
- "failed to pin cursor buffer: %d\n", ret);
- goto err_drm_gem_vram_kunmap_src;
- }
- dst = drm_gem_vram_kmap(pixels_next, true, NULL);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- dev_err(&dev->pdev->dev,
- "failed to kmap cursor updates: %d\n", ret);
- goto err_drm_gem_vram_unpin_dst;
- }
- gpu_addr = drm_gem_vram_offset(pixels_next);
- if (gpu_addr < 0) {
- ret = (int)gpu_addr;
- dev_err(&dev->pdev->dev,
- "failed to get cursor scanout address: %d\n", ret);
- goto err_drm_gem_vram_kunmap_dst;
- }
- dst_gpu = (u64)gpu_addr;
-
memset(&colour_set[0], 0, sizeof(uint32_t)*16);
/* width*height*4 = 16384 */
for (i = 0; i < 16384; i += 4) {
@@ -133,8 +38,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
warn_transparent = false; /* Only tell the user once. */
}
- ret = -EINVAL;
- goto err_drm_gem_vram_kunmap_dst;
+ return -EINVAL;
}
/* Don't need to store transparent pixels as colours */
if (this_colour>>24 == 0x0)
@@ -155,8 +59,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
warn_palette = false; /* Only tell the user once. */
}
- ret = -EINVAL;
- goto err_drm_gem_vram_kunmap_dst;
+ return -EINVAL;
}
*next_space = this_colour;
next_space++;
@@ -200,54 +103,218 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
memcpy_toio(dst + row*48, &this_row[0], 48);
}
+ return 0;
+}
+
+static void mgag200_cursor_set_base(struct mga_device *mdev, u64 address)
+{
+ u8 addrl = (address >> 10) & 0xff;
+ u8 addrh = (address >> 18) & 0x3f;
+
/* Program gpu address of cursor buffer */
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((dst_gpu>>10) & 0xff));
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((dst_gpu>>18) & 0x3f));
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, addrl);
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, addrh);
+}
+
+static int mgag200_show_cursor(struct mga_device *mdev, void *src,
+ unsigned int width, unsigned int height)
+{
+ struct drm_device *dev = mdev->dev;
+ struct drm_gem_vram_object *gbo;
+ void *dst;
+ s64 off;
+ int ret;
+
+ gbo = mdev->cursor.gbo[mdev->cursor.next_index];
+ if (!gbo) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -ENOTSUPP; /* Didn't allocate space for cursors */
+ }
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ dev_err(&dev->pdev->dev,
+ "failed to map cursor updates: %d\n", ret);
+ return ret;
+ }
+ off = drm_gem_vram_offset(gbo);
+ if (off < 0) {
+ ret = (int)off;
+ dev_err(&dev->pdev->dev,
+ "failed to get cursor scanout address: %d\n", ret);
+ goto err_drm_gem_vram_vunmap;
+ }
+
+ ret = mgag200_cursor_update(mdev, dst, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ mgag200_cursor_set_base(mdev, off);
/* Adjust cursor control register to turn on the cursor */
WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palletized cursor mode */
- /* Now update internal buffer pointers */
- if (pixels_current)
- drm_gem_vram_unpin(pixels_current);
- mdev->cursor.pixels_current = pixels_next;
+ drm_gem_vram_vunmap(gbo, dst);
- drm_gem_vram_kunmap(pixels_next);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- drm_gem_object_put_unlocked(obj);
+ ++mdev->cursor.next_index;
+ mdev->cursor.next_index %= ARRAY_SIZE(mdev->cursor.gbo);
+
+ return 0;
+
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, dst);
+ return ret;
+}
+
+/*
+ * Hide the cursor off screen. We can't disable the cursor hardware because
+ * it takes too long to re-activate and causes momentary corruption.
+ */
+static void mgag200_hide_cursor(struct mga_device *mdev)
+{
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+}
+
+static void mgag200_move_cursor(struct mga_device *mdev, int x, int y)
+{
+ if (WARN_ON(x <= 0))
+ return;
+ if (WARN_ON(y <= 0))
+ return;
+ if (WARN_ON(x & ~0xffff))
+ return;
+ if (WARN_ON(y & ~0xffff))
+ return;
+
+ WREG8(MGA_CURPOSXL, x & 0xff);
+ WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
+
+ WREG8(MGA_CURPOSYL, y & 0xff);
+ WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
+}
+
+int mgag200_cursor_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = mdev->dev;
+ size_t ncursors = ARRAY_SIZE(mdev->cursor.gbo);
+ size_t size;
+ int ret;
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ size = roundup(64 * 48, PAGE_SIZE);
+ if (size * ncursors > mdev->vram_fb_available)
+ return -ENOMEM;
+
+ for (i = 0; i < ncursors; ++i) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
+ size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_drm_gem_vram_put;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret) {
+ drm_gem_vram_put(gbo);
+ goto err_drm_gem_vram_put;
+ }
+
+ mdev->cursor.gbo[i] = gbo;
+ }
+
+ /*
+ * At the high end of video memory, we reserve space for
+ * buffer objects. The cursor plane uses this memory to store
+ * a double-buffered image of the current cursor. Hence, it's
+ * not available for framebuffers.
+ */
+ mdev->vram_fb_available -= ncursors * size;
return 0;
-err_drm_gem_vram_kunmap_dst:
- drm_gem_vram_kunmap(pixels_next);
-err_drm_gem_vram_unpin_dst:
- drm_gem_vram_unpin(pixels_next);
-err_drm_gem_vram_kunmap_src:
- drm_gem_vram_kunmap(gbo);
-err_drm_gem_vram_unpin_src:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_put:
+ while (i) {
+ --i;
+ gbo = mdev->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ mdev->cursor.gbo[i] = NULL;
+ }
+ return ret;
+}
+
+void mgag200_cursor_fini(struct mga_device *mdev)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ for (i = 0; i < ARRAY_SIZE(mdev->cursor.gbo); ++i) {
+ gbo = mdev->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
+}
+
+int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_gem_vram_object *gbo = NULL;
+ int ret;
+ u8 *src;
+
+ if (!handle || !file_priv) {
+ mgag200_hide_cursor(mdev);
+ return 0;
+ }
+
+ if (width != 64 || height != 64) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+ gbo = drm_gem_vram_of_gem(obj);
+ src = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ dev_err(&dev->pdev->dev,
+ "failed to map user buffer updates\n");
+ goto err_drm_gem_object_put_unlocked;
+ }
+
+ ret = mgag200_show_cursor(mdev, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+
+ /* Now update internal buffer pointers */
+ drm_gem_vram_vunmap(gbo, src);
+ drm_gem_object_put_unlocked(obj);
+
+ return 0;
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, src);
err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
}
-int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+
/* Our origin is at (64,64) */
x += 64;
y += 64;
- BUG_ON(x <= 0);
- BUG_ON(y <= 0);
- BUG_ON(x & ~0xffff);
- BUG_ON(y & ~0xffff);
+ mgag200_move_cursor(mdev, x, y);
- WREG8(MGA_CURPOSXL, x & 0xff);
- WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
-
- WREG8(MGA_CURPOSYL, y & 0xff);
- WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
return 0;
}
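The rewritten cursor code double-buffers the cursor image in two VRAM buffer objects and flips between them with a wrapping index, so a new image is never written into the buffer the hardware is currently scanning out. A stripped-down sketch of just that indexing scheme, with a stand-in pointer type, is:

/* Sketch of the double-buffer flip used by mgag200_show_cursor(). */
#define NCURSORS 2

struct cursor_ring {
        void *gbo[NCURSORS];    /* stand-in for drm_gem_vram_object pointers */
        unsigned int next_index;
};

static void *cursor_pick_and_advance(struct cursor_ring *ring)
{
        void *bo = ring->gbo[ring->next_index];

        /* Draw into bo and program its address, then advance for the next update. */
        ring->next_index = (ring->next_index + 1) % NCURSORS;
        return bo;
}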
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index afd9119b6cf1..397f8b0a9af8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "mgag200drmfb");
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -58,10 +58,7 @@ static void mga_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static const struct file_operations mgag200_driver_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 1c93f8dc08c7..0ea9a525e57d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -19,7 +19,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "mgag200_reg.h"
@@ -130,16 +129,8 @@ struct mga_connector {
};
struct mga_cursor {
- /*
- We have to have 2 buffers for the cursor to avoid occasional
- corruption while switching cursor icons.
- If either of these is NULL, then don't do hardware cursors, and
- fall back to software.
- */
- struct drm_gem_vram_object *pixels_1;
- struct drm_gem_vram_object *pixels_2;
- /* The currently displayed icon, this points to one of pixels_1, or pixels_2 */
- struct drm_gem_vram_object *pixels_current;
+ struct drm_gem_vram_object *gbo[2];
+ unsigned int next_index;
};
struct mga_mc {
@@ -174,6 +165,8 @@ struct mga_device {
struct mga_cursor cursor;
+ size_t vram_fb_available;
+
bool suspended;
int num_crtc;
enum mga_type type;
@@ -204,8 +197,10 @@ int mgag200_mm_init(struct mga_device *mdev);
void mgag200_mm_fini(struct mga_device *mdev);
int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
-int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height);
-int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+int mgag200_cursor_init(struct mga_device *mdev);
+void mgag200_cursor_fini(struct mga_device *mdev);
+int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index a9773334dedf..5f74aabcd3df 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -159,7 +159,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
- if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
dev->mode_config.preferred_depth = 16;
else
dev->mode_config.preferred_depth = 32;
@@ -171,20 +171,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
goto err_modeset;
}
- /* Make small buffers to store a hardware cursor (double buffered icon updates) */
- mdev->cursor.pixels_1 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- roundup(48*64, PAGE_SIZE),
- 0, 0);
- mdev->cursor.pixels_2 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- roundup(48*64, PAGE_SIZE),
- 0, 0);
- if (IS_ERR(mdev->cursor.pixels_2) || IS_ERR(mdev->cursor.pixels_1)) {
- mdev->cursor.pixels_1 = NULL;
- mdev->cursor.pixels_2 = NULL;
+ r = mgag200_cursor_init(mdev);
+ if (r)
dev_warn(&dev->pdev->dev,
- "Could not allocate space for cursors. Not doing hardware cursors.\n");
- }
- mdev->cursor.pixels_current = NULL;
+ "Could not initialize cursors. Not doing hardware cursors.\n");
r = drm_fbdev_generic_setup(mdev->dev, 0);
if (r)
@@ -194,6 +184,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
err_modeset:
drm_mode_config_cleanup(dev);
+ mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
err_mm:
dev->dev_private = NULL;
@@ -209,6 +200,7 @@ void mgag200_driver_unload(struct drm_device *dev)
return;
mgag200_modeset_fini(mdev);
drm_mode_config_cleanup(dev);
+ mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
dev->dev_private = NULL;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 5e778b5f1a10..5ec697148fc1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1413,8 +1413,8 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs mga_crtc_funcs = {
- .cursor_set = mga_crtc_cursor_set,
- .cursor_move = mga_crtc_cursor_move,
+ .cursor_set = mgag200_crtc_cursor_set,
+ .cursor_move = mgag200_crtc_cursor_move,
.gamma_set = mga_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = mga_crtc_destroy,
@@ -1629,7 +1629,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
bpp = connector->cmdline_mode.bpp;
}
- if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+ if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->vram_fb_available) {
if (connector->cmdline_mode.specified)
connector->cmdline_mode.specified = false;
return MODE_BAD;
@@ -1638,16 +1638,6 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
- *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
static void mga_connector_destroy(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1659,7 +1649,6 @@ static void mga_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
.get_modes = mga_vga_get_modes,
.mode_valid = mga_vga_mode_valid,
- .best_encoder = mga_connector_best_encoder,
};
static const struct drm_connector_funcs mga_vga_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 73a6b848601c..99997d737362 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -37,8 +37,7 @@ int mgag200_mm_init(struct mga_device *mdev)
struct drm_device *dev = mdev->dev;
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- mdev->mc.vram_size,
- &drm_gem_vram_mm_funcs);
+ mdev->mc.vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
@@ -51,6 +50,8 @@ int mgag200_mm_init(struct mga_device *mdev)
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
+ mdev->vram_fb_available = mdev->mc.vram_size;
+
return 0;
}
@@ -58,6 +59,8 @@ void mgag200_mm_fini(struct mga_device *mdev)
{
struct drm_device *dev = mdev->dev;
+ mdev->vram_fb_available = 0;
+
drm_vram_helper_release_mm(dev);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index e686331fa089..691c1a277d91 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -352,26 +352,26 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
cxdbg = ioremap(res->start, resource_size(res));
if (cxdbg) {
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
0x76543210);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
0xFEDCBA98);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
a6xx_state->debugbus = state_kcalloc(a6xx_state,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 4c889aabdaf9..959d03e007fa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -31,7 +31,7 @@
*/
#define DPU_DEBUG(fmt, ...) \
do { \
- if (unlikely(drm_debug & DRM_UT_KMS)) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
DRM_DEBUG(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
@@ -43,7 +43,7 @@
*/
#define DPU_DEBUG_DRIVER(fmt, ...) \
do { \
- if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ if (drm_debug_enabled(DRM_UT_DRIVER)) \
DRM_ERROR(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
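The DPU debug macros now test the category through drm_debug_enabled() rather than reading the drm_debug mask directly. A hedged example of the same routing pattern in a hypothetical driver-local macro:

/* Assumed example: route verbose output based on drm_debug_enabled(). */
#include <drm/drm_print.h>

#define MY_DEBUG(fmt, ...)                                      \
        do {                                                    \
                if (drm_debug_enabled(DRM_UT_DRIVER))           \
                        DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__);   \
                else                                            \
                        pr_debug(fmt, ##__VA_ARGS__);           \
        } while (0)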
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 0da8a4e428ad..eff1a4c61258 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -9,6 +9,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0f312ac5b624..ad4e963ccd9b 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,7 +178,9 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- encoder->bridge = edp->bridge;
+ ret = drm_bridge_attach(encoder, edp->bridge, NULL);
+ if (ret)
+ goto fail;
priv->bridges[priv->num_bridges++] = edp->bridge;
priv->connectors[priv->num_connectors++] = edp->connector;
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
index f2c17858a703..eb34243dad53 100644
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7f3dd3ffe2c9..0d9657cc70db 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -89,7 +89,6 @@ struct edp_ctrl {
/* edid raw data */
struct edid *edid;
- struct drm_dp_link dp_link;
struct drm_dp_aux *drm_aux;
/* dpcd raw data */
@@ -403,7 +402,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
u32 prate;
u32 lrate;
u32 bpp;
- u8 max_lane = ctrl->dp_link.num_lanes;
+ u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
u8 lane;
prate = ctrl->pixel_rate;
@@ -413,7 +412,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
* By default, use the maximum link rate and minimum lane count,
* so that we can do rate down shift during link training.
*/
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
prate *= bpp;
prate /= 8; /* in kByte */
@@ -439,7 +438,7 @@ static void edp_config_ctrl(struct edp_ctrl *ctrl)
data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
- if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
depth = EDP_6BIT;
@@ -701,7 +700,7 @@ static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
rate = ctrl->link_rate;
lane = ctrl->lane_cnt;
- max_lane = ctrl->dp_link.num_lanes;
+ max_lane = drm_dp_max_lane_count(ctrl->dpcd);
bpp = ctrl->color_depth * 3;
prate = ctrl->pixel_rate;
@@ -751,18 +750,22 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
static int edp_do_link_train(struct edp_ctrl *ctrl)
{
+ u8 values[2];
int ret;
- struct drm_dp_link dp_link;
DBG("");
/*
* Set the current link rate and lane cnt to panel. They may have been
* adjusted and the values are different from them in DPCD CAP
*/
- dp_link.num_lanes = ctrl->lane_cnt;
- dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate);
- dp_link.capabilities = ctrl->dp_link.capabilities;
- if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0)
+ values[0] = ctrl->lane_cnt;
+ values[1] = ctrl->link_rate;
+
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
+ sizeof(values)) < 0)
return EDP_TRAIN_FAIL;
ctrl->v_level = 0; /* start from default level */
@@ -952,6 +955,7 @@ static void edp_ctrl_on_worker(struct work_struct *work)
{
struct edp_ctrl *ctrl = container_of(
work, struct edp_ctrl, on_work);
+ u8 value;
int ret;
mutex_lock(&ctrl->dev_mutex);
@@ -965,9 +969,27 @@ static void edp_ctrl_on_worker(struct work_struct *work)
edp_ctrl_link_enable(ctrl, 1);
edp_ctrl_irq_enable(ctrl, 1);
- ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link);
- if (ret)
- goto fail;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret < 0)
+ goto fail;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+ }
ctrl->power_on = true;
@@ -1011,7 +1033,19 @@ static void edp_ctrl_off_worker(struct work_struct *work)
edp_state_ctrl(ctrl, 0);
- drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link);
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ u8 value;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret > 0) {
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ }
+ }
edp_ctrl_irq_enable(ctrl, 0);
@@ -1225,14 +1259,8 @@ int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
edp_ctrl_irq_enable(ctrl, 1);
}
- ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link);
- if (ret) {
- pr_err("%s: read dpcd cap failed, %d\n", __func__, ret);
- goto disable_ret;
- }
-
/* Initialize link rate as panel max link rate */
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
if (!ctrl->edid) {
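The hunks above open-code sink power management through the DP_SET_POWER DPCD register in place of the removed drm_dp_link_power_up()/_down() helpers. The sketch below condenses that sequence into one hypothetical helper; it uses only standard drm_dp_dpcd accessors, but the function itself is an assumption, not part of this patch.

/* Sketch: set a DP sink's power state (DP_SET_POWER_D0 or DP_SET_POWER_D3). */
#include <linux/delay.h>
#include <drm/drm_dp_helper.h>

static int example_dp_set_power(struct drm_dp_aux *aux,
                                const u8 dpcd[DP_RECEIVER_CAP_SIZE], u8 state)
{
        u8 value;
        int ret;

        /* DP_SET_POWER only exists on DPCD 1.1 and later. */
        if (dpcd[DP_DPCD_REV] < 0x11)
                return 0;

        ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
        if (ret < 0)
                return ret;

        value = (value & ~DP_SET_POWER_MASK) | state;
        ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
        if (ret < 0)
                return ret;

        /* Sinks must leave the power saving state within 1 ms of entering D0. */
        if (state == DP_SET_POWER_D0)
                usleep_range(1000, 2000);

        return 0;
}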
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 355afb936401..1a9b6289637d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,7 +327,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- encoder->bridge = hdmi->bridge;
+ ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
+ if (ret)
+ goto fail;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index bdac452b00fb..d0b84f0abee1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -14,6 +14,8 @@
#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
+#include <drm/drm_bridge.h>
+
#include "msm_drv.h"
#include "hdmi.xml.h"
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 6be879578140..1c74381a4fc9 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
struct msm_gpu_show_priv *show_priv = m->private;
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
- int ret;
-
- ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&show_priv->dev->struct_mutex);
gpu->funcs->gpu_state_put(show_priv->state);
mutex_unlock(&show_priv->dev->struct_mutex);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 12421567af89..b69ace8bf526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -95,8 +95,11 @@ static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
reg = readl(mxsfb->base + LCDC_CTRL);
- if (mxsfb->connector.display_info.num_bus_formats)
- bus_format = mxsfb->connector.display_info.bus_formats[0];
+ if (mxsfb->connector->display_info.num_bus_formats)
+ bus_format = mxsfb->connector->display_info.bus_formats[0];
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n",
+ bus_format);
reg &= ~CTRL_BUS_WIDTH_MASK;
switch (bus_format) {
@@ -204,8 +207,9 @@ static dma_addr_t mxsfb_get_fb_paddr(struct mxsfb_drm_private *mxsfb)
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
+ struct drm_device *drm = mxsfb->pipe.crtc.dev;
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
- const u32 bus_flags = mxsfb->connector.display_info.bus_flags;
+ u32 bus_flags = mxsfb->connector->display_info.bus_flags;
u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
int err;
@@ -229,6 +233,16 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
+ if (mxsfb->bridge && mxsfb->bridge->timings)
+ bus_flags = mxsfb->bridge->timings->input_bus_flags;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
+ m->crtc_clock,
+ (int)(clk_get_rate(mxsfb->clk) / 1000));
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ bus_flags);
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
+
writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
mxsfb->base + mxsfb->devdata->transfer_count);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index e8506335cd15..497cf443a9af 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -101,9 +101,25 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
+ struct drm_connector *connector;
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
struct drm_device *drm = pipe->plane.dev;
+ if (!mxsfb->connector) {
+ list_for_each_entry(connector,
+ &drm->mode_config.connector_list,
+ head)
+ if (connector->encoder == &mxsfb->pipe.encoder) {
+ mxsfb->connector = connector;
+ break;
+ }
+ }
+
+ if (!mxsfb->connector) {
+ dev_warn(drm->dev, "No connector attached, using default\n");
+ mxsfb->connector = &mxsfb->panel_connector;
+ }
+
pm_runtime_get_sync(drm->dev);
drm_panel_prepare(mxsfb->panel);
mxsfb_crtc_enable(mxsfb);
@@ -129,6 +145,9 @@ static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irq(&drm->event_lock);
+
+ if (mxsfb->connector != &mxsfb->panel_connector)
+ mxsfb->connector = NULL;
}
static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -226,16 +245,33 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL,
- &mxsfb->connector);
+ mxsfb->connector);
if (ret < 0) {
dev_err(drm->dev, "Cannot setup simple display pipe\n");
goto err_vblank;
}
- ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
- if (ret) {
- dev_err(drm->dev, "Cannot connect panel\n");
- goto err_vblank;
+ /*
+ * Attach the panel only if there is one.
+ * If no panel is attached, it must be a bridge. In this case, we
+ * need a reference to its connector for proper initialization.
+ * We will do this check in pipe->enable(), since the connector won't
+ * be attached to an encoder until then.
+ */
+
+ if (mxsfb->panel) {
+ ret = drm_panel_attach(mxsfb->panel, mxsfb->connector);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect panel: %d\n", ret);
+ goto err_vblank;
+ }
+ } else if (mxsfb->bridge) {
+ ret = drm_simple_display_pipe_attach_bridge(&mxsfb->pipe,
+ mxsfb->bridge);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ goto err_vblank;
+ }
}
drm->mode_config.min_width = MXSFB_MIN_XRES;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
index d975300dca05..0b65b5194a9c 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
@@ -27,8 +27,10 @@ struct mxsfb_drm_private {
struct clk *clk_disp_axi;
struct drm_simple_display_pipe pipe;
- struct drm_connector connector;
+ struct drm_connector panel_connector;
+ struct drm_connector *connector;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
int mxsfb_setup_crtc(struct drm_device *dev);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index be36f4d6cc96..4eb94744c526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -21,7 +21,8 @@
static struct mxsfb_drm_private *
drm_connector_to_mxsfb_drm_private(struct drm_connector *connector)
{
- return container_of(connector, struct mxsfb_drm_private, connector);
+ return container_of(connector, struct mxsfb_drm_private,
+ panel_connector);
}
static int mxsfb_panel_get_modes(struct drm_connector *connector)
@@ -76,22 +77,23 @@ static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
int mxsfb_create_output(struct drm_device *drm)
{
struct mxsfb_drm_private *mxsfb = drm->dev_private;
- struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL);
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0,
+ &mxsfb->panel, &mxsfb->bridge);
if (ret)
return ret;
- mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
- mxsfb->connector.polled = 0;
- drm_connector_helper_add(&mxsfb->connector,
- &mxsfb_panel_connector_helper_funcs);
- ret = drm_connector_init(drm, &mxsfb->connector,
- &mxsfb_panel_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (!ret)
- mxsfb->panel = panel;
+ if (mxsfb->panel) {
+ mxsfb->connector = &mxsfb->panel_connector;
+ mxsfb->connector->dpms = DRM_MODE_DPMS_OFF;
+ mxsfb->connector->polled = 0;
+ drm_connector_helper_add(mxsfb->connector,
+ &mxsfb_panel_connector_helper_funcs);
+ ret = drm_connector_init(drm, mxsfb->connector,
+ &mxsfb_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ }
return ret;
}
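mxsfb now asks drm_of_find_panel_or_bridge() for either kind of downstream device and only creates its own connector when a panel was found; a bridge supplies its connector later, once the pipe is enabled. A hedged sketch of the generic panel-or-bridge lookup (the wrapper function is hypothetical):

/* Sketch: fetch either a panel or a bridge from port 0, endpoint 0. */
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>

static int example_find_output(struct drm_device *drm,
                               struct drm_panel **panel,
                               struct drm_bridge **bridge)
{
        int ret;

        /* On success exactly one of *panel / *bridge is non-NULL. */
        ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, panel, bridge);
        if (ret)
                return ret;     /* commonly -EPROBE_DEFER or -ENODEV */

        return 0;
}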
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index dc64863b5fd8..44ee82d0c9b6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -256,7 +256,7 @@ nv04_display_create(struct drm_device *dev)
list_for_each_entry_safe(connector, ct,
&dev->mode_config.connector_list, head) {
- if (!connector->encoder_ids[0]) {
+ if (!connector->possible_encoders) {
NV_WARN(drm, "%s has no encoders, removing\n",
connector->name);
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index b46be8a091e9..549486f1d937 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector,
return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
}
-static const struct drm_connector_helper_funcs
-nv50_mstc_help = {
- .get_modes = nv50_mstc_get_modes,
- .mode_valid = nv50_mstc_mode_valid,
- .best_encoder = nv50_mstc_best_encoder,
- .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
- .atomic_check = nv50_mstc_atomic_check,
-};
-
-static enum drm_connector_status
-nv50_mstc_detect(struct drm_connector *connector, bool force)
+static int
+nv50_mstc_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
- enum drm_connector_status conn_status;
int ret;
if (drm_connector_is_unregistered(connector))
@@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
if (ret < 0 && ret != -EACCES)
return connector_status_disconnected;
- conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
- mstc->port);
+ ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
+ mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
- return conn_status;
+ return ret;
}
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .best_encoder = nv50_mstc_best_encoder,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+ .atomic_check = nv50_mstc_atomic_check,
+ .detect_ctx = nv50_mstc_detect,
+};
+
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
@@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs
nv50_mstc = {
.reset = nouveau_conn_reset,
- .detect = nv50_mstc_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
@@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
}
static void
-nv50_mstm_init(struct nv50_mstm *mstm)
+nv50_mstm_init(struct nv50_mstm *mstm, bool runtime)
{
int ret;
if (!mstm || !mstm->mgr.mst_state)
return;
- ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
if (ret == -1) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
drm_kms_helper_hotplug_event(mstm->mgr.dev);
@@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
- nv50_mstm_init(nv_encoder->dp.mstm);
+ nv50_mstm_init(nv_encoder->dp.mstm, runtime);
}
}
@@ -2392,7 +2392,7 @@ nv50_display_create(struct drm_device *dev)
/* cull any connectors we created that don't have an encoder */
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
+ if (connector->possible_encoders)
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 94dfa2e5a9ab..5b413588b823 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -365,9 +365,8 @@ find_encoder(struct drm_connector *connector, int type)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *enc;
- int i;
- drm_connector_for_each_possible_encoder(connector, enc, i) {
+ drm_connector_for_each_possible_encoder(connector, enc) {
nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
@@ -414,10 +413,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
- int i, ret;
+ int ret;
bool switcheroo_ddc = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
nv_encoder = nouveau_encoder(encoder);
switch (nv_encoder->dcb->type) {
@@ -1131,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const char *name = connector->name;
struct nouveau_encoder *nv_encoder;
int ret;
+ bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
+
+ if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ drm_dp_cec_irq(&nv_connector->aux);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
+
+ return NVIF_NOTIFY_KEEP;
+ }
ret = pm_runtime_get(drm->dev->dev);
if (ret == 0) {
@@ -1151,25 +1160,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
return NVIF_NOTIFY_DROP;
}
- if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
- NV_DEBUG(drm, "service %s\n", name);
- drm_dp_cec_irq(&nv_connector->aux);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
- nv50_mstm_service(nv_encoder->dp.mstm);
- } else {
- bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
-
+ if (!plugged)
+ drm_dp_cec_unset_edid(&nv_connector->aux);
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
if (!plugged)
- drm_dp_cec_unset_edid(&nv_connector->aux);
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
- if (!plugged)
- nv50_mstm_remove(nv_encoder->dp.mstm);
- }
-
- drm_helper_hpd_irq_event(connector->dev);
+ nv50_mstm_remove(nv_encoder->dp.mstm);
}
+ drm_helper_hpd_irq_event(connector->dev);
+
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
@@ -1415,8 +1415,7 @@ nouveau_connector_create(struct drm_device *dev,
switch (type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- drm_dp_cec_register_connector(&nv_connector->aux,
- connector->name, dev->dev);
+ drm_dp_cec_register_connector(&nv_connector->aux, connector);
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6f038511a03a..53f9bceaf17a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_connector_list_iter conn_iter;
int ret;
+ /*
+ * Enable hotplug interrupts (done as early as possible, since we need
+ * them for MST)
+ */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nvif_notify_get(&conn->hpd);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
ret = disp->init(dev, resume, runtime);
if (ret)
return ret;
@@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
*/
drm_kms_helper_poll_enable(dev);
- /* enable hotplug interrupts */
- drm_connector_list_iter_begin(dev, &conn_iter);
- nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
- struct nouveau_connector *conn = nouveau_connector(connector);
- nvif_notify_get(&conn->hpd);
- }
- drm_connector_list_iter_end(&conn_iter);
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 668d4bd0c118..df9bf1fd1bc0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -88,6 +88,7 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
}
struct nouveau_svmm {
+ struct mmu_notifier notifier;
struct nouveau_vmm *vmm;
struct {
unsigned long start;
@@ -95,9 +96,6 @@ struct nouveau_svmm {
} unmanaged;
struct mutex mutex;
-
- struct mm_struct *mm;
- struct hmm_mirror mirror;
};
#define SVMM_DBG(s,f,a...) \
@@ -251,10 +249,11 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
}
static int
-nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update)
+nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *update)
{
- struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
+ struct nouveau_svmm *svmm =
+ container_of(mn, struct nouveau_svmm, notifier);
unsigned long start = update->start;
unsigned long limit = update->end;
@@ -264,6 +263,9 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
mutex_lock(&svmm->mutex);
+ if (unlikely(!svmm->vmm))
+ goto out;
+
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
if (start < svmm->unmanaged.start) {
nouveau_svmm_invalidate(svmm, start,
@@ -273,19 +275,20 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
}
nouveau_svmm_invalidate(svmm, start, limit);
+
+out:
mutex_unlock(&svmm->mutex);
return 0;
}
-static void
-nouveau_svmm_release(struct hmm_mirror *mirror)
+static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
+ kfree(container_of(mn, struct nouveau_svmm, notifier));
}
-static const struct hmm_mirror_ops
-nouveau_svmm = {
- .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
- .release = nouveau_svmm_release,
+static const struct mmu_notifier_ops nouveau_mn_ops = {
+ .invalidate_range_start = nouveau_svmm_invalidate_range_start,
+ .free_notifier = nouveau_svmm_free_notifier,
};
void
@@ -293,8 +296,10 @@ nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
struct nouveau_svmm *svmm = *psvmm;
if (svmm) {
- hmm_mirror_unregister(&svmm->mirror);
- kfree(*psvmm);
+ mutex_lock(&svmm->mutex);
+ svmm->vmm = NULL;
+ mutex_unlock(&svmm->mutex);
+ mmu_notifier_put(&svmm->notifier);
*psvmm = NULL;
}
}
@@ -320,7 +325,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
mutex_lock(&cli->mutex);
if (cli->svm.cli) {
ret = -EBUSY;
- goto done;
+ goto out_free;
}
/* Allocate a new GPU VMM that can support SVM (managed by the
@@ -335,24 +340,26 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
.fault_replay = true,
}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
if (ret)
- goto done;
-
- /* Enable HMM mirroring of CPU address-space to VMM. */
- svmm->mm = get_task_mm(current);
- down_write(&svmm->mm->mmap_sem);
- svmm->mirror.ops = &nouveau_svmm;
- ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
- if (ret == 0) {
- cli->svm.svmm = svmm;
- cli->svm.cli = cli;
- }
- up_write(&svmm->mm->mmap_sem);
- mmput(svmm->mm);
+ goto out_free;
-done:
+ down_write(&current->mm->mmap_sem);
+ svmm->notifier.ops = &nouveau_mn_ops;
+ ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
- nouveau_svmm_fini(&svmm);
+ goto out_mm_unlock;
+ /* Note, ownership of svmm transfers to mmu_notifier */
+
+ cli->svm.svmm = svmm;
+ cli->svm.cli = cli;
+ up_write(&current->mm->mmap_sem);
mutex_unlock(&cli->mutex);
+ return 0;
+
+out_mm_unlock:
+ up_write(&current->mm->mmap_sem);
+out_free:
+ mutex_unlock(&cli->mutex);
+ kfree(svmm);
return ret;
}
@@ -475,43 +482,90 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
-static inline bool
-nouveau_range_done(struct hmm_range *range)
+struct svm_notifier {
+ struct mmu_interval_notifier notifier;
+ struct nouveau_svmm *svmm;
+};
+
+static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- bool ret = hmm_range_valid(range);
+ struct svm_notifier *sn =
+ container_of(mni, struct svm_notifier, notifier);
- hmm_range_unregister(range);
- return ret;
+ /*
+ * serializes the update to mni->invalidate_seq done by caller and
+ * prevents invalidation of the PTE from progressing while HW is being
+ * programmed. This is very hacky and only works because the normal
+ * notifier that does invalidation is always called after the range
+ * notifier.
+ */
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&sn->svmm->mutex);
+ else if (!mutex_trylock(&sn->svmm->mutex))
+ return false;
+ mmu_interval_set_seq(mni, cur_seq);
+ mutex_unlock(&sn->svmm->mutex);
+ return true;
}
-static int
-nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
+static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
+ .invalidate = nouveau_svm_range_invalidate,
+};
+
+static int nouveau_range_fault(struct nouveau_svmm *svmm,
+ struct nouveau_drm *drm, void *data, u32 size,
+ u64 *pfns, struct svm_notifier *notifier)
{
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ /* Have HMM fault pages within the fault window to the GPU. */
+ struct hmm_range range = {
+ .notifier = &notifier->notifier,
+ .start = notifier->notifier.interval_tree.start,
+ .end = notifier->notifier.interval_tree.last + 1,
+ .pfns = pfns,
+ .flags = nouveau_svm_pfn_flags,
+ .values = nouveau_svm_pfn_values,
+ .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+ };
+ struct mm_struct *mm = notifier->notifier.mm;
long ret;
- range->default_flags = 0;
- range->pfn_flags_mask = -1UL;
+ while (true) {
+ if (time_after(jiffies, timeout))
+ return -EBUSY;
+
+ range.notifier_seq = mmu_interval_read_begin(range.notifier);
+ range.default_flags = 0;
+ range.pfn_flags_mask = -1UL;
+ down_read(&mm->mmap_sem);
+ ret = hmm_range_fault(&range, 0);
+ up_read(&mm->mmap_sem);
+ if (ret <= 0) {
+ if (ret == 0 || ret == -EBUSY)
+ continue;
+ return ret;
+ }
- ret = hmm_range_register(range, &svmm->mirror);
- if (ret) {
- up_read(&svmm->mm->mmap_sem);
- return (int)ret;
+ mutex_lock(&svmm->mutex);
+ if (mmu_interval_read_retry(range.notifier,
+ range.notifier_seq)) {
+ mutex_unlock(&svmm->mutex);
+ continue;
+ }
+ break;
}
- if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- up_read(&svmm->mm->mmap_sem);
- return -EBUSY;
- }
+ nouveau_dmem_convert_pfn(drm, &range);
- ret = hmm_range_fault(range, 0);
- if (ret <= 0) {
- if (ret == 0)
- ret = -EBUSY;
- up_read(&svmm->mm->mmap_sem);
- hmm_range_unregister(range);
- return ret;
- }
- return 0;
+ svmm->vmm->vmm.object.client->super = true;
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
+ svmm->vmm->vmm.object.client->super = false;
+ mutex_unlock(&svmm->mutex);
+
+ return ret;
}
static int
@@ -531,7 +585,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
} i;
u64 phys[16];
} args;
- struct hmm_range range;
struct vm_area_struct *vma;
u64 inst, start, limit;
int fi, fn, pi, fill;
@@ -587,6 +640,9 @@ nouveau_svm_fault(struct nvif_notify *notify)
args.i.p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
+ struct svm_notifier notifier;
+ struct mm_struct *mm;
+
/* Cancel any faults from non-SVM channels. */
if (!(svmm = buffer->fault[fi]->svmm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
@@ -606,24 +662,32 @@ nouveau_svm_fault(struct nvif_notify *notify)
start = max_t(u64, start, svmm->unmanaged.limit);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
+ mm = svmm->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
+ continue;
+ }
+
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&svmm->mm->mmap_sem);
- vma = find_vma_intersection(svmm->mm, start, limit);
+ down_read(&mm->mmap_sem);
+ vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
start = max_t(u64, start, vma->vm_start);
limit = min_t(u64, limit, vma->vm_end);
+ up_read(&mm->mmap_sem);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
if (buffer->fault[fi]->addr != start) {
SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
- up_read(&svmm->mm->mmap_sem);
+ mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
@@ -679,33 +743,19 @@ nouveau_svm_fault(struct nvif_notify *notify)
args.i.p.addr,
args.i.p.addr + args.i.p.size, fn - fi);
- /* Have HMM fault pages within the fault window to the GPU. */
- range.start = args.i.p.addr;
- range.end = args.i.p.addr + args.i.p.size;
- range.pfns = args.phys;
- range.flags = nouveau_svm_pfn_flags;
- range.values = nouveau_svm_pfn_values;
- range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
-again:
- ret = nouveau_range_fault(svmm, &range);
- if (ret == 0) {
- mutex_lock(&svmm->mutex);
- if (!nouveau_range_done(&range)) {
- mutex_unlock(&svmm->mutex);
- goto again;
- }
-
- nouveau_dmem_convert_pfn(svm->drm, &range);
-
- svmm->vmm->vmm.object.client->super = true;
- ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
- &args, sizeof(args.i) +
- pi * sizeof(args.phys[0]),
- NULL);
- svmm->vmm->vmm.object.client->super = false;
- mutex_unlock(&svmm->mutex);
- up_read(&svmm->mm->mmap_sem);
+ notifier.svmm = svmm;
+ ret = mmu_interval_notifier_insert(&notifier.notifier,
+ svmm->notifier.mm,
+ args.i.p.addr, args.i.p.size,
+ &nouveau_svm_mni_ops);
+ if (!ret) {
+ ret = nouveau_range_fault(
+ svmm, svm->drm, &args,
+ sizeof(args.i) + pi * sizeof(args.phys[0]),
+ args.phys, &notifier);
+ mmu_interval_notifier_remove(&notifier.notifier);
}
+ mmput(mm);
/* Cancel any faults in the window whose pages didn't manage
* to keep their valid bit, or stay writeable when required.
@@ -714,10 +764,10 @@ again:
*/
while (fi < fn) {
struct nouveau_svm_fault *fault = buffer->fault[fi++];
- pi = (fault->addr - range.start) >> PAGE_SHIFT;
+ pi = (fault->addr - args.i.p.addr) >> PAGE_SHIFT;
if (ret ||
- !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
- (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
+ !(args.phys[pi] & NVIF_VMM_PFNMAP_V0_V) ||
+ (!(args.phys[pi] & NVIF_VMM_PFNMAP_V0_W) &&
fault->access != 0 && fault->access != 3)) {
nouveau_svm_fault_cancel_fault(svm, fault);
continue;
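
The nouveau hunks above convert the SVM fault path from the old hmm mirror/register API to a per-fault mmu_interval_notifier plus a read-begin / fault / read-retry loop. The following is a minimal standalone sketch of that retry pattern, not taken from the patch: the helper name fault_and_map_sketch() and the driver_lock parameter are hypothetical stand-ins for svmm->mutex and the nvif ioctl that programs the GPU page tables; it assumes the usual <linux/mmu_notifier.h> and <linux/hmm.h> of this kernel generation (mmap_sem, hmm_range_fault(range, 0)).

	/*
	 * Sketch only: retry CPU page faulting until no invalidation has
	 * raced with us, then map to the device under the driver lock.
	 */
	static int fault_and_map_sketch(struct mmu_interval_notifier *mni,
					struct mutex *driver_lock,
					struct hmm_range *range)
	{
		struct mm_struct *mm = mni->mm;
		long ret;

		while (true) {
			range->notifier_seq = mmu_interval_read_begin(mni);

			down_read(&mm->mmap_sem);
			ret = hmm_range_fault(range, 0);
			up_read(&mm->mmap_sem);
			if (ret <= 0) {
				if (ret == 0 || ret == -EBUSY)
					continue;	/* transient, fault again */
				return ret;
			}

			mutex_lock(driver_lock);
			if (mmu_interval_read_retry(mni, range->notifier_seq)) {
				/* an invalidation raced with the fault */
				mutex_unlock(driver_lock);
				continue;
			}
			break;
		}

		/* device mapping (the nvif ioctl in the real code) goes here */
		mutex_unlock(driver_lock);
		return 0;
	}
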
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index f0daf958e03a..77a0c6ad3cef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -236,6 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_bo_device_init(&drm->ttm.bdev,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
+ dev->vma_offset_manager,
drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 904101c5e79d..5950c3f52c2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -6,7 +6,7 @@ omapdss-base-y := base.o display.o dss-of.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
-omapdss-y := core.o dss.o dispc.o dispc_coefs.o \
+omapdss-y := dss.o dispc.o dispc_coefs.o \
pll.o video-pll.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
deleted file mode 100644
index 6ac497b63711..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "CORE"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "omapdss.h"
-#include "dss.h"
-
-/* INIT */
-static struct platform_driver * const omap_dss_drivers[] = {
- &omap_dsshw_driver,
- &omap_dispchw_driver,
-#ifdef CONFIG_OMAP2_DSS_DSI
- &omap_dsihw_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- &omap_venchw_driver,
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
- &omapdss_hdmi4hw_driver,
-#endif
-#ifdef CONFIG_OMAP5_DSS_HDMI
- &omapdss_hdmi5hw_driver,
-#endif
-};
-
-static int __init omap_dss_init(void)
-{
- return platform_register_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-}
-
-static void __exit omap_dss_exit(void)
-{
- platform_unregister_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-}
-
-module_init(omap_dss_init);
-module_exit(omap_dss_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
-MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index ed0ccbeed70f..413dbdd1771e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -114,6 +114,7 @@ struct dispc_features {
const unsigned int num_reg_fields;
const enum omap_overlay_caps *overlay_caps;
const u32 **supported_color_modes;
+ const u32 *supported_scaler_color_modes;
unsigned int num_mgrs;
unsigned int num_ovls;
unsigned int buffer_size_unit;
@@ -184,9 +185,6 @@ struct dispc_device {
struct regmap *syscon_pol;
u32 syscon_pol_offset;
-
- /* DISPC_CONTROL & DISPC_CONFIG lock*/
- spinlock_t control_lock;
};
enum omap_color_component {
@@ -368,25 +366,17 @@ static inline u32 dispc_read_reg(struct dispc_device *dispc, u16 idx)
static u32 mgr_fld_read(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld)
{
- const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
- return REG_GET(dispc, rfld.reg, rfld.high, rfld.low);
+ return REG_GET(dispc, rfld->reg, rfld->high, rfld->low);
}
static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld, int val)
{
- const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
- const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG;
- unsigned long flags;
+ const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
- if (need_lock) {
- spin_lock_irqsave(&dispc->control_lock, flags);
- REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
- spin_unlock_irqrestore(&dispc->control_lock, flags);
- } else {
- REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
- }
+ REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
}
static int dispc_get_num_ovls(struct dispc_device *dispc)
@@ -2510,6 +2500,19 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
if (width == out_width && height == out_height)
return 0;
+ if (dispc->feat->supported_scaler_color_modes) {
+ const u32 *modes = dispc->feat->supported_scaler_color_modes;
+ unsigned int i;
+
+ for (i = 0; modes[i]; ++i) {
+ if (modes[i] == fourcc)
+ break;
+ }
+
+ if (modes[i] == 0)
+ return -EINVAL;
+ }
+
if (plane == OMAP_DSS_WB) {
switch (fourcc) {
case DRM_FORMAT_NV12:
@@ -4225,6 +4228,12 @@ static const u32 *omap4_dispc_supported_color_modes[] = {
DRM_FORMAT_RGBX8888),
};
+static const u32 omap3_dispc_supported_scaler_color_modes[] = {
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ 0,
+};
+
static const struct dispc_features omap24xx_dispc_feats = {
.sw_start = 5,
.fp_start = 15,
@@ -4253,6 +4262,7 @@ static const struct dispc_features omap24xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap2_dispc_reg_fields),
.overlay_caps = omap2_dispc_overlay_caps,
.supported_color_modes = omap2_dispc_supported_color_modes,
+ .supported_scaler_color_modes = COLOR_ARRAY(DRM_FORMAT_XRGB8888),
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4287,6 +4297,7 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4321,6 +4332,7 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4355,6 +4367,7 @@ static const struct dispc_features omap36xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3630_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4389,6 +4402,7 @@ static const struct dispc_features am43xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 1,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4768,8 +4782,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
platform_set_drvdata(pdev, dispc);
dispc->dss = dss;
- spin_lock_init(&dispc->control_lock);
-
/*
* The OMAP3-based models can't be told apart using the compatible
* string, use SoC device matching.
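
dispc_ovl_calc_scaling() above gains an optional per-SoC allow-list of fourccs that the scaler can handle, terminated by 0. A small sketch of that check, with scaler_modes[] standing in for feat->supported_scaler_color_modes (the helper name is hypothetical):

	/* Sketch: zero-terminated fourcc allow-list lookup. */
	static bool scaler_supports_fourcc(const u32 *scaler_modes, u32 fourcc)
	{
		unsigned int i;

		if (!scaler_modes)		/* no restriction on this SoC */
			return true;

		for (i = 0; scaler_modes[i]; ++i) {
			if (scaler_modes[i] == fourcc)
				return true;
		}

		return false;			/* hit the terminator: reject */
	}
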
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b30fcaa2d0f5..da16ea095f13 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -3548,7 +3548,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
static void dsi_proto_timings(struct dsi_data *dsi)
{
- unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned int tlpx, tclk_zero, tclk_prepare;
unsigned int tclk_pre, tclk_post;
unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
unsigned int ths_trail, ths_exit;
@@ -3567,7 +3567,6 @@ static void dsi_proto_timings(struct dsi_data *dsi)
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
- tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 4bdd63b57100..225ec808b01a 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1598,3 +1598,40 @@ struct platform_driver omap_dsshw_driver = {
.suppress_bind_attrs = true,
},
};
+
+/* INIT */
+static struct platform_driver * const omap_dss_drivers[] = {
+ &omap_dsshw_driver,
+ &omap_dispchw_driver,
+#ifdef CONFIG_OMAP2_DSS_DSI
+ &omap_dsihw_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ &omap_venchw_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ &omapdss_hdmi4hw_driver,
+#endif
+#ifdef CONFIG_OMAP5_DSS_HDMI
+ &omapdss_hdmi5hw_driver,
+#endif
+};
+
+static int __init omap_dss_init(void)
+{
+ return platform_register_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+}
+
+static void __exit omap_dss_exit(void)
+{
+ platform_unregister_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+}
+
+module_init(omap_dss_init);
+module_exit(omap_dss_exit);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 5d5d5588ebc1..ea5d5c228534 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -542,8 +542,9 @@ static void hdmi_core_audio_config(struct hdmi_core_data *core,
}
/* Set ACR clock divisor */
- REG_FLD_MOD(av_base,
- HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+ if (cfg->use_mclk)
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_FREQ_SVAL,
+ cfg->mclk_mode, 2, 0);
r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
/*
@@ -675,7 +676,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config acore;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -737,7 +738,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
acore.n = n;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 7400fb99d453..ff4d35c8771f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,24 +23,12 @@
#include "hdmi5_core.h"
-/* only 24 bit color depth used for now */
-static const struct csc_table csc_table_deepcolor[] = {
- /* HDMI_DEEP_COLOR_24BIT */
- [0] = { 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32, },
- /* HDMI_DEEP_COLOR_30BIT */
- [1] = { 7015, 0, 0, 128, 0, 7015, 0, 128, 0, 0, 7015, 128, },
- /* HDMI_DEEP_COLOR_36BIT */
- [2] = { 7010, 0, 0, 512, 0, 7010, 0, 512, 0, 0, 7010, 512, },
- /* FULL RANGE */
- [3] = { 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0, },
-};
-
static void hdmi_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned int ss_scl_high = 4600; /* ns */
- const unsigned int ss_scl_low = 5400; /* ns */
+ const unsigned int ss_scl_high = 4700; /* ns */
+ const unsigned int ss_scl_low = 5500; /* ns */
const unsigned int fs_scl_high = 600; /* ns */
const unsigned int fs_scl_low = 1300; /* ns */
const unsigned int sda_hold = 1000; /* ns */
@@ -397,14 +385,6 @@ static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 0 : 2, 1, 0);
}
-static void hdmi_core_config_csc(struct hdmi_core_data *core)
-{
- int clr_depth = 0; /* 24 bit color depth */
-
- /* CSC_COLORDEPTH */
- REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, clr_depth, 7, 4);
-}
-
static void hdmi_core_config_video_sampler(struct hdmi_core_data *core)
{
int video_mapping = 1; /* for 24 bit color depth */
@@ -469,47 +449,67 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0);
}
-static void hdmi_core_csc_config(struct hdmi_core_data *core,
- struct csc_table csc_coeff)
+static void hdmi_core_write_csc(struct hdmi_core_data *core,
+ const struct csc_table *csc_coeff)
{
void __iomem *base = core->base;
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff.a1 >> 8 , 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff.a1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff.a2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff.a2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff.a3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff.a3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff.a4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff.a4, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff.b1 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff.b1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff.b2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff.b2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff.b3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff.b3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff.b4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff.b4, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff.c1 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff.c1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff.c2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff.c2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff.c3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff.c3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff.c4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff.c4, 7, 0);
-
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff->a1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff->a1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff->a2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff->a2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff->a3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff->a3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff->a4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff->a4, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff->b1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff->b1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff->b2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff->b2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff->b3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff->b3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff->b4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff->b4, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff->c1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff->c1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff->c2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff->c2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff->c3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff->c3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff->c4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff->c4, 7, 0);
+
+ /* enable CSC */
REG_FLD_MOD(base, HDMI_CORE_MC_FLOWCTRL, 0x1, 0, 0);
}
-static void hdmi_core_configure_range(struct hdmi_core_data *core)
+static void hdmi_core_configure_range(struct hdmi_core_data *core,
+ enum hdmi_quantization_range range)
{
- struct csc_table csc_coeff = { 0 };
+ static const struct csc_table csc_limited_range = {
+ 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32
+ };
+ static const struct csc_table csc_full_range = {
+ 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0
+ };
+ const struct csc_table *csc_coeff;
+
+ /* CSC_COLORDEPTH = 24 bits */
+ REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, 0, 7, 4);
+
+ switch (range) {
+ case HDMI_QUANTIZATION_RANGE_FULL:
+ csc_coeff = &csc_full_range;
+ break;
- /* support limited range with 24 bit color depth for now */
- csc_coeff = csc_table_deepcolor[0];
+ case HDMI_QUANTIZATION_RANGE_DEFAULT:
+ case HDMI_QUANTIZATION_RANGE_LIMITED:
+ default:
+ csc_coeff = &csc_limited_range;
+ break;
+ }
- hdmi_core_csc_config(core, csc_coeff);
+ hdmi_core_write_csc(core, csc_coeff);
}
static void hdmi_core_enable_video_path(struct hdmi_core_data *core)
@@ -600,9 +600,20 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
+ enum hdmi_quantization_range range;
hdmi_core_mask_interrupts(core);
+ if (cfg->hdmi_dvi_mode == HDMI_HDMI) {
+ char vic = cfg->infoframe.video_code;
+
+ /* All CEA modes other than VIC 1 use limited quantization range. */
+ range = vic > 1 ? HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL;
+ } else {
+ range = HDMI_QUANTIZATION_RANGE_FULL;
+ }
+
hdmi_core_init(&v_core_cfg, cfg);
hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
@@ -616,9 +627,8 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_wp_video_config_interface(wp, &vm);
- /* support limited range with 24 bit color depth for now */
- hdmi_core_configure_range(core);
- cfg->infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+ hdmi_core_configure_range(core, range);
+ cfg->infoframe.quantization_range = range;
/*
* configure core video part, set software reset in the core
@@ -628,7 +638,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_video_config(core, &v_core_cfg);
hdmi_core_config_video_packetizer(core);
- hdmi_core_config_csc(core);
hdmi_core_config_video_sampler(core);
if (cfg->hdmi_dvi_mode == HDMI_HDMI)
@@ -798,7 +807,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -841,7 +850,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
core_cfg.n = n;
core_cfg.cts = cts;
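
hdmi5_configure() above now derives the quantization range from the mode instead of hard-coding limited range. A sketch of that selection, assuming enum hdmi_quantization_range from <linux/hdmi.h>; the helper name is hypothetical:

	/* Sketch: DVI sinks and VIC 1 (640x480) keep full range, all other
	 * CEA/CTA-861 modes default to limited range. */
	static enum hdmi_quantization_range
	pick_range_sketch(bool is_hdmi, u8 vic)
	{
		if (!is_hdmi)
			return HDMI_QUANTIZATION_RANGE_FULL;

		return vic > 1 ? HDMI_QUANTIZATION_RANGE_LIMITED
			       : HDMI_QUANTIZATION_RANGE_FULL;
	}
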
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 14b41de44ebc..0693d34fca1b 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include "dss.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 835e6654fa82..43c1d096b021 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -113,7 +113,7 @@ extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
static inline enum tiler_fmt gem2fmt(u32 flags)
{
- switch (flags & OMAP_BO_TILED) {
+ switch (flags & OMAP_BO_TILED_MASK) {
case OMAP_BO_TILED_8:
return TILFMT_8BIT;
case OMAP_BO_TILED_16:
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2983c003698e..b3e22c890c51 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -11,6 +11,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 6fe14111cd95..24bbe9f2a32e 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 1b8b5108caf8..9aeab81dfb90 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -95,7 +95,7 @@ static u32 get_linear_addr(struct drm_framebuffer *fb,
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -135,7 +135,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
- struct plane *plane = &omap_fb->planes[0];
u32 x, y, orient = 0;
info->fourcc = fb->format->format;
@@ -154,7 +153,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -209,10 +208,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
info->screen_width /= format->cpp[0];
if (fb->format->format == DRM_FORMAT_NV12) {
- plane = &omap_fb->planes[1];
-
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 08f539efddfb..e518d93ca6df 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -67,7 +67,7 @@ struct omap_gem_object {
/**
* # of users of dma_addr
*/
- u32 dma_addr_cnt;
+ refcount_t dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -196,7 +196,7 @@ static void omap_gem_evict(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
@@ -324,7 +324,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
size_t size = obj->size;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
/* for tiled buffers, the virtual size has stride rounded up
* to 4kb.. (to hide the fact that row n+1 might start 16kb or
* 32kb later!). But we don't back the entire buffer with
@@ -513,7 +513,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
* probably trigger put_pages()?
*/
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = omap_gem_fault_2d(obj, vma, vmf);
else
ret = omap_gem_fault_1d(obj, vma, vmf);
@@ -773,18 +773,20 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
mutex_lock(&omap_obj->lock);
if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
- if (omap_obj->dma_addr_cnt == 0) {
+ if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
BUG_ON(omap_obj->block);
+ refcount_set(&omap_obj->dma_addr_cnt, 1);
+
ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
omap_obj->height, 0);
@@ -813,13 +815,15 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->block = block;
DBG("got dma address: %pad", &omap_obj->dma_addr);
+ } else {
+ refcount_inc(&omap_obj->dma_addr_cnt);
}
- omap_obj->dma_addr_cnt++;
-
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else if (omap_gem_is_contiguous(omap_obj)) {
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
goto fail;
@@ -832,38 +836,46 @@ fail:
}
/**
+ * omap_gem_unpin_locked() - Unpin a GEM object from memory
+ * @obj: the GEM object
+ *
+ * omap_gem_unpin() without locking.
+ */
+static void omap_gem_unpin_locked(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret;
+
+ if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
+ }
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->dma_addr = 0;
+ omap_obj->block = NULL;
+ }
+}
+
+/**
* omap_gem_unpin() - Unpin a GEM object from memory
* @obj: the GEM object
*
* Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
- * reference-counted, the actualy unpin will only be performed when the number
+ * reference-counted, the actual unpin will only be performed when the number
* of calls to this function matches the number of calls to omap_gem_pin().
*/
void omap_gem_unpin(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret;
mutex_lock(&omap_obj->lock);
-
- if (omap_obj->dma_addr_cnt > 0) {
- omap_obj->dma_addr_cnt--;
- if (omap_obj->dma_addr_cnt == 0) {
- ret = tiler_unpin(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not unpin pages: %d\n", ret);
- }
- ret = tiler_release(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not release unmap: %d\n", ret);
- }
- omap_obj->dma_addr = 0;
- omap_obj->block = NULL;
- }
- }
-
+ omap_gem_unpin_locked(obj);
mutex_unlock(&omap_obj->lock);
}
@@ -879,8 +891,8 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
mutex_lock(&omap_obj->lock);
- if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
- (omap_obj->flags & OMAP_BO_TILED)) {
+ if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
+ (omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
@@ -895,7 +907,7 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
return ret;
}
@@ -1030,10 +1042,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
- off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
+ off, &omap_obj->dma_addr,
+ refcount_read(&omap_obj->dma_addr_cnt),
omap_obj->vaddr, omap_obj->roll);
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
@@ -1093,7 +1106,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
mutex_lock(&omap_obj->lock);
/* The object should not be pinned. */
- WARN_ON(omap_obj->dma_addr_cnt > 0);
+ WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);
if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
@@ -1120,6 +1133,38 @@ void omap_gem_free_object(struct drm_gem_object *obj)
kfree(omap_obj);
}
+static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+
+ switch (flags & OMAP_BO_CACHE_MASK) {
+ case OMAP_BO_CACHED:
+ case OMAP_BO_WC:
+ case OMAP_BO_CACHE_MASK:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (flags & OMAP_BO_TILED_MASK) {
+ if (!priv->usergart)
+ return false;
+
+ switch (flags & OMAP_BO_TILED_MASK) {
+ case OMAP_BO_TILED_8:
+ case OMAP_BO_TILED_16:
+ case OMAP_BO_TILED_32:
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, u32 flags)
@@ -1131,18 +1176,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
size_t size;
int ret;
- /* Validate the flags and compute the memory and cache flags. */
- if (flags & OMAP_BO_TILED) {
- if (!priv->usergart) {
- dev_err(dev->dev, "Tiled buffers require DMM\n");
- return NULL;
- }
+ if (!omap_gem_validate_flags(dev, flags))
+ return NULL;
+ /* Validate the flags and compute the memory and cache flags. */
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* Tiled buffers are always shmem paged backed. When they are
* scanned out, they are remapped into DMM/TILER.
*/
- flags &= ~OMAP_BO_SCANOUT;
flags |= OMAP_BO_MEM_SHMEM;
/*
@@ -1153,9 +1195,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
flags |= tiler_get_cpu_cache_flags();
} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
/*
- * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
- * tiled. However, to lower the pressure on memory allocation,
- * use contiguous memory only if no TILER is available.
+ * If we don't have DMM, we must allocate scanout buffers
+ * from contiguous DMA memory.
*/
flags |= OMAP_BO_MEM_DMA_API;
} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
@@ -1174,7 +1215,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->flags = flags;
mutex_init(&omap_obj->lock);
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* For tiled buffers align dimensions to slot boundaries and
* calculate size based on aligned dimensions.
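
The omap_gem.c changes above turn the plain u32 pin count into a refcount_t. A minimal sketch of that pin/unpin pattern, not from the patch: struct pinned_obj and the two helpers are hypothetical, and the point is that the first pin uses refcount_set(, 1) (refcount_inc() from zero would warn) while the last unpin tears the mapping down via refcount_dec_and_test().

	struct pinned_obj {
		struct mutex lock;
		refcount_t pin_cnt;
	};

	static int obj_pin_sketch(struct pinned_obj *obj)
	{
		mutex_lock(&obj->lock);
		if (refcount_read(&obj->pin_cnt) == 0) {
			refcount_set(&obj->pin_cnt, 1);
			/* expensive mapping work only on the first pin */
		} else {
			refcount_inc(&obj->pin_cnt);
		}
		mutex_unlock(&obj->lock);
		return 0;
	}

	static void obj_unpin_sketch(struct pinned_obj *obj)
	{
		mutex_lock(&obj->lock);
		if (refcount_dec_and_test(&obj->pin_cnt)) {
			/* last user gone: release the mapping */
		}
		mutex_unlock(&obj->lock);
	}
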
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index e8c3ae7ac77e..7344bb61936c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -67,7 +67,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
- if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+ if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
/* TODO we would need to pin at least part of the buffer to
* get de-tiled view. For now just reject it.
*/
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 5f72c922a04b..a0574dc03e16 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -350,9 +350,8 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel);
- vpanel->panel.dev = dev;
- vpanel->panel.funcs = &versatile_panel_drm_funcs;
+ drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&vpanel->panel);
}
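
The panel conversions that follow all move to the four-argument drm_panel_init(), which sets the device, the funcs and the connector type in one call. A hedged probe sketch of that usage; struct my_panel and my_panel_funcs are hypothetical placeholders, only drm_panel_init()/drm_panel_add() and DRM_MODE_CONNECTOR_DPI are the real API:

	struct my_panel {
		struct drm_panel panel;
	};

	static const struct drm_panel_funcs my_panel_funcs; /* stub for the sketch */

	static int my_panel_probe(struct platform_device *pdev)
	{
		struct my_panel *p;

		p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		/* dev, funcs and connector type are now passed together */
		drm_panel_init(&p->panel, &pdev->dev, &my_panel_funcs,
			       DRM_MODE_CONNECTOR_DPI);

		return drm_panel_add(&p->panel);
	}
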
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index dabf59e0f56f..98f184b81187 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -204,9 +204,8 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &feiyang_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd)) {
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 3c58f63adbf7..24955bec1958 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -895,9 +895,8 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel);
- ili->panel.dev = dev;
- ili->panel.funcs = &ili9322_drm_funcs;
+ drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ili->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 3ad4a46c4e94..e8789e460a16 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -433,9 +433,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &ili9881c_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power)) {
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d92d1c98878c..83df1ac4211f 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -487,9 +487,8 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
if (IS_ERR(innolux->backlight))
return PTR_ERR(innolux->backlight);
- drm_panel_init(&innolux->base);
- innolux->base.funcs = &innolux_panel_funcs;
- innolux->base.dev = dev;
+ drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
err = drm_panel_add(&innolux->base);
if (err < 0)
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index ff3e89e61e3f..56364a93f0b8 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -437,9 +437,8 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return ret;
}
- drm_panel_init(&jdi->base);
- jdi->base.funcs = &jdi_panel_funcs;
- jdi->base.dev = &jdi->dsi->dev;
+ drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&jdi->base);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 3ac04eb8d0fe..45f96556ec8c 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -391,9 +391,8 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
if (IS_ERR(kingdisplay->backlight))
return PTR_ERR(kingdisplay->backlight);
- drm_panel_init(&kingdisplay->base);
- kingdisplay->base.funcs = &kingdisplay_panel_funcs;
- kingdisplay->base.dev = &kingdisplay->link->dev;
+ drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
+ &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&kingdisplay->base);
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index ee4379729a5b..7a1385e834f0 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -196,9 +196,8 @@ static int lb035q02_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &lb035q02_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 41bf02d122a1..db4865a4c2b9 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -259,9 +259,8 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &lg4573_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index ad47cc95459e..2405f26e5d31 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -197,7 +197,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
static int panel_lvds_probe(struct platform_device *pdev)
{
struct panel_lvds *lvds;
- struct device_node *np;
int ret;
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
@@ -243,14 +242,9 @@ static int panel_lvds_probe(struct platform_device *pdev)
return ret;
}
- np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
- if (np) {
- lvds->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!lvds->backlight)
- return -EPROBE_DEFER;
- }
+ lvds->backlight = devm_of_find_backlight(lvds->dev);
+ if (IS_ERR(lvds->backlight))
+ return PTR_ERR(lvds->backlight);
/*
* TODO: Handle all power supplies specified in the DT node in a generic
@@ -260,20 +254,15 @@ static int panel_lvds_probe(struct platform_device *pdev)
*/
/* Register the panel. */
- drm_panel_init(&lvds->panel);
- lvds->panel.dev = lvds->dev;
- lvds->panel.funcs = &panel_lvds_funcs;
+ drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
ret = drm_panel_add(&lvds->panel);
if (ret < 0)
- goto error;
+ return ret;
dev_set_drvdata(lvds->dev, lvds);
return 0;
-
-error:
- put_device(&lvds->backlight->dev);
- return ret;
}
static int panel_lvds_remove(struct platform_device *pdev)
@@ -284,9 +273,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
panel_lvds_disable(&lvds->panel);
- if (lvds->backlight)
- put_device(&lvds->backlight->dev);
-
return 0;
}
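
panel-lvds above replaces the manual of_parse_phandle / of_find_backlight_by_node / put_device sequence with devm_of_find_backlight(), which is devres-managed and returns NULL (not an error) when the DT node has no "backlight" phandle. A sketch of the lookup, assuming <linux/backlight.h>; the wrapper name is hypothetical:

	static int backlight_lookup_sketch(struct device *dev,
					   struct backlight_device **out)
	{
		struct backlight_device *bl;

		bl = devm_of_find_backlight(dev);
		if (IS_ERR(bl))
			return PTR_ERR(bl);	/* typically -EPROBE_DEFER */

		*out = bl;			/* may be NULL: no backlight */
		return 0;
	}
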
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 20f17e46e65d..fd593532ab23 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -205,9 +205,8 @@ static int nl8048_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &nl8048_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 2ad1063b068d..60ccedce530c 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -292,9 +292,8 @@ static int nt39016_probe(struct spi_device *spi)
return err;
}
- drm_panel_init(&panel->drm_panel);
- panel->drm_panel.dev = dev;
- panel->drm_panel.funcs = &nt39016_funcs;
+ drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
err = drm_panel_add(&panel->drm_panel);
if (err < 0) {
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 2bae1db3ff34..f2a72ee6ee07 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -288,9 +288,8 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
if (IS_ERR(lcd->backlight))
return PTR_ERR(lcd->backlight);
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = dev;
- lcd->panel.funcs = &lcd_olinuxino_funcs;
+ drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index c7b48df8869a..bf1f928b215f 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -455,9 +455,8 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &otm8009a_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dsi->host->dev, ctx,
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index e0e20ecff916..2b40913899d8 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -166,9 +166,8 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
if (IS_ERR(osd101t2587->backlight))
return PTR_ERR(osd101t2587->backlight);
- drm_panel_init(&osd101t2587->base);
- osd101t2587->base.funcs = &osd101t2587_panel_funcs;
- osd101t2587->base.dev = &osd101t2587->dsi->dev;
+ drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
+ &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&osd101t2587->base);
}
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 3dff0b3f73c2..664605071d34 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -223,9 +223,8 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
return -EPROBE_DEFER;
}
- drm_panel_init(&wuxga_nt->base);
- wuxga_nt->base.funcs = &wuxga_nt_panel_funcs;
- wuxga_nt->base.dev = &wuxga_nt->dsi->dev;
+ drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
+ &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&wuxga_nt->base);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index b5b14aa059ea..09824e92fc78 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -426,8 +426,8 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
return PTR_ERR(ts->dsi);
}
- ts->base.dev = dev;
- ts->base.funcs = &rpi_touchscreen_funcs;
+ drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 6a5d37006103..fd67fc6185c4 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -606,9 +606,8 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- drm_panel_init(&panel->panel);
- panel->panel.funcs = &rad_panel_funcs;
- panel->panel.dev = dev;
+ drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
ret = drm_panel_add(&panel->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index ba889625ad43..994e855721f4 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -404,9 +404,8 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &rm68200_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index b9109922397f..31234b79d3b1 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -343,9 +343,8 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &jh057n_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 3c15764f0c03..170a5cda21b9 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -173,9 +173,8 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &rb070d30_panel_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3be902dcedc0..250809ba37c7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -351,9 +351,8 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &ld9040_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index f75bef24e050..e3a0397e953e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -215,9 +215,8 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel);
- s6->panel.dev = dev;
- s6->panel.funcs = &s6d16d0_drm_funcs;
+ drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&s6->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index b923de23ed65..938ab72c5540 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -732,9 +732,8 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e3ha2_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index cd90fa700c49..a60635e9226d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -466,9 +466,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63j0x03_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
&s6e63j0x03_bl_ops, NULL);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 142d395ea512..ba01af0b14fd 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -473,9 +473,8 @@ static int s6e63m0_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63m0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = s6e63m0_backlight_register(ctx);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 81858267723a..dbced6501204 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1017,9 +1017,8 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e8aa0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 18b22b1294fb..b3619ba443bd 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -274,9 +274,8 @@ static int seiko_panel_probe(struct device *dev,
return -EPROBE_DEFER;
}
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &seiko_panel_funcs;
+ drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
err = drm_panel_add(&panel->base);
if (err < 0)
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index e910b4ad1310..5e136c3ba185 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -329,9 +329,8 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->backlight))
return PTR_ERR(sharp->backlight);
- drm_panel_init(&sharp->base);
- sharp->base.funcs = &sharp_panel_funcs;
- sharp->base.dev = &sharp->link1->dev;
+ drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&sharp->base);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index 46cd9a250129..eeab7998c7de 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -185,9 +185,8 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return PTR_ERR(lcd->ud_gpio);
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &pdev->dev;
- lcd->panel.funcs = &ls037v7dw01_funcs;
+ drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index c39abde9f9f1..b963ba4ab589 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -264,9 +264,8 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
if (IS_ERR(sharp_nt->backlight))
return PTR_ERR(sharp_nt->backlight);
- drm_panel_init(&sharp_nt->base);
- sharp_nt->base.funcs = &sharp_nt_panel_funcs;
- sharp_nt->base.dev = &sharp_nt->dsi->dev;
+ drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev,
+ &sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&sharp_nt->base);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 28fa6ba7b767..5d487686d25c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -94,6 +94,7 @@ struct panel_desc {
u32 bus_format;
u32 bus_flags;
+ int connector_type;
};
struct panel_simple {
@@ -464,9 +465,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
panel_simple_parse_panel_timing_node(dev, panel, &dt);
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &panel_simple_funcs;
+ drm_panel_init(&panel->base, dev, &panel_simple_funcs,
+ desc->connector_type);
err = drm_panel_add(&panel->base);
if (err < 0)
@@ -833,6 +833,7 @@ static const struct panel_desc auo_g133han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_g185han01_timings = {
@@ -862,6 +863,7 @@ static const struct panel_desc auo_g185han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_p320hvn03_timings = {
@@ -890,6 +892,7 @@ static const struct panel_desc auo_p320hvn03 = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -1205,6 +1208,7 @@ static const struct panel_desc dlc_dlc0700yzg_1 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing dlc_dlc1010gig_timing = {
@@ -1235,6 +1239,7 @@ static const struct panel_desc dlc_dlc1010gig = {
.unprepare = 60,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode edt_et035012dm6_mode = {
@@ -1501,6 +1506,7 @@ static const struct panel_desc hannstar_hsd070pww1 = {
.height = 94,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -1525,6 +1531,7 @@ static const struct panel_desc hannstar_hsd100pxn1 = {
.height = 152,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
@@ -1631,6 +1638,7 @@ static const struct panel_desc innolux_g070y2_l01 = {
.unprepare = 800,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g101ice_l01_timing = {
@@ -1659,6 +1667,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g121i1_l01_timing = {
@@ -1686,6 +1695,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.disable = 20,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode innolux_g121x1_l03_mode = {
@@ -1869,6 +1879,7 @@ static const struct panel_desc koe_tx31d200vm0baa = {
.height = 109,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing kyo_tcg121xglp_timing = {
@@ -1893,6 +1904,7 @@ static const struct panel_desc kyo_tcg121xglp = {
.height = 184,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lemaker_bl035_rgb_002_mode = {
@@ -1941,6 +1953,7 @@ static const struct panel_desc lg_lb070wv8 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
@@ -2063,6 +2076,7 @@ static const struct panel_desc mitsubishi_aa070mc01 = {
.disable = 400,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
@@ -2091,6 +2105,7 @@ static const struct panel_desc nec_nl12880bc20_05 = {
.disable = 50,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
@@ -2193,6 +2208,7 @@ static const struct panel_desc nlt_nl192108ac18_02d = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nvd_9128_mode = {
@@ -2216,6 +2232,7 @@ static const struct panel_desc nvd_9128 = {
.height = 88,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
@@ -2381,6 +2398,7 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode pda_91_00156_a0_mode = {
@@ -2628,6 +2646,7 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.height = 136,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing sharp_lq123p1jx31_timing = {
@@ -2807,6 +2826,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
.height = 95,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing tianma_tm070rvhg71_timing = {
@@ -2831,6 +2851,7 @@ static const struct panel_desc tianma_tm070rvhg71 = {
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
@@ -2913,6 +2934,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -2983,6 +3005,7 @@ static const struct panel_desc urt_umsh_8596md_lvds = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct panel_desc urt_umsh_8596md_parallel = {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 09c5d9a6f9fa..ee3f23f45755 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -369,7 +369,8 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(st7701->backlight))
return PTR_ERR(st7701->backlight);
- drm_panel_init(&st7701->panel);
+ drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/**
* Once sleep out has been issued, the ST7701 IC is required to wait 120ms
@@ -381,8 +382,6 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
* ts8550b and there is no valid documentation for that.
*/
st7701->sleep_delay = 120 + desc->panel_sleep_delay;
- st7701->panel.funcs = &st7701_funcs;
- st7701->panel.dev = &dsi->dev;
ret = drm_panel_add(&st7701->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 5e3e92ea9ea6..108a85bb6667 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -381,8 +381,8 @@ static int st7789v_probe(struct spi_device *spi)
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &st7789v_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ctx->power = devm_regulator_get(&spi->dev, "power");
if (IS_ERR(ctx->power))
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 3d5b9c4f68d9..d6387d8f88a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -648,9 +648,8 @@ static int acx565akm_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &acx565akm_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_add(&lcd->panel);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index f2baff827f50..c44d6a65c0aa 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -347,9 +347,8 @@ static int td028ttec1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &td028ttec1_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index ba163c779084..621b65feec07 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -458,9 +458,8 @@ static int td043mtea1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &td043mtea1_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_add(&lcd->panel);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 71591e5f5938..1a5418ae2ccf 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -457,9 +457,8 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel);
- tpg->panel.dev = dev;
- tpg->panel.funcs = &tpg110_drm_funcs;
+ drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
spi_set_drvdata(spi, tpg);
return drm_panel_add(&tpg->panel);
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 77e1311b7c69..0feea2456e14 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -518,9 +518,8 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
/* dual port */
gpiod_set_value(ctx->mode_gpio, 0);
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &truly_nt35597_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &truly_nt35597_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index 536a0d4f8d29..8c811a9e683b 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -10,3 +10,5 @@
- Compute job support. So called 'compute only' jobs need to be plumbed up to
userspace.
+
+- Support core dump on job failure
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 12ff77dacc95..4c4e8a30a1ac 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -13,97 +13,42 @@
#include "panfrost_gpu.h"
#include "panfrost_regs.h"
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- struct dev_pm_opp *opp;
- unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
- unsigned long target_volt, target_rate;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
int err;
- opp = devfreq_recommended_opp(dev, freq, flags);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
-
- target_rate = dev_pm_opp_get_freq(opp);
- target_volt = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (old_clk_rate == target_rate)
- return 0;
-
- /*
- * If frequency scaling from low to high, adjust voltage first.
- * If frequency scaling from high to low, adjust frequency first.
- */
- if (old_clk_rate < target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err) {
- dev_err(dev, "Cannot set voltage %lu uV\n",
- target_volt);
- return err;
- }
- }
-
- err = clk_set_rate(pfdev->clock, target_rate);
- if (err) {
- dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
- err);
- regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
- pfdev->devfreq.cur_volt);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
return err;
- }
- if (old_clk_rate > target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err)
- dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
- }
-
- pfdev->devfreq.cur_freq = target_rate;
- pfdev->devfreq.cur_volt = target_volt;
+ *freq = clk_get_rate(pfdev->clock);
return 0;
}
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
- ktime_t now = ktime_get();
- int i;
-
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- pfdev->devfreq.slot[i].busy_time = 0;
- pfdev->devfreq.slot[i].idle_time = 0;
- pfdev->devfreq.slot[i].time_last_update = now;
- }
+ pfdev->devfreq.busy_time = 0;
+ pfdev->devfreq.idle_time = 0;
+ pfdev->devfreq.time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- int i;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- panfrost_devfreq_update_utilization(pfdev, i);
- }
+ panfrost_devfreq_update_utilization(pfdev);
status->current_frequency = clk_get_rate(pfdev->clock);
- status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
- pfdev->devfreq.slot[0].idle_time));
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time,
+ pfdev->devfreq.idle_time));
- status->busy_time = 0;
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
- }
-
- /* We're scheduling only to one core atm, so don't divide for now */
- /* status->busy_time /= NUM_JOB_SLOTS; */
+ status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
panfrost_devfreq_reset(pfdev);
@@ -119,7 +64,7 @@ static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- *freq = pfdev->devfreq.cur_freq;
+ *freq = clk_get_rate(pfdev->clock);
return 0;
}
@@ -135,6 +80,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
+ unsigned long cur_freq;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
@@ -144,13 +90,13 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_reset(pfdev);
- pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+ cur_freq = clk_get_rate(pfdev->clock);
- opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
- panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
@@ -174,14 +120,10 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
- int i;
-
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- pfdev->devfreq.slot[i].busy = false;
devfreq_resume_device(pfdev->devfreq.devfreq);
}
@@ -194,9 +136,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
ktime_t now;
ktime_t last;
@@ -204,22 +145,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
return;
now = ktime_get();
- last = pfdev->devfreq.slot[slot].time_last_update;
+ last = pfdev->devfreq.time_last_update;
- /* If we last recorded a transition to busy, we have been idle since */
- if (devfreq_slot->busy)
- pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ if (atomic_read(&pfdev->devfreq.busy_count) > 0)
+ pfdev->devfreq.busy_time += ktime_sub(now, last);
else
- pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+ pfdev->devfreq.idle_time += ktime_sub(now, last);
- pfdev->devfreq.slot[slot].time_last_update = now;
+ pfdev->devfreq.time_last_update = now;
+}
+
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev)
+{
+ panfrost_devfreq_update_utilization(pfdev);
+ atomic_inc(&pfdev->devfreq.busy_count);
}
-/* The job scheduler is expected to call this at every transition busy <-> idle */
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ int count;
- panfrost_devfreq_update_utilization(pfdev, slot);
- devfreq_slot->busy = !devfreq_slot->busy;
+ panfrost_devfreq_update_utilization(pfdev);
+ count = atomic_dec_if_positive(&pfdev->devfreq.busy_count);
+ WARN_ON(count < 0);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index e3bc63e82843..0611beffc8d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -10,6 +10,7 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev);
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev);
#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 9c39b9794811..06713811b92c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -51,13 +51,6 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
-struct panfrost_devfreq_slot {
- ktime_t busy_time;
- ktime_t idle_time;
- ktime_t time_last_update;
- bool busy;
-};
-
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -93,9 +86,10 @@ struct panfrost_device {
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
- unsigned long cur_freq;
- unsigned long cur_volt;
- struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ atomic_t busy_count;
} devfreq;
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f21bc8a7ee3a..9458dc6c750c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -470,7 +470,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
/*
* Panfrost driver version:
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index acb07fe06580..deca0c30bbd4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -112,7 +112,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
index cec6dcdadb5c..8e59d765bf19 100644
--- a/drivers/gpu/drm/panfrost/panfrost_issues.h
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -13,37 +13,118 @@
* to care about.
*/
enum panfrost_hw_issue {
+ /* Need way to guarantee that all previously-translated memory accesses
+ * are committed */
HW_ISSUE_6367,
+
+ /* On job complete with non-done the cache is not flushed */
HW_ISSUE_6787,
+
+ /* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes an
+ * instrumentation dump if PRFCNT_TILER_EN is enabled */
HW_ISSUE_8186,
+
+ /* TIB: Reports faults from a vtile which has not yet been allocated */
HW_ISSUE_8245,
+
+ /* uTLB deadlock could occur when writing to an invalid page at the
+ * same time as access to a valid page in the same uTLB cache line ( ==
+ * 4 PTEs == 16K block of mapping) */
HW_ISSUE_8316,
+
+ /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is
+ * still executing */
HW_ISSUE_8394,
+
+ /* CSE: Sends a TERMINATED response for a task that should not be
+ * terminated */
HW_ISSUE_8401,
+
+ /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader,
+ * Cache Flush, Tiler) jobs causes DATA_INVALID_FAULT on tiler job. */
HW_ISSUE_8408,
+
+ /* Disable the Pause Buffer in the LS pipe. */
HW_ISSUE_8443,
+
+ /* Change in RMUs in use causes problems related to the core's SDC */
HW_ISSUE_8987,
+
+ /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop
+ * won't complete until all 4 tasks have completed */
HW_ISSUE_9435,
+
+ /* HT: Tiler returns TERMINATED for non-terminated command */
HW_ISSUE_9510,
+
+ /* Occasionally the GPU will issue multiple page faults for the same
+ * address before the MMU page table has been read by the GPU */
HW_ISSUE_9630,
+
+ /* RA DCD load request to SDC returns invalid load ignore causing
+ * colour buffer mismatch */
HW_ISSUE_10327,
+
+ /* MMU TLB invalidation hazards */
HW_ISSUE_10649,
+
+ /* Missing cache flush in multi core-group configuration */
HW_ISSUE_10676,
+
+ /* Chicken bit on T72X for a hardware workaround in compiler */
HW_ISSUE_10797,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */
HW_ISSUE_10817,
+
+ /* Intermittent missing interrupt on job completion */
HW_ISSUE_10883,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR
+ * (similar to issue 10817) and can use #10817 workaround */
HW_ISSUE_10959,
+
+ /* Soft-stopped fragment shader job can restart with out-of-bounds
+ * restart index */
HW_ISSUE_10969,
+
+ /* Race condition can cause tile list corruption */
HW_ISSUE_11020,
+
+ /* Write buffer can cause tile list corruption */
HW_ISSUE_11024,
+
+ /* Pause buffer can cause a fragment job hang */
HW_ISSUE_11035,
+
+ /* Dynamic Core Scaling not supported due to errata */
HW_ISSUE_11056,
+
+ /* Clear encoder state for a hard stopped fragment job which is AFBC
+ * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and
+ * r0p1_50rel0 */
HW_ISSUE_T76X_3542,
+
+ /* Keep tiler module clock on to prevent GPU stall */
HW_ISSUE_T76X_3953,
+
+ /* Must ensure L2 is not transitioning when we reset. Workaround with a
+ * busy wait until L2 completes transition; ensure there is a maximum
+ * loop count as it may never complete its transition. (On chips
+ * without this erratum, it is fine if L2 transitions.) */
HW_ISSUE_TMIX_8463,
+
+ /* Don't set SC_LS_ATTR_CHECK_DISABLE/SC_LS_ALLOW_ATTR_TYPES */
GPUCORE_1619,
+
+ /* When a hard-stop follows close after a soft-stop, the completion
+ * code for the terminated job may be incorrectly set to STOPPED */
HW_ISSUE_TMIX_8438,
+
+ /* "Protected mode" is buggy on Mali-G31 some Bifrost chips, so the
+ * kernel must fiddle with L2 caches to prevent data leakage */
HW_ISSUE_TGOX_R1_1234,
+
HW_ISSUE_END
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 21f34d44aac2..d411eb6c8eb9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -155,8 +155,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
}
cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_busy(pfdev);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -404,9 +403,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
}
spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
- /* panfrost_core_dump(pfdev); */
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_idle(pfdev);
panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
@@ -469,7 +466,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
pfdev->jobs[j] = NULL;
panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
- panfrost_devfreq_record_transition(pfdev, j);
+ panfrost_devfreq_record_idle(pfdev);
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -570,14 +567,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
struct panfrost_job_slot *js = pfdev->js;
int i;
+ /* Check whether the hardware is idle */
+ if (atomic_read(&pfdev->devfreq.busy_count))
+ return false;
+
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
return false;
-
- /* Check whether the hardware is idle */
- if (pfdev->devfreq.slot[i].busy)
- return false;
}
return true;
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 024771a4083e..703ddc803c55 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -48,10 +48,10 @@ irqreturn_t pl111_irq(int irq, void *data)
}
static enum drm_mode_status
-pl111_mode_valid(struct drm_crtc *crtc,
+pl111_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
- struct drm_device *drm = crtc->dev;
+ struct drm_device *drm = pipe->crtc.dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
u32 cpp = priv->variant->fb_bpp / 8;
u64 bw;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 276b53473a84..63dfcda04147 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -150,8 +150,8 @@ static int pl111_modeset_init(struct drm_device *dev)
return -EPROBE_DEFER;
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_config;
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d0d691b31f4a..ca3f51c2a8fe 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -4,6 +4,7 @@ config DRM_QXL
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
+ select DRM_TTM_HELPER
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 265bfe9f8016..1d601f57a6ba 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -88,7 +88,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto free_dev;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
goto disable_pci;
@@ -150,15 +150,7 @@ qxl_pci_remove(struct pci_dev *pdev)
drm_dev_put(dev);
}
-static const struct file_operations qxl_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .mmap = qxl_mmap,
-};
+DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
@@ -276,16 +268,8 @@ static struct drm_driver qxl_driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = qxl_gem_prime_pin,
- .gem_prime_unpin = qxl_gem_prime_unpin,
- .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
- .gem_prime_vmap = qxl_gem_prime_vmap,
- .gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
- .gem_free_object_unlocked = qxl_gem_object_free,
- .gem_open_object = qxl_gem_object_open,
- .gem_close_object = qxl_gem_object_close,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
.irq_handler = qxl_irq_handler,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 9e034c5fa87d..27e45a2d6b52 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
@@ -354,7 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/* qxl image */
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 548dfe6f3b26..ab72dc3476e9 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -54,9 +54,14 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+ u32 pflag = 0;
unsigned int i;
+ if (pinned)
+ pflag |= TTM_PL_FLAG_NO_EVICT;
+ if (qbo->tbo.base.size <= PAGE_SIZE)
+ pflag |= TTM_PL_FLAG_TOPDOWN;
+
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
@@ -77,6 +82,19 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
}
}
+static const struct drm_gem_object_funcs qxl_object_funcs = {
+ .free = qxl_gem_object_free,
+ .open = qxl_gem_object_open,
+ .close = qxl_gem_object_close,
+ .pin = qxl_gem_prime_pin,
+ .unpin = qxl_gem_prime_unpin,
+ .get_sg_table = qxl_gem_prime_get_sg_table,
+ .vmap = qxl_gem_prime_vmap,
+ .vunmap = qxl_gem_prime_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
+
int qxl_bo_create(struct qxl_device *qdev,
unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
@@ -100,6 +118,7 @@ int qxl_bo_create(struct qxl_device *qdev,
kfree(bo);
return r;
}
+ bo->tbo.base.funcs = &qxl_object_funcs;
bo->type = domain;
bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
@@ -148,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
void *rptr;
int ret;
struct io_mapping *map;
@@ -160,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
else
goto fallback;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
+ ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
@@ -193,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-
if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
(bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
-
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
return;
fallback:
qxl_bo_kunmap(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 312216caeea2..2feca734c7b1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL, true);
+ !no_intr, NULL);
if (ret)
return ret;
@@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -451,18 +450,16 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- glob = bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 9b24514c75aa..16a5e903533d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,47 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static struct vm_operations_struct qxl_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
- vm_fault_t ret;
-
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
- ret = ttm_vm_ops->fault(vmf);
- return ret;
-}
-
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int r;
- struct drm_file *file_priv = filp->private_data;
- struct qxl_device *qdev = file_priv->minor->dev->dev_private;
-
- if (qdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
- filp->private_data, vma->vm_pgoff);
-
- r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
- if (unlikely(r != 0))
- return r;
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- qxl_ttm_vm_ops = *ttm_vm_ops;
- qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
- }
- vma->vm_ops = &qxl_ttm_vm_ops;
- return 0;
-}
-
static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -151,16 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
-static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct qxl_bo *qbo = to_qxl_bo(bo);
-
- return drm_vma_node_verify_access(&qbo->tbo.base.vma_node,
- filp->private_data);
-}
-
-static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -310,7 +261,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
- .verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
.move_notify = &qxl_bo_move_notify,
@@ -325,6 +275,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_bo_device_init(&qdev->mman.bdev,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
+ qdev->ddev.vma_offset_manager,
false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -368,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct qxl_device *rdev = dev->dev_private;
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
#endif
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 62eab82a64f9..40a7e702c2a9 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -221,9 +221,7 @@ int ci_get_temp(struct radeon_device *rdev)
else
actual_temp = temp & 0x1ff;
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
+ return actual_temp * 1000;
}
/* get temperature in millidegrees */
@@ -239,9 +237,7 @@ int kv_get_temp(struct radeon_device *rdev)
else
actual_temp = 0;
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
+ return actual_temp * 1000;
}
/*
@@ -6969,8 +6965,8 @@ static int cik_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* XXX this should actually be a bus address, not an MC address. same on older asics */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -9504,7 +9500,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -9546,12 +9541,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -9561,14 +9551,17 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -9586,15 +9579,23 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -9607,26 +9608,45 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -9640,15 +9660,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e937cc01910d..033bc466a862 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d59b004f6695..30e32adc1fc6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -68,6 +68,10 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
+#ifdef CONFIG_MMU_NOTIFIER
+#include <linux/mmu_notifier.h>
+#endif
+
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -509,8 +513,9 @@ struct radeon_bo {
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
- struct radeon_mn *mn;
- struct list_head mn_list;
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_interval_notifier notifier;
+#endif
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index b9aea5776d3d..72db2b41e96d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -367,10 +367,10 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
return;
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b684cd719612..c07427d3c199 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -249,11 +249,10 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -269,9 +268,8 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
@@ -380,10 +378,9 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -428,14 +425,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
struct drm_encoder *enc;
- int i;
if (conflict == connector)
continue;
radeon_conflict = to_radeon_connector(conflict);
- drm_connector_for_each_possible_encoder(conflict, enc, i) {
+ drm_connector_for_each_possible_encoder(conflict, enc) {
/* if the IDs match */
if (enc == encoder) {
if (conflict->status != connector_status_connected)
@@ -1363,9 +1359,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
- int i;
-
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1443,9 +1437,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1460,7 +1453,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1603,9 +1596,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1624,10 +1616,9 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
- int i;
bool found = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 2994f07fbad9..ee28f5b3785e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
return &radeon_connector->mst_encoder->base;
}
+static int
+radeon_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ radeon_connector->port);
+}
+
static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
.get_modes = radeon_dp_mst_get_modes,
.mode_valid = radeon_dp_mst_mode_valid,
.best_encoder = radeon_mst_best_encoder,
+ .detect_ctx = radeon_dp_mst_detect,
};
-static enum drm_connector_status
-radeon_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
-
- return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
-}
-
static void
radeon_dp_mst_connector_destroy(struct drm_connector *connector)
{
@@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
};
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4528f4dc0b2d..fd74e2611185 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -361,7 +361,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb");
if (ret)
return ret;
@@ -379,10 +379,6 @@ radeon_pci_remove(struct pci_dev *pdev)
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
-#ifdef CONFIG_PPC64
- struct drm_device *ddev = pci_get_drvdata(pdev);
-#endif
-
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown
*/
@@ -390,13 +386,14 @@ radeon_pci_shutdown(struct pci_dev *pdev)
radeon_pci_remove(pdev);
#ifdef CONFIG_PPC64
- /* Some adapters need to be suspended before a
+ /*
+ * Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
* during kexec.
* Make this power specific because it breaks
* some non-power boards.
*/
- radeon_suspend_kms(ddev, true, true, false);
+ radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b2b076606f54..67298a0739cb 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index dbab9a3a969b..f93829f08a4d 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -36,131 +36,51 @@
#include "radeon.h"
-struct radeon_mn {
- struct mmu_notifier mn;
-
- /* objects protected by lock */
- struct mutex lock;
- struct rb_root_cached objects;
-};
-
-struct radeon_mn_node {
- struct interval_tree_node it;
- struct list_head bos;
-};
-
/**
- * radeon_mn_invalidate_range_start - callback to notify about mm change
+ * radeon_mn_invalidate - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: the VMA under invalidation
*
* We block for all BOs between start and end to be idle and
* unmap them by moving them into the system domain again.
*/
-static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
+static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+ struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
struct ttm_operation_ctx ctx = { false, false };
- struct interval_tree_node *it;
- unsigned long end;
- int ret = 0;
-
- /* notification is exclusive, but interval is inclusive */
- end = range->end - 1;
-
- /* TODO we should be able to split locking for interval tree and
- * the tear down.
- */
- if (mmu_notifier_range_blockable(range))
- mutex_lock(&rmn->lock);
- else if (!mutex_trylock(&rmn->lock))
- return -EAGAIN;
-
- it = interval_tree_iter_first(&rmn->objects, range->start, end);
- while (it) {
- struct radeon_mn_node *node;
- struct radeon_bo *bo;
- long r;
-
- if (!mmu_notifier_range_blockable(range)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- node = container_of(it, struct radeon_mn_node, it);
- it = interval_tree_iter_next(it, range->start, end);
+ long r;
- list_for_each_entry(bo, &node->bos, mn_list) {
+ if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ return true;
- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
- continue;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- r = radeon_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- radeon_bo_unreserve(bo);
- }
+ r = radeon_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+ return true;
}
-
-out_unlock:
- mutex_unlock(&rmn->lock);
-
- return ret;
-}
-
-static void radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
-{
- struct mmu_notifier_range range = {
- .mm = mm,
- .start = 0,
- .end = ULONG_MAX,
- .flags = 0,
- .event = MMU_NOTIFY_UNMAP,
- };
-
- radeon_mn_invalidate_range_start(mn, &range);
-}
-
-static struct mmu_notifier *radeon_mn_alloc_notifier(struct mm_struct *mm)
-{
- struct radeon_mn *rmn;
- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
- if (!rmn)
- return ERR_PTR(-ENOMEM);
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- mutex_init(&rmn->lock);
- rmn->objects = RB_ROOT_CACHED;
- return &rmn->mn;
-}
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ DRM_ERROR("(%ld) failed to validate user bo\n", r);
-static void radeon_mn_free_notifier(struct mmu_notifier *mn)
-{
- kfree(container_of(mn, struct radeon_mn, mn));
+ radeon_bo_unreserve(bo);
+ return true;
}
-static const struct mmu_notifier_ops radeon_mn_ops = {
- .release = radeon_mn_release,
- .invalidate_range_start = radeon_mn_invalidate_range_start,
- .alloc_notifier = radeon_mn_alloc_notifier,
- .free_notifier = radeon_mn_free_notifier,
+static const struct mmu_interval_notifier_ops radeon_mn_ops = {
+ .invalidate = radeon_mn_invalidate,
};
/**
@@ -174,51 +94,20 @@ static const struct mmu_notifier_ops radeon_mn_ops = {
*/
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
- unsigned long end = addr + radeon_bo_size(bo) - 1;
- struct mmu_notifier *mn;
- struct radeon_mn *rmn;
- struct radeon_mn_node *node = NULL;
- struct list_head bos;
- struct interval_tree_node *it;
-
- mn = mmu_notifier_get(&radeon_mn_ops, current->mm);
- if (IS_ERR(mn))
- return PTR_ERR(mn);
- rmn = container_of(mn, struct radeon_mn, mn);
-
- INIT_LIST_HEAD(&bos);
-
- mutex_lock(&rmn->lock);
-
- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
- kfree(node);
- node = container_of(it, struct radeon_mn_node, it);
- interval_tree_remove(&node->it, &rmn->objects);
- addr = min(it->start, addr);
- end = max(it->last, end);
- list_splice(&node->bos, &bos);
- }
-
- if (!node) {
- node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
- if (!node) {
- mutex_unlock(&rmn->lock);
- return -ENOMEM;
- }
- }
-
- bo->mn = rmn;
-
- node->it.start = addr;
- node->it.last = end;
- INIT_LIST_HEAD(&node->bos);
- list_splice(&bos, &node->bos);
- list_add(&bo->mn_list, &node->bos);
-
- interval_tree_insert(&node->it, &rmn->objects);
-
- mutex_unlock(&rmn->lock);
-
+ int ret;
+
+ ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ radeon_bo_size(bo), &radeon_mn_ops);
+ if (ret)
+ return ret;
+
+ /*
+ * FIXME: radeon appears to allow get_user_pages to run during
+ * invalidate_range_start/end, which is not a safe way to read the
+ * PTEs. It should use the mmu_interval_read_begin() scheme around the
+ * get_user_pages to ensure that the PTEs are read properly
+ */
+ mmu_interval_read_begin(&bo->notifier);
return 0;
}
@@ -231,27 +120,8 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
*/
void radeon_mn_unregister(struct radeon_bo *bo)
{
- struct radeon_mn *rmn = bo->mn;
- struct list_head *head;
-
- if (!rmn)
+ if (!bo->notifier.mm)
return;
-
- mutex_lock(&rmn->lock);
- /* save the next list entry for later */
- head = bo->mn_list.next;
-
- list_del(&bo->mn_list);
-
- if (list_empty(head)) {
- struct radeon_mn_node *node;
- node = container_of(head, struct radeon_mn_node, bos);
- interval_tree_remove(&node->it, &rmn->objects);
- kfree(node);
- }
-
- mutex_unlock(&rmn->lock);
-
- mmu_notifier_put(&rmn->mn);
- bo->mn = NULL;
+ mmu_interval_notifier_remove(&bo->notifier);
+ bo->notifier.mm = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2abe1eab471f..140d94cc080d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a05e10724d46..098bc9f40b98 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -794,6 +794,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_bo_device_init(&rdev->mman.bdev,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
+ rdev->ddev->vma_offset_manager,
dma_addressing_limited(&rdev->pdev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 05894d198a79..d7eea75b2c27 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3257,7 +3257,7 @@ static void si_gpu_init(struct radeon_device *rdev)
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
- }
+ }
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
@@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -7087,7 +7087,6 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -7129,12 +7128,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -7144,14 +7138,17 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -7169,15 +7166,23 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -7190,26 +7195,46 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -7223,15 +7248,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 9c93eb4fad8b..f266c17b907a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -131,6 +131,35 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
.dpll_mask = BIT(1),
};
+static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(3) | BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A774B1 has one RGB output, one LVDS output and one HDMI
+ * output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+ .dpll_mask = BIT(1),
+};
+
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -416,6 +445,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
{ .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
+ { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info },
{ .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 0f00bdfe2366..3cd83a030a04 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -9,6 +9,7 @@
#include <linux/export.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
@@ -84,8 +85,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
goto done;
}
- bridge = devm_drm_panel_bridge_add(rcdu->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto done;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 2dc9caee8767..0d59f390de19 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -585,7 +585,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsps[j].crtcs_mask |= BIT(i);
- /* Store the VSP pointer and pipe index in the CRTC. */
+ /*
+ * Store the VSP pointer and pipe index in the CRTC. If the
+ * second cell of the 'vsps' specifier isn't present, default
+ * to 0 to remain compatible with older DT bindings.
+ */
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3fc7e6899cab..8c6c172bbf2e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -16,6 +16,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds)
return 0;
}
+static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = {
+ .gen = 2,
+ .quirks = RCAR_LVDS_QUIRK_LANES,
+ .pll_setup = rcar_lvds_pll_setup_gen2,
+};
+
+static const struct soc_device_attribute lvds_quirk_matches[] = {
+ {
+ .soc_id = "r8a7790", .revision = "ES1.*",
+ .data = &rcar_lvds_r8a7790es1_info,
+ },
+ { /* sentinel */ }
+};
+
static int rcar_lvds_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
struct resource *mem;
int ret;
@@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->dev = &pdev->dev;
lvds->info = of_device_get_match_data(&pdev->dev);
+ attr = soc_device_match(lvds_quirk_matches);
+ if (attr)
+ lvds->info = attr->data;
+
ret = rcar_lvds_parse_dt(lvds);
if (ret < 0)
return ret;
@@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
.pll_setup = rcar_lvds_pll_setup_gen2,
};
-static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = {
- .gen = 2,
- .quirks = RCAR_LVDS_QUIRK_LANES,
- .pll_setup = rcar_lvds_pll_setup_gen2,
-};
-
static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_PWD,
@@ -929,8 +943,9 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
- { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
+ { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index d505ea7d5384..eed594bd38d3 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -477,8 +477,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
cdn_dp_set_firmware_active(dp, false);
cdn_dp_clk_disable(dp);
dp->active = false;
- dp->link.rate = 0;
- dp->link.num_lanes = 0;
+ dp->max_lanes = 0;
+ dp->max_rate = 0;
if (!dp->connected) {
kfree(dp->edid);
dp->edid = NULL;
@@ -570,7 +570,7 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
struct cdn_dp_port *port = cdn_dp_connected_port(dp);
u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
- if (!port || !dp->link.rate || !dp->link.num_lanes)
+ if (!port || !dp->max_rate || !dp->max_lanes)
return false;
if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
@@ -952,8 +952,8 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* Enabled and connected with a sink, re-train if requested */
} else if (!cdn_dp_check_link_status(dp)) {
- unsigned int rate = dp->link.rate;
- unsigned int lanes = dp->link.num_lanes;
+ unsigned int rate = dp->max_rate;
+ unsigned int lanes = dp->max_lanes;
struct drm_display_mode *mode = &dp->mode;
DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
@@ -966,7 +966,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* If training result is changed, update the video config */
if (mode->clock &&
- (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
+ (rate != dp->max_rate || lanes != dp->max_lanes)) {
ret = cdn_dp_config_video(dp);
if (ret) {
dp->connected = false;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index b85ea89eb60b..83c4586665b4 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -92,9 +92,10 @@ struct cdn_dp_device {
struct reset_control *core_rst;
struct audio_info audio_info;
struct video_info video_info;
- struct drm_dp_link link;
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
+ u8 max_lanes;
+ u8 max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 077c87021908..7361c07cb4a7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -535,8 +535,8 @@ static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
if (ret)
goto err_get_training_status;
- dp->link.rate = drm_dp_bw_code_to_link_rate(status[0]);
- dp->link.num_lanes = status[1];
+ dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]);
+ dp->max_lanes = status[1];
err_get_training_status:
if (ret)
@@ -560,8 +560,8 @@ int cdn_dp_train_link(struct cdn_dp_device *dp)
return ret;
}
- DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
- dp->link.num_lanes);
+ DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate,
+ dp->max_lanes);
return ret;
}
@@ -639,7 +639,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
(video->color_depth * 2) : (video->color_depth * 3);
- link_rate = dp->link.rate / 1000;
+ link_rate = dp->max_rate / 1000;
ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
if (ret)
@@ -659,14 +659,13 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
do {
tu_size_reg += 2;
symbol = tu_size_reg * mode->clock * bit_per_pix;
- do_div(symbol, dp->link.num_lanes * link_rate * 8);
+ do_div(symbol, dp->max_lanes * link_rate * 8);
rem = do_div(symbol, 1000);
if (tu_size_reg > 64) {
ret = -EINVAL;
DRM_DEV_ERROR(dp->dev,
"tu error, clk:%d, lanes:%d, rate:%d\n",
- mode->clock, dp->link.num_lanes,
- link_rate);
+ mode->clock, dp->max_lanes, link_rate);
goto err_config_video;
}
} while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
@@ -680,7 +679,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
/* set the FIFO Buffer size */
val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
- val /= (dp->link.num_lanes * link_rate);
+ val /= (dp->max_lanes * link_rate);
val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
val += 2;
ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
@@ -833,7 +832,7 @@ static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
u32 val;
if (audio->channels == 2) {
- if (dp->link.num_lanes == 1)
+ if (dp->max_lanes == 1)
sub_pckt_num = 2;
else
sub_pckt_num = 4;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 906891b03a38..7f56d8c3491d 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -450,6 +450,7 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
.phy_ops = &rk3328_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
.phy_force_vendor = true,
+ .use_drm_infoframe = true,
};
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
@@ -464,6 +465,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
.cur_ctr = rockchip_cur_ctr,
.phy_config = rockchip_phy_config,
.phy_data = &rk3399_chip_data,
+ .use_drm_infoframe = true,
};
static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 85fc5f01f761..cdb401f4283d 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -743,7 +743,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct rk3066_hdmi *hdmi;
- struct resource *iores;
int irq;
int ret;
@@ -753,12 +752,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drm_dev = drm;
-
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENXIO;
-
- hdmi->regs = devm_ioremap_resource(dev, iores);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 291e89b4045f..7582d0e6a60a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -294,7 +294,7 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
kfree(rk_obj);
}
-struct rockchip_gem_object *
+static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
struct rockchip_gem_object *rk_obj;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 613404f86668..d04b3492bdac 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -139,6 +139,7 @@ struct vop {
uint32_t *regsbak;
void __iomem *regs;
+ void __iomem *lut_regs;
/* physical map length of vop register */
uint32_t len;
@@ -1040,14 +1041,118 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct vop *vop = to_vop(crtc);
+ unsigned long rate;
- adjusted_mode->clock =
- DIV_ROUND_UP(clk_round_rate(vop->dclk,
- adjusted_mode->clock * 1000), 1000);
+ /*
+ * Clock craziness.
+ *
+ * Key points:
+ *
+ * - DRM works in kHz.
+ * - Clock framework works in Hz.
+ * - Rockchip's clock driver picks the clock rate that is the
+ * same _OR LOWER_ than the one requested.
+ *
+ * Action plan:
+ *
+ * 1. When DRM gives us a mode, we should add 999 Hz to it. That way
+ * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
+ * make 60000 kHz then the clock framework will actually give us
+ * the right clock.
+ *
+ * NOTE: if the PLL (maybe through a divider) could actually make
+ * a clock rate 999 Hz higher instead of the one we want then this
+ * could be a problem. Unfortunately there's not much we can do
+ * since it's baked into DRM to use kHz. It shouldn't matter in
+ * practice since Rockchip PLLs are controlled by tables and
+ * even if there is a divider in the middle I wouldn't expect PLL
+ * rates in the table that are just a few kHz different.
+ *
+ * 2. Get the clock framework to round the rate for us to tell us
+ * what it will actually make.
+ *
+ * 3. Store the rounded up rate so that we don't need to worry about
+ * this in the actual clk_set_rate().
+ */
+ rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+ adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
return true;
}
+static bool vop_dsp_lut_is_enabled(struct vop *vop)
+{
+ return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
+}
+
+static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
+{
+ struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+ unsigned int i;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ u32 word;
+
+ word = (drm_color_lut_extract(lut[i].red, 10) << 20) |
+ (drm_color_lut_extract(lut[i].green, 10) << 10) |
+ drm_color_lut_extract(lut[i].blue, 10);
+ writel(word, vop->lut_regs + i * 4);
+ }
+}
+
+static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_crtc_state *state = crtc->state;
+ unsigned int idle;
+ int ret;
+
+ if (!vop->lut_regs)
+ return;
+ /*
+ * To disable gamma (gamma_lut is null) or to write
+ * an update to the LUT, clear dsp_lut_en.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, dsp_lut_en, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ /*
+ * In order to write the LUT to the internal memory,
+ * we need to first make sure the dsp_lut_en bit is cleared.
+ */
+ ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
+ idle, !idle, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
+ return;
+ }
+
+ if (!state->gamma_lut)
+ return;
+
+ spin_lock(&vop->reg_lock);
+ vop_crtc_write_gamma_lut(vop, crtc);
+ VOP_REG_SET(vop, common, dsp_lut_en, 1);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+}
+
+static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ /*
+ * Only update GAMMA if the 'active' flag is not changed,
+ * otherwise it's updated by .atomic_enable.
+ */
+ if (crtc->state->color_mgmt_changed &&
+ !crtc->state->active_changed)
+ vop_crtc_gamma_set(vop, crtc, old_crtc_state);
+}
+
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1075,6 +1180,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
+ /*
+ * If we have a GAMMA LUT in the state, then let's make sure
+ * it's updated. We might be coming out of suspend,
+ * which means the LUT internal memory needs to be re-written.
+ */
+ if (crtc->state->gamma_lut)
+ vop_crtc_gamma_set(vop, crtc, old_state);
+
mutex_lock(&vop->vop_lock);
WARN_ON(vop->event);
@@ -1085,9 +1198,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
return;
}
-
- pin_pol = BIT(DCLK_INVERT);
- pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
+ pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
BIT(HSYNC_POSITIVE) : 0;
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
BIT(VSYNC_POSITIVE) : 0;
@@ -1096,25 +1207,29 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
- VOP_REG_SET(vop, output, rgb_en, 1);
+ VOP_REG_SET(vop, output, rgb_dclk_pol, 1);
VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, rgb_en, 1);
break;
case DRM_MODE_CONNECTOR_eDP:
+ VOP_REG_SET(vop, output, edp_dclk_pol, 1);
VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, edp_en, 1);
break;
case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_REG_SET(vop, output, hdmi_dclk_pol, 1);
VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, hdmi_en, 1);
break;
case DRM_MODE_CONNECTOR_DSI:
+ VOP_REG_SET(vop, output, mipi_dclk_pol, 1);
VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, mipi_en, 1);
VOP_REG_SET(vop, output, mipi_dual_channel_en,
!!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- pin_pol &= ~BIT(DCLK_INVERT);
+ VOP_REG_SET(vop, output, dp_dclk_pol, 0);
VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, dp_en, 1);
break;
@@ -1191,6 +1306,26 @@ static void vop_wait_for_irq_handler(struct vop *vop)
synchronize_irq(vop->irq);
}
+static int vop_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ if (vop->lut_regs && crtc_state->color_mgmt_changed &&
+ crtc_state->gamma_lut) {
+ unsigned int len;
+
+ len = drm_color_lut_size(crtc_state->gamma_lut);
+ if (len != crtc->gamma_size) {
+ DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+ len, crtc->gamma_size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -1243,6 +1378,8 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
.mode_fixup = vop_crtc_mode_fixup,
+ .atomic_check = vop_crtc_atomic_check,
+ .atomic_begin = vop_crtc_atomic_begin,
.atomic_flush = vop_crtc_atomic_flush,
.atomic_enable = vop_crtc_atomic_enable,
.atomic_disable = vop_crtc_atomic_disable,
@@ -1361,6 +1498,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
.verify_crc_source = vop_crtc_verify_crc_source,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
@@ -1518,6 +1656,10 @@ static int vop_create_crtc(struct vop *vop)
goto err_cleanup_planes;
drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+ if (vop->lut_regs) {
+ drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size);
+ }
/*
* Create drm_planes for overlay windows with possible_crtcs restricted
@@ -1822,6 +1964,17 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop->regs))
return PTR_ERR(vop->regs);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ if (!vop_data->lut_size) {
+ DRM_DEV_ERROR(dev, "no gamma LUT size defined\n");
+ return -EINVAL;
+ }
+ vop->lut_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vop->lut_regs))
+ return PTR_ERR(vop->lut_regs);
+ }
+
vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
if (!vop->regsbak)
return -ENOMEM;
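[Editor's note] The gamma-LUT update in vop_crtc_gamma_set() above waits for dsp_lut_en to clear with readx_poll_timeout(). As a reminder of the macro's semantics: readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) repeatedly stores op(addr) into val, sleeping sleep_us between polls, and returns 0 once cond becomes true or -ETIMEDOUT after timeout_us. A generic sketch, where my_read_status, my_dev and MY_IDLE_BIT are made-up placeholders:

	#include <linux/iopoll.h>

	u32 val;
	int ret;

	/* poll my_read_status(my_dev) every 5 us, give up after 30 ms */
	ret = readx_poll_timeout(my_read_status, my_dev, val,
				 val & MY_IDLE_BIT, 5, 30 * 1000);
	if (ret)
		pr_err("timed out waiting for idle\n");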
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 2149a889c29d..0b3d18c457b2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -46,10 +46,15 @@ struct vop_modeset {
struct vop_output {
struct vop_reg pin_pol;
struct vop_reg dp_pin_pol;
+ struct vop_reg dp_dclk_pol;
struct vop_reg edp_pin_pol;
+ struct vop_reg edp_dclk_pol;
struct vop_reg hdmi_pin_pol;
+ struct vop_reg hdmi_dclk_pol;
struct vop_reg mipi_pin_pol;
+ struct vop_reg mipi_dclk_pol;
struct vop_reg rgb_pin_pol;
+ struct vop_reg rgb_dclk_pol;
struct vop_reg dp_en;
struct vop_reg edp_en;
struct vop_reg hdmi_en;
@@ -67,6 +72,7 @@ struct vop_common {
struct vop_reg dither_down_mode;
struct vop_reg dither_down_en;
struct vop_reg dither_up;
+ struct vop_reg dsp_lut_en;
struct vop_reg gate_en;
struct vop_reg mmu_en;
struct vop_reg out_mode;
@@ -170,6 +176,7 @@ struct vop_data {
const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
+ unsigned int lut_size;
#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
#define VOP_FEATURE_INTERNAL_RGB BIT(1)
@@ -294,8 +301,7 @@ enum dither_down_mode_sel {
enum vop_pol {
HSYNC_POSITIVE = 0,
VSYNC_POSITIVE = 1,
- DEN_NEGATIVE = 2,
- DCLK_INVERT = 3
+ DEN_NEGATIVE = 2
};
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 64aefa856896..8a4c9af0ba73 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 89e0bb0fe0ab..ae730275a34f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -9,6 +9,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -135,7 +136,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_LVDS);
if (IS_ERR(bridge))
return ERR_CAST(bridge);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index d1494be14471..7a9d979c8d5d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -16,6 +16,7 @@
#include "rockchip_drm_vop.h"
#include "rockchip_vop_reg.h"
+#include "rockchip_drm_drv.h"
#define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \
{ \
@@ -214,9 +215,11 @@ static const struct vop_modeset px30_modeset = {
};
static const struct vop_output px30_output = {
- .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1),
- .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 25),
+ .rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1),
+ .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2),
.rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
+ .mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25),
+ .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26),
.mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
};
@@ -598,6 +601,7 @@ static const struct vop_common rk3288_common = {
.dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
+ .dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
@@ -646,6 +650,7 @@ static const struct vop_data rk3288_vop = {
.output = &rk3288_output,
.win = rk3288_vop_win_data,
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
+ .lut_size = 1024,
};
static const int rk3368_vop_intrs[] = {
@@ -717,10 +722,14 @@ static const struct vop_win_data rk3368_vop_win_data[] = {
};
static const struct vop_output rk3368_output = {
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
@@ -764,11 +773,16 @@ static const struct vop_data rk3366_vop = {
};
static const struct vop_output rk3399_output = {
- .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
@@ -872,14 +886,18 @@ static const struct vop_modeset rk3328_modeset = {
};
static const struct vop_output rk3328_output = {
+ .rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31),
.rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
- .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 28),
+ .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28),
};
static const struct vop_misc rk3328_misc = {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 1a5153197fe9..461a7a8129f4 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/completion.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
@@ -68,6 +69,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
if (!entity->rq_list)
return -ENOMEM;
+ init_completion(&entity->entity_idle);
+
for (i = 0; i < num_rq_list; ++i)
entity->rq_list[i] = rq_list[i];
@@ -286,11 +289,12 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
*/
if (spsc_queue_count(&entity->job_queue)) {
if (sched) {
- /* Park the kernel for a moment to make sure it isn't processing
- * our enity.
+ /*
+ * Wait for thread to idle to make sure it isn't processing
+ * this entity.
*/
- kthread_park(sched->thread);
- kthread_unpark(sched->thread);
+ wait_for_completion(&entity->entity_idle);
+
}
if (entity->dependency) {
dma_fence_remove_callback(entity->dependency,
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 54977408f574..8b45c3a1b84e 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -128,13 +128,13 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
dma_fence_put(&fence->scheduled);
}
-const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_scheduled,
};
-const struct dma_fence_ops drm_sched_fence_ops_finished = {
+static const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_finished,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f39b97ed4ade..3c57e84222ca 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -47,6 +47,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/completion.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
@@ -134,6 +135,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
list_for_each_entry_continue(entity, &rq->entities, list) {
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
return entity;
}
@@ -144,6 +146,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
return entity;
}
@@ -496,8 +499,10 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
fence = sched->ops->run_job(s_job);
if (IS_ERR_OR_NULL(fence)) {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
s_job->s_fence->parent = NULL;
- dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
} else {
s_job->s_fence->parent = fence;
}
@@ -632,43 +637,45 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_cleanup_jobs - destroy finished jobs
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Remove all finished jobs from the mirror list and destroy them.
+ * Returns the next finished job from the mirror list (if there is one)
+ * ready for it to be destroyed.
*/
-static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
+ struct drm_sched_job *job;
unsigned long flags;
- /* Don't destroy jobs while the timeout worker is running */
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !cancel_delayed_work(&sched->work_tdr))
- return;
-
+ /*
+ * Don't destroy jobs while the timeout worker is running OR thread
+ * is being parked and hence assumed to not touch ring_mirror_list
+ */
+ if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !cancel_delayed_work(&sched->work_tdr)) ||
+ __kthread_should_park(sched->thread))
+ return NULL;
- while (!list_empty(&sched->ring_mirror_list)) {
- struct drm_sched_job *job;
+ spin_lock_irqsave(&sched->job_list_lock, flags);
- job = list_first_entry(&sched->ring_mirror_list,
+ job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- if (!dma_fence_is_signaled(&job->s_fence->finished))
- break;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- sched->ops->free_job(job);
+ } else {
+ job = NULL;
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
}
- /* queue timeout for next job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- drm_sched_start_timeout(sched);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ return job;
}
/**
@@ -708,17 +715,27 @@ static int drm_sched_main(void *param)
struct drm_sched_fence *s_fence;
struct drm_sched_job *sched_job;
struct dma_fence *fence;
+ struct drm_sched_job *cleanup_job = NULL;
wait_event_interruptible(sched->wake_up_worker,
- (drm_sched_cleanup_jobs(sched),
+ (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
(!drm_sched_blocked(sched) &&
(entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop()));
+ kthread_should_stop());
+
+ if (cleanup_job) {
+ sched->ops->free_job(cleanup_job);
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
+ }
if (!entity)
continue;
sched_job = drm_sched_entity_pop_job(entity);
+
+ complete(&entity->entity_idle);
+
if (!sched_job)
continue;
@@ -741,8 +758,9 @@ static int drm_sched_main(void *param)
r);
dma_fence_put(fence);
} else {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
- dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
drm_sched_process_job(NULL, &sched_job->cb);
}
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index aae88f8a016c..d2137342b371 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
test-drm_format.o test-drm_framebuffer.o \
- test-drm_damage_helper.o
+ test-drm_damage_helper.o test-drm_dp_mst_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
index 464753746013..1898de0b4a4d 100644
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -32,3 +32,5 @@ selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
+selftest(dp_mst_calc_pbn_mode, igt_dp_mst_calc_pbn_mode)
+selftest(dp_mst_sideband_msg_req_decode, igt_dp_mst_sideband_msg_req_decode)
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
new file mode 100644
index 000000000000..af2b2de65316
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for the DRM DP MST helpers
+ */
+
+#define PREFIX_STR "[drm_dp_mst_helper]"
+
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_print.h>
+
+#include "../drm_dp_mst_topology_internal.h"
+#include "test-drm_modeset_common.h"
+
+int igt_dp_mst_calc_pbn_mode(void *ignored)
+{
+ int pbn, i;
+ const struct {
+ int rate;
+ int bpp;
+ int expected;
+ } test_params[] = {
+ { 154000, 30, 689 },
+ { 234000, 30, 1047 },
+ { 297000, 24, 1063 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(test_params); i++) {
+ pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
+ test_params[i].bpp);
+ FAIL(pbn != test_params[i].expected,
+ "Expected PBN %d for clock %d bpp %d, got %d\n",
+ test_params[i].expected, test_params[i].rate,
+ test_params[i].bpp, pbn);
+ }
+
+ return 0;
+}
+
+static bool
+sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
+ const struct drm_dp_sideband_msg_req_body *out)
+{
+ const struct drm_dp_remote_i2c_read_tx *txin, *txout;
+ int i;
+
+ if (in->req_type != out->req_type)
+ return false;
+
+ switch (in->req_type) {
+ /*
+ * Compare struct members manually for request types which can't be
+ * compared simply using memcmp(). This is because said request types
+ * contain pointers to other allocated structs
+ */
+ case DP_REMOTE_I2C_READ:
+#define IN in->u.i2c_read
+#define OUT out->u.i2c_read
+ if (IN.num_bytes_read != OUT.num_bytes_read ||
+ IN.num_transactions != OUT.num_transactions ||
+ IN.port_number != OUT.port_number ||
+ IN.read_i2c_device_id != OUT.read_i2c_device_id)
+ return false;
+
+ for (i = 0; i < IN.num_transactions; i++) {
+ txin = &IN.transactions[i];
+ txout = &OUT.transactions[i];
+
+ if (txin->i2c_dev_id != txout->i2c_dev_id ||
+ txin->no_stop_bit != txout->no_stop_bit ||
+ txin->num_bytes != txout->num_bytes ||
+ txin->i2c_transaction_delay !=
+ txout->i2c_transaction_delay)
+ return false;
+
+ if (memcmp(txin->bytes, txout->bytes,
+ txin->num_bytes) != 0)
+ return false;
+ }
+ break;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_DPCD_WRITE:
+#define IN in->u.dpcd_write
+#define OUT out->u.dpcd_write
+ if (IN.dpcd_address != OUT.dpcd_address ||
+ IN.num_bytes != OUT.num_bytes ||
+ IN.port_number != OUT.port_number)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_I2C_WRITE:
+#define IN in->u.i2c_write
+#define OUT out->u.i2c_write
+ if (IN.port_number != OUT.port_number ||
+ IN.write_i2c_device_id != OUT.write_i2c_device_id ||
+ IN.num_bytes != OUT.num_bytes)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ default:
+ return memcmp(in, out, sizeof(*in)) == 0;
+ }
+
+ return true;
+}
+
+static bool
+sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
+{
+ struct drm_dp_sideband_msg_req_body out = {0};
+ struct drm_printer p = drm_err_printer(PREFIX_STR);
+ struct drm_dp_sideband_msg_tx txmsg;
+ int i, ret;
+
+ drm_dp_encode_sideband_req(in, &txmsg);
+ ret = drm_dp_decode_sideband_req(&txmsg, &out);
+ if (ret < 0) {
+ drm_printf(&p, "Failed to decode sideband request: %d\n",
+ ret);
+ return false;
+ }
+
+ if (!sideband_msg_req_equal(in, &out)) {
+ drm_printf(&p, "Encode/decode failed, expected:\n");
+ drm_dp_dump_sideband_msg_req_body(in, 1, &p);
+ drm_printf(&p, "Got:\n");
+ drm_dp_dump_sideband_msg_req_body(&out, 1, &p);
+ return false;
+ }
+
+ switch (in->req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(out.u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < out.u.i2c_read.num_transactions; i++)
+ kfree(out.u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(out.u.i2c_write.bytes);
+ break;
+ }
+
+ /* Clear everything but the req_type for the input */
+ memset(&in->u, 0, sizeof(in->u));
+
+ return true;
+}
+
+int igt_dp_mst_sideband_msg_req_decode(void *unused)
+{
+ struct drm_dp_sideband_msg_req_body in = { 0 };
+ u8 data[] = { 0xff, 0x0, 0xdd };
+ int i;
+
+#define DO_TEST() FAIL_ON(!sideband_msg_req_encode_decode(&in))
+
+ in.req_type = DP_ENUM_PATH_RESOURCES;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_POWER_UP_PHY;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_POWER_DOWN_PHY;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_ALLOCATE_PAYLOAD;
+ in.u.allocate_payload.number_sdp_streams = 3;
+ for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
+ in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
+ DO_TEST();
+ in.u.allocate_payload.port_number = 0xf;
+ DO_TEST();
+ in.u.allocate_payload.vcpi = 0x7f;
+ DO_TEST();
+ in.u.allocate_payload.pbn = U16_MAX;
+ DO_TEST();
+
+ in.req_type = DP_QUERY_PAYLOAD;
+ in.u.query_payload.port_number = 0xf;
+ DO_TEST();
+ in.u.query_payload.vcpi = 0x7f;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_DPCD_READ;
+ in.u.dpcd_read.port_number = 0xf;
+ DO_TEST();
+ in.u.dpcd_read.dpcd_address = 0xfedcb;
+ DO_TEST();
+ in.u.dpcd_read.num_bytes = U8_MAX;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_DPCD_WRITE;
+ in.u.dpcd_write.port_number = 0xf;
+ DO_TEST();
+ in.u.dpcd_write.dpcd_address = 0xfedcb;
+ DO_TEST();
+ in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
+ in.u.dpcd_write.bytes = data;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_I2C_READ;
+ in.u.i2c_read.port_number = 0xf;
+ DO_TEST();
+ in.u.i2c_read.read_i2c_device_id = 0x7f;
+ DO_TEST();
+ in.u.i2c_read.num_transactions = 3;
+ in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
+ for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
+ in.u.i2c_read.transactions[i].bytes = data;
+ in.u.i2c_read.transactions[i].num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
+ in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
+ }
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_I2C_WRITE;
+ in.u.i2c_write.port_number = 0xf;
+ DO_TEST();
+ in.u.i2c_write.write_i2c_device_id = 0x7f;
+ DO_TEST();
+ in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_write.bytes = data;
+ DO_TEST();
+
+#undef DO_TEST
+ return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
index 74d5561a862b..2d29ea6f92e2 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -126,7 +126,7 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
.handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
}
},
-{ .buffer_created = 0, .name = "NV12 Invalid modifier/misssing DRM_MODE_FB_MODIFIERS flag",
+{ .buffer_created = 0, .name = "NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 388f9844f4ba..9aabe82dcd3a 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -854,7 +854,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (start > 0) {
node = __drm_mm_interval_first(mm, 0, start - 1);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node before start: node=%llx+%llu, start=%llx\n",
node->start, node->size, start);
return false;
@@ -863,7 +863,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (end < U64_MAX) {
node = __drm_mm_interval_first(mm, end, U64_MAX);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node after end: node=%llx+%llu, end=%llx\n",
node->start, node->size, end);
return false;
@@ -1156,12 +1156,12 @@ static void show_holes(const struct drm_mm *mm, int count)
struct drm_mm_node *next = list_next_entry(hole, node_list);
const char *node1 = NULL, *node2 = NULL;
- if (hole->allocated)
+ if (drm_mm_node_allocated(hole))
node1 = kasprintf(GFP_KERNEL,
"[%llx + %lld, color=%ld], ",
hole->start, hole->size, hole->color);
- if (next->allocated)
+ if (drm_mm_node_allocated(next))
node2 = kasprintf(GFP_KERNEL,
", [%llx + %lld, color=%ld]",
next->start, next->size, next->color);
@@ -1900,18 +1900,18 @@ static void separate_adjacent_colors(const struct drm_mm_node *node,
u64 *start,
u64 *end)
{
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
++*start;
node = list_next_entry(node, node_list);
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
--*end;
}
static bool colors_abutt(const struct drm_mm_node *node)
{
if (!drm_mm_hole_follows(node) &&
- list_next_entry(node, node_list)->allocated) {
+ drm_mm_node_allocated(list_next_entry(node, node_list))) {
pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
node->color, node->start, node->size,
list_next_entry(node, node_list)->color,
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
index 8c76f09c12d1..0fcb8bbc6a1b 100644
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -39,5 +39,7 @@ int igt_damage_iter_damage_one_intersect(void *ignored);
int igt_damage_iter_damage_one_outside(void *ignored);
int igt_damage_iter_damage_src_moved(void *ignored);
int igt_damage_iter_damage_not_visible(void *ignored);
+int igt_dp_mst_calc_pbn_mode(void *ignored);
+int igt_dp_mst_sideband_msg_req_decode(void *ignored);
#endif
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 0bf7c332cf0b..ea64c1dcaf63 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -47,7 +47,7 @@ struct dma_pixmap {
void *base;
};
-/**
+/*
* STI Cursor structure
*
* @sti_plane: sti_plane structure
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index e55870190bf5..68289b0b063a 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
@@ -65,7 +66,7 @@ static struct dvo_config rgb_24bit_de_cfg = {
.awg_fwgen_fct = sti_awg_generate_code_data_enable_mode,
};
-/**
+/*
* STI digital video output structure
*
* @dev: driver device
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 8e926cd6a1c8..11595c748844 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -103,7 +103,7 @@ struct sti_gdp_node_list {
dma_addr_t btm_field_paddr;
};
-/**
+/*
* STI GDP structure
*
* @sti_plane: sti_plane structure
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 94e404f13234..8f7bf33815fd 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
@@ -230,7 +231,7 @@ static const struct sti_hda_video_config hda_supported_modes[] = {
AWGi_720x480p_60, NN_720x480p_60, VID_ED}
};
-/**
+/*
* STI hd analog structure
*
* @dev: driver device
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 9862c322f0c4..814560ead4e1 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -9,11 +9,12 @@
#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -333,7 +334,6 @@ static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
* Helper to concatenate infoframe in 32 bits word
*
* @ptr: pointer on the hdmi internal structure
- * @data: infoframe to write
* @size: size to write
*/
static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
@@ -543,13 +543,14 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
return 0;
}
+#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */
+
/**
* Software reset of the hdmi subsystem
*
* @hdmi: pointer on the hdmi internal structure
*
*/
-#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */
static void hdmi_swreset(struct sti_hdmi *hdmi)
{
u32 val;
@@ -1256,6 +1257,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct drm_encoder *encoder;
struct sti_hdmi_connector *connector;
+ struct cec_connector_info conn_info;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
int err;
@@ -1318,6 +1320,14 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ cec_fill_conn_info_from_drm(&conn_info, drm_connector);
+ hdmi->notifier = cec_notifier_conn_register(&hdmi->dev, NULL,
+ &conn_info);
+ if (!hdmi->notifier) {
+ hdmi->drm_connector = NULL;
+ return -ENOMEM;
+ }
+
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
@@ -1331,6 +1341,9 @@ err_sysfs:
static void sti_hdmi_unbind(struct device *dev,
struct device *master, void *data)
{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+ cec_notifier_conn_unregister(hdmi->notifier);
}
static const struct component_ops sti_hdmi_ops = {
@@ -1436,10 +1449,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
goto release_adapter;
}
- hdmi->notifier = cec_notifier_get(&pdev->dev);
- if (!hdmi->notifier)
- goto release_adapter;
-
hdmi->reset = devm_reset_control_get(dev, "hdmi");
/* Take hdmi out of reset */
if (!IS_ERR(hdmi->reset))
@@ -1459,14 +1468,11 @@ static int sti_hdmi_remove(struct platform_device *pdev)
{
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
- cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
-
i2c_put_adapter(hdmi->ddc_adapt);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
- cec_notifier_put(hdmi->notifier);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index aba79c172512..5767e93dd1cd 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -157,9 +157,9 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
*
* @tvout: tvout structure
* @reg: register to set
- * @cr_r:
- * @y_g:
- * @cb_b:
+ * @cr_r: red chroma or red order
+ * @y_g: y or green order
+ * @cb_b: blue chroma or blue order
*/
static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
u32 cr_r, u32 y_g, u32 cb_b)
@@ -214,7 +214,7 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
* @tvout: tvout structure
* @reg: register to set
* @main_path: main or auxiliary path
- * @sel_input: selected_input (main/aux + conv)
+ * @video_out: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
int reg,
@@ -251,7 +251,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
*
* @tvout: tvout structure
* @reg: register to set
- * @in_vid_signed: used video input format
+ * @in_vid_fmt: used video input format
*/
static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
int reg, u32 in_vid_fmt)
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index ef4009f11396..0b17ac8a3faa 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -121,7 +121,7 @@ struct sti_vtg_sync_params {
u32 vsync_off_bot;
};
-/**
+/*
* STI VTG structure
*
* @regs: register mapping
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index a03a642c147c..514efefb0016 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -260,8 +260,11 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
/* Compute requested pll out */
bpp = mipi_dsi_pixel_format_to_bpp(format);
pll_out_khz = mode->clock * bpp / lanes;
+
/* Add 20% to pll out to be higher than pixel bw (burst mode only) */
- pll_out_khz = (pll_out_khz * 12) / 10;
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ pll_out_khz = (pll_out_khz * 12) / 10;
+
if (pll_out_khz > dsi->lane_max_kbps) {
pll_out_khz = dsi->lane_max_kbps;
DRM_WARN("Warning max phy mbps is used\n");
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 3ab4fbf8eb0d..5b51298921cf 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -1040,6 +1041,36 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static void ltdc_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Set the pinctrl to its sleep state, whatever the encoder type */
+ pinctrl_pm_select_sleep_state(ddev->dev);
+}
+
+static void ltdc_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /*
+ * Set the pinctrl to its default state only for the DPI encoder
+ * type. Other types such as DSI don't need pinctrl because of the
+ * internal bridge (the signals do not come out of the chipset).
+ */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPI)
+ pinctrl_pm_select_default_state(ddev->dev);
+}
+
+static const struct drm_encoder_helper_funcs ltdc_encoder_helper_funcs = {
+ .disable = ltdc_encoder_disable,
+ .enable = ltdc_encoder_enable,
+};
+
static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
{
struct drm_encoder *encoder;
@@ -1055,6 +1086,8 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
+ drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
+
ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
drm_encoder_cleanup(encoder);
@@ -1236,8 +1269,8 @@ int ltdc_load(struct drm_device *ddev)
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
if (panel[i]) {
- bridge[i] = drm_panel_bridge_add(panel[i],
- DRM_MODE_CONNECTOR_DPI);
+ bridge[i] = drm_panel_bridge_add_typed(panel[i],
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge[i])) {
DRM_ERROR("panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge[i]);
@@ -1280,6 +1313,8 @@ int ltdc_load(struct drm_device *ddev)
clk_disable_unprepare(ldev->pixel_clk);
+ pinctrl_pm_select_sleep_state(ddev->dev);
+
pm_runtime_enable(ddev->dev);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index eb8071a4d6d0..a7c4654445c7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -490,6 +490,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
+ struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
struct resource *res;
@@ -629,8 +630,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops,
- hdmi, "sun4i", CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC);
+ hdmi, "sun4i", CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_cleanup_connector;
@@ -649,6 +649,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
"Couldn't initialise the HDMI connector\n");
goto err_cleanup_connector;
}
+ cec_fill_conn_info_from_drm(&conn_info, &hdmi->connector);
+ cec_s_conn_info(hdmi->cec_adap, &conn_info);
/* There is no HPD interrupt, so we need to poll the controller */
hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 7fbf425acb55..25ab2ef6d545 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index aac56983f208..e74b9eddca01 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index b89439ed210d..42651d737c55 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -16,6 +16,7 @@
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 1636344ba9ec..c958ca9bae63 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -365,8 +366,7 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
- u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+ u16 delay = mode->vtotal - (mode->vsync_start - mode->vdisplay) + 1;
if (delay > mode->vtotal)
delay = delay % mode->vtotal;
@@ -437,9 +437,9 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
- } else if ((mode->hsync_end - mode->hdisplay) > 20) {
+ } else if ((mode->hsync_start - mode->hdisplay) > 20) {
/* Maaaaaagic */
- u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
+ u16 drq = (mode->hsync_start - mode->hdisplay) - 20;
drq *= mipi_dsi_pixel_format_to_bpp(device->format);
drq /= 32;
@@ -569,11 +569,12 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
- * The frontporch is set using a blanking packet (4
- * bytes + payload + 2 bytes). Its minimal size is
- * therefore 6 bytes
+ * The frontporch is set using a sync event (4 bytes)
+ * and two blanking packets (each one is 4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore
+ * 16 bytes
*/
-#define HFP_PACKET_OVERHEAD 6
+#define HFP_PACKET_OVERHEAD 16
hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
@@ -831,8 +832,8 @@ static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
u32 pkt = msg->type;
if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
- pkt |= ((msg->tx_len + 1) & 0xffff) << 8;
- pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16;
+ pkt |= ((msg->tx_len) & 0xffff) << 8;
+ pkt |= (((msg->tx_len) >> 8) & 0xffff) << 16;
} else {
pkt |= (((u8 *)msg->tx_buf)[0] << 8);
if (msg->tx_len > 1)
@@ -1100,6 +1101,12 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
+ dsi->regulator = devm_regulator_get(dev, "vcc-dsi");
+ if (IS_ERR(dsi->regulator)) {
+ dev_err(dev, "Couldn't get VCC-DSI supply\n");
+ return PTR_ERR(dsi->regulator);
+ }
+
dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
&sun6i_dsi_regmap_config);
if (IS_ERR(dsi->regs)) {
@@ -1173,6 +1180,13 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ err = regulator_enable(dsi->regulator);
+ if (err) {
+ dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+ return err;
+ }
reset_control_deassert(dsi->reset);
clk_prepare_enable(dsi->mod_clk);
@@ -1205,6 +1219,7 @@ static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
clk_disable_unprepare(dsi->mod_clk);
reset_control_assert(dsi->reset);
+ regulator_disable(dsi->regulator);
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 5c3ad5be0690..3f4846f581ef 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -23,6 +23,7 @@ struct sun6i_dsi {
struct clk *bus_clk;
struct clk *mod_clk;
struct regmap *regs;
+ struct regulator *regulator;
struct reset_control *reset;
struct phy *dphy;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index a44dca4b0219..e8a317d5ba19 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -226,6 +226,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
sun8i_hdmi_phy_init(hdmi->phy);
plat_data->mode_valid = hdmi->quirks->mode_valid;
+ plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
platform_set_drvdata(pdev, hdmi);
@@ -300,6 +301,7 @@ static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_h6,
+ .use_drm_infoframe = true,
};
static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d707c9171824..8e64945167e9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -179,6 +179,7 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
const struct drm_display_mode *mode);
unsigned int set_rate : 1;
+ unsigned int use_drm_infoframe : 1;
};
struct sun8i_dw_hdmi {
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 1d1269fde3c1..5043dcaf1cf9 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -9,7 +9,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 33c463e8d49f..d6cf202414f0 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -5,6 +5,7 @@ tegra-drm-y := \
drm.o \
gem.o \
fb.o \
+ dp.o \
hub.o \
plane.o \
dc.o \
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fbf57bc3cdab..5b1f9ff97576 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.swap = state->swap;
for (i = 0; i < fb->format->num_planes; i++) {
- struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
-
- window.base[i] = bo->paddr + fb->offsets[i];
+ window.base[i] = state->iova[i] + fb->offsets[i];
/*
* Tegra uses a shared stride for UV planes. Framebuffers are
@@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_disable = tegra_plane_atomic_disable,
.atomic_update = tegra_plane_atomic_update,
@@ -869,11 +869,11 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
return;
}
- value |= (bo->paddr >> 10) & 0x3fffff;
+ value |= (bo->iova >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (bo->paddr >> 32) & 0x3;
+ value = (bo->iova >> 32) & 0x3;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
@@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
@@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client)
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
- dc->group = host1x_client_iommu_attach(client, true);
- if (IS_ERR(dc->group)) {
- err = PTR_ERR(dc->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
return err;
}
@@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent device.
+ */
+ client->dev->dma_parms = client->parent->dma_parms;
+
return 0;
cleanup:
@@ -2083,7 +2090,7 @@ cleanup:
if (!IS_ERR(primary))
drm_plane_cleanup(primary);
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return err;
@@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
@@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 0c4d17851f47..3d8ddccd758f 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -90,8 +90,6 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
const struct tegra_dc_soc_info *soc;
-
- struct iommu_group *group;
};
static inline struct tegra_dc *
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
new file mode 100644
index 000000000000..70dfb7d1dec5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp.h"
+
+static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
+
+static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+ caps->alternate_scrambler_reset = false;
+}
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src)
+{
+ dest->enhanced_framing = src->enhanced_framing;
+ dest->tps3_supported = src->tps3_supported;
+ dest->fast_training = src->fast_training;
+ dest->channel_coding = src->channel_coding;
+ dest->alternate_scrambler_reset = src->alternate_scrambler_reset;
+}
+
+static void drm_dp_link_reset(struct drm_dp_link *link)
+{
+ unsigned int i;
+
+ if (!link)
+ return;
+
+ link->revision = 0;
+ link->max_rate = 0;
+ link->max_lanes = 0;
+
+ drm_dp_link_caps_reset(&link->caps);
+ link->aux_rd_interval.cr = 0;
+ link->aux_rd_interval.ce = 0;
+ link->edp = 0;
+
+ link->rate = 0;
+ link->lanes = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = 0;
+}
+
+/**
+ * drm_dp_link_add_rate() - add a rate to the list of supported rates
+ * @link: the link to add the rate to
+ * @rate: the rate to add
+ *
+ * Add a link rate to the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - ENOSPC if the maximum number of supported rates has been reached
+ * - EEXIST if the link already supports this rate
+ *
+ * See also:
+ * drm_dp_link_remove_rate()
+ */
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i, pivot;
+
+ if (link->num_rates == DP_MAX_SUPPORTED_RATES)
+ return -ENOSPC;
+
+ for (pivot = 0; pivot < link->num_rates; pivot++)
+ if (rate <= link->rates[pivot])
+ break;
+
+ if (pivot != link->num_rates && rate == link->rates[pivot])
+ return -EEXIST;
+
+ for (i = link->num_rates; i > pivot; i--)
+ link->rates[i] = link->rates[i - 1];
+
+ link->rates[pivot] = rate;
+ link->num_rates++;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_remove_rate() - remove a rate from the list of supported rates
+ * @link: the link from which to remove the rate
+ * @rate: the rate to remove
+ *
+ * Removes a link rate from the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - EINVAL if the specified rate is not among the supported rates
+ *
+ * See also:
+ * drm_dp_link_add_rate()
+ */
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < link->num_rates; i++)
+ if (rate == link->rates[i])
+ break;
+
+ if (i == link->num_rates)
+ return -EINVAL;
+
+ link->num_rates--;
+
+ while (i < link->num_rates) {
+ link->rates[i] = link->rates[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_update_rates() - normalize the supported link rates array
+ * @link: the link for which to normalize the supported link rates
+ *
+ * Users should call this function after they've manually modified the array
+ * of supported link rates. This function removes any stale entries, compacts
+ * the array and updates the supported link rate count. Note that calling the
+ * drm_dp_link_remove_rate() function already does this janitorial work.
+ *
+ * See also:
+ * drm_dp_link_add_rate(), drm_dp_link_remove_rate()
+ */
+void drm_dp_link_update_rates(struct drm_dp_link *link)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < link->num_rates; i++) {
+ if (link->rates[i] != 0)
+ link->rates[count++] = link->rates[i];
+ }
+
+ for (i = count; i < link->num_rates; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = count;
+}
+
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
+ unsigned int rd_interval;
+ int err;
+
+ drm_dp_link_reset(link);
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
+ if (err < 0)
+ return err;
+
+ link->revision = dpcd[DP_DPCD_REV];
+ link->max_rate = drm_dp_max_link_rate(dpcd);
+ link->max_lanes = drm_dp_max_lane_count(dpcd);
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
+
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ link->caps.alternate_scrambler_reset = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
+ if (err < 0)
+ return err;
+
+ if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
+ DRM_ERROR("unsupported eDP version: %02x\n", value);
+ else
+ link->edp = drm_dp_edp_revisions[value];
+ }
+
+ /*
+ * The DPCD stores the AUX read interval in units of 4 ms. There are
+ * two special cases:
+ *
+ * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
+ * and channel equalization should use 100 us or 400 us AUX read
+ * intervals, respectively
+ *
+ * 2) for DP v1.4 and above, clock recovery should always use 100 us
+ * AUX read intervals
+ */
+ rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+
+ if (rd_interval > 4) {
+ DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
+ rd_interval);
+ rd_interval = 4;
+ }
+
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
+ link->aux_rd_interval.cr = 100;
+
+ if (rd_interval == 0)
+ link->aux_rd_interval.ce = 400;
+
+ link->rate = link->max_rate;
+ link->lanes = link->max_lanes;
+
+ /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
+ if (link->edp >= 0x14) {
+ u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
+ unsigned int i;
+ u16 rate;
+
+ err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
+ supported_rates,
+ sizeof(supported_rates));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
+ rate = supported_rates[i * 2 + 1] << 8 |
+ supported_rates[i * 2 + 0];
+
+ drm_dp_link_add_rate(link, rate * 200);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[2], value;
+ int err;
+
+ if (link->ops && link->ops->configure) {
+ err = link->ops->configure(link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+ }
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->lanes;
+
+ if (link->caps.enhanced_framing)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ if (link->caps.channel_coding)
+ value = DP_SET_ANSI_8B10B;
+ else
+ value = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+ if (err < 0)
+ return err;
+
+ if (link->caps.alternate_scrambler_reset) {
+ err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_choose() - choose the lowest possible configuration for a mode
+ * @link: DRM DP link object
+ * @mode: DRM display mode
+ * @info: DRM display information
+ *
+ * According to the eDP specification, a source should select a configuration
+ * with the lowest number of lanes and the lowest possible link rate that can
+ * match the bitrate requirements of a video mode. However it must ensure not
+ * to exceed the capabilities of the sink.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info)
+{
+ /* available link symbol clock rates */
+ static const unsigned int rates[3] = { 162000, 270000, 540000 };
+ /* available number of lanes */
+ static const unsigned int lanes[3] = { 1, 2, 4 };
+ unsigned long requirement, capacity;
+ unsigned int rate = link->max_rate;
+ unsigned int i, j;
+
+ /* bandwidth requirement */
+ requirement = mode->clock * info->bpc * 3;
+
+ for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
+ for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
+ /*
+ * Capacity for this combination of lanes and rate,
+ * factoring in the ANSI 8B/10B encoding.
+ *
+ * Link rates in the DRM DP helpers are really link
+ * symbol frequencies, so a tenth of the actual rate
+ * of the link.
+ */
+ capacity = lanes[i] * (rates[j] * 10) * 8 / 10;
+
+ if (capacity >= requirement) {
+ DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
+ lanes[i], rates[j], requirement,
+ capacity);
+ link->lanes = lanes[i];
+ link->rate = rates[j];
+ return 0;
+ }
+ }
+ }
+
+ return -ERANGE;
+}
+
+/**
+ * DOC: Link training
+ *
+ * These functions contain common logic and helpers to implement DisplayPort
+ * link training.
+ */
+
+/**
+ * drm_dp_link_train_init() - initialize DisplayPort link training state
+ * @train: DisplayPort link training state
+ */
+void drm_dp_link_train_init(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ request->voltage_swing[i] = 0;
+ adjust->voltage_swing[i] = 0;
+
+ request->pre_emphasis[i] = 0;
+ adjust->pre_emphasis[i] = 0;
+
+ request->post_cursor[i] = 0;
+ adjust->post_cursor[i] = 0;
+ }
+
+ train->pattern = DP_TRAINING_PATTERN_DISABLE;
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int drm_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct drm_dp_link_train_set *request = &link->train.request;
+ unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
+ struct drm_dp_aux *aux = link->aux;
+ u8 values[4], pattern = 0;
+ int err;
+
+ err = link->ops->apply_training(link);
+ if (err < 0) {
+ DRM_ERROR("failed to apply link training: %d\n", err);
+ return err;
+ }
+
+ vs = request->voltage_swing;
+ pe = request->pre_emphasis;
+ pc = request->post_cursor;
+
+ /* write currently selected voltage-swing and pre-emphasis levels */
+ for (i = 0; i < lanes; i++)
+ values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
+ DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
+ if (err < 0) {
+ DRM_ERROR("failed to set training parameters: %d\n", err);
+ return err;
+ }
+
+ /* write currently selected post-cursor level (if supported) */
+ if (link->revision >= 0x12 && link->rate == 540000) {
+ values[0] = values[1] = 0;
+
+ for (i = 0; i < lanes; i++)
+ values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
+ DIV_ROUND_UP(lanes, 2));
+ if (err < 0) {
+ DRM_ERROR("failed to set post-cursor: %d\n", err);
+ return err;
+ }
+ }
+
+ /* write link pattern */
+ if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
+ pattern |= DP_LINK_SCRAMBLING_DISABLE;
+
+ pattern |= link->train.pattern;
+
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
+ if (err < 0) {
+ DRM_ERROR("failed to set training pattern: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_wait(struct drm_dp_link *link)
+{
+ unsigned long min = 0;
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_1:
+ min = link->aux_rd_interval.cr;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ min = link->aux_rd_interval.ce;
+ break;
+
+ default:
+ break;
+ }
+
+ if (min > 0)
+ usleep_range(min, 2 * min);
+}
+
+static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct drm_dp_link_train_set *adjust = &link->train.adjust;
+ unsigned int i;
+
+ for (i = 0; i < link->lanes; i++) {
+ adjust->voltage_swing[i] =
+ drm_dp_get_adjust_request_voltage(status, i) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+ adjust->pre_emphasis[i] =
+ drm_dp_get_adjust_request_pre_emphasis(status, i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ adjust->post_cursor[i] =
+ drm_dp_get_adjust_request_post_cursor(status, i);
+ }
+}
+
+static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ if (request->voltage_swing[i] != adjust->voltage_swing[i])
+ request->voltage_swing[i] = adjust->voltage_swing[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->pre_emphasis[i] != adjust->pre_emphasis[i])
+ request->pre_emphasis[i] = adjust->pre_emphasis[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->post_cursor[i] != adjust->post_cursor[i])
+ request->post_cursor[i] = adjust->post_cursor[i];
+}
+
+static int drm_dp_link_recover_clock(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.clock_recovered = true;
+
+ return 0;
+}
+
+static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start clock recovery using training pattern 1 */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_recover_clock(link);
+ if (err < 0) {
+ DRM_ERROR("failed to recover clock: %d\n", err);
+ return err;
+ }
+
+ if (link->train.clock_recovered)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
+{
+ struct drm_dp_aux *aux = link->aux;
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ return 0;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.channel_equalized = true;
+
+ return 0;
+}
+
+static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start channel equalization using pattern 2 or 3 */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_equalize_channel(link);
+ if (err < 0) {
+ DRM_ERROR("failed to equalize channel: %d\n", err);
+ return err;
+ }
+
+ if (link->train.channel_equalized)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_downgrade(struct drm_dp_link *link)
+{
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+
+ case 270000:
+ link->rate = 162000;
+ break;
+
+ case 540000:
+ link->rate = 270000;
+ return 0;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_disable(struct drm_dp_link *link)
+{
+ int err;
+
+ link->train.pattern = DP_TRAINING_PATTERN_DISABLE;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ DRM_ERROR("failed to disable link training: %d\n", err);
+}
+
+static int drm_dp_link_train_full(struct drm_dp_link *link)
+{
+ int err;
+
+retry:
+ DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ err = drm_dp_link_clock_recovery(link);
+ if (err < 0) {
+ DRM_ERROR("clock recovery failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ DRM_ERROR("clock recovery failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("clock recovery succeeded\n");
+
+ err = drm_dp_link_channel_equalization(link);
+ if (err < 0) {
+ DRM_ERROR("channel equalization failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ DRM_ERROR("channel equalization failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("channel equalization succeeded\n");
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+static int drm_dp_link_train_fast(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ /* transmit training pattern 1 for 500 microseconds */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ /* transmit training pattern 2 or 3 for 500 microseconds */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery failed\n");
+ err = -EIO;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ DRM_ERROR("channel equalization failed\n");
+ err = -EIO;
+ }
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+/**
+ * drm_dp_link_train() - perform DisplayPort link training
+ * @link: a DP link object
+ *
+ * Uses the context stored in the DP link object to perform link training. It
+ * is expected that drivers will call drm_dp_link_probe() to obtain the link
+ * capabilities before performing link training.
+ *
+ * If the sink supports fast link training (no AUX CH handshake) and valid
+ * training settings are available, this function will try to perform fast
+ * link training and fall back to full link training on failure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_train(struct drm_dp_link *link)
+{
+ int err;
+
+ drm_dp_link_train_init(&link->train);
+
+ if (link->caps.fast_training) {
+ if (drm_dp_link_train_valid(&link->train)) {
+ err = drm_dp_link_train_fast(link);
+ if (err < 0)
+ DRM_ERROR("fast link training failed: %d\n",
+ err);
+ else
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("training parameters not available\n");
+ }
+ } else {
+ DRM_DEBUG_KMS("fast link training not supported\n");
+ }
+
+ err = drm_dp_link_train_full(link);
+ if (err < 0)
+ DRM_ERROR("full link training failed: %d\n", err);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
new file mode 100644
index 000000000000..cb12ed0c54e7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation.
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#ifndef DRM_TEGRA_DP_H
+#define DRM_TEGRA_DP_H 1
+
+#include <linux/types.h>
+
+struct drm_display_info;
+struct drm_display_mode;
+struct drm_dp_aux;
+struct drm_dp_link;
+
+/**
+ * struct drm_dp_link_caps - DP link capabilities
+ */
+struct drm_dp_link_caps {
+ /**
+ * @enhanced_framing:
+ *
+ * enhanced framing capability (mandatory as of DP 1.2)
+ */
+ bool enhanced_framing;
+
+ /**
+ * @tps3_supported:
+ *
+ * training pattern sequence 3 supported for equalization
+ */
+ bool tps3_supported;
+
+ /**
+ * @fast_training:
+ *
+ * AUX CH handshake not required for link training
+ */
+ bool fast_training;
+
+ /**
+ * @channel_coding:
+ *
+ * ANSI 8B/10B channel coding capability
+ */
+ bool channel_coding;
+
+ /**
+ * @alternate_scrambler_reset:
+ *
+ * eDP alternate scrambler reset capability
+ */
+ bool alternate_scrambler_reset;
+};
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src);
+
+/**
+ * struct drm_dp_link_ops - DP link operations
+ */
+struct drm_dp_link_ops {
+ /**
+ * @apply_training:
+ */
+ int (*apply_training)(struct drm_dp_link *link);
+
+ /**
+ * @configure:
+ */
+ int (*configure)(struct drm_dp_link *link);
+};
+
+#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0)
+#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3)
+#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2))
+
+/**
+ * struct drm_dp_link_train_set - link training settings
+ * @voltage_swing: per-lane voltage swing
+ * @pre_emphasis: per-lane pre-emphasis
+ * @post_cursor: per-lane post-cursor
+ */
+struct drm_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ unsigned int post_cursor[4];
+};
+
+/**
+ * struct drm_dp_link_train - link training state information
+ * @request: currently requested settings
+ * @adjust: adjustments requested by sink
+ * @pattern: currently requested training pattern
+ * @clock_recovered: flag to track if clock recovery has completed
+ * @channel_equalized: flag to track if channel equalization has completed
+ */
+struct drm_dp_link_train {
+ struct drm_dp_link_train_set request;
+ struct drm_dp_link_train_set adjust;
+
+ unsigned int pattern;
+
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+/**
+ * struct drm_dp_link - DP link capabilities and configuration
+ * @revision: DP specification revision supported on the link
+ * @max_rate: maximum clock rate supported on the link
+ * @max_lanes: maximum number of lanes supported on the link
+ * @caps: capabilities supported on the link (see &drm_dp_link_caps)
+ * @aux_rd_interval: AUX read interval to use for training (in microseconds)
+ * @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
+ * @rate: currently configured link rate
+ * @lanes: currently configured number of lanes
+ * @rates: additional supported link rates in kHz (eDP 1.4)
+ * @num_rates: number of additional supported link rates (eDP 1.4)
+ */
+struct drm_dp_link {
+ unsigned char revision;
+ unsigned int max_rate;
+ unsigned int max_lanes;
+
+ struct drm_dp_link_caps caps;
+
+ /**
+ * @cr: clock recovery read interval
+ * @ce: channel equalization read interval
+ */
+ struct {
+ unsigned int cr;
+ unsigned int ce;
+ } aux_rd_interval;
+
+ unsigned char edp;
+
+ unsigned int rate;
+ unsigned int lanes;
+
+ unsigned long rates[DP_MAX_SUPPORTED_RATES];
+ unsigned int num_rates;
+
+ /**
+ * @ops: DP link operations
+ */
+ const struct drm_dp_link_ops *ops;
+
+ /**
+ * @aux: DP AUX channel
+ */
+ struct drm_dp_aux *aux;
+
+ /**
+ * @train: DP link training state
+ */
+ struct drm_dp_link_train train;
+};
+
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate);
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
+void drm_dp_link_update_rates(struct drm_dp_link *link);
+
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info);
+
+void drm_dp_link_train_init(struct drm_dp_link_train *train);
+int drm_dp_link_train(struct drm_dp_link *link);
+
+#endif
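
[Editor's note: the helpers added in dp.c/dp.h above are wired up to the Tegra SOR elsewhere, outside this excerpt. The following is a minimal, hypothetical sketch of how an output driver might use them end to end; the my_output wrapper, the my_apply_training() callback and the surrounding error handling are assumptions, not part of this patch.]

/* Hypothetical usage sketch for the drm_dp_link helpers added above. */
#include <drm/drm_dp_helper.h>

#include "dp.h"

struct my_output {
	struct drm_dp_aux *aux;
	struct drm_dp_link link;
};

/* Assumed callback: program link->train.request.* into encoder registers. */
static int my_apply_training(struct drm_dp_link *link)
{
	/* hardware-specific lane drive settings would be written here */
	return 0;
}

static const struct drm_dp_link_ops my_link_ops = {
	.apply_training = my_apply_training,
};

static int my_output_enable(struct my_output *out,
			    const struct drm_display_mode *mode,
			    const struct drm_display_info *info)
{
	struct drm_dp_link *link = &out->link;
	int err;

	link->ops = &my_link_ops;
	link->aux = out->aux;

	/* Read the DPCD capabilities, then wake the sink. */
	err = drm_dp_link_probe(out->aux, link);
	if (err < 0)
		return err;

	err = drm_dp_link_power_up(out->aux, link);
	if (err < 0)
		return err;

	/*
	 * Pick the smallest lane count and rate that still fit the mode,
	 * e.g. 1080p60 at 8 bpc needs 148500 * 8 * 3 = 3,564,000 kbps,
	 * which two lanes at 270000 cover (2 * 270000 * 10 * 8 / 10 =
	 * 4,320,000 kbps).
	 */
	err = drm_dp_link_choose(link, mode, info);
	if (err < 0)
		return err;

	/* drm_dp_link_train() configures the link and runs full training. */
	return drm_dp_link_train(link);
}
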
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index a0f6f9b0d258..622cdf1ad246 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
@@ -22,6 +23,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
+#include "dp.h"
#include "dpaux.h"
#include "drm.h"
#include "trace.h"
@@ -29,10 +31,18 @@
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
+struct tegra_dpaux_soc {
+ unsigned int cmh;
+ unsigned int drvz;
+ unsigned int drvi;
+};
+
struct tegra_dpaux {
struct drm_dp_aux aux;
struct device *dev;
+ const struct tegra_dpaux_soc *soc;
+
void __iomem *regs;
int irq;
@@ -120,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long status;
ssize_t ret = 0;
+ u8 reply = 0;
u32 value;
/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
@@ -214,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
case 0x00:
- msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ reply = DP_AUX_NATIVE_REPLY_ACK;
break;
case 0x01:
- msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+ reply = DP_AUX_NATIVE_REPLY_NACK;
break;
case 0x02:
- msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+ reply = DP_AUX_NATIVE_REPLY_DEFER;
break;
case 0x04:
- msg->reply = DP_AUX_I2C_REPLY_NACK;
+ reply = DP_AUX_I2C_REPLY_NACK;
break;
case 0x08:
- msg->reply = DP_AUX_I2C_REPLY_DEFER;
+ reply = DP_AUX_I2C_REPLY_DEFER;
break;
}
@@ -238,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
if (msg->request & DP_AUX_I2C_READ) {
size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
- if (WARN_ON(count != msg->size))
- count = min_t(size_t, count, msg->size);
+ /*
+ * There might be a smarter way to do this, but since
+ * the DP helpers will already retry transactions for
+ * an -EBUSY return value, simply reuse that instead.
+ */
+ if (count != msg->size) {
+ ret = -EBUSY;
+ goto out;
+ }
tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
ret = count;
}
}
+ msg->reply = reply;
+
+out:
return ret;
}
@@ -310,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
switch (function) {
case DPAUX_PADCTL_FUNC_AUX:
- value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
DPAUX_HYBRID_PADCTL_MODE_AUX;
break;
@@ -320,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
case DPAUX_PADCTL_FUNC_I2C:
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_MODE_I2C;
break;
@@ -436,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (!dpaux)
return -ENOMEM;
+ dpaux->soc = of_device_get_match_data(&pdev->dev);
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
init_completion(&dpaux->complete);
INIT_LIST_HEAD(&dpaux->list);
@@ -493,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->vdd);
}
+
+ dpaux->vdd = NULL;
}
platform_set_drvdata(pdev, dpaux);
@@ -641,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
};
+static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x18,
+};
+
+static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x30,
+};
+
+static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x2c,
+};
+
static const struct of_device_id tegra_dpaux_of_match[] = {
- { .compatible = "nvidia,tegra194-dpaux", },
- { .compatible = "nvidia,tegra186-dpaux", },
- { .compatible = "nvidia,tegra210-dpaux", },
- { .compatible = "nvidia,tegra124-dpaux", },
+ { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
+ { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
@@ -686,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
dpaux->output = output;
- err = regulator_enable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_enable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_connected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_connected) {
- enable_irq(dpaux->irq);
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_connected)
+ return -ETIMEDOUT;
}
- return -ETIMEDOUT;
+ enable_irq(dpaux->irq);
+ return 0;
}
int drm_dp_aux_detach(struct drm_dp_aux *aux)
@@ -715,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux)
disable_irq(dpaux->irq);
- err = regulator_disable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (dpaux->output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_disable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_disconnected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_disconnected) {
- dpaux->output = NULL;
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_disconnected)
+ return -ETIMEDOUT;
+
+ dpaux->output = NULL;
}
- return -ETIMEDOUT;
+ return 0;
}
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
@@ -764,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux)
return 0;
}
-
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
-{
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
- encoding);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern)
-{
- u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
- u8 status[DP_LINK_STATUS_SIZE], values[4];
- unsigned int i;
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
- if (err < 0)
- return err;
-
- if (tp == DP_TRAINING_PATTERN_DISABLE)
- return 0;
-
- for (i = 0; i < link->num_lanes; i++)
- values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
- DP_TRAIN_PRE_EMPH_LEVEL_0 |
- DP_TRAIN_MAX_SWING_REACHED |
- DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
-
- err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
- link->num_lanes);
- if (err < 0)
- return err;
-
- usleep_range(500, 1000);
-
- err = drm_dp_dpcd_read_link_status(aux, status);
- if (err < 0)
- return err;
-
- switch (tp) {
- case DP_TRAINING_PATTERN_1:
- if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- case DP_TRAINING_PATTERN_2:
- if (!drm_dp_channel_eq_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- default:
- dev_err(aux->dev, "unsupported training pattern %u\n", tp);
- return -EINVAL;
- }
-
- err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
- if (err < 0)
- return err;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6fb7d74ff553..56e5e7a5c108 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -20,10 +20,6 @@
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
-
#include "drm.h"
#include "gem.h"
@@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = {
.atomic_commit_tail = tegra_atomic_commit_tail,
};
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra;
- int err;
-
- tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
- if (!tegra)
- return -ENOMEM;
-
- if (iommu_present(&platform_bus_type)) {
- tegra->domain = iommu_domain_alloc(&platform_bus_type);
- if (!tegra->domain) {
- err = -ENOMEM;
- goto free;
- }
-
- err = iova_cache_get();
- if (err < 0)
- goto domain;
- }
-
- mutex_init(&tegra->clients_lock);
- INIT_LIST_HEAD(&tegra->clients);
-
- drm->dev_private = tegra;
- tegra->drm = drm;
-
- drm_mode_config_init(drm);
-
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
-
- drm->mode_config.allow_fb_modifiers = true;
-
- drm->mode_config.normalize_zpos = true;
-
- drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
- drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
-
- err = tegra_drm_fb_prepare(drm);
- if (err < 0)
- goto config;
-
- drm_kms_helper_poll_init(drm);
-
- err = host1x_device_init(device);
- if (err < 0)
- goto fbdev;
-
- if (tegra->domain) {
- u64 carveout_start, carveout_end, gem_start, gem_end;
- u64 dma_mask = dma_get_mask(&device->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- start = tegra->domain->geometry.aperture_start & dma_mask;
- end = tegra->domain->geometry.aperture_end & dma_mask;
-
- gem_start = start;
- gem_end = end - CARVEOUT_SZ;
- carveout_start = gem_end + 1;
- carveout_end = end;
-
- order = __ffs(tegra->domain->pgsize_bitmap);
- init_iova_domain(&tegra->carveout.domain, 1UL << order,
- carveout_start >> order);
-
- tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
- tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
-
- drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
- mutex_init(&tegra->mm_lock);
-
- DRM_DEBUG("IOMMU apertures:\n");
- DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
- DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
- carveout_end);
- }
-
- if (tegra->hub) {
- err = tegra_display_hub_prepare(tegra->hub);
- if (err < 0)
- goto device;
- }
-
- /*
- * We don't use the drm_irq_install() helpers provided by the DRM
- * core, so we need to set this manually in order to allow the
- * DRM_IOCTL_WAIT_VBLANK to operate correctly.
- */
- drm->irq_enabled = true;
-
- /* syncpoints are used for full 32-bit hardware VBLANK counters */
- drm->max_vblank_count = 0xffffffff;
-
- err = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (err < 0)
- goto hub;
-
- drm_mode_config_reset(drm);
-
- err = tegra_drm_fb_init(drm);
- if (err < 0)
- goto hub;
-
- return 0;
-
-hub:
- if (tegra->hub)
- tegra_display_hub_cleanup(tegra->hub);
-device:
- host1x_device_exit(device);
-fbdev:
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_free(drm);
-config:
- drm_mode_config_cleanup(drm);
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- }
-domain:
- if (tegra->domain)
- iommu_domain_free(tegra->domain);
-free:
- kfree(tegra);
- return err;
-}
-
-static void tegra_drm_unload(struct drm_device *drm)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra = drm->dev_private;
- int err;
-
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_exit(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
-
- err = host1x_device_exit(device);
- if (err < 0)
- return;
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- iommu_domain_free(tegra->domain);
- }
-
- kfree(tegra);
-}
-
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
struct tegra_drm_file *fpriv;
@@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
if (err < 0)
return err;
+ dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
+
dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
@@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor)
static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
- .load = tegra_drm_load,
- .unload = tegra_drm_unload,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
.lastclose = drm_fb_helper_lastclose,
@@ -1068,57 +902,63 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared)
+int host1x_client_iommu_attach(struct host1x_client *client)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
+ /*
+ * If the host1x client is already attached to an IOMMU domain that is
+ * not the shared IOMMU domain, don't try to attach it to a different
+ * domain. This allows using the IOMMU-backed DMA API.
+ */
+ if (domain && domain != tegra->domain)
+ return 0;
+
if (tegra->domain) {
group = iommu_group_get(client->dev);
if (!group) {
dev_err(client->dev, "failed to get IOMMU group\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
- if (!shared || (shared && (group != tegra->group))) {
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (client->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(client->dev);
- arm_iommu_detach_device(client->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
+ if (domain != tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
- return ERR_PTR(err);
+ return err;
}
-
- if (shared && !tegra->group)
- tegra->group = group;
}
+
+ tegra->use_explicit_iommu = true;
}
- return group;
+ client->group = group;
+
+ return 0;
}
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group)
+void host1x_client_iommu_detach(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
+ struct iommu_domain *domain;
- if (group) {
- if (group == tegra->group) {
- iommu_detach_group(tegra->domain, group);
- tegra->group = NULL;
- }
+ if (client->group) {
+ /*
+ * Devices that are part of the same group may no longer be
+ * attached to a domain at this point because their group may
+ * have been detached by an earlier client.
+ */
+ domain = iommu_get_domain_for_dev(client->dev);
+ if (domain)
+ iommu_detach_group(tegra->domain, client->group);
- iommu_group_put(group);
+ iommu_group_put(client->group);
+ client->group = NULL;
}
}
@@ -1202,6 +1042,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
+ struct iommu_domain *domain;
+ struct tegra_drm *tegra;
struct drm_device *drm;
int err;
@@ -1209,18 +1051,180 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (IS_ERR(drm))
return PTR_ERR(drm);
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ /*
+ * If the Tegra DRM clients are backed by an IOMMU, push buffers are
+ * likely to be allocated beyond the 32-bit boundary if sufficient
+ * system memory is available. This is problematic on earlier Tegra
+ * generations where host1x supports a maximum of 32 address bits in
+ * the GATHER opcode. In this case, unless host1x is behind an IOMMU
+ * as well, it won't be able to process buffers allocated beyond the
+ * 32-bit boundary.
+ *
+ * The DMA API will use bounce buffers in this case, so that could
+ * perhaps still be made to work, even if less efficient, but there
+ * is another catch: in order to perform cache maintenance on pages
+ * allocated for discontiguous buffers we need to map and unmap the
+ * SG table representing these buffers. This is fine for something
+ * small like a push buffer, but it exhausts the bounce buffer pool
+ * (typically on the order of a few MiB) for framebuffers (many MiB
+ * for any modern resolution).
+ *
+ * Work around this by making sure that Tegra DRM clients only use
+ * an IOMMU if the parent host1x also uses an IOMMU.
+ *
+ * Note that there's still a small gap here that we don't cover: if
+ * the DMA API is backed by an IOMMU there's no way to control which
+ * device is attached to an IOMMU and which isn't, except via wiring
+ * up the device tree appropriately. This is considered a problem
+ * of integration, so care must be taken for the DT to be consistent.
+ */
+ domain = iommu_get_domain_for_dev(drm->dev->parent);
+
+ if (domain && iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tegra->domain) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto domain;
+ }
+
+ mutex_init(&tegra->clients_lock);
+ INIT_LIST_HEAD(&tegra->clients);
+
dev_set_drvdata(&dev->dev, drm);
+ drm->dev_private = tegra;
+ tegra->drm = drm;
+
+ drm_mode_config_init(drm);
- err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.allow_fb_modifiers = true;
+
+ drm->mode_config.normalize_zpos = true;
+
+ drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
+
+ err = tegra_drm_fb_prepare(drm);
if (err < 0)
- goto put;
+ goto config;
+
+ drm_kms_helper_poll_init(drm);
+
+ err = host1x_device_init(dev);
+ if (err < 0)
+ goto fbdev;
+
+ if (tegra->use_explicit_iommu) {
+ u64 carveout_start, carveout_end, gem_start, gem_end;
+ u64 dma_mask = dma_get_mask(&dev->dev);
+ dma_addr_t start, end;
+ unsigned long order;
+
+ start = tegra->domain->geometry.aperture_start & dma_mask;
+ end = tegra->domain->geometry.aperture_end & dma_mask;
+
+ gem_start = start;
+ gem_end = end - CARVEOUT_SZ;
+ carveout_start = gem_end + 1;
+ carveout_end = end;
+
+ order = __ffs(tegra->domain->pgsize_bitmap);
+ init_iova_domain(&tegra->carveout.domain, 1UL << order,
+ carveout_start >> order);
+
+ tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+ tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+ drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+ mutex_init(&tegra->mm_lock);
+
+ DRM_DEBUG_DRIVER("IOMMU apertures:\n");
+ DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
+ DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
+ carveout_end);
+ } else if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ tegra->domain = NULL;
+ iova_cache_put();
+ }
+
+ if (tegra->hub) {
+ err = tegra_display_hub_prepare(tegra->hub);
+ if (err < 0)
+ goto device;
+ }
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+ * core, so we need to set this manually in order to allow the
+ * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+ */
+ drm->irq_enabled = true;
+
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->max_vblank_count = 0xffffffff;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ goto hub;
+
+ drm_mode_config_reset(drm);
+
+ err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
+ false);
+ if (err < 0)
+ goto hub;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ goto hub;
err = drm_dev_register(drm, 0);
if (err < 0)
- goto put;
+ goto fb;
return 0;
+fb:
+ tegra_drm_fb_exit(drm);
+hub:
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+device:
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ }
+
+ host1x_device_exit(dev);
+fbdev:
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_free(drm);
+config:
+ drm_mode_config_cleanup(drm);
+domain:
+ if (tegra->domain)
+ iommu_domain_free(tegra->domain);
+free:
+ kfree(tegra);
put:
drm_dev_put(drm);
return err;
@@ -1229,8 +1233,29 @@ put:
static int host1x_drm_remove(struct host1x_device *dev)
{
struct drm_device *drm = dev_get_drvdata(&dev->dev);
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_mode_config_cleanup(drm);
+
+ err = host1x_device_exit(dev);
+ if (err < 0)
+ dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
+
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ iommu_domain_free(tegra->domain);
+ }
+
+ kfree(tegra);
drm_dev_put(drm);
return 0;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 29911eff9ceb..d941553f7a3d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -36,7 +36,7 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
- struct iommu_group *group;
+ bool use_explicit_iommu;
struct mutex mm_lock;
struct drm_mm mm;
@@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared);
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group);
+int host1x_client_iommu_attach(struct host1x_client *client);
+void host1x_client_iommu_detach(struct host1x_client *client);
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
@@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
void tegra_output_encoder_destroy(struct drm_encoder *encoder);
/* from dpaux.c */
-struct drm_dp_link;
-
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
int drm_dp_aux_detach(struct drm_dp_aux *aux);
int drm_dp_aux_enable(struct drm_dp_aux *aux);
int drm_dp_aux_disable(struct drm_dp_aux *aux);
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f49ad36e24db..56edef06c48e 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon,
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
- u32 *firmware_vaddr = falcon->firmware.vaddr;
- dma_addr_t daddr;
+ u32 *virt = falcon->firmware.virt;
size_t i;
- int err;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
-
- /* ensure that caches are flushed and falcon can see the firmware */
- daddr = dma_map_single(falcon->dev, firmware_vaddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- err = dma_mapping_error(falcon->dev, daddr);
- if (err) {
- dev_err(falcon->dev, "failed to map firmware: %d\n", err);
- return;
- }
- dma_sync_single_for_device(falcon->dev, daddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
- DMA_TO_DEVICE);
+ virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
- struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
@@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
return -EINVAL;
}
- os = falcon->firmware.vaddr + bin->os_header_offset;
+ os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
@@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name)
if (err < 0)
return err;
+ falcon->firmware.size = falcon->firmware.firmware->size;
+
return 0;
}
@@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon)
const struct firmware *firmware = falcon->firmware.firmware;
int err;
- falcon->firmware.size = firmware->size;
-
- /* allocate iova space for the firmware */
- falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
- &falcon->firmware.paddr);
- if (IS_ERR(falcon->firmware.vaddr)) {
- dev_err(falcon->dev, "DMA memory mapping failed\n");
- return PTR_ERR(falcon->firmware.vaddr);
- }
-
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
@@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon)
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
- goto err_setup_firmware_image;
+ return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
-
-err_setup_firmware_image:
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr, falcon->firmware.vaddr);
-
- return err;
}
int falcon_init(struct falcon *falcon)
{
- /* check mandatory ops */
- if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
- return -EINVAL;
-
- falcon->firmware.vaddr = NULL;
+ falcon->firmware.virt = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
- if (falcon->firmware.firmware) {
+ if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
- falcon->firmware.firmware = NULL;
- }
-
- if (falcon->firmware.vaddr) {
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr,
- falcon->firmware.vaddr);
- falcon->firmware.vaddr = NULL;
- }
}
int falcon_boot(struct falcon *falcon)
@@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon)
u32 value;
int err;
- if (!falcon->firmware.vaddr)
+ if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
@@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon)
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
- falcon_writel(falcon, (falcon->firmware.paddr +
+ falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 3d1243217410..c56ee32d92ee 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 {
u32 data_size;
};
-struct falcon;
-
-struct falcon_ops {
- void *(*alloc)(struct falcon *falcon, size_t size,
- dma_addr_t *paddr);
- void (*free)(struct falcon *falcon, size_t size,
- dma_addr_t paddr, void *vaddr);
-};
-
struct falcon_firmware_section {
unsigned long offset;
size_t size;
@@ -93,8 +84,9 @@ struct falcon_firmware {
const struct firmware *firmware;
/* Raw firmware data */
- dma_addr_t paddr;
- void *vaddr;
+ dma_addr_t iova;
+ dma_addr_t phys;
+ void *virt;
size_t size;
/* Parsed firmware information */
@@ -107,8 +99,6 @@ struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
- const struct falcon_ops *ops;
- void *data;
struct falcon_firmware firmware;
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e34325c83d28..7cea89f29a5c 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
- drm->mode_config.fb_base = (resource_size_t)bo->paddr;
+ drm->mode_config.fb_base = (resource_size_t)bo->iova;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
- info->fix.smem_start = (unsigned long)(bo->paddr + offset);
+ info->fix.smem_start = (unsigned long)(bo->iova + offset);
info->fix.smem_len = size;
return 0;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index fb7667c8dd4c..746dae32c484 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -27,17 +27,55 @@ static void tegra_bo_put(struct host1x_bo *bo)
drm_gem_object_put_unlocked(&obj->gem);
}
-static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ dma_addr_t *phys)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct sg_table *sgt;
+ int err;
+
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make
+ * sure to return the IOVA address of our mapping.
+ */
+ if (phys && obj->mm) {
+ *phys = obj->iova;
+ return NULL;
+ }
+
+ /*
+ * If we don't have a mapping for this buffer yet, return an SG table
+ * so that host1x can do the mapping for us via the DMA API.
+ */
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
- *sgt = obj->sgt;
+ if (obj->pages) {
+ err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
+ 0, obj->gem.size, GFP_KERNEL);
+ if (err < 0)
+ goto free;
+ } else {
+ err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
+ obj->gem.size);
+ if (err < 0)
+ goto free;
+ }
- return obj->paddr;
+ return sgt;
+
+free:
+ kfree(sgt);
+ return ERR_PTR(err);
}
-static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
@@ -133,9 +171,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
goto unlock;
}
- bo->paddr = bo->mm->start;
+ bo->iova = bo->mm->start;
- bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
bo->sgt->nents, prot);
if (!bo->size) {
dev_err(tegra->drm->dev, "failed to map buffer\n");
@@ -161,7 +199,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
return 0;
mutex_lock(&tegra->mm_lock);
- iommu_unmap(tegra->domain, bo->paddr, bo->size);
+ iommu_unmap(tegra->domain, bo->iova, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);
@@ -209,7 +247,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
- dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
}
}
@@ -264,7 +302,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else {
size_t size = bo->gem.size;
- bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
@@ -365,7 +403,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
goto detach;
}
- bo->paddr = sg_dma_address(bo->sgt->sgl);
+ bo->iova = sg_dma_address(bo->sgt->sgl);
}
bo->gem.import_attach = attach;
@@ -461,7 +499,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
gem->size);
if (err < 0) {
drm_gem_vm_close(vma);
@@ -508,25 +546,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
return NULL;
if (bo->pages) {
- struct scatterlist *sg;
- unsigned int i;
-
- if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
- goto free;
-
- for_each_sg(sgt->sgl, sg, bo->num_pages, i)
- sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
-
- if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
+ 0, gem->size, GFP_KERNEL) < 0)
goto free;
} else {
- if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
+ gem->size) < 0)
goto free;
-
- sg_dma_address(sgt->sgl) = bo->paddr;
- sg_dma_len(sgt->sgl) = gem->size;
}
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free;
+
return sgt;
free:
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 83ffb1e14ca3..fafb5724499b 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -31,7 +31,7 @@ struct tegra_bo {
struct host1x_bo base;
unsigned long flags;
struct sg_table *sgt;
- dma_addr_t paddr;
+ dma_addr_t iova;
void *vaddr;
struct drm_mm_node *mm;
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 641299cc85b8..1fc4e56c7cc5 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -17,7 +17,6 @@ struct gr2d_soc {
};
struct gr2d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk;
@@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client)
struct gr2d *gr2d = to_gr2d(drm);
int err;
- gr2d->channel = host1x_channel_request(client->dev);
+ gr2d->channel = host1x_channel_request(client);
if (!gr2d->channel)
return -ENOMEM;
@@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client)
goto put;
}
- gr2d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr2d->group)) {
- err = PTR_ERR(gr2d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 8b9a35b1cbb3..24fae0f64032 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -23,7 +23,6 @@ struct gr3d_soc {
};
struct gr3d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk_secondary;
@@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client)
struct gr3d *gr3d = to_gr3d(drm);
int err;
- gr3d->channel = host1x_channel_request(client->dev);
+ gr3d->channel = host1x_channel_request(client);
if (!gr3d->channel)
return -ENOMEM;
@@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client)
goto put;
}
- gr3d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr3d->group)) {
- err = PTR_ERR(gr3d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 839b49c40e51..2b4082d0bc9e 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
unsigned int zpos = plane->state->normalized_zpos;
struct drm_framebuffer *fb = plane->state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_bo *bo;
dma_addr_t base;
u32 value;
@@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
- bo = tegra_fb_get_plane(fb, 0);
- base = bo->paddr;
+ base = state->iova[0] + fb->offsets[0];
tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
@@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_shared_plane_atomic_check,
.atomic_update = tegra_shared_plane_atomic_update,
.atomic_disable = tegra_shared_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index bdcaa4c7168c..34373734ff68 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
void tegra_output_connector_destroy(struct drm_connector *connector)
{
+ struct tegra_output *output = connector_to_output(connector);
+
+ if (output->cec)
+ cec_notifier_conn_unregister(output->cec);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output)
disable_irq(output->hpd_irq);
}
- output->cec = cec_notifier_get(output->dev);
- if (!output->cec)
- return -ENOMEM;
-
return 0;
}
void tegra_output_remove(struct tegra_output *output)
{
- if (output->cec)
- cec_notifier_put(output->cec);
-
if (output->hpd_gpio)
free_irq(output->hpd_irq, output);
@@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output)
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
+ int connector_type;
int err;
if (output->panel) {
@@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
if (output->hpd_gpio)
enable_irq(output->hpd_irq);
+ connector_type = output->connector.connector_type;
+ /*
+ * Create a CEC notifier for HDMI connectors.
+ */
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ struct cec_connector_info conn_info;
+
+ cec_fill_conn_info_from_drm(&conn_info, &output->connector);
+ output->cec = cec_notifier_conn_register(output->dev, NULL,
+ &conn_info);
+ if (!output->cec)
+ return -ENOMEM;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 6bab71d6e81d..163b590be224 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "dc.h"
@@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_plane_state *state;
+ unsigned int i;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
@@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane)
plane->state->plane = plane;
plane->state->zpos = p->index;
plane->state->normalized_zpos = p->index;
+
+ for (i = 0; i < 3; i++)
+ state->iova[i] = DMA_MAPPING_ERROR;
}
}
@@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
+ for (i = 0; i < 3; i++) {
+ copy->iova[i] = DMA_MAPPING_ERROR;
+ copy->sgt[i] = NULL;
+ }
+
return &copy->base;
}
@@ -95,6 +105,100 @@ const struct drm_plane_funcs tegra_plane_funcs = {
.format_mod_supported = tegra_plane_format_mod_supported,
};
+static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+
+ if (!dc->client.group) {
+ struct sg_table *sgt;
+
+ sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
+ err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (err == 0) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ state->iova[i] = sg_dma_address(sgt->sgl);
+ state->sgt[i] = sgt;
+ } else {
+ state->iova[i] = bo->iova;
+ }
+ }
+
+ return 0;
+
+unpin:
+ dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
+
+ while (i--) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct sg_table *sgt = state->sgt[i];
+
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+
+ return err;
+}
+
+static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+
+ if (!dc->client.group) {
+ struct sg_table *sgt = state->sgt[i];
+
+ if (sgt) {
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ }
+ }
+
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+}
+
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (!state->fb)
+ return 0;
+
+ drm_gem_fb_prepare_fb(plane, state);
+
+ return tegra_dc_pin(dc, to_tegra_plane_state(state));
+}
+
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (dc)
+ tegra_dc_unpin(dc, to_tegra_plane_state(state));
+}
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index 510c394e6d9a..a158a915109a 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state {
struct tegra_plane_state {
struct drm_plane_state base;
+ struct sg_table *sgt[3];
+ dma_addr_t iova[3];
+
struct tegra_bo_tiling tiling;
u32 format;
u32 swap;
@@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state)
extern const struct drm_plane_funcs tegra_plane_funcs;
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index e1669ada0a40..615cb319fa8b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -25,6 +25,7 @@
#include <drm/drm_scdc_helper.h>
#include "dc.h"
+#include "dp.h"
#include "drm.h"
#include "hda.h"
#include "sor.h"
@@ -370,10 +371,11 @@ struct tegra_sor_regs {
};
struct tegra_sor_soc {
- bool supports_edp;
bool supports_lvds;
bool supports_hdmi;
bool supports_dp;
+ bool supports_audio;
+ bool supports_hdcp;
const struct tegra_sor_regs *regs;
bool has_nvdisplay;
@@ -382,6 +384,12 @@ struct tegra_sor_soc {
unsigned int num_settings;
const u8 *xbar_cfg;
+ const u8 *lane_map;
+
+ const u8 (*voltage_swing)[4][4];
+ const u8 (*pre_emphasis)[4][4];
+ const u8 (*post_cursor)[4][4];
+ const u8 (*tx_pu)[4][4];
};
struct tegra_sor;
@@ -390,6 +398,8 @@ struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
int (*remove)(struct tegra_sor *sor);
+ void (*audio_enable)(struct tegra_sor *sor);
+ void (*audio_disable)(struct tegra_sor *sor);
};
struct tegra_sor {
@@ -412,6 +422,7 @@ struct tegra_sor {
u8 xbar_cfg[5];
+ struct drm_dp_link link;
struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
@@ -514,10 +525,19 @@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_pad_parents[] = {
- "pll_d2_out0", "pll_dp"
+static const char * const tegra_clk_sor_pad_parents[2][2] = {
+ { "pll_d_out0", "pll_dp" },
+ { "pll_d2_out0", "pll_dp" },
};
+/*
+ * Implementing ->set_parent() here isn't really required because the parent
+ * will be explicitly selected in the driver code via the DP_CLK_SEL mux in
+ * the SOR_CLK_CNTRL register. This is primarily for compatibility with the
+ * Tegra186 and later SoC generations where the BPMP implements this clock
+ * and doesn't expose the mux via the common clock framework.
+ */
+
static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
struct tegra_clk_sor_pad *pad = to_pad(hw);
@@ -586,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_pad_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.parent_names = tegra_clk_sor_pad_parents[sor->index];
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]);
init.ops = &tegra_clk_sor_pad_ops;
pad->hw.init = &init;
@@ -597,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
return clk;
}
-static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
- struct drm_dp_link *link)
+static void tegra_sor_filter_rates(struct tegra_sor *sor)
{
+ struct drm_dp_link *link = &sor->link;
unsigned int i;
- u8 pattern;
+
+ /* Tegra only supports RBR, HBR and HBR2 */
+ for (i = 0; i < link->num_rates; i++) {
+ switch (link->rates[i]) {
+ case 1620000:
+ case 2700000:
+ case 5400000:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("link rate %lu kHz not supported\n",
+ link->rates[i]);
+ link->rates[i] = 0;
+ break;
+ }
+ }
+
+ drm_dp_link_update_rates(link);
+}
+
+static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes)
+{
+ unsigned long timeout;
u32 value;
- int err;
- /* setup lane parameters */
- value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
- tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+ /*
+ * Clear or set the PD_TXD bit corresponding to each lane, depending
+ * on whether it is used or not.
+ */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE0(0x0f);
- tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]);
- value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
- SOR_LANE_POSTCURSOR_LANE2(0x00) |
- SOR_LANE_POSTCURSOR_LANE1(0x00) |
- SOR_LANE_POSTCURSOR_LANE0(0x00);
- tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
- /* disable LVDS mode */
- tegra_sor_writel(sor, 0, SOR_LVDS);
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int tegra_sor_power_down_lanes(struct tegra_sor *sor)
+{
+ unsigned long timeout;
+ u32 value;
+
+ /* power down all lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_TX_PU_ENABLE;
- value &= ~SOR_DP_PADCTL_TX_PU_MASK;
- value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
+ SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes)
+{
+ u32 value;
+
+ /* pre-charge all used lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
- SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
+
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]);
+
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
- usleep_range(10, 100);
+ usleep_range(15, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
- if (err < 0)
- return err;
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+ u32 mask = 0x08, adj = 0, value;
+
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ while (mask) {
+ adj |= mask;
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN1;
- value = (value << 8) | lane;
+ usleep_range(100, 200);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ if (value & SOR_PLL1_TERM_COMPOUT)
+ adj &= ~mask;
+
+ mask >>= 1;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_1;
+ /* disable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0;
+ const struct tegra_sor_soc *soc = sor->soc;
+ u32 pattern = 0, tx_pu = 0, value;
+ unsigned int i;
- value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- value |= SOR_DP_SPARE_SEQ_ENABLE;
- value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
- value |= SOR_DP_SPARE_MACRO_SOR_CLK;
- tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+ for (value = 0, i = 0; i < link->lanes; i++) {
+ u8 vs = link->train.request.voltage_swing[i];
+ u8 pe = link->train.request.pre_emphasis[i];
+ u8 pc = link->train.request.post_cursor[i];
+ u8 shift = sor->soc->lane_map[i] << 3;
+
+ voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift;
+ pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift;
+ post_cursor |= soc->post_cursor[pc][vs][pe] << shift;
+
+ if (sor->soc->tx_pu[pc][vs][pe] > tx_pu)
+ tx_pu = sor->soc->tx_pu[pc][vs][pe];
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ value = SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ break;
+
+ case DP_TRAINING_PATTERN_1:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN1;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN2;
+ break;
+
+ case DP_TRAINING_PATTERN_3:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (link->caps.channel_coding)
+ value |= SOR_DP_TPG_CHANNEL_CODING;
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN2;
- value = (value << 8) | lane;
+ pattern = pattern << 8 | value;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0);
+ tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0);
- pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
+ if (link->caps.tps3_supported)
+ tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+ tegra_sor_writel(sor, pattern, SOR_DP_TPG);
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(tx_pu);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_sor_dp_link_configure(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ unsigned int rate, lanes;
+ u32 value;
+ int err;
+
+ rate = drm_dp_link_rate_to_bw_code(link->rate);
+ lanes = link->lanes;
+
+ /* configure link speed and lane count */
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+
+ if (link->caps.enhanced_framing)
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ usleep_range(400, 1000);
+
+ /* configure load pulse position adjustment */
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+
+ switch (rate) {
+ case DP_LINK_BW_1_62:
+ value |= SOR_PLL1_LOADADJ(0x3);
+ break;
+
+ case DP_LINK_BW_2_7:
+ value |= SOR_PLL1_LOADADJ(0x4);
+ break;
+
+ case DP_LINK_BW_5_4:
+ value |= SOR_PLL1_LOADADJ(0x6);
+ break;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_DISABLE;
+ /* use alternate scrambler reset for eDP */
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
+ if (link->edp == 0)
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ err = tegra_sor_power_down_lanes(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power down lanes: %d\n", err);
+ return err;
+ }
+
+ /* power up and pre-charge lanes */
+ err = tegra_sor_power_up_lanes(sor, lanes);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power up %u lane%s: %d\n",
+ lanes, (lanes != 1) ? "s" : "", err);
return err;
+ }
+
+ tegra_sor_dp_precharge(sor, lanes);
return 0;
}
+static const struct drm_dp_link_ops tegra_sor_dp_link_ops = {
+ .apply_training = tegra_sor_dp_link_apply_training,
+ .configure = tegra_sor_dp_link_configure,
+};
+
static void tegra_sor_super_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
@@ -912,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
u32 num_syms_per_line;
unsigned int i;
- if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel)
+ if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel)
return -EINVAL;
- output = link_rate * 8 * link->num_lanes;
input = pclk * config->bits_per_pixel;
+ output = link_rate * 8 * link->lanes;
if (input >= output)
return -ERANGE;
@@ -959,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
watermark = div_u64(watermark + params.error, f);
config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) *
- (link->num_lanes * 8);
+ (link->lanes * 8);
if (config->watermark > 30) {
config->watermark = 30;
@@ -976,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
config->hblank_symbols = div_u64(num, pclk);
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (link->caps.enhanced_framing)
config->hblank_symbols -= 3;
- config->hblank_symbols -= 12 / link->num_lanes;
+ config->hblank_symbols -= 12 / link->lanes;
/* compute the number of symbols per vertical blanking interval */
num = (mode->hdisplay - 25) * link_rate;
config->vblank_symbols = div_u64(num, pclk);
- config->vblank_symbols -= 36 / link->num_lanes + 4;
+ config->vblank_symbols -= 36 / link->lanes + 4;
dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
config->vblank_symbols);
@@ -1200,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
return err;
}
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
- SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* stop lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
- SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(25, 100);
- }
-
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
- return -ETIMEDOUT;
-
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
@@ -1584,403 +1809,6 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_sor_edp_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- u32 value;
- int err;
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- err = tegra_sor_detach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to detach SOR: %d\n", err);
-
- tegra_sor_writel(sor, 0, SOR_STATE1);
- tegra_sor_update(sor);
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-
- err = tegra_sor_power_down(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to power down SOR: %d\n", err);
-
- if (sor->aux) {
- err = drm_dp_aux_disable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to disable DP: %d\n", err);
- }
-
- err = tegra_io_pad_power_disable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-
- pm_runtime_put(sor->dev);
-}
-
-#if 0
-static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
- unsigned int *value)
-{
- unsigned int hfp, hsw, hbp, a = 0, b;
-
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
-
- b = hfp - 1;
-
- pr_info("a: %u, b: %u\n", a, b);
- pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
-
- if (a + hsw + hbp <= 11) {
- a = 1 + 11 - hsw - hbp;
- pr_info("a: %u\n", a);
- }
-
- if (a > b)
- return -EINVAL;
-
- if (hsw < 1)
- return -EINVAL;
-
- if (mode->hdisplay < 16)
- return -EINVAL;
-
- if (value) {
- if (b > a && a % 2)
- *value = a + 1;
- else
- *value = a;
- }
-
- return 0;
-}
-#endif
-
-static void tegra_sor_edp_enable(struct drm_encoder *encoder)
-{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- struct tegra_sor_config config;
- struct tegra_sor_state *state;
- struct drm_dp_link link;
- u8 rate, lanes;
- unsigned int i;
- int err = 0;
- u32 value;
-
- state = to_sor_state(output->connector.state);
-
- pm_runtime_get_sync(sor->dev);
-
- if (output->panel)
- drm_panel_prepare(output->panel);
-
- err = drm_dp_aux_enable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DP: %d\n", err);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0) {
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
- return;
- }
-
- /* switch to safe parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
- dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
-
- memset(&config, 0, sizeof(config));
- config.bits_per_pixel = state->bpc * 3;
-
- err = tegra_sor_compute_config(sor, mode, &config, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to compute configuration: %d\n", err);
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
- value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
- usleep_range(20, 100);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll3);
- value |= SOR_PLL3_PLL_VDD_MODE_3V3;
- tegra_sor_writel(sor, value, sor->soc->regs->pll3);
-
- value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
- SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD;
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- value |= SOR_PLL2_LVDS_ENABLE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
- tegra_sor_writel(sor, value, sor->soc->regs->pll1);
-
- while (true) {
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /*
- * power up
- */
-
- /* set safe link bandwidth (1.62 Gbps) */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- /* step 1 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
- SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* step 2 */
- err = tegra_io_pad_power_enable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
-
- usleep_range(5, 100);
-
- /* step 3 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(20, 100);
-
- /* step 4 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value &= ~SOR_PLL0_VCOPD;
- value &= ~SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(200, 1000);
-
- /* step 5 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /* XXX not in TRM */
- for (value = 0, i = 0; i < 5; i++)
- value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
- SOR_XBAR_CTRL_LINK1_XSEL(i, i);
-
- tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
- tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
-
- /* switch to DP parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
- if (err < 0)
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
-
- /* power DP lanes */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
-
- if (link.num_lanes <= 2)
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
- else
- value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2;
-
- if (link.num_lanes <= 1)
- value &= ~SOR_DP_PADCTL_PD_TXD_1;
- else
- value |= SOR_DP_PADCTL_PD_TXD_1;
-
- if (link.num_lanes == 0)
- value &= ~SOR_DP_PADCTL_PD_TXD_0;
- else
- value |= SOR_DP_PADCTL_PD_TXD_0;
-
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* start lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
- SOR_LANE_SEQ_CTL_POWER_STATE_UP;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- while (true) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- /* set link bandwidth */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- tegra_sor_apply_config(sor, &config);
-
- /* enable link */
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value |= SOR_DP_LINKCTL_ENABLE;
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- for (i = 0, value = 0; i < 4; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- /* enable pad calibration logic */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
-
- err = drm_dp_link_power_up(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
-
- err = drm_dp_link_configure(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
-
- rate = drm_dp_link_rate_to_bw_code(link.rate);
- lanes = link.num_lanes;
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
-
- if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
-
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* disable training pattern generator */
-
- for (i = 0; i < link.num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- err = tegra_sor_dp_train_fast(sor, &link);
- if (err < 0)
- dev_err(sor->dev, "DP fast link training failed: %d\n", err);
-
- dev_dbg(sor->dev, "fast link training succeeded\n");
-
- err = tegra_sor_power_up(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to power up SOR: %d\n", err);
-
- /* CSTM (LVDS, link A/B, upper) */
- value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
- SOR_CSTM_UPPER;
- tegra_sor_writel(sor, value, SOR_CSTM);
-
- /* use DP-A protocol */
- value = tegra_sor_readl(sor, SOR_STATE1);
- value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
- value |= SOR_STATE_ASY_PROTOCOL_DP_A;
- tegra_sor_writel(sor, value, SOR_STATE1);
-
- tegra_sor_mode_set(sor, mode, state);
-
- /* PWM setup */
- err = tegra_sor_setup_pwm(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to setup PWM: %d\n", err);
-
- tegra_sor_update(sor);
-
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
-
- err = tegra_sor_attach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to attach SOR: %d\n", err);
-
- err = tegra_sor_wakeup(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DC: %d\n", err);
-
- if (output->panel)
- drm_panel_enable(output->panel);
-}
-
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -2030,12 +1858,6 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
- .disable = tegra_sor_edp_disable,
- .enable = tegra_sor_edp_enable,
- .atomic_check = tegra_sor_encoder_atomic_check,
-};
-
static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
{
u32 value = 0;
@@ -2160,6 +1982,15 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
{
u32 value;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
tegra_sor_write_eld(sor);
value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
@@ -2169,6 +2000,32 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+}
+
+static void tegra_sor_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples */
+ if (sor->format.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
}
static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
@@ -2206,24 +2063,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
{
u32 value;
- value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
-
- /* select HDA audio input */
- value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
- value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
-
- /* inject null samples */
- if (sor->format.channels != 2)
- value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
- else
- value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
-
- value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
-
- tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
-
- /* enable advertising HBR capability */
- tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+ tegra_sor_audio_enable(sor);
tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
@@ -2399,9 +2239,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1));
- else
- value &= ~SOR_ENABLE(sor->index);
+ value &= ~SOR1_TIMING_CYA;
+
+ value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2559,16 +2399,34 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
- /* switch to parent clock */
- err = clk_set_parent(sor->clk, sor->clk_parent);
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_dp);
if (err < 0) {
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
return;
}
+#endif
+ /* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
- dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
return;
}
@@ -2774,9 +2632,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value |= SOR_ENABLE(1) | SOR1_TIMING_CYA;
- else
- value |= SOR_ENABLE(sor->index);
+ value |= SOR1_TIMING_CYA;
+
+ value |= SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2803,6 +2661,396 @@ static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
.atomic_check = tegra_sor_encoder_atomic_check,
};
+static void tegra_sor_dp_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
+
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ /*
+ * Do not attempt to power down a DP link if we're not connected since
+ * the AUX transactions would just be timing out.
+ */
+ if (output->connector.status != connector_status_disconnected) {
+ err = drm_dp_link_power_down(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down link: %d\n",
+ err);
+ }
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ tegra_dc_commit(dc);
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value &= ~SOR_STATE_ASY_SUBOWNER_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe clock: %d\n", err);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_pad_power_disable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
+
+ err = drm_dp_aux_disable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed disable DPAUX: %d\n", err);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ pm_runtime_put(sor->dev);
+}
+
+static void tegra_sor_dp_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_config config;
+ struct tegra_sor_state *state;
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+ unsigned int i;
+ u32 value;
+ int err;
+
+ state = to_sor_state(output->connector.state);
+ mode = &encoder->crtc->state->adjusted_mode;
+ info = &output->connector.display_info;
+
+ pm_runtime_get_sync(sor->dev);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ err = tegra_io_pad_power_enable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err);
+
+ usleep_range(20, 100);
+
+ err = drm_dp_aux_enable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable DPAUX: %d\n", err);
+
+ err = drm_dp_link_probe(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to probe DP link: %d\n", err);
+
+ tegra_sor_filter_rates(sor);
+
+ err = drm_dp_link_choose(&sor->link, mode, info);
+ if (err < 0)
+ dev_err(sor->dev, "failed to choose link: %d\n", err);
+
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 40);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ if (output->panel)
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ else
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK;
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ /* XXX not in TRM */
+ if (output->panel)
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ /* XXX not in TRM */
+ tegra_sor_writel(sor, 0, SOR_LVDS);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(0x1);
+ value |= SOR_PLL0_VCOCAP(0x3);
+ value |= SOR_PLL0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
+ return;
+ }
+#endif
+
+ /* switch the SOR clock to the pad clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* enable port */
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value |= SOR_DP_LINKCTL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ tegra_sor_dp_term_calibrate(sor);
+
+ err = drm_dp_link_train(&sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "link training failed: %d\n", err);
+ else
+ dev_dbg(sor->dev, "link training succeeded\n");
+
+ err = drm_dp_link_power_up(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up DP link: %d\n", err);
+
+ /* compute configuration */
+ memset(&config, 0, sizeof(config));
+ config.bits_per_pixel = state->bpc * 3;
+
+ err = tegra_sor_compute_config(sor, mode, &config, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
+
+ tegra_sor_apply_config(sor, &config);
+ tegra_sor_mode_set(sor, mode, state);
+
+ if (output->panel) {
+ /* CSTM (LVDS, link A/B, upper) */
+ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
+ SOR_CSTM_UPPER;
+ tegra_sor_writel(sor, value, SOR_CSTM);
+
+ /* PWM setup */
+ err = tegra_sor_setup_pwm(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup PWM: %d\n", err);
+ }
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* attach and wake up */
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
+
+ if (output->panel)
+ drm_panel_enable(output->panel);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
+ .disable = tegra_sor_dp_disable,
+ .enable = tegra_sor_dp_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+ if (IS_ERR(sor->avdd_io_supply)) {
+ dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+ PTR_ERR(sor->avdd_io_supply));
+ return PTR_ERR(sor->avdd_io_supply);
+ }
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+ if (IS_ERR(sor->vdd_pll_supply)) {
+ dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+ PTR_ERR(sor->vdd_pll_supply));
+ return PTR_ERR(sor->vdd_pll_supply);
+ }
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+ if (IS_ERR(sor->hdmi_supply)) {
+ dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+ PTR_ERR(sor->hdmi_supply));
+ return PTR_ERR(sor->hdmi_supply);
+ }
+
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
+
+ return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->hdmi_supply);
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+ .name = "HDMI",
+ .probe = tegra_sor_hdmi_probe,
+ .remove = tegra_sor_hdmi_remove,
+ .audio_enable = tegra_sor_hdmi_audio_enable,
+ .audio_disable = tegra_sor_hdmi_audio_disable,
+};
+
+static int tegra_sor_dp_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
+ if (IS_ERR(sor->avdd_io_supply))
+ return PTR_ERR(sor->avdd_io_supply);
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0)
+ return err;
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
+ if (IS_ERR(sor->vdd_pll_supply))
+ return PTR_ERR(sor->vdd_pll_supply);
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_sor_dp_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_dp_ops = {
+ .name = "DP",
+ .probe = tegra_sor_dp_probe,
+ .remove = tegra_sor_dp_remove,
+};
+
static int tegra_sor_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -2810,11 +3058,10 @@ static int tegra_sor_init(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
- u32 value;
int err;
if (!sor->aux) {
- if (sor->soc->supports_hdmi) {
+ if (sor->ops == &tegra_sor_hdmi_ops) {
connector = DRM_MODE_CONNECTOR_HDMIA;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_hdmi_helpers;
@@ -2823,14 +3070,18 @@ static int tegra_sor_init(struct host1x_client *client)
encoder = DRM_MODE_ENCODER_LVDS;
}
} else {
- if (sor->soc->supports_edp) {
+ if (sor->output.panel) {
connector = DRM_MODE_CONNECTOR_eDP;
encoder = DRM_MODE_ENCODER_TMDS;
- helpers = &tegra_sor_edp_helpers;
- } else if (sor->soc->supports_dp) {
+ helpers = &tegra_sor_dp_helpers;
+ } else {
connector = DRM_MODE_CONNECTOR_DisplayPort;
encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_dp_helpers;
}
+
+ sor->link.ops = &tegra_sor_dp_link_ops;
+ sor->link.aux = sor->aux;
}
sor->output.dev = sor->dev;
@@ -2913,15 +3164,6 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
- /*
- * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
- * is used for interoperability between the HDA codec driver and the
- * HDMI/DP driver.
- */
- value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
- tegra_sor_writel(sor, value, SOR_INT_ENABLE);
- tegra_sor_writel(sor, value, SOR_INT_MASK);
-
return 0;
}
@@ -2930,9 +3172,6 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
- tegra_sor_writel(sor, 0, SOR_INT_MASK);
- tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
-
tegra_output_exit(&sor->output);
if (sor->aux) {
@@ -2955,75 +3194,6 @@ static const struct host1x_client_ops sor_client_ops = {
.exit = tegra_sor_exit,
};
-static const struct tegra_sor_ops tegra_sor_edp_ops = {
- .name = "eDP",
-};
-
-static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
-{
- int err;
-
- sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
- if (IS_ERR(sor->avdd_io_supply)) {
- dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
- PTR_ERR(sor->avdd_io_supply));
- return PTR_ERR(sor->avdd_io_supply);
- }
-
- err = regulator_enable(sor->avdd_io_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
- err);
- return err;
- }
-
- sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
- if (IS_ERR(sor->vdd_pll_supply)) {
- dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
- PTR_ERR(sor->vdd_pll_supply));
- return PTR_ERR(sor->vdd_pll_supply);
- }
-
- err = regulator_enable(sor->vdd_pll_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
- err);
- return err;
- }
-
- sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
- if (IS_ERR(sor->hdmi_supply)) {
- dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
- PTR_ERR(sor->hdmi_supply));
- return PTR_ERR(sor->hdmi_supply);
- }
-
- err = regulator_enable(sor->hdmi_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
- return err;
- }
-
- INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
-
- return 0;
-}
-
-static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->hdmi_supply);
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
-
- return 0;
-}
-
-static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
- .name = "HDMI",
- .probe = tegra_sor_hdmi_probe,
- .remove = tegra_sor_hdmi_remove,
-};
-
static const u8 tegra124_sor_xbar_cfg[5] = {
0, 1, 2, 3, 4
};
@@ -3043,14 +3213,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = {
.dp_padctl2 = 0x73,
};
+/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. */
+static const u8 tegra124_sor_lane_map[4] = {
+ 2, 1, 0, 3,
+};
+
+static const u8 tegra124_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x3c, },
+ }, {
+ { 0x12, 0x17, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x39, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x36, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
+
+static const u8 tegra124_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x09, 0x13, 0x25 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ },
+};
+
+static const u8 tegra124_sor_post_cursor[4][4][4] = {
+ {
+ { 0x00, 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0x00, },
+ { 0x00, 0x00, },
+ { 0x00, },
+ }, {
+ { 0x02, 0x02, 0x04, 0x05 },
+ { 0x02, 0x04, 0x05, },
+ { 0x04, 0x05, },
+ { 0x05, },
+ }, {
+ { 0x04, 0x05, 0x08, 0x0b },
+ { 0x05, 0x09, 0x0b, },
+ { 0x08, 0x0a, },
+ { 0x0b, },
+ }, {
+ { 0x05, 0x09, 0x0b, 0x12 },
+ { 0x09, 0x0d, 0x12, },
+ { 0x0b, 0x0f, },
+ { 0x12, },
+ },
+};
+
+static const u8 tegra124_sor_tx_pu[4][4][4] = {
+ {
+ { 0x20, 0x30, 0x40, 0x60 },
+ { 0x30, 0x40, 0x60, },
+ { 0x40, 0x60, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x50 },
+ { 0x30, 0x40, 0x50, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x20, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x40, },
+ { 0x60, },
+ },
+};
+
static const struct tegra_sor_soc tegra124_sor = {
- .supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const u8 tegra132_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
+};
+
+static const struct tegra_sor_soc tegra132_sor = {
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+ .regs = &tegra124_sor_regs,
+ .has_nvdisplay = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra132_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra210_sor_regs = {
@@ -3068,33 +3385,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = {
.dp_padctl2 = 0x73,
};
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
+};
+
+static const u8 tegra210_sor_lane_map[4] = {
+ 0, 1, 2, 3,
+};
+
static const struct tegra_sor_soc tegra210_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
- .xbar_cfg = tegra124_sor_xbar_cfg,
-};
-static const u8 tegra210_sor_xbar_cfg[5] = {
- 2, 1, 0, 3, 4
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_soc tegra210_sor1 = {
- .supports_edp = false,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
-
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra186_sor_regs = {
@@ -3112,31 +3446,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = {
.dp_padctl2 = 0x16a,
};
-static const struct tegra_sor_soc tegra186_sor = {
- .supports_edp = false,
- .supports_lvds = false,
- .supports_hdmi = false,
- .supports_dp = true,
-
- .regs = &tegra186_sor_regs,
- .has_nvdisplay = true,
+static const u8 tegra186_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x39, },
+ }, {
+ { 0x12, 0x16, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x37, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x35, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
- .xbar_cfg = tegra124_sor_xbar_cfg,
+static const u8 tegra186_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x14, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
};
-static const struct tegra_sor_soc tegra186_sor1 = {
- .supports_edp = false,
+static const struct tegra_sor_soc tegra186_sor = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra186_sor_regs,
.has_nvdisplay = true,
.num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
.settings = tegra186_sor_hdmi_defaults,
-
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra194_sor_regs = {
@@ -3155,10 +3530,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = {
};
static const struct tegra_sor_soc tegra194_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra194_sor_regs,
.has_nvdisplay = true,
@@ -3167,14 +3543,19 @@ static const struct tegra_sor_soc tegra194_sor = {
.settings = tegra194_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
- { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 },
{ .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+ { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor },
{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
{ },
};
@@ -3200,6 +3581,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
* earlier
*/
sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
+ } else {
+ if (!sor->soc->supports_audio)
+ sor->index = 0;
+ else
+ sor->index = 1;
}
err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
@@ -3234,9 +3620,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data)
tegra_hda_parse_format(format, &sor->format);
- tegra_sor_hdmi_audio_enable(sor);
+ if (sor->ops->audio_enable)
+ sor->ops->audio_enable(sor);
} else {
- tegra_sor_hdmi_audio_disable(sor);
+ if (sor->ops->audio_disable)
+ sor->ops->audio_disable(sor);
}
}
@@ -3273,6 +3661,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
+
+ sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
@@ -3287,16 +3677,15 @@ static int tegra_sor_probe(struct platform_device *pdev)
return -ENODEV;
}
} else {
- if (sor->soc->supports_edp) {
- sor->ops = &tegra_sor_edp_ops;
- sor->pad = TEGRA_IO_PAD_LVDS;
- } else if (sor->soc->supports_dp) {
- dev_err(&pdev->dev, "DisplayPort not supported yet\n");
- return -ENODEV;
- } else {
- dev_err(&pdev->dev, "unknown (DP) support\n");
- return -ENODEV;
- }
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0);
+ /*
+ * No need to keep this around since we only use it as a check
+ * to see if a panel is connected (eDP) or not (DP).
+ */
+ of_node_put(np);
+
+ sor->ops = &tegra_sor_dp_ops;
+ sor->pad = TEGRA_IO_PAD_LVDS;
}
err = tegra_sor_parse_dt(sor);
@@ -3451,6 +3840,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
* pad output clock.
*/
if (!sor->clk_pad) {
+ char *name;
+
err = pm_runtime_get_sync(&pdev->dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
@@ -3458,8 +3849,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->clk_pad = tegra_clk_sor_pad_register(sor,
- "sor1_pad_clkout");
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index);
+ if (!name) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
pm_runtime_put(&pdev->dev);
}
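
A minimal, self-contained sketch of the optional-callback dispatch that the new tegra_sor_hdmi_ops/tegra_sor_dp_ops split relies on: the DP ops table leaves the audio hooks unset, so callers check each hook for NULL before invoking it. All names and types below are illustrative stand-ins, not code from the driver.

#include <stdio.h>

struct output;

/* per-protocol operations; the audio hooks are optional and may be NULL */
struct output_ops {
	const char *name;
	void (*audio_enable)(struct output *out);
	void (*audio_disable)(struct output *out);
};

struct output {
	const struct output_ops *ops;
};

static void hdmi_audio_enable(struct output *out)
{
	printf("HDMI audio enabled\n");
}

static void hdmi_audio_disable(struct output *out)
{
	printf("HDMI audio disabled\n");
}

static const struct output_ops hdmi_ops = {
	.name = "HDMI",
	.audio_enable = hdmi_audio_enable,
	.audio_disable = hdmi_audio_disable,
};

/* the DP table leaves the audio hooks NULL, so audio events are ignored */
static const struct output_ops dp_ops = {
	.name = "DP",
};

static void handle_audio_event(struct output *out, int enabled)
{
	/* guard each optional hook before calling it */
	if (enabled) {
		if (out->ops->audio_enable)
			out->ops->audio_enable(out);
	} else {
		if (out->ops->audio_disable)
			out->ops->audio_disable(out);
	}
}

int main(void)
{
	struct output hdmi = { .ops = &hdmi_ops };
	struct output dp = { .ops = &dp_ops };

	handle_audio_event(&hdmi, 1);
	handle_audio_event(&dp, 1);	/* silently ignored */
	handle_audio_event(&hdmi, 0);
	return 0;
}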
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index f8efd8be4b7c..00e09d5dca30 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -39,6 +39,7 @@
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4)
#define SOR_STATE_ASY_OWNER_MASK 0xf
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
@@ -283,10 +284,12 @@
#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
+#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x)))
#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
+#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x)))
#define SOR_DP_PADCTL1 0x5d
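
A standalone sketch of how a parameterized per-lane bit macro such as SOR_DP_PADCTL_PD_TXD(x) can be combined with a logical-to-physical lane map like the Tegra124 one (lanes 0 and 2 swapped) to build a lane mask for the lanes in use. The register layout and polarity below are simplified assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

/* parameterized per-lane bit, mirroring SOR_DP_PADCTL_PD_TXD(x) */
#define PD_TXD(x) (1u << (x))

/* Tegra124/132-style map: logical lane -> pad bit index (lanes 0 and 2 swapped) */
static const uint8_t lane_map[4] = { 2, 1, 0, 3 };

/* build the pad-control bits for the first num_lanes logical lanes */
static uint32_t lane_power_mask(unsigned int num_lanes)
{
	uint32_t mask = 0;
	unsigned int i;

	for (i = 0; i < num_lanes; i++)
		mask |= PD_TXD(lane_map[i]);

	return mask;
}

int main(void)
{
	unsigned int lanes;

	for (lanes = 1; lanes <= 4; lanes++)
		printf("%u lane(s): mask = 0x%x\n", lanes, lane_power_mask(lanes));

	return 0;
}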
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index cd0399fd8c63..9444ba183990 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -34,7 +34,6 @@ struct vic {
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
- struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
struct reset_control *rst;
@@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev)
static int vic_boot(struct vic *vic)
{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+#endif
u32 fce_ucode_size, fce_bin_data_offset;
void *hdr;
int err = 0;
@@ -105,15 +107,14 @@ static int vic_boot(struct vic *vic)
return 0;
#ifdef CONFIG_IOMMU_API
- if (vic->config->supports_sid) {
- struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+ if (vic->config->supports_sid && spec) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
- if (spec && spec->num_ids > 0) {
+ if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
vic_writel(vic, value, VIC_THI_STREAMID0);
@@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic)
if (err < 0)
return err;
- hdr = vic->falcon.firmware.vaddr;
+ hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.vaddr +
+ hdr = vic->falcon.firmware.virt +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
@@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic)
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.paddr + fce_bin_data_offset)
+ (vic->falcon.firmware.iova + fce_bin_data_offset)
>> 8);
err = falcon_wait_idle(&vic->falcon);
@@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic)
return 0;
}
-static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
- dma_addr_t *iova)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_alloc(tegra, size, iova);
-}
-
-static void vic_falcon_free(struct falcon *falcon, size_t size,
- dma_addr_t iova, void *va)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_free(tegra, size, va, iova);
-}
-
-static const struct falcon_ops vic_falcon_ops = {
- .alloc = vic_falcon_alloc,
- .free = vic_falcon_free
-};
-
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
- if (group && tegra->domain) {
- err = iommu_attach_group(tegra->domain, group);
- if (err < 0) {
- dev_err(vic->dev, "failed to attach to domain: %d\n",
- err);
- return err;
- }
-
- vic->domain = tegra->domain;
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
+ dev_err(vic->dev, "failed to attach to domain: %d\n", err);
+ return err;
}
- vic->channel = host1x_channel_request(client->dev);
+ vic->channel = host1x_channel_request(client);
if (!vic->channel) {
err = -ENOMEM;
goto detach;
@@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client)
if (err < 0)
goto free_syncpt;
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent device.
+ */
+ client->dev->dma_parms = client->parent->dma_parms;
+
return 0;
free_syncpt:
@@ -221,8 +201,7 @@ free_syncpt:
free_channel:
host1x_channel_put(vic->channel);
detach:
- if (group && tegra->domain)
- iommu_detach_group(tegra->domain, group);
+ host1x_client_iommu_detach(client);
return err;
}
@@ -230,22 +209,32 @@ detach:
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(vic->channel);
-
- if (vic->domain) {
- iommu_detach_group(vic->domain, group);
- vic->domain = NULL;
+ host1x_client_iommu_detach(client);
+
+ if (client->group) {
+ dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
+ vic->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(vic->dev, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
}
return 0;
@@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = {
static int vic_load_firmware(struct vic *vic)
{
+ struct host1x_client *client = &vic->client.base;
+ struct tegra_drm *tegra = vic->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
int err;
- if (vic->falcon.data)
+ if (vic->falcon.firmware.virt)
return 0;
- vic->falcon.data = vic->client.drm;
-
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
- goto cleanup;
+ return err;
+
+ size = vic->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
+
+ err = dma_mapping_error(vic->dev, iova);
+ if (err < 0)
+ return err;
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ }
+
+ vic->falcon.firmware.virt = virt;
+ vic->falcon.firmware.iova = iova;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto cleanup;
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(vic->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ vic->falcon.firmware.phys = phys;
+ }
+
return 0;
cleanup:
- vic->falcon.data = NULL;
+ if (!client->group)
+ dma_free_coherent(vic->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
return err;
}
@@ -374,6 +402,13 @@ static int vic_probe(struct platform_device *pdev)
struct vic *vic;
int err;
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
@@ -410,7 +445,6 @@ static int vic_probe(struct platform_device *pdev)
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
- vic->falcon.ops = &vic_falcon_ops;
err = falcon_init(&vic->falcon);
if (err < 0)
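
A small host-side sketch of the firmware header indirection that vic_boot() performs: read an offset from the image, follow it to a sub-header, then read the ucode size from there. The offsets and layout below are invented for the example; the real VIC_UCODE_*/FCE_* constants and their values come from the driver and firmware format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical header layout, only for illustration */
#define FCE_DATA_OFFSET   0x08	/* u32: offset of the FCE data in the image */
#define FCE_HEADER_OFFSET 0x0c	/* u32: offset of the FCE sub-header        */
#define FCE_SIZE_OFFSET   0x00	/* u32 inside the sub-header: ucode size    */

static uint32_t read_u32(const uint8_t *base, size_t offset)
{
	uint32_t value;

	/* unaligned-safe equivalent of *(u32 *)(hdr + offset) in the driver */
	memcpy(&value, base + offset, sizeof(value));
	return value;
}

int main(void)
{
	uint8_t image[32] = { 0 };
	uint32_t data_offset, size, v;

	/* fabricate a tiny image: data at 0x20, sub-header at 0x10, size 0x100 */
	v = 0x20;  memcpy(image + FCE_DATA_OFFSET, &v, 4);
	v = 0x10;  memcpy(image + FCE_HEADER_OFFSET, &v, 4);
	v = 0x100; memcpy(image + 0x10 + FCE_SIZE_OFFSET, &v, 4);

	data_offset = read_u32(image, FCE_DATA_OFFSET);
	size = read_u32(image, read_u32(image, FCE_HEADER_OFFSET) + FCE_SIZE_OFFSET);

	printf("FCE data at 0x%x, ucode size 0x%x\n", data_offset, size);
	return 0;
}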
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 43d756b7810e..51d034e095f4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -8,6 +8,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include "tilcdc_drv.h"
@@ -139,8 +140,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
}
if (panel) {
- bridge = devm_drm_panel_bridge_add(ddev->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(ddev->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto err_encoder_cleanup;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 3abb9641f212..e2090020b3a0 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -11,7 +11,7 @@
#include "tilcdc_drv.h"
-static struct drm_plane_funcs tilcdc_plane_funcs = {
+static const struct drm_plane_funcs tilcdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 03d0e2df6774..94fb1f593564 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -649,7 +649,7 @@ static void gm12u320_driver_release(struct drm_device *dev)
kfree(gm12u320);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(gm12u320_fops);
+DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static struct drm_driver gm12u320_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 01fc670ce7a2..caea2a099496 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,8 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
- ttm_page_alloc_dma.o
+ ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
+ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index ea4d59eb8966..6050dc846894 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -51,7 +51,7 @@ struct ttm_agp_backend {
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+ struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98819462f025..8d91b0428af1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -148,23 +149,21 @@ static void ttm_bo_release_list(struct kref *list_kref)
{
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
- struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
BUG_ON(kref_read(&bo->list_kref));
BUG_ON(kref_read(&bo->kref));
- BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
- atomic_dec(&bo->bdev->glob->bo_count);
+ atomic_dec(&ttm_bo_glob.bo_count);
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
- ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -188,23 +187,17 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+ list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
bool notify = false;
@@ -224,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
bdev->driver->del_from_lru_notify(bo);
}
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
struct ttm_buffer_object *bo)
{
@@ -248,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
switch (bo->mem.mem_type) {
@@ -311,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
- lru = &pos->first->bdev->glob->swap_lru[i];
+ lru = &ttm_bo_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
@@ -475,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bdev->glob;
int ret;
ret = ttm_bo_individualize_resv(bo);
@@ -485,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
goto error;
}
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (bo->base.resv != &bo->base._resv)
dma_resv_unlock(&bo->base._resv);
@@ -512,7 +494,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
}
dma_resv_unlock(bo->base.resv);
@@ -523,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -546,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct ttm_bo_global *glob = bo->bdev->glob;
struct dma_resv *resv;
int ret;
@@ -565,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
lret = dma_resv_wait_timeout_rcu(resv, true,
interruptible,
@@ -576,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
@@ -586,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
ret = 0;
@@ -595,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -603,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
@@ -618,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
*/
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct list_head removed;
bool empty;
@@ -676,7 +657,7 @@ static void ttm_bo_release(struct kref *kref)
if (bo->bdev->driver->release_notify)
bo->bdev->driver->release_notify(bo);
- drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -842,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bool locked = false;
unsigned i;
int ret;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
@@ -880,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
kref_get(&busy_bo->list_kref);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
kref_put(&busy_bo->list_kref, ttm_bo_release_list);
@@ -896,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_bo_evict(bo, ctx);
- if (locked) {
+ if (locked)
ttm_bo_unreserve(bo);
- } else {
- spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&glob->lru_lock);
- }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -926,7 +900,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem,
+ bool no_wait_gpu)
{
struct dma_fence *fence;
int ret;
@@ -935,19 +910,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
- if (fence) {
- dma_resv_add_shared_fence(bo->base.resv, fence);
+ if (!fence)
+ return 0;
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
+ if (no_wait_gpu)
+ return -EBUSY;
- dma_fence_put(bo->moving);
- bo->moving = fence;
+ dma_resv_add_shared_fence(bo->base.resv, fence);
+
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ if (unlikely(ret)) {
+ dma_fence_put(fence);
+ return ret;
}
+ dma_fence_put(bo->moving);
+ bo->moving = fence;
return 0;
}
@@ -978,7 +956,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
} while (1);
- return ttm_bo_add_move_fence(bo, man, mem);
+ return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1068,12 +1046,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
@@ -1120,14 +1096,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret))
goto error;
- if (mem->mm_node) {
- ret = ttm_bo_add_move_fence(bo, man, mem);
- if (unlikely(ret)) {
- (*man->func->put_node)(man, mem);
- goto error;
- }
- return 0;
+ if (!mem->mm_node)
+ continue;
+
+ ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ if (unlikely(ret)) {
+ (*man->func->put_node)(man, mem);
+ if (ret == -EBUSY)
+ continue;
+
+ goto error;
}
+ return 0;
}
for (i = 0; i < placement->num_busy_placement; ++i) {
@@ -1160,9 +1140,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bo->bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
return ret;
@@ -1286,9 +1266,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
int ret = 0;
unsigned long num_pages;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1315,7 +1295,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
- atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1349,7 +1328,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
/*
* For ttm_bo_type_device buffers, allocate
@@ -1357,7 +1336,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
*/
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
- ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
+ ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
bo->mem.num_pages);
/* passed reservation objects should already be locked,
@@ -1379,11 +1358,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}
- if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bdev->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -1481,7 +1458,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
@@ -1650,8 +1627,6 @@ static int ttm_bo_global_init(void)
goto out;
spin_lock_init(&glob->lru_lock);
- glob->mem_glob = &ttm_mem_glob;
- glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1675,10 +1650,10 @@ out:
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
+ struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
unsigned i = TTM_NUM_MEM_TYPES;
struct ttm_mem_type_manager *man;
- struct ttm_bo_global *glob = bdev->glob;
while (i--) {
man = &bdev->man[i];
@@ -1708,8 +1683,6 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
- drm_vma_offset_manager_destroy(&bdev->vma_manager);
-
if (!ret)
ttm_bo_global_release();
@@ -1720,11 +1693,15 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
bool need_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret;
+ if (WARN_ON(vma_manager == NULL))
+ return -EINVAL;
+
ret = ttm_bo_global_init();
if (ret)
return ret;
@@ -1741,13 +1718,10 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- drm_vma_offset_manager_init(&bdev->vma_manager,
- DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
+ bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
- bdev->glob = glob;
bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -1827,31 +1801,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
- int ret = 0;
-
- /*
- * Using ttm_bo_reserve makes sure the lru lists are updated.
- */
-
- ret = ttm_bo_reserve(bo, true, no_wait, NULL);
- if (unlikely(ret != 0))
- return ret;
- ret = ttm_bo_wait(bo, true, no_wait);
- if (likely(ret == 0))
- atomic_inc(&bo->cpu_writers);
- ttm_bo_unreserve(bo);
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
-
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
-{
- atomic_dec(&bo->cpu_writers);
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
-
/**
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
@@ -1951,8 +1900,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
.no_wait_gpu = false
};
- while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
- ;
+ while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
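
A simplified, standalone model of the control flow that the ttm_bo_add_move_fence()/ttm_bo_mem_space() changes introduce: when no_wait_gpu is set, a placement blocked by a pending move fence now reports -EBUSY and the caller falls through to the next placement instead of failing outright. Function names and the placement logic below are stand-ins, not TTM code.

#include <stdio.h>
#include <errno.h>

/* illustrative results: 0 = placed, -EBUSY = blocked (try next), other = hard error */
static int try_placement(int idx, int no_wait)
{
	/* pretend placement 0 is guarded by a pending move fence */
	if (idx == 0)
		return no_wait ? -EBUSY : 0;

	/* placement 1 always has free space */
	return 0;
}

static int place_buffer(int num_placements, int no_wait)
{
	int i, ret;

	for (i = 0; i < num_placements; i++) {
		ret = try_placement(i, no_wait);
		if (ret == -EBUSY)
			continue;	/* blocked placement: fall through to the next one */
		if (ret)
			return ret;	/* hard failure */
		return 0;		/* success */
	}

	return -ENOMEM;
}

int main(void)
{
	printf("no_wait=1: %d\n", place_buffer(2, 1));	/* skips the blocked slot, succeeds */
	printf("no_wait=0: %d\n", place_buffer(2, 0));	/* first slot is usable directly */
	return 0;
}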
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fe81c565e7ef..6b0883a1776e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
-EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -153,7 +151,6 @@ retry:
}
return ret;
}
-EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
@@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
-EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@@ -503,7 +499,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
@@ -511,15 +507,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
- atomic_set(&fbo->base.cpu_writers, 0);
kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.base.resv = &fbo->base.base._resv;
- dma_resv_init(fbo->base.base.resv);
- ret = dma_resv_trylock(fbo->base.base.resv);
+ if (bo->base.resv == &bo->base._resv)
+ fbo->base.base.resv = &fbo->base.base._resv;
+
+ dma_resv_init(&fbo->base.base._resv);
+ ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
@@ -716,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -729,7 +726,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
}
@@ -772,7 +769,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;
- dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -785,7 +782,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -841,7 +838,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;
- ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
+ ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
@@ -850,7 +847,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;
- ttm_bo_unreserve(ghost);
+ dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 46dc3de7e81b..11863fbdd5d6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -42,8 +42,6 @@
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
-#define TTM_BO_VM_NUM_PREFAULT 16
-
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
@@ -106,25 +104,30 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+/**
+ * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
+ * @bo: The buffer object
+ * @vmf: The fault structure handed to the callback
+ *
+ * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
+ * during long waits, and after the wait the callback will be restarted. This
+ * is to allow other threads using the same virtual memory space concurrent
+ * access to map(), unmap() completely unrelated buffer objects. TTM buffer
+ * object reservations sometimes wait for GPU and should therefore be
+ * considered long waits. This function reserves the buffer object interruptibly
+ * taking this into account. Starvation is avoided by the vm system not
+ * allowing too many repeated restarts.
+ * This function is intended to be used in customized fault() and _mkwrite()
+ * handlers.
+ *
+ * Return:
+ * 0 on success and the bo was reserved.
+ * VM_FAULT_RETRY if blocking wait.
+ * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
+ */
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf)
{
- struct vm_area_struct *vma = vmf->vma;
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
- vma->vm_private_data;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long page_offset;
- unsigned long page_last;
- unsigned long pfn;
- struct ttm_tt *ttm = NULL;
- struct page *page;
- int err;
- int i;
- vm_fault_t ret = VM_FAULT_NOPAGE;
- unsigned long address = vmf->address;
- struct ttm_mem_type_manager *man =
- &bdev->man[bo->mem.mem_type];
- struct vm_area_struct cvma;
-
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
@@ -151,14 +154,54 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vm_reserve);
+
+/**
+ * ttm_bo_vm_fault_reserved - TTM fault helper
+ * @vmf: The struct vm_fault given as argument to the fault callback
+ * @prot: The page protection to be used for this memory area.
+ * @num_prefault: Maximum number of prefault pages. The caller may want to
+ * specify this based on madvice settings and the size of the GPU object
+ * backed by the memory.
+ *
+ * This function inserts one or more page table entries pointing to the
+ * memory backing the buffer object, and then returns a return code
+ * instructing the caller to retry the page access.
+ *
+ * Return:
+ * VM_FAULT_NOPAGE on success or pending signal
+ * VM_FAULT_SIGBUS on unspecified error
+ * VM_FAULT_OOM on out-of-memory
+ * VM_FAULT_RETRY if retryable wait
+ */
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vm_area_struct cvma = *vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct ttm_bo_device *bdev = bo->bdev;
+ unsigned long page_offset;
+ unsigned long page_last;
+ unsigned long pfn;
+ struct ttm_tt *ttm = NULL;
+ struct page *page;
+ int err;
+ pgoff_t i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long address = vmf->address;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
+
/*
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+ return VM_FAULT_SIGBUS;
if (bdev->driver->fault_reserve_notify) {
struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -169,17 +212,15 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
break;
case -EBUSY:
case -ERESTARTSYS:
- ret = VM_FAULT_NOPAGE;
- goto out_unlock;
+ return VM_FAULT_NOPAGE;
default:
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
+ return VM_FAULT_SIGBUS;
}
if (bo->moving != moving) {
- spin_lock(&bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
dma_fence_put(moving);
}
@@ -189,21 +230,12 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
* move.
*/
ret = ttm_bo_vm_fault_idle(bo, vmf);
- if (unlikely(ret != 0)) {
- if (ret == VM_FAULT_RETRY &&
- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- /* The BO has already been unreserved. */
- return ret;
- }
-
- goto out_unlock;
- }
+ if (unlikely(ret != 0))
+ return ret;
err = ttm_mem_io_lock(man, true);
- if (unlikely(err != 0)) {
- ret = VM_FAULT_NOPAGE;
- goto out_unlock;
- }
+ if (unlikely(err != 0))
+ return VM_FAULT_NOPAGE;
err = ttm_mem_io_reserve_vm(bo);
if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
@@ -220,18 +252,8 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
goto out_io_unlock;
}
- /*
- * Make a local vma copy to modify the page_prot member
- * and vm_flags if necessary. The vma parameter is protected
- * by mmap_sem in write mode.
- */
- cvma = *vma;
- cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
- if (bo->mem.bus.is_iomem) {
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
- } else {
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+ if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -240,24 +262,21 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
};
ttm = bo->ttm;
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
-
- /* Allocate all page at once, most common usage */
- if (ttm_tt_populate(ttm, &ctx)) {
+ if (ttm_tt_populate(bo->ttm, &ctx)) {
ret = VM_FAULT_OOM;
goto out_io_unlock;
}
+ } else {
+ /* Iomem should not be marked encrypted */
+ cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
- for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+ for (i = 0; i < num_prefault; ++i) {
if (bo->mem.bus.is_iomem) {
- /* Iomem should not be marked encrypted */
- cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
@@ -293,28 +312,49 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
ret = VM_FAULT_NOPAGE;
out_io_unlock:
ttm_mem_io_unlock(man);
-out_unlock:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
+
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ pgprot_t prot;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
dma_resv_unlock(bo->base.resv);
+
return ret;
}
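The rewritten ttm_bo_vm_fault() above is now just a thin wrapper around the two exported helpers; the point of exporting them is that drivers can supply their own page protection and prefault count. A hedged sketch of such a driver handler (foo_bo_vm_fault is hypothetical; prefaulting a single page is an arbitrary policy choice for illustration):

	static vm_fault_t foo_bo_vm_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		struct ttm_buffer_object *bo = vma->vm_private_data;
		vm_fault_t ret;

		ret = ttm_bo_vm_reserve(bo, vmf);
		if (ret)		/* VM_FAULT_RETRY or VM_FAULT_NOPAGE */
			return ret;

		/* Driver policy: custom protection and prefault window. */
		ret = ttm_bo_vm_fault_reserved(vmf, vm_get_page_prot(vma->vm_flags), 1);
		if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
			return ret;	/* the reservation was already dropped */

		dma_resv_unlock(bo->base.resv);
		return ret;
	}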
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
+void ttm_bo_vm_open(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
ttm_bo_get(bo);
}
+EXPORT_SYMBOL(ttm_bo_vm_open);
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
+void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
ttm_bo_put(bo);
vma->vm_private_data = NULL;
}
+EXPORT_SYMBOL(ttm_bo_vm_close);
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
unsigned long offset,
@@ -407,16 +447,16 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
struct drm_vma_offset_node *node;
struct ttm_buffer_object *bo = NULL;
- drm_vma_offset_lock_lookup(&bdev->vma_manager);
+ drm_vma_offset_lock_lookup(bdev->vma_manager);
- node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+ node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) {
bo = container_of(node, struct ttm_buffer_object,
base.vma_node);
bo = ttm_bo_get_unless_zero(bo);
}
- drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+ drm_vma_offset_unlock_lookup(bdev->vma_manager);
if (!bo)
pr_err("Could not find buffer object to map\n");
@@ -424,6 +464,28 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
+static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &ttm_bo_vm_ops;
+
+ /*
+ * Note: We're transferring the bo reference to
+ * vma->vm_private_data here.
+ */
+
+ vma->vm_private_data = bo;
+
+ /*
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
+ */
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+}
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
@@ -447,24 +509,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
if (unlikely(ret != 0))
goto out_unref;
- vma->vm_ops = &ttm_bo_vm_ops;
-
- /*
- * Note: We're transferring the bo reference to
- * vma->vm_private_data here.
- */
-
- vma->vm_private_data = bo;
-
- /*
- * We'd like to use VM_PFNMAP on shared mappings, where
- * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
- * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
- * bad for performance. Until that has been sorted out, use
- * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
- */
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
out_unref:
ttm_bo_put(bo);
@@ -472,17 +517,17 @@ out_unref:
}
EXPORT_SYMBOL(ttm_bo_mmap);
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
- if (vma->vm_pgoff != 0)
- return -EACCES;
-
ttm_bo_get(bo);
- vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = bo;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ /*
+ * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
+ * removed. Add it back here until the rest of TTM works without it.
+ */
+ vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);
+
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_mmap_obj);
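ttm_fbdev_mmap() is generalised into ttm_bo_mmap_obj() so that GEM-based TTM drivers can route &drm_gem_object_funcs.mmap through it (hence the FIXME about re-adding the fake offset). An illustrative hook-up, assuming the driver embeds the GEM object as ttm_buffer_object.base; the foo_* names are made up:

	static int foo_gem_object_mmap(struct drm_gem_object *obj,
				       struct vm_area_struct *vma)
	{
		struct ttm_buffer_object *bo =
			container_of(obj, struct ttm_buffer_object, base);

		/* Takes its own BO reference and sets up vm_ops/vm_flags. */
		return ttm_bo_mmap_obj(vma, bo);
	}

	static const struct drm_gem_object_funcs foo_gem_object_funcs = {
		/* ... other callbacks ... */
		.mmap = foo_gem_object_mmap,
	};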
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 131dae8f4170..1797f04c0534 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -43,37 +43,22 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
}
}
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_del_from_lru(bo);
- }
-}
-
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
@@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru)
+ struct list_head *dups)
{
- struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
if (list_empty(list))
return 0;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
@@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo = entry->bo;
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
- if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- dma_resv_unlock(bo->base.resv);
-
- ret = -EBUSY;
-
- } else if (ret == -EALREADY && dups) {
+ if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
@@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
- if (del_lru) {
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- }
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
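Because reserved buffers now stay on the LRU, ttm_eu_reserve_buffers() no longer needs to be told whether to delist them, so the del_lru argument disappears. The caller-side change is purely mechanical:

	/* Before: the caller chose whether to pull the BOs off the LRU. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, intr, &dups, true);

	/* After: reservation leaves the BOs on the LRU; the flag is gone. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, intr, &dups);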
@@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
- struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
-
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
- bo = entry->bo;
+ struct ttm_buffer_object *bo = entry->bo;
+
if (entry->num_shared)
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 8617958b7ae6..acd63b70d814 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
- ret = ttm_bo_swapout(glob->bo_glob, ctx);
+ ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 627f8dc91d0e..b40a4678c296 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@ put_pages:
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7d78e6deac89..bf876faea592 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,7 +33,6 @@
* when freed).
*/
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h>
@@ -886,8 +885,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@@ -991,8 +990,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
@@ -1238,5 +1237,3 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
-
-#endif
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 416f24823c0a..954b09c948eb 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -80,8 +80,8 @@ static int tve200_modeset_init(struct drm_device *dev)
if (ret && ret != -ENODEV)
return ret;
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_bridge;
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ddb61a60c610..b4ae3e89a7b4 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -90,13 +90,6 @@ udl_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static struct drm_encoder*
-udl_best_single_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- return drm_encoder_find(connector->dev, NULL, enc_id);
-}
-
static int udl_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
@@ -120,7 +113,6 @@ static void udl_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
- .best_encoder = udl_best_single_encoder,
};
static const struct drm_connector_funcs udl_connector_funcs = {
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a22b75a3a533..edd299ab53d8 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -58,7 +58,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/* gem_create_object function for allocating a BO struct and doing
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 3506ae2723ae..1a07462b4528 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -126,6 +126,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_CSD:
args->value = v3d_has_csd(v3d);
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -169,7 +172,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
+DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be be
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 19c092d75266..549dde83408b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -530,13 +530,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_v3d_submit_cl *args = data;
struct v3d_bin_job *bin = NULL;
struct v3d_render_job *render;
+ struct v3d_job *clean_job = NULL;
+ struct v3d_job *last_job;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
- if (args->pad != 0) {
- DRM_INFO("pad must be zero: %d\n", args->pad);
+ if (args->flags != 0 &&
+ args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
@@ -565,6 +568,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
if (ret) {
+ kfree(bin);
v3d_job_put(&render->base);
kfree(bin);
return ret;
@@ -578,12 +582,31 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
bin->render = render;
}
- ret = v3d_lookup_bos(dev, file_priv, &render->base,
+ if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
+ if (!clean_job) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+ if (ret) {
+ kfree(clean_job);
+ clean_job = NULL;
+ goto fail;
+ }
+
+ last_job = clean_job;
+ } else {
+ last_job = &render->base;
+ }
+
+ ret = v3d_lookup_bos(dev, file_priv, last_job,
args->bo_handles, args->bo_handle_count);
if (ret)
goto fail;
- ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx);
+ ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
if (ret)
goto fail;
@@ -602,28 +625,44 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
if (ret)
goto fail_unreserve;
+
+ if (clean_job) {
+ struct dma_fence *render_fence =
+ dma_fence_get(render->base.done_fence);
+ ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+ if (ret)
+ goto fail_unreserve;
+ ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail_unreserve;
+ }
+
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
- &render->base,
+ last_job,
&acquire_ctx,
args->out_sync,
- render->base.done_fence);
+ last_job->done_fence);
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
+ if (clean_job)
+ v3d_job_put(clean_job);
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- drm_gem_unlock_reservations(render->base.bo,
- render->base.bo_count, &acquire_ctx);
+ drm_gem_unlock_reservations(last_job->bo,
+ last_job->bo_count, &acquire_ctx);
fail:
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
+ if (clean_job)
+ v3d_job_put(clean_job);
return ret;
}
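Userspace would discover and request the new cache-clean job roughly as below; this is only an illustrative sketch, with the uapi field names taken from the corresponding include/uapi change (not shown here) and all other submit setup elided:

	struct drm_v3d_get_param gp = {
		.param = DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
	};
	struct drm_v3d_submit_cl submit = { 0 };

	if (drmIoctl(fd, DRM_IOCTL_V3D_GET_PARAM, &gp) == 0 && gp.value) {
		/* Queue a V3D_CACHE_CLEAN job after the render job. */
		submit.flags = DRM_V3D_SUBMIT_CL_FLUSH_CACHE;
	}
	/* ... fill in bcl/rcl ranges, BO handles and sync objects ... */
	drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);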
diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
index 56ba510f21a2..45fe135d6e43 100644
--- a/drivers/gpu/drm/vboxvideo/Kconfig
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -4,6 +4,8 @@ config DRM_VBOXVIDEO
depends on DRM && X86 && PCI
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
select GENERIC_ALLOCATOR
help
This is a KMS driver for the virtual Graphics Card used in
diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
index 55d798c76b21..f2e968b5ffa6 100644
--- a/drivers/gpu/drm/vboxvideo/Makefile
+++ b/drivers/gpu/drm/vboxvideo/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
- vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+ vbox_drv.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
vbox_mode.o vbox_ttm.o
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 862db495d111..8512d970a09f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -32,10 +33,6 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
- .fb_probe = vboxfb_create,
-};
-
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct vbox_private *vbox;
@@ -79,20 +76,16 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mode_fini;
- ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
- &vbox_fb_helper_funcs, 32,
- vbox->num_crtcs);
+ ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
if (ret)
goto err_irq_fini;
ret = drm_dev_register(&vbox->ddev, 0);
if (ret)
- goto err_fbdev_fini;
+ goto err_irq_fini;
return 0;
-err_fbdev_fini:
- vbox_fbdev_fini(vbox);
err_irq_fini:
vbox_irq_fini(vbox);
err_mode_fini:
@@ -113,7 +106,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
struct vbox_private *vbox = pci_get_drvdata(pdev);
drm_dev_unregister(&vbox->ddev);
- vbox_fbdev_fini(vbox);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
vbox_mm_fini(vbox);
@@ -189,10 +181,7 @@ static struct pci_driver vbox_pci_driver = {
#endif
};
-static const struct file_operations vbox_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(vbox_fops);
static struct drm_driver driver = {
.driver_features =
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index e8cb9efc6088..87421903816c 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -16,12 +16,9 @@
#include <linux/string.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
-
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_ch_setup.h"
@@ -48,16 +45,9 @@
sizeof(struct hgsmi_host_flags))
#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
-struct vbox_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct vbox_private {
/* Must be first; or we must define our own release callback */
struct drm_device ddev;
- struct drm_fb_helper fb_helper;
- struct vbox_framebuffer afb;
u8 __iomem *guest_heap;
u8 __iomem *vbva_buffers;
@@ -137,7 +127,6 @@ struct vbox_encoder {
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
-#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
@@ -148,25 +137,9 @@ void vbox_mode_fini(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects);
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-void vbox_fbdev_fini(struct vbox_private *vbox);
-
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj);
-
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
deleted file mode 100644
index 8f74bcffc034..000000000000
--- a/drivers/gpu/drm/vboxvideo/vbox_fb.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright (C) 2013-2017 Oracle Corporation
- * This file is based on ast_fb.c
- * Copyright 2012 Red Hat Inc.
- * Authors: Dave Airlie <airlied@redhat.com>
- * Michael Thayer <michael.thayer@oracle.com,
- */
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/sysrq.h>
-#include <linux/tty.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-
-#include "vbox_drv.h"
-#include "vboxvideo.h"
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
-static struct fb_deferred_io vbox_defio = {
- .delay = HZ / 30,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-#endif
-
-static struct fb_ops vboxfb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
-};
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct vbox_private *vbox =
- container_of(helper, struct vbox_private, fb_helper);
- struct pci_dev *pdev = vbox->ddev.pdev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_framebuffer *fb;
- struct fb_info *info;
- struct drm_gem_object *gobj;
- struct drm_gem_vram_object *gbo;
- int size, ret;
- s64 gpu_addr;
- u32 pitch;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- mode_cmd.pitches[0] = pitch;
-
- size = pitch * mode_cmd.height;
-
- ret = vbox_gem_create(vbox, size, true, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj);
- if (ret)
- return ret;
-
- gbo = drm_gem_vram_of_gem(gobj);
-
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
-
- info->screen_size = size;
- info->screen_base = (char __iomem *)drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(info->screen_base))
- return PTR_ERR(info->screen_base);
-
- fb = &vbox->afb.base;
- helper->fb = fb;
-
- info->fbops = &vboxfb_ops;
-
- /*
- * This seems to be done for safety checking that the framebuffer
- * is not registered twice by different drivers.
- */
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
-
- drm_fb_helper_fill_info(info, helper, sizes);
-
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0)
- return (int)gpu_addr;
- info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
- info->fix.smem_len = vbox->available_vram_size - gpu_addr;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- info->fbdefio = &vbox_defio;
- fb_deferred_io_init(info);
-#endif
-
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
-
- return 0;
-}
-
-void vbox_fbdev_fini(struct vbox_private *vbox)
-{
- struct vbox_framebuffer *afb = &vbox->afb;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio)
- fb_deferred_io_cleanup(vbox->fb_helper.fbdev);
-#endif
-
- drm_fb_helper_unregister_fbi(&vbox->fb_helper);
-
- if (afb->obj) {
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(afb->obj);
-
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
-
- drm_gem_object_put_unlocked(afb->obj);
- afb->obj = NULL;
- }
- drm_fb_helper_fini(&vbox->fb_helper);
-
- drm_framebuffer_unregister_private(&afb->base);
- drm_framebuffer_cleanup(&afb->base);
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 02fa8277ff1e..9dcab115a261 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -11,22 +11,12 @@
#include <linux/vbox_err.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
-static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
-
- if (vbox_fb->obj)
- drm_gem_object_put_unlocked(vbox_fb->obj);
-
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
void vbox_report_caps(struct vbox_private *vbox)
{
u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
@@ -38,87 +28,6 @@ void vbox_report_caps(struct vbox_private *vbox)
hgsmi_send_caps_info(vbox->guest_pool, caps);
}
-/* Send information about dirty rectangles to VBVA. */
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- struct vbox_private *vbox = fb->dev->dev_private;
- struct drm_display_mode *mode;
- struct drm_crtc *crtc;
- int crtc_x, crtc_y;
- unsigned int i;
-
- mutex_lock(&vbox->hw_mutex);
- list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
- if (crtc->primary->state->fb != fb)
- continue;
-
- mode = &crtc->state->mode;
- crtc_x = crtc->primary->state->src_x >> 16;
- crtc_y = crtc->primary->state->src_y >> 16;
-
- for (i = 0; i < num_rects; ++i) {
- struct vbva_cmd_hdr cmd_hdr;
- unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
-
- if (rects[i].x1 > crtc_x + mode->hdisplay ||
- rects[i].y1 > crtc_y + mode->vdisplay ||
- rects[i].x2 < crtc_x ||
- rects[i].y2 < crtc_y)
- continue;
-
- cmd_hdr.x = (s16)rects[i].x1;
- cmd_hdr.y = (s16)rects[i].y1;
- cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
- cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
-
- if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
- vbox->guest_pool))
- continue;
-
- vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
- &cmd_hdr, sizeof(cmd_hdr));
- vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
- }
- }
- mutex_unlock(&vbox->hw_mutex);
-}
-
-static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int flags, unsigned int color,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
-
- return 0;
-}
-
-static const struct drm_framebuffer_funcs vbox_fb_funcs = {
- .destroy = vbox_user_framebuffer_destroy,
- .dirty = vbox_user_framebuffer_dirty,
-};
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
- vbox_fb->obj = obj;
- ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
- if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
static int vbox_accel_init(struct vbox_private *vbox)
{
struct vbva_buffer *vbva;
@@ -270,29 +179,3 @@ void vbox_hw_fini(struct vbox_private *vbox)
gen_pool_destroy(vbox->guest_pool);
pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
-
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev,
- size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
-
- *obj = &gbo->bo.base;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index e1e48ba919eb..19612132c8a3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -13,7 +13,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -133,7 +135,7 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
if (!fb1) {
fb1 = fb;
- if (to_vbox_framebuffer(fb1) == &vbox->afb)
+ if (fb1 == vbox->ddev.fb_helper->fb)
break;
} else if (fb != fb1) {
single_framebuffer = false;
@@ -172,8 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -283,10 +284,43 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
{
struct drm_crtc *crtc = plane->state->crtc;
struct drm_framebuffer *fb = plane->state->fb;
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips, i;
vbox_crtc_set_base_and_mode(crtc, fb,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
+
+ /* Send information about dirty rectangles to VBVA. */
+
+ clips = drm_plane_get_damage_clips(plane->state);
+ num_clips = drm_plane_get_damage_clips_count(plane->state);
+
+ if (!num_clips)
+ return;
+
+ mutex_lock(&vbox->hw_mutex);
+
+ for (i = 0; i < num_clips; ++i, ++clips) {
+ struct vbva_cmd_hdr cmd_hdr;
+ unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
+ cmd_hdr.x = (s16)clips->x1;
+ cmd_hdr.y = (s16)clips->y1;
+ cmd_hdr.w = (u16)clips->x2 - clips->x1;
+ cmd_hdr.h = (u16)clips->y2 - clips->y1;
+
+ if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool))
+ continue;
+
+ vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+ &cmd_hdr, sizeof(cmd_hdr));
+ vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+ }
+
+ mutex_unlock(&vbox->hw_mutex);
}
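The dirty-rectangle reporting moves from the legacy framebuffer ->dirty hook into the plane's atomic update, driven by damage clips on the plane state. For the clips to be populated, the primary plane has to opt in to the FB_DAMAGE_CLIPS property, presumably in the driver's plane init (not part of this hunk); legacy DRM_IOCTL_MODE_DIRTYFB calls are translated into the same clips by drm_gem_fb_create_with_dirty, which this file switches to further down:

	/* Assumed to happen once, when the primary plane is initialised. */
	drm_plane_enable_fb_damage_clips(plane);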
static void vbox_primary_atomic_disable(struct drm_plane *plane,
@@ -300,35 +334,6 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane,
old_state->src_y >> 16);
}
-static int vbox_primary_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
-
- return ret;
-}
-
-static void vbox_primary_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(old_state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static int vbox_cursor_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -386,8 +391,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
container_of(plane->dev, struct vbox_private, ddev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
u32 width = plane->state->crtc_w;
u32 height = plane->state->crtc_h;
size_t data_size, mask_size;
@@ -459,30 +463,6 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane,
mutex_unlock(&vbox->hw_mutex);
}
-static int vbox_cursor_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-}
-
-static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!plane->state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(plane->state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static const u32 vbox_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
@@ -491,8 +471,8 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
.atomic_check = vbox_cursor_atomic_check,
.atomic_update = vbox_cursor_atomic_update,
.atomic_disable = vbox_cursor_atomic_disable,
- .prepare_fb = vbox_cursor_prepare_fb,
- .cleanup_fb = vbox_cursor_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
@@ -513,8 +493,8 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
.atomic_check = vbox_primary_atomic_check,
.atomic_update = vbox_primary_atomic_update,
.atomic_disable = vbox_primary_atomic_disable,
- .prepare_fb = vbox_primary_prepare_fb,
- .cleanup_fb = vbox_primary_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_primary_plane_funcs = {
@@ -856,40 +836,8 @@ static int vbox_connector_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *vbox_user_framebuffer_create(
- struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct vbox_private *vbox =
- container_of(dev, struct vbox_private, ddev);
- struct drm_gem_object *obj;
- struct vbox_framebuffer *vbox_fb;
- int ret = -ENOMEM;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
- if (!vbox_fb)
- goto err_unref_obj;
-
- ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
- if (ret)
- goto err_free_vbox_fb;
-
- return &vbox_fb->base;
-
-err_free_vbox_fb:
- kfree(vbox_fb);
-err_unref_obj:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
-}
-
static const struct drm_mode_config_funcs vbox_mode_funcs = {
- .fb_create = vbox_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index b82595a9ed0f..976423d0c3cc 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -17,8 +17,7 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = &vbox->ddev;
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- vbox->available_vram_size,
- &drm_gem_vram_mm_funcs);
+ vbox->available_vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index f1f0a7c87771..b00e20f5ce05 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -994,7 +994,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
- if (vc4_state->mm.allocated) {
+ if (drm_mm_node_allocated(&vc4_state->mm)) {
unsigned long flags;
spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 8a27a6acee61..c586325de2a5 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -249,7 +249,8 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
}
if (panel)
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
return drm_bridge_attach(dpi->encoder, bridge, NULL);
}
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index c78fa8144776..c9ba83ed49b9 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -31,6 +31,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -1575,8 +1576,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
}
if (panel) {
- dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_DSI);
+ dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ee7d4e7b0ee3..1c62c6c9244b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -398,10 +398,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- frame.avi.right_bar = cstate->tv.margins.right;
- frame.avi.left_bar = cstate->tv.margins.left;
- frame.avi.top_bar = cstate->tv.margins.top;
- frame.avi.bottom_bar = cstate->tv.margins.bottom;
+ drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
}
@@ -1285,6 +1282,9 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+ struct cec_connector_info conn_info;
+#endif
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
@@ -1403,13 +1403,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
#ifdef CONFIG_DRM_VC4_HDMI_CEC
hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
vc4, "vc4",
- CEC_CAP_TRANSMIT |
- CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH |
- CEC_CAP_RC, 1);
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_destroy_conn;
+
+ cec_fill_conn_info_from_drm(&conn_info, hdmi->connector);
+ cec_s_conn_info(hdmi->cec_adap, &conn_info);
+
HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff);
value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 9936b15d0bf1..5a43659da319 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -315,7 +315,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
- if (vc4->hvs->mitchell_netravali_filter.allocated)
+ if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
drm_mm_takedown(&vc4->hvs->dlist_mm);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 5e5f90810aca..4934127f0d76 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -178,7 +178,7 @@ static void vc4_plane_destroy_state(struct drm_plane *plane,
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
- if (vc4_state->lbm.allocated) {
+ if (drm_mm_node_allocated(&vc4_state->lbm)) {
unsigned long irqflags;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
@@ -557,7 +557,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
/* Allocate the LBM memory that the HVS will use for temporary
* storage due to our scaling/format conversion.
*/
- if (!vc4_state->lbm.allocated) {
+ if (!drm_mm_node_allocated(&vc4_state->lbm)) {
int ret;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index ba36e933bb49..eff3047052d4 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,7 +3,7 @@ config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_GEM_SHMEM_HELPER
help
This is the virtual GPU driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen).
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 458e606a936f..92aa2b3d349d 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -4,7 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
- virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
+ virtgpu_display.o virtgpu_vq.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 0fc32fa0b3c0..8dee698c90ff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -56,7 +56,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
dev->pdev = pdev;
if (vga)
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
- 0,
"virtiodrmfb");
/*
@@ -185,17 +184,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-static const struct file_operations virtio_gpu_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = virtio_gpu_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
@@ -210,15 +199,10 @@ static struct drm_driver driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
- .gem_prime_vmap = virtgpu_gem_prime_vmap,
- .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
- .gem_prime_mmap = virtgpu_gem_prime_mmap,
- .gem_free_object_unlocked = virtio_gpu_gem_free_object,
- .gem_open_object = virtio_gpu_gem_object_open,
- .gem_close_object = virtio_gpu_gem_object_close,
+ .gem_create_object = virtio_gpu_create_object,
.fops = &virtio_gpu_driver_fops,
.ioctls = virtio_gpu_ioctls,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index e28829661724..0b56ba005e25 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -35,12 +35,9 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_placement.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
@@ -68,21 +65,23 @@ struct virtio_gpu_object_params {
};
struct virtio_gpu_object {
- struct drm_gem_object gem_base;
+ struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
struct sg_table *pages;
uint32_t mapped;
- void *vmap;
bool dumb;
- struct ttm_place placement_code;
- struct ttm_placement placement;
- struct ttm_buffer_object tbo;
- struct ttm_bo_kmap_obj kmap;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
- container_of((gobj), struct virtio_gpu_object, gem_base)
+ container_of((gobj), struct virtio_gpu_object, base.base)
+
+struct virtio_gpu_object_array {
+ struct ww_acquire_ctx ticket;
+ struct list_head next;
+ u32 nents, total;
+ struct drm_gem_object *objs[];
+};
struct virtio_gpu_vbuffer;
struct virtio_gpu_device;
@@ -115,9 +114,9 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
-
virtio_gpu_resp_cb resp_cb;
+ struct virtio_gpu_object_array *objs;
struct list_head list;
};
@@ -147,10 +146,6 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
-struct virtio_gpu_mman {
- struct ttm_bo_device bdev;
-};
-
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -179,8 +174,6 @@ struct virtio_gpu_device {
struct virtio_device *vdev;
- struct virtio_gpu_mman mman;
-
struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
uint32_t num_scanouts;
@@ -205,6 +198,10 @@ struct virtio_gpu_device {
struct work_struct config_changed_work;
+ struct work_struct obj_free_work;
+ spinlock_t obj_free_lock;
+ struct list_head obj_free_list;
+
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
struct list_head cap_cache;
@@ -217,9 +214,6 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head);
-void virtio_gpu_unref_list(struct list_head *head);
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
@@ -240,10 +234,6 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file);
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -251,20 +241,35 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj);
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_work(struct work_struct *work);
+
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
@@ -295,28 +300,32 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id);
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id);
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence);
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
@@ -339,11 +348,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
-/* virtio_gpu_ttm.c */
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
/* virtio_gpu_fence.c */
bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
@@ -355,70 +359,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
/* virtio_gpu_object */
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
- struct virtio_gpu_object *bo);
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
-static inline struct virtio_gpu_object*
-virtio_gpu_object_ref(struct virtio_gpu_object *bo)
-{
- ttm_bo_get(&bo->tbo);
- return bo;
-}
-
-static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
- tbo = &((*bo)->tbo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
-}
-
-static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
- bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS) {
- struct virtio_gpu_device *qdev =
- bo->gem_base.dev->dev_private;
- dev_err(qdev->dev, "%p reserve failed\n", bo);
- }
- return r;
- }
- return 0;
-}
-
-static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
-{
- ttm_bo_unreserve(&bo->tbo);
+ return drm_vma_node_offset_addr(&bo->base.base.vma_node);
}
/* virgl debugfs */
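
For orientation, a minimal sketch (not part of the patch) of how a caller is expected to combine the new object-array helpers declared above. do_submit() stands in for any of the virtio_gpu_cmd_*() calls that now take an objs argument, and the error handling shown is purely illustrative.

static int example_submit_one(struct virtio_gpu_device *vgdev,
			      struct drm_file *file, u32 handle,
			      struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	objs = virtio_gpu_array_from_handles(file, &handle, 1);
	if (!objs)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);		/* take the dma_resv locks */
	if (ret) {
		virtio_gpu_array_put_free(objs);	/* drops the GEM references */
		return ret;
	}

	/*
	 * Hand objs to a fenced command; per the virtgpu_vq.c hunks later in
	 * this patch, the queue code attaches the fence, unlocks the
	 * reservations and releases the references once the host has
	 * processed the buffer.
	 */
	do_submit(vgdev, objs, fence);			/* hypothetical placeholder */
	return 0;
}
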
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index a0514f5bd006..a4b9881ca1d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -41,6 +41,10 @@ bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
+ if (WARN_ON_ONCE(fence->f.seqno == 0))
+ /* fence leaked outside the driver before virtio_gpu_fence_emit()
+ * completed its initialization */
+ return false;
if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
return true;
return false;
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 292566146814..4c1f579edfb3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -28,54 +28,31 @@
#include "virtgpu_drv.h"
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
-
- if (obj)
- virtio_gpu_object_unref(&obj);
-}
-
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct virtio_gpu_fence *fence)
-{
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_object *obj;
- int ret;
-
- ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
- if (ret)
- return ERR_PTR(ret);
-
- return obj;
-}
-
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, params, NULL);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+ if (ret < 0)
+ return ret;
- ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
+ ret = drm_gem_handle_create(file, &obj->base.base, &handle);
if (ret) {
- drm_gem_object_release(&obj->gem_base);
+ drm_gem_object_release(&obj->base.base);
return ret;
}
- *obj_p = &obj->gem_base;
+ *obj_p = &obj->base.base;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&obj->gem_base);
+ drm_gem_object_put_unlocked(&obj->base.base);
*handle_p = handle;
return 0;
@@ -136,19 +113,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return 0;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
- return r;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
return 0;
}
@@ -157,17 +133,136 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
return;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
+}
+
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;
+
+ objs = kmalloc(size, GFP_KERNEL);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = nents;
+ return objs;
+}
+
+static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
+{
+ kfree(objs);
+}
+
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ u32 i;
+
+ objs = virtio_gpu_array_alloc(nents);
+ if (!objs)
+ return NULL;
+
+ for (i = 0; i < nents; i++) {
+ objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
+ if (!objs->objs[i]) {
+ objs->nents = i;
+ virtio_gpu_array_put_free(objs);
+ return NULL;
+ }
+ }
+ objs->nents = i;
+ return objs;
+}
+
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj)
+{
+ if (WARN_ON_ONCE(objs->nents == objs->total))
+ return;
+
+ drm_gem_object_get(obj);
+ objs->objs[objs->nents] = obj;
+ objs->nents++;
+}
+
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
+{
+ int ret;
+
+ if (objs->nents == 1) {
+ ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
+ } else {
+ ret = drm_gem_lock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+ return ret;
+}
+
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
+{
+ if (objs->nents == 1) {
+ dma_resv_unlock(objs->objs[0]->resv);
+ } else {
+ drm_gem_unlock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+}
+
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < objs->nents; i++)
+ dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+}
+
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
+{
+ u32 i;
+
+ for (i = 0; i < objs->nents; i++)
+ drm_gem_object_put_unlocked(objs->objs[i]);
+ virtio_gpu_array_free(objs);
+}
+
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ spin_lock(&vgdev->obj_free_lock);
+ list_add_tail(&objs->next, &vgdev->obj_free_list);
+ spin_unlock(&vgdev->obj_free_lock);
+ schedule_work(&vgdev->obj_free_work);
+}
+
+void virtio_gpu_array_put_free_work(struct work_struct *work)
+{
+ struct virtio_gpu_device *vgdev =
+ container_of(work, struct virtio_gpu_device, obj_free_work);
+ struct virtio_gpu_object_array *objs;
+
+ spin_lock(&vgdev->obj_free_lock);
+ while (!list_empty(&vgdev->obj_free_list)) {
+ objs = list_first_entry(&vgdev->obj_free_list,
+ struct virtio_gpu_object_array, next);
+ list_del(&objs->next);
+ spin_unlock(&vgdev->obj_free_lock);
+ virtio_gpu_array_put_free(objs);
+ spin_lock(&vgdev->obj_free_lock);
+ }
+ spin_unlock(&vgdev->obj_free_lock);
}
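
As a companion to virtio_gpu_array_put_free_work() above, a sketch (assumption: it mirrors the ctrl-queue dequeue hunk later in this patch) of how completed commands hand their references to the worker; the final puts then run from plain process context rather than the completion path. The function name here is illustrative.

static void example_reclaim_vbuf(struct virtio_gpu_device *vgdev,
				 struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->objs) {
		virtio_gpu_array_put_free_delayed(vgdev, vbuf->objs);
		vbuf->objs = NULL;
	}
	/* obj_free_work later calls virtio_gpu_array_put_free() per entry,
	 * dropping obj_free_lock around each put. */
}
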
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0a88ef11b9d3..9af1ec62434f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -29,7 +29,6 @@
#include <linux/sync_file.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
@@ -56,45 +55,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
&virtio_gpu_map->offset);
}
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct ttm_validate_buffer *buf;
- struct ttm_buffer_object *bo;
- struct virtio_gpu_object *qobj;
- int ret;
-
- ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
- if (ret != 0)
- return ret;
-
- list_for_each_entry(buf, head, head) {
- bo = buf->bo;
- qobj = container_of(bo, struct virtio_gpu_object, tbo);
- ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
- if (ret) {
- ttm_eu_backoff_reservation(ticket, head);
- return ret;
- }
- }
- return 0;
-}
-
-void virtio_gpu_unref_list(struct list_head *head)
-{
- struct ttm_validate_buffer *buf;
- struct ttm_buffer_object *bo;
- struct virtio_gpu_object *qobj;
-
- list_for_each_entry(buf, head, head) {
- bo = buf->bo;
- qobj = container_of(bo, struct virtio_gpu_object, tbo);
-
- drm_gem_object_put_unlocked(&qobj->gem_base);
- }
-}
-
/*
* Usage of execbuffer:
* Relocations need to take into account the full VIRTIO_GPUDrawable size.
@@ -107,16 +67,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
- struct drm_gem_object *gobj;
struct virtio_gpu_fence *out_fence;
- struct virtio_gpu_object *qobj;
int ret;
uint32_t *bo_handles = NULL;
void __user *user_bo_handles = NULL;
- struct list_head validate_list;
- struct ttm_validate_buffer *buflist = NULL;
- int i;
- struct ww_acquire_ctx ticket;
+ struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
@@ -157,15 +112,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
return out_fence_fd;
}
- INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
-
bo_handles = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(uint32_t), GFP_KERNEL);
- buflist = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(struct ttm_validate_buffer),
- GFP_KERNEL | __GFP_ZERO);
- if (!bo_handles || !buflist) {
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!bo_handles) {
ret = -ENOMEM;
goto out_unused_fd;
}
@@ -177,27 +127,23 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- for (i = 0; i < exbuf->num_bo_handles; i++) {
- gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
- if (!gobj) {
- ret = -ENOENT;
- goto out_unused_fd;
- }
-
- qobj = gem_to_virtio_gpu_obj(gobj);
- buflist[i].bo = &qobj->tbo;
-
- list_add(&buflist[i].head, &validate_list);
+ buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+ exbuf->num_bo_handles);
+ if (!buflist) {
+ ret = -ENOENT;
+ goto out_unused_fd;
}
kvfree(bo_handles);
bo_handles = NULL;
}
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret)
- goto out_free;
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ goto out_unused_fd;
+ }
- buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
+ buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_unresv;
@@ -222,24 +168,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
}
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, out_fence);
-
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
-
- /* fence the command bo */
- virtio_gpu_unref_list(&validate_list);
- kvfree(buflist);
+ vfpriv->ctx_id, buflist, out_fence);
return 0;
out_memdup:
- kfree(buf);
+ kvfree(buf);
out_unresv:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
-out_free:
- virtio_gpu_unref_list(&validate_list);
+ if (buflist)
+ virtio_gpu_array_unlock_resv(buflist);
out_unused_fd:
kvfree(bo_handles);
- kvfree(buflist);
+ if (buflist)
+ virtio_gpu_array_put_free(buflist);
if (out_fence_fd >= 0)
put_unused_fd(out_fence_fd);
@@ -316,11 +256,11 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence)
return -ENOMEM;
- qobj = virtio_gpu_alloc_object(dev, &params, fence);
+ ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
dma_fence_put(&fence->f);
- if (IS_ERR(qobj))
- return PTR_ERR(qobj);
- obj = &qobj->gem_base;
+ if (ret < 0)
+ return ret;
+ obj = &qobj->base.base;
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
@@ -347,7 +287,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
qobj = gem_to_virtio_gpu_obj(gobj);
- ri->size = qobj->gem_base.size;
+ ri->size = qobj->base.base.size;
ri->res_handle = qobj->hw_res_handle;
drm_gem_object_put_unlocked(gobj);
return 0;
@@ -360,9 +300,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
- struct ttm_operation_ctx ctx = { true, false };
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
u32 offset = args->offset;
@@ -371,39 +309,31 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
- gobj = drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj, false);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
convert_to_hw_box(&box, &args->box);
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) {
ret = -ENOMEM;
- goto out_unres;
+ goto err_unlock;
}
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, qobj->hw_res_handle,
- vfpriv->ctx_id, offset, args->level,
- &box, fence);
- dma_resv_add_excl_fence(qobj->tbo.base.resv,
- &fence->f);
-
+ (vgdev, vfpriv->ctx_id, offset, args->level,
+ &box, objs, fence);
dma_fence_put(&fence->f);
-out_unres:
- virtio_gpu_object_unreserve(qobj);
-out:
- drm_gem_object_put_unlocked(gobj);
+ return 0;
+
+err_unlock:
+ virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+ virtio_gpu_array_put_free(objs);
return ret;
}
@@ -413,75 +343,71 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
- struct ttm_operation_ctx ctx = { true, false };
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
struct virtio_gpu_box box;
int ret;
u32 offset = args->offset;
- gobj = drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj, false);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
-
convert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, qobj, offset,
- box.w, box.h, box.x, box.y, NULL);
+ (vgdev, offset,
+ box.w, box.h, box.x, box.y,
+ objs, NULL);
} else {
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
+
+ ret = -ENOMEM;
fence = virtio_gpu_fence_alloc(vgdev);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unres;
- }
+ if (!fence)
+ goto err_unlock;
+
virtio_gpu_cmd_transfer_to_host_3d
- (vgdev, qobj,
+ (vgdev,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, fence);
- dma_resv_add_excl_fence(qobj->tbo.base.resv,
- &fence->f);
+ args->level, &box, objs, fence);
dma_fence_put(&fence->f);
}
+ return 0;
-out_unres:
- virtio_gpu_object_unreserve(qobj);
-out:
- drm_gem_object_put_unlocked(gobj);
+err_unlock:
+ virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+ virtio_gpu_array_put_free(objs);
return ret;
}
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+ struct drm_file *file)
{
struct drm_virtgpu_3d_wait *args = data;
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct drm_gem_object *obj;
+ long timeout = 15 * HZ;
int ret;
- bool nowait = false;
- gobj = drm_gem_object_lookup(file, args->handle);
- if (gobj == NULL)
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (obj == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- if (args->flags & VIRTGPU_WAIT_NOWAIT)
- nowait = true;
- ret = virtio_gpu_object_wait(qobj, nowait);
+ if (args->flags & VIRTGPU_WAIT_NOWAIT) {
+ ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ } else {
+ ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+ timeout);
+ }
+ if (ret == 0)
+ ret = -EBUSY;
+ else if (ret > 0)
+ ret = 0;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
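
A small sketch restating (not extending) how the rewritten wait ioctl above maps the dma_resv return values; the helper name is illustrative.

static int example_map_wait_ret(long ret)
{
	if (ret == 0)
		return -EBUSY;	/* not signaled (NOWAIT case) or timed out */
	if (ret > 0)
		return 0;	/* signaled; positive value is remaining jiffies */
	return ret;		/* error from dma_resv_wait_timeout_rcu() */
}
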
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index c190702fab72..2f5773e43557 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -147,19 +147,23 @@ int virtio_gpu_init(struct drm_device *dev)
INIT_WORK(&vgdev->config_changed_work,
virtio_gpu_config_changed_work_func);
+ INIT_WORK(&vgdev->obj_free_work,
+ virtio_gpu_array_put_free_work);
+ INIT_LIST_HEAD(&vgdev->obj_free_list);
+ spin_lock_init(&vgdev->obj_free_lock);
+
#ifdef __LITTLE_ENDIAN
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
- DRM_INFO("virgl 3d acceleration %s\n",
- vgdev->has_virgl_3d ? "enabled" : "not supported by host");
-#else
- DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
- DRM_INFO("EDID support available.\n");
}
+ DRM_INFO("features: %cvirgl %cedid\n",
+ vgdev->has_virgl_3d ? '+' : '-',
+ vgdev->has_edid ? '+' : '-');
+
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
@@ -173,12 +177,6 @@ int virtio_gpu_init(struct drm_device *dev)
goto err_vbufs;
}
- ret = virtio_gpu_ttm_init(vgdev);
- if (ret) {
- DRM_ERROR("failed to init ttm %d\n", ret);
- goto err_ttm;
- }
-
/* get display info */
virtio_cread(vgdev->vdev, struct virtio_gpu_config,
num_scanouts, &num_scanouts);
@@ -210,8 +208,6 @@ int virtio_gpu_init(struct drm_device *dev)
return 0;
err_scanouts:
- virtio_gpu_ttm_fini(vgdev);
-err_ttm:
virtio_gpu_free_vbufs(vgdev);
err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
@@ -234,6 +230,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
+ flush_work(&vgdev->obj_free_work);
vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
@@ -242,7 +239,6 @@ void virtio_gpu_deinit(struct drm_device *dev)
vgdev->vdev->config->del_vqs(vgdev->vdev);
virtio_gpu_modeset_fini(vgdev);
- virtio_gpu_ttm_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
virtio_gpu_cleanup_cap_cache(vgdev);
kfree(vgdev->capsets);
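
Pulled out of the init/deinit hunks above for readability, a sketch of the delayed-free plumbing the patch adds; the two helper names are illustrative, the calls are the ones introduced here.

static void example_obj_free_init(struct virtio_gpu_device *vgdev)
{
	INIT_WORK(&vgdev->obj_free_work, virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);
}

static void example_obj_free_fini(struct virtio_gpu_device *vgdev)
{
	/* drain any puts still queued before the virtqueues go away */
	flush_work(&vgdev->obj_free_work);
}
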
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 09b526518f5a..017a9e0fc3bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,73 +23,83 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
+static int virtio_gpu_virglrenderer_workaround = 1;
+module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
+
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
uint32_t *resid)
{
-#if 0
- int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
-
- if (handle < 0)
- return handle;
-#else
- static int handle;
-
- /*
- * FIXME: dirty hack to avoid re-using IDs, virglrenderer
- * can't deal with that. Needs fixing in virglrenderer, also
- * should figure a better way to handle that in the guest.
- */
- handle++;
-#endif
-
- *resid = handle + 1;
+ if (virtio_gpu_virglrenderer_workaround) {
+ /*
+ * Hack to avoid re-using resource IDs.
+ *
+ * virglrenderer versions up to (and including) 0.7.0
+ * can't deal with that. virglrenderer commit
+ * "f91a9dd35715 Fix unlinking resources from hash
+ * table." (Feb 2019) fixes the bug.
+ */
+ static int handle;
+ handle++;
+ *resid = handle + 1;
+ } else {
+ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+ if (handle < 0)
+ return handle;
+ *resid = handle + 1;
+ }
return 0;
}
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
-#if 0
- ida_free(&vgdev->resource_ida, id - 1);
-#endif
+ if (!virtio_gpu_virglrenderer_workaround) {
+ ida_free(&vgdev->resource_ida, id - 1);
+ }
}
-static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ if (bo->pages)
+ virtio_gpu_object_detach(vgdev, bo);
if (bo->created)
virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
- if (bo->vmap)
- virtio_gpu_object_kunmap(bo);
- drm_gem_object_release(&bo->gem_base);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
- kfree(bo);
+
+ drm_gem_shmem_free_object(obj);
}
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
+static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+ .free = virtio_gpu_free_object,
+ .open = virtio_gpu_gem_object_open,
+ .close = virtio_gpu_gem_object_close,
+
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = &drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ size_t size)
{
- u32 c = 1;
-
- vgbo->placement.placement = &vgbo->placement_code;
- vgbo->placement.busy_placement = &vgbo->placement_code;
- vgbo->placement_code.fpfn = 0;
- vgbo->placement_code.lpfn = 0;
- vgbo->placement_code.flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
- TTM_PL_FLAG_NO_EVICT;
- vgbo->placement.num_placement = c;
- vgbo->placement.num_busy_placement = c;
+ struct virtio_gpu_object *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+ bo->base.base.funcs = &virtio_gpu_gem_funcs;
+ return &bo->base.base;
}
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
@@ -97,157 +107,59 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object_array *objs = NULL;
+ struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
- size_t acc_size;
int ret;
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
- sizeof(struct virtio_gpu_object));
+ params->size = roundup(params->size, PAGE_SIZE);
+ shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+ bo = gem_to_virtio_gpu_obj(&shmem_obj->base);
- bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
- if (bo == NULL)
- return -ENOMEM;
ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
- if (ret < 0) {
- kfree(bo);
- return ret;
- }
- params->size = roundup(params->size, PAGE_SIZE);
- ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
- if (ret != 0) {
- virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
- kfree(bo);
- return ret;
- }
+ if (ret < 0)
+ goto err_free_gem;
+
bo->dumb = params->dumb;
+ if (fence) {
+ ret = -ENOMEM;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ goto err_put_id;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_objs;
+ }
+
if (params->virgl) {
- virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+ objs, fence);
} else {
- virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+ virtio_gpu_cmd_create_resource(vgdev, bo, params,
+ objs, fence);
}
- virtio_gpu_init_ttm_placement(bo);
- ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
- ttm_bo_type_device, &bo->placement, 0,
- true, acc_size, NULL, NULL,
- &virtio_gpu_ttm_bo_destroy);
- /* ttm_bo_init failure will call the destroy */
- if (ret != 0)
+ ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
return ret;
-
- if (fence) {
- struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct list_head validate_list;
- struct ttm_validate_buffer mainbuf;
- struct ww_acquire_ctx ticket;
- unsigned long irq_flags;
- bool signaled;
-
- INIT_LIST_HEAD(&validate_list);
- memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
- /* use a gem reference since unref list undoes them */
- drm_gem_object_get(&bo->gem_base);
- mainbuf.bo = &bo->tbo;
- list_add(&mainbuf.head, &validate_list);
-
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret == 0) {
- spin_lock_irqsave(&drv->lock, irq_flags);
- signaled = virtio_fence_signaled(&fence->f);
- if (!signaled)
- /* virtio create command still in flight */
- ttm_eu_fence_buffer_objects(&ticket, &validate_list,
- &fence->f);
- spin_unlock_irqrestore(&drv->lock, irq_flags);
- if (signaled)
- /* virtio create command finished */
- ttm_eu_backoff_reservation(&ticket, &validate_list);
- }
- virtio_gpu_unref_list(&validate_list);
}
*bo_ptr = bo;
return 0;
-}
-
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
-{
- bo->vmap = NULL;
- ttm_bo_kunmap(&bo->kmap);
-}
-
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
-{
- bool is_iomem;
- int r;
-
- WARN_ON(bo->vmap);
-
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
- if (r)
- return r;
- bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
- return 0;
-}
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
- struct virtio_gpu_object *bo)
-{
- int ret;
- struct page **pages = bo->tbo.ttm->pages;
- int nr_pages = bo->tbo.num_pages;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- size_t max_segment;
-
- /* wtf swapping */
- if (bo->pages)
- return 0;
-
- if (bo->tbo.ttm->state == tt_unpopulated)
- bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
- bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!bo->pages)
- goto out;
-
- max_segment = virtio_max_dma_size(qdev->vdev);
- max_segment &= PAGE_MASK;
- if (max_segment > SCATTERLIST_MAX_SEGMENT)
- max_segment = SCATTERLIST_MAX_SEGMENT;
- ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
- max_segment, GFP_KERNEL);
- if (ret)
- goto out;
- return 0;
-out:
- kfree(bo->pages);
- bo->pages = NULL;
- return -ENOMEM;
-}
-
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
-{
- sg_free_table(bo->pages);
- kfree(bo->pages);
- bo->pages = NULL;
-}
-
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0))
- return r;
- r = ttm_bo_wait(&bo->tbo, true, no_wait);
- ttm_bo_unreserve(&bo->tbo);
- return r;
+err_put_objs:
+ virtio_gpu_array_put_free(objs);
+err_put_id:
+ virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+err_free_gem:
+ drm_gem_shmem_free_object(&shmem_obj->base);
+ return ret;
}
-
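
One structural note on the conversion above: virtio_gpu_object no longer embeds a ttm_buffer_object but a drm_gem_shmem_object, which in the shmem helper starts with a plain drm_gem_object. A comment-only sketch of the chain the updated gem_to_virtio_gpu_obj() walks:

/*
 * struct virtio_gpu_object
 *         .base        = struct drm_gem_shmem_object
 *         .base.base   = struct drm_gem_object   (what DRM core sees)
 *
 * so &bo->base.base is what gets handed to drm_gem_handle_create() and
 * friends, and gem_to_virtio_gpu_obj() container_of()s back up from it.
 */
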
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a492ac3f4a7e..390524143139 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -84,7 +84,22 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- return 0;
+ bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!state->fb || !state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ is_cursor, true);
+ return ret;
}
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
@@ -109,12 +124,19 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
if (bo->dumb) {
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, bo, 0,
- cpu_to_le32(plane->state->src_w >> 16),
- cpu_to_le32(plane->state->src_h >> 16),
- cpu_to_le32(plane->state->src_x >> 16),
- cpu_to_le32(plane->state->src_y >> 16), NULL);
+ (vgdev, 0,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ objs, NULL);
}
} else {
handle = 0;
@@ -186,7 +208,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
- int ret = 0;
if (plane->state->crtc)
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
@@ -205,20 +226,20 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
/* new cursor -- update & wait */
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, bo, 0,
- cpu_to_le32(plane->state->crtc_w),
- cpu_to_le32(plane->state->crtc_h),
- 0, 0, vgfb->fence);
- ret = virtio_gpu_object_reserve(bo, false);
- if (!ret) {
- dma_resv_add_excl_fence(bo->tbo.base.resv,
- &vgfb->fence->f);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
- virtio_gpu_object_unreserve(bo);
- virtio_gpu_object_wait(bo, false);
- }
+ (vgdev, 0,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ 0, 0, objs, vgfb->fence);
+ dma_fence_wait(&vgfb->fence->f, true);
+ dma_fence_put(&vgfb->fence->f);
+ vgfb->fence = NULL;
}
if (plane->state->fb != old_state->fb) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index dc642a884b88..050d24c39a8f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -30,43 +30,9 @@
* device that might share buffers with virtgpu
*/
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
- if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
- /* should not happen */
- return ERR_PTR(-EINVAL);
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
- bo->tbo.ttm->num_pages);
-}
-
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *table)
{
return ERR_PTR(-ENODEV);
}
-
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
- int ret;
-
- ret = virtio_gpu_object_kmap(bo);
- if (ret)
- return NULL;
- return bo->vmap;
-}
-
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
- virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj));
-}
-
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- return drm_gem_prime_mmap(obj, vma);
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
deleted file mode 100644
index f87903641847..000000000000
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- * All Rights Reserved.
- *
- * Authors:
- * Dave Airlie
- * Alon Levy
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/delay.h>
-
-#include <drm/drm.h>
-#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/virtgpu_drm.h>
-
-#include "virtgpu_drv.h"
-
-static struct
-virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
-{
- struct virtio_gpu_mman *mman;
- struct virtio_gpu_device *vgdev;
-
- mman = container_of(bdev, struct virtio_gpu_mman, bdev);
- vgdev = container_of(mman, struct virtio_gpu_device, mman);
- return vgdev;
-}
-
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv;
- struct virtio_gpu_device *vgdev;
- int r;
-
- file_priv = filp->private_data;
- vgdev = file_priv->minor->dev->dev_private;
- if (vgdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
-
- return r;
-}
-
-static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
- uint32_t flags)
-{
- return 0;
-}
-
-static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem)
-{
- mem->mm_node = (void *)1;
- return 0;
-}
-
-static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
-{
- mem->mm_node = (void *)NULL;
-}
-
-static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
-{
- return 0;
-}
-
-static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
-{
- return 0;
-}
-
-static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
-static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
- .init = ttm_bo_man_init,
- .takedown = ttm_bo_man_takedown,
- .get_node = ttm_bo_man_get_node,
- .put_node = ttm_bo_man_put_node,
- .debug = ttm_bo_man_debug
-};
-
-static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_TT:
- man->func = &virtio_gpu_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- static const struct ttm_place placements = {
- .fpfn = 0,
- .lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
- };
-
- placement->placement = &placements;
- placement->busy_placement = &placements;
- placement->num_placement = 1;
- placement->num_busy_placement = 1;
-}
-
-static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- return 0;
-}
-
-static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- case TTM_PL_TT:
- /* system memory */
- return 0;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
-}
-
-/*
- * TTM backend functions.
- */
-struct virtio_gpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct virtio_gpu_object *obj;
-};
-
-static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
- struct virtio_gpu_device *vgdev =
- virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
-
- virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
- return 0;
-}
-
-static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
- struct virtio_gpu_device *vgdev =
- virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
-
- virtio_gpu_object_detach(vgdev, gtt->obj);
- return 0;
-}
-
-static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
-
- ttm_dma_tt_fini(&gtt->ttm);
- kfree(gtt);
-}
-
-static struct ttm_backend_func virtio_gpu_tt_func = {
- .bind = &virtio_gpu_ttm_tt_bind,
- .unbind = &virtio_gpu_ttm_tt_unbind,
- .destroy = &virtio_gpu_ttm_tt_destroy,
-};
-
-static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct virtio_gpu_device *vgdev;
- struct virtio_gpu_ttm_tt *gtt;
-
- vgdev = virtio_gpu_get_vgdev(bo->bdev);
- gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
- if (gtt == NULL)
- return NULL;
- gtt->ttm.ttm.func = &virtio_gpu_tt_func;
- gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
- kfree(gtt);
- return NULL;
- }
- return &gtt->ttm.ttm;
-}
-
-static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
-{
- struct virtio_gpu_object *bo;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
-
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
-}
-
-static struct ttm_bo_driver virtio_gpu_bo_driver = {
- .ttm_tt_create = &virtio_gpu_ttm_tt_create,
- .invalidate_caches = &virtio_gpu_invalidate_caches,
- .init_mem_type = &virtio_gpu_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = &virtio_gpu_evict_flags,
- .verify_access = &virtio_gpu_verify_access,
- .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
- .io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .swap_notify = &virtio_gpu_bo_swap_notify,
-};
-
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
-{
- int r;
-
- /* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&vgdev->mman.bdev,
- &virtio_gpu_bo_driver,
- vgdev->ddev->anon_inode->i_mapping,
- false);
- if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
- goto err_dev_init;
- }
-
- r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
- if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
- goto err_mm_init;
- }
- return 0;
-
-err_mm_init:
- ttm_bo_device_release(&vgdev->mman.bdev);
-err_dev_init:
- return r;
-}
-
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
-{
- ttm_bo_device_release(&vgdev->mman.bdev);
- DRM_INFO("virtio_gpu: ttm finalized\n");
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 7ac20490e1b4..74ad3bc3ebe8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -155,7 +155,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev,
{
if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
kfree(vbuf->resp_buf);
- kfree(vbuf->data_buf);
+ kvfree(vbuf->data_buf);
kmem_cache_free(vgdev->vbufs, vbuf);
}
@@ -192,7 +192,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
spin_unlock(&vgdev->ctrlq.qlock);
- list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
-
- list_del(&entry->list);
- free_vbuf(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
if (fence_id)
virtio_gpu_fence_event_process(vgdev, fence_id);
+
+ list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ if (entry->objs)
+ virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
+ list_del(&entry->list);
+ free_vbuf(vgdev, entry);
+ }
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
@@ -252,26 +256,67 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
wake_up(&vgdev->cursorq.ack_queue);
}
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+/* Create sg_table from a vmalloc'd buffer. */
+static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
+{
+ int ret, s, i;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ struct page *pg;
+
+ if (WARN_ON(!PAGE_ALIGNED(data)))
+ return NULL;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
+ ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return NULL;
+ }
+
+ for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+ pg = vmalloc_to_page(data);
+ if (!pg) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
+ }
+
+ s = min_t(int, PAGE_SIZE, size);
+ sg_set_page(sg, pg, s, 0);
+
+ size -= s;
+ data += s;
+ }
+
+ return sgt;
+}
+
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct scatterlist *vout)
__releases(&vgdev->ctrlq.qlock)
__acquires(&vgdev->ctrlq.qlock)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vout, vresp;
+ struct scatterlist *sgs[3], vcmd, vresp;
int outcnt = 0, incnt = 0;
+ bool notify = false;
int ret;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return notify;
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
sgs[outcnt + incnt] = &vcmd;
outcnt++;
- if (vbuf->data_size) {
- sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
- sgs[outcnt + incnt] = &vout;
+ if (vout) {
+ sgs[outcnt + incnt] = vout;
outcnt++;
}
@@ -292,32 +337,35 @@ retry:
trace_virtio_gpu_cmd_queue(vq,
(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
-
- if (!ret)
- ret = vq->num_free;
- return ret;
-}
-
-static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
-{
- int rc;
-
- spin_lock(&vgdev->ctrlq.qlock);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
- spin_unlock(&vgdev->ctrlq.qlock);
- return rc;
+ return notify;
}
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence *fence)
+static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_ctrl_hdr *hdr,
+ struct virtio_gpu_fence *fence)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- int rc;
+ struct scatterlist *vout = NULL, sg;
+ struct sg_table *sgt = NULL;
+ bool notify;
+ int outcnt = 0;
+
+ if (vbuf->data_size) {
+ if (is_vmalloc_addr(vbuf->data_buf)) {
+ sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
+ &outcnt);
+ if (!sgt)
+ return;
+ vout = sgt->sgl;
+ } else {
+ sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
+ vout = &sg;
+ outcnt = 1;
+ }
+ }
again:
spin_lock(&vgdev->ctrlq.qlock);
@@ -330,29 +378,47 @@ again:
* to wait for free space, which can result in fence ids being
* submitted out-of-order.
*/
- if (vq->num_free < 3) {
+ if (vq->num_free < 2 + outcnt) {
spin_unlock(&vgdev->ctrlq.qlock);
wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
goto again;
}
- if (fence)
+ if (hdr && fence) {
virtio_gpu_fence_emit(vgdev, hdr, fence);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
+ }
+ notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
spin_unlock(&vgdev->ctrlq.qlock);
- return rc;
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
+
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+}
+
+static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
+ bool notify;
int ret;
int outcnt;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return;
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -370,14 +436,13 @@ retry:
trace_virtio_gpu_cmd_queue(vq,
(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
spin_unlock(&vgdev->cursorq.qlock);
- if (!ret)
- ret = vq->num_free;
- return ret;
+ if (notify)
+ virtqueue_notify(vq);
}
/* just create gem objects for userspace and long lived objects,
@@ -388,6 +453,7 @@ retry:
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
@@ -395,6 +461,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -481,12 +548,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -498,14 +566,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->offset = cpu_to_le64(offset);
- cmd_p->r.width = width;
- cmd_p->r.height = height;
- cmd_p->r.x = x;
- cmd_p->r.y = y;
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
@@ -826,34 +895,38 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -861,6 +934,7 @@ void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
@@ -868,6 +942,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -888,12 +963,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -906,6 +982,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -917,20 +995,24 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->box = *box;
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
@@ -940,7 +1022,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence)
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -950,6 +1034,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
vbuf->data_buf = data;
vbuf->data_size = data_size;
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
@@ -965,17 +1050,21 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
- int si, nents;
+ int si, nents, ret;
if (WARN_ON_ONCE(!obj->created))
return -EINVAL;
+ if (WARN_ON_ONCE(obj->pages))
+ return -EINVAL;
- if (!obj->pages) {
- int ret;
+ ret = drm_gem_shmem_pin(&obj->base.base);
+ if (ret < 0)
+ return -EINVAL;
- ret = virtio_gpu_object_get_sg_table(vgdev, obj);
- if (ret)
- return ret;
+ obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
+ if (obj->pages == NULL) {
+ drm_gem_shmem_unpin(&obj->base.base);
+ return -EINVAL;
}
if (use_dma_api) {
@@ -1014,6 +1103,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ if (WARN_ON_ONCE(!obj->pages))
+ return;
+
if (use_dma_api && obj->mapped) {
struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
/* detach backing and wait for the host process it ... */
@@ -1029,6 +1121,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
} else {
virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
}
+
+ sg_free_table(obj->pages);
+ obj->pages = NULL;
+
+ drm_gem_shmem_unpin(&obj->base.base);
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
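The hunks above move the virtqueue helpers from raw resource ids to a struct virtio_gpu_object_array: each helper takes the hardware resource handle from the first object in the array and stores the array on the vbuffer, so the GEM objects stay referenced until the host has consumed the command. A minimal sketch of the new calling convention, assuming hypothetical one-entry array helpers (virtio_gpu_array_alloc()/virtio_gpu_array_add_obj() are assumptions here, not part of this hunk):

/* Sketch only: builds a one-entry object array and queues a 2D transfer. */
static int example_flush_2d(struct virtio_gpu_device *vgdev,
			    struct drm_gem_object *gobj,
			    uint32_t width, uint32_t height)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);		/* assumed helper */
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, gobj);		/* assumed helper */

	/* The command helper keeps @objs until the host completes the command. */
	virtio_gpu_cmd_transfer_to_host_2d(vgdev, 0 /* offset */,
					   width, height, 0, 0, objs, NULL);
	return 0;
}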
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 927dafaebc76..74f703b8d22a 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -16,17 +16,18 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
u64 ret_overrun;
bool ret;
- spin_lock(&output->lock);
-
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
WARN_ON(ret_overrun != 1);
+ spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
state = output->composer_state;
+ spin_unlock(&output->lock);
+
if (state && output->composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
@@ -48,8 +49,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
DRM_DEBUG_DRIVER("Composer worker already queued\n");
}
- spin_unlock(&output->lock);
-
return HRTIMER_RESTART;
}
@@ -85,7 +84,7 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- *vblank_time = output->vblank_hrtimer.node.expires;
+ *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
if (WARN_ON(*vblank_time == vblank->time))
return true;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 44ab9f8ef8be..d1fe144aa289 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -11,13 +11,14 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
-#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
@@ -83,7 +84,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_hw_done(old_state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
struct vkms_crtc_state *vkms_state =
@@ -103,6 +104,8 @@ static struct drm_driver vkms_driver = {
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
.get_vblank_timestamp = vkms_get_vblank_timestamp,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = vkms_prime_import_sg_table,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -157,6 +160,14 @@ static int __init vkms_init(void)
if (ret)
goto out_unregister;
+ ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
+ DMA_BIT_MASK(64));
+
+ if (ret) {
+ DRM_ERROR("Could not initialize DMA support\n");
+ goto out_fini;
+ }
+
vkms_device->drm.irq_enabled = true;
ret = drm_vblank_init(&vkms_device->drm, 1);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5a95100fa18b..7d52e24564db 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -137,6 +137,12 @@ int vkms_gem_vmap(struct drm_gem_object *obj);
void vkms_gem_vunmap(struct drm_gem_object *obj);
+/* Prime */
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
+
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 6489bfe0a149..2e01186fb943 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
+#include <drm/drm_prime.h>
#include "vkms_drv.h"
@@ -218,3 +220,28 @@ out:
mutex_unlock(&vkms_obj->pages_lock);
return ret;
}
+
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct vkms_gem_object *obj;
+ int npages;
+
+ obj = __vkms_gem_create(dev, attach->dmabuf->size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+ DRM_DEBUG_PRIME("Importing %d pages\n", npages);
+
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!obj->pages) {
+ vkms_gem_free_object(&obj->gem);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+ return &obj->gem;
+}
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6b28a326f8bb..15acdf2a7c0f 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -8,6 +8,7 @@ config DRM_VMWGFX
select FB_CFB_IMAGEBLIT
select DRM_TTM
select FB
+ select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 8841bd30e1e5..c877a21a0739 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
- vmwgfx_validation.o \
+ vmwgfx_validation.o vmwgfx_page_dirty.o \
ttm_object.o ttm_lock.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index f2bfd3d80598..61414f105c67 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1280,7 +1280,6 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
return offset;
}
-
static inline u32
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
surf_size_struct baseLevelSize,
@@ -1375,4 +1374,236 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
return svga3dsurface_is_dx_screen_target_format(format);
}
+/**
+ * struct svga3dsurface_mip - Mipmap level information
+ * @bytes: Bytes required in the backing store of this mipmap level.
+ * @img_stride: Byte stride per image.
+ * @row_stride: Byte stride per block row.
+ * @size: The size of the mipmap.
+ */
+struct svga3dsurface_mip {
+ size_t bytes;
+ size_t img_stride;
+ size_t row_stride;
+ struct drm_vmw_size size;
+};
+
+/**
+ * struct svga3dsurface_cache - Cached surface information
+ * @desc: Pointer to the surface descriptor
+ * @mip: Array of mipmap level information. Valid size is @num_mip_levels.
+ * @mip_chain_bytes: Bytes required in the backing store for the whole chain
+ * of mip levels.
+ * @sheet_bytes: Bytes required in the backing store for a sheet
+ * representing a single sample.
+ * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in
+ * a chain.
+ * @num_layers: Number of slices in an array texture or number of faces in
+ * a cubemap texture.
+ */
+struct svga3dsurface_cache {
+ const struct svga3d_surface_desc *desc;
+ struct svga3dsurface_mip mip[DRM_VMW_MAX_MIP_LEVELS];
+ size_t mip_chain_bytes;
+ size_t sheet_bytes;
+ u32 num_mip_levels;
+ u32 num_layers;
+};
+
+/**
+ * struct svga3dsurface_loc - Surface location
+ * @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
+ * mip_level.
+ * @x: X coordinate.
+ * @y: Y coordinate.
+ * @z: Z coordinate.
+ */
+struct svga3dsurface_loc {
+ u32 sub_resource;
+ u32 x, y, z;
+};
+
+/**
+ * svga3dsurface_subres - Compute the subresource from layer and mipmap.
+ * @cache: Surface layout data.
+ * @mip_level: The mipmap level.
+ * @layer: The surface layer (face or array slice).
+ *
+ * Return: The subresource.
+ */
+static inline u32 svga3dsurface_subres(const struct svga3dsurface_cache *cache,
+ u32 mip_level, u32 layer)
+{
+ return cache->num_mip_levels * layer + mip_level;
+}
+
+/**
+ * svga3dsurface_setup_cache - Build a surface cache entry
+ * @size: The surface base level dimensions.
+ * @format: The surface format.
+ * @num_mip_levels: Number of mipmap levels.
+ * @num_layers: Number of layers.
+ * @num_samples: Number of samples.
+ * @cache: Pointer to a struct svga3dsurface_cache object to be filled in.
+ *
+ * Return: Zero on success, -EINVAL on invalid surface layout.
+ */
+static inline int svga3dsurface_setup_cache(const struct drm_vmw_size *size,
+ SVGA3dSurfaceFormat format,
+ u32 num_mip_levels,
+ u32 num_layers,
+ u32 num_samples,
+ struct svga3dsurface_cache *cache)
+{
+ const struct svga3d_surface_desc *desc;
+ u32 i;
+
+ memset(cache, 0, sizeof(*cache));
+ cache->desc = desc = svga3dsurface_get_desc(format);
+ cache->num_mip_levels = num_mip_levels;
+ cache->num_layers = num_layers;
+ for (i = 0; i < cache->num_mip_levels; i++) {
+ struct svga3dsurface_mip *mip = &cache->mip[i];
+
+ mip->size = svga3dsurface_get_mip_size(*size, i);
+ mip->bytes = svga3dsurface_get_image_buffer_size
+ (desc, &mip->size, 0);
+ mip->row_stride =
+ __KERNEL_DIV_ROUND_UP(mip->size.width,
+ desc->block_size.width) *
+ desc->bytes_per_block * num_samples;
+ if (!mip->row_stride)
+ goto invalid_dim;
+
+ mip->img_stride =
+ __KERNEL_DIV_ROUND_UP(mip->size.height,
+ desc->block_size.height) *
+ mip->row_stride;
+ if (!mip->img_stride)
+ goto invalid_dim;
+
+ cache->mip_chain_bytes += mip->bytes;
+ }
+ cache->sheet_bytes = cache->mip_chain_bytes * num_layers;
+ if (!cache->sheet_bytes)
+ goto invalid_dim;
+
+ return 0;
+
+invalid_dim:
+ VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n");
+ return -EINVAL;
+}
+
+/**
+ * svga3dsurface_get_loc - Get a surface location from an offset into the
+ * backing store
+ * @cache: Surface layout data.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ * @offset: Offset into the surface backing store.
+ */
+static inline void
+svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
+ struct svga3dsurface_loc *loc,
+ size_t offset)
+{
+ const struct svga3dsurface_mip *mip = &cache->mip[0];
+ const struct svga3d_surface_desc *desc = cache->desc;
+ u32 layer;
+ int i;
+
+ if (offset >= cache->sheet_bytes)
+ offset %= cache->sheet_bytes;
+
+ layer = offset / cache->mip_chain_bytes;
+ offset -= layer * cache->mip_chain_bytes;
+ for (i = 0; i < cache->num_mip_levels; ++i, ++mip) {
+ if (mip->bytes > offset)
+ break;
+ offset -= mip->bytes;
+ }
+
+ loc->sub_resource = svga3dsurface_subres(cache, i, layer);
+ loc->z = offset / mip->img_stride;
+ offset -= loc->z * mip->img_stride;
+ loc->z *= desc->block_size.depth;
+ loc->y = offset / mip->row_stride;
+ offset -= loc->y * mip->row_stride;
+ loc->y *= desc->block_size.height;
+ loc->x = offset / desc->bytes_per_block;
+ loc->x *= desc->block_size.width;
+}
+
+/**
+ * svga3dsurface_inc_loc - Clamp increment a surface location with one block
+ * size in each dimension.
+ * @cache: Surface layout data.
+ * @loc: Pointer to a struct svga3dsurface_loc to be incremented.
+ *
+ * When computing the size of a range as size = end - start, the range does not
+ * include the end element. However a location representing the last byte
+ * of a touched region in the backing store *is* included in the range.
+ * This function modifies such a location to match the end definition
+ * given as start + size which is the one used in a SVGA3dBox.
+ */
+static inline void
+svga3dsurface_inc_loc(const struct svga3dsurface_cache *cache,
+ struct svga3dsurface_loc *loc)
+{
+ const struct svga3d_surface_desc *desc = cache->desc;
+ u32 mip = loc->sub_resource % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+
+ loc->sub_resource++;
+ loc->x += desc->block_size.width;
+ if (loc->x > size->width)
+ loc->x = size->width;
+ loc->y += desc->block_size.height;
+ if (loc->y > size->height)
+ loc->y = size->height;
+ loc->z += desc->block_size.depth;
+ if (loc->z > size->depth)
+ loc->z = size->depth;
+}
+
+/**
+ * svga3dsurface_min_loc - The start location in a subresource
+ * @cache: Surface layout data.
+ * @sub_resource: The subresource.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ */
+static inline void
+svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
+ u32 sub_resource,
+ struct svga3dsurface_loc *loc)
+{
+ loc->sub_resource = sub_resource;
+ loc->x = loc->y = loc->z = 0;
+}
+
+/**
+ * svga3dsurface_max_loc - The end location in a subresource
+ * @cache: Surface layout data.
+ * @sub_resource: The subresource.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ *
+ * Following the end definition given in svga3dsurface_inc_loc(),
+ * compute the end location of a surface subresource.
+ */
+static inline void
+svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
+ u32 sub_resource,
+ struct svga3dsurface_loc *loc)
+{
+ const struct drm_vmw_size *size;
+ u32 mip;
+
+ loc->sub_resource = sub_resource + 1;
+ mip = sub_resource % cache->num_mip_levels;
+ size = &cache->mip[mip].size;
+ loc->x = size->width;
+ loc->y = size->height;
+ loc->z = size->depth;
+}
+
#endif /* _SVGA3D_SURFACEDEFS_H_ */
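To make the location helpers above concrete: svga3dsurface_get_loc() peels a linear backing-store offset apart by layer, then mip level, then image, row and block strides. A self-contained sketch of the same arithmetic with made-up strides for a 128x128 RGBA8 surface with two mip levels (illustrative only, not part of the header):

/* Illustrative decomposition of a byte offset into (layer, mip, z, y, x),
 * mirroring svga3dsurface_get_loc() with hard-coded example strides. */
#include <stdio.h>

int main(void)
{
	unsigned long mip_bytes[2]      = { 65536, 16384 };	/* 128x128, 64x64 */
	unsigned long mip_img_stride[2] = { 65536, 16384 };
	unsigned long mip_row_stride[2] = {   512,   256 };
	unsigned long mip_chain_bytes   = 65536 + 16384;
	unsigned long bytes_per_block   = 4;
	unsigned long offset = 70000, layer, mip = 0, x, y, z;

	layer = offset / mip_chain_bytes;		/* which array layer */
	offset -= layer * mip_chain_bytes;
	while (mip < 1 && offset >= mip_bytes[mip])	/* walk the mip chain */
		offset -= mip_bytes[mip++];
	z = offset / mip_img_stride[mip];
	offset -= z * mip_img_stride[mip];
	y = offset / mip_row_stride[mip];
	offset -= y * mip_row_stride[mip];
	x = offset / bytes_per_block;

	printf("layer=%lu mip=%lu z=%lu y=%lu x=%lu\n", layer, mip, z, y, x);
	return 0;	/* prints layer=0 mip=1 z=0 y=17 x=28 */
}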
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index aad8d8140259..8b71bf6b58ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -462,6 +462,8 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+ WARN_ON(vmw_bo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
kfree(vmw_bo);
}
@@ -475,8 +477,11 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+ struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
- vmw_bo_unmap(&vmw_user_bo->vbo);
+ WARN_ON(vbo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ vmw_bo_unmap(vbo);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
@@ -511,8 +516,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->base.priority = 3;
-
- INIT_LIST_HEAD(&vmw_bo->res_list);
+ vmw_bo->res_tree = RB_ROOT;
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
@@ -566,7 +570,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE:
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
break;
default:
WARN_ONCE(true, "Undefined buffer object reference release.\n");
@@ -682,12 +686,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
struct ttm_object_file *tfile,
uint32_t flags)
{
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &user_bo->vbo.base;
bool existed;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
- bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
lret = dma_resv_wait_timeout_rcu
@@ -700,15 +704,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
return 0;
}
- ret = ttm_bo_synccpu_write_grab
- (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ ret = ttm_bo_reserve(bo, true, nonblock, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_wait(bo, true, nonblock);
+ if (likely(ret == 0))
+ atomic_inc(&user_bo->vbo.cpu_writers);
+
+ ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
return ret;
}
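The synccpu change above open-codes what ttm_bo_synccpu_write_grab() used to do, with the write count now kept in the new vbo->cpu_writers field. In sketch form, under the same locking rules the hunk documents (the counter is only incremented under reservation, but may be decremented without it):

/* Grab: reserve, wait for idle, then account the CPU writer. */
ret = ttm_bo_reserve(bo, true, nonblock, NULL);
if (ret)
	return ret;
ret = ttm_bo_wait(bo, true, nonblock);
if (ret == 0)
	atomic_inc(&vbo->cpu_writers);
ttm_bo_unreserve(bo);

/* Release: drop the accounting; no reservation needed. */
atomic_dec(&vbo->cpu_writers);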
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b38bcb032c99..e962048f65d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -576,8 +576,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
else
dev_priv->map_mode = vmw_dma_map_populate;
- /* No TTM coherent page pool? FIXME: Ask TTM instead! */
- if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+ if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
(dev_priv->map_mode == vmw_dma_alloc_coherent))
return -EINVAL;
@@ -827,9 +826,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_fman;
}
+ drm_vma_offset_manager_init(&dev_priv->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
+ &dev_priv->vma_manager,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -986,6 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev)
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
(void) ttm_bo_device_release(&dev_priv->bdev);
+ drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5eb73ded8e07..a31e726d6d71 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -56,9 +56,9 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180704"
+#define VMWGFX_DRIVER_DATE "20190328"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_MINOR 16
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -100,21 +100,26 @@ struct vmw_fpriv {
/**
* struct vmw_buffer_object - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object
- * @res_list: List of resources using this buffer object as a backing MOB
+ * @res_tree: RB tree of resources using this buffer object as a backing MOB
* @pin_count: pin depth
+ * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+ * increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @map: Kmap object for semi-persistent mappings
* @res_prios: Eviction priority counts for attached resources
+ * @dirty: structure for user-space dirty-tracking
*/
struct vmw_buffer_object {
struct ttm_buffer_object base;
- struct list_head res_list;
+ struct rb_root res_tree;
s32 pin_count;
+ atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
u32 res_prios[TTM_MAX_BO_PRIORITY];
+ struct vmw_bo_dirty *dirty;
};
/**
@@ -145,7 +150,8 @@ struct vmw_res_func;
* @res_dirty: Resource contains data not yet in the backup buffer. Protected
* by resource reserved.
* @backup_dirty: Backup buffer contains data not yet in the HW resource.
- * Protecte by resource reserved.
+ * Protected by resource reserved.
+ * @coherent: Emulate coherency by tracking vm accesses.
* @backup: The backup buffer if any. Protected by resource reserved.
* @backup_offset: Offset into the backup buffer if any. Protected by resource
* reserved. Note that only a few resource types can have a @backup_offset
@@ -154,29 +160,32 @@ struct vmw_res_func;
* pin-count greater than zero. It is not on the resource LRU lists and its
* backup buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable.
+ * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
- * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
* @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex
* @res_free: The resource destructor.
* @hw_destroy: Callback to destroy the resource on the device, as part of
* resource destruction.
*/
+struct vmw_resource_dirty;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
u32 used_prio;
unsigned long backup_size;
- bool res_dirty;
- bool backup_dirty;
+ u32 res_dirty : 1;
+ u32 backup_dirty : 1;
+ u32 coherent : 1;
struct vmw_buffer_object *backup;
unsigned long backup_offset;
unsigned long pin_count;
const struct vmw_res_func *func;
+ struct rb_node mob_node;
struct list_head lru_head;
- struct list_head mob_head;
struct list_head binding_head;
+ struct vmw_resource_dirty *dirty;
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -438,6 +447,7 @@ struct vmw_private {
struct vmw_fifo_state fifo;
struct drm_device *dev;
+ struct drm_vma_offset_manager vma_manager;
unsigned long vmw_chipset;
unsigned int io_start;
uint32_t vram_start;
@@ -674,7 +684,8 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
-extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -716,6 +727,10 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
+ pgoff_t end);
+int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+ pgoff_t end, pgoff_t *num_prefault);
/**
* vmw_resource_mob_attached - Whether a resource currently has a mob attached
@@ -725,7 +740,7 @@ void vmw_resource_mob_detach(struct vmw_resource *res);
*/
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
- return !list_empty(&res->mob_head);
+ return !RB_EMPTY_NODE(&res->mob_node);
}
/**
@@ -1403,6 +1418,17 @@ int vmw_host_log(const char *log);
#define VMW_DEBUG_USER(fmt, ...) \
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+/* Resource dirtying - vmwgfx_page_dirty.c */
+void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
+int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
+void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
+void vmw_bo_dirty_clear_res(struct vmw_resource *res);
+void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
+void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end);
+vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
+vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+
/**
* VMW_DEBUG_KMS - Debug output for kernel mode-setting
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index ff86d49dc5e8..934ad7c0c342 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2560,7 +2560,6 @@ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
offsetof(typeof(*cmd), sid));
cmd = container_of(header, typeof(*cmd), header);
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
new file mode 100644
index 000000000000..f07aa857587c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include "vmwgfx_drv.h"
+
+/*
+ * Different methods for tracking dirty:
+ * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
+ * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
+ * accesses in the VM mkwrite() callback
+ */
+enum vmw_bo_dirty_method {
+ VMW_BO_DIRTY_PAGETABLE,
+ VMW_BO_DIRTY_MKWRITE,
+};
+
+/*
+ * A scan that finds no dirtied pages triggers a transition to the _MKWRITE
+ * method; similarly, a scan where more than a certain percentage of the
+ * pages are dirty triggers a transition to the _PAGETABLE method. How many
+ * such triggers should we wait for before changing method?
+ */
+#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2
+
+/* Percentage to trigger a transition to the _PAGETABLE method */
+#define VMW_DIRTY_PERCENTAGE 10
+
+/**
+ * struct vmw_bo_dirty - Dirty information for buffer objects
+ * @start: First currently dirty bit
+ * @end: Last currently dirty bit + 1
+ * @method: The currently used dirty method
+ * @change_count: Number of consecutive method change triggers
+ * @ref_count: Reference count for this structure
+ * @bitmap_size: The size of the bitmap in bits. Typically equal to the
+ * nuber of pages in the bo.
+ * @size: The accounting size for this struct.
+ * @bitmap: A bitmap where each bit represents a page. A set bit means a
+ * dirty page.
+ */
+struct vmw_bo_dirty {
+ unsigned long start;
+ unsigned long end;
+ enum vmw_bo_dirty_method method;
+ unsigned int change_count;
+ unsigned int ref_count;
+ unsigned long bitmap_size;
+ size_t size;
+ unsigned long bitmap[0];
+};
+
+/**
+ * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
+ * @vbo: The buffer object to scan
+ *
+ * Scans the pagetable for dirty bits. Clear those bits and modify the
+ * dirty structure with the results. This function may change the
+ * dirty-tracking method.
+ */
+static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t num_marked;
+
+ num_marked = clean_record_shared_mapping_range
+ (mapping,
+ offset, dirty->bitmap_size,
+ offset, &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ if (num_marked == 0)
+ dirty->change_count++;
+ else
+ dirty->change_count = 0;
+
+ if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
+ dirty->change_count = 0;
+ dirty->method = VMW_BO_DIRTY_MKWRITE;
+ wp_shared_mapping_range(mapping,
+ offset, dirty->bitmap_size);
+ clean_record_shared_mapping_range(mapping,
+ offset, dirty->bitmap_size,
+ offset, &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ }
+}
+
+/**
+ * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
+ * @vbo: The buffer object to scan
+ *
+ * Write-protect pages written to so that consecutive write accesses will
+ * trigger a call to mkwrite.
+ *
+ * This function may change the dirty-tracking method.
+ */
+static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t num_marked;
+
+ if (dirty->end <= dirty->start)
+ return;
+
+ num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
+ dirty->start + offset,
+ dirty->end - dirty->start);
+
+ if (100UL * num_marked / dirty->bitmap_size >
+ VMW_DIRTY_PERCENTAGE) {
+ dirty->change_count++;
+ } else {
+ dirty->change_count = 0;
+ }
+
+ if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
+ pgoff_t start = 0;
+ pgoff_t end = dirty->bitmap_size;
+
+ dirty->method = VMW_BO_DIRTY_PAGETABLE;
+ clean_record_shared_mapping_range(mapping, offset, end, offset,
+ &dirty->bitmap[0],
+ &start, &end);
+ bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
+ if (dirty->start < dirty->end)
+ bitmap_set(&dirty->bitmap[0], dirty->start,
+ dirty->end - dirty->start);
+ dirty->change_count = 0;
+ }
+}
+
+/**
+ * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
+ * tracking structure
+ * @vbo: The buffer object to scan
+ *
+ * This function may change the dirty tracking method.
+ */
+void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
+ vmw_bo_dirty_scan_pagetable(vbo);
+ else
+ vmw_bo_dirty_scan_mkwrite(vbo);
+}
+
+/**
+ * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
+ * an unmap_mapping_range operation.
+ * @vbo: The buffer object,
+ * @start: First page of the range within the buffer object.
+ * @end: Last page of the range within the buffer object + 1.
+ *
+ * If we're using the _PAGETABLE scan method, we may leak dirty pages
+ * when calling unmap_mapping_range(). This function makes sure we pick
+ * up all dirty pages.
+ */
+static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+
+ if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
+ return;
+
+ wp_shared_mapping_range(mapping, start + offset, end - start);
+ clean_record_shared_mapping_range(mapping, start + offset,
+ end - start, offset,
+ &dirty->bitmap[0], &dirty->start,
+ &dirty->end);
+}
+
+/**
+ * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
+ * @vbo: The buffer object,
+ * @start: First page of the range within the buffer object.
+ * @end: Last page of the range within the buffer object + 1.
+ *
+ * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
+ */
+void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end)
+{
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+
+ vmw_bo_dirty_pre_unmap(vbo, start, end);
+ unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
+ (loff_t) (end - start) << PAGE_SHIFT);
+}
+
+/**
+ * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
+ * @vbo: The buffer object
+ *
+ * This function registers a dirty-tracking user to a buffer object.
+ * A user can be for example a resource or a vma in a special user-space
+ * mapping.
+ *
+ * Return: Zero on success, -ENOMEM on memory allocation failure.
+ */
+int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t num_pages = vbo->base.num_pages;
+ size_t size, acc_size;
+ int ret;
+ static struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
+ if (dirty) {
+ dirty->ref_count++;
+ return 0;
+ }
+
+ size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
+ acc_size = ttm_round_pot(size);
+ ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+ if (ret) {
+ VMW_DEBUG_USER("Out of graphics memory for buffer object "
+ "dirty tracker.\n");
+ return ret;
+ }
+ dirty = kvzalloc(size, GFP_KERNEL);
+ if (!dirty) {
+ ret = -ENOMEM;
+ goto out_no_dirty;
+ }
+
+ dirty->size = acc_size;
+ dirty->bitmap_size = num_pages;
+ dirty->start = dirty->bitmap_size;
+ dirty->end = 0;
+ dirty->ref_count = 1;
+ if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
+ dirty->method = VMW_BO_DIRTY_PAGETABLE;
+ } else {
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
+
+ dirty->method = VMW_BO_DIRTY_MKWRITE;
+
+ /* Write-protect and then pick up already dirty bits */
+ wp_shared_mapping_range(mapping, offset, num_pages);
+ clean_record_shared_mapping_range(mapping, offset, num_pages,
+ offset,
+ &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ }
+
+ vbo->dirty = dirty;
+
+ return 0;
+
+out_no_dirty:
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
+ return ret;
+}
+
+/**
+ * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
+ * @vbo: The buffer object
+ *
+ * This function releases a dirty-tracking user from a buffer object.
+ * If the reference count reaches zero, then the dirty-tracking object is
+ * freed and the pointer to it cleared.
+ */
+void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ if (dirty && --dirty->ref_count == 0) {
+ size_t acc_size = dirty->size;
+
+ kvfree(dirty);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
+ vbo->dirty = NULL;
+ }
+}
+
+/**
+ * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
+ * its backing mob.
+ * @res: The resource
+ *
+ * This function will pick up all dirty ranges affecting the resource from
+ * its backup mob, and call vmw_resource_dirty_update() once for each
+ * range. The transferred ranges will be cleared from the backing mob's
+ * dirty tracking.
+ */
+void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *vbo = res->backup;
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = res->backup_offset;
+ unsigned long res_end = res->backup_offset + res->backup_size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = min(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ vmw_resource_dirty_update(res, start, end);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
+/**
+ * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
+ * its backing mob.
+ * @res: The resource
+ *
+ * This function will clear all dirty ranges affecting the resource from
+ * its backup mob's dirty tracking.
+ */
+void vmw_bo_dirty_clear_res(struct vmw_resource *res)
+{
+ unsigned long res_start = res->backup_offset;
+ unsigned long res_end = res->backup_offset + res->backup_size;
+ struct vmw_buffer_object *vbo = res->backup;
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ res_start = max(res_start, dirty->start);
+ res_end = min(res_end, dirty->end);
+ bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
+vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ vm_fault_t ret;
+ unsigned long page_offset;
+ unsigned int save_flags;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, typeof(*vbo), base);
+
+ /*
+ * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
+ * So make sure the TTM helpers are aware.
+ */
+ save_flags = vmf->flags;
+ vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ vmf->flags = save_flags;
+ if (ret)
+ return ret;
+
+ page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
+ if (unlikely(page_offset >= bo->num_pages)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
+ !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ __set_bit(page_offset, &dirty->bitmap[0]);
+ dirty->start = min(dirty->start, page_offset);
+ dirty->end = max(dirty->end, page_offset + 1);
+ }
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
+ pgoff_t num_prefault;
+ pgprot_t prot;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
+ TTM_BO_VM_NUM_PREFAULT;
+
+ if (vbo->dirty) {
+ pgoff_t allowed_prefault;
+ unsigned long page_offset;
+
+ page_offset = vmf->pgoff -
+ drm_vma_node_start(&bo->base.vma_node);
+ if (page_offset >= bo->num_pages ||
+ vmw_resources_clean(vbo, page_offset,
+ page_offset + PAGE_SIZE,
+ &allowed_prefault)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ num_prefault = min(num_prefault, allowed_prefault);
+ }
+
+ /*
+ * If we don't track dirty using the MKWRITE method, make sure
+ * the page protection is write-enabled so we don't get
+ * a lot of unnecessary write faults.
+ */
+ if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
+ prot = vma->vm_page_prot;
+ else
+ prot = vm_get_page_prot(vma->vm_flags);
+
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
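The two scan functions above implement a small hysteresis between the dirty-tracking methods: a pagetable scan that finds nothing, or an mkwrite scan where more than VMW_DIRTY_PERCENTAGE of the pages faulted, counts as a trigger, and more than VMW_DIRTY_NUM_CHANGE_TRIGGERS consecutive triggers flip the method. A stand-alone model of just that decision (illustrative sketch, not driver code):

/* Minimal model of the method-switch hysteresis used by vmw_bo_dirty_scan(). */
enum method { PAGETABLE, MKWRITE };

static enum method scan_update(enum method m, unsigned int *change_count,
			       unsigned long num_marked,
			       unsigned long bitmap_size)
{
	int trigger;

	if (m == PAGETABLE)	/* nothing dirtied -> lean towards MKWRITE */
		trigger = (num_marked == 0);
	else			/* > 10% write-faulted -> lean towards PAGETABLE */
		trigger = (100UL * num_marked / bitmap_size > 10);

	*change_count = trigger ? *change_count + 1 : 0;
	if (*change_count > 2) {	/* VMW_DIRTY_NUM_CHANGE_TRIGGERS */
		*change_count = 0;
		return m == PAGETABLE ? MKWRITE : PAGETABLE;
	}
	return m;
}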
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5581a7826b4c..c8441030637a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -40,11 +40,24 @@
void vmw_resource_mob_attach(struct vmw_resource *res)
{
struct vmw_buffer_object *backup = res->backup;
+ struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
dma_resv_assert_held(res->backup->base.base.resv);
res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
res->func->prio;
- list_add_tail(&res->mob_head, &backup->res_list);
+
+ while (*new) {
+ struct vmw_resource *this =
+ container_of(*new, struct vmw_resource, mob_node);
+
+ parent = *new;
+ new = (res->backup_offset < this->backup_offset) ?
+ &((*new)->rb_left) : &((*new)->rb_right);
+ }
+
+ rb_link_node(&res->mob_node, parent, new);
+ rb_insert_color(&res->mob_node, &backup->res_tree);
+
vmw_bo_prio_add(backup, res->used_prio);
}
@@ -58,7 +71,8 @@ void vmw_resource_mob_detach(struct vmw_resource *res)
dma_resv_assert_held(backup->base.base.resv);
if (vmw_resource_mob_attached(res)) {
- list_del_init(&res->mob_head);
+ rb_erase(&res->mob_node, &backup->res_tree);
+ RB_CLEAR_NODE(&res->mob_node);
vmw_bo_prio_del(backup, res->used_prio);
}
}
@@ -119,6 +133,10 @@ static void vmw_resource_release(struct kref *kref)
}
res->backup_dirty = false;
vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->backup);
ttm_bo_unreserve(bo);
vmw_bo_unreference(&res->backup);
}
@@ -200,15 +218,17 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->res_free = res_free;
res->dev_priv = dev_priv;
res->func = func;
+ RB_CLEAR_NODE(&res->mob_node);
INIT_LIST_HEAD(&res->lru_head);
- INIT_LIST_HEAD(&res->mob_head);
INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
res->backup = NULL;
res->backup_offset = 0;
res->backup_dirty = false;
res->res_dirty = false;
+ res->coherent = false;
res->used_prio = 3;
+ res->dirty = NULL;
if (delay_id)
return 0;
else
@@ -373,7 +393,8 @@ out_no_bo:
* should be retried once resources have been freed up.
*/
static int vmw_resource_do_validate(struct vmw_resource *res,
- struct ttm_validate_buffer *val_buf)
+ struct ttm_validate_buffer *val_buf,
+ bool dirtying)
{
int ret = 0;
const struct vmw_res_func *func = res->func;
@@ -395,6 +416,39 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
vmw_resource_mob_attach(res);
}
+ /*
+ * Handle the case where the backup mob is marked coherent but
+ * the resource isn't.
+ */
+ if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
+ !res->coherent) {
+ if (res->backup->dirty && !res->dirty) {
+ ret = func->dirty_alloc(res);
+ if (ret)
+ return ret;
+ } else if (!res->backup->dirty && res->dirty) {
+ func->dirty_free(res);
+ }
+ }
+
+ /*
+ * Transfer the dirty regions to the resource and update
+ * the resource.
+ */
+ if (res->dirty) {
+ if (dirtying && !res->res_dirty) {
+ pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+ pgoff_t end = __KERNEL_DIV_ROUND_UP
+ (res->backup_offset + res->backup_size,
+ PAGE_SIZE);
+
+ vmw_bo_dirty_unmap(res->backup, start, end);
+ }
+
+ vmw_bo_dirty_transfer_to_res(res);
+ return func->dirty_sync(res);
+ }
+
return 0;
out_bind_failed:
@@ -433,16 +487,28 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (switch_backup && new_backup != res->backup) {
if (res->backup) {
vmw_resource_mob_detach(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->backup);
vmw_bo_unreference(&res->backup);
}
if (new_backup) {
res->backup = vmw_bo_reference(new_backup);
+
+ /*
+ * The validation code should already have added a
+ * dirty tracker here.
+ */
+ WARN_ON(res->coherent && !new_backup->dirty);
+
vmw_resource_mob_attach(res);
} else {
res->backup = NULL;
}
+ } else if (switch_backup && res->coherent) {
+ vmw_bo_dirty_release(res->backup);
}
+
if (switch_backup)
res->backup_offset = new_backup_offset;
@@ -492,8 +558,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
- true);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -623,6 +688,7 @@ out_no_unbind:
* to the device.
* @res: The resource to make visible to the device.
* @intr: Perform waits interruptible if possible.
+ * @dirtying: Pending GPU operation will dirty the resource
*
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated.
@@ -632,7 +698,8 @@ out_no_unbind:
* Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
* on failure.
*/
-int vmw_resource_validate(struct vmw_resource *res, bool intr)
+int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ bool dirtying)
{
int ret;
struct vmw_resource *evict_res;
@@ -649,7 +716,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
if (res->backup)
val_buf.bo = &res->backup->base;
do {
- ret = vmw_resource_do_validate(res, &val_buf);
+ ret = vmw_resource_do_validate(res, &val_buf, dirtying);
if (likely(ret != -EBUSY))
break;
@@ -712,19 +779,20 @@ out_no_validate:
*/
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
-
- struct vmw_resource *res, *next;
struct ttm_validate_buffer val_buf = {
.bo = &vbo->base,
.num_shared = 0
};
dma_resv_assert_held(vbo->base.base.resv);
- list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
- if (!res->func->unbind)
- continue;
+ while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
+ struct rb_node *node = vbo->res_tree.rb_node;
+ struct vmw_resource *res =
+ container_of(node, struct vmw_resource, mob_node);
+
+ if (!WARN_ON_ONCE(!res->func->unbind))
+ (void) res->func->unbind(res, res->res_dirty, &val_buf);
- (void) res->func->unbind(res, res->res_dirty, &val_buf);
res->backup_dirty = true;
res->res_dirty = false;
vmw_resource_mob_detach(res);
@@ -948,7 +1016,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
/* Do we really need to pin the MOB as well? */
vmw_bo_pin_reserved(vbo, true);
}
- ret = vmw_resource_validate(res, interruptible);
+ ret = vmw_resource_validate(res, interruptible, true);
if (vbo)
ttm_bo_unreserve(&vbo->base);
if (ret)
@@ -1008,3 +1076,101 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
return res->func->res_type;
}
+
+/**
+ * vmw_resource_dirty_update - Update a resource's dirty tracker with a
+ * sequential range of touched backing store memory.
+ * @res: The resource.
+ * @start: The first page touched.
+ * @end: The last page touched + 1.
+ */
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
+ pgoff_t end)
+{
+ if (res->dirty)
+ res->func->dirty_range_add(res, start << PAGE_SHIFT,
+ end << PAGE_SHIFT);
+}
+
+/**
+ * vmw_resources_clean - Clean resources intersecting a mob range
+ * @vbo: The mob buffer object
+ * @start: The mob page offset starting the range
+ * @end: The mob page offset ending the range
+ * @num_prefault: Returns how many pages including the first have been
+ * cleaned and are ok to prefault
+ */
+int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+ pgoff_t end, pgoff_t *num_prefault)
+{
+ struct rb_node *cur = vbo->res_tree.rb_node;
+ struct vmw_resource *found = NULL;
+ unsigned long res_start = start << PAGE_SHIFT;
+ unsigned long res_end = end << PAGE_SHIFT;
+ unsigned long last_cleaned = 0;
+
+ /*
+ * Find the resource with lowest backup_offset that intersects the
+ * range.
+ */
+ while (cur) {
+ struct vmw_resource *cur_res =
+ container_of(cur, struct vmw_resource, mob_node);
+
+ if (cur_res->backup_offset >= res_end) {
+ cur = cur->rb_left;
+ } else if (cur_res->backup_offset + cur_res->backup_size <=
+ res_start) {
+ cur = cur->rb_right;
+ } else {
+ found = cur_res;
+ cur = cur->rb_left;
+ /* Continue to look for resources with lower offsets */
+ }
+ }
+
+ /*
+ * In order of increasing backup_offset, clean dirty resources
+ * intersecting the range.
+ */
+ while (found) {
+ if (found->res_dirty) {
+ int ret;
+
+ if (!found->func->clean)
+ return -EINVAL;
+
+ ret = found->func->clean(found);
+ if (ret)
+ return ret;
+
+ found->res_dirty = false;
+ }
+ last_cleaned = found->backup_offset + found->backup_size;
+ cur = rb_next(&found->mob_node);
+ if (!cur)
+ break;
+
+ found = container_of(cur, struct vmw_resource, mob_node);
+ if (found->backup_offset >= res_end)
+ break;
+ }
+
+ /*
+ * Set the number of pages allowed for prefaulting and fence the buffer object
+ */
+ *num_prefault = 1;
+ if (last_cleaned > res_start) {
+ struct ttm_buffer_object *bo = &vbo->base;
+
+ *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
+ PAGE_SIZE);
+ vmw_bo_fence_single(bo, NULL);
+ if (bo->moving)
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get
+ (dma_resv_get_excl(bo->base.resv));
+ }
+
+ return 0;
+}
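vmw_resources_clean() above relies on the rb-tree being ordered by backup_offset: it descends left whenever the current node starts at or beyond the range or already intersects it, and right only when the node ends before the range, which yields the intersecting resource with the lowest offset. The same descent can be written against a sorted array, which may make the invariant easier to see (illustrative sketch only):

/* Lowest-start range intersecting [start, end), over an array sorted by start. */
struct range { unsigned long start, size; };

static int find_first_intersecting(const struct range *r, int n,
				   unsigned long start, unsigned long end)
{
	int lo = 0, hi = n, found = -1;

	while (lo < hi) {
		int mid = (lo + hi) / 2;

		if (r[mid].start >= end)
			hi = mid;		/* entirely right of range: go left */
		else if (r[mid].start + r[mid].size <= start)
			lo = mid + 1;		/* entirely left of range: go right */
		else {
			found = mid;		/* intersects: remember, keep looking left */
			hi = mid;
		}
	}
	return found;
}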
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index 984e588c62ca..3b7438b2d289 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -71,6 +71,13 @@ struct vmw_user_resource_conv {
* @commit_notify: If the resource is a command buffer managed resource,
* callback to notify that a define or remove command
* has been committed to the device.
+ * @dirty_alloc: Allocate a dirty tracker. NULL if dirty-tracking is not
+ * supported.
+ * @dirty_free: Free the dirty tracker.
+ * @dirty_sync: Upload the dirty mob contents to the resource.
+ * @dirty_range_add: Add a sequential dirty range to the resource
+ * dirty tracker.
+ * @clean: Clean the resource.
*/
struct vmw_res_func {
enum vmw_res_type res_type;
@@ -90,6 +97,12 @@ struct vmw_res_func {
struct ttm_validate_buffer *val_buf);
void (*commit_notify)(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
+ int (*dirty_alloc)(struct vmw_resource *res);
+ void (*dirty_free)(struct vmw_resource *res);
+ int (*dirty_sync)(struct vmw_resource *res);
+ void (*dirty_range_add)(struct vmw_resource *res, size_t start,
+ size_t end);
+ int (*clean)(struct vmw_resource *res);
};
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 29d8794f0421..32b9131b2bae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -68,6 +68,20 @@ struct vmw_surface_offset {
uint32_t bo_offset;
};
+/**
+ * vmw_surface_dirty - Surface dirty-tracker
+ * @cache: Cached layout information of the surface.
+ * @size: Accounting size for the struct vmw_surface_dirty.
+ * @num_subres: Number of subresources.
+ * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
+ */
+struct vmw_surface_dirty {
+ struct svga3dsurface_cache cache;
+ size_t size;
+ u32 num_subres;
+ SVGA3dBox boxes[0];
+};
+
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
@@ -96,6 +110,13 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_vmw_gb_surface_ref_ext_rep *rep,
struct drm_file *file_priv);
+static void vmw_surface_dirty_free(struct vmw_resource *res);
+static int vmw_surface_dirty_alloc(struct vmw_resource *res);
+static int vmw_surface_dirty_sync(struct vmw_resource *res);
+static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
+ size_t end);
+static int vmw_surface_clean(struct vmw_resource *res);
+
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
.base_obj_to_res = vmw_user_surface_base_to_res,
@@ -133,7 +154,12 @@ static const struct vmw_res_func vmw_gb_surface_func = {
.create = vmw_gb_surface_create,
.destroy = vmw_gb_surface_destroy,
.bind = vmw_gb_surface_bind,
- .unbind = vmw_gb_surface_unbind
+ .unbind = vmw_gb_surface_unbind,
+ .dirty_alloc = vmw_surface_dirty_alloc,
+ .dirty_free = vmw_surface_dirty_free,
+ .dirty_sync = vmw_surface_dirty_sync,
+ .dirty_range_add = vmw_surface_dirty_range_add,
+ .clean = vmw_surface_clean,
};
/**
@@ -336,7 +362,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
void *cmd;
if (res->func->destroy == vmw_gb_surface_destroy) {
@@ -360,7 +385,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = vmw_res_to_srf(res);
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
@@ -641,6 +665,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_private *dev_priv = srf->res.dev_priv;
uint32_t size = user_srf->size;
+ WARN_ON_ONCE(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -1168,10 +1193,16 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
- res->backup_dirty = false;
}
vmw_fifo_commit(dev_priv, submit_size);
+ if (res->backup->dirty && res->backup_dirty) {
+ /* We've just made a full upload. Clear dirty regions. */
+ vmw_bo_dirty_clear_res(res);
+ }
+
+ res->backup_dirty = false;
+
return 0;
}
@@ -1636,7 +1667,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
}
} else if (req->base.drm_surface_flags &
- drm_vmw_surface_flag_create_buffer)
+ (drm_vmw_surface_flag_create_buffer |
+ drm_vmw_surface_flag_coherent))
ret = vmw_user_bo_alloc(dev_priv, tfile,
res->backup_size,
req->base.drm_surface_flags &
@@ -1650,6 +1682,26 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
goto out_unlock;
}
+ if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
+ struct vmw_buffer_object *backup = res->backup;
+
+ ttm_bo_reserve(&backup->base, false, false, NULL);
+ if (!res->func->dirty_alloc)
+ ret = -EINVAL;
+ if (!ret)
+ ret = vmw_bo_dirty_add(backup);
+ if (!ret) {
+ res->coherent = true;
+ ret = res->func->dirty_alloc(res);
+ }
+ ttm_bo_unreserve(&backup->base);
+ if (ret) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->base.drm_surface_flags &
@@ -1758,3 +1810,338 @@ out_bad_resource:
return ret;
}
+
+/**
+ * vmw_subres_dirty_add - Add a dirty region to a subresource
+ * @dirty: The surface's dirty tracker.
+ * @loc_start: The location corresponding to the start of the region.
+ * @loc_end: The location corresponding to the end of the region.
+ *
+ * As we are assuming that @loc_start and @loc_end represent a sequential
+ * range of backing store memory, if the region spans multiple lines then
+ * regardless of the x coordinate, the full lines are dirtied.
+ * Correspondingly if the region spans multiple z slices, then full rather
+ * than partial z slices are dirtied.
+ */
+static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
+ const struct svga3dsurface_loc *loc_start,
+ const struct svga3dsurface_loc *loc_end)
+{
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
+ u32 mip = loc_start->sub_resource % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+ u32 box_c2 = box->z + box->d;
+
+ if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
+ return;
+
+ if (box->d == 0 || box->z > loc_start->z)
+ box->z = loc_start->z;
+ if (box_c2 < loc_end->z)
+ box->d = loc_end->z - box->z;
+
+ if (loc_start->z + 1 == loc_end->z) {
+ box_c2 = box->y + box->h;
+ if (box->h == 0 || box->y > loc_start->y)
+ box->y = loc_start->y;
+ if (box_c2 < loc_end->y)
+ box->h = loc_end->y - box->y;
+
+ if (loc_start->y + 1 == loc_end->y) {
+ box_c2 = box->x + box->w;
+ if (box->w == 0 || box->x > loc_start->x)
+ box->x = loc_start->x;
+ if (box_c2 < loc_end->x)
+ box->w = loc_end->x - box->x;
+ } else {
+ box->x = 0;
+ box->w = size->width;
+ }
+ } else {
+ box->y = 0;
+ box->h = size->height;
+ box->x = 0;
+ box->w = size->width;
+ }
+}
+
+/**
+ * vmw_subres_dirty_full - Mark a full subresource as dirty
+ * @dirty: The surface's dirty tracker.
+ * @subres: The subresource
+ */
+static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
+{
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ u32 mip = subres % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+ SVGA3dBox *box = &dirty->boxes[subres];
+
+ box->x = 0;
+ box->y = 0;
+ box->z = 0;
+ box->w = size->width;
+ box->h = size->height;
+ box->d = size->depth;
+}
+
+/*
+ * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
+ * surfaces.
+ */
+static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
+ size_t start, size_t end)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t backup_end = res->backup_offset + res->backup_size;
+ struct svga3dsurface_loc loc1, loc2;
+ const struct svga3dsurface_cache *cache;
+
+ start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
+ end = min(end, backup_end) - res->backup_offset;
+ cache = &dirty->cache;
+ svga3dsurface_get_loc(cache, &loc1, start);
+ svga3dsurface_get_loc(cache, &loc2, end - 1);
+ svga3dsurface_inc_loc(cache, &loc2);
+
+ if (loc1.sub_resource + 1 == loc2.sub_resource) {
+ /* Dirty range covers a single sub-resource */
+ vmw_subres_dirty_add(dirty, &loc1, &loc2);
+ } else {
+ /* Dirty range covers multiple sub-resources */
+ struct svga3dsurface_loc loc_min, loc_max;
+ u32 sub_res;
+
+ svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max);
+ vmw_subres_dirty_add(dirty, &loc1, &loc_max);
+ svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
+ vmw_subres_dirty_add(dirty, &loc_min, &loc2);
+ for (sub_res = loc1.sub_resource + 1;
+ sub_res < loc2.sub_resource - 1; ++sub_res)
+ vmw_subres_dirty_full(dirty, sub_res);
+ }
+}
+
+/*
+ * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
+ * surfaces.
+ */
+static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
+ size_t start, size_t end)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
+ SVGA3dBox *box = &dirty->boxes[0];
+ u32 box_c2;
+
+ box->h = box->d = 1;
+ start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
+ end = min(end, backup_end) - res->backup_offset;
+ box_c2 = box->x + box->w;
+ if (box->w == 0 || box->x > start)
+ box->x = start;
+ if (box_c2 < end)
+ box->w = end - box->x;
+}
+
+/*
+ * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
+ */
+static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
+ size_t end)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+
+ if (WARN_ON(end <= res->backup_offset ||
+ start >= res->backup_offset + res->backup_size))
+ return;
+
+ if (srf->format == SVGA3D_BUFFER)
+ vmw_surface_buf_dirty_range_add(res, start, end);
+ else
+ vmw_surface_tex_dirty_range_add(res, start, end);
+}
+
+/*
+ * vmw_surface_dirty_sync - The surface's dirty_sync callback.
+ */
+static int vmw_surface_dirty_sync(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ bool has_dx = false;
+ u32 i, num_dirty;
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t alloc_size;
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXUpdateSubResource body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd2;
+ void *cmd;
+
+ num_dirty = 0;
+ for (i = 0; i < dirty->num_subres; ++i) {
+ const SVGA3dBox *box = &dirty->boxes[i];
+
+ if (box->d)
+ num_dirty++;
+ }
+
+ if (!num_dirty)
+ goto out;
+
+ alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
+ cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd1 = cmd;
+ cmd2 = cmd;
+
+ for (i = 0; i < dirty->num_subres; ++i) {
+ const SVGA3dBox *box = &dirty->boxes[i];
+
+ if (!box->d)
+ continue;
+
+ /*
+ * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
+ * UPDATE_GB_IMAGE is not.
+ */
+ if (has_dx) {
+ cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd1->body.subResource = i;
+ cmd1->body.box = *box;
+ cmd1++;
+ } else {
+ cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.image.sid = res->id;
+ cmd2->body.image.face = i / cache->num_mip_levels;
+ cmd2->body.image.mipmap = i -
+ (cache->num_mip_levels * cmd2->body.image.face);
+ cmd2->body.box = *box;
+ cmd2++;
+ }
+
+ }
+ vmw_fifo_commit(dev_priv, alloc_size);
+ out:
+ memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
+ dirty->num_subres);
+
+ return 0;
+}
+
+/*
+ * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
+ */
+static int vmw_surface_dirty_alloc(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_surface_dirty *dirty;
+ u32 num_layers = 1;
+ u32 num_mip;
+ u32 num_subres;
+ u32 num_samples;
+ size_t dirty_size, acc_size;
+ static struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ int ret;
+
+ if (srf->array_size)
+ num_layers = srf->array_size;
+ else if (srf->flags & SVGA3D_SURFACE_CUBEMAP)
+ num_layers *= SVGA3D_MAX_SURFACE_FACES;
+
+ num_mip = srf->mip_levels[0];
+ if (!num_mip)
+ num_mip = 1;
+
+ num_subres = num_layers * num_mip;
+ dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]);
+ acc_size = ttm_round_pot(dirty_size);
+ ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
+ acc_size, &ctx);
+ if (ret) {
+ VMW_DEBUG_USER("Out of graphics memory for surface "
+ "dirty tracker.\n");
+ return ret;
+ }
+
+ dirty = kvzalloc(dirty_size, GFP_KERNEL);
+ if (!dirty) {
+ ret = -ENOMEM;
+ goto out_no_dirty;
+ }
+
+ num_samples = max_t(u32, 1, srf->multisample_count);
+ ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip,
+ num_layers, num_samples, &dirty->cache);
+ if (ret)
+ goto out_no_cache;
+
+ dirty->num_subres = num_subres;
+ dirty->size = acc_size;
+ res->dirty = (struct vmw_resource_dirty *) dirty;
+
+ return 0;
+
+out_no_cache:
+ kvfree(dirty);
+out_no_dirty:
+ ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
+ return ret;
+}
+
+/*
+ * vmw_surface_dirty_free - The surface's dirty_free callback
+ */
+static void vmw_surface_dirty_free(struct vmw_resource *res)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t acc_size = dirty->size;
+
+ kvfree(dirty);
+ ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
+ res->dirty = NULL;
+}
+
+/*
+ * vmw_surface_clean - The surface's clean callback
+ */
+static int vmw_surface_clean(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ size_t alloc_size;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd;
+
+ alloc_size = sizeof(*cmd);
+ cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = res->id;
+ vmw_fifo_commit(dev_priv, alloc_size);
+
+ return 0;
+}
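The dirty-tracking callbacks added above clamp each incoming byte range to the resource's backing store and then grow a per-subresource box until it covers every reported range. A minimal userspace sketch of the 1-D (buffer-surface) form of that accumulation follows; the struct and helper names are illustrative only, not the driver's.

#include <stddef.h>
#include <stdio.h>

struct box { size_t x, w; };	/* dirty bytes: [x, x + w) */

static void dirty_range_add(struct box *box, size_t backup_offset,
			    size_t backup_size, size_t start, size_t end)
{
	size_t backup_end = backup_offset + backup_size;
	size_t lo, hi;

	if (end <= backup_offset || start >= backup_end)
		return;				/* range misses this resource */

	/* Clamp to the backing store and make the offsets resource-relative */
	lo = (start > backup_offset ? start : backup_offset) - backup_offset;
	hi = (end < backup_end ? end : backup_end) - backup_offset;

	if (box->w == 0) {			/* box is still empty */
		box->x = lo;
		box->w = hi - lo;
		return;
	}

	/* Grow the box to the union of itself and the new range */
	if (lo < box->x) {
		box->w += box->x - lo;
		box->x = lo;
	}
	if (hi > box->x + box->w)
		box->w = hi - box->x;
}

int main(void)
{
	struct box b = { 0, 0 };

	dirty_range_add(&b, 4096, 8192, 5000, 5100);
	dirty_range_add(&b, 4096, 8192, 6000, 6500);
	printf("dirty box: [%zu, %zu)\n", b.x, b.x + b.w);	/* [904, 2404) */
	return 0;
}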
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 5a7b8bb420de..ce288756531b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -29,10 +29,23 @@
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ static const struct vm_operations_struct vmw_vm_ops = {
+ .pfn_mkwrite = vmw_bo_vm_mkwrite,
+ .page_mkwrite = vmw_bo_vm_mkwrite,
+ .fault = vmw_bo_vm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close
+ };
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+ int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
- return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+ if (ret)
+ return ret;
+
+ vma->vm_ops = &vmw_vm_ops;
+
+ return 0;
}
/* struct vmw_validation_mem callback */
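The vmw_mmap() change above relies on a simple pattern: let the generic TTM mmap helper set the mapping up, then override vma->vm_ops with driver handlers so CPU faults and write-notifications can feed the dirty tracking. A stand-alone sketch of that call-then-override pattern, with stand-in types instead of the TTM/DRM ones:

#include <stdio.h>

struct vm_ops { void (*fault)(void); };

static void driver_fault(void) { puts("driver fault handler"); }

static int generic_mmap(const struct vm_ops **ops)
{
	static const struct vm_ops generic_ops = { 0 };

	*ops = &generic_ops;		/* generic helper installs its own ops */
	return 0;
}

static int driver_mmap(const struct vm_ops **ops)
{
	static const struct vm_ops driver_ops = { .fault = driver_fault };
	int ret = generic_mmap(ops);

	if (ret)
		return ret;

	*ops = &driver_ops;		/* override with the driver's handlers */
	return 0;
}

int main(void)
{
	const struct vm_ops *ops;

	if (!driver_mmap(&ops) && ops->fault)
		ops->fault();
	return 0;
}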
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f611b2290a1b..e69bc373ae2e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -33,6 +33,8 @@
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
* @hash: A hash entry used for the duplicate detection hash table.
+ * @coherent_count: If switching backup buffers, number of new coherent
+ * resources that will have this buffer as a backup buffer.
* @as_mob: Validate as mob.
* @cpu_blit: Validate for cpu blit access.
*
@@ -42,6 +44,7 @@
struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
struct drm_hash_item hash;
+ unsigned int coherent_count;
u32 as_mob : 1;
u32 cpu_blit : 1;
};
@@ -459,6 +462,19 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
if (ret)
goto out_unreserve;
}
+
+ if (val->switching_backup && val->new_backup &&
+ res->coherent) {
+ struct vmw_validation_bo_node *bo_node =
+ vmw_validation_find_bo_dup(ctx,
+ val->new_backup);
+
+ if (WARN_ON(!bo_node)) {
+ ret = -EINVAL;
+ goto out_unreserve;
+ }
+ bo_node->coherent_count++;
+ }
}
return 0;
@@ -521,6 +537,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
};
int ret;
+ if (atomic_read(&vbo->cpu_writers))
+ return -EBUSY;
+
if (vbo->pin_count > 0)
return 0;
@@ -562,6 +581,9 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
int ret;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ struct vmw_buffer_object *vbo =
+ container_of(entry->base.bo, typeof(*vbo), base);
+
if (entry->cpu_blit) {
struct ttm_operation_ctx ctx = {
.interruptible = intr,
@@ -576,6 +598,27 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
}
if (ret)
return ret;
+
+ /*
+ * Rather than having the resource code allocate the bo
+ * dirty tracker in resource_unreserve(), where we can't fail,
+ * do it here when validating the buffer object.
+ */
+ if (entry->coherent_count) {
+ unsigned int coherent_count = entry->coherent_count;
+
+ while (coherent_count) {
+ ret = vmw_bo_dirty_add(vbo);
+ if (ret)
+ return ret;
+
+ coherent_count--;
+ }
+ entry->coherent_count -= coherent_count;
+ }
+
+ if (vbo->dirty)
+ vmw_bo_dirty_scan(vbo);
}
return 0;
}
@@ -601,7 +644,8 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
struct vmw_resource *res = val->res;
struct vmw_buffer_object *backup = res->backup;
- ret = vmw_resource_validate(res, intr);
+ ret = vmw_resource_validate(res, intr, val->dirty_set &&
+ val->dirty);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
@@ -828,3 +872,34 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx,
ctx->mem_size_left += size;
return 0;
}
+
+/**
+ * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve. It's typically used as part of an error path
+ */
+void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
+{
+ struct vmw_validation_bo_node *entry;
+
+ /*
+ * Switching coherent resource backup buffers failed.
+ * Release corresponding buffer object dirty trackers.
+ */
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ if (entry->coherent_count) {
+ unsigned int coherent_count = entry->coherent_count;
+ struct vmw_buffer_object *vbo =
+ container_of(entry->base.bo, typeof(*vbo),
+ base);
+
+ while (coherent_count--)
+ vmw_bo_dirty_release(vbo);
+ }
+ }
+
+ ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+}
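The coherent_count bookkeeping above takes one dirty-tracking reference per new coherent resource while validating, and vmw_validation_bo_backoff() drops exactly that many again on the error path. A small sketch of this counted acquire-with-rollback pattern; dirty_add()/dirty_release() are stand-ins for vmw_bo_dirty_add()/vmw_bo_dirty_release():

#include <stdio.h>

struct bo { unsigned int dirty_refs; };

static int dirty_add(struct bo *bo)      { bo->dirty_refs++; return 0; }
static void dirty_release(struct bo *bo) { bo->dirty_refs--; }

static int acquire_trackers(struct bo *bo, unsigned int count)
{
	unsigned int done;

	for (done = 0; done < count; done++) {
		if (dirty_add(bo)) {
			/* Error path: drop only the references already taken */
			while (done--)
				dirty_release(bo);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct bo bo = { 0 };

	acquire_trackers(&bo, 3);
	printf("dirty refs: %u\n", bo.dirty_refs);	/* 3 */
	return 0;
}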
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 0e063743dd86..739906d1b3eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -170,21 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
- NULL, true);
-}
-
-/**
- * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
- * validation context
- * @ctx: The validation context
- *
- * This function unreserves the buffer objects previously reserved using
- * vmw_validation_bo_reserve. It's typically used as part of an error path
- */
-static inline void
-vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
-{
- ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+ NULL);
}
/**
@@ -269,4 +255,6 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx,
unsigned int size);
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
void *val_private, u32 dirty);
+void vmw_validation_bo_backoff(struct vmw_validation_context *ctx);
+
#endif
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 21ad1c359b61..ff506bc99414 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -270,11 +270,12 @@ static void display_update(struct drm_simple_display_pipe *pipe,
}
static enum drm_mode_status
-display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+display_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
{
struct xen_drm_front_drm_pipeline *pipeline =
- container_of(crtc, struct xen_drm_front_drm_pipeline,
- pipe.crtc);
+ container_of(pipe, struct xen_drm_front_drm_pipeline,
+ pipe);
if (mode->hdisplay != pipeline->width)
return MODE_ERROR;
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index cf987a317a55..6dab94adf25e 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -2,7 +2,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 742aa9ff21b8..2c8559ff3481 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -445,7 +445,7 @@ static int host1x_device_add(struct host1x *host1x,
of_dma_configure(&device->dev, host1x->dev->of_node, true);
device->dev.dma_parms = &device->dma_parms;
- dma_set_max_seg_size(&device->dev, SZ_4M);
+ dma_set_max_seg_size(&device->dev, UINT_MAX);
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 48c84c48299c..e8d3fda91d8a 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -232,9 +232,9 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
*
* Must be called with the cdma lock held.
*/
-int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
- struct host1x_cdma *cdma,
- unsigned int needed)
+static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
+ struct host1x_cdma *cdma,
+ unsigned int needed)
{
while (true) {
struct push_buffer *pb = &cdma->push_buffer;
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 1436295aa450..4cd212bb570d 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -115,14 +115,14 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host)
/**
* host1x_channel_request() - Allocate a channel
- * @device: Host1x unit this channel will be used to send commands to
+ * @client: Host1x client this channel will be used to send commands to
*
- * Allocates a new host1x channel for @device. May return NULL if CDMA
+ * Allocates a new host1x channel for @client. May return NULL if CDMA
* initialization fails.
*/
-struct host1x_channel *host1x_channel_request(struct device *dev)
+struct host1x_channel *host1x_channel_request(struct host1x_client *client)
{
- struct host1x *host = dev_get_drvdata(dev->parent);
+ struct host1x *host = dev_get_drvdata(client->dev->parent);
struct host1x_channel_list *chlist = &host->channel_list;
struct host1x_channel *channel;
int err;
@@ -133,7 +133,8 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
kref_init(&channel->refcount);
mutex_init(&channel->submitlock);
- channel->dev = dev;
+ channel->client = client;
+ channel->dev = client->dev;
err = host1x_hw_channel_init(host, channel, channel->id);
if (err < 0)
@@ -148,7 +149,7 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
fail:
clear_bit(channel->id, chlist->allocated_channels);
- dev_err(dev, "failed to initialize channel\n");
+ dev_err(client->dev, "failed to initialize channel\n");
return NULL;
}
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 4fd694834f74..39044ff6c3aa 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -26,6 +26,7 @@ struct host1x_channel {
unsigned int id;
struct mutex submitlock;
void __iomem *regs;
+ struct host1x_client *client;
struct device *dev;
struct host1x_cdma cdma;
};
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 5a3f797240d4..a738ea55e407 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -18,10 +18,6 @@
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
-
#include "bus.h"
#include "channel.h"
#include "debug.h"
@@ -77,6 +73,10 @@ static const struct host1x_info host1x01_info = {
.init = host1x01_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x02_info = {
@@ -87,6 +87,10 @@ static const struct host1x_info host1x02_info = {
.init = host1x02_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x04_info = {
@@ -97,6 +101,10 @@ static const struct host1x_info host1x04_info = {
.init = host1x04_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x05_info = {
@@ -107,6 +115,10 @@ static const struct host1x_info host1x05_info = {
.init = host1x05_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
@@ -126,6 +138,7 @@ static const struct host1x_info host1x06_info = {
.init = host1x06_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
+ .has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
@@ -148,6 +161,7 @@ static const struct host1x_info host1x07_info = {
.init = host1x07_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
+ .has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
.sid_table = tegra194_sid_table,
@@ -178,6 +192,117 @@ static void host1x_setup_sid_table(struct host1x *host)
}
}
+static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
+ int err;
+
+ /*
+ * If the host1x firewall is enabled, there's no need to enable IOMMU
+ * support. Similarly, if host1x is already attached to an IOMMU (via
+ * the DMA API), don't try to attach again.
+ */
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
+ return domain;
+
+ host->group = iommu_group_get(host->dev);
+ if (host->group) {
+ struct iommu_domain_geometry *geometry;
+ dma_addr_t start, end;
+ unsigned long order;
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto put_group;
+
+ host->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!host->domain) {
+ err = -ENOMEM;
+ goto put_cache;
+ }
+
+ err = iommu_attach_group(host->domain, host->group);
+ if (err) {
+ if (err == -ENODEV)
+ err = 0;
+
+ goto free_domain;
+ }
+
+ geometry = &host->domain->geometry;
+ start = geometry->aperture_start & host->info->dma_mask;
+ end = geometry->aperture_end & host->info->dma_mask;
+
+ order = __ffs(host->domain->pgsize_bitmap);
+ init_iova_domain(&host->iova, 1UL << order, start >> order);
+ host->iova_end = end;
+
+ domain = host->domain;
+ }
+
+ return domain;
+
+free_domain:
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+put_cache:
+ iova_cache_put();
+put_group:
+ iommu_group_put(host->group);
+ host->group = NULL;
+
+ return ERR_PTR(err);
+}
+
+static int host1x_iommu_init(struct host1x *host)
+{
+ u64 mask = host->info->dma_mask;
+ struct iommu_domain *domain;
+ int err;
+
+ domain = host1x_iommu_attach(host);
+ if (IS_ERR(domain)) {
+ err = PTR_ERR(domain);
+ dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
+ return err;
+ }
+
+ /*
+ * If we're not behind an IOMMU, make sure we don't get push buffers
+ * that are allocated outside of the range addressable by the GATHER
+ * opcode.
+ *
+ * Newer generations of Tegra (Tegra186 and later) support a wide
+ * variant of the GATHER opcode that allows addressing more bits.
+ */
+ if (!domain && !host->info->has_wide_gather)
+ mask = DMA_BIT_MASK(32);
+
+ err = dma_coerce_mask_and_coherent(host->dev, mask);
+ if (err < 0) {
+ dev_err(host->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void host1x_iommu_exit(struct host1x *host)
+{
+ if (host->domain) {
+ put_iova_domain(&host->iova);
+ iommu_detach_group(host->domain, host->group);
+
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+
+ iova_cache_put();
+
+ iommu_group_put(host->group);
+ host->group = NULL;
+ }
+}
+
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
@@ -237,7 +362,8 @@ static int host1x_probe(struct platform_device *pdev)
return PTR_ERR(host->hv_regs);
}
- dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+ host->dev->dma_parms = &host->dma_parms;
+ dma_set_max_seg_size(host->dev, UINT_MAX);
if (host->info->init) {
err = host->info->init(host);
@@ -261,87 +387,42 @@ static int host1x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err;
}
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (host->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(host->dev);
- arm_iommu_detach_device(host->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
- goto skip_iommu;
-
- host->group = iommu_group_get(&pdev->dev);
- if (host->group) {
- struct iommu_domain_geometry *geometry;
- u64 mask = dma_get_mask(host->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- err = iova_cache_get();
- if (err < 0)
- goto put_group;
-
- host->domain = iommu_domain_alloc(&platform_bus_type);
- if (!host->domain) {
- err = -ENOMEM;
- goto put_cache;
- }
- err = iommu_attach_group(host->domain, host->group);
- if (err) {
- if (err == -ENODEV) {
- iommu_domain_free(host->domain);
- host->domain = NULL;
- iova_cache_put();
- iommu_group_put(host->group);
- host->group = NULL;
- goto skip_iommu;
- }
-
- goto fail_free_domain;
- }
-
- geometry = &host->domain->geometry;
- start = geometry->aperture_start & mask;
- end = geometry->aperture_end & mask;
-
- order = __ffs(host->domain->pgsize_bitmap);
- init_iova_domain(&host->iova, 1UL << order, start >> order);
- host->iova_end = end;
+ err = host1x_iommu_init(host);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
+ return err;
}
-skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
dev_err(&pdev->dev, "failed to initialize channel list\n");
- goto fail_detach_device;
+ goto iommu_exit;
}
err = clk_prepare_enable(host->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock\n");
- goto fail_free_channels;
+ goto free_channels;
}
err = reset_control_deassert(host->rst);
if (err < 0) {
dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
- goto fail_unprepare_disable;
+ goto unprepare_disable;
}
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- goto fail_reset_assert;
+ goto reset_assert;
}
err = host1x_intr_init(host, syncpt_irq);
if (err) {
dev_err(&pdev->dev, "failed to initialize interrupts\n");
- goto fail_deinit_syncpt;
+ goto deinit_syncpt;
}
host1x_debug_init(host);
@@ -351,33 +432,22 @@ skip_iommu:
err = host1x_register(host);
if (err < 0)
- goto fail_deinit_intr;
+ goto deinit_intr;
return 0;
-fail_deinit_intr:
+deinit_intr:
host1x_intr_deinit(host);
-fail_deinit_syncpt:
+deinit_syncpt:
host1x_syncpt_deinit(host);
-fail_reset_assert:
+reset_assert:
reset_control_assert(host->rst);
-fail_unprepare_disable:
+unprepare_disable:
clk_disable_unprepare(host->clk);
-fail_free_channels:
+free_channels:
host1x_channel_list_free(&host->channel_list);
-fail_detach_device:
- if (host->group && host->domain) {
- put_iova_domain(&host->iova);
- iommu_detach_group(host->domain, host->group);
- }
-fail_free_domain:
- if (host->domain)
- iommu_domain_free(host->domain);
-put_cache:
- if (host->group)
- iova_cache_put();
-put_group:
- iommu_group_put(host->group);
+iommu_exit:
+ host1x_iommu_exit(host);
return err;
}
@@ -387,18 +457,12 @@ static int host1x_remove(struct platform_device *pdev)
struct host1x *host = platform_get_drvdata(pdev);
host1x_unregister(host);
+ host1x_debug_deinit(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
reset_control_assert(host->rst);
clk_disable_unprepare(host->clk);
-
- if (host->domain) {
- put_iova_domain(&host->iova);
- iommu_detach_group(host->domain, host->group);
- iommu_domain_free(host->domain);
- iova_cache_put();
- iommu_group_put(host->group);
- }
+ host1x_iommu_exit(host);
return 0;
}
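host1x_iommu_init() above only narrows the DMA mask to 32 bits when the device is not behind an IOMMU and the SoC lacks the wide GATHER opcode; otherwise the per-SoC mask is kept. A standalone sketch of that decision (DMA_BIT_MASK mirrors the kernel macro, the helper name is made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static uint64_t pick_dma_mask(bool behind_iommu, bool has_wide_gather,
			      uint64_t soc_mask)
{
	/* Without an IOMMU, a plain GATHER opcode can only address 32 bits */
	if (!behind_iommu && !has_wide_gather)
		return DMA_BIT_MASK(32);

	return soc_mask;
}

int main(void)
{
	/* e.g. a 34-bit SoC without IOMMU vs. a 40-bit SoC behind an IOMMU */
	printf("%#llx\n",
	       (unsigned long long)pick_dma_mask(false, false, DMA_BIT_MASK(34)));
	printf("%#llx\n",
	       (unsigned long long)pick_dma_mask(true, true, DMA_BIT_MASK(40)));
	return 0;
}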
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index ff56f5e23a02..f781a9b0f39d 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -97,6 +97,7 @@ struct host1x_info {
int (*init)(struct host1x *host1x); /* initialize per SoC ops */
unsigned int sync_offset; /* offset of syncpoint registers */
u64 dma_mask; /* mask of addressable memory */
+ bool has_wide_gather; /* supports GATHER_W opcode */
bool has_hypervisor; /* has hypervisor registers */
unsigned int num_sid_entries;
const struct host1x_sid_entry *sid_table;
@@ -140,6 +141,8 @@ struct host1x {
struct list_head devices;
struct list_head list;
+
+ struct device_dma_parameters dma_parms;
};
void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 26f3c741d085..9245add23b5d 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -105,7 +105,6 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
/* Add nr_completed to trace */
trace_host1x_channel_submit_complete(dev_name(channel->dev),
waiter->count, waiter->thresh);
-
}
static void action_wakeup(struct host1x_waitlist *waiter)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index eaa5c3352c13..25ca54de8fc5 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -99,6 +99,8 @@ EXPORT_SYMBOL(host1x_job_add_gather);
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
+ struct host1x_client *client = job->client;
+ struct device *dev = client->dev;
unsigned int i;
int err;
@@ -106,8 +108,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
+ dma_addr_t phys_addr, *phys;
struct sg_table *sgt;
- dma_addr_t phys_addr;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
@@ -115,7 +117,50 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
+ if (client->group)
+ phys = &phys_addr;
+ else
+ phys = NULL;
+
+ sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
+ if (sgt) {
+ unsigned long mask = HOST1X_RELOC_READ |
+ HOST1X_RELOC_WRITE;
+ enum dma_data_direction dir;
+
+ switch (reloc->flags & mask) {
+ case HOST1X_RELOC_READ:
+ dir = DMA_TO_DEVICE;
+ break;
+
+ case HOST1X_RELOC_WRITE:
+ dir = DMA_FROM_DEVICE;
+ break;
+
+ case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
+ dir = DMA_BIDIRECTIONAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto unpin;
+ }
+
+ err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (!err) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ job->unpins[job->num_unpins].dev = dev;
+ job->unpins[job->num_unpins].dir = dir;
+ phys_addr = sg_dma_address(sgt->sgl);
+ }
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
@@ -139,7 +184,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- phys_addr = host1x_bo_pin(g->bo, &sgt);
+ sgt = host1x_bo_pin(host->dev, g->bo, NULL);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
@@ -163,15 +212,24 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- job->addr_phys[job->num_unpins] =
- iova_dma_addr(&host->iova, alloc);
job->unpins[job->num_unpins].size = gather_size;
+ phys_addr = iova_dma_addr(&host->iova, alloc);
} else {
- job->addr_phys[job->num_unpins] = phys_addr;
+ err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (!err) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ job->unpins[job->num_unpins].dev = host->dev;
+ phys_addr = sg_dma_address(sgt->sgl);
}
- job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];
+ job->addr_phys[job->num_unpins] = phys_addr;
+ job->gather_addr_phys[i] = phys_addr;
+ job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
job->unpins[job->num_unpins].bo = g->bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
@@ -436,7 +494,8 @@ out:
return err;
}
-static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+static inline int copy_gathers(struct device *host, struct host1x_job *job,
+ struct device *dev)
{
struct host1x_firewall fw;
size_t size = 0;
@@ -459,12 +518,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
* Try a non-blocking allocation from a higher priority pools first,
* as awaiting for the allocation here is a major performance hit.
*/
- job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
+ job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
GFP_NOWAIT);
/* the higher priority allocation failed, try the generic-blocking */
if (!job->gather_copy_mapped)
- job->gather_copy_mapped = dma_alloc_wc(dev, size,
+ job->gather_copy_mapped = dma_alloc_wc(host, size,
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped)
@@ -512,7 +571,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
goto out;
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
- err = copy_gathers(job, dev);
+ err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}
@@ -557,6 +616,8 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
+ struct device *dev = unpin->dev ?: host->dev;
+ struct sg_table *sgt = unpin->sgt;
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
unpin->size && host->domain) {
@@ -566,14 +627,18 @@ void host1x_job_unpin(struct host1x_job *job)
iova_pfn(&host->iova, job->addr_phys[i]));
}
- host1x_bo_unpin(unpin->bo, unpin->sgt);
+ if (unpin->dev && sgt)
+ dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
+ unpin->dir);
+
+ host1x_bo_unpin(dev, unpin->bo, sgt);
host1x_bo_put(unpin->bo);
}
job->num_unpins = 0;
if (job->gather_copy_size)
- dma_free_wc(job->channel->dev, job->gather_copy_size,
+ dma_free_wc(host->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
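pin_job() above chooses a DMA direction from each relocation's access flags before calling dma_map_sg(). The mapping itself is tiny; a compact sketch follows, where the flag values and the direction enum are illustrative stand-ins for the kernel definitions:

#include <stdio.h>

#define RELOC_READ  (1 << 0)	/* stand-in for HOST1X_RELOC_READ */
#define RELOC_WRITE (1 << 1)	/* stand-in for HOST1X_RELOC_WRITE */

enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static int reloc_flags_to_dir(unsigned long flags, enum dma_dir *dir)
{
	switch (flags & (RELOC_READ | RELOC_WRITE)) {
	case RELOC_READ:
		*dir = DMA_TO_DEVICE;		/* engine only reads the buffer */
		return 0;
	case RELOC_WRITE:
		*dir = DMA_FROM_DEVICE;		/* engine only writes the buffer */
		return 0;
	case RELOC_READ | RELOC_WRITE:
		*dir = DMA_BIDIRECTIONAL;
		return 0;
	default:
		return -1;			/* no access flags: reject reloc */
	}
}

int main(void)
{
	enum dma_dir dir;

	if (!reloc_flags_to_dir(RELOC_READ, &dir))
		printf("dir = %d\n", dir);
	return 0;
}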
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 62b8805e6b35..94bc2e4ae241 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -8,6 +8,8 @@
#ifndef __HOST1X_JOB_H
#define __HOST1X_JOB_H
+#include <linux/dma-direction.h>
+
struct host1x_job_gather {
unsigned int words;
dma_addr_t base;
@@ -19,7 +21,9 @@ struct host1x_job_gather {
struct host1x_job_unpin_data {
struct host1x_bo *bo;
struct sg_table *sgt;
+ struct device *dev;
size_t size;
+ enum dma_data_direction dir;
};
/*
diff --git a/drivers/greybus/connection.c b/drivers/greybus/connection.c
index fc8f57f97ce6..e3799a53a193 100644
--- a/drivers/greybus/connection.c
+++ b/drivers/greybus/connection.c
@@ -361,9 +361,6 @@ static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
if (connection->mode_switch)
peer_space += sizeof(struct gb_operation_msg_hdr);
- if (!hd->driver->cport_quiesce)
- return 0;
-
ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
peer_space,
GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1ecb5124421c..494a39e74939 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -525,6 +525,7 @@ config HID_LENOVO
config HID_LOGITECH
tristate "Logitech devices"
depends on HID
+ depends on LEDS_CLASS
default !EXPERT
---help---
Support for Logitech devices that are not fully compliant with HID standard.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0c03308cfb08..bfefa365b1ce 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
+obj-$(CONFIG_HID_LOGITECH) += hid-lg-g15.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MACALLY) += hid-macally.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 63fdbf09b044..e0b241bd3070 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -212,6 +212,18 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
}
/*
+ * Concatenate a usage which defines 16 bits or less with the
+ * currently defined usage page to form a 32-bit usage
+ */
+
+static void complete_usage(struct hid_parser *parser, unsigned int index)
+{
+ parser->local.usage[index] &= 0xFFFF;
+ parser->local.usage[index] |=
+ (parser->global.usage_page & 0xFFFF) << 16;
+}
+
+/*
* Add a usage to the temporary parser table.
*/
@@ -222,6 +234,14 @@ static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
+
+ /*
+ * If a Usage item only includes a usage id, concatenate it with
+ * the currently defined usage page
+ */
+ if (size <= 2)
+ complete_usage(parser, parser->local.usage_index);
+
parser->local.usage_size[parser->local.usage_index] = size;
parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ?
@@ -543,13 +563,32 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
* usage value."
*/
-static void hid_concatenate_usage_page(struct hid_parser *parser)
+static void hid_concatenate_last_usage_page(struct hid_parser *parser)
{
int i;
+ unsigned int usage_page;
+ unsigned int current_page;
+
+ if (!parser->local.usage_index)
+ return;
- for (i = 0; i < parser->local.usage_index; i++)
- if (parser->local.usage_size[i] <= 2)
- parser->local.usage[i] += parser->global.usage_page << 16;
+ usage_page = parser->global.usage_page;
+
+ /*
+ * Concatenate the usage page again only if the last declared Usage Page
+ * has not already been used in a previous usage concatenation
+ */
+ for (i = parser->local.usage_index - 1; i >= 0; i--) {
+ if (parser->local.usage_size[i] > 2)
+ /* Ignore extended usages */
+ continue;
+
+ current_page = parser->local.usage[i] >> 16;
+ if (current_page == usage_page)
+ break;
+
+ complete_usage(parser, i);
+ }
}
/*
@@ -561,7 +600,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int ret;
- hid_concatenate_usage_page(parser);
+ hid_concatenate_last_usage_page(parser);
data = item_udata(item);
@@ -742,6 +781,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
parser->global.report_size == 8)
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
+
+ if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
+ parser->global.report_size == 8)
+ parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
@@ -772,7 +815,7 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int i;
- hid_concatenate_usage_page(parser);
+ hid_concatenate_last_usage_page(parser);
data = item_udata(item);
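complete_usage() above builds a 32-bit HID usage by keeping the low 16 bits of the usage id and placing the current usage page in the high 16 bits. The same computation as a tiny standalone function, with example values only:

#include <stdint.h>
#include <stdio.h>

static uint32_t complete_usage(uint32_t usage, uint32_t usage_page)
{
	return (usage & 0xFFFF) | ((usage_page & 0xFFFF) << 16);
}

int main(void)
{
	/* Usage Page 0x01 (Generic Desktop), usage id 0x06 (Keyboard) */
	printf("%#010x\n", (unsigned int)complete_usage(0x06, 0x01));	/* 0x00010006 */
	return 0;
}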
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index d86a9189e88f..2aa4ed157aec 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -35,6 +35,7 @@ struct cbas_ec {
struct device *dev; /* The platform device (EC) */
struct input_dev *input;
bool base_present;
+ bool base_folded;
struct notifier_block notifier;
};
@@ -208,7 +209,14 @@ static int __cbas_ec_probe(struct platform_device *pdev)
return error;
}
- input_report_switch(input, SW_TABLET_MODE, !cbas_ec.base_present);
+ if (!cbas_ec.base_present)
+ cbas_ec.base_folded = false;
+
+ dev_dbg(&pdev->dev, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, cbas_ec.base_folded);
+
+ input_report_switch(input, SW_TABLET_MODE,
+ !cbas_ec.base_present || cbas_ec.base_folded);
cbas_ec_set_input(input);
@@ -322,10 +330,9 @@ static int hammer_kbd_brightness_set_blocking(struct led_classdev *cdev,
static int hammer_register_leds(struct hid_device *hdev)
{
struct hammer_kbd_leds *kbd_backlight;
+ int error;
- kbd_backlight = devm_kzalloc(&hdev->dev,
- sizeof(*kbd_backlight),
- GFP_KERNEL);
+ kbd_backlight = kzalloc(sizeof(*kbd_backlight), GFP_KERNEL);
if (!kbd_backlight)
return -ENOMEM;
@@ -339,12 +346,31 @@ static int hammer_register_leds(struct hid_device *hdev)
/* Set backlight to 0% initially. */
hammer_kbd_brightness_set_blocking(&kbd_backlight->cdev, 0);
- return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
+ error = led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
+ if (error)
+ goto err_free_mem;
+
+ hid_set_drvdata(hdev, kbd_backlight);
+ return 0;
+
+err_free_mem:
+ kfree(kbd_backlight);
+ return error;
+}
+
+static void hammer_unregister_leds(struct hid_device *hdev)
+{
+ struct hammer_kbd_leds *kbd_backlight = hid_get_drvdata(hdev);
+
+ if (kbd_backlight) {
+ led_classdev_unregister(&kbd_backlight->cdev);
+ kfree(kbd_backlight);
+ }
}
#define HID_UP_GOOGLEVENDOR 0xffd10000
#define HID_VD_KBD_FOLDED 0x00000019
-#define WHISKERS_KBD_FOLDED (HID_UP_GOOGLEVENDOR | HID_VD_KBD_FOLDED)
+#define HID_USAGE_KBD_FOLDED (HID_UP_GOOGLEVENDOR | HID_VD_KBD_FOLDED)
/* HID usage for keyboard backlight (Alphanumeric display brightness) */
#define HID_AD_BRIGHTNESS 0x00140046
@@ -354,8 +380,7 @@ static int hammer_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
- usage->hid == WHISKERS_KBD_FOLDED) {
+ if (usage->hid == HID_USAGE_KBD_FOLDED) {
/*
* We do not want to have this usage mapped as it will get
* mixed in with "base attached" signal and delivered over
@@ -372,19 +397,19 @@ static int hammer_event(struct hid_device *hid, struct hid_field *field,
{
unsigned long flags;
- if (hid->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
- usage->hid == WHISKERS_KBD_FOLDED) {
+ if (usage->hid == HID_USAGE_KBD_FOLDED) {
spin_lock_irqsave(&cbas_ec_lock, flags);
- hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
- cbas_ec.base_present, value);
-
/*
- * We should not get event if base is detached, but in case
- * we happen to service HID and EC notifications out of order
- * let's still check the "base present" flag.
+ * If we are getting events from Whiskers, that means it
+ * is attached to the lid.
*/
- if (cbas_ec.input && cbas_ec.base_present) {
+ cbas_ec.base_present = true;
+ cbas_ec.base_folded = value;
+ hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, cbas_ec.base_folded);
+
+ if (cbas_ec.input) {
input_report_switch(cbas_ec.input,
SW_TABLET_MODE, value);
input_sync(cbas_ec.input);
@@ -397,33 +422,22 @@ static int hammer_event(struct hid_device *hid, struct hid_field *field,
return 0;
}
-static bool hammer_is_keyboard_interface(struct hid_device *hdev)
+static bool hammer_has_usage(struct hid_device *hdev, unsigned int report_type,
+ unsigned application, unsigned usage)
{
- struct hid_report_enum *re = &hdev->report_enum[HID_INPUT_REPORT];
- struct hid_report *report;
-
- list_for_each_entry(report, &re->report_list, list)
- if (report->application == HID_GD_KEYBOARD)
- return true;
-
- return false;
-}
-
-static bool hammer_has_backlight_control(struct hid_device *hdev)
-{
- struct hid_report_enum *re = &hdev->report_enum[HID_OUTPUT_REPORT];
+ struct hid_report_enum *re = &hdev->report_enum[report_type];
struct hid_report *report;
int i, j;
list_for_each_entry(report, &re->report_list, list) {
- if (report->application != HID_GD_KEYBOARD)
+ if (report->application != application)
continue;
for (i = 0; i < report->maxfield; i++) {
struct hid_field *field = report->field[i];
for (j = 0; j < field->maxusage; j++)
- if (field->usage[j].hid == HID_AD_BRIGHTNESS)
+ if (field->usage[j].hid == usage)
return true;
}
}
@@ -431,21 +445,23 @@ static bool hammer_has_backlight_control(struct hid_device *hdev)
return false;
}
+static bool hammer_has_folded_event(struct hid_device *hdev)
+{
+ return hammer_has_usage(hdev, HID_INPUT_REPORT,
+ HID_GD_KEYBOARD, HID_USAGE_KBD_FOLDED);
+}
+
+static bool hammer_has_backlight_control(struct hid_device *hdev)
+{
+ return hammer_has_usage(hdev, HID_OUTPUT_REPORT,
+ HID_GD_KEYBOARD, HID_AD_BRIGHTNESS);
+}
+
static int hammer_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int error;
- /*
- * We always want to poll for, and handle tablet mode events from
- * Whiskers, even when nobody has opened the input device. This also
- * prevents the hid core from dropping early tablet mode events from
- * the device.
- */
- if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
- hammer_is_keyboard_interface(hdev))
- hdev->quirks |= HID_QUIRK_ALWAYS_POLL;
-
error = hid_parse(hdev);
if (error)
return error;
@@ -454,6 +470,19 @@ static int hammer_probe(struct hid_device *hdev,
if (error)
return error;
+ /*
+ * We always want to poll for, and handle tablet mode events from
+ * devices that have a folded usage, even when nobody has opened the input
+ * device. This also prevents the hid core from dropping early tablet
+ * mode events from the device.
+ */
+ if (hammer_has_folded_event(hdev)) {
+ hdev->quirks |= HID_QUIRK_ALWAYS_POLL;
+ error = hid_hw_open(hdev);
+ if (error)
+ return error;
+ }
+
if (hammer_has_backlight_control(hdev)) {
error = hammer_register_leds(hdev);
if (error)
@@ -465,6 +494,36 @@ static int hammer_probe(struct hid_device *hdev,
return 0;
}
+static void hammer_remove(struct hid_device *hdev)
+{
+ unsigned long flags;
+
+ if (hammer_has_folded_event(hdev)) {
+ hid_hw_close(hdev);
+
+ /*
+ * If we are disconnecting, then most likely Whiskers is
+ * being removed. Even if it is not removed, without a proper
+ * keyboard we should not stay in clamshell mode.
+ *
+ * The reason for doing it here rather than waiting for a signal
+ * from the EC is that on some devices there is high leakage
+ * on the Whiskers pins and we do not detect disconnect reliably,
+ * resulting in devices being stuck in clamshell mode.
+ */
+ spin_lock_irqsave(&cbas_ec_lock, flags);
+ if (cbas_ec.input && cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input, SW_TABLET_MODE, 1);
+ input_sync(cbas_ec.input);
+ }
+ cbas_ec.base_present = false;
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ }
+
+ hammer_unregister_leds(hdev);
+
+ hid_hw_stop(hdev);
+}
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
@@ -487,6 +546,7 @@ static struct hid_driver hammer_driver = {
.name = "hammer",
.id_table = hammer_devices,
.probe = hammer_probe,
+ .remove = hammer_remove,
.input_mapping = hammer_input_mapping,
.event = hammer_event,
};
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 79a28fc91521..dddfca555df9 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -192,6 +192,9 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
if (desc->bLength == 0)
goto cleanup;
+ /* The pointer is not NULL when we resume from hibernation */
+ if (input_device->hid_desc != NULL)
+ kfree(input_device->hid_desc);
input_device->hid_desc = kmemdup(desc, desc->bLength, GFP_ATOMIC);
if (!input_device->hid_desc)
@@ -203,6 +206,9 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
}
+ /* The pointer is not NULL when we resume from hibernation */
+ if (input_device->report_desc != NULL)
+ kfree(input_device->report_desc);
input_device->report_desc = kzalloc(input_device->report_desc_size,
GFP_ATOMIC);
@@ -342,6 +348,8 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
struct mousevsc_prt_msg *request;
struct mousevsc_prt_msg *response;
+ reinit_completion(&input_dev->wait_event);
+
request = &input_dev->protocol_req;
memset(request, 0, sizeof(struct mousevsc_prt_msg));
@@ -541,6 +549,30 @@ static int mousevsc_remove(struct hv_device *dev)
return 0;
}
+static int mousevsc_suspend(struct hv_device *dev)
+{
+ vmbus_close(dev->channel);
+
+ return 0;
+}
+
+static int mousevsc_resume(struct hv_device *dev)
+{
+ int ret;
+
+ ret = vmbus_open(dev->channel,
+ INPUTVSC_SEND_RING_BUFFER_SIZE,
+ INPUTVSC_RECV_RING_BUFFER_SIZE,
+ NULL, 0,
+ mousevsc_on_channel_callback,
+ dev);
+ if (ret)
+ return ret;
+
+ ret = mousevsc_connect_to_vsp(dev);
+ return ret;
+}
+
static const struct hv_vmbus_device_id id_table[] = {
/* Mouse guid */
{ HV_MOUSE_GUID, },
@@ -554,6 +586,8 @@ static struct hv_driver mousevsc_drv = {
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
+ .suspend = mousevsc_suspend,
+ .resume = mousevsc_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 447e8db21174..7e1689ef35f5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -573,6 +573,7 @@
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a 0x1f4a
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -749,6 +750,10 @@
#define USB_DEVICE_ID_LOGITECH_DUAL_ACTION 0xc216
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219
+#define USB_DEVICE_ID_LOGITECH_G15_LCD 0xc222
+#define USB_DEVICE_ID_LOGITECH_G15_V2_LCD 0xc227
+#define USB_DEVICE_ID_LOGITECH_G510 0xc22d
+#define USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO 0xc22e
#define USB_DEVICE_ID_LOGITECH_G29_WHEEL 0xc24f
#define USB_DEVICE_ID_LOGITECH_G920_WHEEL 0xc262
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
@@ -959,6 +964,7 @@
#define I2C_VENDOR_ID_RAYDIUM 0x2386
#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
+#define I2C_PRODUCT_ID_RAYDIUM_3118 0x3118
#define USB_VENDOR_ID_RAZER 0x1532
#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
new file mode 100644
index 000000000000..8a9268a5c66a
--- /dev/null
+++ b/drivers/hid/hid-lg-g15.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for gaming keys on Logitech gaming keyboards (such as the G15)
+ *
+ * Copyright (c) 2019 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/usb.h>
+#include <linux/wait.h>
+
+#include "hid-ids.h"
+
+#define LG_G15_TRANSFER_BUF_SIZE 20
+
+#define LG_G15_FEATURE_REPORT 0x02
+
+#define LG_G510_FEATURE_M_KEYS_LEDS 0x04
+#define LG_G510_FEATURE_BACKLIGHT_RGB 0x05
+#define LG_G510_FEATURE_POWER_ON_RGB 0x06
+
+enum lg_g15_model {
+ LG_G15,
+ LG_G15_V2,
+ LG_G510,
+ LG_G510_USB_AUDIO,
+};
+
+enum lg_g15_led_type {
+ LG_G15_KBD_BRIGHTNESS,
+ LG_G15_LCD_BRIGHTNESS,
+ LG_G15_BRIGHTNESS_MAX,
+ LG_G15_MACRO_PRESET1 = 2,
+ LG_G15_MACRO_PRESET2,
+ LG_G15_MACRO_PRESET3,
+ LG_G15_MACRO_RECORD,
+ LG_G15_LED_MAX
+};
+
+struct lg_g15_led {
+ struct led_classdev cdev;
+ enum led_brightness brightness;
+ enum lg_g15_led_type led;
+ u8 red, green, blue;
+};
+
+struct lg_g15_data {
+ /* Must be first for proper dma alignment */
+ u8 transfer_buf[LG_G15_TRANSFER_BUF_SIZE];
+ /* Protects the transfer_buf and led brightness */
+ struct mutex mutex;
+ struct work_struct work;
+ struct input_dev *input;
+ struct hid_device *hdev;
+ enum lg_g15_model model;
+ struct lg_g15_led leds[LG_G15_LED_MAX];
+ bool game_mode_enabled;
+};
+
+/******** G15 and G15 v2 LED functions ********/
+
+static int lg_g15_update_led_brightness(struct lg_g15_data *g15)
+{
+ int ret;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G15_FEATURE_REPORT,
+ g15->transfer_buf, 4,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 4) {
+ hid_err(g15->hdev, "Error getting LED brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ g15->leds[LG_G15_KBD_BRIGHTNESS].brightness = g15->transfer_buf[1];
+ g15->leds[LG_G15_LCD_BRIGHTNESS].brightness = g15->transfer_buf[2];
+
+ g15->leds[LG_G15_MACRO_PRESET1].brightness =
+ !(g15->transfer_buf[3] & 0x01);
+ g15->leds[LG_G15_MACRO_PRESET2].brightness =
+ !(g15->transfer_buf[3] & 0x02);
+ g15->leds[LG_G15_MACRO_PRESET3].brightness =
+ !(g15->transfer_buf[3] & 0x04);
+ g15->leds[LG_G15_MACRO_RECORD].brightness =
+ !(g15->transfer_buf[3] & 0x08);
+
+ return 0;
+}
+
+static enum led_brightness lg_g15_led_get(struct led_classdev *led_cdev)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ enum led_brightness brightness;
+
+ mutex_lock(&g15->mutex);
+ lg_g15_update_led_brightness(g15);
+ brightness = g15->leds[g15_led->led].brightness;
+ mutex_unlock(&g15->mutex);
+
+ return brightness;
+}
+
+static int lg_g15_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ u8 val, mask = 0;
+ int i, ret;
+
+ /* Ignore LED off on unregister / keyboard unplug */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return 0;
+
+ mutex_lock(&g15->mutex);
+
+ g15->transfer_buf[0] = LG_G15_FEATURE_REPORT;
+ g15->transfer_buf[3] = 0;
+
+ if (g15_led->led < LG_G15_BRIGHTNESS_MAX) {
+ g15->transfer_buf[1] = g15_led->led + 1;
+ g15->transfer_buf[2] = brightness << (g15_led->led * 4);
+ } else {
+ for (i = LG_G15_MACRO_PRESET1; i < LG_G15_LED_MAX; i++) {
+ if (i == g15_led->led)
+ val = brightness;
+ else
+ val = g15->leds[i].brightness;
+
+ if (val)
+ mask |= 1 << (i - LG_G15_MACRO_PRESET1);
+ }
+
+ g15->transfer_buf[1] = 0x04;
+ g15->transfer_buf[2] = ~mask;
+ }
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G15_FEATURE_REPORT,
+ g15->transfer_buf, 4,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret == 4) {
+ /* Success */
+ g15_led->brightness = brightness;
+ ret = 0;
+ } else {
+ hid_err(g15->hdev, "Error setting LED brightness: %d\n", ret);
+ ret = (ret < 0) ? ret : -EIO;
+ }
+
+ mutex_unlock(&g15->mutex);
+
+ return ret;
+}
+
+static void lg_g15_leds_changed_work(struct work_struct *work)
+{
+ struct lg_g15_data *g15 = container_of(work, struct lg_g15_data, work);
+ enum led_brightness old_brightness[LG_G15_BRIGHTNESS_MAX];
+ enum led_brightness brightness[LG_G15_BRIGHTNESS_MAX];
+ int i, ret;
+
+ mutex_lock(&g15->mutex);
+ for (i = 0; i < LG_G15_BRIGHTNESS_MAX; i++)
+ old_brightness[i] = g15->leds[i].brightness;
+
+ ret = lg_g15_update_led_brightness(g15);
+
+ for (i = 0; i < LG_G15_BRIGHTNESS_MAX; i++)
+ brightness[i] = g15->leds[i].brightness;
+ mutex_unlock(&g15->mutex);
+
+ if (ret)
+ return;
+
+ for (i = 0; i < LG_G15_BRIGHTNESS_MAX; i++) {
+ if (brightness[i] == old_brightness[i])
+ continue;
+
+ led_classdev_notify_brightness_hw_changed(&g15->leds[i].cdev,
+ brightness[i]);
+ }
+}
+
+/******** G510 LED functions ********/
+
+static int lg_g510_get_initial_led_brightness(struct lg_g15_data *g15, int i)
+{
+ int ret, high;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G510_FEATURE_BACKLIGHT_RGB + i,
+ g15->transfer_buf, 4,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 4) {
+ hid_err(g15->hdev, "Error getting LED brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ high = max3(g15->transfer_buf[1], g15->transfer_buf[2],
+ g15->transfer_buf[3]);
+
+ if (high) {
+ g15->leds[i].red =
+ DIV_ROUND_CLOSEST(g15->transfer_buf[1] * 255, high);
+ g15->leds[i].green =
+ DIV_ROUND_CLOSEST(g15->transfer_buf[2] * 255, high);
+ g15->leds[i].blue =
+ DIV_ROUND_CLOSEST(g15->transfer_buf[3] * 255, high);
+ g15->leds[i].brightness = high;
+ } else {
+ g15->leds[i].red = 255;
+ g15->leds[i].green = 255;
+ g15->leds[i].blue = 255;
+ g15->leds[i].brightness = 0;
+ }
+
+ return 0;
+}
+
+/* Must be called with g15->mutex locked */
+static int lg_g510_kbd_led_write(struct lg_g15_data *g15,
+ struct lg_g15_led *g15_led,
+ enum led_brightness brightness)
+{
+ int ret;
+
+ g15->transfer_buf[0] = 5 + g15_led->led;
+ g15->transfer_buf[1] =
+ DIV_ROUND_CLOSEST(g15_led->red * brightness, 255);
+ g15->transfer_buf[2] =
+ DIV_ROUND_CLOSEST(g15_led->green * brightness, 255);
+ g15->transfer_buf[3] =
+ DIV_ROUND_CLOSEST(g15_led->blue * brightness, 255);
+
+ ret = hid_hw_raw_request(g15->hdev,
+ LG_G510_FEATURE_BACKLIGHT_RGB + g15_led->led,
+ g15->transfer_buf, 4,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret == 4) {
+ /* Success */
+ g15_led->brightness = brightness;
+ ret = 0;
+ } else {
+ hid_err(g15->hdev, "Error setting LED brightness: %d\n", ret);
+ ret = (ret < 0) ? ret : -EIO;
+ }
+
+ return ret;
+}
+
+static int lg_g510_kbd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ int ret;
+
+ /* Ignore LED off on unregister / keyboard unplug */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return 0;
+
+ mutex_lock(&g15->mutex);
+ ret = lg_g510_kbd_led_write(g15, g15_led, brightness);
+ mutex_unlock(&g15->mutex);
+
+ return ret;
+}
+
+static enum led_brightness lg_g510_kbd_led_get(struct led_classdev *led_cdev)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+
+ return g15_led->brightness;
+}
+
+static ssize_t color_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ unsigned long value;
+ int ret;
+
+ if (count < 7 || (count == 8 && buf[7] != '\n') || count > 8)
+ return -EINVAL;
+
+ if (buf[0] != '#')
+ return -EINVAL;
+
+ ret = kstrtoul(buf + 1, 16, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&g15->mutex);
+ g15_led->red = (value & 0xff0000) >> 16;
+ g15_led->green = (value & 0x00ff00) >> 8;
+ g15_led->blue = (value & 0x0000ff);
+ ret = lg_g510_kbd_led_write(g15, g15_led, g15_led->brightness);
+ mutex_unlock(&g15->mutex);
+
+ return (ret < 0) ? ret : count;
+}
+
+static ssize_t color_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ ssize_t ret;
+
+ mutex_lock(&g15->mutex);
+ ret = sprintf(buf, "#%02x%02x%02x\n",
+ g15_led->red, g15_led->green, g15_led->blue);
+ mutex_unlock(&g15->mutex);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(color);
+
+static struct attribute *lg_g510_kbd_led_attrs[] = {
+ &dev_attr_color.attr,
+ NULL,
+};
+
+static const struct attribute_group lg_g510_kbd_led_group = {
+ .attrs = lg_g510_kbd_led_attrs,
+};
+
+static const struct attribute_group *lg_g510_kbd_led_groups[] = {
+ &lg_g510_kbd_led_group,
+ NULL,
+};
+
+static void lg_g510_leds_sync_work(struct work_struct *work)
+{
+ struct lg_g15_data *g15 = container_of(work, struct lg_g15_data, work);
+
+ mutex_lock(&g15->mutex);
+ lg_g510_kbd_led_write(g15, &g15->leds[LG_G15_KBD_BRIGHTNESS],
+ g15->leds[LG_G15_KBD_BRIGHTNESS].brightness);
+ mutex_unlock(&g15->mutex);
+}
+
+static int lg_g510_update_mkey_led_brightness(struct lg_g15_data *g15)
+{
+ int ret;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G510_FEATURE_M_KEYS_LEDS,
+ g15->transfer_buf, 2,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 2) {
+ hid_err(g15->hdev, "Error getting LED brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ g15->leds[LG_G15_MACRO_PRESET1].brightness =
+ !!(g15->transfer_buf[1] & 0x80);
+ g15->leds[LG_G15_MACRO_PRESET2].brightness =
+ !!(g15->transfer_buf[1] & 0x40);
+ g15->leds[LG_G15_MACRO_PRESET3].brightness =
+ !!(g15->transfer_buf[1] & 0x20);
+ g15->leds[LG_G15_MACRO_RECORD].brightness =
+ !!(g15->transfer_buf[1] & 0x10);
+
+ return 0;
+}
+
+static enum led_brightness lg_g510_mkey_led_get(struct led_classdev *led_cdev)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ enum led_brightness brightness;
+
+ mutex_lock(&g15->mutex);
+ lg_g510_update_mkey_led_brightness(g15);
+ brightness = g15->leds[g15_led->led].brightness;
+ mutex_unlock(&g15->mutex);
+
+ return brightness;
+}
+
+static int lg_g510_mkey_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ u8 val, mask = 0;
+ int i, ret;
+
+ /* Ignore LED off on unregister / keyboard unplug */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return 0;
+
+ mutex_lock(&g15->mutex);
+
+ for (i = LG_G15_MACRO_PRESET1; i < LG_G15_LED_MAX; i++) {
+ if (i == g15_led->led)
+ val = brightness;
+ else
+ val = g15->leds[i].brightness;
+
+ if (val)
+ mask |= 0x80 >> (i - LG_G15_MACRO_PRESET1);
+ }
+
+ g15->transfer_buf[0] = LG_G510_FEATURE_M_KEYS_LEDS;
+ g15->transfer_buf[1] = mask;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G510_FEATURE_M_KEYS_LEDS,
+ g15->transfer_buf, 2,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret == 2) {
+ /* Success */
+ g15_led->brightness = brightness;
+ ret = 0;
+ } else {
+ hid_err(g15->hdev, "Error setting LED brightness: %d\n", ret);
+ ret = (ret < 0) ? ret : -EIO;
+ }
+
+ mutex_unlock(&g15->mutex);
+
+ return ret;
+}
+
+/******** Generic LED functions ********/
+static int lg_g15_get_initial_led_brightness(struct lg_g15_data *g15)
+{
+ int ret;
+
+ switch (g15->model) {
+ case LG_G15:
+ case LG_G15_V2:
+ return lg_g15_update_led_brightness(g15);
+ case LG_G510:
+ case LG_G510_USB_AUDIO:
+ ret = lg_g510_get_initial_led_brightness(g15, 0);
+ if (ret)
+ return ret;
+
+ ret = lg_g510_get_initial_led_brightness(g15, 1);
+ if (ret)
+ return ret;
+
+ return lg_g510_update_mkey_led_brightness(g15);
+ }
+ return -EINVAL; /* Never reached */
+}
+
+/******** Input functions ********/
+
+/* On the G15 Mark I Logitech has been quite creative with which bit is what */
+static int lg_g15_event(struct lg_g15_data *g15, u8 *data, int size)
+{
+ int i, val;
+
+ /* G1 - G6 */
+ for (i = 0; i < 6; i++) {
+ val = data[i + 1] & (1 << i);
+ input_report_key(g15->input, KEY_MACRO1 + i, val);
+ }
+ /* G7 - G12 */
+ for (i = 0; i < 6; i++) {
+ val = data[i + 2] & (1 << i);
+ input_report_key(g15->input, KEY_MACRO7 + i, val);
+ }
+ /* G13 - G17 */
+ for (i = 0; i < 5; i++) {
+ val = data[i + 1] & (4 << i);
+ input_report_key(g15->input, KEY_MACRO13 + i, val);
+ }
+ /* G18 */
+ input_report_key(g15->input, KEY_MACRO18, data[8] & 0x40);
+
+ /* M1 - M3 */
+ for (i = 0; i < 3; i++) {
+ val = data[i + 6] & (1 << i);
+ input_report_key(g15->input, KEY_MACRO_PRESET1 + i, val);
+ }
+ /* MR */
+ input_report_key(g15->input, KEY_MACRO_RECORD_START, data[7] & 0x40);
+
+ /* Most left (round) button below the LCD */
+ input_report_key(g15->input, KEY_KBD_LCD_MENU1, data[8] & 0x80);
+ /* 4 other buttons below the LCD */
+ for (i = 0; i < 4; i++) {
+ val = data[i + 2] & 0x80;
+ input_report_key(g15->input, KEY_KBD_LCD_MENU2 + i, val);
+ }
+
+ /* Backlight cycle button pressed? */
+ if (data[1] & 0x80)
+ schedule_work(&g15->work);
+
+ input_sync(g15->input);
+ return 0;
+}
+
+static int lg_g15_v2_event(struct lg_g15_data *g15, u8 *data, int size)
+{
+ int i, val;
+
+ /* G1 - G6 */
+ for (i = 0; i < 6; i++) {
+ val = data[1] & (1 << i);
+ input_report_key(g15->input, KEY_MACRO1 + i, val);
+ }
+
+ /* M1 - M3 + MR */
+ input_report_key(g15->input, KEY_MACRO_PRESET1, data[1] & 0x40);
+ input_report_key(g15->input, KEY_MACRO_PRESET2, data[1] & 0x80);
+ input_report_key(g15->input, KEY_MACRO_PRESET3, data[2] & 0x20);
+ input_report_key(g15->input, KEY_MACRO_RECORD_START, data[2] & 0x40);
+
+ /* Round button to the left of the LCD */
+ input_report_key(g15->input, KEY_KBD_LCD_MENU1, data[2] & 0x80);
+ /* 4 buttons below the LCD */
+ for (i = 0; i < 4; i++) {
+ val = data[2] & (2 << i);
+ input_report_key(g15->input, KEY_KBD_LCD_MENU2 + i, val);
+ }
+
+ /* Backlight cycle button pressed? */
+ if (data[2] & 0x01)
+ schedule_work(&g15->work);
+
+ input_sync(g15->input);
+ return 0;
+}
+
+static int lg_g510_event(struct lg_g15_data *g15, u8 *data, int size)
+{
+ bool game_mode_enabled;
+ int i, val;
+
+ /* G1 - G18 */
+ for (i = 0; i < 18; i++) {
+ val = data[i / 8 + 1] & (1 << (i % 8));
+ input_report_key(g15->input, KEY_MACRO1 + i, val);
+ }
+
+ /* Game mode on/off slider */
+ game_mode_enabled = data[3] & 0x04;
+ if (game_mode_enabled != g15->game_mode_enabled) {
+ if (game_mode_enabled)
+ hid_info(g15->hdev, "Game Mode enabled, Windows (super) key is disabled\n");
+ else
+ hid_info(g15->hdev, "Game Mode disabled\n");
+ g15->game_mode_enabled = game_mode_enabled;
+ }
+
+ /* M1 - M3 */
+ for (i = 0; i < 3; i++) {
+ val = data[3] & (0x10 << i);
+ input_report_key(g15->input, KEY_MACRO_PRESET1 + i, val);
+ }
+ /* MR */
+ input_report_key(g15->input, KEY_MACRO_RECORD_START, data[3] & 0x80);
+
+ /* LCD menu keys */
+ for (i = 0; i < 5; i++) {
+ val = data[4] & (1 << i);
+ input_report_key(g15->input, KEY_KBD_LCD_MENU1 + i, val);
+ }
+
+ /* Headphone Mute */
+ input_report_key(g15->input, KEY_MUTE, data[4] & 0x20);
+ /* Microphone Mute */
+ input_report_key(g15->input, KEY_F20, data[4] & 0x40);
+
+ input_sync(g15->input);
+ return 0;
+}
+
+static int lg_g510_leds_event(struct lg_g15_data *g15, u8 *data, int size)
+{
+ bool backlight_disabled;
+
+ /*
+ * The G510 ignores backlight updates when the backlight is turned off
+ * through the light toggle button on the keyboard. To work around this,
+ * we queue a workitem to sync values when the backlight is turned on.
+ */
+ backlight_disabled = data[1] & 0x04;
+ if (!backlight_disabled)
+ schedule_work(&g15->work);
+
+ return 0;
+}
+
+static int lg_g15_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct lg_g15_data *g15 = hid_get_drvdata(hdev);
+
+ if (!g15)
+ return 0;
+
+ switch (g15->model) {
+ case LG_G15:
+ if (data[0] == 0x02 && size == 9)
+ return lg_g15_event(g15, data, size);
+ break;
+ case LG_G15_V2:
+ if (data[0] == 0x02 && size == 5)
+ return lg_g15_v2_event(g15, data, size);
+ break;
+ case LG_G510:
+ case LG_G510_USB_AUDIO:
+ if (data[0] == 0x03 && size == 5)
+ return lg_g510_event(g15, data, size);
+ if (data[0] == 0x04 && size == 2)
+ return lg_g510_leds_event(g15, data, size);
+ break;
+ }
+
+ return 0;
+}
+
+static int lg_g15_input_open(struct input_dev *dev)
+{
+ struct hid_device *hdev = input_get_drvdata(dev);
+
+ return hid_hw_open(hdev);
+}
+
+static void lg_g15_input_close(struct input_dev *dev)
+{
+ struct hid_device *hdev = input_get_drvdata(dev);
+
+ hid_hw_close(hdev);
+}
+
+static int lg_g15_register_led(struct lg_g15_data *g15, int i)
+{
+ const char * const led_names[] = {
+ "g15::kbd_backlight",
+ "g15::lcd_backlight",
+ "g15::macro_preset1",
+ "g15::macro_preset2",
+ "g15::macro_preset3",
+ "g15::macro_record",
+ };
+
+ g15->leds[i].led = i;
+ g15->leds[i].cdev.name = led_names[i];
+
+ switch (g15->model) {
+ case LG_G15:
+ case LG_G15_V2:
+ g15->leds[i].cdev.brightness_set_blocking = lg_g15_led_set;
+ g15->leds[i].cdev.brightness_get = lg_g15_led_get;
+ if (i < LG_G15_BRIGHTNESS_MAX) {
+ g15->leds[i].cdev.flags = LED_BRIGHT_HW_CHANGED;
+ g15->leds[i].cdev.max_brightness = 2;
+ } else {
+ g15->leds[i].cdev.max_brightness = 1;
+ }
+ break;
+ case LG_G510:
+ case LG_G510_USB_AUDIO:
+ switch (i) {
+ case LG_G15_LCD_BRIGHTNESS:
+ /*
+ * The G510 does not have a separate LCD brightness,
+ * but it does have a separate power-on (reset) value.
+ */
+ g15->leds[i].cdev.name = "g15::power_on_backlight_val";
+ /* fall through */
+ case LG_G15_KBD_BRIGHTNESS:
+ g15->leds[i].cdev.brightness_set_blocking =
+ lg_g510_kbd_led_set;
+ g15->leds[i].cdev.brightness_get =
+ lg_g510_kbd_led_get;
+ g15->leds[i].cdev.max_brightness = 255;
+ g15->leds[i].cdev.groups = lg_g510_kbd_led_groups;
+ break;
+ default:
+ g15->leds[i].cdev.brightness_set_blocking =
+ lg_g510_mkey_led_set;
+ g15->leds[i].cdev.brightness_get =
+ lg_g510_mkey_led_get;
+ g15->leds[i].cdev.max_brightness = 1;
+ }
+ break;
+ }
+
+ return devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
+}
+
+static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ u8 gkeys_settings_output_report = 0;
+ u8 gkeys_settings_feature_report = 0;
+ struct hid_report_enum *rep_enum;
+ unsigned int connect_mask = 0;
+ bool has_ff000000 = false;
+ struct lg_g15_data *g15;
+ struct input_dev *input;
+ struct hid_report *rep;
+ int ret, i, gkeys = 0;
+
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ /*
+ * Some models have multiple interfaces; we want the interface with
+ * the 0xff000000 application input report.
+ */
+ rep_enum = &hdev->report_enum[HID_INPUT_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ if (rep->application == 0xff000000)
+ has_ff000000 = true;
+ }
+ if (!has_ff000000)
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ g15 = devm_kzalloc(&hdev->dev, sizeof(*g15), GFP_KERNEL);
+ if (!g15)
+ return -ENOMEM;
+
+ mutex_init(&g15->mutex);
+
+ input = devm_input_allocate_device(&hdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ g15->hdev = hdev;
+ g15->model = id->driver_data;
+ hid_set_drvdata(hdev, (void *)g15);
+
+ switch (g15->model) {
+ case LG_G15:
+ INIT_WORK(&g15->work, lg_g15_leds_changed_work);
+ /*
+ * The G15 and G15 v2 use a separate usb-device (on a builtin
+ * hub) which emulates a keyboard for the F1 - F12 emulation
+ * on the G-keys, which we disable, rendering the emulated kbd
+ * non-functional, so we do not let hid-input connect.
+ */
+ connect_mask = HID_CONNECT_HIDRAW;
+ gkeys_settings_output_report = 0x02;
+ gkeys = 18;
+ break;
+ case LG_G15_V2:
+ INIT_WORK(&g15->work, lg_g15_leds_changed_work);
+ connect_mask = HID_CONNECT_HIDRAW;
+ gkeys_settings_output_report = 0x02;
+ gkeys = 6;
+ break;
+ case LG_G510:
+ case LG_G510_USB_AUDIO:
+ INIT_WORK(&g15->work, lg_g510_leds_sync_work);
+ connect_mask = HID_CONNECT_HIDINPUT | HID_CONNECT_HIDRAW;
+ gkeys_settings_feature_report = 0x01;
+ gkeys = 18;
+ break;
+ }
+
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret)
+ return ret;
+
+ /* Tell the keyboard to stop sending F1-F12 + 1-6 for G1 - G18 */
+ if (gkeys_settings_output_report) {
+ g15->transfer_buf[0] = gkeys_settings_output_report;
+ memset(g15->transfer_buf + 1, 0, gkeys);
+ /*
+ * The kbd ignores our output report if we do not queue
+ * an URB on the USB input endpoint first...
+ */
+ ret = hid_hw_open(hdev);
+ if (ret)
+ goto error_hw_stop;
+ ret = hid_hw_output_report(hdev, g15->transfer_buf, gkeys + 1);
+ hid_hw_close(hdev);
+ }
+
+ if (gkeys_settings_feature_report) {
+ g15->transfer_buf[0] = gkeys_settings_feature_report;
+ memset(g15->transfer_buf + 1, 0, gkeys);
+ ret = hid_hw_raw_request(g15->hdev,
+ gkeys_settings_feature_report,
+ g15->transfer_buf, gkeys + 1,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ }
+
+ if (ret < 0) {
+ hid_err(hdev, "Error disabling keyboard emulation for the G-keys\n");
+ goto error_hw_stop;
+ }
+
+ /* Get initial brightness levels */
+ ret = lg_g15_get_initial_led_brightness(g15);
+ if (ret)
+ goto error_hw_stop;
+
+ /* Setup and register input device */
+ input->name = "Logitech Gaming Keyboard Gaming Keys";
+ input->phys = hdev->phys;
+ input->uniq = hdev->uniq;
+ input->id.bustype = hdev->bus;
+ input->id.vendor = hdev->vendor;
+ input->id.product = hdev->product;
+ input->id.version = hdev->version;
+ input->dev.parent = &hdev->dev;
+ input->open = lg_g15_input_open;
+ input->close = lg_g15_input_close;
+
+ /* G-keys */
+ for (i = 0; i < gkeys; i++)
+ input_set_capability(input, EV_KEY, KEY_MACRO1 + i);
+
+ /* M1 - M3 and MR keys */
+ for (i = 0; i < 3; i++)
+ input_set_capability(input, EV_KEY, KEY_MACRO_PRESET1 + i);
+ input_set_capability(input, EV_KEY, KEY_MACRO_RECORD_START);
+
+ /* Keys below the LCD, intended for controlling a menu on the LCD */
+ for (i = 0; i < 5; i++)
+ input_set_capability(input, EV_KEY, KEY_KBD_LCD_MENU1 + i);
+
+ /*
+ * On the G510 only report headphone and mic mute keys when *not* using
+ * the builtin USB audio device. When the builtin audio is used these
+ * keys directly toggle mute (and the LEDs) on/off.
+ */
+ if (g15->model == LG_G510) {
+ input_set_capability(input, EV_KEY, KEY_MUTE);
+ /* Userspace expects F20 for micmute */
+ input_set_capability(input, EV_KEY, KEY_F20);
+ }
+
+ g15->input = input;
+ input_set_drvdata(input, hdev);
+
+ ret = input_register_device(input);
+ if (ret)
+ goto error_hw_stop;
+
+ /* Register LED devices */
+ for (i = 0; i < LG_G15_LED_MAX; i++) {
+ ret = lg_g15_register_led(g15, i);
+ if (ret)
+ goto error_hw_stop;
+ }
+
+ return 0;
+
+error_hw_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static const struct hid_device_id lg_g15_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G15_LCD),
+ .driver_data = LG_G15 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G15_V2_LCD),
+ .driver_data = LG_G15_V2 },
+ /* G510 without a headset plugged in */
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G510),
+ .driver_data = LG_G510 },
+ /* G510 with headset plugged in / with extra USB audio interface */
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO),
+ .driver_data = LG_G510_USB_AUDIO },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, lg_g15_devices);
+
+static struct hid_driver lg_g15_driver = {
+ .name = "lg-g15",
+ .id_table = lg_g15_devices,
+ .raw_event = lg_g15_raw_event,
+ .probe = lg_g15_probe,
+};
+module_hid_driver(lg_g15_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 8e91e2f06cb4..cd9193078525 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1102,6 +1102,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp,
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS,
NULL, 0, &response);
+ /* Ignore these intermittent errors */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
if (ret > 0) {
hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
__func__, ret);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index c50bcd967d99..d1b39c29e353 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
@@ -419,13 +420,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_LCPOWER)
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
#endif
-#if IS_ENABLED(CONFIG_HID_LED)
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
-#endif
#if IS_ENABLED(CONFIG_HID_LENOVO)
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 7c6abd7e0979..9ce22acdfaca 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -744,7 +744,8 @@ static void rmi_remove(struct hid_device *hdev)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
- if (hdata->device_flags & RMI_DEVICE) {
+ if ((hdata->device_flags & RMI_DEVICE)
+ && test_bit(RMI_STARTED, &hdata->flags)) {
clear_bit(RMI_STARTED, &hdata->flags);
cancel_work_sync(&hdata->reset_work);
rmi_unregister_transport_device(&hdata->xport);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index bbc6ec1aa5cb..c3fc0ceb8096 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -197,15 +197,15 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
}
if (count > HID_MAX_BUFFER_SIZE) {
- printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
- task_pid_nr(current));
+ hid_warn(dev, "pid %d passed too large report\n",
+ task_pid_nr(current));
ret = -EINVAL;
goto out;
}
if (count < 2) {
- printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
- task_pid_nr(current));
+ hid_warn(dev, "pid %d passed too short report\n",
+ task_pid_nr(current));
ret = -EINVAL;
goto out;
}
@@ -468,9 +468,7 @@ static const struct file_operations hidraw_ops = {
.release = hidraw_release,
.unlocked_ioctl = hidraw_ioctl,
.fasync = hidraw_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hidraw_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
@@ -597,7 +595,7 @@ int __init hidraw_init(void)
if (result < 0)
goto error_class;
- printk(KERN_INFO "hidraw: raw HID events driver (C) Jiri Kosina\n");
+ pr_info("raw HID events driver (C) Jiri Kosina\n");
out:
return result;
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 04c088131e04..a358e61fbc82 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -48,6 +48,7 @@
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
+#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
/* flags */
#define I2C_HID_STARTED 0
@@ -157,8 +158,6 @@ struct i2c_hid {
bool irq_wake_enabled;
struct mutex reset_lock;
-
- unsigned long sleep_delay;
};
static const struct i2c_hid_quirks {
@@ -170,8 +169,12 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_BOGUS_IRQ },
+ { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
+ I2C_HID_QUIRK_RESET_ON_RESUME },
{ 0, 0 }
};
@@ -1212,8 +1215,15 @@ static int i2c_hid_resume(struct device *dev)
* solves "incomplete reports" on Raydium devices 2386:3118 and
* 2386:4B33 and fixes various SIS touchscreens no longer sending
* data after a suspend/resume.
+ *
+ * However, some ALPS touchpads generate an IRQ storm without a reset,
+ * so let's still reset them here.
*/
- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME)
+ ret = i2c_hid_hwreset(client);
+ else
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
if (ret)
return ret;
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index c6c9ac09dac3..30a91d068306 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -402,7 +402,7 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
* @dev: ISHTP device instance
* @disconnect_req: disconnect request structure
*
- * Disconnect request bus message from the fw. Send diconnect response.
+ * Disconnect request bus message from the fw. Send disconnect response.
*/
static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
struct hbm_client_connect_request *disconnect_req)
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 1f9bc4483465..e421cdf2d1a4 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -854,13 +854,6 @@ ret_unlock:
return r;
}
-#ifdef CONFIG_COMPAT
-static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations hiddev_fops = {
.owner = THIS_MODULE,
.read = hiddev_read,
@@ -870,9 +863,7 @@ static const struct file_operations hiddev_fops = {
.release = hiddev_release,
.unlocked_ioctl = hiddev_ioctl,
.fasync = hiddev_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hiddev_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index a1eec7177c2d..94daf8240c95 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -9,4 +9,5 @@ CFLAGS_hv_balloon.o = -I$(src)
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o hv_trace.o
+hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6e4c015783ff..74e77de89b4f 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -14,6 +14,7 @@
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
@@ -40,29 +41,30 @@ EXPORT_SYMBOL_GPL(vmbus_connection);
__u32 vmbus_proto_version;
EXPORT_SYMBOL_GPL(vmbus_proto_version);
-static __u32 vmbus_get_next_version(__u32 current_version)
-{
- switch (current_version) {
- case (VERSION_WIN7):
- return VERSION_WS2008;
-
- case (VERSION_WIN8):
- return VERSION_WIN7;
-
- case (VERSION_WIN8_1):
- return VERSION_WIN8;
+/*
+ * Table of VMBus versions listed from newest to oldest.
+ */
+static __u32 vmbus_versions[] = {
+ VERSION_WIN10_V5_2,
+ VERSION_WIN10_V5_1,
+ VERSION_WIN10_V5,
+ VERSION_WIN10_V4_1,
+ VERSION_WIN10,
+ VERSION_WIN8_1,
+ VERSION_WIN8,
+ VERSION_WIN7,
+ VERSION_WS2008
+};
- case (VERSION_WIN10):
- return VERSION_WIN8_1;
+/*
+ * Maximal VMBus protocol version guests can negotiate. Useful to cap the
+ * VMBus version for testing and debugging purpose.
+ */
+static uint max_version = VERSION_WIN10_V5_2;
- case (VERSION_WIN10_V5):
- return VERSION_WIN10;
-
- case (VERSION_WS2008):
- default:
- return VERSION_INVAL;
- }
-}
+module_param(max_version, uint, S_IRUGO);
+MODULE_PARM_DESC(max_version,
+ "Maximal VMBus protocol version which can be negotiated");
int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
{
@@ -80,12 +82,12 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
msg->vmbus_version_requested = version;
/*
- * VMBus protocol 5.0 (VERSION_WIN10_V5) requires that we must use
- * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
+ * VMBus protocol 5.0 (VERSION_WIN10_V5) and higher require that we must
+ * use VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
* and for subsequent messages, we must use the Message Connection ID
* field in the host-returned Version Response Message. And, with
- * VERSION_WIN10_V5, we don't use msg->interrupt_page, but we tell
- * the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for
+ * VERSION_WIN10_V5 and higher, we don't use msg->interrupt_page, but we
+ * tell the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for
* compatibility.
*
* On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
@@ -169,8 +171,8 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
*/
int vmbus_connect(void)
{
- int ret = 0;
struct vmbus_channel_msginfo *msginfo = NULL;
+ int i, ret = 0;
__u32 version;
/* Initialize the vmbus connection */
@@ -206,7 +208,7 @@ int vmbus_connect(void)
* abstraction stuff
*/
vmbus_connection.int_page =
- (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0);
+ (void *)hv_alloc_hyperv_zeroed_page();
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;
@@ -215,14 +217,14 @@ int vmbus_connect(void)
vmbus_connection.recv_int_page = vmbus_connection.int_page;
vmbus_connection.send_int_page =
(void *)((unsigned long)vmbus_connection.int_page +
- (PAGE_SIZE >> 1));
+ (HV_HYP_PAGE_SIZE >> 1));
/*
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- vmbus_connection.monitor_pages[0] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
- vmbus_connection.monitor_pages[1] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
+ vmbus_connection.monitor_pages[0] = (void *)hv_alloc_hyperv_zeroed_page();
+ vmbus_connection.monitor_pages[1] = (void *)hv_alloc_hyperv_zeroed_page();
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
@@ -244,21 +246,21 @@ int vmbus_connect(void)
* version.
*/
- version = VERSION_CURRENT;
+ for (i = 0; ; i++) {
+ if (i == ARRAY_SIZE(vmbus_versions))
+ goto cleanup;
+
+ version = vmbus_versions[i];
+ if (version > max_version)
+ continue;
- do {
ret = vmbus_negotiate_version(msginfo, version);
if (ret == -ETIMEDOUT)
goto cleanup;
if (vmbus_connection.conn_state == CONNECTED)
break;
-
- version = vmbus_get_next_version(version);
- } while (version != VERSION_INVAL);
-
- if (version == VERSION_INVAL)
- goto cleanup;
+ }
vmbus_proto_version = version;
pr_info("Vmbus version:%d.%d\n",
@@ -295,12 +297,12 @@ void vmbus_disconnect(void)
destroy_workqueue(vmbus_connection.work_queue);
if (vmbus_connection.int_page) {
- free_pages((unsigned long)vmbus_connection.int_page, 0);
+ hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}
- free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0);
- free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
+ hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
+ hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[0] = NULL;
vmbus_connection.monitor_pages[1] = NULL;
}
@@ -361,6 +363,7 @@ void vmbus_on_event(unsigned long data)
trace_vmbus_on_event(channel);
+ hv_debug_delay_test(channel, INTERRUPT_DELAY);
do {
void (*callback_fn)(void *);
@@ -413,7 +416,7 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
case HV_STATUS_INVALID_CONNECTION_ID:
/*
* See vmbus_negotiate_version(): VMBus protocol 5.0
- * requires that we must use
+ * and higher require that we must use
* VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate
* Contact message, but on old hosts that only
* support VMBus protocol 4.0 or lower, here we get
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 34bd73526afd..b155d0052981 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -23,6 +23,9 @@
#include <linux/percpu_counter.h>
#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
+
+#include <asm/mshyperv.h>
#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"
@@ -341,8 +344,6 @@ struct dm_unballoon_response {
*
* mem_range: Memory range to hot add.
*
- * On Linux we currently don't support this since we cannot hot add
- * arbitrary granularity of memory.
*/
struct dm_hot_add {
@@ -457,6 +458,7 @@ struct hot_add_wrk {
struct work_struct wrk;
};
+static bool allow_hibernation;
static bool hot_add = true;
static bool do_hot_add;
/*
@@ -477,7 +479,7 @@ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
-static int dm_ring_size = (5 * PAGE_SIZE);
+static int dm_ring_size = 20 * 1024;
/*
* Driver specific state.
@@ -493,10 +495,10 @@ enum hv_dm_state {
};
-static __u8 recv_buffer[PAGE_SIZE];
-static __u8 balloon_up_send_buffer[PAGE_SIZE];
-#define PAGES_IN_2M 512
-#define HA_CHUNK (32 * 1024)
+static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
+static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
+#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
+#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
struct hv_dynmem_device {
struct hv_device *dev;
@@ -680,9 +682,7 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
__ClearPageOffline(pg);
/* This frame is currently backed; online the page. */
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
+ generic_online_page(pg, 0);
lockdep_assert_held(&dm_device.ha_lock);
dm_device.num_pages_onlined++;
@@ -1053,8 +1053,12 @@ static void hot_add_req(struct work_struct *dummy)
else
resp.result = 0;
- if (!do_hot_add || (resp.page_count == 0))
- pr_err("Memory hot add failed\n");
+ if (!do_hot_add || resp.page_count == 0) {
+ if (!allow_hibernation)
+ pr_err("Memory hot add failed\n");
+ else
+ pr_info("Ignore hot-add request!\n");
+ }
dm->state = DM_INITIALIZED;
resp.hdr.trans_id = atomic_inc_return(&trans_id);
@@ -1076,7 +1080,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
__u64 *max_page_count = (__u64 *)&info_hdr[1];
pr_info("Max. dynamic memory size: %llu MB\n",
- (*max_page_count) >> (20 - PAGE_SHIFT));
+ (*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
}
break;
@@ -1218,7 +1222,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
for (i = 0; (i * alloc_unit) < num_pages; i++) {
if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
- PAGE_SIZE)
+ HV_HYP_PAGE_SIZE)
return i * alloc_unit;
/*
@@ -1274,9 +1278,9 @@ static void balloon_up(struct work_struct *dummy)
/*
* We will attempt 2M allocations. However, if we fail to
- * allocate 2M chunks, we will go back to 4k allocations.
+ * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
*/
- alloc_unit = 512;
+ alloc_unit = PAGES_IN_2M;
avail_pages = si_mem_available();
floor = compute_balloon_floor();
@@ -1292,7 +1296,7 @@ static void balloon_up(struct work_struct *dummy)
}
while (!done) {
- memset(balloon_up_send_buffer, 0, PAGE_SIZE);
+ memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
bl_resp->hdr.type = DM_BALLOON_RESPONSE;
bl_resp->hdr.size = sizeof(struct dm_balloon_response);
@@ -1491,7 +1495,7 @@ static void balloon_onchannelcallback(void *context)
memset(recv_buffer, 0, sizeof(recv_buffer));
vmbus_recvpacket(dev->channel, recv_buffer,
- PAGE_SIZE, &recvlen, &requestid);
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
dm_msg = (struct dm_message *)recv_buffer;
@@ -1509,6 +1513,11 @@ static void balloon_onchannelcallback(void *context)
break;
case DM_BALLOON_REQUEST:
+ if (allow_hibernation) {
+ pr_info("Ignore balloon-up request!\n");
+ break;
+ }
+
if (dm->state == DM_BALLOON_UP)
pr_warn("Currently ballooning\n");
bal_msg = (struct dm_balloon *)recv_buffer;
@@ -1518,6 +1527,11 @@ static void balloon_onchannelcallback(void *context)
break;
case DM_UNBALLOON_REQUEST:
+ if (allow_hibernation) {
+ pr_info("Ignore balloon-down request!\n");
+ break;
+ }
+
dm->state = DM_BALLOON_DOWN;
balloon_down(dm,
(struct dm_unballoon_request *)recv_buffer);
@@ -1623,6 +1637,11 @@ static int balloon_connect_vsp(struct hv_device *dev)
cap_msg.hdr.size = sizeof(struct dm_capabilities);
cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
+ /*
+ * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
+ * currently still requires the bits to be set, so we have to add code
+ * to fail the host's hot-add and balloon up/down requests, if any.
+ */
cap_msg.caps.cap_bits.balloon = 1;
cap_msg.caps.cap_bits.hot_add = 1;
@@ -1672,6 +1691,10 @@ static int balloon_probe(struct hv_device *dev,
{
int ret;
+ allow_hibernation = hv_is_hibernation_supported();
+ if (allow_hibernation)
+ hot_add = false;
+
#ifdef CONFIG_MEMORY_HOTPLUG
do_hot_add = hot_add;
#else
@@ -1711,6 +1734,8 @@ static int balloon_probe(struct hv_device *dev,
return 0;
probe_error:
+ dm_device.state = DM_INIT_ERROR;
+ dm_device.thread = NULL;
vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
@@ -1752,6 +1777,59 @@ static int balloon_remove(struct hv_device *dev)
return 0;
}
+static int balloon_suspend(struct hv_device *hv_dev)
+{
+ struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
+
+ tasklet_disable(&hv_dev->channel->callback_event);
+
+ cancel_work_sync(&dm->balloon_wrk.wrk);
+ cancel_work_sync(&dm->ha_wrk.wrk);
+
+ if (dm->thread) {
+ kthread_stop(dm->thread);
+ dm->thread = NULL;
+ vmbus_close(hv_dev->channel);
+ }
+
+ tasklet_enable(&hv_dev->channel->callback_event);
+
+ return 0;
+
+}
+
+static int balloon_resume(struct hv_device *dev)
+{
+ int ret;
+
+ dm_device.state = DM_INITIALIZING;
+
+ ret = balloon_connect_vsp(dev);
+
+ if (ret != 0)
+ goto out;
+
+ dm_device.thread =
+ kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+ if (IS_ERR(dm_device.thread)) {
+ ret = PTR_ERR(dm_device.thread);
+ dm_device.thread = NULL;
+ goto close_channel;
+ }
+
+ dm_device.state = DM_INITIALIZED;
+ return 0;
+close_channel:
+ vmbus_close(dev->channel);
+out:
+ dm_device.state = DM_INIT_ERROR;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unregister_memory_notifier(&hv_memory_nb);
+ restore_online_page_callback(&hv_online_page);
+#endif
+ return ret;
+}
+
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
@@ -1766,6 +1844,8 @@ static struct hv_driver balloon_drv = {
.id_table = id_table,
.probe = balloon_probe,
.remove = balloon_remove,
+ .suspend = balloon_suspend,
+ .resume = balloon_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/hv/hv_debugfs.c b/drivers/hv/hv_debugfs.c
new file mode 100644
index 000000000000..8a2878573582
--- /dev/null
+++ b/drivers/hv/hv_debugfs.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Authors:
+ * Branden Bonaby <brandonbonaby94@gmail.com>
+ */
+
+#include <linux/hyperv.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include "hyperv_vmbus.h"
+
+struct dentry *hv_debug_root;
+
+static int hv_debugfs_delay_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+
+static int hv_debugfs_delay_set(void *data, u64 val)
+{
+ if (val > 1000)
+ return -EINVAL;
+ *(u32 *)data = val;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_delay_fops, hv_debugfs_delay_get,
+ hv_debugfs_delay_set, "%llu\n");
+
+static int hv_debugfs_state_get(void *data, u64 *val)
+{
+ *val = *(bool *)data;
+ return 0;
+}
+
+static int hv_debugfs_state_set(void *data, u64 val)
+{
+ if (val == 1)
+ *(bool *)data = true;
+ else if (val == 0)
+ *(bool *)data = false;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_state_fops, hv_debugfs_state_get,
+ hv_debugfs_state_set, "%llu\n");
+
+/* Setup delay files to store test values */
+static int hv_debug_delay_files(struct hv_device *dev, struct dentry *root)
+{
+ struct vmbus_channel *channel = dev->channel;
+ char *buffer = "fuzz_test_buffer_interrupt_delay";
+ char *message = "fuzz_test_message_delay";
+ int *buffer_val = &channel->fuzz_testing_interrupt_delay;
+ int *message_val = &channel->fuzz_testing_message_delay;
+ struct dentry *buffer_file, *message_file;
+
+ buffer_file = debugfs_create_file(buffer, 0644, root,
+ buffer_val,
+ &hv_debugfs_delay_fops);
+ if (IS_ERR(buffer_file)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", buffer);
+ return PTR_ERR(buffer_file);
+ }
+
+ message_file = debugfs_create_file(message, 0644, root,
+ message_val,
+ &hv_debugfs_delay_fops);
+ if (IS_ERR(message_file)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", message);
+ return PTR_ERR(message_file);
+ }
+
+ return 0;
+}
+
+/* Setup test state value for vmbus device */
+static int hv_debug_set_test_state(struct hv_device *dev, struct dentry *root)
+{
+ struct vmbus_channel *channel = dev->channel;
+ bool *state = &channel->fuzz_testing_state;
+ char *status = "fuzz_test_state";
+ struct dentry *test_state;
+
+ test_state = debugfs_create_file(status, 0644, root,
+ state,
+ &hv_debugfs_state_fops);
+ if (IS_ERR(test_state)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", status);
+ return PTR_ERR(test_state);
+ }
+
+ return 0;
+}
+
+/* Bind hv device to a dentry for debugfs */
+static void hv_debug_set_dir_dentry(struct hv_device *dev, struct dentry *root)
+{
+ if (hv_debug_root)
+ dev->debug_dir = root;
+}
+
+/* Create all test dentries and names for fuzz testing */
+int hv_debug_add_dev_dir(struct hv_device *dev)
+{
+ const char *device = dev_name(&dev->device);
+ char *delay_name = "delay";
+ struct dentry *delay, *dev_root;
+ int ret;
+
+ if (!IS_ERR(hv_debug_root)) {
+ dev_root = debugfs_create_dir(device, hv_debug_root);
+ if (IS_ERR(dev_root)) {
+ pr_debug("debugfs_hyperv: hyperv/%s/ not created\n",
+ device);
+ return PTR_ERR(dev_root);
+ }
+ hv_debug_set_test_state(dev, dev_root);
+ hv_debug_set_dir_dentry(dev, dev_root);
+ delay = debugfs_create_dir(delay_name, dev_root);
+
+ if (IS_ERR(delay)) {
+ pr_debug("debugfs_hyperv: hyperv/%s/%s/ not created\n",
+ device, delay_name);
+ return PTR_ERR(delay);
+ }
+ ret = hv_debug_delay_files(dev, delay);
+
+ return ret;
+ }
+ pr_debug("debugfs_hyperv: hyperv/ not in root debugfs path\n");
+ return PTR_ERR(hv_debug_root);
+}
+
+/* Remove dentry associated with released hv device */
+void hv_debug_rm_dev_dir(struct hv_device *dev)
+{
+ if (!IS_ERR(hv_debug_root))
+ debugfs_remove_recursive(dev->debug_dir);
+}
+
+/* Remove all dentries associated with vmbus testing */
+void hv_debug_rm_all_dir(void)
+{
+ debugfs_remove_recursive(hv_debug_root);
+}
+
+/* Delay buffer/message reads on a vmbus channel */
+void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type)
+{
+ struct vmbus_channel *test_channel = channel->primary_channel ?
+ channel->primary_channel :
+ channel;
+ bool state = test_channel->fuzz_testing_state;
+
+ if (state) {
+ if (delay_type == 0)
+ udelay(test_channel->fuzz_testing_interrupt_delay);
+ else
+ udelay(test_channel->fuzz_testing_message_delay);
+ }
+}
+
+/* Initialize top dentry for vmbus testing */
+int hv_debug_init(void)
+{
+ hv_debug_root = debugfs_create_dir("hyperv", NULL);
+ if (IS_ERR(hv_debug_root)) {
+ pr_debug("debugfs_hyperv: hyperv/ not created\n");
+ return PTR_ERR(hv_debug_root);
+ }
+ return 0;
+}
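As an illustration (not part of the patch itself): once hv_debug_add_dev_dir() runs at device registration, the files created above live under <debugfs root>/hyperv/<device>/. A minimal test-harness sketch, assuming debugfs is mounted at /sys/kernel/debug and using a made-up vmbus channel GUID for the device directory name:

/*
 * Hypothetical userspace sketch: enable fuzz testing for one vmbus device
 * and set the buffer interrupt delay. hv_debugfs_delay_set() rejects
 * values above 1000; the delay is applied with udelay(), i.e. microseconds.
 */
#include <stdio.h>

static int write_uint(const char *path, unsigned int val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%u\n", val);
	return fclose(f);
}

int main(void)
{
	/* Device directory name is the channel GUID; this one is made up. */
	const char *dev =
		"/sys/kernel/debug/hyperv/000d3a6e-0000-0000-0000-000000000000";
	char path[256];

	snprintf(path, sizeof(path), "%s/fuzz_test_state", dev);
	if (write_uint(path, 1))	/* hv_debugfs_state_set(): 1 = enable */
		return 1;

	snprintf(path, sizeof(path),
		 "%s/delay/fuzz_test_buffer_interrupt_delay", dev);
	return write_uint(path, 500) ? 1 : 0;	/* 500 us per interrupt */
}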
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 7e30ae0635cc..08fa4a5de644 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -13,6 +13,7 @@
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <linux/sched.h>
+#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@@ -234,7 +235,7 @@ void hv_fcopy_onchannelcallback(void *context)
if (fcopy_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
&requestid);
if (recvlen <= 0)
return;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5054d1105236..ae7c028dc5a8 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -27,6 +27,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@@ -661,7 +662,7 @@ void hv_kvp_onchannelcallback(void *context)
if (kvp_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen,
&requestid);
if (recvlen > 0) {
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 20ba95b75a94..03b6454268b3 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -12,6 +12,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@@ -297,7 +298,7 @@ void hv_vss_onchannelcallback(void *context)
if (vss_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
&requestid);
if (recvlen > 0) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index e32681ee7b9f..766bd8457346 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -136,7 +136,7 @@ static void shutdown_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
vmbus_recvpacket(channel, shut_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
@@ -284,7 +284,7 @@ static void timesync_onchannelcallback(void *context)
u8 *time_txf_buf = util_timesynch.recv_buffer;
vmbus_recvpacket(channel, time_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
@@ -346,7 +346,7 @@ static void heartbeat_onchannelcallback(void *context)
while (1) {
vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (!recvlen)
break;
@@ -390,7 +390,7 @@ static int util_probe(struct hv_device *dev,
(struct hv_util_service *)dev_id->driver_data;
int ret;
- srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+ srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
srv->channel = dev->channel;
@@ -413,8 +413,9 @@ static int util_probe(struct hv_device *dev,
hv_set_drvdata(dev, srv);
- ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
- srv->util_cb, dev->channel);
+ ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
+ 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+ dev->channel);
if (ret)
goto error;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index af9379a3bf89..20edcfd3b96c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -385,4 +385,35 @@ enum hvutil_device_state {
HVUTIL_DEVICE_DYING, /* driver unload is in progress */
};
+enum delay {
+ INTERRUPT_DELAY = 0,
+ MESSAGE_DELAY = 1,
+};
+
+#ifdef CONFIG_HYPERV_TESTING
+
+int hv_debug_add_dev_dir(struct hv_device *dev);
+void hv_debug_rm_dev_dir(struct hv_device *dev);
+void hv_debug_rm_all_dir(void);
+int hv_debug_init(void);
+void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);
+
+#else /* CONFIG_HYPERV_TESTING */
+
+static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
+static inline void hv_debug_rm_all_dir(void) {};
+static inline void hv_debug_delay_test(struct vmbus_channel *channel,
+ enum delay delay_type) {};
+static inline int hv_debug_init(void)
+{
+ return -1;
+}
+
+static inline int hv_debug_add_dev_dir(struct hv_device *dev)
+{
+ return -1;
+}
+
+#endif /* CONFIG_HYPERV_TESTING */
+
#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 9a03b163cbbd..356e22159e83 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -396,6 +396,7 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
struct hv_ring_buffer_info *rbi = &channel->inbound;
struct vmpacket_descriptor *desc;
+ hv_debug_delay_test(channel, MESSAGE_DELAY);
if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
return NULL;
@@ -421,6 +422,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
u32 packetlen = desc->len8 << 3;
u32 dsize = rbi->ring_datasize;
+ hv_debug_delay_test(channel, MESSAGE_DELAY);
/* bump offset to next potential packet */
rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
if (rbi->priv_read_index >= dsize)
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8c06b3361c27..4ef5a66df680 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -79,7 +79,7 @@ static struct notifier_block hyperv_panic_block = {
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
-static DEFINE_SEMAPHORE(hyperv_mmio_lock);
+static DEFINE_MUTEX(hyperv_mmio_lock);
static int vmbus_exists(void)
{
@@ -960,6 +960,8 @@ static void vmbus_device_release(struct device *device)
struct hv_device *hv_dev = device_to_hv_device(device);
struct vmbus_channel *channel = hv_dev->channel;
+ hv_debug_rm_dev_dir(hv_dev);
+
mutex_lock(&vmbus_connection.channel_mutex);
hv_process_channel_removal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
@@ -1273,7 +1275,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
* Write dump contents to the page. No need to synchronize; panic should
* be single-threaded.
*/
- kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
+ kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
if (bytes_written)
hyperv_report_panic_msg(panic_pa, bytes_written);
@@ -1373,7 +1375,7 @@ static int vmbus_bus_init(void)
*/
hv_get_crash_ctl(hyperv_crash_ctl);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
- hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
+ hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
if (hv_panic_page) {
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret)
@@ -1401,7 +1403,7 @@ err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
- free_page((unsigned long)hv_panic_page);
+ hv_free_hyperv_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
return ret;
@@ -1809,6 +1811,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
pr_err("Unable to register primary channeln");
goto err_kset_unregister;
}
+ hv_debug_add_dev_dir(child_device_obj);
return 0;
@@ -2010,7 +2013,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
int retval;
retval = -ENXIO;
- down(&hyperv_mmio_lock);
+ mutex_lock(&hyperv_mmio_lock);
/*
* If overlaps with frame buffers are allowed, then first attempt to
@@ -2057,7 +2060,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
}
exit:
- up(&hyperv_mmio_lock);
+ mutex_unlock(&hyperv_mmio_lock);
return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
@@ -2074,7 +2077,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
struct resource *iter;
- down(&hyperv_mmio_lock);
+ mutex_lock(&hyperv_mmio_lock);
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
@@ -2082,7 +2085,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
__release_region(iter, start, size);
}
release_mem_region(start, size);
- up(&hyperv_mmio_lock);
+ mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
@@ -2215,8 +2218,7 @@ static int vmbus_bus_resume(struct device *dev)
* We only use the 'vmbus_proto_version', which was in use before
* hibernation, to re-negotiate with the host.
*/
- if (vmbus_proto_version == VERSION_INVAL ||
- vmbus_proto_version == 0) {
+ if (!vmbus_proto_version) {
pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
return -EINVAL;
}
@@ -2303,7 +2305,7 @@ static void hv_crash_handler(struct pt_regs *regs)
vmbus_connection.conn_state = DISCONNECTED;
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
- hv_synic_cleanup(cpu);
+ hv_synic_disable_regs(cpu);
hyperv_cleanup();
};
@@ -2373,6 +2375,7 @@ static int __init hv_acpi_init(void)
ret = -ETIMEDOUT;
goto cleanup;
}
+ hv_debug_init();
ret = vmbus_bus_init();
if (ret)
@@ -2409,6 +2412,8 @@ static void __exit vmbus_exit(void)
tasklet_kill(&hv_cpu->msg_dpc);
}
+ hv_debug_rm_all_dir();
+
vmbus_free_channels();
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 13a6b4afb4b3..23dfe848979a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -40,7 +40,8 @@ comment "Native drivers"
config SENSORS_AB8500
tristate "AB8500 thermal monitoring"
- depends on AB8500_GPADC && AB8500_BM
+ depends on AB8500_GPADC && AB8500_BM && (IIO = y)
+ default n
help
If you say yes here you get support for the thermal sensor part
of the AB8500 chip. The driver includes thermal management for
@@ -309,7 +310,6 @@ config SENSORS_APPLESMC
depends on INPUT && X86
select NEW_LEDS
select LEDS_CLASS
- select INPUT_POLLDEV
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -727,6 +727,33 @@ config SENSORS_LTC2945
This driver can also be built as a module. If so, the module will
be called ltc2945.
+config SENSORS_LTC2947
+ tristate
+
+config SENSORS_LTC2947_I2C
+ tristate "Analog Devices LTC2947 High Precision Power and Energy Monitor over I2C"
+ depends on I2C
+ select REGMAP_I2C
+ select SENSORS_LTC2947
+ help
+ If you say yes here you get support for Linear Technology LTC2947
+ I2C High Precision Power and Energy Monitor
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2947-i2c.
+
+config SENSORS_LTC2947_SPI
+ tristate "Analog Devices LTC2947 High Precision Power and Energy Monitor over SPI"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select SENSORS_LTC2947
+ help
+ If you say yes here you get support for Linear Technology LTC2947
+ SPI High Precision Power and Energy Monitor
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2947-spi.
+
config SENSORS_LTC2990
tristate "Linear Technology LTC2990"
depends on I2C
@@ -1709,6 +1736,16 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TMP513
+ tristate "Texas Instruments TMP513 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments TMP512,
+ and TMP513 temperature and power supply sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp513.
+
config SENSORS_VEXPRESS
tristate "Versatile Express"
depends on VEXPRESS_CONFIG
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 40c036ea45e6..6db5db9cdc29 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -106,6 +106,9 @@ obj-$(CONFIG_SENSORS_LM95234) += lm95234.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC2945) += ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2947) += ltc2947-core.o
+obj-$(CONFIG_SENSORS_LTC2947_I2C) += ltc2947-i2c.o
+obj-$(CONFIG_SENSORS_LTC2947_SPI) += ltc2947-spi.o
obj-$(CONFIG_SENSORS_LTC2990) += ltc2990.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
@@ -166,6 +169,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TMP513) += tmp513.o
obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
index 207f77f85a40..53f3379d799d 100644
--- a/drivers/hwmon/ab8500.c
+++ b/drivers/hwmon/ab8500.c
@@ -17,20 +17,24 @@
#include <linux/hwmon-sysfs.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power/ab8500.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/iio/consumer.h>
#include "abx500.h"
#define DEFAULT_POWER_OFF_DELAY (HZ * 10)
#define THERMAL_VCC 1800
#define PULL_UP_RESISTOR 47000
-/* Number of monitored sensors should not greater than NUM_SENSORS */
-#define NUM_MONITORED_SENSORS 4
+
+#define AB8500_SENSOR_AUX1 0
+#define AB8500_SENSOR_AUX2 1
+#define AB8500_SENSOR_BTEMP_BALL 2
+#define AB8500_SENSOR_BAT_CTRL 3
+#define NUM_MONITORED_SENSORS 4
struct ab8500_gpadc_cfg {
const struct abx500_res_to_temp *temp_tbl;
@@ -40,7 +44,8 @@ struct ab8500_gpadc_cfg {
};
struct ab8500_temp {
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *aux1;
+ struct iio_channel *aux2;
struct ab8500_btemp *btemp;
struct delayed_work power_off_work;
struct ab8500_gpadc_cfg cfg;
@@ -82,15 +87,21 @@ static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor, int *temp)
int voltage, ret;
struct ab8500_temp *ab8500_data = data->plat_data;
- if (sensor == BAT_CTRL) {
- *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
- } else if (sensor == BTEMP_BALL) {
+ if (sensor == AB8500_SENSOR_BTEMP_BALL) {
*temp = ab8500_btemp_get_temp(ab8500_data->btemp);
- } else {
- voltage = ab8500_gpadc_convert(ab8500_data->gpadc, sensor);
- if (voltage < 0)
- return voltage;
-
+ } else if (sensor == AB8500_SENSOR_BAT_CTRL) {
+ *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
+ } else if (sensor == AB8500_SENSOR_AUX1) {
+ ret = iio_read_channel_processed(ab8500_data->aux1, &voltage);
+ if (ret < 0)
+ return ret;
+ ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
+ if (ret < 0)
+ return ret;
+ } else if (sensor == AB8500_SENSOR_AUX2) {
+ ret = iio_read_channel_processed(ab8500_data->aux2, &voltage);
+ if (ret < 0)
+ return ret;
ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
if (ret < 0)
return ret;
@@ -164,10 +175,6 @@ int abx500_hwmon_init(struct abx500_temp *data)
if (!ab8500_data)
return -ENOMEM;
- ab8500_data->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- if (IS_ERR(ab8500_data->gpadc))
- return PTR_ERR(ab8500_data->gpadc);
-
ab8500_data->btemp = ab8500_btemp_get();
if (IS_ERR(ab8500_data->btemp))
return PTR_ERR(ab8500_data->btemp);
@@ -181,15 +188,25 @@ int abx500_hwmon_init(struct abx500_temp *data)
ab8500_data->cfg.tbl_sz = ab8500_temp_tbl_a_size;
data->plat_data = ab8500_data;
+ ab8500_data->aux1 = devm_iio_channel_get(&data->pdev->dev, "aux1");
+ if (IS_ERR(ab8500_data->aux1)) {
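+ /*
+ * -ENODEV here likely means the GPADC IIO provider has not
+ * probed yet, so ask for a retry later.
+ */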
+ if (PTR_ERR(ab8500_data->aux1) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&data->pdev->dev, "failed to get AUX1 ADC channel\n");
+ return PTR_ERR(ab8500_data->aux1);
+ }
+ ab8500_data->aux2 = devm_iio_channel_get(&data->pdev->dev, "aux2");
+ if (IS_ERR(ab8500_data->aux2)) {
+ if (PTR_ERR(ab8500_data->aux2) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&data->pdev->dev, "failed to get AUX2 ADC channel\n");
+ return PTR_ERR(ab8500_data->aux2);
+ }
- /*
- * ADC_AUX1 and ADC_AUX2, connected to external NTC
- * BTEMP_BALL and BAT_CTRL, fixed usage
- */
- data->gpadc_addr[0] = ADC_AUX1;
- data->gpadc_addr[1] = ADC_AUX2;
- data->gpadc_addr[2] = BTEMP_BALL;
- data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[0] = AB8500_SENSOR_AUX1;
+ data->gpadc_addr[1] = AB8500_SENSOR_AUX2;
+ data->gpadc_addr[2] = AB8500_SENSOR_BTEMP_BALL;
+ data->gpadc_addr[3] = AB8500_SENSOR_BAT_CTRL;
data->monitored_sensors = NUM_MONITORED_SENSORS;
data->ops.read_sensor = ab8500_read_sensor;
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index a5cf6b2a6e49..681f0623868f 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1264,7 +1264,7 @@ static int abituguru_probe(struct platform_device *pdev)
* El weirdo probe order, to keep the sysfs order identical to the
* BIOS and window-application listing order.
*/
- const u8 probe_order[ABIT_UGURU_MAX_BANK1_SENSORS] = {
+ static const u8 probe_order[ABIT_UGURU_MAX_BANK1_SENSORS] = {
0x00, 0x01, 0x03, 0x04, 0x0A, 0x08, 0x0E, 0x02,
0x09, 0x06, 0x05, 0x0B, 0x0F, 0x0D, 0x07, 0x0C };
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 183ff3d25129..ec93b8d673f5 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -140,7 +140,7 @@ static s16 rest_y;
static u8 backlight_state[2];
static struct device *hwmon_dev;
-static struct input_polled_dev *applesmc_idev;
+static struct input_dev *applesmc_idev;
/*
* Last index written to key_at_index sysfs file, and value to use for all other
@@ -681,9 +681,8 @@ static void applesmc_calibrate(void)
rest_x = -rest_x;
}
-static void applesmc_idev_poll(struct input_polled_dev *dev)
+static void applesmc_idev_poll(struct input_dev *idev)
{
- struct input_dev *idev = dev->input;
s16 x, y;
if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x))
@@ -1134,7 +1133,6 @@ out:
/* Create accelerometer resources */
static int applesmc_create_accelerometer(void)
{
- struct input_dev *idev;
int ret;
if (!smcreg.has_accelerometer)
@@ -1144,37 +1142,38 @@ static int applesmc_create_accelerometer(void)
if (ret)
goto out;
- applesmc_idev = input_allocate_polled_device();
+ applesmc_idev = input_allocate_device();
if (!applesmc_idev) {
ret = -ENOMEM;
goto out_sysfs;
}
- applesmc_idev->poll = applesmc_idev_poll;
- applesmc_idev->poll_interval = APPLESMC_POLL_INTERVAL;
-
/* initial calibrate for the input device */
applesmc_calibrate();
/* initialize the input device */
- idev = applesmc_idev->input;
- idev->name = "applesmc";
- idev->id.bustype = BUS_HOST;
- idev->dev.parent = &pdev->dev;
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X,
+ applesmc_idev->name = "applesmc";
+ applesmc_idev->id.bustype = BUS_HOST;
+ applesmc_idev->dev.parent = &pdev->dev;
+ input_set_abs_params(applesmc_idev, ABS_X,
-256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT);
- input_set_abs_params(idev, ABS_Y,
+ input_set_abs_params(applesmc_idev, ABS_Y,
-256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT);
- ret = input_register_polled_device(applesmc_idev);
+ ret = input_setup_polling(applesmc_idev, applesmc_idev_poll);
+ if (ret)
+ goto out_idev;
+
+ input_set_poll_interval(applesmc_idev, APPLESMC_POLL_INTERVAL);
+
+ ret = input_register_device(applesmc_idev);
if (ret)
goto out_idev;
return 0;
out_idev:
- input_free_polled_device(applesmc_idev);
+ input_free_device(applesmc_idev);
out_sysfs:
applesmc_destroy_nodes(accelerometer_group);
@@ -1189,8 +1188,7 @@ static void applesmc_release_accelerometer(void)
{
if (!smcreg.has_accelerometer)
return;
- input_unregister_polled_device(applesmc_idev);
- input_free_polled_device(applesmc_idev);
+ input_unregister_device(applesmc_idev);
applesmc_destroy_nodes(accelerometer_group);
}
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 40c489be62ea..33fb54845bf6 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -891,17 +891,12 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
struct device_node *np, *child;
struct aspeed_pwm_tacho_data *priv;
void __iomem *regs;
- struct resource *res;
struct device *hwmon;
struct clk *clk;
int ret;
np = dev->of_node;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 4212d022d253..17583bf8c2dc 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -68,6 +68,8 @@ static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
static bool disallow_fan_support;
+static unsigned int manual_fan;
+static unsigned int auto_fan;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -301,6 +303,20 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
}
/*
+ * Enable or disable automatic BIOS fan control support
+ */
+static int i8k_enable_fan_auto_mode(bool enable)
+{
+ struct smm_regs regs = { };
+
+ if (disallow_fan_support)
+ return -EINVAL;
+
+ regs.eax = enable ? auto_fan : manual_fan;
+ return i8k_smm(&regs);
+}
+
+/*
* Set the fan speed (off, low, high). Returns the new fan status.
*/
static int i8k_set_fan(int fan, int speed)
@@ -726,6 +742,35 @@ static ssize_t i8k_hwmon_pwm_store(struct device *dev,
return err < 0 ? -EIO : count;
}
+static ssize_t i8k_hwmon_pwm_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ bool enable;
+ unsigned long val;
+
+ if (!auto_fan)
+ return -ENODEV;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
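+ /*
+ * Follow the hwmon pwm[1-*]_enable convention:
+ * 1 = manual fan control, 2 = automatic (BIOS) fan control.
+ */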
+ if (val == 1)
+ enable = false;
+ else if (val == 2)
+ enable = true;
+ else
+ return -EINVAL;
+
+ mutex_lock(&i8k_mutex);
+ err = i8k_enable_fan_auto_mode(enable);
+ mutex_unlock(&i8k_mutex);
+
+ return err ? err : count;
+}
+
static SENSOR_DEVICE_ATTR_RO(temp1_input, i8k_hwmon_temp, 0);
static SENSOR_DEVICE_ATTR_RO(temp1_label, i8k_hwmon_temp_label, 0);
static SENSOR_DEVICE_ATTR_RO(temp2_input, i8k_hwmon_temp, 1);
@@ -749,6 +794,7 @@ static SENSOR_DEVICE_ATTR_RO(temp10_label, i8k_hwmon_temp_label, 9);
static SENSOR_DEVICE_ATTR_RO(fan1_input, i8k_hwmon_fan, 0);
static SENSOR_DEVICE_ATTR_RO(fan1_label, i8k_hwmon_fan_label, 0);
static SENSOR_DEVICE_ATTR_RW(pwm1, i8k_hwmon_pwm, 0);
+static SENSOR_DEVICE_ATTR_WO(pwm1_enable, i8k_hwmon_pwm_enable, 0);
static SENSOR_DEVICE_ATTR_RO(fan2_input, i8k_hwmon_fan, 1);
static SENSOR_DEVICE_ATTR_RO(fan2_label, i8k_hwmon_fan_label, 1);
static SENSOR_DEVICE_ATTR_RW(pwm2, i8k_hwmon_pwm, 1);
@@ -780,12 +826,13 @@ static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr, /* 20 */
&sensor_dev_attr_fan1_label.dev_attr.attr, /* 21 */
&sensor_dev_attr_pwm1.dev_attr.attr, /* 22 */
- &sensor_dev_attr_fan2_input.dev_attr.attr, /* 23 */
- &sensor_dev_attr_fan2_label.dev_attr.attr, /* 24 */
- &sensor_dev_attr_pwm2.dev_attr.attr, /* 25 */
- &sensor_dev_attr_fan3_input.dev_attr.attr, /* 26 */
- &sensor_dev_attr_fan3_label.dev_attr.attr, /* 27 */
- &sensor_dev_attr_pwm3.dev_attr.attr, /* 28 */
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr, /* 23 */
+ &sensor_dev_attr_fan2_input.dev_attr.attr, /* 24 */
+ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 25 */
+ &sensor_dev_attr_pwm2.dev_attr.attr, /* 26 */
+ &sensor_dev_attr_fan3_input.dev_attr.attr, /* 27 */
+ &sensor_dev_attr_fan3_label.dev_attr.attr, /* 28 */
+ &sensor_dev_attr_pwm3.dev_attr.attr, /* 29 */
NULL
};
@@ -828,16 +875,19 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP10))
return 0;
- if (index >= 20 && index <= 22 &&
+ if (index >= 20 && index <= 23 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
return 0;
- if (index >= 23 && index <= 25 &&
+ if (index >= 24 && index <= 26 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
return 0;
- if (index >= 26 && index <= 28 &&
+ if (index >= 27 && index <= 29 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3))
return 0;
+ if (index == 23 && !auto_fan)
+ return 0;
+
return attr->mode;
}
@@ -1135,12 +1185,48 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
{ }
};
+struct i8k_fan_control_data {
+ unsigned int manual_fan;
+ unsigned int auto_fan;
+};
+
+enum i8k_fan_controls {
+ I8K_FAN_34A3_35A3,
+};
+
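+/*
+ * SMM command pair used by the whitelisted models below: 0x34a3 disables
+ * automatic BIOS fan control (manual mode), 0x35a3 re-enables it.
+ */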
+static const struct i8k_fan_control_data i8k_fan_control_data[] = {
+ [I8K_FAN_34A3_35A3] = {
+ .manual_fan = 0x34a3,
+ .auto_fan = 0x35a3,
+ },
+};
+
+static struct dmi_system_id i8k_whitelist_fan_control[] __initdata = {
+ {
+ .ident = "Dell Precision 5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Precision 5530"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ {
+ .ident = "Dell Latitude E6440",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Latitude E6440"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ { }
+};
+
/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
{
- const struct dmi_system_id *id;
+ const struct dmi_system_id *id, *fan_control;
int fan, ret;
/*
@@ -1200,6 +1286,15 @@ static int __init i8k_probe(void)
i8k_fan_max = fan_max ? : I8K_FAN_HIGH; /* Must not be 0 */
i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max);
+ fan_control = dmi_first_match(i8k_whitelist_fan_control);
+ if (fan_control && fan_control->driver_data) {
+ const struct i8k_fan_control_data *data = fan_control->driver_data;
+
+ manual_fan = data->manual_fan;
+ auto_fan = data->auto_fan;
+ pr_info("enabling support for setting automatic/manual fan control\n");
+ }
+
if (!fan_mult) {
/*
* Autodetect fan multiplier based on nominal rpm
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index fa0c2f1fb443..4136643d8e0c 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -954,6 +954,7 @@ static const struct file_operations watchdog_fops = {
.release = watchdog_release,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 8a51dcf055ea..f335d0cb0c77 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -31,6 +31,8 @@
#define INA3221_WARN2 0x0a
#define INA3221_CRIT3 0x0b
#define INA3221_WARN3 0x0c
+#define INA3221_SHUNT_SUM 0x0d
+#define INA3221_CRIT_SUM 0x0e
#define INA3221_MASK_ENABLE 0x0f
#define INA3221_CONFIG_MODE_MASK GENMASK(2, 0)
@@ -50,6 +52,8 @@
#define INA3221_CONFIG_CHs_EN_MASK GENMASK(14, 12)
#define INA3221_CONFIG_CHx_EN(x) BIT(14 - (x))
+#define INA3221_MASK_ENABLE_SCC_MASK GENMASK(14, 12)
+
#define INA3221_CONFIG_DEFAULT 0x7127
#define INA3221_RSHUNT_DEFAULT 10000
@@ -60,9 +64,11 @@ enum ina3221_fields {
/* Status Flags */
F_CVRF,
- /* Alert Flags */
+ /* Warning Flags */
F_WF3, F_WF2, F_WF1,
- F_CF3, F_CF2, F_CF1,
+
+ /* Alert Flags: SF is the summation-alert flag */
+ F_SF, F_CF3, F_CF2, F_CF1,
/* sentinel */
F_MAX_FIELDS
@@ -75,6 +81,7 @@ static const struct reg_field ina3221_reg_fields[] = {
[F_WF3] = REG_FIELD(INA3221_MASK_ENABLE, 3, 3),
[F_WF2] = REG_FIELD(INA3221_MASK_ENABLE, 4, 4),
[F_WF1] = REG_FIELD(INA3221_MASK_ENABLE, 5, 5),
+ [F_SF] = REG_FIELD(INA3221_MASK_ENABLE, 6, 6),
[F_CF3] = REG_FIELD(INA3221_MASK_ENABLE, 7, 7),
[F_CF2] = REG_FIELD(INA3221_MASK_ENABLE, 8, 8),
[F_CF1] = REG_FIELD(INA3221_MASK_ENABLE, 9, 9),
@@ -107,6 +114,7 @@ struct ina3221_input {
* @inputs: Array of channel input source specific structures
* @lock: mutex lock to serialize sysfs attribute accesses
* @reg_config: Register value of INA3221_CONFIG
+ * @summation_shunt_resistor: equivalent shunt resistor value for summation
* @single_shot: running in single-shot operating mode
*/
struct ina3221_data {
@@ -116,16 +124,51 @@ struct ina3221_data {
struct ina3221_input inputs[INA3221_NUM_CHANNELS];
struct mutex lock;
u32 reg_config;
+ int summation_shunt_resistor;
bool single_shot;
};
static inline bool ina3221_is_enabled(struct ina3221_data *ina, int channel)
{
+ /* Summation channel checks shunt resistor values */
+ if (channel > INA3221_CHANNEL3)
+ return ina->summation_shunt_resistor != 0;
+
return pm_runtime_active(ina->pm_dev) &&
(ina->reg_config & INA3221_CONFIG_CHx_EN(channel));
}
+/**
+ * Helper function to return the resistor value for current summation.
+ *
+ * Current summation is only valid when all the shunt resistor values
+ * are the same, so that it fits the simple formula:
+ * current summation = shunt voltage summation / shunt resistor
+ *
+ * Returns the equivalent shunt resistor value on success or 0 on failure
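+ *
+ * Example (illustrative values): three connected channels with 10000 uOhm
+ * shunts yield 10000; if any connected channel uses a different value,
+ * 0 is returned and the summation channel is treated as disabled.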
+ */
+static inline int ina3221_summation_shunt_resistor(struct ina3221_data *ina)
+{
+ struct ina3221_input *input = ina->inputs;
+ int i, shunt_resistor = 0;
+
+ for (i = 0; i < INA3221_NUM_CHANNELS; i++) {
+ if (input[i].disconnected || !input[i].shunt_resistor)
+ continue;
+ if (!shunt_resistor) {
+ /* Found the reference shunt resistor value */
+ shunt_resistor = input[i].shunt_resistor;
+ } else {
+ /* No summation if resistor values are different */
+ if (shunt_resistor != input[i].shunt_resistor)
+ return 0;
+ }
+ }
+
+ return shunt_resistor;
+}
+
/* Lookup table for Bus and Shunt conversion times in usec */
static const u16 ina3221_conv_time[] = {
140, 204, 332, 588, 1100, 2116, 4156, 8244,
@@ -183,7 +226,14 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
if (ret)
return ret;
- *val = sign_extend32(regval >> 3, 12);
+ /*
+ * Shunt Voltage Sum register has 14-bit value with 1-bit shift
+ * Other Shunt Voltage registers have 12 bits with 3-bit shift
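+ * e.g. (illustrative): a raw SHUNT1 reading of 0x7ff8 becomes
+ * sign_extend32(0x0fff, 12) = 4095, i.e. +163.8 mV at 40 uV/LSB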
+ */
+ if (reg == INA3221_SHUNT_SUM)
+ *val = sign_extend32(regval >> 1, 14);
+ else
+ *val = sign_extend32(regval >> 3, 12);
return 0;
}
@@ -195,6 +245,7 @@ static const u8 ina3221_in_reg[] = {
INA3221_SHUNT1,
INA3221_SHUNT2,
INA3221_SHUNT3,
+ INA3221_SHUNT_SUM,
};
static int ina3221_read_chip(struct device *dev, u32 attr, long *val)
@@ -224,8 +275,12 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
u8 reg = ina3221_in_reg[channel];
int regval, ret;
- /* Translate shunt channel index to sensor channel index */
- channel %= INA3221_NUM_CHANNELS;
+ /*
+ * Translate shunt channel index to sensor channel index, except that
+ * the 7th channel (index 6, zero-based) is the summation channel.
+ */
+ if (channel != 6)
+ channel %= INA3221_NUM_CHANNELS;
switch (attr) {
case hwmon_in_input:
@@ -259,22 +314,29 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
}
}
-static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS] = {
- [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2, INA3221_SHUNT3 },
- [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3 },
- [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2, INA3221_CRIT3 },
- [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3 },
- [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3 },
+static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS + 1] = {
+ [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2,
+ INA3221_SHUNT3, INA3221_SHUNT_SUM },
+ [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3, 0 },
+ [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2,
+ INA3221_CRIT3, INA3221_CRIT_SUM },
+ [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3, 0 },
+ [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3, F_SF },
};
static int ina3221_read_curr(struct device *dev, u32 attr,
int channel, long *val)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
- struct ina3221_input *input = &ina->inputs[channel];
- int resistance_uo = input->shunt_resistor;
+ struct ina3221_input *input = ina->inputs;
u8 reg = ina3221_curr_reg[attr][channel];
- int regval, voltage_nv, ret;
+ int resistance_uo, voltage_nv;
+ int regval, ret;
+
+ if (channel > INA3221_CHANNEL3)
+ resistance_uo = ina->summation_shunt_resistor;
+ else
+ resistance_uo = input[channel].shunt_resistor;
switch (attr) {
case hwmon_curr_input:
@@ -293,6 +355,9 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
/* fall through */
case hwmon_curr_crit:
case hwmon_curr_max:
+ if (!resistance_uo)
+ return -ENODATA;
+
ret = ina3221_read_value(ina, reg, &regval);
if (ret)
return ret;
@@ -366,10 +431,18 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
int channel, long val)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
- struct ina3221_input *input = &ina->inputs[channel];
- int resistance_uo = input->shunt_resistor;
+ struct ina3221_input *input = ina->inputs;
u8 reg = ina3221_curr_reg[attr][channel];
- int regval, current_ma, voltage_uv;
+ int resistance_uo, current_ma, voltage_uv;
+ int regval;
+
+ if (channel > INA3221_CHANNEL3)
+ resistance_uo = ina->summation_shunt_resistor;
+ else
+ resistance_uo = input[channel].shunt_resistor;
+
+ if (!resistance_uo)
+ return -EOPNOTSUPP;
/* clamp current */
current_ma = clamp_val(val,
@@ -381,8 +454,21 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
/* clamp voltage */
voltage_uv = clamp_val(voltage_uv, -163800, 163800);
- /* 1 / 40uV(scale) << 3(register shift) = 5 */
- regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+ /*
+ * Formula to convert voltage_uv to register value:
+ * regval = (voltage_uv / scale) << shift
+ * Note:
+ * The scale is 40uV for all shunt voltage registers
+ * Shunt Voltage Sum register left-shifts 1 bit
+ * All other Shunt Voltage registers shift 3 bits
+ * Results:
+ * SHUNT_SUM: (1 / 40uV) << 1 = 1 / 20uV
+ * SHUNT[1-3]: (1 / 40uV) << 3 = 1 / 5uV
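+ * Example (illustrative): a 100000 uV limit becomes
+ * DIV_ROUND_CLOSEST(100000, 5) & 0xfff8 = 0x4e20 for SHUNT[1-3], or
+ * DIV_ROUND_CLOSEST(100000, 20) & 0xfffe = 0x1388 for SHUNT_SUM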
+ */
+ if (reg == INA3221_SHUNT_SUM)
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe;
+ else
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
return regmap_write(ina->regmap, reg, regval);
}
@@ -499,7 +585,10 @@ static int ina3221_read_string(struct device *dev, enum hwmon_sensor_types type,
struct ina3221_data *ina = dev_get_drvdata(dev);
int index = channel - 1;
- *str = ina->inputs[index].label;
+ if (channel == 7)
+ *str = "sum of shunt voltages";
+ else
+ *str = ina->inputs[index].label;
return 0;
}
@@ -529,6 +618,8 @@ static umode_t ina3221_is_visible(const void *drvdata,
case hwmon_in_label:
if (channel - 1 <= INA3221_CHANNEL3)
input = &ina->inputs[channel - 1];
+ else if (channel == 7)
+ return 0444;
/* Hide label node if label is not provided */
return (input && input->label) ? 0444 : 0;
case hwmon_in_input:
@@ -573,11 +664,16 @@ static const struct hwmon_channel_info *ina3221_info[] = {
/* 4-6: shunt voltage Channels */
HWMON_I_INPUT,
HWMON_I_INPUT,
- HWMON_I_INPUT),
+ HWMON_I_INPUT,
+ /* 7: summation of shunt voltage channels */
+ HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(curr,
+ /* 1-3: current channels */
+ INA3221_HWMON_CURR_CONFIG,
INA3221_HWMON_CURR_CONFIG,
INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG),
+ /* 4: summation of current channels */
+ HWMON_C_INPUT | HWMON_C_CRIT | HWMON_C_CRIT_ALARM),
NULL
};
@@ -624,6 +720,9 @@ static ssize_t ina3221_shunt_store(struct device *dev,
input->shunt_resistor = val;
+ /* Update summation_shunt_resistor for summation channel */
+ ina->summation_shunt_resistor = ina3221_summation_shunt_resistor(ina);
+
return count;
}
@@ -642,6 +741,7 @@ ATTRIBUTE_GROUPS(ina3221);
static const struct regmap_range ina3221_yes_ranges[] = {
regmap_reg_range(INA3221_CONFIG, INA3221_BUS3),
+ regmap_reg_range(INA3221_SHUNT_SUM, INA3221_SHUNT_SUM),
regmap_reg_range(INA3221_MASK_ENABLE, INA3221_MASK_ENABLE),
};
@@ -772,6 +872,9 @@ static int ina3221_probe(struct i2c_client *client,
ina->reg_config &= ~INA3221_CONFIG_CHx_EN(i);
}
+ /* Initialize summation_shunt_resistor for summation channel control */
+ ina->summation_shunt_resistor = ina3221_summation_shunt_resistor(ina);
+
ina->pm_dev = dev;
mutex_init(&ina->lock);
dev_set_drvdata(dev, ina);
@@ -875,6 +978,22 @@ static int __maybe_unused ina3221_resume(struct device *dev)
if (ret)
return ret;
+ /* Initialize summation channel control */
+ if (ina->summation_shunt_resistor) {
+ /*
+ * Take all three channels into summation by default
+ * Shunt measurements of disconnected channels should
+ * be 0, so it does not matter for summation.
+ */
+ ret = regmap_update_bits(ina->regmap, INA3221_MASK_ENABLE,
+ INA3221_MASK_ENABLE_SCC_MASK,
+ INA3221_MASK_ENABLE_SCC_MASK);
+ if (ret) {
+ dev_err(dev, "Unable to control summation channel\n");
+ return ret;
+ }
+ }
+
return 0;
}
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
new file mode 100644
index 000000000000..bb3f7749a0b0
--- /dev/null
+++ b/drivers/hwmon/ltc2947-core.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "ltc2947.h"
+
+/* registers */
+#define LTC2947_REG_PAGE_CTRL 0xFF
+#define LTC2947_REG_CTRL 0xF0
+#define LTC2947_REG_TBCTL 0xE9
+#define LTC2947_CONT_MODE_MASK BIT(3)
+#define LTC2947_CONT_MODE(x) FIELD_PREP(LTC2947_CONT_MODE_MASK, x)
+#define LTC2947_PRE_MASK GENMASK(2, 0)
+#define LTC2947_PRE(x) FIELD_PREP(LTC2947_PRE_MASK, x)
+#define LTC2947_DIV_MASK GENMASK(7, 3)
+#define LTC2947_DIV(x) FIELD_PREP(LTC2947_DIV_MASK, x)
+#define LTC2947_SHUTDOWN_MASK BIT(0)
+#define LTC2947_REG_ACCUM_POL 0xE1
+#define LTC2947_ACCUM_POL_1_MASK GENMASK(1, 0)
+#define LTC2947_ACCUM_POL_1(x) FIELD_PREP(LTC2947_ACCUM_POL_1_MASK, x)
+#define LTC2947_ACCUM_POL_2_MASK GENMASK(3, 2)
+#define LTC2947_ACCUM_POL_2(x) FIELD_PREP(LTC2947_ACCUM_POL_2_MASK, x)
+#define LTC2947_REG_ACCUM_DEADBAND 0xE4
+#define LTC2947_REG_GPIOSTATCTL 0x67
+#define LTC2947_GPIO_EN_MASK BIT(0)
+#define LTC2947_GPIO_EN(x) FIELD_PREP(LTC2947_GPIO_EN_MASK, x)
+#define LTC2947_GPIO_FAN_EN_MASK BIT(6)
+#define LTC2947_GPIO_FAN_EN(x) FIELD_PREP(LTC2947_GPIO_FAN_EN_MASK, x)
+#define LTC2947_GPIO_FAN_POL_MASK BIT(7)
+#define LTC2947_GPIO_FAN_POL(x) FIELD_PREP(LTC2947_GPIO_FAN_POL_MASK, x)
+#define LTC2947_REG_GPIO_ACCUM 0xE3
+/* 200 kHz */
+#define LTC2947_CLK_MIN 200000
+/* 25 MHz */
+#define LTC2947_CLK_MAX 25000000
+#define LTC2947_PAGE0 0
+#define LTC2947_PAGE1 1
+/* Voltage registers */
+#define LTC2947_REG_VOLTAGE 0xA0
+#define LTC2947_REG_VOLTAGE_MAX 0x50
+#define LTC2947_REG_VOLTAGE_MIN 0x52
+#define LTC2947_REG_VOLTAGE_THRE_H 0x90
+#define LTC2947_REG_VOLTAGE_THRE_L 0x92
+#define LTC2947_REG_DVCC 0xA4
+#define LTC2947_REG_DVCC_MAX 0x58
+#define LTC2947_REG_DVCC_MIN 0x5A
+#define LTC2947_REG_DVCC_THRE_H 0x98
+#define LTC2947_REG_DVCC_THRE_L 0x9A
+#define LTC2947_VOLTAGE_GEN_CHAN 0
+#define LTC2947_VOLTAGE_DVCC_CHAN 1
+/* in mV */
+#define VOLTAGE_MAX 15500
+#define VOLTAGE_MIN -300
+#define VDVCC_MAX 15000
+#define VDVCC_MIN 4750
+/* Current registers */
+#define LTC2947_REG_CURRENT 0x90
+#define LTC2947_REG_CURRENT_MAX 0x40
+#define LTC2947_REG_CURRENT_MIN 0x42
+#define LTC2947_REG_CURRENT_THRE_H 0x80
+#define LTC2947_REG_CURRENT_THRE_L 0x82
+/* in mA */
+#define CURRENT_MAX 30000
+#define CURRENT_MIN -30000
+/* Power registers */
+#define LTC2947_REG_POWER 0x93
+#define LTC2947_REG_POWER_MAX 0x44
+#define LTC2947_REG_POWER_MIN 0x46
+#define LTC2947_REG_POWER_THRE_H 0x84
+#define LTC2947_REG_POWER_THRE_L 0x86
+/* in uW */
+#define POWER_MAX 450000000
+#define POWER_MIN -450000000
+/* Temperature registers */
+#define LTC2947_REG_TEMP 0xA2
+#define LTC2947_REG_TEMP_MAX 0x54
+#define LTC2947_REG_TEMP_MIN 0x56
+#define LTC2947_REG_TEMP_THRE_H 0x94
+#define LTC2947_REG_TEMP_THRE_L 0x96
+#define LTC2947_REG_TEMP_FAN_THRE_H 0x9C
+#define LTC2947_REG_TEMP_FAN_THRE_L 0x9E
+#define LTC2947_TEMP_FAN_CHAN 1
+/* in millidegrees Celsius */
+#define TEMP_MAX 85000
+#define TEMP_MIN -40000
+/* Energy registers */
+#define LTC2947_REG_ENERGY1 0x06
+#define LTC2947_REG_ENERGY2 0x16
+/* Status/Alarm/Overflow registers */
+#define LTC2947_REG_STATUS 0x80
+#define LTC2947_REG_STATVT 0x81
+#define LTC2947_REG_STATIP 0x82
+#define LTC2947_REG_STATVDVCC 0x87
+
+#define LTC2947_ALERTS_SIZE (LTC2947_REG_STATVDVCC - LTC2947_REG_STATUS)
+#define LTC2947_MAX_VOLTAGE_MASK BIT(0)
+#define LTC2947_MIN_VOLTAGE_MASK BIT(1)
+#define LTC2947_MAX_CURRENT_MASK BIT(0)
+#define LTC2947_MIN_CURRENT_MASK BIT(1)
+#define LTC2947_MAX_POWER_MASK BIT(2)
+#define LTC2947_MIN_POWER_MASK BIT(3)
+#define LTC2947_MAX_TEMP_MASK BIT(2)
+#define LTC2947_MIN_TEMP_MASK BIT(3)
+#define LTC2947_MAX_TEMP_FAN_MASK BIT(4)
+#define LTC2947_MIN_TEMP_FAN_MASK BIT(5)
+
+struct ltc2947_data {
+ struct regmap *map;
+ struct device *dev;
+ /*
+ * The mutex is needed because the device has two memory pages. The
+ * correct page must be selected before reading or writing, so the
+ * complete select_page->read/write sequence must be protected.
+ */
+ struct mutex lock;
+ u32 lsb_energy;
+ bool gpio_out;
+};
+
+static int __ltc2947_val_read16(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be16 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 2);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(__val);
+
+ return 0;
+}
+
+static int __ltc2947_val_read24(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be32 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 3);
+ if (ret)
+ return ret;
+
+ *val = be32_to_cpu(__val) >> 8;
+
+ return 0;
+}
+
+static int __ltc2947_val_read64(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be64 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 6);
+ if (ret)
+ return ret;
+
+ *val = be64_to_cpu(__val) >> 16;
+
+ return 0;
+}
+
+static int ltc2947_val_read(struct ltc2947_data *st, const u8 reg,
+ const u8 page, const size_t size, s64 *val)
+{
+ int ret;
+ u64 __val = 0;
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
+ if (ret) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ dev_dbg(st->dev, "Read val, reg:%02X, p:%d sz:%zu\n", reg, page,
+ size);
+
+ switch (size) {
+ case 2:
+ ret = __ltc2947_val_read16(st, reg, &__val);
+ break;
+ case 3:
+ ret = __ltc2947_val_read24(st, reg, &__val);
+ break;
+ case 6:
+ ret = __ltc2947_val_read64(st, reg, &__val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ if (ret)
+ return ret;
+
+ *val = sign_extend64(__val, (8 * size) - 1);
+
+ dev_dbg(st->dev, "Got s:%lld, u:%016llX\n", *val, __val);
+
+ return 0;
+}
+
+static int __ltc2947_val_write64(const struct ltc2947_data *st, const u8 reg,
+ const u64 val)
+{
+ __be64 __val;
+
+ __val = cpu_to_be64(val << 16);
+ return regmap_bulk_write(st->map, reg, &__val, 6);
+}
+
+static int __ltc2947_val_write16(const struct ltc2947_data *st, const u8 reg,
+ const u16 val)
+{
+ __be16 __val;
+
+ __val = cpu_to_be16(val);
+ return regmap_bulk_write(st->map, reg, &__val, 2);
+}
+
+static int ltc2947_val_write(struct ltc2947_data *st, const u8 reg,
+ const u8 page, const size_t size, const u64 val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ /* set device on correct page */
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
+ if (ret) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ dev_dbg(st->dev, "Write val, r:%02X, p:%d, sz:%zu, val:%016llX\n",
+ reg, page, size, val);
+
+ switch (size) {
+ case 2:
+ ret = __ltc2947_val_write16(st, reg, val);
+ break;
+ case 6:
+ ret = __ltc2947_val_write64(st, reg, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ltc2947_reset_history(struct ltc2947_data *st, const u8 reg_h,
+ const u8 reg_l)
+{
+ int ret;
+ /*
+ * Reset the tracking registers. All tracking registers are
+ * 2 bytes wide.
+ */
+ ret = ltc2947_val_write(st, reg_h, LTC2947_PAGE0, 2, 0x8000U);
+ if (ret)
+ return ret;
+
+ return ltc2947_val_write(st, reg_l, LTC2947_PAGE0, 2, 0x7FFFU);
+}
+
+static int ltc2947_alarm_read(struct ltc2947_data *st, const u8 reg,
+ const u32 mask, long *val)
+{
+ u8 offset = reg - LTC2947_REG_STATUS;
+ /* +1 to include status reg */
+ char alarms[LTC2947_ALERTS_SIZE + 1];
+ int ret = 0;
+
+ memset(alarms, 0, sizeof(alarms));
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, LTC2947_PAGE0);
+ if (ret)
+ goto unlock;
+
+ dev_dbg(st->dev, "Read alarm, reg:%02X, mask:%02X\n", reg, mask);
+ /*
+ * As stated in the datasheet, when Threshold and Overflow registers
+ * are used, the status and all alert registers must be read in one
+ * multi-byte transaction.
+ */
+ ret = regmap_bulk_read(st->map, LTC2947_REG_STATUS, alarms,
+ sizeof(alarms));
+ if (ret)
+ goto unlock;
+
+ /* get the alarm */
+ *val = !!(alarms[offset] & mask);
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static ssize_t ltc2947_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int ret;
+ s64 val = 0;
+
+ ret = ltc2947_val_read(st, attr->index, LTC2947_PAGE0, 6, &val);
+ if (ret)
+ return ret;
+
+ /* value in microjoules: st->lsb_energy is the LSB in J scaled by 10^9 */
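+ /*
+ * e.g. (illustrative): with the internal-clock default lsb_energy of
+ * 19890 (nJ per count), a raw count of 1000 reports 19890 uJ
+ */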
+ val = div_s64(val * st->lsb_energy, 1000);
+
+ return sprintf(buf, "%lld\n", val);
+}
+
+static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val,
+ const int channel)
+{
+ int ret;
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_MAX, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_MIN, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_max_alarm:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_TEMP_FAN_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_TEMP_MASK, val);
+ case hwmon_temp_min_alarm:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_TEMP_FAN_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_TEMP_MASK, val);
+ case hwmon_temp_max:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_FAN_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ else
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_temp_min:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_FAN_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ else
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ /* in millidegrees Celsius, the temperature is given by: */
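+ /*
+ * e.g. (illustrative): a raw code of 100 yields
+ * 100 * 204 + 550 = 20950 millidegrees, i.e. roughly 21 degrees C
+ */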
+ *val = (__val * 204) + 550;
+
+ return 0;
+}
+
+static int ltc2947_read_power(struct device *dev, const u32 attr, long *val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u32 lsb = 200000; /* in uW */
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER, LTC2947_PAGE0,
+ 3, &__val);
+ lsb = 50000;
+ break;
+ case hwmon_power_input_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_MAX, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_power_input_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_MIN, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_power_max_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MAX_POWER_MASK, val);
+ case hwmon_power_min_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MIN_POWER_MASK, val);
+ case hwmon_power_max:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_power_min:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read_curr(struct device *dev, const u32 attr, long *val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u8 lsb = 12; /* in mA */
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT,
+ LTC2947_PAGE0, 3, &__val);
+ lsb = 3;
+ break;
+ case hwmon_curr_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ break;
+ case hwmon_curr_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ break;
+ case hwmon_curr_max_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MAX_CURRENT_MASK, val);
+ case hwmon_curr_min_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MIN_CURRENT_MASK, val);
+ case hwmon_curr_max:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_curr_min:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read_in(struct device *dev, const u32 attr, long *val,
+ const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u8 lsb = 2; /* in mV */
+ s64 __val = 0;
+
+ if (channel < 0 || channel > LTC2947_VOLTAGE_DVCC_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for voltage", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_in_input:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_highest:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_lowest:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_max_alarm:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVDVCC,
+ LTC2947_MAX_VOLTAGE_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_VOLTAGE_MASK, val);
+ case hwmon_in_min_alarm:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVDVCC,
+ LTC2947_MIN_VOLTAGE_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_VOLTAGE_MASK, val);
+ case hwmon_in_max:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ }
+ break;
+ case hwmon_in_min:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_read_in(dev, attr, val, channel);
+ case hwmon_curr:
+ return ltc2947_read_curr(dev, attr, val);
+ case hwmon_power:
+ return ltc2947_read_power(dev, attr, val);
+ case hwmon_temp:
+ return ltc2947_read_temp(dev, attr, val, channel);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_temp(struct device *dev, const u32 attr,
+ long val, const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ if (channel < 0 || channel > LTC2947_TEMP_FAN_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for temperature", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_temp_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_TEMP_MAX,
+ LTC2947_REG_TEMP_MIN);
+ case hwmon_temp_max:
+ val = clamp_val(val, TEMP_MIN, TEMP_MAX);
+ if (channel == LTC2947_TEMP_FAN_CHAN) {
+ if (!st->gpio_out)
+ return -ENOTSUPP;
+
+ return ltc2947_val_write(st,
+ LTC2947_REG_TEMP_FAN_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ }
+
+ return ltc2947_val_write(st, LTC2947_REG_TEMP_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ case hwmon_temp_min:
+ val = clamp_val(val, TEMP_MIN, TEMP_MAX);
+ if (channel == LTC2947_TEMP_FAN_CHAN) {
+ if (!st->gpio_out)
+ return -ENOTSUPP;
+
+ return ltc2947_val_write(st,
+ LTC2947_REG_TEMP_FAN_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ }
+
+ return ltc2947_val_write(st, LTC2947_REG_TEMP_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_power(struct device *dev, const u32 attr,
+ long val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_power_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_POWER_MAX,
+ LTC2947_REG_POWER_MIN);
+ case hwmon_power_max:
+ val = clamp_val(val, POWER_MIN, POWER_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_POWER_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 200000));
+ case hwmon_power_min:
+ val = clamp_val(val, POWER_MIN, POWER_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_POWER_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 200000));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_curr(struct device *dev, const u32 attr,
+ long val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_curr_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_CURRENT_MAX,
+ LTC2947_REG_CURRENT_MIN);
+ case hwmon_curr_max:
+ val = clamp_val(val, CURRENT_MIN, CURRENT_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_CURRENT_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 12));
+ case hwmon_curr_min:
+ val = clamp_val(val, CURRENT_MIN, CURRENT_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_CURRENT_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 12));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_in(struct device *dev, const u32 attr, long val,
+ const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ if (channel > LTC2947_VOLTAGE_DVCC_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for voltage", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_in_reset_history:
+ if (val != 1)
+ return -EINVAL;
+
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_reset_history(st, LTC2947_REG_DVCC_MAX,
+ LTC2947_REG_DVCC_MIN);
+
+ return ltc2947_reset_history(st, LTC2947_REG_VOLTAGE_MAX,
+ LTC2947_REG_VOLTAGE_MIN);
+ case hwmon_in_max:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ val = clamp_val(val, VDVCC_MIN, VDVCC_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_DVCC_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 145));
+ }
+
+ val = clamp_val(val, VOLTAGE_MIN, VOLTAGE_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_VOLTAGE_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 2));
+ case hwmon_in_min:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ val = clamp_val(val, VDVCC_MIN, VDVCC_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_DVCC_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 145));
+ }
+
+ val = clamp_val(val, VOLTAGE_MIN, VOLTAGE_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_VOLTAGE_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 2));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_write_in(dev, attr, val, channel);
+ case hwmon_curr:
+ return ltc2947_write_curr(dev, attr, val);
+ case hwmon_power:
+ return ltc2947_write_power(dev, attr, val);
+ case hwmon_temp:
+ return ltc2947_write_temp(dev, attr, val, channel);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ *str = "DVCC";
+ else
+ *str = "VP-VM";
+ return 0;
+ case hwmon_curr:
+ *str = "IP-IM";
+ return 0;
+ case hwmon_temp:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ *str = "TEMPFAN";
+ else
+ *str = "Ambient";
+ return 0;
+ case hwmon_power:
+ *str = "Power";
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_in_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_highest:
+ case hwmon_in_lowest:
+ case hwmon_in_max_alarm:
+ case hwmon_in_min_alarm:
+ case hwmon_in_label:
+ return 0444;
+ case hwmon_in_reset_history:
+ return 0200;
+ case hwmon_in_max:
+ case hwmon_in_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_curr_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_highest:
+ case hwmon_curr_lowest:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_min_alarm:
+ case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_reset_history:
+ return 0200;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_power_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_input_highest:
+ case hwmon_power_input_lowest:
+ case hwmon_power_label:
+ case hwmon_power_max_alarm:
+ case hwmon_power_min_alarm:
+ return 0444;
+ case hwmon_power_reset_history:
+ return 0200;
+ case hwmon_power_max:
+ case hwmon_power_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_temp_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_highest:
+ case hwmon_temp_lowest:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_reset_history:
+ return 0200;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t ltc2947_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_in_is_visible(attr);
+ case hwmon_curr:
+ return ltc2947_curr_is_visible(attr);
+ case hwmon_power:
+ return ltc2947_power_is_visible(attr);
+ case hwmon_temp:
+ return ltc2947_temp_is_visible(attr);
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info *ltc2947_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_RESET_HISTORY |
+ HWMON_I_MIN_ALARM | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_RESET_HISTORY |
+ HWMON_I_MIN_ALARM | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LOWEST | HWMON_C_HIGHEST |
+ HWMON_C_MAX | HWMON_C_MIN | HWMON_C_RESET_HISTORY |
+ HWMON_C_MIN_ALARM | HWMON_C_MAX_ALARM |
+ HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_INPUT_LOWEST |
+ HWMON_P_INPUT_HIGHEST | HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_RESET_HISTORY | HWMON_P_MAX_ALARM |
+ HWMON_P_MIN_ALARM | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LOWEST | HWMON_T_HIGHEST |
+ HWMON_T_MAX | HWMON_T_MIN | HWMON_T_RESET_HISTORY |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_LABEL,
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | HWMON_T_MAX |
+ HWMON_T_MIN | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops ltc2947_hwmon_ops = {
+ .is_visible = ltc2947_is_visible,
+ .read = ltc2947_read,
+ .write = ltc2947_write,
+ .read_string = ltc2947_read_labels,
+};
+
+static const struct hwmon_chip_info ltc2947_chip_info = {
+ .ops = &ltc2947_hwmon_ops,
+ .info = ltc2947_info,
+};
+
+/* energy attributes are 6 bytes wide, so we need u64 */
+static SENSOR_DEVICE_ATTR(energy1_input, 0444, ltc2947_show_value, NULL,
+ LTC2947_REG_ENERGY1);
+static SENSOR_DEVICE_ATTR(energy2_input, 0444, ltc2947_show_value, NULL,
+ LTC2947_REG_ENERGY2);
+
+static struct attribute *ltc2947_attrs[] = {
+ &sensor_dev_attr_energy1_input.dev_attr.attr,
+ &sensor_dev_attr_energy2_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc2947);
+
+static void ltc2947_clk_disable(void *data)
+{
+ struct clk *extclk = data;
+
+ clk_disable_unprepare(extclk);
+}
+
+static int ltc2947_setup(struct ltc2947_data *st)
+{
+ int ret;
+ struct clk *extclk;
+ u32 dummy, deadband, pol;
+ u32 accum[2];
+
+ /* clear status register by reading it */
+ ret = regmap_read(st->map, LTC2947_REG_STATUS, &dummy);
+ if (ret)
+ return ret;
+ /*
+ * Set the max/min thresholds for power here, since the default values
+ * multiplied by the scale would overflow on a 32-bit arch
+ */
+ ret = ltc2947_val_write(st, LTC2947_REG_POWER_THRE_H, LTC2947_PAGE1, 2,
+ POWER_MAX / 200000);
+ if (ret)
+ return ret;
+
+ ret = ltc2947_val_write(st, LTC2947_REG_POWER_THRE_L, LTC2947_PAGE1, 2,
+ POWER_MIN / 200000);
+ if (ret)
+ return ret;
+
+ /* check external clock presence */
+ extclk = devm_clk_get(st->dev, NULL);
+ if (!IS_ERR(extclk)) {
+ unsigned long rate_hz;
+ u8 pre = 0, div, tbctl;
+ u64 aux;
+
+ /* calculate and set the right values in TBCTL */
+ rate_hz = clk_get_rate(extclk);
+ if (rate_hz < LTC2947_CLK_MIN || rate_hz > LTC2947_CLK_MAX) {
+ dev_err(st->dev, "Invalid rate:%lu for external clock",
+ rate_hz);
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(extclk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(st->dev, ltc2947_clk_disable,
+ extclk);
+ if (ret)
+ return ret;
+ /* as in Table 1 of the datasheet */
+ if (rate_hz >= LTC2947_CLK_MIN && rate_hz <= 1000000)
+ pre = 0;
+ else if (rate_hz > 1000000 && rate_hz <= 2000000)
+ pre = 1;
+ else if (rate_hz > 2000000 && rate_hz <= 4000000)
+ pre = 2;
+ else if (rate_hz > 4000000 && rate_hz <= 8000000)
+ pre = 3;
+ else if (rate_hz > 8000000 && rate_hz <= 16000000)
+ pre = 4;
+ else if (rate_hz > 16000000 && rate_hz <= LTC2947_CLK_MAX)
+ pre = 5;
+ /*
+ * Div is given by:
+ * floor(fref / (2^PRE * 32768))
+ */
+ div = rate_hz / ((1 << pre) * 32768);
+ tbctl = LTC2947_PRE(pre) | LTC2947_DIV(div);
+
+ ret = regmap_write(st->map, LTC2947_REG_TBCTL, tbctl);
+ if (ret)
+ return ret;
+ /*
+ * The energy lsb is given by (in W*s):
+ * 0.6416 * (1/fref) * 2^PRE * (DIV + 1)
+ * The value is multiplied by 10^9
+ */
+ aux = (div + 1) * ((1 << pre) * 641600000ULL);
+ st->lsb_energy = DIV_ROUND_CLOSEST_ULL(aux, rate_hz);
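+ /*
+ * Illustration with a hypothetical 4 MHz external clock:
+ * PRE = 2, DIV = 4000000 / (4 * 32768) = 30, so
+ * lsb_energy = round(31 * 4 * 641600000 / 4000000) = 19890,
+ * matching the internal-clock default in the else branch
+ */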
+ } else {
+ /* 19.89E-6 * 10^9 */
+ st->lsb_energy = 19890;
+ }
+ ret = of_property_read_u32_array(st->dev->of_node,
+ "adi,accumulator-ctl-pol", accum,
+ ARRAY_SIZE(accum));
+ if (!ret) {
+ u32 accum_reg = LTC2947_ACCUM_POL_1(accum[0]) |
+ LTC2947_ACCUM_POL_2(accum[1]);
+
+ ret = regmap_write(st->map, LTC2947_REG_ACCUM_POL, accum_reg);
+ if (ret)
+ return ret;
+ }
+ ret = of_property_read_u32(st->dev->of_node,
+ "adi,accumulation-deadband-microamp",
+ &deadband);
+ if (!ret) {
+ /* the LSB is the same as for current, so 3 mA */
+ ret = regmap_write(st->map, LTC2947_REG_ACCUM_DEADBAND,
+ deadband / (1000 * 3));
+ if (ret)
+ return ret;
+ }
+ /* check gpio cfg */
+ ret = of_property_read_u32(st->dev->of_node, "adi,gpio-out-pol", &pol);
+ if (!ret) {
+ /* setup GPIO as output */
+ u32 gpio_ctl = LTC2947_GPIO_EN(1) | LTC2947_GPIO_FAN_EN(1) |
+ LTC2947_GPIO_FAN_POL(pol);
+
+ st->gpio_out = true;
+ ret = regmap_write(st->map, LTC2947_REG_GPIOSTATCTL, gpio_ctl);
+ if (ret)
+ return ret;
+ }
+ ret = of_property_read_u32_array(st->dev->of_node, "adi,gpio-in-accum",
+ accum, ARRAY_SIZE(accum));
+ if (!ret) {
+ /*
+ * Set up the accumulator options. The GPIO is already configured
+ * as an input by default.
+ */
+ u32 accum_val = LTC2947_ACCUM_POL_1(accum[0]) |
+ LTC2947_ACCUM_POL_2(accum[1]);
+
+ if (st->gpio_out) {
+ dev_err(st->dev,
+ "Cannot have input gpio config if already configured as output");
+ return -EINVAL;
+ }
+
+ ret = regmap_write(st->map, LTC2947_REG_GPIO_ACCUM, accum_val);
+ if (ret)
+ return ret;
+ }
+
+ /* set continuous mode */
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
+}
+
+int ltc2947_core_probe(struct regmap *map, const char *name)
+{
+ struct ltc2947_data *st;
+ struct device *dev = regmap_get_device(map);
+ struct device *hwmon;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->map = map;
+ st->dev = dev;
+ dev_set_drvdata(dev, st);
+ mutex_init(&st->lock);
+
+ ret = ltc2947_setup(st);
+ if (ret)
+ return ret;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, name, st,
+ &ltc2947_chip_info,
+ ltc2947_groups);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+EXPORT_SYMBOL_GPL(ltc2947_core_probe);
+
+static int __maybe_unused ltc2947_resume(struct device *dev)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ u32 ctrl = 0;
+ int ret;
+
+ /* dummy read to wake the device */
+ ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
+ if (ret)
+ return ret;
+ /*
+ * Wait for the device. It takes 100ms to wake up, so 10ms extra
+ * should be enough.
+ */
+ msleep(110);
+ ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
+ if (ret)
+ return ret;
+ /* ctrl should be 0 */
+ if (ctrl != 0) {
+ dev_err(st->dev, "Device failed to wake up, ctl:%02X\n", ctrl);
+ return -ETIMEDOUT;
+ }
+
+ /* set continuous mode */
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
+}
+
+static int __maybe_unused ltc2947_suspend(struct device *dev)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_SHUTDOWN_MASK, 1);
+}
+
+SIMPLE_DEV_PM_OPS(ltc2947_pm_ops, ltc2947_suspend, ltc2947_resume);
+EXPORT_SYMBOL_GPL(ltc2947_pm_ops);
+
+const struct of_device_id ltc2947_of_match[] = {
+ { .compatible = "adi,ltc2947" },
+ {}
+};
+EXPORT_SYMBOL_GPL(ltc2947_of_match);
+MODULE_DEVICE_TABLE(of, ltc2947_of_match);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 power and energy monitor core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
new file mode 100644
index 000000000000..cf6074b110ae
--- /dev/null
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor over I2C
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ltc2947.h"
+
+static const struct regmap_config ltc2947_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ltc2947_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_i2c(i2c, &ltc2947_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return ltc2947_core_probe(map, i2c->name);
+}
+
+static const struct i2c_device_id ltc2947_id[] = {
+ {"ltc2947", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2947_id);
+
+static struct i2c_driver ltc2947_driver = {
+ .driver = {
+ .name = "ltc2947",
+ .of_match_table = ltc2947_of_match,
+ .pm = &ltc2947_pm_ops,
+ },
+ .probe = ltc2947_probe,
+ .id_table = ltc2947_id,
+};
+module_i2c_driver(ltc2947_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 I2C power and energy monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947-spi.c b/drivers/hwmon/ltc2947-spi.c
new file mode 100644
index 000000000000..c24ca569db1b
--- /dev/null
+++ b/drivers/hwmon/ltc2947-spi.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor over SPI
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ltc2947.h"
+
+static const struct regmap_config ltc2947_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = BIT(0),
+};
+
+static int ltc2947_probe(struct spi_device *spi)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_spi(spi, &ltc2947_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return ltc2947_core_probe(map, spi_get_device_id(spi)->name);
+}
+
+static const struct spi_device_id ltc2947_id[] = {
+ {"ltc2947", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ltc2947_id);
+
+static struct spi_driver ltc2947_driver = {
+ .driver = {
+ .name = "ltc2947",
+ .of_match_table = ltc2947_of_match,
+ .pm = &ltc2947_pm_ops,
+ },
+ .probe = ltc2947_probe,
+ .id_table = ltc2947_id,
+};
+module_spi_driver(ltc2947_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 SPI power and energy monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947.h b/drivers/hwmon/ltc2947.h
new file mode 100644
index 000000000000..5b8ff81a3dba
--- /dev/null
+++ b/drivers/hwmon/ltc2947.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LTC2947_H
+#define _LINUX_LTC2947_H
+
+struct regmap;
+
+extern const struct of_device_id ltc2947_of_match[];
+extern const struct dev_pm_ops ltc2947_pm_ops;
+
+int ltc2947_core_probe(struct regmap *map, const char *name);
+
+#endif
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index d62d69bb7e49..59859979571d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -36,6 +36,15 @@ config SENSORS_ADM1275
This driver can also be built as a module. If so, the module will
be called adm1275.
+config SENSORS_BEL_PFE
+ tristate "Bel PFE Compatible Power Supplies"
+ help
+ If you say yes here you get hardware monitoring support for BEL
+ PFE1100 and PFE3000 Power Supplies.
+
+ This driver can also be built as a module. If so, the module will
+ be called bel-pfe.
+
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
depends on LEDS_CLASS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 03bacfcfd660..3f8c1014938b 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_PMBUS) += pmbus_core.o
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
+obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
new file mode 100644
index 000000000000..f236e18f45a5
--- /dev/null
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for BEL PFE family power supplies.
+ *
+ * Copyright (c) 2019 Facebook Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+
+#include "pmbus.h"
+
+enum chips {pfe1100, pfe3000};
+
+/*
+ * Disable the status check for pfe3000 devices, because some of them report
+ * a communication error (invalid command) for the VOUT_MODE command (0x20)
+ * even though the correct VOUT_MODE (0x16) is returned, which leads to an
+ * incorrect exponent in linear mode.
+ */
+static struct pmbus_platform_data pfe3000_plat_data = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
+static struct pmbus_driver_info pfe_driver_info[] = {
+ [pfe1100] = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_FAN] = linear,
+
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_POUT |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_FAN12,
+ },
+
+ [pfe3000] = {
+ .pages = 7,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_FAN] = linear,
+
+ /* Page 0: V1. */
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_FAN12 |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_VCAP,
+
+ /* Page 1: Vsb. */
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_POUT,
+
+ /*
+ * Page 2: V1 Ishare.
+ * Page 3: Reserved.
+ * Page 4: V1 Cathode.
+ * Page 5: Vsb Cathode.
+ * Page 6: V1 Sense.
+ */
+ .func[2] = PMBUS_HAVE_VOUT,
+ .func[4] = PMBUS_HAVE_VOUT,
+ .func[5] = PMBUS_HAVE_VOUT,
+ .func[6] = PMBUS_HAVE_VOUT,
+ },
+};
+
+static int pfe_pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int model;
+
+ model = (int)id->driver_data;
+
+ /*
+ * PFE3000-12-069RA devices may not stay in page 0 during device
+ * probe, which leads to a probe failure (reading the status word fails).
+ * Set the device to page 0 explicitly at the beginning.
+ */
+ if (model == pfe3000) {
+ client->dev.platform_data = &pfe3000_plat_data;
+ i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ }
+
+ return pmbus_do_probe(client, id, &pfe_driver_info[model]);
+}
+
+static const struct i2c_device_id pfe_device_id[] = {
+ {"pfe1100", pfe1100},
+ {"pfe3000", pfe3000},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pfe_device_id);
+
+static struct i2c_driver pfe_pmbus_driver = {
+ .driver = {
+ .name = "bel-pfe",
+ },
+ .probe = pfe_pmbus_probe,
+ .remove = pmbus_do_remove,
+ .id_table = pfe_device_id,
+};
+
+module_i2c_driver(pfe_pmbus_driver);
+
+MODULE_AUTHOR("Tao Ren <rentao.bupt@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for BEL PFE Family Power Supplies");
+MODULE_LICENSE("GPL");
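
Because bel-pfe has no OF match table, the supply is normally instantiated explicitly from board or platform code. A minimal sketch, assuming the PSU answers at address 0x10 on I2C bus 1 (both values are placeholders, not taken from the driver):

/* Hypothetical instantiation of a PFE3000 on I2C bus 1, address 0x10. */
#include <linux/err.h>
#include <linux/i2c.h>

static const struct i2c_board_info pfe_board_info = {
	I2C_BOARD_INFO("pfe3000", 0x10),
};

static int example_register_pfe(void)
{
	struct i2c_adapter *adap = i2c_get_adapter(1);
	struct i2c_client *client;

	if (!adap)
		return -ENODEV;

	client = i2c_new_client_device(adap, &pfe_board_info);
	i2c_put_adapter(adap);

	return IS_ERR(client) ? PTR_ERR(client) : 0;
}

Matching on the "pfe3000" id selects the pfe3000 entry in pfe_driver_info and attaches the PMBUS_SKIP_STATUS_CHECK platform data in pfe_pmbus_probe().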
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d44745e498e7..d359b76bcb36 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -3,6 +3,7 @@
* Copyright 2017 IBM Corp.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -29,6 +30,10 @@
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
+#define CFFPS_CCIN_VERSION GENMASK(15, 8)
+#define CFFPS_CCIN_VERSION_1 0x2b
+#define CFFPS_CCIN_VERSION_2 0x2e
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -39,9 +44,13 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+/*
+ * LED off state actually relinquishes LED control to PSU firmware, so it can
+ * turn on the LED for faults.
+ */
+#define CFFPS_LED_OFF 0
#define CFFPS_LED_BLINK BIT(0)
#define CFFPS_LED_ON BIT(1)
-#define CFFPS_LED_OFF BIT(2)
#define CFFPS_BLINK_RATE_MS 250
enum {
@@ -54,7 +63,7 @@ enum {
CFFPS_DEBUGFS_NUM_ENTRIES
};
-enum versions { cffps1, cffps2 };
+enum versions { cffps1, cffps2, cffps_unknown };
struct ibm_cffps_input_history {
struct mutex update_lock;
@@ -292,28 +301,38 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
-static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
+static int ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
{
int rc;
+ u8 next_led_state;
struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
if (brightness == LED_OFF) {
- psu->led_state = CFFPS_LED_OFF;
+ next_led_state = CFFPS_LED_OFF;
} else {
brightness = LED_FULL;
+
if (psu->led_state != CFFPS_LED_BLINK)
- psu->led_state = CFFPS_LED_ON;
+ next_led_state = CFFPS_LED_ON;
+ else
+ next_led_state = CFFPS_LED_BLINK;
}
+ dev_dbg(&psu->client->dev, "LED brightness set: %d. Command: %d.\n",
+ brightness, next_led_state);
+
pmbus_set_page(psu->client, 0);
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
- psu->led_state);
+ next_led_state);
if (rc < 0)
- return;
+ return rc;
+ psu->led_state = next_led_state;
led_cdev->brightness = brightness;
+
+ return 0;
}
static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
@@ -323,10 +342,7 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
int rc;
struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
- psu->led_state = CFFPS_LED_BLINK;
-
- if (led_cdev->brightness == LED_OFF)
- return 0;
+ dev_dbg(&psu->client->dev, "LED blink set.\n");
pmbus_set_page(psu->client, 0);
@@ -335,6 +351,8 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
if (rc < 0)
return rc;
+ psu->led_state = CFFPS_LED_BLINK;
+ led_cdev->brightness = LED_FULL;
*delay_on = CFFPS_BLINK_RATE_MS;
*delay_off = CFFPS_BLINK_RATE_MS;
@@ -351,7 +369,7 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
client->addr);
psu->led.name = psu->led_name;
psu->led.max_brightness = LED_FULL;
- psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.brightness_set_blocking = ibm_cffps_led_brightness_set;
psu->led.blink_set = ibm_cffps_led_blink_set;
rc = devm_led_classdev_register(dev, &psu->led);
@@ -395,7 +413,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int i, rc;
- enum versions vs;
+ enum versions vs = cffps_unknown;
struct dentry *debugfs;
struct dentry *ibm_cffps_dir;
struct ibm_cffps *psu;
@@ -405,8 +423,27 @@ static int ibm_cffps_probe(struct i2c_client *client,
vs = (enum versions)md;
else if (id)
vs = (enum versions)id->driver_data;
- else
- vs = cffps1;
+
+ if (vs == cffps_unknown) {
+ u16 ccin_version = CFFPS_CCIN_VERSION_1;
+ int ccin = i2c_smbus_read_word_swapped(client, CFFPS_CCIN_CMD);
+
+ if (ccin > 0)
+ ccin_version = FIELD_GET(CFFPS_CCIN_VERSION, ccin);
+
+ switch (ccin_version) {
+ default:
+ case CFFPS_CCIN_VERSION_1:
+ vs = cffps1;
+ break;
+ case CFFPS_CCIN_VERSION_2:
+ vs = cffps2;
+ break;
+ }
+
+ /* Set the client name to include the version number. */
+ snprintf(client->name, I2C_NAME_SIZE, "cffps%d", vs + 1);
+ }
client->dev.platform_data = &ibm_cffps_pdata;
rc = pmbus_do_probe(client, id, &ibm_cffps_info[vs]);
@@ -465,6 +502,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
static const struct i2c_device_id ibm_cffps_id[] = {
{ "ibm_cffps1", cffps1 },
{ "ibm_cffps2", cffps2 },
+ { "ibm_cffps", cffps_unknown },
{}
};
MODULE_DEVICE_TABLE(i2c, ibm_cffps_id);
@@ -478,6 +516,10 @@ static const struct of_device_id ibm_cffps_of_match[] = {
.compatible = "ibm,cffps2",
.data = (void *)cffps2
},
+ {
+ .compatible = "ibm,cffps",
+ .data = (void *)cffps_unknown
+ },
{}
};
MODULE_DEVICE_TABLE(of, ibm_cffps_of_match);
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index a94e35cff3e5..83a4fab151d2 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -127,7 +127,8 @@ static struct tmp421_data *tmp421_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
+ !data->valid) {
data->config = i2c_smbus_read_byte_data(client,
TMP421_CONFIG_REG_1);
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
new file mode 100644
index 000000000000..df66e0bc1253
--- /dev/null
+++ b/drivers/hwmon/tmp513.c
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Texas Instruments TMP512, TMP513 power monitor chips
+ *
+ * TMP513:
+ * Thermal/Power Management with Triple Remote and
+ * Local Temperature Sensor and Current Shunt Monitor
+ * Datasheet: http://www.ti.com/lit/gpn/tmp513
+ *
+ * TMP512:
+ * Thermal/Power Management with Dual Remote
+ * and Local Temperature Sensor and Current Shunt Monitor
+ * Datasheet: http://www.ti.com/lit/gpn/tmp512
+ *
+ * Copyright (C) 2019 Eric Tremblay <etremblay@distech-controls.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/util_macros.h>
+
+// Common register definition
+#define TMP51X_SHUNT_CONFIG 0x00
+#define TMP51X_TEMP_CONFIG 0x01
+#define TMP51X_STATUS 0x02
+#define TMP51X_SMBUS_ALERT 0x03
+#define TMP51X_SHUNT_CURRENT_RESULT 0x04
+#define TMP51X_BUS_VOLTAGE_RESULT 0x05
+#define TMP51X_POWER_RESULT 0x06
+#define TMP51X_BUS_CURRENT_RESULT 0x07
+#define TMP51X_LOCAL_TEMP_RESULT 0x08
+#define TMP51X_REMOTE_TEMP_RESULT_1 0x09
+#define TMP51X_REMOTE_TEMP_RESULT_2 0x0A
+#define TMP51X_SHUNT_CURRENT_H_LIMIT 0x0C
+#define TMP51X_SHUNT_CURRENT_L_LIMIT 0x0D
+#define TMP51X_BUS_VOLTAGE_H_LIMIT 0x0E
+#define TMP51X_BUS_VOLTAGE_L_LIMIT 0x0F
+#define TMP51X_POWER_LIMIT 0x10
+#define TMP51X_LOCAL_TEMP_LIMIT 0x11
+#define TMP51X_REMOTE_TEMP_LIMIT_1 0x12
+#define TMP51X_REMOTE_TEMP_LIMIT_2 0x13
+#define TMP51X_SHUNT_CALIBRATION 0x15
+#define TMP51X_N_FACTOR_AND_HYST_1 0x16
+#define TMP51X_N_FACTOR_2 0x17
+#define TMP51X_MAN_ID_REG 0xFE
+#define TMP51X_DEVICE_ID_REG 0xFF
+
+// TMP513 specific register definition
+#define TMP513_REMOTE_TEMP_RESULT_3 0x0B
+#define TMP513_REMOTE_TEMP_LIMIT_3 0x14
+#define TMP513_N_FACTOR_3 0x18
+
+// Manufacturer and device ID values
+#define TMP51X_MANUFACTURER_ID 0x55FF
+
+#define TMP512_DEVICE_ID 0x22FF
+#define TMP513_DEVICE_ID 0x23FF
+
+// Default config
+#define TMP51X_SHUNT_CONFIG_DEFAULT 0x399F
+#define TMP51X_SHUNT_VALUE_DEFAULT 1000
+#define TMP51X_VBUS_RANGE_DEFAULT TMP51X_VBUS_RANGE_32V
+#define TMP51X_PGA_DEFAULT 8
+#define TMP51X_MAX_REGISTER_ADDR 0xFF
+
+#define TMP512_TEMP_CONFIG_DEFAULT 0xBF80
+#define TMP513_TEMP_CONFIG_DEFAULT 0xFF80
+
+// Mask and shift
+#define CURRENT_SENSE_VOLTAGE_320_MASK 0x1800
+#define CURRENT_SENSE_VOLTAGE_160_MASK 0x1000
+#define CURRENT_SENSE_VOLTAGE_80_MASK 0x0800
+#define CURRENT_SENSE_VOLTAGE_40_MASK 0
+
+#define TMP51X_BUS_VOLTAGE_MASK 0x2000
+#define TMP51X_NFACTOR_MASK 0xFF00
+#define TMP51X_HYST_MASK 0x00FF
+
+#define TMP51X_BUS_VOLTAGE_SHIFT 3
+#define TMP51X_TEMP_SHIFT 3
+
+// Alarms
+#define TMP51X_SHUNT_CURRENT_H_LIMIT_POS 15
+#define TMP51X_SHUNT_CURRENT_L_LIMIT_POS 14
+#define TMP51X_BUS_VOLTAGE_H_LIMIT_POS 13
+#define TMP51X_BUS_VOLTAGE_L_LIMIT_POS 12
+#define TMP51X_POWER_LIMIT_POS 11
+#define TMP51X_LOCAL_TEMP_LIMIT_POS 10
+#define TMP51X_REMOTE_TEMP_LIMIT_1_POS 9
+#define TMP51X_REMOTE_TEMP_LIMIT_2_POS 8
+#define TMP513_REMOTE_TEMP_LIMIT_3_POS 7
+
+#define TMP51X_VBUS_RANGE_32V 32000000
+#define TMP51X_VBUS_RANGE_16V 16000000
+
+// Max and Min value
+#define MAX_BUS_VOLTAGE_32_LIMIT 32764
+#define MAX_BUS_VOLTAGE_16_LIMIT 16382
+
+// Max possible value is -256 to +256 but the datasheet indicates -40 to 125.
+#define MAX_TEMP_LIMIT 125000
+#define MIN_TEMP_LIMIT -40000
+
+#define MAX_TEMP_HYST 127500
+
+static const u8 TMP51X_TEMP_INPUT[4] = {
+ TMP51X_LOCAL_TEMP_RESULT,
+ TMP51X_REMOTE_TEMP_RESULT_1,
+ TMP51X_REMOTE_TEMP_RESULT_2,
+ TMP513_REMOTE_TEMP_RESULT_3
+};
+
+static const u8 TMP51X_TEMP_CRIT[4] = {
+ TMP51X_LOCAL_TEMP_LIMIT,
+ TMP51X_REMOTE_TEMP_LIMIT_1,
+ TMP51X_REMOTE_TEMP_LIMIT_2,
+ TMP513_REMOTE_TEMP_LIMIT_3
+};
+
+static const u8 TMP51X_TEMP_CRIT_ALARM[4] = {
+ TMP51X_LOCAL_TEMP_LIMIT_POS,
+ TMP51X_REMOTE_TEMP_LIMIT_1_POS,
+ TMP51X_REMOTE_TEMP_LIMIT_2_POS,
+ TMP513_REMOTE_TEMP_LIMIT_3_POS
+};
+
+static const u8 TMP51X_TEMP_CRIT_HYST[4] = {
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1
+};
+
+static const u8 TMP51X_CURR_INPUT[2] = {
+ TMP51X_SHUNT_CURRENT_RESULT,
+ TMP51X_BUS_CURRENT_RESULT
+};
+
+static struct regmap_config tmp51x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP51X_MAX_REGISTER_ADDR,
+};
+
+enum tmp51x_ids {
+ tmp512, tmp513
+};
+
+struct tmp51x_data {
+ u16 shunt_config;
+ u16 pga_gain;
+ u32 vbus_range_uvolt;
+
+ u16 temp_config;
+ u32 nfactor[3];
+
+ u32 shunt_uohms;
+
+ u32 curr_lsb_ua;
+ u32 pwr_lsb_uw;
+
+ enum tmp51x_ids id;
+ struct regmap *regmap;
+};
+
+// Derive the shift from the PGA gain: ffs(gain) is 4, 3, 2, 1 for gains 8, 4, 2, 1
+static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
+{
+ return 5 - ffs(data->pga_gain);
+}
+
+static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ unsigned int regval, long *val)
+{
+ switch (reg) {
+ case TMP51X_STATUS:
+ *val = (regval >> pos) & 1;
+ break;
+ case TMP51X_SHUNT_CURRENT_RESULT:
+ case TMP51X_SHUNT_CURRENT_H_LIMIT:
+ case TMP51X_SHUNT_CURRENT_L_LIMIT:
+ /*
+ * The value is read as a voltage by the chip but reported to the
+ * user as a current.
+ * It is a 2's complement number, shifted by one to four bits depending
+ * on the PGA gain setting. 1lsb = 10uV
+ */
+ *val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
+ *val = DIV_ROUND_CLOSEST(*val * 10000, data->shunt_uohms);
+ break;
+ case TMP51X_BUS_VOLTAGE_RESULT:
+ case TMP51X_BUS_VOLTAGE_H_LIMIT:
+ case TMP51X_BUS_VOLTAGE_L_LIMIT:
+ // 1lsb = 4mV
+ *val = (regval >> TMP51X_BUS_VOLTAGE_SHIFT) * 4;
+ break;
+ case TMP51X_POWER_RESULT:
+ case TMP51X_POWER_LIMIT:
+ // Power = (current * BusVoltage) / 5000
+ *val = regval * data->pwr_lsb_uw;
+ break;
+ case TMP51X_BUS_CURRENT_RESULT:
+ // Current = (ShuntVoltage * CalibrationRegister) / 4096
+ *val = sign_extend32(regval, 16) * data->curr_lsb_ua;
+ *val = DIV_ROUND_CLOSEST(*val, 1000);
+ break;
+ case TMP51X_LOCAL_TEMP_RESULT:
+ case TMP51X_REMOTE_TEMP_RESULT_1:
+ case TMP51X_REMOTE_TEMP_RESULT_2:
+ case TMP513_REMOTE_TEMP_RESULT_3:
+ case TMP51X_LOCAL_TEMP_LIMIT:
+ case TMP51X_REMOTE_TEMP_LIMIT_1:
+ case TMP51X_REMOTE_TEMP_LIMIT_2:
+ case TMP513_REMOTE_TEMP_LIMIT_3:
+ // 1lsb = 0.0625 degrees centigrade
+ *val = sign_extend32(regval, 16) >> TMP51X_TEMP_SHIFT;
+ *val = DIV_ROUND_CLOSEST(*val * 625, 10);
+ break;
+ case TMP51X_N_FACTOR_AND_HYST_1:
+ // 1lsb = 0.5 degrees centigrade
+ *val = (regval & TMP51X_HYST_MASK) * 500;
+ break;
+ default:
+ // Programmer goofed
+ WARN_ON_ONCE(1);
+ *val = 0;
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
+{
+ int regval, max_val;
+ u32 mask = 0;
+
+ switch (reg) {
+ case TMP51X_SHUNT_CURRENT_H_LIMIT:
+ case TMP51X_SHUNT_CURRENT_L_LIMIT:
+ /*
+ * The user enters a current value which we convert to a
+ * voltage. 1lsb = 10uV
+ */
+ val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10000);
+ max_val = U16_MAX >> tmp51x_get_pga_shift(data);
+ regval = clamp_val(val, -max_val, max_val);
+ break;
+ case TMP51X_BUS_VOLTAGE_H_LIMIT:
+ case TMP51X_BUS_VOLTAGE_L_LIMIT:
+ // 1lsb = 4mV
+ max_val = (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_32V) ?
+ MAX_BUS_VOLTAGE_32_LIMIT : MAX_BUS_VOLTAGE_16_LIMIT;
+
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 4), 0, max_val);
+ regval = val << TMP51X_BUS_VOLTAGE_SHIFT;
+ break;
+ case TMP51X_POWER_LIMIT:
+ regval = clamp_val(DIV_ROUND_CLOSEST(val, data->pwr_lsb_uw), 0,
+ U16_MAX);
+ break;
+ case TMP51X_LOCAL_TEMP_LIMIT:
+ case TMP51X_REMOTE_TEMP_LIMIT_1:
+ case TMP51X_REMOTE_TEMP_LIMIT_2:
+ case TMP513_REMOTE_TEMP_LIMIT_3:
+ // 1lsb = 0.0625 degrees centigrade
+ val = clamp_val(val, MIN_TEMP_LIMIT, MAX_TEMP_LIMIT);
+ regval = DIV_ROUND_CLOSEST(val * 10, 625) << TMP51X_TEMP_SHIFT;
+ break;
+ case TMP51X_N_FACTOR_AND_HYST_1:
+ // 1lsb = 0.5 degrees centigrade
+ val = clamp_val(val, 0, MAX_TEMP_HYST);
+ regval = DIV_ROUND_CLOSEST(val, 500);
+ mask = TMP51X_HYST_MASK;
+ break;
+ default:
+ // Programmer goofed
+ WARN_ON_ONCE(1);
+ return -EOPNOTSUPP;
+ }
+
+ if (mask == 0)
+ return regmap_write(data->regmap, reg, regval);
+ else
+ return regmap_update_bits(data->regmap, reg, mask, regval);
+}
+
+static u8 tmp51x_get_reg(enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return TMP51X_TEMP_INPUT[channel];
+ case hwmon_temp_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_temp_crit:
+ return TMP51X_TEMP_CRIT[channel];
+ case hwmon_temp_crit_hyst:
+ return TMP51X_TEMP_CRIT_HYST[channel];
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return TMP51X_BUS_VOLTAGE_RESULT;
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_in_lcrit:
+ return TMP51X_BUS_VOLTAGE_L_LIMIT;
+ case hwmon_in_crit:
+ return TMP51X_BUS_VOLTAGE_H_LIMIT;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ return TMP51X_CURR_INPUT[channel];
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_curr_lcrit:
+ return TMP51X_SHUNT_CURRENT_L_LIMIT;
+ case hwmon_curr_crit:
+ return TMP51X_SHUNT_CURRENT_H_LIMIT;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ return TMP51X_POWER_RESULT;
+ case hwmon_power_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_power_crit:
+ return TMP51X_POWER_LIMIT;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static u8 tmp51x_get_status_pos(enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_crit_alarm:
+ return TMP51X_TEMP_CRIT_ALARM[channel];
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_lcrit_alarm:
+ return TMP51X_BUS_VOLTAGE_L_LIMIT_POS;
+ case hwmon_in_crit_alarm:
+ return TMP51X_BUS_VOLTAGE_H_LIMIT_POS;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_lcrit_alarm:
+ return TMP51X_SHUNT_CURRENT_L_LIMIT_POS;
+ case hwmon_curr_crit_alarm:
+ return TMP51X_SHUNT_CURRENT_H_LIMIT_POS;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_crit_alarm:
+ return TMP51X_POWER_LIMIT_POS;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tmp51x_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct tmp51x_data *data = dev_get_drvdata(dev);
+ int ret;
+ u32 regval;
+ u8 pos = 0, reg = 0;
+
+ reg = tmp51x_get_reg(type, attr, channel);
+ if (reg == 0)
+ return -EOPNOTSUPP;
+
+ if (reg == TMP51X_STATUS)
+ pos = tmp51x_get_status_pos(type, attr, channel);
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ return tmp51x_get_value(data, reg, pos, regval, val);
+}
+
+static int tmp51x_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ u8 reg = 0;
+
+ reg = tmp51x_get_reg(type, attr, channel);
+ if (reg == 0)
+ return -EOPNOTSUPP;
+
+ return tmp51x_set_value(dev_get_drvdata(dev), reg, val);
+}
+
+static umode_t tmp51x_is_visible(const void *_data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct tmp51x_data *data = _data;
+
+ switch (type) {
+ case hwmon_temp:
+ if (data->id == tmp512 && channel == 4)
+ return 0;
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_crit:
+ return 0644;
+ case hwmon_temp_crit_hyst:
+ if (channel == 0)
+ return 0644;
+ return 0444;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ return 0444;
+ case hwmon_in_lcrit:
+ case hwmon_in_crit:
+ return 0644;
+ }
+ break;
+ case hwmon_curr:
+ if (!data->shunt_uohms)
+ return 0;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ return 0444;
+ case hwmon_curr_lcrit:
+ case hwmon_curr_crit:
+ return 0644;
+ }
+ break;
+ case hwmon_power:
+ if (!data->shunt_uohms)
+ return 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_crit:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *tmp51x_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LCRIT | HWMON_I_LCRIT_ALARM |
+ HWMON_I_CRIT | HWMON_I_CRIT_ALARM),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LCRIT | HWMON_C_LCRIT_ALARM |
+ HWMON_C_CRIT | HWMON_C_CRIT_ALARM,
+ HWMON_C_INPUT),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops tmp51x_hwmon_ops = {
+ .is_visible = tmp51x_is_visible,
+ .read = tmp51x_read,
+ .write = tmp51x_write,
+};
+
+static const struct hwmon_chip_info tmp51x_chip_info = {
+ .ops = &tmp51x_hwmon_ops,
+ .info = tmp51x_info,
+};
+
+/*
+ * Calibrate the tmp51x following the datasheet method
+ */
+static int tmp51x_calibrate(struct tmp51x_data *data)
+{
+ int vshunt_max = data->pga_gain * 40;
+ u64 max_curr_ma;
+ u32 div;
+
+ /*
+ * If shunt_uohms is equal to 0, the calibration should be set to 0.
+ * The consequence will be that the current and power measurement engine
+ * of the sensor will not work. Temperature and voltage sensing will
+ * continue to work.
+ */
+ if (data->shunt_uohms == 0)
+ return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION, 0);
+
+ max_curr_ma = DIV_ROUND_CLOSEST_ULL(vshunt_max * 1000 * 1000,
+ data->shunt_uohms);
+
+ /*
+ * Calculate the minimal bit resolution for the current and the power.
+ * Those values will be used during register interpretation.
+ */
+ data->curr_lsb_ua = DIV_ROUND_CLOSEST_ULL(max_curr_ma * 1000, 32767);
+ data->pwr_lsb_uw = 20 * data->curr_lsb_ua;
+
+ div = DIV_ROUND_CLOSEST_ULL(data->curr_lsb_ua * data->shunt_uohms,
+ 1000 * 1000);
+
+ return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION,
+ DIV_ROUND_CLOSEST(40960, div));
+}
+
+/*
+ * Initialize the configuration and calibration registers.
+ */
+static int tmp51x_init(struct tmp51x_data *data)
+{
+ unsigned int regval;
+ int ret = regmap_write(data->regmap, TMP51X_SHUNT_CONFIG,
+ data->shunt_config);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, TMP51X_TEMP_CONFIG, data->temp_config);
+ if (ret < 0)
+ return ret;
+
+ // nFactor configuration
+ ret = regmap_update_bits(data->regmap, TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_NFACTOR_MASK, data->nfactor[0] << 8);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, TMP51X_N_FACTOR_2,
+ data->nfactor[1] << 8);
+ if (ret < 0)
+ return ret;
+
+ if (data->id == tmp513) {
+ ret = regmap_write(data->regmap, TMP513_N_FACTOR_3,
+ data->nfactor[2] << 8);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = tmp51x_calibrate(data);
+ if (ret < 0)
+ return ret;
+
+ // Read the status register before use, as the datasheet proposes
+ return regmap_read(data->regmap, TMP51X_STATUS, &regval);
+}
+
+static const struct i2c_device_id tmp51x_id[] = {
+ { "tmp512", tmp512 },
+ { "tmp513", tmp513 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp51x_id);
+
+static const struct of_device_id tmp51x_of_match[] = {
+ {
+ .compatible = "ti,tmp512",
+ .data = (void *)tmp512
+ },
+ {
+ .compatible = "ti,tmp513",
+ .data = (void *)tmp513
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tmp51x_of_match);
+
+static int tmp51x_vbus_range_to_reg(struct device *dev,
+ struct tmp51x_data *data)
+{
+ if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_32V) {
+ data->shunt_config |= TMP51X_BUS_VOLTAGE_MASK;
+ } else if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_16V) {
+ data->shunt_config &= ~TMP51X_BUS_VOLTAGE_MASK;
+ } else {
+ dev_err(dev, "ti,bus-range-microvolt is invalid: %u\n",
+ data->vbus_range_uvolt);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tmp51x_pga_gain_to_reg(struct device *dev, struct tmp51x_data *data)
+{
+ if (data->pga_gain == 8) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_320_MASK;
+ } else if (data->pga_gain == 4) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_160_MASK;
+ } else if (data->pga_gain == 2) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_80_MASK;
+ } else if (data->pga_gain == 1) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_40_MASK;
+ } else {
+ dev_err(dev, "ti,pga-gain is invalid: %u\n", data->pga_gain);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tmp51x_read_properties(struct device *dev, struct tmp51x_data *data)
+{
+ int ret;
+ u32 nfactor[3];
+ u32 val;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms", &val);
+ data->shunt_uohms = (ret >= 0) ? val : TMP51X_SHUNT_VALUE_DEFAULT;
+
+ ret = device_property_read_u32(dev, "ti,bus-range-microvolt", &val);
+ data->vbus_range_uvolt = (ret >= 0) ? val : TMP51X_VBUS_RANGE_DEFAULT;
+ ret = tmp51x_vbus_range_to_reg(dev, data);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32(dev, "ti,pga-gain", &val);
+ data->pga_gain = (ret >= 0) ? val : TMP51X_PGA_DEFAULT;
+ ret = tmp51x_pga_gain_to_reg(dev, data);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32_array(dev, "ti,nfactor", nfactor,
+ (data->id == tmp513) ? 3 : 2);
+ if (ret >= 0)
+ memcpy(data->nfactor, nfactor, ((data->id == tmp513) ? 3 : 2) * sizeof(u32));
+
+ // Check if shunt value is compatible with pga-gain
+ if (data->shunt_uohms > data->pga_gain * 40 * 1000 * 1000) {
+ dev_err(dev, "shunt-resistor: %u too big for pga_gain: %u\n",
+ data->shunt_uohms, data->pga_gain);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void tmp51x_use_default(struct tmp51x_data *data)
+{
+ data->vbus_range_uvolt = TMP51X_VBUS_RANGE_DEFAULT;
+ data->pga_gain = TMP51X_PGA_DEFAULT;
+ data->shunt_uohms = TMP51X_SHUNT_VALUE_DEFAULT;
+}
+
+static int tmp51x_configure(struct device *dev, struct tmp51x_data *data)
+{
+ data->shunt_config = TMP51X_SHUNT_CONFIG_DEFAULT;
+ data->temp_config = (data->id == tmp513) ?
+ TMP513_TEMP_CONFIG_DEFAULT : TMP512_TEMP_CONFIG_DEFAULT;
+
+ if (dev->of_node)
+ return tmp51x_read_properties(dev, data);
+
+ tmp51x_use_default(data);
+
+ return 0;
+}
+
+static int tmp51x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tmp51x_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (client->dev.of_node)
+ data->id = (enum tmp51x_ids)device_get_match_data(&client->dev);
+ else
+ data->id = id->driver_data;
+
+ ret = tmp51x_configure(dev, data);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return ret;
+ }
+
+ data->regmap = devm_regmap_init_i2c(client, &tmp51x_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = tmp51x_init(data);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &tmp51x_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_dbg(dev, "power monitor %s\n", id->name);
+
+ return 0;
+}
+
+static struct i2c_driver tmp51x_driver = {
+ .driver = {
+ .name = "tmp51x",
+ .of_match_table = of_match_ptr(tmp51x_of_match),
+ },
+ .probe = tmp51x_probe,
+ .id_table = tmp51x_id,
+};
+
+module_i2c_driver(tmp51x_driver);
+
+MODULE_AUTHOR("Eric Tremblay <etremblay@distechcontrols.com>");
+MODULE_DESCRIPTION("tmp51x driver");
+MODULE_LICENSE("GPL");
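
For a concrete feel of the calibration code above, a worked pass through tmp51x_calibrate() with the driver defaults (shunt = 1000 micro-ohms, PGA gain = 8) gives:

	vshunt_max  = 8 * 40                   = 320 mV
	max_curr_ma = 320 * 1000 * 1000 / 1000 = 320000 mA (320 A full scale)
	curr_lsb_ua = 320000 * 1000 / 32767   ~= 9766 uA
	pwr_lsb_uw  = 20 * 9766                = 195320 uW
	div         = 9766 * 1000 / 1000000   ~= 10
	calibration = 40960 / 10               = 4096

so the default configuration writes 4096 to TMP51X_SHUNT_CALIBRATION, and raw current and power readings are scaled by roughly 9.8 mA and 195 mW per LSB.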
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 9df48b70c70c..3f59f2a1a5e3 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1458,6 +1458,7 @@ static const struct file_operations watchdog_fops = {
.release = watchdog_close,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/*
@@ -2096,7 +2097,7 @@ END:
static u8 w83793_read_value(struct i2c_client *client, u16 reg)
{
struct w83793_data *data = i2c_get_clientdata(client);
- u8 res = 0xff;
+ u8 res;
u8 new_bank = reg >> 8;
new_bank |= data->bank & 0xfc;
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 8862445aa858..fd5f5c5a5244 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -92,8 +92,8 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
int ret;
- BUG_ON(!hwlock);
- BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+ if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
+ return -EINVAL;
/*
* This spin_lock{_irq, _irqsave} serves three purposes:
@@ -264,8 +264,8 @@ EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
*/
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
- BUG_ON(!hwlock);
- BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+ if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
+ return;
/*
* We must make sure that memory operations (both reads and writes),
@@ -657,13 +657,15 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
/* notify PM core that power is now needed */
ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
+ if (ret < 0 && ret != -EACCES) {
dev_err(dev, "%s: can't power on device\n", __func__);
pm_runtime_put_noidle(dev);
module_put(dev->driver->owner);
return ret;
}
+ ret = 0;
+
/* mark hwspinlock as used, should not fail */
tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
@@ -820,9 +822,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
}
/* notify the underlying device that power is not needed */
- ret = pm_runtime_put(dev);
- if (ret < 0)
- goto out;
+ pm_runtime_put(dev);
/* mark this hwspinlock as available */
tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
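
With the BUG_ON()s relaxed to WARN_ON() plus an error return, passing a NULL lock or a missing flags pointer now yields -EINVAL instead of an oops, so callers should keep checking return codes. A minimal sketch of a caller, assuming hwspinlock id 42 and a 100 ms timeout (both placeholders):

#include <linux/hwspinlock.h>

static int example_use_hwspinlock(void)
{
	struct hwspinlock *hwlock;
	unsigned long flags;
	int ret;

	hwlock = hwspin_lock_request_specific(42);
	if (!hwlock)
		return -EBUSY;

	/* 0 on success, -ETIMEDOUT on timeout, -EINVAL on bad arguments */
	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
	if (!ret) {
		/* ... access the shared resource ... */
		hwspin_unlock_irqrestore(hwlock, &flags);
	}

	hwspin_lock_free(hwlock);
	return ret;
}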
diff --git a/drivers/hwspinlock/sprd_hwspinlock.c b/drivers/hwspinlock/sprd_hwspinlock.c
index dc42bf51f3e6..36dc8038bbb4 100644
--- a/drivers/hwspinlock/sprd_hwspinlock.c
+++ b/drivers/hwspinlock/sprd_hwspinlock.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "hwspinlock_internal.h"
@@ -79,11 +78,17 @@ static const struct hwspinlock_ops sprd_hwspinlock_ops = {
.relax = sprd_hwspinlock_relax,
};
+static void sprd_hwspinlock_disable(void *data)
+{
+ struct sprd_hwspinlock_dev *sprd_hwlock = data;
+
+ clk_disable_unprepare(sprd_hwlock->clk);
+}
+
static int sprd_hwspinlock_probe(struct platform_device *pdev)
{
struct sprd_hwspinlock_dev *sprd_hwlock;
struct hwspinlock *lock;
- struct resource *res;
int i, ret;
if (!pdev->dev.of_node)
@@ -96,8 +101,7 @@ static int sprd_hwspinlock_probe(struct platform_device *pdev)
if (!sprd_hwlock)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sprd_hwlock->base = devm_ioremap_resource(&pdev->dev, res);
+ sprd_hwlock->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sprd_hwlock->base))
return PTR_ERR(sprd_hwlock->base);
@@ -107,7 +111,17 @@ static int sprd_hwspinlock_probe(struct platform_device *pdev)
return PTR_ERR(sprd_hwlock->clk);
}
- clk_prepare_enable(sprd_hwlock->clk);
+ ret = clk_prepare_enable(sprd_hwlock->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev, sprd_hwspinlock_disable,
+ sprd_hwlock);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to add hwspinlock disable action\n");
+ return ret;
+ }
/* set the hwspinlock to record user id to identify subsystems */
writel(HWSPINLOCK_USER_BITS, sprd_hwlock->base + HWSPINLOCK_RECCTRL);
@@ -118,27 +132,10 @@ static int sprd_hwspinlock_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, sprd_hwlock);
- pm_runtime_enable(&pdev->dev);
- ret = hwspin_lock_register(&sprd_hwlock->bank, &pdev->dev,
- &sprd_hwspinlock_ops, 0, SPRD_HWLOCKS_NUM);
- if (ret) {
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(sprd_hwlock->clk);
- return ret;
- }
-
- return 0;
-}
-
-static int sprd_hwspinlock_remove(struct platform_device *pdev)
-{
- struct sprd_hwspinlock_dev *sprd_hwlock = platform_get_drvdata(pdev);
-
- hwspin_lock_unregister(&sprd_hwlock->bank);
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(sprd_hwlock->clk);
- return 0;
+ return devm_hwspin_lock_register(&pdev->dev, &sprd_hwlock->bank,
+ &sprd_hwspinlock_ops, 0,
+ SPRD_HWLOCKS_NUM);
}
static const struct of_device_id sprd_hwspinlock_of_match[] = {
@@ -149,7 +146,6 @@ MODULE_DEVICE_TABLE(of, sprd_hwspinlock_of_match);
static struct platform_driver sprd_hwspinlock_driver = {
.probe = sprd_hwspinlock_probe,
- .remove = sprd_hwspinlock_remove,
.driver = {
.name = "sprd_hwspinlock",
.of_match_table = of_match_ptr(sprd_hwspinlock_of_match),
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 572ca79d77e8..67845c0c9701 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
@@ -88,21 +87,16 @@ static int u8500_hsem_probe(struct platform_device *pdev)
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
struct hwspinlock_device *bank;
struct hwspinlock *hwlock;
- struct resource *res;
void __iomem *io_base;
- int i, ret, num_locks = U8500_MAX_SEMAPHORE;
+ int i, num_locks = U8500_MAX_SEMAPHORE;
ulong val;
if (!pdata)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- io_base = ioremap(res->start, resource_size(res));
- if (!io_base)
- return -ENOMEM;
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
/* make sure protocol 1 is selected */
val = readl(io_base + HSEM_CTRL_REG);
@@ -111,54 +105,29 @@ static int u8500_hsem_probe(struct platform_device *pdev)
/* clear all interrupts */
writel(0xFFFF, io_base + HSEM_ICRALL);
- bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
- if (!bank) {
- ret = -ENOMEM;
- goto iounmap_base;
- }
+ bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
+ GFP_KERNEL);
+ if (!bank)
+ return -ENOMEM;
platform_set_drvdata(pdev, bank);
for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i;
- /* no pm needed for HSem but required to comply with hwspilock core */
- pm_runtime_enable(&pdev->dev);
-
- ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops,
- pdata->base_id, num_locks);
- if (ret)
- goto reg_fail;
-
- return 0;
-
-reg_fail:
- pm_runtime_disable(&pdev->dev);
- kfree(bank);
-iounmap_base:
- iounmap(io_base);
- return ret;
+ return devm_hwspin_lock_register(&pdev->dev, bank,
+ &u8500_hwspinlock_ops,
+ pdata->base_id, num_locks);
}
static int u8500_hsem_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
- int ret;
/* clear all interrupts */
writel(0xFFFF, io_base + HSEM_ICRALL);
- ret = hwspin_lock_unregister(bank);
- if (ret) {
- dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
- return ret;
- }
-
- pm_runtime_disable(&pdev->dev);
- iounmap(io_base);
- kfree(bank);
-
return 0;
}
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 7a9f5fb08330..6ff30e25af55 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -4,6 +4,7 @@
#
menuconfig CORESIGHT
bool "CoreSight Tracing Support"
+ depends on ARM || ARM64
depends on OF || ACPI
select ARM_AMBA
select PERF_EVENTS
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 219c10eb752c..ce41482431f9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -217,6 +217,7 @@ static ssize_t reset_store(struct device *dev,
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
+ config->vipcssctlr = 0x0;
/* Disable seq events */
for (i = 0; i < drvdata->nrseqstate-1; i++)
@@ -238,6 +239,7 @@ static ssize_t reset_store(struct device *dev,
for (i = 0; i < drvdata->nr_resource; i++)
config->res_ctrl[i] = 0x0;
+ config->ss_idx = 0x0;
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_ctrl[i] = 0x0;
config->ss_pe_cmp[i] = 0x0;
@@ -296,8 +298,6 @@ static ssize_t mode_store(struct device *dev,
spin_lock(&drvdata->spinlock);
config->mode = val & ETMv4_MODE_ALL;
- etm4_set_mode_exclude(drvdata,
- config->mode & ETM_MODE_EXCLUDE ? true : false);
if (drvdata->instrp0 == true) {
/* start by clearing instruction P0 field */
@@ -652,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
+
+ /* mask off max threshold before checking min value */
+ val &= ETM_CYC_THRESHOLD_MASK;
if (val < drvdata->ccitmin)
return -EINVAL;
- config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+ config->ccctlr = val;
return size;
}
static DEVICE_ATTR_RW(cyc_threshold);
@@ -686,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev,
return -EINVAL;
if (!drvdata->nr_addr_cmp)
return -EINVAL;
+
/*
- * Bit[7:0] selects which address range comparator is used for
- * branch broadcast control.
+ * Bit[8] controls include (1) / exclude (0), bits[7:0] select
+ * individual range comparators. If include is selected, at least
+ * one range comparator must be selected.
*/
- if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+ if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
return -EINVAL;
- config->bb_ctrl = val;
+ config->bb_ctrl = val & GENMASK(8, 0);
return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
@@ -738,7 +743,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = BMVAL(config->vinst_ctrl, 16, 19);
+ val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -754,8 +759,8 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
- config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
+ /* clear all EXLEVEL_S bits */
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
config->vinst_ctrl |= (val << 16);
@@ -773,7 +778,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = BMVAL(config->vinst_ctrl, 20, 23);
+ val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -789,8 +794,8 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- /* clear EXLEVEL_NS bits (bit[23] is never implemented */
- config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
+ /* clear EXLEVEL_NS bits */
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
config->vinst_ctrl |= (val << 20);
@@ -966,8 +971,12 @@ static ssize_t addr_range_store(struct device *dev,
unsigned long val1, val2;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int elements, exclude;
+
+ elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* exclude is optional, but need at least two parameter */
+ if (elements < 2)
return -EINVAL;
/* lower address comparator cannot have a higher address value */
if (val1 > val2)
@@ -995,9 +1004,11 @@ static ssize_t addr_range_store(struct device *dev,
/*
* Program include or exclude control bits for vinst or vdata
* whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
+ * use the supplied value, or default to the bit set in 'mode'
*/
- etm4_set_mode_exclude(drvdata,
- config->mode & ETM_MODE_EXCLUDE ? true : false);
+ if (elements != 3)
+ exclude = config->mode & ETM_MODE_EXCLUDE;
+ etm4_set_mode_exclude(drvdata, exclude ? true : false);
spin_unlock(&drvdata->spinlock);
return size;
@@ -1054,8 +1065,6 @@ static ssize_t addr_start_store(struct device *dev,
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->vissctlr |= BIT(idx);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- config->vinst_ctrl |= BIT(9);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1111,8 +1120,6 @@ static ssize_t addr_stop_store(struct device *dev,
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
config->vissctlr |= BIT(idx + 16);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- config->vinst_ctrl |= BIT(9);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1228,6 +1235,131 @@ static ssize_t addr_context_store(struct device *dev,
}
static DEVICE_ATTR_RW(addr_context);
+static ssize_t addr_exlevel_s_ns_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ val = BMVAL(config->addr_acc[idx], 8, 14);
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_exlevel_s_ns_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val & ~((GENMASK(14, 8) >> 8)))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
+ config->addr_acc[idx] &= ~(GENMASK(14, 8));
+ config->addr_acc[idx] |= (val << 8);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_exlevel_s_ns);
+
+static const char * const addr_type_names[] = {
+ "unused",
+ "single",
+ "range",
+ "start",
+ "stop"
+};
+
+static ssize_t addr_cmp_view_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx, addr_type;
+ unsigned long addr_v, addr_v2, addr_ctrl;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+ int size = 0;
+ bool exclude = false;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ addr_v = config->addr_val[idx];
+ addr_ctrl = config->addr_acc[idx];
+ addr_type = config->addr_type[idx];
+ if (addr_type == ETM_ADDR_TYPE_RANGE) {
+ if (idx & 0x1) {
+ idx -= 1;
+ addr_v2 = addr_v;
+ addr_v = config->addr_val[idx];
+ } else {
+ addr_v2 = config->addr_val[idx + 1];
+ }
+ exclude = config->viiectlr & BIT(idx / 2 + 16);
+ }
+ spin_unlock(&drvdata->spinlock);
+ if (addr_type) {
+ size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
+ addr_type_names[addr_type], addr_v);
+ if (addr_type == ETM_ADDR_TYPE_RANGE) {
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ " %#lx %s", addr_v2,
+ exclude ? "exclude" : "include");
+ }
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ " ctrl(%#lx)\n", addr_ctrl);
+ } else {
+ size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
+ }
+ return size;
+}
+static DEVICE_ATTR_RO(addr_cmp_view);
+
+static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (!drvdata->nr_pe_cmp)
+ return -EINVAL;
+ val = config->vipcssctlr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!drvdata->nr_pe_cmp)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->vipcssctlr = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
+
static ssize_t seq_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1324,8 +1456,8 @@ static ssize_t seq_event_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
- /* RST, bits[7:0] */
- config->seq_ctrl[idx] = val & 0xFF;
+ /* Seq control has two masks B[15:8] F[7:0] */
+ config->seq_ctrl[idx] = val & 0xFFFF;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1580,12 +1712,129 @@ static ssize_t res_ctrl_store(struct device *dev,
if (idx % 2 != 0)
/* PAIRINV, bit[21] */
val &= ~BIT(21);
- config->res_ctrl[idx] = val;
+ config->res_ctrl[idx] = val & GENMASK(21, 0);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(res_ctrl);
+static ssize_t sshot_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->ss_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nr_ss_cmp)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->ss_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_idx);
+
+static ssize_t sshot_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_ctrl[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ss_idx;
+ config->ss_ctrl[idx] = val & GENMASK(24, 0);
+ /* must clear bit 31 in related status register on programming */
+ config->ss_status[idx] &= ~BIT(31);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_ctrl);
+
+static ssize_t sshot_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_status[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(sshot_status);
+
+static ssize_t sshot_pe_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_pe_cmp[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_pe_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ss_idx;
+ config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
+ /* must clear bit 31 in related status register on programming */
+ config->ss_status[idx] &= ~BIT(31);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_pe_ctrl);
+
static ssize_t ctxid_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1714,6 +1963,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
unsigned long val1, val2, mask;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int nr_inputs;
/*
* Don't use contextID tracing if coming from a PID namespace. See
@@ -1729,7 +1979,9 @@ static ssize_t ctxid_masks_store(struct device *dev,
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* one mask if <= 4 comparators, two for up to 8 */
+ nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
+ if ((drvdata->numcidc > 4) && (nr_inputs != 2))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -1903,6 +2155,7 @@ static ssize_t vmid_masks_store(struct device *dev,
unsigned long val1, val2, mask;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int nr_inputs;
/*
* only implemented when vmid tracing is enabled, i.e. at least one
@@ -1910,7 +2163,9 @@ static ssize_t vmid_masks_store(struct device *dev,
*/
if (!drvdata->vmid_size || !drvdata->numvmidc)
return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* one mask if <= 4 comparators, two for up to 8 */
+ nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
+ if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -2033,6 +2288,13 @@ static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_addr_stop.attr,
&dev_attr_addr_ctxtype.attr,
&dev_attr_addr_context.attr,
+ &dev_attr_addr_exlevel_s_ns.attr,
+ &dev_attr_addr_cmp_view.attr,
+ &dev_attr_vinst_pe_cmp_start_stop.attr,
+ &dev_attr_sshot_idx.attr,
+ &dev_attr_sshot_ctrl.attr,
+ &dev_attr_sshot_pe_ctrl.attr,
+ &dev_attr_sshot_status.attr,
&dev_attr_seq_idx.attr,
&dev_attr_seq_state.attr,
&dev_attr_seq_event.attr,
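
addr_range now accepts an optional third field selecting include (0) or exclude (1) per range instead of always deriving it from 'mode', and the new sshot_* attributes expose the single-shot comparators. A small userspace sketch writing an include range, assuming a trace unit named etm0 (the device name and address range are platform-dependent placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; pick the trace unit for the CPU of interest */
	int fd = open("/sys/bus/coresight/devices/etm0/addr_range", O_WRONLY);

	if (fd < 0)
		return 1;
	/* start address, end address, then 0 = include or 1 = exclude */
	dprintf(fd, "0x400000 0x500000 0\n");
	close(fd);
	return 0;
}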
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a128b5063f46..dc3f507e7562 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -18,6 +18,7 @@
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/pm_wakeup.h>
@@ -26,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <asm/sections.h>
#include <asm/local.h>
#include <asm/virt.h>
@@ -37,6 +39,15 @@ static int boot_enable;
module_param(boot_enable, int, 0444);
MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
+#define PARAM_PM_SAVE_FIRMWARE 0 /* save self-hosted state as per firmware */
+#define PARAM_PM_SAVE_NEVER 1 /* never save any state */
+#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */
+
+static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
+module_param(pm_save_enable, int, 0444);
+MODULE_PARM_DESC(pm_save_enable,
+ "Save/restore state on power down: 1 = never, 2 = self-hosted");
+
/* The number of ETMv4 currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
@@ -54,6 +65,14 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
isb();
}
+static void etm4_os_lock(struct etmv4_drvdata *drvdata)
+{
+ /* Writing 0x1 to TRCOSLAR locks the trace registers */
+ writel_relaxed(0x1, drvdata->base + TRCOSLAR);
+ drvdata->os_unlock = false;
+ isb();
+}
+
static bool etm4_arch_supported(u8 arch)
{
/* Mask out the minor version number */
@@ -149,6 +168,9 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
drvdata->base + TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ /* always clear status bit on restart if using single-shot */
+ if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
+ config->ss_status[i] &= ~BIT(31);
writel_relaxed(config->ss_ctrl[i],
drvdata->base + TRCSSCCRn(i));
writel_relaxed(config->ss_status[i],
@@ -448,6 +470,9 @@ static void etm4_disable_hw(void *info)
{
u32 control;
struct etmv4_drvdata *drvdata = info;
+ struct etmv4_config *config = &drvdata->config;
+ struct device *etm_dev = &drvdata->csdev->dev;
+ int i;
CS_UNLOCK(drvdata->base);
@@ -470,6 +495,18 @@ static void etm4_disable_hw(void *info)
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ /* wait for TRCSTATR.PMSTABLE to go to '1' */
+ if (coresight_timeout(drvdata->base, TRCSTATR,
+ TRCSTATR_PMSTABLE_BIT, 1))
+ dev_err(etm_dev,
+ "timeout while waiting for PM stable Trace Status\n");
+
+ /* read the status of the single shot comparators */
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ config->ss_status[i] =
+ readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ }
+
coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
@@ -576,6 +613,7 @@ static void etm4_init_arch_data(void *info)
u32 etmidr4;
u32 etmidr5;
struct etmv4_drvdata *drvdata = info;
+ int i;
/* Make sure all registers are accessible */
etm4_os_unlock(drvdata);
@@ -629,6 +667,7 @@ static void etm4_init_arch_data(void *info)
* TRCARCHMAJ, bits[11:8] architecture major versin number
*/
drvdata->arch = BMVAL(etmidr1, 4, 11);
+ drvdata->config.arch = drvdata->arch;
/* maximum size of resources */
etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
@@ -698,9 +737,14 @@ static void etm4_init_arch_data(void *info)
drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
/*
* NUMSSCC, bits[23:20] the number of single-shot
- * comparator control for tracing
+ * comparator control for tracing. Read any status regs as these
+ * also contain RO capability data.
*/
drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ drvdata->config.ss_status[i] =
+ readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ }
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
drvdata->numcidc = BMVAL(etmidr4, 24, 27);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
@@ -780,6 +824,7 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
static u64 etm4_get_access_type(struct etmv4_config *config)
{
u64 access_type = etm4_get_ns_access_type(config);
+ u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? ETM_EXLEVEL_S_HYP : 0;
/*
* EXLEVEL_S, bits[11:8], don't trace anything happening
@@ -787,7 +832,8 @@ static u64 etm4_get_access_type(struct etmv4_config *config)
*/
access_type |= (ETM_EXLEVEL_S_APP |
ETM_EXLEVEL_S_OS |
- ETM_EXLEVEL_S_HYP);
+ s_hyp |
+ ETM_EXLEVEL_S_MON);
return access_type;
}
@@ -865,6 +911,7 @@ static void etm4_set_default_filter(struct etmv4_config *config)
* in the started state
*/
config->vinst_ctrl |= BIT(9);
+ config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
@@ -1085,6 +1132,288 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
+#ifdef CONFIG_CPU_PM
+static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+{
+ int i, ret = 0;
+ struct etmv4_save_state *state;
+ struct device *etm_dev = &drvdata->csdev->dev;
+
+ /*
+ * As recommended by 3.4.1 ("The procedure when powering down the PE")
+ * of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Lock the OS lock to disable trace and external debugger access */
+ etm4_os_lock(drvdata);
+
+ /* wait for TRCSTATR.PMSTABLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR,
+ TRCSTATR_PMSTABLE_BIT, 1)) {
+ dev_err(etm_dev,
+ "timeout while waiting for PM Stable Status\n");
+ etm4_os_unlock(drvdata);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ state = drvdata->save_state;
+
+ state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
+ state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
+ state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
+ state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
+ state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
+ state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
+ state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
+ state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
+ state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
+ state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
+ state->trcbbctlr = readl(drvdata->base + TRCBBCTLR);
+ state->trctraceidr = readl(drvdata->base + TRCTRACEIDR);
+ state->trcqctlr = readl(drvdata->base + TRCQCTLR);
+
+ state->trcvictlr = readl(drvdata->base + TRCVICTLR);
+ state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
+ state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
+ state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
+ state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
+ state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
+ state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate; i++)
+ state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
+
+ state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
+ state->trcseqstr = readl(drvdata->base + TRCSEQSTR);
+ state->trcextinselr = readl(drvdata->base + TRCEXTINSELR);
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i));
+ state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i));
+ state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i));
+ state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i));
+ state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ state->trcacvr[i] = readl(drvdata->base + TRCACVRn(i));
+ state->trcacatr[i] = readl(drvdata->base + TRCACATRn(i));
+ }
+
+ /*
+ * Data trace stream is architecturally prohibited for A profile cores
+ * so we don't save (or later restore) trcdvcvr and trcdvcmr - As per
+ * section 1.3.4 ("Possible functional configurations of an ETMv4 trace
+ * unit") of ARM IHI 0064D.
+ */
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ state->trccidcvr[i] = readl(drvdata->base + TRCCIDCVRn(i));
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ state->trcvmidcvr[i] = readl(drvdata->base + TRCVMIDCVRn(i));
+
+ state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
+ state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+
+ state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
+ state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+
+ state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+
+ state->trcpdcr = readl(drvdata->base + TRCPDCR);
+
+ /* wait for TRCSTATR.IDLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ dev_err(etm_dev,
+ "timeout while waiting for Idle Trace Status\n");
+ etm4_os_unlock(drvdata);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ drvdata->state_needs_restore = true;
+
+ /*
+ * Power can be removed from the trace unit now. We do this to
+ * potentially save power on systems that respect the TRCPDCR_PU
+ * despite requesting software to save/restore state.
+ */
+ writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
+ drvdata->base + TRCPDCR);
+
+out:
+ CS_LOCK(drvdata->base);
+ return ret;
+}
+
+static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+{
+ int i;
+ struct etmv4_save_state *state = drvdata->save_state;
+
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+ writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
+ writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
+ writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
+ writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
+ writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
+ writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
+ writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
+ writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
+ writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
+ writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
+ writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR);
+ writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR);
+ writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR);
+
+ writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
+ writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
+ writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
+ writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
+ writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
+ writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
+ writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate; i++)
+ writel_relaxed(state->trcseqevr[i],
+ drvdata->base + TRCSEQEVRn(i));
+
+ writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR);
+ writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR);
+ writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR);
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ writel_relaxed(state->trccntrldvr[i],
+ drvdata->base + TRCCNTRLDVRn(i));
+ writel_relaxed(state->trccntctlr[i],
+ drvdata->base + TRCCNTCTLRn(i));
+ writel_relaxed(state->trccntvr[i],
+ drvdata->base + TRCCNTVRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ writel_relaxed(state->trcrsctlr[i],
+ drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ writel_relaxed(state->trcssccr[i],
+ drvdata->base + TRCSSCCRn(i));
+ writel_relaxed(state->trcsscsr[i],
+ drvdata->base + TRCSSCSRn(i));
+ writel_relaxed(state->trcsspcicr[i],
+ drvdata->base + TRCSSPCICRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ writel_relaxed(state->trcacvr[i],
+ drvdata->base + TRCACVRn(i));
+ writel_relaxed(state->trcacatr[i],
+ drvdata->base + TRCACATRn(i));
+ }
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ writel_relaxed(state->trccidcvr[i],
+ drvdata->base + TRCCIDCVRn(i));
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ writel_relaxed(state->trcvmidcvr[i],
+ drvdata->base + TRCVMIDCVRn(i));
+
+ writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
+ writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+
+ writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
+ writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+ writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+
+ drvdata->state_needs_restore = false;
+
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using the
+ * memory-mapped interface") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ /* Unlock the OS lock to re-enable trace and external debug access */
+ etm4_os_unlock(drvdata);
+ CS_LOCK(drvdata->base);
+}
+
+static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct etmv4_drvdata *drvdata;
+ unsigned int cpu = smp_processor_id();
+
+ if (!etmdrvdata[cpu])
+ return NOTIFY_OK;
+
+ drvdata = etmdrvdata[cpu];
+
+ if (!drvdata->save_state)
+ return NOTIFY_OK;
+
+ if (WARN_ON_ONCE(drvdata->cpu != cpu))
+ return NOTIFY_BAD;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* save the state if self-hosted coresight is in use */
+ if (local_read(&drvdata->mode))
+ if (etm4_cpu_save(drvdata))
+ return NOTIFY_BAD;
+ break;
+ case CPU_PM_EXIT:
+ /* fallthrough */
+ case CPU_PM_ENTER_FAILED:
+ if (drvdata->state_needs_restore)
+ etm4_cpu_restore(drvdata);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block etm4_cpu_pm_nb = {
+ .notifier_call = etm4_cpu_pm_notify,
+};
+
+static int etm4_cpu_pm_register(void)
+{
+ return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+}
+
+static void etm4_cpu_pm_unregister(void)
+{
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+}
+#else
+static int etm4_cpu_pm_register(void) { return 0; }
+static void etm4_cpu_pm_unregister(void) { }
+#endif
+
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -1101,6 +1430,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
dev_set_drvdata(dev, drvdata);
+ if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
+ pm_save_enable = coresight_loses_context_with_cpu(dev) ?
+ PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
+
+ if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
+ drvdata->save_state = devm_kmalloc(dev,
+ sizeof(struct etmv4_save_state), GFP_KERNEL);
+ if (!drvdata->save_state)
+ return -ENOMEM;
+ }
+
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
@@ -1135,6 +1475,10 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (ret < 0)
goto err_arch_supported;
hp_online = ret;
+
+ ret = etm4_cpu_pm_register();
+ if (ret)
+ goto err_arch_supported;
}
cpus_read_unlock();
@@ -1185,6 +1529,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
err_arch_supported:
if (--etm4_count == 0) {
+ etm4_cpu_pm_unregister();
+
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online)
cpuhp_remove_state_nocalls(hp_online);
@@ -1211,6 +1557,7 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
{},
};
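
For context only, a minimal sketch (not part of this patch) of the caller side of the CPU PM notifier added above: an idle or suspend path brackets a power-losing state with cpu_pm_enter()/cpu_pm_exit(), which is what drives etm4_cpu_save() and etm4_cpu_restore(). The idle-entry helper below is a placeholder.

	#include <linux/cpu_pm.h>

	static void enter_power_losing_idle(void)
	{
		/* CPU_PM_ENTER notifiers run here -> etm4_cpu_save() */
		if (cpu_pm_enter())
			return;	/* a notifier vetoed the transition */

		platform_enter_idle();	/* placeholder for the real idle entry */

		/* CPU_PM_EXIT notifiers run here -> etm4_cpu_restore() */
		cpu_pm_exit();
	}
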
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4523f10ddd0f..4a695bf90582 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -175,22 +175,28 @@
ETM_MODE_EXCL_USER)
#define TRCSTATR_IDLE_BIT 0
+#define TRCSTATR_PMSTABLE_BIT 1
#define ETM_DEFAULT_ADDR_COMP 0
/* PowerDown Control Register bits */
#define TRCPDCR_PU BIT(3)
-/* secure state access levels */
+/* secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_S_APP BIT(8)
#define ETM_EXLEVEL_S_OS BIT(9)
-#define ETM_EXLEVEL_S_NA BIT(10)
-#define ETM_EXLEVEL_S_HYP BIT(11)
-/* non-secure state access levels */
+#define ETM_EXLEVEL_S_HYP BIT(10)
+#define ETM_EXLEVEL_S_MON BIT(11)
+/* non-secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_NS_APP BIT(12)
#define ETM_EXLEVEL_NS_OS BIT(13)
#define ETM_EXLEVEL_NS_HYP BIT(14)
#define ETM_EXLEVEL_NS_NA BIT(15)
+/* secure / non secure masks - TRCVICTLR, IDR3 */
+#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
+/* NS MON (EL3) mode never implemented */
+#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20)
+
/**
* struct etmv4_config - configuration information related to an ETMv4
* @mode: Controls various modes supported by this ETM.
@@ -221,6 +227,7 @@
* @cntr_val: Sets or returns the value for a counter.
* @res_idx: Resource index selector.
* @res_ctrl: Controls the selection of the resources in the trace unit.
+ * @ss_idx: Single-shot index selector.
* @ss_ctrl: Controls the corresponding single-shot comparator resource.
* @ss_status: The status of the corresponding single-shot comparator.
* @ss_pe_cmp: Selects the PE comparator inputs for Single-shot control.
@@ -237,6 +244,7 @@
* @vmid_mask0: VM ID comparator mask for comparator 0-3.
* @vmid_mask1: VM ID comparator mask for comparator 4-7.
* @ext_inp: External input selection.
+ * @arch: ETM architecture version (for arch dependent config).
*/
struct etmv4_config {
u32 mode;
@@ -263,6 +271,7 @@ struct etmv4_config {
u32 cntr_val[ETMv4_MAX_CNTR];
u8 res_idx;
u32 res_ctrl[ETM_MAX_RES_SEL];
+ u8 ss_idx;
u32 ss_ctrl[ETM_MAX_SS_CMP];
u32 ss_status[ETM_MAX_SS_CMP];
u32 ss_pe_cmp[ETM_MAX_SS_CMP];
@@ -279,6 +288,66 @@ struct etmv4_config {
u32 vmid_mask0;
u32 vmid_mask1;
u32 ext_inp;
+ u8 arch;
+};
+
+/**
+ * struct etmv4_save_state - state to be preserved when ETM is without power
+ */
+struct etmv4_save_state {
+ u32 trcprgctlr;
+ u32 trcprocselr;
+ u32 trcconfigr;
+ u32 trcauxctlr;
+ u32 trceventctl0r;
+ u32 trceventctl1r;
+ u32 trcstallctlr;
+ u32 trctsctlr;
+ u32 trcsyncpr;
+ u32 trcccctlr;
+ u32 trcbbctlr;
+ u32 trctraceidr;
+ u32 trcqctlr;
+
+ u32 trcvictlr;
+ u32 trcviiectlr;
+ u32 trcvissctlr;
+ u32 trcvipcssctlr;
+ u32 trcvdctlr;
+ u32 trcvdsacctlr;
+ u32 trcvdarcctlr;
+
+ u32 trcseqevr[ETM_MAX_SEQ_STATES];
+ u32 trcseqrstevr;
+ u32 trcseqstr;
+ u32 trcextinselr;
+ u32 trccntrldvr[ETMv4_MAX_CNTR];
+ u32 trccntctlr[ETMv4_MAX_CNTR];
+ u32 trccntvr[ETMv4_MAX_CNTR];
+
+ u32 trcrsctlr[ETM_MAX_RES_SEL * 2];
+
+ u32 trcssccr[ETM_MAX_SS_CMP];
+ u32 trcsscsr[ETM_MAX_SS_CMP];
+ u32 trcsspcicr[ETM_MAX_SS_CMP];
+
+ u64 trcacvr[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 trcacatr[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 trccidcvr[ETMv4_MAX_CTXID_CMP];
+ u32 trcvmidcvr[ETM_MAX_VMID_CMP];
+ u32 trccidcctlr0;
+ u32 trccidcctlr1;
+ u32 trcvmidcctlr0;
+ u32 trcvmidcctlr1;
+
+ u32 trcclaimset;
+
+ u32 cntr_val[ETMv4_MAX_CNTR];
+ u32 seq_state;
+ u32 vinst_ctrl;
+ u32 ss_status[ETM_MAX_SS_CMP];
+
+ u32 trcpdcr;
};
/**
@@ -336,6 +405,8 @@ struct etmv4_config {
* @atbtrig: If the implementation can support ATB triggers
* @lpoverride: If the implementation can support low-power state override.
* @config: structure holding configuration parameters.
+ * @save_state: State to be preserved across power loss
+ * @state_needs_restore: True when there is context to restore after PM exit
*/
struct etmv4_drvdata {
void __iomem *base;
@@ -381,6 +452,8 @@ struct etmv4_drvdata {
bool atbtrig;
bool lpoverride;
struct etmv4_config config;
+ struct etmv4_save_state *save_state;
+ bool state_needs_restore;
};
/* Address comparator access types */
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 05f7896c3a01..900690a9f7f0 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -38,12 +38,14 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");
* @atclk: optional clock for the core parts of the funnel.
* @csdev: component vitals needed by the framework.
* @priority: port selection order.
+ * @spinlock: serialize enable/disable operations.
*/
struct funnel_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
unsigned long priority;
+ spinlock_t spinlock;
};
static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
@@ -76,11 +78,21 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
{
int rc = 0;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (drvdata->base)
- rc = dynamic_funnel_enable_hw(drvdata, inport);
-
+ unsigned long flags;
+ bool first_enable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_read(&csdev->refcnt[inport]) == 0) {
+ if (drvdata->base)
+ rc = dynamic_funnel_enable_hw(drvdata, inport);
+ if (!rc)
+ first_enable = true;
+ }
if (!rc)
+ atomic_inc(&csdev->refcnt[inport]);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (first_enable)
dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", inport);
return rc;
}
@@ -107,11 +119,19 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
int outport)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ bool last_disable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_dec_return(&csdev->refcnt[inport]) == 0) {
+ if (drvdata->base)
+ dynamic_funnel_disable_hw(drvdata, inport);
+ last_disable = true;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (drvdata->base)
- dynamic_funnel_disable_hw(drvdata, inport);
-
- dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
+ if (last_disable)
+ dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
}
static const struct coresight_ops_link funnel_link_ops = {
@@ -233,6 +253,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
+ spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b29ba640eb25..e7dc1c31d20d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -31,11 +31,13 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
* whether this one is programmable or not.
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
+ * @spinlock: serialize enable/disable operations.
*/
struct replicator_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
+ spinlock_t spinlock;
};
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
@@ -97,10 +99,22 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
int rc = 0;
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (drvdata->base)
- rc = dynamic_replicator_enable(drvdata, inport, outport);
+ unsigned long flags;
+ bool first_enable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_read(&csdev->refcnt[outport]) == 0) {
+ if (drvdata->base)
+ rc = dynamic_replicator_enable(drvdata, inport,
+ outport);
+ if (!rc)
+ first_enable = true;
+ }
if (!rc)
+ atomic_inc(&csdev->refcnt[outport]);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (first_enable)
dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
return rc;
}
@@ -137,10 +151,19 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
int outport)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ bool last_disable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_dec_return(&csdev->refcnt[outport]) == 0) {
+ if (drvdata->base)
+ dynamic_replicator_disable(drvdata, inport, outport);
+ last_disable = true;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (drvdata->base)
- dynamic_replicator_disable(drvdata, inport, outport);
- dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
+ if (last_disable)
+ dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
@@ -225,6 +248,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
+ spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 807416b75ecc..d0cc3985b72a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -334,9 +334,10 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
static int tmc_enable_etf_link(struct coresight_device *csdev,
int inport, int outport)
{
- int ret;
+ int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
@@ -344,12 +345,18 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- ret = tmc_etf_enable_hw(drvdata);
+ if (atomic_read(&csdev->refcnt[0]) == 0) {
+ ret = tmc_etf_enable_hw(drvdata);
+ if (!ret) {
+ drvdata->mode = CS_MODE_SYSFS;
+ first_enable = true;
+ }
+ }
if (!ret)
- drvdata->mode = CS_MODE_SYSFS;
+ atomic_inc(&csdev->refcnt[0]);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (!ret)
+ if (first_enable)
dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
return ret;
}
@@ -359,6 +366,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
@@ -366,11 +374,15 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
return;
}
- tmc_etf_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
+ tmc_etf_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ last_disable = true;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
+ if (last_disable)
+ dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 6453c67a4d01..ef20f74c85fa 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -253,9 +253,9 @@ static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int ret;
+ int ret = 0;
int link_subtype;
- int refport, inport, outport;
+ int inport, outport;
if (!parent || !child)
return -EINVAL;
@@ -264,29 +264,17 @@ static int coresight_enable_link(struct coresight_device *csdev,
outport = coresight_find_link_outport(csdev, child);
link_subtype = csdev->subtype.link_subtype;
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
- refport = inport;
- else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
- refport = outport;
- else
- refport = 0;
-
- if (refport < 0)
- return refport;
-
- if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
- if (link_ops(csdev)->enable) {
- ret = link_ops(csdev)->enable(csdev, inport, outport);
- if (ret) {
- atomic_dec(&csdev->refcnt[refport]);
- return ret;
- }
- }
- }
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && inport < 0)
+ return inport;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0)
+ return outport;
- csdev->enable = true;
+ if (link_ops(csdev)->enable)
+ ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (!ret)
+ csdev->enable = true;
- return 0;
+ return ret;
}
static void coresight_disable_link(struct coresight_device *csdev,
@@ -295,7 +283,7 @@ static void coresight_disable_link(struct coresight_device *csdev,
{
int i, nr_conns;
int link_subtype;
- int refport, inport, outport;
+ int inport, outport;
if (!parent || !child)
return;
@@ -305,20 +293,15 @@ static void coresight_disable_link(struct coresight_device *csdev,
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
- refport = inport;
nr_conns = csdev->pdata->nr_inport;
} else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
- refport = outport;
nr_conns = csdev->pdata->nr_outport;
} else {
- refport = 0;
nr_conns = 1;
}
- if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
- if (link_ops(csdev)->disable)
- link_ops(csdev)->disable(csdev, inport, outport);
- }
+ if (link_ops(csdev)->disable)
+ link_ops(csdev)->disable(csdev, inport, outport);
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->refcnt[i]) != 0)
@@ -1308,6 +1291,12 @@ static inline int coresight_search_device_idx(struct coresight_dev_list *dict,
return -ENOENT;
}
+bool coresight_loses_context_with_cpu(struct device *dev)
+{
+ return fwnode_property_present(dev_fwnode(dev),
+ "arm,coresight-loses-context-with-cpu");
+}
+
/*
* coresight_alloc_device_name - Get an index for a given device in the
* device index list specific to a driver. An index is allocated for a
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index d5c1821b31c6..0dfd97bbde9e 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -649,10 +649,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
err = intel_th_device_add_resources(thdev, res, subdev->nres);
- if (err) {
- put_device(&thdev->dev);
+ if (err)
goto fail_put_device;
- }
if (subdev->type == INTEL_TH_OUTPUT) {
if (subdev->mknode)
@@ -667,10 +665,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
err = device_add(&thdev->dev);
- if (err) {
- put_device(&thdev->dev);
+ if (err)
goto fail_free_res;
- }
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 03ca5b1bef9f..ebf3e30e989a 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -210,6 +210,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Ice Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8a29),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Tiger Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9a33),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Tiger Lake PCH */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 603b83ac5085..2712e699ba08 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -832,23 +832,13 @@ stm_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
}
-#ifdef CONFIG_COMPAT
-static long
-stm_char_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return stm_char_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#else
-#define stm_char_compat_ioctl NULL
-#endif
-
static const struct file_operations stm_fops = {
.open = stm_char_open,
.release = stm_char_release,
.write = stm_char_write,
.mmap = stm_char_mmap,
.unlocked_ioctl = stm_char_ioctl,
- .compat_ioctl = stm_char_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 4b9e44b227d8..4f932a419752 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -345,7 +345,11 @@ void stp_policy_unbind(struct stp_policy *policy)
stm->policy = NULL;
policy->stm = NULL;
+ /*
+ * Drop the reference on the protocol driver and lose the link.
+ */
stm_put_protocol(stm->pdrv);
+ stm->pdrv = NULL;
stm_put_device(stm);
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 146ce40d8e0a..6a0aa76859f3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -145,6 +145,7 @@ config I2C_I801
Comet Lake (PCH)
Elkhart Lake (PCH)
Tiger Lake (PCH)
+ Jasper Lake (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -292,7 +293,7 @@ config I2C_VIA
select I2C_ALGOBIT
help
If you say yes to this option, support will be included for the VIA
- 82C586B I2C interface
+ 82C586B I2C interface
This driver can also be built as a module. If so, the module
will be called i2c-via.
@@ -677,11 +678,11 @@ config I2C_IMX_LPI2C
tristate "IMX Low Power I2C interface"
depends on ARCH_MXC || COMPILE_TEST
help
- Say Y here if you want to use the Low Power IIC bus controller
- on the Freescale i.MX processors.
+ Say Y here if you want to use the Low Power IIC bus controller
+ on the Freescale i.MX processors.
- This driver can also be built as a module. If so, the module
- will be called i2c-imx-lpi2c.
+ This driver can also be built as a module. If so, the module
+ will be called i2c-imx-lpi2c.
config I2C_IOP3XX
tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
@@ -874,6 +875,7 @@ config I2C_PXA_PCI
config I2C_PXA_SLAVE
bool "Intel PXA2XX I2C Slave comms support"
depends on I2C_PXA && !X86_32
+ select I2C_SLAVE
help
Support I2C slave mode communications on the PXA I2C bus. This
is necessary for systems where the PXA may be a target on the
@@ -1182,9 +1184,9 @@ config I2C_DIOLAN_U2C
will be called i2c-diolan-u2c.
config I2C_DLN2
- tristate "Diolan DLN-2 USB I2C adapter"
- depends on MFD_DLN2
- help
+ tristate "Diolan DLN-2 USB I2C adapter"
+ depends on MFD_DLN2
+ help
If you say yes to this option, support will be included for Diolan
DLN2, a USB to I2C interface.
@@ -1283,9 +1285,9 @@ config I2C_VIPERBOARD
help
Say yes here to access the I2C part of the Nano River
Technologies Viperboard as I2C master.
- See viperboard API specification and Nano
- River Tech's viperboard.h for detailed meaning
- of the module parameters.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
comment "Other I2C/SMBus bus drivers"
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 7b098ff5f5dd..a7be6f24450b 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -952,6 +952,10 @@ static const struct of_device_id aspeed_i2c_bus_of_table[] = {
.compatible = "aspeed,ast2500-i2c-bus",
.data = aspeed_i2c_25xx_get_clk_reg_val,
},
+ {
+ .compatible = "aspeed,ast2600-i2c-bus",
+ .data = aspeed_i2c_25xx_get_clk_reg_val,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 435c7d7377a3..e13af4874976 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -68,6 +68,9 @@ static struct at91_twi_pdata at91rm9200_config = {
.has_unre_flag = true,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9261_config = {
@@ -76,6 +79,9 @@ static struct at91_twi_pdata at91sam9261_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9260_config = {
@@ -84,6 +90,9 @@ static struct at91_twi_pdata at91sam9260_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9g20_config = {
@@ -92,6 +101,9 @@ static struct at91_twi_pdata at91sam9g20_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9g10_config = {
@@ -100,6 +112,9 @@ static struct at91_twi_pdata at91sam9g10_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static const struct platform_device_id at91_twi_devtypes[] = {
@@ -130,6 +145,9 @@ static struct at91_twi_pdata at91sam9x5_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata sama5d4_config = {
@@ -138,6 +156,9 @@ static struct at91_twi_pdata sama5d4_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = true,
+ .has_dig_filtr = true,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata sama5d2_config = {
@@ -146,6 +167,20 @@ static struct at91_twi_pdata sama5d2_config = {
.has_unre_flag = true,
.has_alt_cmd = true,
.has_hold_field = true,
+ .has_dig_filtr = true,
+ .has_adv_dig_filtr = true,
+ .has_ana_filtr = true,
+};
+
+static struct at91_twi_pdata sam9x60_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = true,
+ .has_alt_cmd = true,
+ .has_hold_field = true,
+ .has_dig_filtr = true,
+ .has_adv_dig_filtr = true,
+ .has_ana_filtr = true,
};
static const struct of_device_id atmel_twi_dt_ids[] = {
@@ -174,6 +209,9 @@ static const struct of_device_id atmel_twi_dt_ids[] = {
.compatible = "atmel,sama5d2-i2c",
.data = &sama5d2_config,
}, {
+ .compatible = "microchip,sam9x60-i2c",
+ .data = &sam9x60_config,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index a3fcc35ffd3b..7a862e00b475 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -31,12 +31,32 @@
void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
+ struct at91_twi_pdata *pdata = dev->pdata;
+ u32 filtr = 0;
+
/* FIFO should be enabled immediately after the software reset */
if (dev->fifo_size)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
+
+ /* enable digital filter */
+ if (pdata->has_dig_filtr && dev->enable_dig_filt)
+ filtr |= AT91_TWI_FILTR_FILT;
+
+ /* enable advanced digital filter */
+ if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
+ filtr |= AT91_TWI_FILTR_FILT |
+ (AT91_TWI_FILTR_THRES(dev->filter_width) &
+ AT91_TWI_FILTR_THRES_MASK);
+
+ /* enable analog filter */
+ if (pdata->has_ana_filtr && dev->enable_ana_filt)
+ filtr |= AT91_TWI_FILTR_PADFEN;
+
+ if (filtr)
+ at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}
/*
@@ -45,7 +65,7 @@ void at91_init_twi_bus_master(struct at91_twi_dev *dev)
*/
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
- int ckdiv, cdiv, div, hold = 0;
+ int ckdiv, cdiv, div, hold = 0, filter_width = 0;
struct at91_twi_pdata *pdata = dev->pdata;
int offset = pdata->clk_offset;
int max_ckdiv = pdata->clk_max_div;
@@ -84,11 +104,29 @@ static void at91_calc_twi_clock(struct at91_twi_dev *dev)
}
}
+ if (pdata->has_adv_dig_filtr) {
+ /*
+ * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
+ * peripheral clocks
+ */
+ filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
+ * (clk_get_rate(dev->clk) / 1000), 1000000);
+ if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
+ dev_warn(dev->dev,
+ "Filter threshold set to its maximum value (%d instead of %d)\n",
+ AT91_TWI_FILTR_THRES_MAX, filter_width);
+ filter_width = AT91_TWI_FILTR_THRES_MAX;
+ }
+ }
+
dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
| AT91_TWI_CWGR_HOLD(hold);
- dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns)\n",
- cdiv, ckdiv, hold, t->sda_hold_ns);
+ dev->filter_width = filter_width;
+
+ dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
+ cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
+ t->digital_filter_width_ns);
}
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
@@ -720,14 +758,14 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
slave_config.dst_maxburst = 1;
slave_config.device_fc = false;
- dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
+ dma->chan_tx = dma_request_chan(dev->dev, "tx");
if (IS_ERR(dma->chan_tx)) {
ret = PTR_ERR(dma->chan_tx);
dma->chan_tx = NULL;
goto error;
}
- dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
+ dma->chan_rx = dma_request_chan(dev->dev, "rx");
if (IS_ERR(dma->chan_rx)) {
ret = PTR_ERR(dma->chan_rx);
dma->chan_rx = NULL;
@@ -793,6 +831,11 @@ int at91_twi_probe_master(struct platform_device *pdev,
dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
}
+ dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
+ "i2c-digital-filter");
+
+ dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
+ "i2c-analog-filter");
at91_calc_twi_clock(dev);
dev->adapter.algo = &at91_twi_algorithm;
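
A worked example of the advanced digital filter computation in at91_calc_twi_clock() above, with assumed numbers (not taken from this patch): for i2c-digital-filter-width-ns = <70> and an 82 MHz peripheral clock,

	filter_width = DIV_ROUND_UP(70 * (82000000 / 1000), 1000000)
	             = DIV_ROUND_UP(5740000, 1000000)
	             = 6;	/* below AT91_TWI_FILTR_THRES_MAX (7), so no clamping */

and at91_init_twi_bus_master() then writes this value into AT91_TWI_FILTR through AT91_TWI_FILTR_THRES() when the advanced digital filter is enabled.
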
diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h
index 499b506f6128..977a67bc0f88 100644
--- a/drivers/i2c/busses/i2c-at91.h
+++ b/drivers/i2c/busses/i2c-at91.h
@@ -84,6 +84,13 @@
#define AT91_TWI_ACR_DATAL(len) ((len) & 0xff)
#define AT91_TWI_ACR_DIR BIT(8)
+#define AT91_TWI_FILTR 0x0044
+#define AT91_TWI_FILTR_FILT BIT(0)
+#define AT91_TWI_FILTR_PADFEN BIT(1)
+#define AT91_TWI_FILTR_THRES(v) ((v) << 8)
+#define AT91_TWI_FILTR_THRES_MAX 7
+#define AT91_TWI_FILTR_THRES_MASK GENMASK(10, 8)
+
#define AT91_TWI_FMR 0x0050 /* FIFO Mode Register */
#define AT91_TWI_FMR_TXRDYM(mode) (((mode) & 0x3) << 0)
#define AT91_TWI_FMR_TXRDYM_MASK (0x3 << 0)
@@ -108,6 +115,9 @@ struct at91_twi_pdata {
bool has_unre_flag;
bool has_alt_cmd;
bool has_hold_field;
+ bool has_dig_filtr;
+ bool has_adv_dig_filtr;
+ bool has_ana_filtr;
struct at_dma_slave dma_slave;
};
@@ -145,6 +155,9 @@ struct at91_twi_dev {
unsigned smr;
struct i2c_client *slave;
#endif
+ bool enable_dig_filt;
+ bool enable_ana_filt;
+ u32 filter_width;
};
unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg);
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 9ffdffaf6141..30efb7913b2e 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -81,6 +81,7 @@
#define M_CMD_PROTOCOL_MASK 0xf
#define M_CMD_PROTOCOL_BLK_WR 0x7
#define M_CMD_PROTOCOL_BLK_RD 0x8
+#define M_CMD_PROTOCOL_PROCESS 0xa
#define M_CMD_PEC_SHIFT 8
#define M_CMD_RD_CNT_SHIFT 0
#define M_CMD_RD_CNT_MASK 0xff
@@ -675,13 +676,20 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
return 0;
}
-static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
- struct i2c_msg *msg)
+/*
+ * If 'process_call' is true, then this is a multi-msg transfer that requires
+ * a repeated start between the messages.
+ * More specifically, it must be a write (reg) followed by a read (data).
+ * The i2c quirks are set to enforce this rule.
+ */
+static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
+ struct i2c_msg *msgs, bool process_call)
{
int i;
u8 addr;
u32 val, tmp, val_intr_en;
unsigned int tx_bytes;
+ struct i2c_msg *msg = &msgs[0];
/* check if bus is busy */
if (!!(iproc_i2c_rd_reg(iproc_i2c,
@@ -707,14 +715,29 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
val = msg->buf[i];
/* mark the last byte */
- if (i == msg->len - 1)
- val |= BIT(M_TX_WR_STATUS_SHIFT);
+ if (!process_call && (i == msg->len - 1))
+ val |= 1 << M_TX_WR_STATUS_SHIFT;
iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
}
iproc_i2c->tx_bytes = tx_bytes;
}
+ /* Process the read message if this is process call */
+ if (process_call) {
+ msg++;
+ iproc_i2c->msg = msg; /* point to second msg */
+
+ /*
+ * The last byte to be sent out should be a slave
+ * address with read operation
+ */
+ addr = i2c_8bit_addr_from_msg(msg);
+ /* mark it the last byte out */
+ val = addr | (1 << M_TX_WR_STATUS_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
+ }
+
/* mark as incomplete before starting the transaction */
if (iproc_i2c->irq)
reinit_completion(&iproc_i2c->done);
@@ -733,7 +756,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
* underrun interrupt, which will be triggerred when the TX FIFO is
* empty. When that happens we can then pump more data into the FIFO
*/
- if (!(msg->flags & I2C_M_RD) &&
+ if (!process_call && !(msg->flags & I2C_M_RD) &&
msg->len > iproc_i2c->tx_bytes)
val_intr_en |= BIT(IE_M_TX_UNDERRUN_SHIFT);
@@ -743,6 +766,8 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
*/
val = BIT(M_CMD_START_BUSY_SHIFT);
if (msg->flags & I2C_M_RD) {
+ u32 protocol;
+
iproc_i2c->rx_bytes = 0;
if (msg->len > M_RX_FIFO_MAX_THLD_VALUE)
iproc_i2c->thld_bytes = M_RX_FIFO_THLD_VALUE;
@@ -758,7 +783,10 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
/* enable the RX threshold interrupt */
val_intr_en |= BIT(IE_M_RX_THLD_SHIFT);
- val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
+ protocol = process_call ?
+ M_CMD_PROTOCOL_PROCESS : M_CMD_PROTOCOL_BLK_RD;
+
+ val |= (protocol << M_CMD_PROTOCOL_SHIFT) |
(msg->len << M_CMD_RD_CNT_SHIFT);
} else {
val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
@@ -774,17 +802,24 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
struct i2c_msg msgs[], int num)
{
struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter);
- int ret, i;
+ bool process_call = false;
+ int ret;
- /* go through all messages */
- for (i = 0; i < num; i++) {
- ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]);
- if (ret) {
- dev_dbg(iproc_i2c->device, "xfer failed\n");
- return ret;
+ if (num == 2) {
+ /* Repeated start, use process call */
+ process_call = true;
+ if (msgs[1].flags & I2C_M_NOSTART) {
+ dev_err(iproc_i2c->device, "Invalid repeated start\n");
+ return -EOPNOTSUPP;
}
}
+ ret = bcm_iproc_i2c_xfer_internal(iproc_i2c, msgs, process_call);
+ if (ret) {
+ dev_dbg(iproc_i2c->device, "xfer failed\n");
+ return ret;
+ }
+
return num;
}
@@ -809,6 +844,8 @@ static struct i2c_algorithm bcm_iproc_algo = {
};
static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_comb_1st_msg_len = M_TX_RX_FIFO_SIZE,
.max_read_len = M_RX_MAX_READ_LEN,
};
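
A hedged client-side sketch (assumed code, not part of this patch) of a transfer that exercises the new process-call path: one write message carrying the register, immediately followed by one read message, which is exactly the shape enforced by the I2C_AQ_COMB_WRITE_THEN_READ quirk above. Here 'client' stands for a hypothetical struct i2c_client bound to this adapter.

	u8 reg = 0x01;
	u8 data[4];
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD,
		  .len = sizeof(data), .buf = data },
	};

	if (i2c_transfer(client->adapter, msgs, 2) != 2)
		dev_err(&client->dev, "process-call transfer failed\n");
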
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index c551aa96a2e3..958161c71985 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -3,6 +3,7 @@
//
// Copyright (C) 2013 Google, Inc.
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -240,7 +241,6 @@ static const struct i2c_algorithm ec_i2c_algorithm = {
static int ec_i2c_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct ec_i2c_device *bus = NULL;
@@ -256,7 +256,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
if (bus == NULL)
return -ENOMEM;
- err = of_property_read_u32(np, "google,remote-bus", &remote_bus);
+ err = device_property_read_u32(dev, "google,remote-bus", &remote_bus);
if (err) {
dev_err(dev, "Couldn't read remote-bus property\n");
return err;
@@ -271,7 +271,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->adap.algo = &ec_i2c_algorithm;
bus->adap.algo_data = bus;
bus->adap.dev.parent = &pdev->dev;
- bus->adap.dev.of_node = np;
+ bus->adap.dev.of_node = pdev->dev.of_node;
bus->adap.retries = I2C_MAX_RETRIES;
err = i2c_add_adapter(&bus->adap);
@@ -291,19 +291,24 @@ static int ec_i2c_remove(struct platform_device *dev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id cros_ec_i2c_of_match[] = {
{ .compatible = "google,cros-ec-i2c-tunnel" },
{},
};
MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
-#endif
+
+static const struct acpi_device_id cros_ec_i2c_tunnel_acpi_id[] = {
+ { "GOOG001A", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id);
static struct platform_driver ec_i2c_tunnel_driver = {
.probe = ec_i2c_probe,
.remove = ec_i2c_remove,
.driver = {
.name = "cros-ec-i2c-tunnel",
+ .acpi_match_table = ACPI_PTR(cros_ec_i2c_tunnel_acpi_id),
.of_match_table = of_match_ptr(cros_ec_i2c_of_match),
},
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f1c714acc280..f5e69fe56532 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -64,8 +64,10 @@
* Cedar Fork (PCH) 0x18df 32 hard yes yes yes
* Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
* Comet Lake (PCH) 0x02a3 32 hard yes yes yes
+ * Comet Lake-H (PCH) 0x06a3 32 hard yes yes yes
* Elkhart Lake (PCH) 0x4b23 32 hard yes yes yes
* Tiger Lake-LP (PCH) 0xa0a3 32 hard yes yes yes
+ * Jasper Lake (SOC) 0x4da3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -205,6 +207,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS 0x06a3
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df
#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
@@ -223,6 +226,7 @@
#define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS 0x34a3
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
+#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2
@@ -1069,8 +1073,10 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
{ 0, }
};
@@ -1750,8 +1756,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
+ case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS:
case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS:
case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index 8382eb64b424..271470f4d8a9 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -122,7 +122,6 @@ static int icy_probe(struct zorro_dev *z,
struct fwnode_handle *new_fwnode;
struct i2c_board_info ltc2990_info = {
.type = "ltc2990",
- .addr = 0x4c,
};
i2c = devm_kzalloc(&z->dev, sizeof(*i2c), GFP_KERNEL);
@@ -188,10 +187,10 @@ static int icy_probe(struct zorro_dev *z,
ltc2990_info.fwnode = new_fwnode;
i2c->ltc2990_client =
- i2c_new_probed_device(&i2c->adapter,
- &ltc2990_info,
- icy_ltc2990_addresses,
- NULL);
+ i2c_new_scanned_device(&i2c->adapter,
+ &ltc2990_info,
+ icy_ltc2990_addresses,
+ NULL);
}
return 0;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 2c3c3d6935c0..466e4f681d7a 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/i2c-pxa.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -180,7 +179,7 @@ struct pxa_i2c {
struct i2c_adapter adap;
struct clk *clk;
#ifdef CONFIG_I2C_PXA_SLAVE
- struct i2c_slave_client *slave;
+ struct i2c_client *slave;
#endif
unsigned int irqlogidx;
@@ -544,22 +543,23 @@ static void i2c_pxa_slave_txempty(struct pxa_i2c *i2c, u32 isr)
if (isr & ISR_BED) {
/* what should we do here? */
} else {
- int ret = 0;
+ u8 byte = 0;
if (i2c->slave != NULL)
- ret = i2c->slave->read(i2c->slave->data);
+ i2c_slave_event(i2c->slave, I2C_SLAVE_READ_PROCESSED,
+ &byte);
- writel(ret, _IDBR(i2c));
+ writel(byte, _IDBR(i2c));
writel(readl(_ICR(i2c)) | ICR_TB, _ICR(i2c)); /* allow next byte */
}
}
static void i2c_pxa_slave_rxfull(struct pxa_i2c *i2c, u32 isr)
{
- unsigned int byte = readl(_IDBR(i2c));
+ u8 byte = readl(_IDBR(i2c));
if (i2c->slave != NULL)
- i2c->slave->write(i2c->slave->data, byte);
+ i2c_slave_event(i2c->slave, I2C_SLAVE_WRITE_RECEIVED, &byte);
writel(readl(_ICR(i2c)) | ICR_TB, _ICR(i2c));
}
@@ -572,9 +572,18 @@ static void i2c_pxa_slave_start(struct pxa_i2c *i2c, u32 isr)
dev_dbg(&i2c->adap.dev, "SAD, mode is slave-%cx\n",
(isr & ISR_RWM) ? 'r' : 't');
- if (i2c->slave != NULL)
- i2c->slave->event(i2c->slave->data,
- (isr & ISR_RWM) ? I2C_SLAVE_EVENT_START_READ : I2C_SLAVE_EVENT_START_WRITE);
+ if (i2c->slave != NULL) {
+ if (isr & ISR_RWM) {
+ u8 byte = 0;
+
+ i2c_slave_event(i2c->slave, I2C_SLAVE_READ_REQUESTED,
+ &byte);
+ writel(byte, _IDBR(i2c));
+ } else {
+ i2c_slave_event(i2c->slave, I2C_SLAVE_WRITE_REQUESTED,
+ NULL);
+ }
+ }
/*
* slave could interrupt in the middle of us generating a
@@ -607,7 +616,7 @@ static void i2c_pxa_slave_stop(struct pxa_i2c *i2c)
dev_dbg(&i2c->adap.dev, "ISR: SSD (Slave Stop)\n");
if (i2c->slave != NULL)
- i2c->slave->event(i2c->slave->data, I2C_SLAVE_EVENT_STOP);
+ i2c_slave_event(i2c->slave, I2C_SLAVE_STOP, NULL);
if (i2c_debug > 2)
dev_dbg(&i2c->adap.dev, "ISR: SSD (Slave Stop) acked\n");
@@ -619,6 +628,38 @@ static void i2c_pxa_slave_stop(struct pxa_i2c *i2c)
if (i2c->msg)
i2c_pxa_master_complete(i2c, I2C_RETRY);
}
+
+static int i2c_pxa_slave_reg(struct i2c_client *slave)
+{
+ struct pxa_i2c *i2c = slave->adapter->algo_data;
+
+ if (i2c->slave)
+ return -EBUSY;
+
+ if (!i2c->reg_isar)
+ return -EAFNOSUPPORT;
+
+ i2c->slave = slave;
+ i2c->slave_addr = slave->addr;
+
+ writel(i2c->slave_addr, _ISAR(i2c));
+
+ return 0;
+}
+
+static int i2c_pxa_slave_unreg(struct i2c_client *slave)
+{
+ struct pxa_i2c *i2c = slave->adapter->algo_data;
+
+ WARN_ON(!i2c->slave);
+
+ i2c->slave_addr = I2C_PXA_SLAVE_ADDR;
+ writel(i2c->slave_addr, _ISAR(i2c));
+
+ i2c->slave = NULL;
+
+ return 0;
+}
#else
static void i2c_pxa_slave_txempty(struct pxa_i2c *i2c, u32 isr)
{
@@ -1141,11 +1182,19 @@ static u32 i2c_pxa_functionality(struct i2c_adapter *adap)
static const struct i2c_algorithm i2c_pxa_algorithm = {
.master_xfer = i2c_pxa_xfer,
.functionality = i2c_pxa_functionality,
+#ifdef CONFIG_I2C_PXA_SLAVE
+ .reg_slave = i2c_pxa_slave_reg,
+ .unreg_slave = i2c_pxa_slave_unreg,
+#endif
};
static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
.master_xfer = i2c_pxa_pio_xfer,
.functionality = i2c_pxa_functionality,
+#ifdef CONFIG_I2C_PXA_SLAVE
+ .reg_slave = i2c_pxa_slave_reg,
+ .unreg_slave = i2c_pxa_slave_unreg,
+#endif
};
static const struct of_device_id i2c_pxa_dt_ids[] = {
@@ -1270,10 +1319,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->highmode_enter = false;
if (plat) {
-#ifdef CONFIG_I2C_PXA_SLAVE
- i2c->slave_addr = plat->slave_addr;
- i2c->slave = plat->slave;
-#endif
i2c->adap.class = plat->class;
}
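
With reg_slave/unreg_slave wired up above, a generic I2C slave backend can now bind to the PXA controller. A minimal sketch of such a backend, modelled on the common i2c slave API rather than on this patch:

	static int my_slave_cb(struct i2c_client *client,
			       enum i2c_slave_event event, u8 *val)
	{
		switch (event) {
		case I2C_SLAVE_WRITE_RECEIVED:
			/* *val holds the byte just written by the remote master */
			break;
		case I2C_SLAVE_READ_REQUESTED:
		case I2C_SLAVE_READ_PROCESSED:
			*val = 0xff;	/* byte to send back to the remote master */
			break;
		default:
			break;
		}
		return 0;
	}

	/* in the backend's probe(): */
	ret = i2c_slave_register(client, my_slave_cb);
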
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index e09cd0775ae9..2d7dabe12723 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -628,7 +628,7 @@ static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
int err;
if (!qup->btx.dma) {
- qup->btx.dma = dma_request_slave_channel_reason(qup->dev, "tx");
+ qup->btx.dma = dma_request_chan(qup->dev, "tx");
if (IS_ERR(qup->btx.dma)) {
err = PTR_ERR(qup->btx.dma);
qup->btx.dma = NULL;
@@ -638,7 +638,7 @@ static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
}
if (!qup->brx.dma) {
- qup->brx.dma = dma_request_slave_channel_reason(qup->dev, "rx");
+ qup->brx.dma = dma_request_chan(qup->dev, "rx");
if (IS_ERR(qup->brx.dma)) {
dev_err(qup->dev, "\n rx channel not available");
err = PTR_ERR(qup->brx.dma);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 531c01100b56..879f0e61a496 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -317,7 +317,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timin
scgd_find:
dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
- scl, t->bus_freq_hz, clk_get_rate(priv->clk), round, cdf, scgd);
+ scl, t->bus_freq_hz, rate, round, cdf, scgd);
/* keep icccr value */
priv->icccr = scgd << cdf_width | cdf;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 8777af4c695e..82b3b795e0bd 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -486,7 +486,7 @@ static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev,
char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
int ret;
- chan = dma_request_slave_channel_reason(dev, chan_name);
+ chan = dma_request_chan(dev, chan_name);
if (IS_ERR(chan)) {
dev_dbg(dev, "request_channel failed for %s (%ld)\n", chan_name,
PTR_ERR(chan));
diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
index 07d5dfce68d4..1da347e6a358 100644
--- a/drivers/i2c/busses/i2c-stm32.c
+++ b/drivers/i2c/busses/i2c-stm32.c
@@ -20,13 +20,13 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Request and configure I2C TX dma channel */
- dma->chan_tx = dma_request_slave_channel(dev, "tx");
- if (!dma->chan_tx) {
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
dev_dbg(dev, "can't request DMA tx channel\n");
- ret = -EINVAL;
+ ret = PTR_ERR(dma->chan_tx);
goto fail_al;
}
@@ -42,10 +42,10 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
}
/* Request and configure I2C RX dma channel */
- dma->chan_rx = dma_request_slave_channel(dev, "rx");
- if (!dma->chan_rx) {
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
dev_err(dev, "can't request DMA rx channel\n");
- ret = -EINVAL;
+ ret = PTR_ERR(dma->chan_rx);
goto fail_tx;
}
@@ -75,7 +75,7 @@ fail_al:
devm_kfree(dev, dma);
dev_info(dev, "can't use DMA\n");
- return NULL;
+ return ERR_PTR(ret);
}
void stm32_i2c_dma_free(struct stm32_i2c_dma *dma)
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index b24e7b937f21..b2634afe066d 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1267,8 +1267,8 @@ static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev,
* slave[0] supports 7-bit and 10-bit slave address
* slave[1] supports 7-bit slave address only
*/
- for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) {
- if (i == 1 && (slave->flags & I2C_CLIENT_PEC))
+ for (i = STM32F7_I2C_MAX_SLAVE - 1; i >= 0; i--) {
+ if (i == 1 && (slave->flags & I2C_CLIENT_TEN))
continue;
if (!i2c_dev->slave[i]) {
*id = i;
@@ -1955,6 +1955,15 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
i2c_dev->dma = stm32_i2c_dma_request(i2c_dev->dev, phy_addr,
STM32F7_I2C_TXDR,
STM32F7_I2C_RXDR);
+ if (PTR_ERR(i2c_dev->dma) == -ENODEV)
+ i2c_dev->dma = NULL;
+ else if (IS_ERR(i2c_dev->dma)) {
+ ret = PTR_ERR(i2c_dev->dma);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request DMA: error %i\n", ret);
+ goto clk_free;
+ }
platform_set_drvdata(pdev, i2c_dev);
@@ -1985,6 +1994,11 @@ pm_disable:
pm_runtime_set_suspended(i2c_dev->dev);
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
+ if (i2c_dev->dma) {
+ stm32_i2c_dma_free(i2c_dev->dma);
+ i2c_dev->dma = NULL;
+ }
+
clk_free:
clk_disable_unprepare(i2c_dev->clk);
@@ -1995,21 +2009,21 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
{
struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
- if (i2c_dev->dma) {
- stm32_i2c_dma_free(i2c_dev->dma);
- i2c_dev->dma = NULL;
- }
-
i2c_del_adapter(&i2c_dev->adap);
pm_runtime_get_sync(i2c_dev->dev);
- clk_disable_unprepare(i2c_dev->clk);
-
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);
pm_runtime_set_suspended(i2c_dev->dev);
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
+ if (i2c_dev->dma) {
+ stm32_i2c_dma_free(i2c_dev->dma);
+ i2c_dev->dma = NULL;
+ }
+
+ clk_disable_unprepare(i2c_dev->clk);
+
return 0;
}
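
The dma_request_slave_channel()/dma_request_slave_channel_reason() conversions in this series all rely on the same idiom, sketched below (assumed code, not a literal hunk): dma_request_chan() returns an ERR_PTR, so DMA can stay optional while probe deferral is still propagated.

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		chan = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;		/* DMA provider not ready yet */
		if (ret != -ENODEV)
			dev_warn(dev, "no TX DMA, falling back to PIO (%d)\n", ret);
	}
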
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index c1683f9338b4..a98bf31d0e5c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -413,7 +413,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
return 0;
}
- chan = dma_request_slave_channel_reason(i2c_dev->dev, "rx");
+ chan = dma_request_chan(i2c_dev->dev, "rx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_out;
@@ -421,7 +421,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
i2c_dev->rx_dma_chan = chan;
- chan = dma_request_slave_channel_reason(i2c_dev->dev, "tx");
+ chan = dma_request_chan(i2c_dev->dev, "tx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_out;
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 37b3b9307d07..d8d49f1814c7 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -46,6 +46,7 @@ enum xiic_endian {
/**
* struct xiic_i2c - Internal representation of the XIIC I2C bus
+ * @dev: Pointer to device structure
* @base: Memory base of the HW registers
* @wait: Wait queue for callers
* @adap: Kernel adapter representation
@@ -57,6 +58,7 @@ enum xiic_endian {
* @rx_msg: Current RX message
* @rx_pos: Position within current RX message
* @endianness: big/little-endian byte order
+ * @clk: Pointer to AXI4-lite input clock
*/
struct xiic_i2c {
struct device *dev;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 5f6a4985f2bc..9333c865d4a9 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1656,6 +1656,12 @@ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_de
t->sda_fall_ns = t->scl_fall_ns;
device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
+
+ device_property_read_u32(dev, "i2c-digital-filter-width-ns",
+ &t->digital_filter_width_ns);
+
+ device_property_read_u32(dev, "i2c-analog-filter-cutoff-frequency",
+ &t->analog_filter_cutoff_freq_hz);
}
EXPORT_SYMBOL_GPL(i2c_parse_fw_timings);
@@ -1737,38 +1743,6 @@ EXPORT_SYMBOL(i2c_del_driver);
/* ------------------------------------------------------------------------- */
-/**
- * i2c_use_client - increments the reference count of the i2c client structure
- * @client: the client being referenced
- *
- * Each live reference to a client should be refcounted. The driver model does
- * that automatically as part of driver binding, so that most drivers don't
- * need to do this explicitly: they hold a reference until they're unbound
- * from the device.
- *
- * A pointer to the client with the incremented reference counter is returned.
- */
-struct i2c_client *i2c_use_client(struct i2c_client *client)
-{
- if (client && get_device(&client->dev))
- return client;
- return NULL;
-}
-EXPORT_SYMBOL(i2c_use_client);
-
-/**
- * i2c_release_client - release a use of the i2c client structure
- * @client: the client being no longer referenced
- *
- * Must be called when a user of a client is finished with it.
- */
-void i2c_release_client(struct i2c_client *client)
-{
- if (client)
- put_device(&client->dev);
-}
-EXPORT_SYMBOL(i2c_release_client);
-
struct i2c_cmd_arg {
unsigned cmd;
void *arg;
@@ -2271,10 +2245,10 @@ int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr)
EXPORT_SYMBOL_GPL(i2c_probe_func_quick_read);
struct i2c_client *
-i2c_new_probed_device(struct i2c_adapter *adap,
- struct i2c_board_info *info,
- unsigned short const *addr_list,
- int (*probe)(struct i2c_adapter *adap, unsigned short addr))
+i2c_new_scanned_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr))
{
int i;
@@ -2304,11 +2278,24 @@ i2c_new_probed_device(struct i2c_adapter *adap,
if (addr_list[i] == I2C_CLIENT_END) {
dev_dbg(&adap->dev, "Probing failed, no device found\n");
- return NULL;
+ return ERR_PTR(-ENODEV);
}
info->addr = addr_list[i];
- return i2c_new_device(adap, info);
+ return i2c_new_client_device(adap, info);
+}
+EXPORT_SYMBOL_GPL(i2c_new_scanned_device);
+
+struct i2c_client *
+i2c_new_probed_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr))
+{
+ struct i2c_client *client;
+
+ client = i2c_new_scanned_device(adap, info, addr_list, probe);
+ return IS_ERR(client) ? NULL : client;
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
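
Editor's note: i2c_new_probed_device() is kept as a thin wrapper so existing callers that test for NULL keep working, while new code can call i2c_new_scanned_device() and see the real error code. A hedged sketch of a caller using the new helper; the adapter, device name and address list are placeholders, and i2c_probe_func_quick_read() is the exported helper visible in the hunk above.

#include <linux/i2c.h>
#include <linux/err.h>

static const unsigned short sensor_addrs[] = { 0x48, 0x49, I2C_CLIENT_END };

static int register_sensor(struct i2c_adapter *adap)
{
	struct i2c_board_info info = { I2C_BOARD_INFO("example-sensor", 0) };
	struct i2c_client *client;

	/* Old style: i2c_new_probed_device() returned NULL on any failure. */
	client = i2c_new_scanned_device(adap, &info, sensor_addrs,
					i2c_probe_func_quick_read);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* e.g. -ENODEV when nothing answered */

	return 0;
}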
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 7eb41990bd6d..e4d296b40baa 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -50,6 +50,7 @@ int of_i2c_get_board_info(struct device *dev, struct device_node *node,
info->addr = addr;
info->of_node = node;
+ info->fwnode = of_fwnode_handle(node);
if (of_property_read_bool(node, "host-notify"))
info->flags |= I2C_CLIENT_HOST_NOTIFY;
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 03096f47e6ab..7e2f5d0eacdb 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -66,7 +66,6 @@ static irqreturn_t smbus_alert(int irq, void *d)
{
struct i2c_smbus_alert *alert = d;
struct i2c_client *ara;
- unsigned short prev_addr = 0; /* Not a valid address */
ara = alert->ara;
@@ -90,18 +89,12 @@ static irqreturn_t smbus_alert(int irq, void *d)
data.addr = status >> 1;
data.type = I2C_PROTOCOL_SMBUS_ALERT;
- if (data.addr == prev_addr) {
- dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
- "0x%02x, skipping\n", data.addr);
- break;
- }
dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
data.addr, data.data);
/* Notify driver for the device which issued the alert */
device_for_each_child(&ara->adapter->dev, &data,
smbus_do_alert);
- prev_addr = data.addr;
}
return IRQ_HANDLED;
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index c6040aa839ac..1708b1a82da2 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -109,14 +109,14 @@ config I2C_DEMUX_PINCTRL
want to change the I2C master at run-time depending on features.
config I2C_MUX_MLXCPLD
- tristate "Mellanox CPLD based I2C multiplexer"
- help
- If you say yes to this option, support will be included for a
- CPLD based I2C multiplexer. This driver provides access to
- I2C busses connected through a MUX, which is controlled
- by a CPLD register.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-mux-mlxcpld.
+ tristate "Mellanox CPLD based I2C multiplexer"
+ help
+ If you say yes to this option, support will be included for a
+ CPLD based I2C multiplexer. This driver provides access to
+ I2C busses connected through a MUX, which is controlled
+ by a CPLD register.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mux-mlxcpld.
endmenu
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index db1a65f4b490..3e7482695f77 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -19,6 +19,7 @@
#define IDETAPE_VERSION "1.20"
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -1407,14 +1408,10 @@ static long do_idetape_chrdev_ioctl(struct file *file,
if (tape->drv_write_prot)
mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
- if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
- return -EFAULT;
- return 0;
+ return put_user_mtget(argp, &mtget);
case MTIOCPOS:
mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
- if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
- return -EFAULT;
- return 0;
+ return put_user_mtpos(argp, &mtpos);
default:
if (tape->chrdev_dir == IDETAPE_DIR_READ)
ide_tape_discard_merge_buffer(drive, 1);
@@ -1432,6 +1429,22 @@ static long idetape_chrdev_ioctl(struct file *file,
return ret;
}
+static long idetape_chrdev_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ if (cmd == MTIOCPOS32)
+ cmd = MTIOCPOS;
+ else if (cmd == MTIOCGET32)
+ cmd = MTIOCGET;
+
+ mutex_lock(&ide_tape_mutex);
+ ret = do_idetape_chrdev_ioctl(file, cmd, arg);
+ mutex_unlock(&ide_tape_mutex);
+ return ret;
+}
+
/*
* Do a mode sense page 0 with block descriptor and if it succeeds set the tape
* block size with the reported value.
@@ -1886,6 +1899,8 @@ static const struct file_operations idetape_fops = {
.read = idetape_chrdev_read,
.write = idetape_chrdev_write,
.unlocked_ioctl = idetape_chrdev_ioctl,
+ .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
+ idetape_chrdev_compat_ioctl : NULL,
.open = idetape_chrdev_open,
.release = idetape_chrdev_release,
.llseek = noop_llseek,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 347b08b56042..75fd2a7b0842 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1291,8 +1291,8 @@ static void sklh_idle_state_table_update(void)
return;
}
- skl_cstates[5].disabled = 1; /* C8-SKL */
- skl_cstates[6].disabled = 1; /* C9-SKL */
+ skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */
+ skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
}
/*
* intel_idle_state_table_update()
@@ -1355,7 +1355,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
continue;
/* if state marked as disabled, skip it */
- if (cpuidle_state_table[cstate].disabled != 0) {
+ if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
cpuidle_state_table[cstate].name);
continue;
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index fcc3f999e482..65f85faf6f31 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -163,16 +163,10 @@ static const struct iio_chan_spec cros_ec_accel_legacy_channels[] = {
static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_sensors_core_state *state;
int ret;
- if (!ec || !ec->ec_dev) {
- dev_warn(&pdev->dev, "No EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 2e37f8a6d8cf..7b837641f166 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index f0af3a42f53c..5d8540b7b427 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -6,6 +6,16 @@
menu "Analog to digital converters"
+config AB8500_GPADC
+ bool "ST-Ericsson AB8500 GPADC driver"
+ depends on AB8500_CORE && REGULATOR_AB8500
+ default y
+ help
+ AB8500 Analog Baseband, mixed signal integrated circuit GPADC
+ (General Purpose Analog to Digital Converter) driver used to monitor
+ internal voltages and to convert accessory, battery, AC (charger, mains)
+ and USB voltages integral to the U8500 platform.
+
config AD_SIGMA_DELTA
tristate
select IIO_BUFFER
@@ -45,6 +55,16 @@ config AD7291
To compile this driver as a module, choose M here: the
module will be called ad7291.
+config AD7292
+ tristate "Analog Devices AD7292 ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7292
+ 8 Channel ADC with temperature sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7292.
+
config AD7298
tristate "Analog Devices AD7298 ADC driver"
depends on SPI
@@ -432,6 +452,17 @@ config INGENIC_ADC
This driver can also be built as a module. If so, the module will be
called ingenic_adc.
+config INTEL_MRFLD_ADC
+ tristate "Intel Merrifield Basin Cove ADC driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ help
+ Say yes here to have support for Basin Cove power management IC (PMIC) ADC
+ device. Depending on platform configuration, this general purpose ADC can
+ be used for sampling sensors such as thermal resistors.
+
+ To compile this driver as a module, choose M here: the module will be
+ called intel_mrfld_adc.
+
config IMX7D_ADC
tristate "Freescale IMX7D ADC driver"
depends on ARCH_MXC || COMPILE_TEST
@@ -508,8 +539,8 @@ config MAX1027
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for Maxim SPI ADC models
- max1027, max1029 and max1031.
+ Say yes here to build support for Maxim SPI {10,12}-bit ADC models:
+ max1027, max1029, max1031, max1227, max1229 and max1231.
To compile this driver as a module, choose M here: the module will be
called max1027.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ef9cc485fb67..a1f1fbec0f87 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -4,10 +4,12 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD7124) += ad7124.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7291) += ad7291.o
+obj-$(CONFIG_AD7292) += ad7292.o
obj-$(CONFIG_AD7298) += ad7298.o
obj-$(CONFIG_AD7923) += ad7923.o
obj-$(CONFIG_AD7476) += ad7476.o
@@ -42,6 +44,7 @@ obj-$(CONFIG_HX711) += hx711.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o
+obj-$(CONFIG_INTEL_MRFLD_ADC) += intel_mrfld_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
new file mode 100644
index 000000000000..fd5b18d7f0c2
--- /dev/null
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * Author: Daniel Willerud <daniel.willerud@stericsson.com>
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: M'boumba Cedric Madianga
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * AB8500 General Purpose ADC driver. The AB8500 uses reference voltages:
+ * VinVADC, and VADC relative to GND to do its job. It monitors main and backup
+ * battery voltages, AC (mains) voltage, USB cable voltage, as well as voltages
+ * representing the temperature of the chip die and battery, accessory
+ * detection by resistance measurements using relative voltages and GSM burst
+ * information.
+ *
+ * Some of the voltages are measured on external pins on the IC, such as
+ * battery temperature or "ADC aux" 1 and 2. Other voltages are internal rails
+ * from other parts of the ASIC such as main charger voltage, main and battery
+ * backup voltage or USB VBUS voltage. For this reason drivers for other
+ * parts of the system are required to obtain handles to the ADC to do work
+ * for them and the IIO driver provides arbitration among these consumers.
+ */
+#include <linux/init.h>
+#include <linux/bits.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+
+/* GPADC register offsets and bit definitions */
+
+#define AB8500_GPADC_CTRL1_REG 0x00
+/* GPADC control register 1 bits */
+#define AB8500_GPADC_CTRL1_DISABLE 0x00
+#define AB8500_GPADC_CTRL1_ENABLE BIT(0)
+#define AB8500_GPADC_CTRL1_TRIG_ENA BIT(1)
+#define AB8500_GPADC_CTRL1_START_SW_CONV BIT(2)
+#define AB8500_GPADC_CTRL1_BTEMP_PULL_UP BIT(3)
+/* 0 = use rising edge, 1 = use falling edge */
+#define AB8500_GPADC_CTRL1_TRIG_EDGE BIT(4)
+/* 0 = use VTVOUT, 1 = use VRTC as pull-up supply for battery temp NTC */
+#define AB8500_GPADC_CTRL1_PUPSUPSEL BIT(5)
+#define AB8500_GPADC_CTRL1_BUF_ENA BIT(6)
+#define AB8500_GPADC_CTRL1_ICHAR_ENA BIT(7)
+
+#define AB8500_GPADC_CTRL2_REG 0x01
+#define AB8500_GPADC_CTRL3_REG 0x02
+/*
+ * GPADC control register 2 and 3 bits
+ * the bit layout is the same for SW and HW conversion set-up
+ */
+#define AB8500_GPADC_CTRL2_AVG_1 0x00
+#define AB8500_GPADC_CTRL2_AVG_4 BIT(5)
+#define AB8500_GPADC_CTRL2_AVG_8 BIT(6)
+#define AB8500_GPADC_CTRL2_AVG_16 (BIT(5) | BIT(6))
+
+enum ab8500_gpadc_channel {
+ AB8500_GPADC_CHAN_UNUSED = 0x00,
+ AB8500_GPADC_CHAN_BAT_CTRL = 0x01,
+ AB8500_GPADC_CHAN_BAT_TEMP = 0x02,
+ /* This is not used on AB8505 */
+ AB8500_GPADC_CHAN_MAIN_CHARGER = 0x03,
+ AB8500_GPADC_CHAN_ACC_DET_1 = 0x04,
+ AB8500_GPADC_CHAN_ACC_DET_2 = 0x05,
+ AB8500_GPADC_CHAN_ADC_AUX_1 = 0x06,
+ AB8500_GPADC_CHAN_ADC_AUX_2 = 0x07,
+ AB8500_GPADC_CHAN_VBAT_A = 0x08,
+ AB8500_GPADC_CHAN_VBUS = 0x09,
+ AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT = 0x0a,
+ AB8500_GPADC_CHAN_USB_CHARGER_CURRENT = 0x0b,
+ AB8500_GPADC_CHAN_BACKUP_BAT = 0x0c,
+ /* Only on AB8505 */
+ AB8505_GPADC_CHAN_DIE_TEMP = 0x0d,
+ AB8500_GPADC_CHAN_ID = 0x0e,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_1 = 0x0f,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_2 = 0x10,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_3 = 0x11,
+ /* FIXME: Applicable to all ASIC variants? */
+ AB8500_GPADC_CHAN_XTAL_TEMP = 0x12,
+ AB8500_GPADC_CHAN_VBAT_TRUE_MEAS = 0x13,
+ /* FIXME: Doesn't seem to work with pure AB8500 */
+ AB8500_GPADC_CHAN_BAT_CTRL_AND_IBAT = 0x1c,
+ AB8500_GPADC_CHAN_VBAT_MEAS_AND_IBAT = 0x1d,
+ AB8500_GPADC_CHAN_VBAT_TRUE_MEAS_AND_IBAT = 0x1e,
+ AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT = 0x1f,
+ /*
+ * Virtual channel used only for ibat conversion to ampere.
+ * Battery current conversion (ibat) cannot be requested as a
+ * single conversion but it is always requested in combination
+ * with other input requests.
+ */
+ AB8500_GPADC_CHAN_IBAT_VIRTUAL = 0xFF,
+};
+
+#define AB8500_GPADC_AUTO_TIMER_REG 0x03
+
+#define AB8500_GPADC_STAT_REG 0x04
+#define AB8500_GPADC_STAT_BUSY BIT(0)
+
+#define AB8500_GPADC_MANDATAL_REG 0x05
+#define AB8500_GPADC_MANDATAH_REG 0x06
+#define AB8500_GPADC_AUTODATAL_REG 0x07
+#define AB8500_GPADC_AUTODATAH_REG 0x08
+#define AB8500_GPADC_MUX_CTRL_REG 0x09
+#define AB8540_GPADC_MANDATA2L_REG 0x09
+#define AB8540_GPADC_MANDATA2H_REG 0x0A
+#define AB8540_GPADC_APEAAX_REG 0x10
+#define AB8540_GPADC_APEAAT_REG 0x11
+#define AB8540_GPADC_APEAAM_REG 0x12
+#define AB8540_GPADC_APEAAH_REG 0x13
+#define AB8540_GPADC_APEAAL_REG 0x14
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_GPADC_CAL_1 0x0F
+#define AB8500_GPADC_CAL_2 0x10
+#define AB8500_GPADC_CAL_3 0x11
+#define AB8500_GPADC_CAL_4 0x12
+#define AB8500_GPADC_CAL_5 0x13
+#define AB8500_GPADC_CAL_6 0x14
+#define AB8500_GPADC_CAL_7 0x15
+/* New calibration for 8540 */
+#define AB8540_GPADC_OTP4_REG_7 0x38
+#define AB8540_GPADC_OTP4_REG_6 0x39
+#define AB8540_GPADC_OTP4_REG_5 0x3A
+
+#define AB8540_GPADC_DIS_ZERO 0x00
+#define AB8540_GPADC_EN_VBIAS_XTAL_TEMP 0x02
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define AB8500_ADC_RESOLUTION 1024
+#define AB8500_ADC_CH_BTEMP_MIN 0
+#define AB8500_ADC_CH_BTEMP_MAX 1350
+#define AB8500_ADC_CH_DIETEMP_MIN 0
+#define AB8500_ADC_CH_DIETEMP_MAX 1350
+#define AB8500_ADC_CH_CHG_V_MIN 0
+#define AB8500_ADC_CH_CHG_V_MAX 20030
+#define AB8500_ADC_CH_ACCDET2_MIN 0
+#define AB8500_ADC_CH_ACCDET2_MAX 2500
+#define AB8500_ADC_CH_VBAT_MIN 2300
+#define AB8500_ADC_CH_VBAT_MAX 4800
+#define AB8500_ADC_CH_CHG_I_MIN 0
+#define AB8500_ADC_CH_CHG_I_MAX 1500
+#define AB8500_ADC_CH_BKBAT_MIN 0
+#define AB8500_ADC_CH_BKBAT_MAX 3200
+
+/* GPADC constants from AB8540 spec */
+#define AB8500_ADC_CH_IBAT_MIN (-6000) /* mA range measured by ADC for ibat */
+#define AB8500_ADC_CH_IBAT_MAX 6000
+#define AB8500_ADC_CH_IBAT_MIN_V (-60) /* mV range measured by ADC for ibat */
+#define AB8500_ADC_CH_IBAT_MAX_V 60
+#define AB8500_GPADC_IBAT_VDROP_L (-56) /* mV */
+#define AB8500_GPADC_IBAT_VDROP_H 56
+
+/* This is used to not lose precision when dividing to get gain and offset */
+#define AB8500_GPADC_CALIB_SCALE 1000
+/*
+ * Number of bits shift used to not lose precision
+ * when dividing to get ibat gain.
+ */
+#define AB8500_GPADC_CALIB_SHIFT_IBAT 20
+
+/* Time in ms before disabling regulator */
+#define AB8500_GPADC_AUTOSUSPEND_DELAY 1
+
+#define AB8500_GPADC_CONVERSION_TIME 500 /* ms */
+
+enum ab8500_cal_channels {
+ AB8500_CAL_VMAIN = 0,
+ AB8500_CAL_BTEMP,
+ AB8500_CAL_VBAT,
+ AB8500_CAL_IBAT,
+ AB8500_CAL_NR,
+};
+
+/**
+ * struct ab8500_adc_cal_data - Table for storing gain and offset for the
+ * calibrated ADC channels
+ * @gain: Gain of the ADC channel
+ * @offset: Offset of the ADC channel
+ * @otp_calib_hi: Calibration from OTP
+ * @otp_calib_lo: Calibration from OTP
+ */
+struct ab8500_adc_cal_data {
+ s64 gain;
+ s64 offset;
+ u16 otp_calib_hi;
+ u16 otp_calib_lo;
+};
+
+/**
+ * struct ab8500_gpadc_chan_info - per-channel GPADC info
+ * @name: name of the channel
+ * @id: the internal AB8500 ID number for the channel
+ * @hardware_control: indicate that we want to use hardware ADC control
+ * on this channel, the default is software ADC control. Hardware control
+ * is normally only used to test the battery voltage during GSM bursts
+ * and needs a hardware trigger on the GPADCTrig pin of the ASIC.
+ * @falling_edge: indicate that we want to trigger on falling edge
+ * rather than rising edge, rising edge is the default
+ * @avg_sample: how many samples to average: must be 1, 4, 8 or 16.
+ * @trig_timer: how long to wait for the trigger, in 32kHz periods:
+ * 0 .. 255 periods
+ */
+struct ab8500_gpadc_chan_info {
+ const char *name;
+ u8 id;
+ bool hardware_control;
+ bool falling_edge;
+ u8 avg_sample;
+ u8 trig_timer;
+};
+
+/**
+ * struct ab8500_gpadc - AB8500 GPADC device information
+ * @dev: pointer to the containing device
+ * @ab8500: pointer to the parent AB8500 device
+ * @chans: internal per-channel information container
+ * @nchans: number of channels
+ * @complete: pointer to the completion that indicates
+ * the completion of a GPADC conversion cycle
+ * @vddadc: pointer to the regulator supplying VDDADC
+ * @irq_sw: interrupt number that is used by gpadc for software ADC conversion
+ * @irq_hw: interrupt number that is used by gpadc for hardware ADC conversion
+ * @cal_data: array of ADC calibration data structs
+ */
+struct ab8500_gpadc {
+ struct device *dev;
+ struct ab8500 *ab8500;
+ struct ab8500_gpadc_chan_info *chans;
+ unsigned int nchans;
+ struct completion complete;
+ struct regulator *vddadc;
+ int irq_sw;
+ int irq_hw;
+ struct ab8500_adc_cal_data cal_data[AB8500_CAL_NR];
+};
+
+static struct ab8500_gpadc_chan_info *
+ab8500_gpadc_get_channel(struct ab8500_gpadc *gpadc, u8 chan)
+{
+ struct ab8500_gpadc_chan_info *ch;
+ int i;
+
+ for (i = 0; i < gpadc->nchans; i++) {
+ ch = &gpadc->chans[i];
+ if (ch->id == chan)
+ break;
+ }
+ if (i == gpadc->nchans)
+ return NULL;
+
+ return ch;
+}
+
+/**
+ * ab8500_gpadc_ad_to_voltage() - Convert a raw ADC value to a voltage
+ * @gpadc: GPADC instance
+ * @ch: the sampled channel this raw value is coming from
+ * @ad_value: the raw value
+ */
+static int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
+ enum ab8500_gpadc_channel ch,
+ int ad_value)
+{
+ int res;
+
+ switch (ch) {
+ case AB8500_GPADC_CHAN_MAIN_CHARGER:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_VMAIN].gain) {
+ res = AB8500_ADC_CH_CHG_V_MIN + (AB8500_ADC_CH_CHG_V_MAX -
+ AB8500_ADC_CH_CHG_V_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_VMAIN].gain +
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8500_GPADC_CHAN_BAT_CTRL:
+ case AB8500_GPADC_CHAN_BAT_TEMP:
+ case AB8500_GPADC_CHAN_ACC_DET_1:
+ case AB8500_GPADC_CHAN_ADC_AUX_1:
+ case AB8500_GPADC_CHAN_ADC_AUX_2:
+ case AB8500_GPADC_CHAN_XTAL_TEMP:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_BTEMP].gain) {
+ res = AB8500_ADC_CH_BTEMP_MIN + (AB8500_ADC_CH_BTEMP_MAX -
+ AB8500_ADC_CH_BTEMP_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_BTEMP].gain +
+ gpadc->cal_data[AB8500_CAL_BTEMP].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8500_GPADC_CHAN_VBAT_A:
+ case AB8500_GPADC_CHAN_VBAT_TRUE_MEAS:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_VBAT].gain) {
+ res = AB8500_ADC_CH_VBAT_MIN + (AB8500_ADC_CH_VBAT_MAX -
+ AB8500_ADC_CH_VBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_VBAT].gain +
+ gpadc->cal_data[AB8500_CAL_VBAT].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8505_GPADC_CHAN_DIE_TEMP:
+ res = AB8500_ADC_CH_DIETEMP_MIN +
+ (AB8500_ADC_CH_DIETEMP_MAX - AB8500_ADC_CH_DIETEMP_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_ACC_DET_2:
+ res = AB8500_ADC_CH_ACCDET2_MIN +
+ (AB8500_ADC_CH_ACCDET2_MAX - AB8500_ADC_CH_ACCDET2_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_VBUS:
+ res = AB8500_ADC_CH_CHG_V_MIN +
+ (AB8500_ADC_CH_CHG_V_MAX - AB8500_ADC_CH_CHG_V_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT:
+ case AB8500_GPADC_CHAN_USB_CHARGER_CURRENT:
+ res = AB8500_ADC_CH_CHG_I_MIN +
+ (AB8500_ADC_CH_CHG_I_MAX - AB8500_ADC_CH_CHG_I_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_BACKUP_BAT:
+ res = AB8500_ADC_CH_BKBAT_MIN +
+ (AB8500_ADC_CH_BKBAT_MAX - AB8500_ADC_CH_BKBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_IBAT_VIRTUAL:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_IBAT].gain) {
+ res = AB8500_ADC_CH_IBAT_MIN + (AB8500_ADC_CH_IBAT_MAX -
+ AB8500_ADC_CH_IBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_IBAT].gain +
+ gpadc->cal_data[AB8500_CAL_IBAT].offset)
+ >> AB8500_GPADC_CALIB_SHIFT_IBAT;
+ break;
+
+ default:
+ dev_err(gpadc->dev,
+ "unknown channel ID: %d, not possible to convert\n",
+ ch);
+ res = -EINVAL;
+ break;
+
+ }
+
+ return res;
+}
+
+static int ab8500_gpadc_read(struct ab8500_gpadc *gpadc,
+ const struct ab8500_gpadc_chan_info *ch,
+ int *ibat)
+{
+ int ret;
+ int looplimit = 0;
+ unsigned long completion_timeout;
+ u8 val;
+ u8 low_data, high_data, low_data2, high_data2;
+ u8 ctrl1;
+ u8 ctrl23;
+ unsigned int delay_min = 0;
+ unsigned int delay_max = 0;
+ u8 data_low_addr, data_high_addr;
+
+ if (!gpadc)
+ return -ENODEV;
+
+ /* check if conversion is supported */
+ if ((gpadc->irq_sw <= 0) && !ch->hardware_control)
+ return -ENOTSUPP;
+ if ((gpadc->irq_hw <= 0) && ch->hardware_control)
+ return -ENOTSUPP;
+
+ /* Enable vddadc by grabbing PM runtime */
+ pm_runtime_get_sync(gpadc->dev);
+
+ /* Check if ADC is not busy, lock and proceed */
+ do {
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_STAT_REG, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & AB8500_GPADC_STAT_BUSY))
+ break;
+ msleep(20);
+ } while (++looplimit < 10);
+ if (looplimit >= 10 && (val & AB8500_GPADC_STAT_BUSY)) {
+ dev_err(gpadc->dev, "gpadc_conversion: GPADC busy");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Enable GPADC */
+ ctrl1 = AB8500_GPADC_CTRL1_ENABLE;
+
+ /* Select the channel source and set average samples */
+ switch (ch->avg_sample) {
+ case 1:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_1;
+ break;
+ case 4:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_4;
+ break;
+ case 8:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_8;
+ break;
+ default:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_16;
+ break;
+ }
+
+ if (ch->hardware_control) {
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL3_REG, ctrl23);
+ ctrl1 |= AB8500_GPADC_CTRL1_TRIG_ENA;
+ if (ch->falling_edge)
+ ctrl1 |= AB8500_GPADC_CTRL1_TRIG_EDGE;
+ } else {
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL2_REG, ctrl23);
+ }
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: set avg samples failed\n");
+ goto out;
+ }
+
+ /*
+ * Enable ADC, buffering, select rising edge and enable ADC path
+ * charging current sense if it is needed; ABB 3.0 needs some special
+ * treatment too.
+ */
+ switch (ch->id) {
+ case AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT:
+ case AB8500_GPADC_CHAN_USB_CHARGER_CURRENT:
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA |
+ AB8500_GPADC_CTRL1_ICHAR_ENA;
+ break;
+ case AB8500_GPADC_CHAN_BAT_TEMP:
+ if (!is_ab8500_2p0_or_earlier(gpadc->ab8500)) {
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA |
+ AB8500_GPADC_CTRL1_BTEMP_PULL_UP;
+ /*
+ * Delay might be needed for ABB8500 cut 3.0; if not,
+ * remove this once the hardware is available
+ */
+ delay_min = 1000; /* Delay in micro seconds */
+ delay_max = 10000; /* large range optimises sleepmode */
+ break;
+ }
+ /* Fall through */
+ default:
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA;
+ break;
+ }
+
+ /* Write configuration to control register 1 */
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ctrl1);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: set Control register failed\n");
+ goto out;
+ }
+
+ if (delay_min != 0)
+ usleep_range(delay_min, delay_max);
+
+ if (ch->hardware_control) {
+ /* Set trigger delay timer */
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_AUTO_TIMER_REG,
+ ch->trig_timer);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: trig timer failed\n");
+ goto out;
+ }
+ completion_timeout = 2 * HZ;
+ data_low_addr = AB8500_GPADC_AUTODATAL_REG;
+ data_high_addr = AB8500_GPADC_AUTODATAH_REG;
+ } else {
+ /* Start SW conversion */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
+ AB8500_GPADC_CTRL1_START_SW_CONV,
+ AB8500_GPADC_CTRL1_START_SW_CONV);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: start s/w conv failed\n");
+ goto out;
+ }
+ completion_timeout = msecs_to_jiffies(AB8500_GPADC_CONVERSION_TIME);
+ data_low_addr = AB8500_GPADC_MANDATAL_REG;
+ data_high_addr = AB8500_GPADC_MANDATAH_REG;
+ }
+
+ /* Wait for completion of conversion */
+ if (!wait_for_completion_timeout(&gpadc->complete,
+ completion_timeout)) {
+ dev_err(gpadc->dev,
+ "timeout didn't receive GPADC conv interrupt\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, data_low_addr, &low_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, data_high_addr, &high_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read high data failed\n");
+ goto out;
+ }
+
+ /* Check if double conversion is required */
+ if ((ch->id == AB8500_GPADC_CHAN_BAT_CTRL_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_VBAT_MEAS_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_VBAT_TRUE_MEAS_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT)) {
+
+ if (ch->hardware_control) {
+ /* not supported */
+ ret = -ENOTSUPP;
+ dev_err(gpadc->dev,
+ "gpadc_conversion: only SW double conversion supported\n");
+ goto out;
+ } else {
+ /* Read the converted RAW data 2 */
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8540_GPADC_MANDATA2L_REG,
+ &low_data2);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read sw low data 2 failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8540_GPADC_MANDATA2H_REG,
+ &high_data2);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read sw high data 2 failed\n");
+ goto out;
+ }
+ if (ibat != NULL) {
+ *ibat = (high_data2 << 8) | low_data2;
+ } else {
+ dev_warn(gpadc->dev,
+ "gpadc_conversion: ibat not stored\n");
+ }
+
+ }
+ }
+
+ /* Disable GPADC */
+ ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, AB8500_GPADC_CTRL1_DISABLE);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
+ goto out;
+ }
+
+ /* This eventually drops the regulator */
+ pm_runtime_mark_last_busy(gpadc->dev);
+ pm_runtime_put_autosuspend(gpadc->dev);
+
+ return (high_data << 8) | low_data;
+
+out:
+ /*
+ * It has been shown that the GPADC needs to be turned off if an error
+ * occurs, otherwise we might have problems when waiting for the busy bit
+ * in the GPADC status register to go low. In V1.1 the wait_for_completion
+ * seems to time out when waiting for an interrupt. Not seen in V2.0.
+ */
+ (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, AB8500_GPADC_CTRL1_DISABLE);
+ pm_runtime_put(gpadc->dev);
+ dev_err(gpadc->dev,
+ "gpadc_conversion: Failed to AD convert channel %d\n", ch->id);
+
+ return ret;
+}
+
+/**
+ * ab8500_bm_gpadcconvend_handler() - isr for gpadc conversion completion
+ * @irq: irq number
+ * @data: pointer to the data passed during request irq
+ *
+ * This is an interrupt service routine for GPADC conversion completion.
+ * It signals that the GPADC conversion is complete and that the converted
+ * raw value can be read from the registers.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab8500_bm_gpadcconvend_handler(int irq, void *data)
+{
+ struct ab8500_gpadc *gpadc = data;
+
+ complete(&gpadc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int otp_cal_regs[] = {
+ AB8500_GPADC_CAL_1,
+ AB8500_GPADC_CAL_2,
+ AB8500_GPADC_CAL_3,
+ AB8500_GPADC_CAL_4,
+ AB8500_GPADC_CAL_5,
+ AB8500_GPADC_CAL_6,
+ AB8500_GPADC_CAL_7,
+};
+
+static int otp4_cal_regs[] = {
+ AB8540_GPADC_OTP4_REG_7,
+ AB8540_GPADC_OTP4_REG_6,
+ AB8540_GPADC_OTP4_REG_5,
+};
+
+static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
+{
+ int i;
+ int ret[ARRAY_SIZE(otp_cal_regs)];
+ u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
+ int ret_otp4[ARRAY_SIZE(otp4_cal_regs)];
+ u8 gpadc_otp4[ARRAY_SIZE(otp4_cal_regs)];
+ int vmain_high, vmain_low;
+ int btemp_high, btemp_low;
+ int vbat_high, vbat_low;
+ int ibat_high, ibat_low;
+ s64 V_gain, V_offset, V2A_gain, V2A_offset;
+
+ /* First we read all OTP registers and store the error code */
+ for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
+ ret[i] = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_OTP_EMUL, otp_cal_regs[i], &gpadc_cal[i]);
+ if (ret[i] < 0) {
+ /* Continue anyway: maybe the other registers are OK */
+ dev_err(gpadc->dev, "%s: read otp reg 0x%02x failed\n",
+ __func__, otp_cal_regs[i]);
+ } else {
+ /* Put this in the entropy pool as device-unique */
+ add_device_randomness(&ret[i], sizeof(ret[i]));
+ }
+ }
+
+ /*
+ * The ADC calibration data is stored in OTP registers.
+ * The layout of the calibration data is outlined below and a more
+ * detailed description can be found in UM0836
+ *
+ * vm_h/l = vmain_high/low
+ * bt_h/l = btemp_high/low
+ * vb_h/l = vbat_high/low
+ *
+ * Data bits 8500/9540:
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h9 | vm_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ *
+ * Data bits 8540:
+ * OTP2
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h9 | vm_h8 | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ *
+ * Data bits 8540:
+ * OTP4
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | ib_h9 | ib_h8 | ib_h7
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | ib_h6 | ib_h5 | ib_h4 | ib_h3 | ib_h2 | ib_h1 | ib_h0 | ib_l5
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | ib_l4 | ib_l3 | ib_l2 | ib_l1 | ib_l0 |
+ *
+ *
+ * Ideal output ADC codes corresponding to injected input voltages
+ * during manufacturing is:
+ *
+ * vmain_high: Vin = 19500mV / ADC ideal code = 997
+ * vmain_low: Vin = 315mV / ADC ideal code = 16
+ * btemp_high: Vin = 1300mV / ADC ideal code = 985
+ * btemp_low: Vin = 21mV / ADC ideal code = 16
+ * vbat_high: Vin = 4700mV / ADC ideal code = 982
+ * vbat_low: Vin = 2380mV / ADC ideal code = 33
+ */
+
+ if (is_ab8540(gpadc->ab8500)) {
+ /* Calculate gain and offset for VMAIN if all reads succeeded*/
+ if (!(ret[1] < 0 || ret[2] < 0)) {
+ vmain_high = (((gpadc_cal[1] & 0xFF) << 2) |
+ ((gpadc_cal[2] & 0xC0) >> 6));
+ vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_hi =
+ (u16)vmain_high;
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_lo =
+ (u16)vmain_low;
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = AB8500_GPADC_CALIB_SCALE *
+ (19500 - 315) / (vmain_high - vmain_low);
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset = AB8500_GPADC_CALIB_SCALE *
+ 19500 - (AB8500_GPADC_CALIB_SCALE * (19500 - 315) /
+ (vmain_high - vmain_low)) * vmain_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = 0;
+ }
+
+ /* Read IBAT calibration Data */
+ for (i = 0; i < ARRAY_SIZE(otp4_cal_regs); i++) {
+ ret_otp4[i] = abx500_get_register_interruptible(
+ gpadc->dev, AB8500_OTP_EMUL,
+ otp4_cal_regs[i], &gpadc_otp4[i]);
+ if (ret_otp4[i] < 0)
+ dev_err(gpadc->dev,
+ "%s: read otp4 reg 0x%02x failed\n",
+ __func__, otp4_cal_regs[i]);
+ }
+
+ /* Calculate gain and offset for IBAT if all reads succeeded */
+ if (!(ret_otp4[0] < 0 || ret_otp4[1] < 0 || ret_otp4[2] < 0)) {
+ ibat_high = (((gpadc_otp4[0] & 0x07) << 7) |
+ ((gpadc_otp4[1] & 0xFE) >> 1));
+ ibat_low = (((gpadc_otp4[1] & 0x01) << 5) |
+ ((gpadc_otp4[2] & 0xF8) >> 3));
+
+ gpadc->cal_data[AB8500_CAL_IBAT].otp_calib_hi =
+ (u16)ibat_high;
+ gpadc->cal_data[AB8500_CAL_IBAT].otp_calib_lo =
+ (u16)ibat_low;
+
+ V_gain = ((AB8500_GPADC_IBAT_VDROP_H - AB8500_GPADC_IBAT_VDROP_L)
+ << AB8500_GPADC_CALIB_SHIFT_IBAT) / (ibat_high - ibat_low);
+
+ V_offset = (AB8500_GPADC_IBAT_VDROP_H << AB8500_GPADC_CALIB_SHIFT_IBAT) -
+ (((AB8500_GPADC_IBAT_VDROP_H - AB8500_GPADC_IBAT_VDROP_L) <<
+ AB8500_GPADC_CALIB_SHIFT_IBAT) / (ibat_high - ibat_low))
+ * ibat_high;
+ /*
+ * Result obtained is in mV (at a scale factor),
+ * we need to calculate gain and offset to get mA
+ */
+ V2A_gain = (AB8500_ADC_CH_IBAT_MAX - AB8500_ADC_CH_IBAT_MIN)/
+ (AB8500_ADC_CH_IBAT_MAX_V - AB8500_ADC_CH_IBAT_MIN_V);
+ V2A_offset = ((AB8500_ADC_CH_IBAT_MAX_V * AB8500_ADC_CH_IBAT_MIN -
+ AB8500_ADC_CH_IBAT_MAX * AB8500_ADC_CH_IBAT_MIN_V)
+ << AB8500_GPADC_CALIB_SHIFT_IBAT)
+ / (AB8500_ADC_CH_IBAT_MAX_V - AB8500_ADC_CH_IBAT_MIN_V);
+
+ gpadc->cal_data[AB8500_CAL_IBAT].gain =
+ V_gain * V2A_gain;
+ gpadc->cal_data[AB8500_CAL_IBAT].offset =
+ V_offset * V2A_gain + V2A_offset;
+ } else {
+ gpadc->cal_data[AB8500_CAL_IBAT].gain = 0;
+ }
+ } else {
+ /* Calculate gain and offset for VMAIN if all reads succeeded */
+ if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
+ vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
+ ((gpadc_cal[1] & 0x3F) << 2) |
+ ((gpadc_cal[2] & 0xC0) >> 6));
+ vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_hi =
+ (u16)vmain_high;
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_lo =
+ (u16)vmain_low;
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = AB8500_GPADC_CALIB_SCALE *
+ (19500 - 315) / (vmain_high - vmain_low);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset = AB8500_GPADC_CALIB_SCALE *
+ 19500 - (AB8500_GPADC_CALIB_SCALE * (19500 - 315) /
+ (vmain_high - vmain_low)) * vmain_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = 0;
+ }
+ }
+
+ /* Calculate gain and offset for BTEMP if all reads succeeded */
+ if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
+ btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
+ (gpadc_cal[3] << 1) | ((gpadc_cal[4] & 0x80) >> 7));
+ btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
+
+ gpadc->cal_data[AB8500_CAL_BTEMP].otp_calib_hi = (u16)btemp_high;
+ gpadc->cal_data[AB8500_CAL_BTEMP].otp_calib_lo = (u16)btemp_low;
+
+ gpadc->cal_data[AB8500_CAL_BTEMP].gain =
+ AB8500_GPADC_CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
+ gpadc->cal_data[AB8500_CAL_BTEMP].offset = AB8500_GPADC_CALIB_SCALE * 1300 -
+ (AB8500_GPADC_CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low))
+ * btemp_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_BTEMP].gain = 0;
+ }
+
+ /* Calculate gain and offset for VBAT if all reads succeeded */
+ if (!(ret[4] < 0 || ret[5] < 0 || ret[6] < 0)) {
+ vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
+ vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
+
+ gpadc->cal_data[AB8500_CAL_VBAT].otp_calib_hi = (u16)vbat_high;
+ gpadc->cal_data[AB8500_CAL_VBAT].otp_calib_lo = (u16)vbat_low;
+
+ gpadc->cal_data[AB8500_CAL_VBAT].gain = AB8500_GPADC_CALIB_SCALE *
+ (4700 - 2380) / (vbat_high - vbat_low);
+ gpadc->cal_data[AB8500_CAL_VBAT].offset = AB8500_GPADC_CALIB_SCALE * 4700 -
+ (AB8500_GPADC_CALIB_SCALE * (4700 - 2380) /
+ (vbat_high - vbat_low)) * vbat_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VBAT].gain = 0;
+ }
+}
+
+static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+ const struct ab8500_gpadc_chan_info *ch;
+ int raw_val;
+ int processed;
+
+ ch = ab8500_gpadc_get_channel(gpadc, chan->address);
+ if (!ch) {
+ dev_err(gpadc->dev, "no such channel %lu\n",
+ chan->address);
+ return -EINVAL;
+ }
+
+ raw_val = ab8500_gpadc_read(gpadc, ch, NULL);
+ if (raw_val < 0)
+ return raw_val;
+
+ if (mask == IIO_CHAN_INFO_RAW) {
+ *val = raw_val;
+ return IIO_VAL_INT;
+ }
+
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ processed = ab8500_gpadc_ad_to_voltage(gpadc, ch->id, raw_val);
+ if (processed < 0)
+ return processed;
+
+ /* Return millivolt or milliamps or millicentigrades */
+ *val = processed * 1000;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ab8500_gpadc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].channel == iiospec->args[0])
+ return i;
+
+ return -EINVAL;
+}
+
+static const struct iio_info ab8500_gpadc_info = {
+ .of_xlate = ab8500_gpadc_of_xlate,
+ .read_raw = ab8500_gpadc_read_raw,
+};
+
+#ifdef CONFIG_PM
+static int ab8500_gpadc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+
+ regulator_disable(gpadc->vddadc);
+
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(gpadc->vddadc);
+ if (ret)
+ dev_err(dev, "Failed to enable vddadc: %d\n", ret);
+
+ return ret;
+}
+#endif
+
+/**
+ * ab8500_gpadc_parse_channel() - process devicetree channel configuration
+ * @dev: pointer to containing device
+ * @np: device tree node for the channel to configure
+ * @ch: channel info to fill in
+ * @iio_chan: IIO channel specification to fill in
+ *
+ * The devicetree will set up the channel for use with the specific device,
+ * and define usage for things like AUX GPADC inputs more precisely.
+ */
+static int ab8500_gpadc_parse_channel(struct device *dev,
+ struct device_node *np,
+ struct ab8500_gpadc_chan_info *ch,
+ struct iio_chan_spec *iio_chan)
+{
+ const char *name = np->name;
+ u32 chan;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &chan);
+ if (ret) {
+ dev_err(dev, "invalid channel number %s\n", name);
+ return ret;
+ }
+ if (chan > AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT) {
+ dev_err(dev, "%s channel number out of range %d\n", name, chan);
+ return -EINVAL;
+ }
+
+ iio_chan->channel = chan;
+ iio_chan->datasheet_name = name;
+ iio_chan->indexed = 1;
+ iio_chan->address = chan;
+ iio_chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED);
+ /* Most are voltages (also temperatures), some are currents */
+ if ((chan == AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT) ||
+ (chan == AB8500_GPADC_CHAN_USB_CHARGER_CURRENT))
+ iio_chan->type = IIO_CURRENT;
+ else
+ iio_chan->type = IIO_VOLTAGE;
+
+ ch->id = chan;
+
+ /* Sensible defaults */
+ ch->avg_sample = 16;
+ ch->hardware_control = false;
+ ch->falling_edge = false;
+ ch->trig_timer = 0;
+
+ return 0;
+}
+
+/**
+ * ab8500_gpadc_parse_channels() - Parse the GPADC channels from DT
+ * @gpadc: the GPADC to configure the channels for
+ * @np: device tree node containing the channel configurations
+ * @chans_parsed: the IIO channels we parsed
+ * @nchans_parsed: the number of IIO channels we parsed
+ */
+static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
+ struct device_node *np,
+ struct iio_chan_spec **chans_parsed,
+ unsigned int *nchans_parsed)
+{
+ struct device_node *child;
+ struct ab8500_gpadc_chan_info *ch;
+ struct iio_chan_spec *iio_chans;
+ unsigned int nchans;
+ int i;
+
+ nchans = of_get_available_child_count(np);
+ if (!nchans) {
+ dev_err(gpadc->dev, "no channel children\n");
+ return -ENODEV;
+ }
+ dev_info(gpadc->dev, "found %d ADC channels\n", nchans);
+
+ iio_chans = devm_kcalloc(gpadc->dev, nchans,
+ sizeof(*iio_chans), GFP_KERNEL);
+ if (!iio_chans)
+ return -ENOMEM;
+
+ gpadc->chans = devm_kcalloc(gpadc->dev, nchans,
+ sizeof(*gpadc->chans), GFP_KERNEL);
+ if (!gpadc->chans)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_available_child_of_node(np, child) {
+ struct iio_chan_spec *iio_chan;
+ int ret;
+
+ ch = &gpadc->chans[i];
+ iio_chan = &iio_chans[i];
+
+ ret = ab8500_gpadc_parse_channel(gpadc->dev, child, ch,
+ iio_chan);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ i++;
+ }
+ gpadc->nchans = nchans;
+ *chans_parsed = iio_chans;
+ *nchans_parsed = nchans;
+
+ return 0;
+}
+
+static int ab8500_gpadc_probe(struct platform_device *pdev)
+{
+ struct ab8500_gpadc *gpadc;
+ struct iio_dev *indio_dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iio_chan_spec *iio_chans;
+ unsigned int n_iio_chans;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*gpadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ gpadc = iio_priv(indio_dev);
+
+ gpadc->dev = dev;
+ gpadc->ab8500 = dev_get_drvdata(dev->parent);
+
+ ret = ab8500_gpadc_parse_channels(gpadc, np, &iio_chans, &n_iio_chans);
+ if (ret)
+ return ret;
+
+ gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
+ if (gpadc->irq_sw < 0) {
+ dev_err(dev, "failed to get platform sw_conv_end irq\n");
+ return gpadc->irq_sw;
+ }
+
+ gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
+ if (gpadc->irq_hw < 0) {
+ dev_err(dev, "failed to get platform hw_conv_end irq\n");
+ return gpadc->irq_hw;
+ }
+
+ /* Initialize completion used to notify completion of conversion */
+ init_completion(&gpadc->complete);
+
+ /* Request interrupts */
+ ret = devm_request_threaded_irq(dev, gpadc->irq_sw, NULL,
+ ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "ab8500-gpadc-sw", gpadc);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to request sw conversion irq %d\n",
+ gpadc->irq_sw);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, gpadc->irq_hw, NULL,
+ ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "ab8500-gpadc-hw", gpadc);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to request hw conversion irq: %d\n",
+ gpadc->irq_hw);
+ return ret;
+ }
+
+ /* The VTVout LDO used to power the AB8500 GPADC */
+ gpadc->vddadc = devm_regulator_get(dev, "vddadc");
+ if (IS_ERR(gpadc->vddadc)) {
+ ret = PTR_ERR(gpadc->vddadc);
+ dev_err(dev, "failed to get vddadc\n");
+ return ret;
+ }
+
+ ret = regulator_enable(gpadc->vddadc);
+ if (ret) {
+ dev_err(dev, "failed to enable vddadc: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, AB8500_GPADC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ ab8500_gpadc_read_calibration_data(gpadc);
+
+ pm_runtime_put(dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = np;
+ indio_dev->name = "ab8500-gpadc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ab8500_gpadc_info;
+ indio_dev->channels = iio_chans;
+ indio_dev->num_channels = n_iio_chans;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ goto out_dis_pm;
+
+ return 0;
+
+out_dis_pm:
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ regulator_disable(gpadc->vddadc);
+
+ return ret;
+}
+
+static int ab8500_gpadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(gpadc->dev);
+ pm_runtime_put_noidle(gpadc->dev);
+ pm_runtime_disable(gpadc->dev);
+ regulator_disable(gpadc->vddadc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
+ ab8500_gpadc_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver ab8500_gpadc_driver = {
+ .probe = ab8500_gpadc_probe,
+ .remove = ab8500_gpadc_remove,
+ .driver = {
+ .name = "ab8500-gpadc",
+ .pm = &ab8500_gpadc_pm_ops,
+ },
+};
+builtin_platform_driver(ab8500_gpadc_driver);
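
Editor's note: the OTP calibration comment in the file above gives ideal factory codes (for VMAIN, 19500 mV maps to code 997 and 315 mV to code 16), and the driver turns those into a two-point linear fit scaled by AB8500_GPADC_CALIB_SCALE. A hedged, standalone sketch of the same arithmetic (plain userspace C, example numbers only, not kernel code):

/* Two-point calibration as used for the VMAIN channel, scaled by 1000
 * (AB8500_GPADC_CALIB_SCALE) to avoid losing precision in integer math.
 */
#include <stdio.h>

#define CALIB_SCALE 1000

int main(void)
{
	/* Ideal factory values from the driver comment: 19500 mV -> code 997,
	 * 315 mV -> code 16 (real parts store measured codes in OTP).
	 */
	long high_code = 997, low_code = 16;
	long gain = CALIB_SCALE * (19500 - 315) / (high_code - low_code);
	long offset = CALIB_SCALE * 19500 - gain * high_code;
	long raw = 500;					/* arbitrary sample code */
	long millivolts = (raw * gain + offset) / CALIB_SCALE;

	printf("gain=%ld offset=%ld -> %ld mV\n", gain, offset, millivolts);
	return 0;
}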
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
new file mode 100644
index 000000000000..a6798f7dfdb8
--- /dev/null
+++ b/drivers/iio/adc/ad7292.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices AD7292 SPI ADC driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+
+#define ADI_VENDOR_ID 0x0018
+
+/* AD7292 registers definition */
+#define AD7292_REG_VENDOR_ID 0x00
+#define AD7292_REG_CONF_BANK 0x05
+#define AD7292_REG_CONV_COMM 0x0E
+#define AD7292_REG_ADC_CH(x) (0x10 + (x))
+
+/* AD7292 configuration bank subregisters definition */
+#define AD7292_BANK_REG_VIN_RNG0 0x10
+#define AD7292_BANK_REG_VIN_RNG1 0x11
+#define AD7292_BANK_REG_SAMP_MODE 0x12
+
+#define AD7292_RD_FLAG_MSK(x) (BIT(7) | ((x) & 0x3F))
+
+/* AD7292_REG_ADC_CONVERSION */
+#define AD7292_ADC_DATA_MASK GENMASK(15, 6)
+#define AD7292_ADC_DATA(x) FIELD_GET(AD7292_ADC_DATA_MASK, x)
+
+/* AD7292_CHANNEL_SAMPLING_MODE */
+#define AD7292_CH_SAMP_MODE(reg, ch) (((reg) >> 8) & BIT(ch))
+
+/* AD7292_CHANNEL_VIN_RANGE */
+#define AD7292_CH_VIN_RANGE(reg, ch) ((reg) & BIT(ch))
+
+#define AD7292_VOLTAGE_CHAN(_chan) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = 1, \
+ .channel = _chan, \
+}
+
+static const struct iio_chan_spec ad7292_channels[] = {
+ AD7292_VOLTAGE_CHAN(0),
+ AD7292_VOLTAGE_CHAN(1),
+ AD7292_VOLTAGE_CHAN(2),
+ AD7292_VOLTAGE_CHAN(3),
+ AD7292_VOLTAGE_CHAN(4),
+ AD7292_VOLTAGE_CHAN(5),
+ AD7292_VOLTAGE_CHAN(6),
+ AD7292_VOLTAGE_CHAN(7)
+};
+
+static const struct iio_chan_spec ad7292_channels_diff[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .indexed = 1,
+ .differential = 1,
+ .channel = 0,
+ .channel2 = 1,
+ },
+ AD7292_VOLTAGE_CHAN(2),
+ AD7292_VOLTAGE_CHAN(3),
+ AD7292_VOLTAGE_CHAN(4),
+ AD7292_VOLTAGE_CHAN(5),
+ AD7292_VOLTAGE_CHAN(6),
+ AD7292_VOLTAGE_CHAN(7)
+};
+
+struct ad7292_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned short vref_mv;
+
+ __be16 d16 ____cacheline_aligned;
+ u8 d8[2];
+};
+
+static int ad7292_spi_reg_read(struct ad7292_state *st, unsigned int addr)
+{
+ int ret;
+
+ st->d8[0] = AD7292_RD_FLAG_MSK(addr);
+
+ ret = spi_write_then_read(st->spi, st->d8, 1, &st->d16, 2);
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->d16);
+}
+
+static int ad7292_spi_subreg_read(struct ad7292_state *st, unsigned int addr,
+ unsigned int sub_addr, unsigned int len)
+{
+ unsigned int shift = 16 - (8 * len);
+ int ret;
+
+ st->d8[0] = AD7292_RD_FLAG_MSK(addr);
+ st->d8[1] = sub_addr;
+
+ ret = spi_write_then_read(st->spi, st->d8, 2, &st->d16, len);
+ if (ret < 0)
+ return ret;
+
+ return (be16_to_cpu(st->d16) >> shift);
+}
+
+static int ad7292_single_conversion(struct ad7292_state *st,
+ unsigned int chan_addr)
+{
+ int ret;
+
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->d8,
+ .len = 4,
+ .delay_usecs = 6,
+ }, {
+ .rx_buf = &st->d16,
+ .len = 2,
+ },
+ };
+
+ st->d8[0] = chan_addr;
+ st->d8[1] = AD7292_RD_FLAG_MSK(AD7292_REG_CONV_COMM);
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->d16);
+}
+
+static int ad7292_vin_range_multiplier(struct ad7292_state *st, int channel)
+{
+ int samp_mode, range0, range1, factor = 1;
+
+ /*
+ * Every AD7292 ADC channel may have its input range adjusted according
+ * to the settings at the ADC sampling mode and VIN range subregisters.
+ * For a given channel, the minimum input range is equal to Vref, and it
+ * may be increased by a multiplier factor of 2 or 4 according to the
+ * following rule:
+ * If channel is being sampled with respect to AGND:
+ * factor = 4 if VIN range0 and VIN range1 equal 0
+ * factor = 2 if only one of the VIN ranges equals 1
+ * factor = 1 if both VIN range0 and VIN range1 equal 1
+ * If channel is being sampled with respect to AVDD:
+ * factor = 4 if VIN range0 and VIN range1 equal 0
+ * Behavior is undefined if either VIN range doesn't equal 0
+ */
+
+ samp_mode = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_SAMP_MODE, 2);
+
+ if (samp_mode < 0)
+ return samp_mode;
+
+ range0 = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_VIN_RNG0, 2);
+
+ if (range0 < 0)
+ return range0;
+
+ range1 = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_VIN_RNG1, 2);
+
+ if (range1 < 0)
+ return range1;
+
+ if (AD7292_CH_SAMP_MODE(samp_mode, channel)) {
+ /* Sampling with respect to AGND */
+ if (!AD7292_CH_VIN_RANGE(range0, channel))
+ factor *= 2;
+
+ if (!AD7292_CH_VIN_RANGE(range1, channel))
+ factor *= 2;
+
+ } else {
+ /* Sampling with respect to AVDD */
+ if (AD7292_CH_VIN_RANGE(range0, channel) ||
+ AD7292_CH_VIN_RANGE(range1, channel))
+ return -EPERM;
+
+ factor = 4;
+ }
+
+ return factor;
+}
+
+static int ad7292_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct ad7292_state *st = iio_priv(indio_dev);
+ unsigned int ch_addr;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ch_addr = AD7292_REG_ADC_CH(chan->channel);
+ ret = ad7292_single_conversion(st, ch_addr);
+ if (ret < 0)
+ return ret;
+
+ *val = AD7292_ADC_DATA(ret);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * To convert a raw value to standard units, the IIO core
+ * defines this formula: scaled value = (raw + offset) * scale.
+ * For the scale to be a correct multiplier for (raw + offset),
+ * it must be calculated as the input range divided by the
+ * number of possible distinct input values. Since the ADC data
+ * is 10 bits long, it can assume 2^10 distinct values.
+ * Hence, scale = range / 2^10. The IIO_VAL_FRACTIONAL_LOG2
+ * return type tells the IIO core to divide *val by 2 to
+ * the power of *val2 when returning from read_raw.
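+ *
+ * For example, with the internal 1250 mV reference and a range
+ * multiplier of 2, this reports *val = 2500 and *val2 = 10, i.e. a
+ * scale of 2500 / 2^10, roughly 2.44 mV per LSB.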
+ */
+
+ ret = ad7292_vin_range_multiplier(st, chan->channel);
+ if (ret < 0)
+ return ret;
+
+ *val = st->vref_mv * ret;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_info ad7292_info = {
+ .read_raw = ad7292_read_raw,
+};
+
+static void ad7292_regulator_disable(void *data)
+{
+ struct ad7292_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static int ad7292_probe(struct spi_device *spi)
+{
+ struct ad7292_state *st;
+ struct iio_dev *indio_dev;
+ struct device_node *child;
+ bool diff_channels = false;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ ret = ad7292_spi_reg_read(st, AD7292_REG_VENDOR_ID);
+ if (ret != ADI_VENDOR_ID) {
+ dev_err(&spi->dev, "Wrong vendor id 0x%x\n", ret);
+ return -EINVAL;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to enable external vref supply\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ ad7292_regulator_disable, st);
+ if (ret) {
+ regulator_disable(st->reg);
+ return ret;
+ }
+
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ return ret;
+
+ st->vref_mv = ret / 1000;
+ } else {
+ /* Use the internal voltage reference. */
+ st->vref_mv = 1250;
+ }
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad7292_info;
+
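+ /* Use the differential channel layout if any DT child requests it. */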
+ for_each_available_child_of_node(spi->dev.of_node, child) {
+ diff_channels = of_property_read_bool(child, "diff-channels");
+ if (diff_channels)
+ break;
+ }
+
+ if (diff_channels) {
+ indio_dev->num_channels = ARRAY_SIZE(ad7292_channels_diff);
+ indio_dev->channels = ad7292_channels_diff;
+ } else {
+ indio_dev->num_channels = ARRAY_SIZE(ad7292_channels);
+ indio_dev->channels = ad7292_channels;
+ }
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad7292_id_table[] = {
+ { "ad7292", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7292_id_table);
+
+static const struct of_device_id ad7292_of_match[] = {
+ { .compatible = "adi,ad7292" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ad7292_of_match);
+
+static struct spi_driver ad7292_driver = {
+ .driver = {
+ .name = "ad7292",
+ .of_match_table = ad7292_of_match,
+ },
+ .probe = ad7292_probe,
+ .id_table = ad7292_id_table,
+};
+module_spi_driver(ad7292_driver);
+
+MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt1@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD7292 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index ac0ffff6c5ae..5c2b3446fa4a 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -54,38 +54,20 @@ struct ad7949_adc_chip {
u8 resolution;
u16 cfg;
unsigned int current_channel;
- u32 buffer ____cacheline_aligned;
+ u16 buffer ____cacheline_aligned;
};
-static bool ad7949_spi_cfg_is_read_back(struct ad7949_adc_chip *ad7949_adc)
-{
- if (!(ad7949_adc->cfg & AD7949_CFG_READ_BACK))
- return true;
-
- return false;
-}
-
-static int ad7949_spi_bits_per_word(struct ad7949_adc_chip *ad7949_adc)
-{
- int ret = ad7949_adc->resolution;
-
- if (ad7949_spi_cfg_is_read_back(ad7949_adc))
- ret += AD7949_CFG_REG_SIZE_BITS;
-
- return ret;
-}
-
static int ad7949_spi_write_cfg(struct ad7949_adc_chip *ad7949_adc, u16 val,
u16 mask)
{
int ret;
- int bits_per_word = ad7949_spi_bits_per_word(ad7949_adc);
+ int bits_per_word = ad7949_adc->resolution;
int shift = bits_per_word - AD7949_CFG_REG_SIZE_BITS;
struct spi_message msg;
struct spi_transfer tx[] = {
{
.tx_buf = &ad7949_adc->buffer,
- .len = 4,
+ .len = 2,
.bits_per_word = bits_per_word,
},
};
@@ -107,13 +89,13 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
unsigned int channel)
{
int ret;
- int bits_per_word = ad7949_spi_bits_per_word(ad7949_adc);
+ int bits_per_word = ad7949_adc->resolution;
int mask = GENMASK(ad7949_adc->resolution, 0);
struct spi_message msg;
struct spi_transfer tx[] = {
{
.rx_buf = &ad7949_adc->buffer,
- .len = 4,
+ .len = 2,
.bits_per_word = bits_per_word,
},
};
@@ -138,10 +120,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
ad7949_adc->current_channel = channel;
- if (ad7949_spi_cfg_is_read_back(ad7949_adc))
- *val = (ad7949_adc->buffer >> AD7949_CFG_REG_SIZE_BITS) & mask;
- else
- *val = ad7949_adc->buffer & mask;
+ *val = ad7949_adc->buffer & mask;
return 0;
}
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 2640b75fb774..8ba90486c787 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -205,7 +205,7 @@ int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
}
EXPORT_SYMBOL_GPL(ad_sd_reset);
-static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
int ret;
@@ -242,6 +242,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(ad_sd_calibrate);
/**
* ad_sd_calibrate_all() - Performs channel calibration
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index d3fc39df535d..1e5375235cfe 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -173,7 +173,6 @@ static int aspeed_adc_probe(struct platform_device *pdev)
struct iio_dev *indio_dev;
struct aspeed_adc_data *data;
const struct aspeed_adc_model_data *model_data;
- struct resource *res;
const char *clk_parent_name;
int ret;
u32 adc_engine_control_reg_val;
@@ -185,8 +184,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index a2837a0e7cba..e1850f3d5cf3 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1483,7 +1483,7 @@ dma_free_area:
st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
dma_chan_disable:
dma_release_channel(st->dma_st.dma_chan);
- st->dma_st.dma_chan = 0;
+ st->dma_st.dma_chan = NULL;
dma_exit:
dev_info(&pdev->dev, "continuing without DMA support\n");
}
@@ -1506,7 +1506,7 @@ static void at91_adc_dma_disable(struct platform_device *pdev)
dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
dma_release_channel(st->dma_st.dma_chan);
- st->dma_st.dma_chan = 0;
+ st->dma_st.dma_chan = NULL;
dev_info(&pdev->dev, "continuing without DMA support\n");
}
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 646ebdc0a8b4..5e396104ac86 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -308,7 +308,7 @@ static int iproc_adc_do_read(struct iio_dev *indio_dev,
"IntMask set failed. Read will likely fail.");
read_len = -EIO;
goto adc_err;
- };
+ }
}
regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index f93f1d93b80d..fe9257624f16 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -310,7 +310,6 @@ static int cc10001_adc_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct cc10001_adc_device *adc_dev;
unsigned long adc_clk_rate;
- struct resource *res;
struct iio_dev *indio_dev;
unsigned long channel_map;
int ret;
@@ -340,8 +339,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
indio_dev->info = &cc10001_adc_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ adc_dev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc_dev->reg_base)) {
ret = PTR_ERR(adc_dev->reg_base);
goto err_disable_reg;
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 2d616cafe75f..5086a337f4c9 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -1008,7 +1008,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
cpcap_adc_irq_thread,
- IRQF_TRIGGER_NONE,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"cpcap-adc", indio_dev);
if (error) {
dev_err(&pdev->dev, "could not get irq: %i\n",
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 5fa78c273a25..65c7c9329b1c 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -524,6 +524,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
u16 conflict;
unsigned int trigger_chan;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&dln2->mutex);
/* Enable ADC */
@@ -537,6 +541,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
(int)conflict);
ret = -EBUSY;
}
+ iio_triggered_buffer_predisable(indio_dev);
return ret;
}
@@ -550,6 +555,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
if (ret < 0) {
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ iio_triggered_buffer_predisable(indio_dev);
return ret;
}
} else {
@@ -557,12 +563,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
}
- return iio_triggered_buffer_postenable(indio_dev);
+ return 0;
}
static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
- int ret;
+ int ret, ret2;
struct dln2_adc *dln2 = iio_priv(indio_dev);
mutex_lock(&dln2->mutex);
@@ -577,12 +583,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
ret = dln2_adc_set_port_enabled(dln2, false, NULL);
mutex_unlock(&dln2->mutex);
- if (ret < 0) {
+ if (ret < 0)
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
- return ret;
- }
- return iio_triggered_buffer_predisable(indio_dev);
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
}
static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 42a3ced11fbd..2df7d057b249 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -651,7 +651,7 @@ static irqreturn_t exynos_ts_isr(int irq, void *dev_id)
input_sync(info->input);
usleep_range(1000, 1100);
- };
+ }
writel(0, ADC_V1_CLRINTPNDNUP(info->regs));
@@ -769,7 +769,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct s3c2410_ts_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct iio_dev *indio_dev = NULL;
- struct resource *mem;
bool has_ts = false;
int ret = -ENODEV;
int irq;
@@ -788,8 +787,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
return -EINVAL;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 62e6c8badd22..c8686558429b 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -23,6 +23,7 @@
/* gain to pulse and scale conversion */
#define HX711_GAIN_MAX 3
+#define HX711_RESET_GAIN 128
struct hx711_gain_to_scale {
int gain;
@@ -185,8 +186,7 @@ static int hx711_wait_for_ready(struct hx711_data *hx711_data)
static int hx711_reset(struct hx711_data *hx711_data)
{
- int ret;
- int val = gpiod_get_value(hx711_data->gpiod_dout);
+ int val = hx711_wait_for_ready(hx711_data);
if (val) {
/*
@@ -202,22 +202,10 @@ static int hx711_reset(struct hx711_data *hx711_data)
msleep(10);
gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
- ret = hx711_wait_for_ready(hx711_data);
- if (ret)
- return ret;
- /*
- * after a reset the gain is 128 so we do a dummy read
- * to set the gain for the next read
- */
- ret = hx711_read(hx711_data);
- if (ret < 0)
- return ret;
-
- /*
- * after a dummy read we need to wait vor readiness
- * for not mixing gain pulses with the clock
- */
val = hx711_wait_for_ready(hx711_data);
+
+ /* after a reset the gain is 128 */
+ hx711_data->gain_set = HX711_RESET_GAIN;
}
return val;
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index e234970b7150..39c0a609fc94 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -25,9 +25,13 @@
#define JZ_ADC_REG_ADSDAT 0x20
#define JZ_ADC_REG_ADCLK 0x28
+#define JZ_ADC_REG_ENABLE_PD BIT(7)
+#define JZ_ADC_REG_CFG_AUX_MD (BIT(0) | BIT(1))
#define JZ_ADC_REG_CFG_BAT_MD BIT(4)
#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0
-#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16
+#define JZ4725B_ADC_REG_ADCLK_CLKDIV10US_LSB 16
+#define JZ4770_ADC_REG_ADCLK_CLKDIV10US_LSB 8
+#define JZ4770_ADC_REG_ADCLK_CLKDIVMS_LSB 16
#define JZ_ADC_AUX_VREF 3300
#define JZ_ADC_AUX_VREF_BITS 12
@@ -37,6 +41,8 @@
#define JZ4725B_ADC_BATTERY_HIGH_VREF_BITS 10
#define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
#define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
+#define JZ4770_ADC_BATTERY_VREF 6600
+#define JZ4770_ADC_BATTERY_VREF_BITS 12
struct ingenic_adc;
@@ -47,6 +53,8 @@ struct ingenic_adc_soc_data {
size_t battery_raw_avail_size;
const int *battery_scale_avail;
size_t battery_scale_avail_size;
+ unsigned int battery_vref_mode: 1;
+ unsigned int has_aux2: 1;
int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc);
};
@@ -54,6 +62,7 @@ struct ingenic_adc {
void __iomem *base;
struct clk *clk;
struct mutex lock;
+ struct mutex aux_lock;
const struct ingenic_adc_soc_data *soc_data;
bool low_vref_mode;
};
@@ -120,6 +129,8 @@ static int ingenic_adc_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->channel) {
case INGENIC_ADC_BATTERY:
+ if (!adc->soc_data->battery_vref_mode)
+ return -EINVAL;
if (val > JZ_ADC_BATTERY_LOW_VREF) {
ingenic_adc_set_config(adc,
JZ_ADC_REG_CFG_BAT_MD,
@@ -158,6 +169,14 @@ static const int jz4740_adc_battery_scale_avail[] = {
JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
};
+static const int jz4770_adc_battery_raw_avail[] = {
+ 0, 1, (1 << JZ4770_ADC_BATTERY_VREF_BITS) - 1,
+};
+
+static const int jz4770_adc_battery_scale_avail[] = {
+ JZ4770_ADC_BATTERY_VREF, JZ4770_ADC_BATTERY_VREF_BITS,
+};
+
static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
{
struct clk *parent_clk;
@@ -187,7 +206,45 @@ static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
/* We also need a divider that produces a 10us clock. */
div_10us = DIV_ROUND_UP(rate, 100000);
- writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) |
+ writel(((div_10us - 1) << JZ4725B_ADC_REG_ADCLK_CLKDIV10US_LSB) |
+ (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
+ adc->base + JZ_ADC_REG_ADCLK);
+
+ return 0;
+}
+
+static int jz4770_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
+{
+ struct clk *parent_clk;
+ unsigned long parent_rate, rate;
+ unsigned int div_main, div_ms, div_10us;
+
+ parent_clk = clk_get_parent(adc->clk);
+ if (!parent_clk) {
+ dev_err(dev, "ADC clock has no parent\n");
+ return -ENODEV;
+ }
+ parent_rate = clk_get_rate(parent_clk);
+
+ /*
+ * The JZ4770 ADC works at 20 kHz to 200 kHz.
+ * We pick the highest rate possible.
+ */
+ div_main = DIV_ROUND_UP(parent_rate, 200000);
+ div_main = clamp(div_main, 1u, 256u);
+ rate = parent_rate / div_main;
+ if (rate < 20000 || rate > 200000) {
+ dev_err(dev, "No valid divider for ADC main clock\n");
+ return -EINVAL;
+ }
+
+ /* We also need a divider that produces a 10us clock. */
+ div_10us = DIV_ROUND_UP(rate, 10000);
+ /* And another, which produces a 1ms clock. */
+ div_ms = DIV_ROUND_UP(rate, 1000);
+
+ writel(((div_ms - 1) << JZ4770_ADC_REG_ADCLK_CLKDIVMS_LSB) |
+ ((div_10us - 1) << JZ4770_ADC_REG_ADCLK_CLKDIV10US_LSB) |
(div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
adc->base + JZ_ADC_REG_ADCLK);
@@ -201,6 +258,8 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
.battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
.battery_scale_avail = jz4725b_adc_battery_scale_avail,
.battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
+ .battery_vref_mode = true,
+ .has_aux2 = false,
.init_clk_div = jz4725b_adc_init_clk_div,
};
@@ -211,9 +270,23 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
.battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
.battery_scale_avail = jz4740_adc_battery_scale_avail,
.battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
+ .battery_vref_mode = true,
+ .has_aux2 = false,
.init_clk_div = NULL, /* no ADCLK register on JZ4740 */
};
+static const struct ingenic_adc_soc_data jz4770_adc_soc_data = {
+ .battery_high_vref = JZ4770_ADC_BATTERY_VREF,
+ .battery_high_vref_bits = JZ4770_ADC_BATTERY_VREF_BITS,
+ .battery_raw_avail = jz4770_adc_battery_raw_avail,
+ .battery_raw_avail_size = ARRAY_SIZE(jz4770_adc_battery_raw_avail),
+ .battery_scale_avail = jz4770_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4770_adc_battery_scale_avail),
+ .battery_vref_mode = false,
+ .has_aux2 = true,
+ .init_clk_div = jz4770_adc_init_clk_div,
+};
+
static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
const int **vals,
@@ -239,6 +312,42 @@ static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
};
}
+static int ingenic_adc_read_chan_info_raw(struct ingenic_adc *adc,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ int bit, ret, engine = (chan->channel == INGENIC_ADC_BATTERY);
+
+ /* We cannot sample AUX/AUX2 in parallel. */
+ mutex_lock(&adc->aux_lock);
+ if (adc->soc_data->has_aux2 && engine == 0) {
+ bit = BIT(chan->channel == INGENIC_ADC_AUX2);
+ ingenic_adc_set_config(adc, JZ_ADC_REG_CFG_AUX_MD, bit);
+ }
+
+ clk_enable(adc->clk);
+ ret = ingenic_adc_capture(adc, engine);
+ if (ret)
+ goto out;
+
+ switch (chan->channel) {
+ case INGENIC_ADC_AUX:
+ case INGENIC_ADC_AUX2:
+ *val = readw(adc->base + JZ_ADC_REG_ADSDAT);
+ break;
+ case INGENIC_ADC_BATTERY:
+ *val = readw(adc->base + JZ_ADC_REG_ADBDAT);
+ break;
+ }
+
+ ret = IIO_VAL_INT;
+out:
+ clk_disable(adc->clk);
+ mutex_unlock(&adc->aux_lock);
+
+ return ret;
+}
+
static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -246,32 +355,14 @@ static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
long m)
{
struct ingenic_adc *adc = iio_priv(iio_dev);
- int ret;
switch (m) {
case IIO_CHAN_INFO_RAW:
- clk_enable(adc->clk);
- ret = ingenic_adc_capture(adc, chan->channel);
- if (ret) {
- clk_disable(adc->clk);
- return ret;
- }
-
- switch (chan->channel) {
- case INGENIC_ADC_AUX:
- *val = readw(adc->base + JZ_ADC_REG_ADSDAT);
- break;
- case INGENIC_ADC_BATTERY:
- *val = readw(adc->base + JZ_ADC_REG_ADBDAT);
- break;
- }
-
- clk_disable(adc->clk);
-
- return IIO_VAL_INT;
+ return ingenic_adc_read_chan_info_raw(adc, chan, val);
case IIO_CHAN_INFO_SCALE:
switch (chan->channel) {
case INGENIC_ADC_AUX:
+ case INGENIC_ADC_AUX2:
*val = JZ_ADC_AUX_VREF;
*val2 = JZ_ADC_AUX_VREF_BITS;
break;
@@ -322,6 +413,14 @@ static const struct iio_chan_spec ingenic_channels[] = {
.indexed = 1,
.channel = INGENIC_ADC_BATTERY,
},
+ { /* Must always be last in the array. */
+ .extend_name = "aux2",
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ .channel = INGENIC_ADC_AUX2,
+ },
};
static int ingenic_adc_probe(struct platform_device *pdev)
@@ -329,7 +428,6 @@ static int ingenic_adc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct iio_dev *iio_dev;
struct ingenic_adc *adc;
- struct resource *mem_base;
const struct ingenic_adc_soc_data *soc_data;
int ret;
@@ -343,10 +441,10 @@ static int ingenic_adc_probe(struct platform_device *pdev)
adc = iio_priv(iio_dev);
mutex_init(&adc->lock);
+ mutex_init(&adc->aux_lock);
adc->soc_data = soc_data;
- mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc->base = devm_ioremap_resource(dev, mem_base);
+ adc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
@@ -374,6 +472,7 @@ static int ingenic_adc_probe(struct platform_device *pdev)
/* Put hardware in a known passive state. */
writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
+ usleep_range(2000, 3000); /* Must wait at least 2ms. */
clk_disable(adc->clk);
ret = devm_add_action_or_reset(dev, ingenic_adc_clk_cleanup, adc->clk);
@@ -387,6 +486,9 @@ static int ingenic_adc_probe(struct platform_device *pdev)
iio_dev->modes = INDIO_DIRECT_MODE;
iio_dev->channels = ingenic_channels;
iio_dev->num_channels = ARRAY_SIZE(ingenic_channels);
+ /* Remove AUX2 from the list of supported channels. */
+ if (!adc->soc_data->has_aux2)
+ iio_dev->num_channels -= 1;
iio_dev->info = &ingenic_adc_info;
ret = devm_iio_device_register(dev, iio_dev);
@@ -400,6 +502,7 @@ static int ingenic_adc_probe(struct platform_device *pdev)
static const struct of_device_id ingenic_adc_of_match[] = {
{ .compatible = "ingenic,jz4725b-adc", .data = &jz4725b_adc_soc_data, },
{ .compatible = "ingenic,jz4740-adc", .data = &jz4740_adc_soc_data, },
+ { .compatible = "ingenic,jz4770-adc", .data = &jz4770_adc_soc_data, },
{ },
};
MODULE_DEVICE_TABLE(of, ingenic_adc_of_match);
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
new file mode 100644
index 000000000000..67d096f8180d
--- /dev/null
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADC driver for Basin Cove PMIC
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Bin Yang <bin.yang@intel.com>
+ *
+ * Rewritten for upstream by:
+ * Vincent Pelletier <plr.vincent@gmail.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+
+#include <asm/unaligned.h>
+
+#define BCOVE_GPADCREQ 0xDC
+#define BCOVE_GPADCREQ_BUSY BIT(0)
+#define BCOVE_GPADCREQ_IRQEN BIT(1)
+
+#define BCOVE_ADCIRQ_ALL ( \
+ BCOVE_ADCIRQ_BATTEMP | \
+ BCOVE_ADCIRQ_SYSTEMP | \
+ BCOVE_ADCIRQ_BATTID | \
+ BCOVE_ADCIRQ_VIBATT | \
+ BCOVE_ADCIRQ_CCTICK)
+
+#define BCOVE_ADC_TIMEOUT msecs_to_jiffies(1000)
+
+static const u8 mrfld_adc_requests[] = {
+ BCOVE_ADCIRQ_VIBATT,
+ BCOVE_ADCIRQ_BATTID,
+ BCOVE_ADCIRQ_VIBATT,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_BATTEMP,
+ BCOVE_ADCIRQ_BATTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+};
+
+struct mrfld_adc {
+ struct regmap *regmap;
+ struct completion completion;
+ /* Lock to protect the IPC transfers */
+ struct mutex lock;
+};
+
+static irqreturn_t mrfld_adc_thread_isr(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+
+ complete(&adc->completion);
+ return IRQ_HANDLED;
+}
+
+static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *result)
+{
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+ struct regmap *regmap = adc->regmap;
+ unsigned int req;
+ long timeout;
+ u8 buf[2];
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ regmap_update_bits(regmap, BCOVE_MADCIRQ, BCOVE_ADCIRQ_ALL, 0);
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_ADC, 0);
+
+ ret = regmap_read_poll_timeout(regmap, BCOVE_GPADCREQ, req,
+ !(req & BCOVE_GPADCREQ_BUSY),
+ 2000, 1000000);
+ if (ret)
+ goto done;
+
+ req = mrfld_adc_requests[chan->channel];
+ ret = regmap_write(regmap, BCOVE_GPADCREQ, BCOVE_GPADCREQ_IRQEN | req);
+ if (ret)
+ goto done;
+
+ timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+ BCOVE_ADC_TIMEOUT);
+ if (timeout < 0) {
+ ret = timeout;
+ goto done;
+ }
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ ret = regmap_bulk_read(regmap, chan->address, buf, 2);
+ if (ret)
+ goto done;
+
+ *result = get_unaligned_be16(buf);
+ ret = IIO_VAL_INT;
+
+done:
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_ADC, 0xff);
+ regmap_update_bits(regmap, BCOVE_MADCIRQ, BCOVE_ADCIRQ_ALL, 0xff);
+
+ return ret;
+}
+
+static int mrfld_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ ret = mrfld_adc_single_conv(indio_dev, chan, val);
+ mutex_unlock(&adc->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mrfld_adc_iio_info = {
+ .read_raw = &mrfld_adc_read_raw,
+};
+
+#define BCOVE_ADC_CHANNEL(_type, _channel, _datasheet_name, _address) \
+ { \
+ .indexed = 1, \
+ .type = _type, \
+ .channel = _channel, \
+ .address = _address, \
+ .datasheet_name = _datasheet_name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ }
+
+static const struct iio_chan_spec mrfld_adc_channels[] = {
+ BCOVE_ADC_CHANNEL(IIO_VOLTAGE, 0, "CH0", 0xE9),
+ BCOVE_ADC_CHANNEL(IIO_RESISTANCE, 1, "CH1", 0xEB),
+ BCOVE_ADC_CHANNEL(IIO_CURRENT, 2, "CH2", 0xED),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 3, "CH3", 0xCC),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 4, "CH4", 0xC8),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 5, "CH5", 0xCA),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 6, "CH6", 0xC2),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 7, "CH7", 0xC4),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 8, "CH8", 0xC6),
+};
+
+static struct iio_map iio_maps[] = {
+ IIO_MAP("CH0", "bcove-battery", "VBATRSLT"),
+ IIO_MAP("CH1", "bcove-battery", "BATTID"),
+ IIO_MAP("CH2", "bcove-battery", "IBATRSLT"),
+ IIO_MAP("CH3", "bcove-temp", "PMICTEMP"),
+ IIO_MAP("CH4", "bcove-temp", "BATTEMP0"),
+ IIO_MAP("CH5", "bcove-temp", "BATTEMP1"),
+ IIO_MAP("CH6", "bcove-temp", "SYSTEMP0"),
+ IIO_MAP("CH7", "bcove-temp", "SYSTEMP1"),
+ IIO_MAP("CH8", "bcove-temp", "SYSTEMP2"),
+ {}
+};
+
+static int mrfld_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct iio_dev *indio_dev;
+ struct mrfld_adc *adc;
+ int irq;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+
+ mutex_init(&adc->lock);
+ init_completion(&adc->completion);
+ adc->regmap = pmic->regmap;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_adc_thread_isr,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ indio_dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = pdev->name;
+
+ indio_dev->channels = mrfld_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mrfld_adc_channels);
+ indio_dev->info = &mrfld_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_map_array_register(indio_dev, iio_maps);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret < 0)
+ goto err_array_unregister;
+
+ return 0;
+
+err_array_unregister:
+ iio_map_array_unregister(indio_dev);
+ return ret;
+}
+
+static int mrfld_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_map_array_unregister(indio_dev);
+
+ return 0;
+}
+
+static const struct platform_device_id mrfld_adc_id_table[] = {
+ { .name = "mrfld_bcove_adc" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_adc_id_table);
+
+static struct platform_driver mrfld_adc_driver = {
+ .driver = {
+ .name = "mrfld_bcove_adc",
+ },
+ .probe = mrfld_adc_probe,
+ .remove = mrfld_adc_remove,
+ .id_table = mrfld_adc_id_table,
+};
+module_platform_driver(mrfld_adc_driver);
+
+MODULE_AUTHOR("Bin Yang <bin.yang@intel.com>");
+MODULE_AUTHOR("Vincent Pelletier <plr.vincent@gmail.com>");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("ADC driver for Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index e400a95f553d..4c6ac6644dc0 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -119,7 +119,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct lpc18xx_adc *adc;
- struct resource *res;
unsigned int clkdiv;
unsigned long rate;
int ret;
@@ -133,8 +132,7 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
adc->dev = &pdev->dev;
mutex_init(&adc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc->base = devm_ioremap_resource(&pdev->dev, res);
+ adc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 214883458582..e171db20c04a 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -63,12 +63,18 @@ enum max1027_id {
max1027,
max1029,
max1031,
+ max1227,
+ max1229,
+ max1231,
};
static const struct spi_device_id max1027_id[] = {
{"max1027", max1027},
{"max1029", max1029},
{"max1031", max1031},
+ {"max1227", max1227},
+ {"max1229", max1229},
+ {"max1231", max1231},
{}
};
MODULE_DEVICE_TABLE(spi, max1027_id);
@@ -78,12 +84,15 @@ static const struct of_device_id max1027_adc_dt_ids[] = {
{ .compatible = "maxim,max1027" },
{ .compatible = "maxim,max1029" },
{ .compatible = "maxim,max1031" },
+ { .compatible = "maxim,max1227" },
+ { .compatible = "maxim,max1229" },
+ { .compatible = "maxim,max1231" },
{},
};
MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
#endif
-#define MAX1027_V_CHAN(index) \
+#define MAX1027_V_CHAN(index, depth) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -93,7 +102,7 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
.scan_index = index + 1, \
.scan_type = { \
.sign = 'u', \
- .realbits = 10, \
+ .realbits = depth, \
.storagebits = 16, \
.shift = 2, \
.endianness = IIO_BE, \
@@ -115,52 +124,54 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
}, \
}
+#define MAX1X27_CHANNELS(depth) \
+ MAX1027_T_CHAN, \
+ MAX1027_V_CHAN(0, depth), \
+ MAX1027_V_CHAN(1, depth), \
+ MAX1027_V_CHAN(2, depth), \
+ MAX1027_V_CHAN(3, depth), \
+ MAX1027_V_CHAN(4, depth), \
+ MAX1027_V_CHAN(5, depth), \
+ MAX1027_V_CHAN(6, depth), \
+ MAX1027_V_CHAN(7, depth)
+
+#define MAX1X29_CHANNELS(depth) \
+ MAX1X27_CHANNELS(depth), \
+ MAX1027_V_CHAN(8, depth), \
+ MAX1027_V_CHAN(9, depth), \
+ MAX1027_V_CHAN(10, depth), \
+ MAX1027_V_CHAN(11, depth)
+
+#define MAX1X31_CHANNELS(depth) \
+ MAX1X27_CHANNELS(depth), \
+ MAX1X29_CHANNELS(depth), \
+ MAX1027_V_CHAN(12, depth), \
+ MAX1027_V_CHAN(13, depth), \
+ MAX1027_V_CHAN(14, depth), \
+ MAX1027_V_CHAN(15, depth)
+
static const struct iio_chan_spec max1027_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7)
+ MAX1X27_CHANNELS(10),
};
static const struct iio_chan_spec max1029_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7),
- MAX1027_V_CHAN(8),
- MAX1027_V_CHAN(9),
- MAX1027_V_CHAN(10),
- MAX1027_V_CHAN(11)
+ MAX1X29_CHANNELS(10),
};
static const struct iio_chan_spec max1031_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7),
- MAX1027_V_CHAN(8),
- MAX1027_V_CHAN(9),
- MAX1027_V_CHAN(10),
- MAX1027_V_CHAN(11),
- MAX1027_V_CHAN(12),
- MAX1027_V_CHAN(13),
- MAX1027_V_CHAN(14),
- MAX1027_V_CHAN(15)
+ MAX1X31_CHANNELS(10),
+};
+
+static const struct iio_chan_spec max1227_channels[] = {
+ MAX1X27_CHANNELS(12),
+};
+
+static const struct iio_chan_spec max1229_channels[] = {
+ MAX1X29_CHANNELS(12),
+};
+
+static const struct iio_chan_spec max1231_channels[] = {
+ MAX1X31_CHANNELS(12),
};
static const unsigned long max1027_available_scan_masks[] = {
@@ -200,6 +211,21 @@ static const struct max1027_chip_info max1027_chip_info_tbl[] = {
.num_channels = ARRAY_SIZE(max1031_channels),
.available_scan_masks = max1031_available_scan_masks,
},
+ [max1227] = {
+ .channels = max1227_channels,
+ .num_channels = ARRAY_SIZE(max1227_channels),
+ .available_scan_masks = max1027_available_scan_masks,
+ },
+ [max1229] = {
+ .channels = max1229_channels,
+ .num_channels = ARRAY_SIZE(max1229_channels),
+ .available_scan_masks = max1029_available_scan_masks,
+ },
+ [max1231] = {
+ .channels = max1231_channels,
+ .num_channels = ARRAY_SIZE(max1231_channels),
+ .available_scan_masks = max1031_available_scan_masks,
+ },
};
struct max1027_state {
@@ -284,7 +310,7 @@ static int max1027_read_raw(struct iio_dev *indio_dev,
break;
case IIO_VOLTAGE:
*val = 2500;
- *val2 = 10;
+ *val2 = chan->scan_type.realbits;
ret = IIO_VAL_FRACTIONAL_LOG2;
break;
default:
@@ -309,8 +335,11 @@ static int max1027_debugfs_reg_access(struct iio_dev *indio_dev,
struct max1027_state *st = iio_priv(indio_dev);
u8 *val = (u8 *)st->buffer;
- if (readval != NULL)
- return -EINVAL;
+ if (readval) {
+ int ret = spi_read(st->spi, val, 2);
+ *readval = be16_to_cpu(st->buffer[0]);
+ return ret;
+ }
*val = (u8)writeval;
return spi_write(st->spi, val, 1);
@@ -427,34 +456,47 @@ static int max1027_probe(struct spi_device *spi)
return -ENOMEM;
}
- ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
- &iio_pollfunc_store_time,
- &max1027_trigger_handler, NULL);
- if (ret < 0) {
- dev_err(&indio_dev->dev, "Failed to setup buffer\n");
- return ret;
- }
+ if (spi->irq) {
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &max1027_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to setup buffer\n");
+ return ret;
+ }
- st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
- indio_dev->name);
- if (st->trig == NULL) {
- ret = -ENOMEM;
- dev_err(&indio_dev->dev, "Failed to allocate iio trigger\n");
- return ret;
- }
+ st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
+ indio_dev->name);
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ dev_err(&indio_dev->dev,
+ "Failed to allocate iio trigger\n");
+ return ret;
+ }
- st->trig->ops = &max1027_trigger_ops;
- st->trig->dev.parent = &spi->dev;
- iio_trigger_set_drvdata(st->trig, indio_dev);
- iio_trigger_register(st->trig);
+ st->trig->ops = &max1027_trigger_ops;
+ st->trig->dev.parent = &spi->dev;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ iio_trigger_register(st->trig);
+
+ ret = devm_request_threaded_irq(&spi->dev, spi->irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
+ IRQF_TRIGGER_FALLING,
+ spi->dev.driver->name,
+ st->trig);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
+ return ret;
+ }
+ }
- ret = devm_request_threaded_irq(&spi->dev, spi->irq,
- iio_trigger_generic_data_rdy_poll,
- NULL,
- IRQF_TRIGGER_FALLING,
- spi->dev.driver->name, st->trig);
+ /* Internal reset */
+ st->reg = MAX1027_RST_REG;
+ ret = spi_write(st->spi, &st->reg, 1);
if (ret < 0) {
- dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
+ dev_err(&indio_dev->dev, "Failed to reset the ADC\n");
return ret;
}
@@ -480,5 +522,5 @@ static struct spi_driver max1027_driver = {
module_spi_driver(max1027_driver);
MODULE_AUTHOR("Philippe Reynes <tremyfr@yahoo.fr>");
-MODULE_DESCRIPTION("MAX1027/MAX1029/MAX1031 ADC");
+MODULE_DESCRIPTION("MAX1X27/MAX1X29/MAX1X31 ADC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 38bf10085696..465c7625a55a 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -164,7 +164,7 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
case mcp3550_60:
case mcp3551:
case mcp3553: {
- u32 raw = be32_to_cpup((u32 *)adc->rx_buf);
+ u32 raw = be32_to_cpup((__be32 *)adc->rx_buf);
if (!(adc->spi->mode & SPI_CPOL))
raw <<= 1; /* strip Data Ready bit in SPI mode 0,0 */
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 3b2fbb7ce431..196c8226381e 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -167,3 +167,4 @@ MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IIO ADC driver for MEN 16z188 ADC Core");
MODULE_ALIAS("mcb:16z188");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 7b27306330a3..22a470db9ef8 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1187,7 +1187,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
const struct meson_sar_adc_data *match_data;
struct meson_sar_adc_priv *priv;
struct iio_dev *indio_dev;
- struct resource *res;
void __iomem *base;
int irq, ret;
@@ -1214,8 +1213,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &meson_sar_adc_iio_info;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 7bbb64ca3b32..a4776d924f3a 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -237,7 +237,6 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
{
struct mt6577_auxadc_device *adc_dev;
unsigned long adc_clk_rate;
- struct resource *res;
struct iio_dev *indio_dev;
int ret;
@@ -253,8 +252,7 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
indio_dev->channels = mt6577_auxadc_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(mt6577_auxadc_iio_channels);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ adc_dev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc_dev->reg_base)) {
dev_err(&pdev->dev, "failed to get auxadc base address\n");
return PTR_ERR(adc_dev->reg_base);
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index 910f3585fa54..a6170a37ebe8 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -183,7 +183,6 @@ static int npcm_adc_probe(struct platform_device *pdev)
int irq;
u32 div;
u32 reg_con;
- struct resource *res;
struct npcm_adc *info;
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
@@ -196,8 +195,7 @@ static int npcm_adc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, res);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index c37f201294b2..63ce743ee7af 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -481,7 +481,6 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rcar_gyroadc *priv;
struct iio_dev *indio_dev;
- struct resource *mem;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
@@ -491,8 +490,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
priv = iio_priv(indio_dev);
priv->dev = dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, mem);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index a6c046575ec3..66b387f9b36d 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -477,13 +477,6 @@ static void sc27xx_adc_disable(void *_data)
SC27XX_MODULE_ADC_EN, 0);
}
-static void sc27xx_adc_free_hwlock(void *_data)
-{
- struct hwspinlock *hwlock = _data;
-
- hwspin_lock_free(hwlock);
-}
-
static int sc27xx_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -520,19 +513,12 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
return ret;
}
- sc27xx_data->hwlock = hwspin_lock_request_specific(ret);
+ sc27xx_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
if (!sc27xx_data->hwlock) {
dev_err(dev, "failed to request hwspinlock\n");
return -ENXIO;
}
- ret = devm_add_action_or_reset(dev, sc27xx_adc_free_hwlock,
- sc27xx_data->hwlock);
- if (ret) {
- dev_err(dev, "failed to add hwspinlock action\n");
- return ret;
- }
-
sc27xx_data->dev = dev;
ret = sc27xx_adc_enable(sc27xx_data);
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index 592b97c464da..0ad536494e8f 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -260,7 +260,6 @@ static int spear_adc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct spear_adc_state *st;
- struct resource *res;
struct iio_dev *indio_dev = NULL;
int ret = -ENODEV;
int irq;
@@ -279,8 +278,7 @@ static int spear_adc_probe(struct platform_device *pdev)
* (e.g. SPEAr3xx). Let's provide two register base addresses
* to support multi-arch kernels.
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- st->adc_base_spear6xx = devm_ioremap_resource(&pdev->dev, res);
+ st->adc_base_spear6xx = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(st->adc_base_spear6xx))
return PTR_ERR(st->adc_base_spear6xx);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 93a096a91f8c..6537f4f776c5 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -38,12 +38,12 @@
#define HAS_ANASWVDD BIT(1)
/**
- * stm32_adc_common_regs - stm32 common registers, compatible dependent data
+ * struct stm32_adc_common_regs - stm32 common registers
* @csr: common status register offset
* @ccr: common control register offset
- * @eoc1: adc1 end of conversion flag in @csr
- * @eoc2: adc2 end of conversion flag in @csr
- * @eoc3: adc3 end of conversion flag in @csr
+ * @eoc1_msk: adc1 end of conversion flag in @csr
+ * @eoc2_msk: adc2 end of conversion flag in @csr
+ * @eoc3_msk: adc3 end of conversion flag in @csr
* @ier: interrupt enable register offset for each adc
* @eocie_msk: end of conversion interrupt enable mask in @ier
*/
@@ -60,7 +60,7 @@ struct stm32_adc_common_regs {
struct stm32_adc_priv;
/**
- * stm32_adc_priv_cfg - stm32 core compatible configuration data
+ * struct stm32_adc_priv_cfg - stm32 core compatible configuration data
* @regs: common registers for all instances
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
@@ -79,6 +79,7 @@ struct stm32_adc_priv_cfg {
* @domain: irq domain reference
* @aclk: clock reference for the analog circuitry
* @bclk: bus clock common for all ADCs, depends on part used
+ * @max_clk_rate: desired maximum clock rate
* @booster: booster supply reference
* @vdd: vdd supply reference
* @vdda: vdda analog supply reference
@@ -95,6 +96,7 @@ struct stm32_adc_priv {
struct irq_domain *domain;
struct clk *aclk;
struct clk *bclk;
+ u32 max_clk_rate;
struct regulator *booster;
struct regulator *vdd;
struct regulator *vdda;
@@ -117,6 +119,7 @@ static int stm32f4_pclk_div[] = {2, 4, 6, 8};
/**
* stm32f4_adc_clk_sel() - Select stm32f4 ADC common clock prescaler
+ * @pdev: platform device
* @priv: stm32 ADC core private data
* Select clock prescaler used for analog conversions, before using ADC.
*/
@@ -140,7 +143,7 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
}
for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
- if ((rate / stm32f4_pclk_div[i]) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / stm32f4_pclk_div[i]) <= priv->max_clk_rate)
break;
}
if (i >= ARRAY_SIZE(stm32f4_pclk_div)) {
@@ -229,7 +232,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
if (ckmode)
continue;
- if ((rate / div) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / div) <= priv->max_clk_rate)
goto out;
}
}
@@ -249,7 +252,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
if (!ckmode)
continue;
- if ((rate / div) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / div) <= priv->max_clk_rate)
goto out;
}
@@ -654,6 +657,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct resource *res;
+ u32 max_rate;
int ret;
if (!pdev->dev.of_node)
@@ -730,6 +734,13 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->common.vref_mv = ret / 1000;
dev_dbg(&pdev->dev, "vref+=%dmV\n", priv->common.vref_mv);
+ ret = of_property_read_u32(pdev->dev.of_node, "st,max-clk-rate-hz",
+ &max_rate);
+ if (!ret)
+ priv->max_clk_rate = min(max_rate, priv->cfg->max_clk_rate_hz);
+ else
+ priv->max_clk_rate = priv->cfg->max_clk_rate_hz;
+
ret = priv->cfg->clk_sel(pdev, priv);
if (ret < 0)
goto err_hw_stop;
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 73aee5949b6b..3b291d72701c 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -102,7 +102,7 @@ struct stm32_adc_calib {
};
/**
- * stm32_adc_regs - stm32 ADC misc registers & bitfield desc
+ * struct stm32_adc_regs - stm32 ADC misc registers & bitfield desc
* @reg: register offset
* @mask: bitfield mask
* @shift: left shift
@@ -114,7 +114,7 @@ struct stm32_adc_regs {
};
/**
- * stm32_adc_regspec - stm32 registers definition, compatible dependent data
+ * struct stm32_adc_regspec - stm32 registers definition
* @dr: data register offset
* @ier_eoc: interrupt enable register & eocie bitfield
* @isr_eoc: interrupt status register & eoc bitfield
@@ -140,7 +140,7 @@ struct stm32_adc_regspec {
struct stm32_adc;
/**
- * stm32_adc_cfg - stm32 compatible configuration data
+ * struct stm32_adc_cfg - stm32 compatible configuration data
* @regs: registers descriptions
* @adc_info: per instance input channels definitions
* @trigs: external trigger sources
@@ -183,8 +183,8 @@ struct stm32_adc_cfg {
* @rx_buf: dma rx buffer cpu address
* @rx_dma_buf: dma rx buffer bus address
* @rx_buf_sz: dma rx buffer size
- * @difsel bitmask to set single-ended/differential channel
- * @pcsel bitmask to preselect channels on some devices
+ * @difsel: bitmask to set single-ended/differential channel
+ * @pcsel: bitmask to preselect channels on some devices
* @smpr_val: sampling time settings (e.g. smpr1 / smpr2)
* @cal: optional calibration data on some devices
* @chan_name: channel name array
@@ -254,7 +254,7 @@ static const struct stm32_adc_info stm32h7_adc_info = {
.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
};
-/**
+/*
* stm32f4_sq - describe regular sequence registers
* - L: sequence len (register & bit field)
* - SQ1..SQ16: sequence entries (register & bit field)
@@ -301,7 +301,7 @@ static struct stm32_adc_trig_info stm32f4_adc_trigs[] = {
{}, /* sentinel */
};
-/**
+/*
* stm32f4_smp_bits[] - describe sampling time register index & bit fields
* Sorted so it can be indexed by channel number.
*/
@@ -392,7 +392,7 @@ static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
{},
};
-/**
+/*
* stm32h7_smp_bits - describe sampling time register index & bit fields
* Sorted so it can be indexed by channel number.
*/
@@ -994,6 +994,7 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
/**
* stm32_adc_get_trig_extsel() - Get external trigger selection
+ * @indio_dev: IIO device structure
* @trig: trigger
*
* Returns trigger extsel value, if trig matches, -EINVAL otherwise.
@@ -1297,6 +1298,10 @@ static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
/**
* stm32_adc_debugfs_reg_access - read or write register value
+ * @indio_dev: IIO device structure
+ * @reg: register offset
+ * @writeval: value to write
+ * @readval: value to read
*
* To read a value from an ADC register:
* echo [ADC reg offset] > direct_reg_access
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index bd72727fc417..0f88048ea48f 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -175,7 +175,7 @@ static int stmpe_read_raw(struct iio_dev *indio_dev,
static irqreturn_t stmpe_adc_isr(int irq, void *dev_id)
{
struct stmpe_adc *info = (struct stmpe_adc *)dev_id;
- u16 data;
+ __be16 data;
if (info->channel <= STMPE_ADC_LAST_NR) {
int int_sta;
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 55c5119fe575..472b08f37fea 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -495,7 +495,7 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
ret = twl4030_madc_disable_irq(madc, i);
if (ret < 0)
dev_dbg(madc->dev, "Disable interrupt failed %d\n", i);
- madc->requests[i].result_pending = 1;
+ madc->requests[i].result_pending = true;
}
for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
r = &madc->requests[i];
@@ -507,8 +507,8 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
len = twl4030_madc_read_channels(madc, method->rbase,
r->channels, r->rbuf, r->raw);
/* Free request */
- r->result_pending = 0;
- r->active = 0;
+ r->result_pending = false;
+ r->active = false;
}
mutex_unlock(&madc->lock);
@@ -521,15 +521,15 @@ err_i2c:
*/
for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
r = &madc->requests[i];
- if (r->active == 0)
+ if (!r->active)
continue;
method = &twl4030_conversion_methods[r->method];
/* Read results */
len = twl4030_madc_read_channels(madc, method->rbase,
r->channels, r->rbuf, r->raw);
/* Free request */
- r->result_pending = 0;
- r->active = 0;
+ r->result_pending = false;
+ r->active = false;
}
mutex_unlock(&madc->lock);
@@ -652,16 +652,16 @@ static int twl4030_madc_conversion(struct twl4030_madc_request *req)
ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
if (ret < 0)
goto out;
- twl4030_madc->requests[req->method].active = 1;
+ twl4030_madc->requests[req->method].active = true;
/* Wait until conversion is ready (ctrl register returns EOC) */
ret = twl4030_madc_wait_conversion_ready(twl4030_madc, 5, method->ctrl);
if (ret) {
- twl4030_madc->requests[req->method].active = 0;
+ twl4030_madc->requests[req->method].active = false;
goto out;
}
ret = twl4030_madc_read_channels(twl4030_madc, method->rbase,
req->channels, req->rbuf, req->raw);
- twl4030_madc->requests[req->method].active = 0;
+ twl4030_madc->requests[req->method].active = false;
out:
mutex_unlock(&twl4030_madc->lock);
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 98b30475bbc6..cb7380bf07ca 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -802,7 +802,6 @@ static int vf610_adc_probe(struct platform_device *pdev)
{
struct vf610_adc *info;
struct iio_dev *indio_dev;
- struct resource *mem;
int irq;
int ret;
@@ -815,8 +814,7 @@ static int vf610_adc_probe(struct platform_device *pdev)
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 4fd389678dba..ec227b358cd6 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1150,7 +1150,6 @@ static int xadc_probe(struct platform_device *pdev)
const struct of_device_id *id;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
- struct resource *mem;
unsigned int conf0;
struct xadc *xadc;
int ret;
@@ -1180,8 +1179,7 @@ static int xadc_probe(struct platform_device *pdev)
spin_lock_init(&xadc->lock);
INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xadc->base = devm_ioremap_resource(&pdev->dev, mem);
+ xadc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xadc->base))
return PTR_ERR(xadc->base);
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index 3a20cb5d9bff..6c175eb1c7a7 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -323,16 +323,16 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
struct atlas_data *data = iio_priv(indio_dev);
int ret;
- ret = iio_triggered_buffer_predisable(indio_dev);
+ ret = atlas_set_interrupt(data, false);
if (ret)
return ret;
- ret = atlas_set_interrupt(data, false);
+ pm_runtime_mark_last_busy(&data->client->dev);
+ ret = pm_runtime_put_autosuspend(&data->client->dev);
if (ret)
return ret;
- pm_runtime_mark_last_busy(&data->client->dev);
- return pm_runtime_put_autosuspend(&data->client->dev);
+ return iio_triggered_buffer_predisable(indio_dev);
}
static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index 8cc8fe5e356d..403e8803471a 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -483,7 +483,7 @@ static void sgp_init(struct sgp_data *data)
data->iaq_defval_skip_jiffies =
43 * data->measure_interval_jiffies;
break;
- };
+ }
}
static const struct iio_info sgp_info = {
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index edbb956e81e8..acb9f8ecbb3d 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -117,7 +117,7 @@ static int sps30_do_cmd(struct sps30_state *state, u16 cmd, u8 *data, int size)
break;
case SPS30_READ_AUTO_CLEANING_PERIOD:
buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8;
- buf[1] = (u8)SPS30_AUTO_CLEANING_PERIOD;
+ buf[1] = (u8)(SPS30_AUTO_CLEANING_PERIOD & 0xff);
/* fall through */
case SPS30_READ_DATA_READY_FLAG:
case SPS30_READ_DATA:
diff --git a/drivers/iio/common/cros_ec_sensors/Kconfig b/drivers/iio/common/cros_ec_sensors/Kconfig
index cdbb29cfb907..fefad9572790 100644
--- a/drivers/iio/common/cros_ec_sensors/Kconfig
+++ b/drivers/iio/common/cros_ec_sensors/Kconfig
@@ -4,7 +4,7 @@
#
config IIO_CROS_EC_SENSORS_CORE
tristate "ChromeOS EC Sensors Core"
- depends on SYSFS && CROS_EC
+ depends on SYSFS && CROS_EC_SENSORHUB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index a6987726eeb8..7dce04473467 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -222,17 +222,11 @@ static const struct iio_info ec_sensors_info = {
static int cros_ec_sensors_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_sensors_state *state;
struct iio_chan_spec *channel;
int ret, i;
- if (!ec_dev || !ec_dev->ec_dev) {
- dev_warn(&pdev->dev, "No CROS EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index d2609e6feda4..81a7f692de2f 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
static char *cros_ec_loc[] = {
@@ -88,7 +89,8 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
- struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_sensorhub *sensor_hub = dev_get_drvdata(dev->parent);
+ struct cros_ec_dev *ec = sensor_hub->ec;
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
u32 ver_mask;
int ret, i;
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index cc42219a64f7..979070196da9 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -60,8 +60,8 @@ config AD5446
help
Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
- AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
- AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
+ AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611,
+ AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
To compile this driver as a module, choose M here: the
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 7df8b4cc295d..61c670f7fc5f 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -327,6 +327,7 @@ enum ad5446_supported_spi_device_ids {
ID_AD5541A,
ID_AD5512A,
ID_AD5553,
+ ID_AD5600,
ID_AD5601,
ID_AD5611,
ID_AD5621,
@@ -381,6 +382,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
.channel = AD5446_CHANNEL(14, 16, 0),
.write = ad5446_write,
},
+ [ID_AD5600] = {
+ .channel = AD5446_CHANNEL(16, 16, 0),
+ .write = ad5446_write,
+ },
[ID_AD5601] = {
.channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
.write = ad5446_write,
@@ -448,6 +453,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
{"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */
{"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */
{"ad5553", ID_AD5553},
+ {"ad5600", ID_AD5600},
{"ad5601", ID_AD5601},
{"ad5611", ID_AD5611},
{"ad5621", ID_AD5621},
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 8de9f40226e6..14bbac6bee98 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -41,6 +41,7 @@ struct ad7303_state {
struct regulator *vdd_reg;
struct regulator *vref_reg;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
@@ -79,7 +80,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (pwr_down)
st->config |= AD7303_CFG_POWER_DOWN(chan->channel);
@@ -90,7 +91,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
* mode, so just write one of the DAC channels again */
ad7303_write(st, chan->channel, st->dac_cache[chan->channel]);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return len;
}
@@ -116,7 +117,9 @@ static int ad7303_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
*val = st->dac_cache[chan->channel];
+ mutex_unlock(&st->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
vref_uv = ad7303_get_vref(st, chan);
@@ -144,11 +147,11 @@ static int ad7303_write_raw(struct iio_dev *indio_dev,
if (val >= (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad7303_write(st, chan->address, val);
if (ret == 0)
st->dac_cache[chan->channel] = val;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -211,6 +214,8 @@ static int ad7303_probe(struct spi_device *spi)
st->spi = spi;
+ mutex_init(&st->lock);
+
st->vdd_reg = devm_regulator_get(&spi->dev, "Vdd");
if (IS_ERR(st->vdd_reg))
return PTR_ERR(st->vdd_reg);
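As in the ad7303 change above, IIO drivers are moving away from indio_dev->mlock (which the core reserves for its own mode handling) to a lock owned by the driver state. A reduced sketch of the pattern under that assumption; struct foo_state, foo_write_channel() and the SPI bus are placeholders:

struct foo_state {
	struct spi_device *spi;
	struct mutex lock;	/* protects config and the cached values */
	u16 dac_cache[2];
};

static int foo_write_raw(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val, int val2, long mask)
{
	struct foo_state *st = iio_priv(indio_dev);
	int ret;

	mutex_lock(&st->lock);
	ret = foo_write_channel(st, chan->channel, val);
	if (!ret)
		st->dac_cache[chan->channel] = val;
	mutex_unlock(&st->lock);

	return ret;
}

static int foo_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct foo_state *st;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	st->spi = spi;
	mutex_init(&st->lock);	/* initialise before registering the device */

	/* channel table and iio_info setup omitted in this sketch */
	return devm_iio_device_register(&spi->dev, indio_dev);
}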
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 883e84e96609..0ab357bd3633 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -106,7 +106,6 @@ static int lpc18xx_dac_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct lpc18xx_dac *dac;
- struct resource *res;
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*dac));
@@ -117,8 +116,7 @@ static int lpc18xx_dac_probe(struct platform_device *pdev)
dac = iio_priv(indio_dev);
mutex_init(&dac->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dac->base = devm_ioremap_resource(&pdev->dev, res);
+ dac->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dac->base))
return PTR_ERR(dac->base);
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index d0fb3124de07..9e6b4cd0a5cc 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -50,6 +51,41 @@ static const struct regmap_config stm32_dac_regmap_cfg = {
.max_register = 0x3fc,
};
+static int stm32_dac_core_hw_start(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
+ struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+ int ret;
+
+ ret = regulator_enable(priv->vref);
+ if (ret < 0) {
+ dev_err(dev, "vref enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret < 0) {
+ dev_err(dev, "pclk enable failed: %d\n", ret);
+ goto err_regulator_disable;
+ }
+
+ return 0;
+
+err_regulator_disable:
+ regulator_disable(priv->vref);
+
+ return ret;
+}
+
+static void stm32_dac_core_hw_stop(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
+ struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+
+ clk_disable_unprepare(priv->pclk);
+ regulator_disable(priv->vref);
+}
+
static int stm32_dac_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -66,6 +102,8 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ platform_set_drvdata(pdev, &priv->common);
+
cfg = (const struct stm32_dac_cfg *)
of_match_device(dev->driver->of_match_table, dev)->data;
@@ -74,11 +112,19 @@ static int stm32_dac_probe(struct platform_device *pdev)
if (IS_ERR(mmio))
return PTR_ERR(mmio);
- regmap = devm_regmap_init_mmio(dev, mmio, &stm32_dac_regmap_cfg);
+ regmap = devm_regmap_init_mmio_clk(dev, "pclk", mmio,
+ &stm32_dac_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
priv->common.regmap = regmap;
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ dev_err(dev, "pclk get failed\n");
+ return ret;
+ }
+
priv->vref = devm_regulator_get(dev, "vref");
if (IS_ERR(priv->vref)) {
ret = PTR_ERR(priv->vref);
@@ -86,33 +132,22 @@ static int stm32_dac_probe(struct platform_device *pdev)
return ret;
}
- ret = regulator_enable(priv->vref);
- if (ret < 0) {
- dev_err(dev, "vref enable failed\n");
- return ret;
- }
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = stm32_dac_core_hw_start(dev);
+ if (ret)
+ goto err_pm_stop;
ret = regulator_get_voltage(priv->vref);
if (ret < 0) {
dev_err(dev, "vref get voltage failed, %d\n", ret);
- goto err_vref;
+ goto err_hw_stop;
}
priv->common.vref_mv = ret / 1000;
dev_dbg(dev, "vref+=%dmV\n", priv->common.vref_mv);
- priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk)) {
- ret = PTR_ERR(priv->pclk);
- dev_err(dev, "pclk get failed\n");
- goto err_vref;
- }
-
- ret = clk_prepare_enable(priv->pclk);
- if (ret < 0) {
- dev_err(dev, "pclk enable failed\n");
- goto err_vref;
- }
-
priv->rst = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
@@ -128,39 +163,79 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv->common.hfsel ?
STM32H7_DAC_CR_HFSEL : 0);
if (ret)
- goto err_pclk;
+ goto err_hw_stop;
}
- platform_set_drvdata(pdev, &priv->common);
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, dev);
if (ret < 0) {
dev_err(dev, "failed to populate DT children\n");
- goto err_pclk;
+ goto err_hw_stop;
}
+ pm_runtime_put(dev);
+
return 0;
-err_pclk:
- clk_disable_unprepare(priv->pclk);
-err_vref:
- regulator_disable(priv->vref);
+err_hw_stop:
+ stm32_dac_core_hw_stop(dev);
+err_pm_stop:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
static int stm32_dac_remove(struct platform_device *pdev)
{
- struct stm32_dac_common *common = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ of_platform_depopulate(&pdev->dev);
+ stm32_dac_core_hw_stop(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dac_core_resume(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+ int ret;
- of_platform_depopulate(&pdev->dev);
- clk_disable_unprepare(priv->pclk);
- regulator_disable(priv->vref);
+ if (priv->common.hfsel) {
+ /* restore hfsel (maybe lost under low power state) */
+ ret = regmap_update_bits(priv->common.regmap, STM32_DAC_CR,
+ STM32H7_DAC_CR_HFSEL,
+ STM32H7_DAC_CR_HFSEL);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_force_resume(dev);
+}
+
+static int __maybe_unused stm32_dac_core_runtime_suspend(struct device *dev)
+{
+ stm32_dac_core_hw_stop(dev);
return 0;
}
+static int __maybe_unused stm32_dac_core_runtime_resume(struct device *dev)
+{
+ return stm32_dac_core_hw_start(dev);
+}
+
+static const struct dev_pm_ops stm32_dac_core_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, stm32_dac_core_resume)
+ SET_RUNTIME_PM_OPS(stm32_dac_core_runtime_suspend,
+ stm32_dac_core_runtime_resume,
+ NULL)
+};
+
static const struct stm32_dac_cfg stm32h7_dac_cfg = {
.has_hfsel = true,
};
@@ -182,6 +257,7 @@ static struct platform_driver stm32_dac_driver = {
.driver = {
.name = "stm32-dac-core",
.of_match_table = stm32_dac_of_match,
+ .pm = &stm32_dac_core_pm_ops,
},
};
module_platform_driver(stm32_dac_driver);
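The stm32-dac-core rework factors all power handling into hw_start()/hw_stop() helpers and drives them from runtime PM, while system sleep is routed through forced runtime suspend/resume. A reduced sketch of that wiring for a hypothetical core driver; foo_hw_start()/foo_hw_stop() stand in for the regulator/clock helpers, and the HFSEL restore done on resume is left out:

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	foo_hw_stop(dev);	/* clk_disable_unprepare() + regulator_disable() */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	return foo_hw_start(dev);	/* regulator_enable() + clk_prepare_enable() */
}

static const struct dev_pm_ops foo_pm_ops = {
	/* System sleep reuses the runtime PM callbacks */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Hardware is brought up by hand, so mark the device active first */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = foo_hw_start(dev);
	if (ret) {
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* child registration omitted in this sketch */

	pm_runtime_put(dev);	/* allow runtime suspend once idle */
	return 0;
}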
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index cce26a3a6627..f22c1d9129b2 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "stm32-dac-core.h"
@@ -20,6 +21,8 @@
#define STM32_DAC_CHANNEL_2 2
#define STM32_DAC_IS_CHAN_1(ch) ((ch) & STM32_DAC_CHANNEL_1)
+#define STM32_DAC_AUTO_SUSPEND_DELAY_MS 2000
+
/**
* struct stm32_dac - private data of DAC driver
* @common: reference to DAC common data
@@ -49,15 +52,34 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
bool enable)
{
struct stm32_dac *dac = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
u32 msk = STM32_DAC_IS_CHAN_1(ch) ? STM32_DAC_CR_EN1 : STM32_DAC_CR_EN2;
u32 en = enable ? msk : 0;
int ret;
+ /* already enabled / disabled ? */
+ mutex_lock(&indio_dev->mlock);
+ ret = stm32_dac_is_enabled(indio_dev, ch);
+ if (ret < 0 || enable == !!ret) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ }
+
ret = regmap_update_bits(dac->common->regmap, STM32_DAC_CR, msk, en);
+ mutex_unlock(&indio_dev->mlock);
if (ret < 0) {
dev_err(&indio_dev->dev, "%s failed\n", en ?
"Enable" : "Disable");
- return ret;
+ goto err_put_pm;
}
/*
@@ -68,7 +90,20 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
if (en && dac->common->hfsel)
udelay(1);
+ if (!enable) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
return 0;
+
+err_put_pm:
+ if (enable) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
}
static int stm32_dac_get_value(struct stm32_dac *dac, int channel, int *val)
@@ -272,6 +307,7 @@ static int stm32_dac_chan_of_init(struct iio_dev *indio_dev)
static int stm32_dac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct stm32_dac *dac;
int ret;
@@ -296,9 +332,61 @@ static int stm32_dac_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- return devm_iio_device_register(&pdev->dev, indio_dev);
+ /* Get stm32-dac-core PM online */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, STM32_DAC_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_pm_put;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
+ return ret;
}
+static int stm32_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ iio_device_unregister(indio_dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dac_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int channel = indio_dev->channels[0].channel;
+ int ret;
+
+ /* Ensure DAC is disabled before suspend */
+ ret = stm32_dac_is_enabled(indio_dev, channel);
+ if (ret)
+ return ret < 0 ? ret : -EBUSY;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static const struct dev_pm_ops stm32_dac_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dac_suspend, pm_runtime_force_resume)
+};
+
static const struct of_device_id stm32_dac_of_match[] = {
{ .compatible = "st,stm32-dac", },
{},
@@ -307,9 +395,11 @@ MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
static struct platform_driver stm32_dac_driver = {
.probe = stm32_dac_probe,
+ .remove = stm32_dac_remove,
.driver = {
.name = "stm32-dac",
.of_match_table = stm32_dac_of_match,
+ .pm = &stm32_dac_pm_ops,
},
};
module_platform_driver(stm32_dac_driver);
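On the channel-driver side, a runtime-PM reference is now held only while a DAC channel is enabled: pm_runtime_get_sync() before setting the enable bit, pm_runtime_put_autosuspend() when disabling or on error, with the autosuspend delay configured once in probe via pm_runtime_set_autosuspend_delay() and pm_runtime_use_autosuspend(). A condensed sketch of that enable path; the foo names and foo_hw_set_enable() register write are placeholders:

static int foo_set_enable(struct device *dev, bool enable)
{
	int ret;

	if (enable) {
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* Balance the usage count even on failure */
			pm_runtime_put_noidle(dev);
			return ret;
		}
	}

	ret = foo_hw_set_enable(dev, enable);	/* assumed register write */
	if (ret) {
		if (enable)
			pm_runtime_put_autosuspend(dev);
		return ret;
	}

	if (!enable) {
		/* Drop the reference taken when the channel was enabled */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}

	return 0;
}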
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 0ec4d2609ef9..71f8a5c471c4 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -172,7 +172,6 @@ static int vf610_dac_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct vf610_dac *info;
- struct resource *mem;
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev,
@@ -185,8 +184,7 @@ static int vf610_dac_probe(struct platform_device *pdev)
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
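Both this hunk and the lpc18xx one replace the platform_get_resource() + devm_ioremap_resource() pair with the single devm_platform_ioremap_resource() helper. A minimal sketch of the resulting probe for a hypothetical platform driver foo:

struct foo_priv {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Look up MEM resource 0 and ioremap it, device-managed */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	platform_set_drvdata(pdev, priv);
	return 0;
}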
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index 236220d6de02..1b84b8e112fe 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -38,10 +38,12 @@ struct adis16080_chip_info {
* @us: actual spi_device to write data
* @info: chip specific parameters
* @buf: transmit or receive buffer
+ * @lock: lock to protect buffer during reads
**/
struct adis16080_state {
struct spi_device *us;
const struct adis16080_chip_info *info;
+ struct mutex lock;
__be16 buf ____cacheline_aligned;
};
@@ -82,9 +84,9 @@ static int adis16080_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = adis16080_read_sample(indio_dev, chan->address, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
@@ -196,6 +198,8 @@ static int adis16080_probe(struct spi_device *spi)
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
+ mutex_init(&st->lock);
+
/* Allocate the comms buffers */
st->us = spi;
st->info = &adis16080_chip_info[id->driver_data];
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index de3f66f89496..79e63c8a2ea8 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -76,9 +76,7 @@ static int adis16130_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
/* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
ret = adis16130_spi_read(indio_dev, chan->address, &temp);
- mutex_unlock(&indio_dev->mlock);
if (ret)
return ret;
*val = temp;
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index 5bec7ad53d8b..d637d52d051a 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -80,19 +80,19 @@ static ssize_t adis16136_show_serial(struct file *file,
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT1, &lot1);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT2, &lot2);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT3, &lot3);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.4x%.4x%.4x-%.4x\n", lot1, lot2,
@@ -116,7 +116,7 @@ static int adis16136_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -134,7 +134,7 @@ static int adis16136_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -191,7 +191,7 @@ static int adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
int ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
*freq = 32768 / (t + 1);
@@ -228,7 +228,7 @@ static ssize_t adis16136_read_frequency(struct device *dev,
int ret;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
return ret;
return sprintf(buf, "%d\n", freq);
@@ -256,7 +256,7 @@ static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
int i, ret;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
return ret;
for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
@@ -277,11 +277,11 @@ static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
mutex_lock(&indio_dev->mlock);
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, &val16);
- if (ret < 0)
+ if (ret)
goto err_unlock;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
goto err_unlock;
*val = freq / adis16136_3db_divisors[val16 & 0x07];
@@ -318,7 +318,7 @@ static int adis16136_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBBIAS:
ret = adis_read_reg_32(&adis16136->adis,
ADIS16136_REG_GYRO_OFF2, &val32);
- if (ret < 0)
+ if (ret)
return ret;
*val = sign_extend32(val32, 31);
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 998fb8d66fe3..981ae2291505 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -154,7 +154,7 @@ static int itg3200_write_raw(struct iio_dev *indio_dev,
t);
mutex_unlock(&indio_dev->mlock);
- return ret;
+ return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 80154bca18b6..8e908a749f95 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -543,7 +543,7 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
toread = bytes_per_datum;
offset = 1;
/* Put in some dummy value */
- fifo_values[0] = 0xAAAA;
+ fifo_values[0] = cpu_to_be16(0xAAAA);
}
ret = regmap_bulk_read(mpu3050->map,
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index c0acbb5d2ffb..57be68b291fa 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index bfe1cdb16846..963ff043eecf 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -278,31 +278,34 @@ static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
struct hdc100x_data *data = iio_priv(indio_dev);
int ret;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
/* Buffer is enabled. First set ACQ Mode, then attach poll func */
mutex_lock(&data->lock);
ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
HDC100X_REG_CONFIG_ACQ_MODE);
mutex_unlock(&data->lock);
if (ret)
- return ret;
+ iio_triggered_buffer_predisable(indio_dev);
- return iio_triggered_buffer_postenable(indio_dev);
+ return ret;
}
static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
{
struct hdc100x_data *data = iio_priv(indio_dev);
- int ret;
-
- /* First detach poll func, then reset ACQ mode. OK to disable buffer */
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret)
- return ret;
+ int ret, ret2;
mutex_lock(&data->lock);
ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
mutex_unlock(&data->lock);
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret2;
+
return ret;
}
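The hdc100x change enables the core triggered buffer first and rolls it back if the device configuration fails; on the disable side it records the first error but still runs the remaining teardown step. The "remember the first error, keep tearing down" idiom in isolation, with struct foo_data and foo_stop() as placeholders:

static int foo_buffer_predisable(struct iio_dev *indio_dev)
{
	struct foo_data *data = iio_priv(indio_dev);
	int ret, ret2;

	/* Stop device-side acquisition first */
	mutex_lock(&data->lock);
	ret = foo_stop(data);
	mutex_unlock(&data->lock);

	/* Detach the poll function even if foo_stop() failed */
	ret2 = iio_triggered_buffer_predisable(indio_dev);
	if (ret == 0)
		ret = ret2;	/* report the first failure */

	return ret;
}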
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index f3c7282321a8..60bb1029e759 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -40,6 +40,33 @@ config ADIS16480
source "drivers/iio/imu/bmi160/Kconfig"
+config FXOS8700
+ tristate
+
+config FXOS8700_I2C
+ tristate "NXP FXOS8700 I2C driver"
+ depends on I2C
+ select FXOS8700
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the NXP FXOS8700 m+g combo
+ sensor on I2C.
+
+ This driver can also be built as a module. If so, the module will be
+ called fxos8700_i2c.
+
+config FXOS8700_SPI
+ tristate "NXP FXOS8700 SPI driver"
+ depends on SPI
+ select FXOS8700
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the NXP FXOS8700 m+g combo
+ sensor on SPI.
+
+ This driver can also be built as a module. If so, the module will be
+ called fxos8700_spi.
+
config KMX61
tristate "Kionix KMX61 6-axis accelerometer and magnetometer"
depends on I2C
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 4a6958865504..5237fd4bc384 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -14,6 +14,11 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
obj-y += bmi160/
+
+obj-$(CONFIG_FXOS8700) += fxos8700_core.o
+obj-$(CONFIG_FXOS8700_I2C) += fxos8700_i2c.o
+obj-$(CONFIG_FXOS8700_SPI) += fxos8700_spi.o
+
obj-y += inv_mpu6050/
obj-$(CONFIG_KMX61) += kmx61.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 2cd2cc2316c6..e14c8536fd09 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -229,7 +229,8 @@ int adis_debugfs_reg_access(struct iio_dev *indio_dev,
int ret;
ret = adis_read_reg_16(adis, reg, &val16);
- *readval = val16;
+ if (ret == 0)
+ *readval = val16;
return ret;
} else {
@@ -286,7 +287,7 @@ int adis_check_status(struct adis *adis)
int i;
ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
- if (ret < 0)
+ if (ret)
return ret;
status &= adis->data->status_error_mask;
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 0575ff706bd4..44e46dc96e00 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -217,16 +217,16 @@ static ssize_t adis16400_show_serial_number(struct file *file,
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID1, &lot1);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID2, &lot2);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_SERIAL_NUMBER,
&serial_number);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.4x-%.4x-%.4x\n", lot1, lot2,
@@ -249,7 +249,7 @@ static int adis16400_show_product_id(void *arg, u64 *val)
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16400_PRODUCT_ID, &prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -266,7 +266,7 @@ static int adis16400_show_flash_count(void *arg, u64 *val)
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16400_FLASH_CNT, &flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -327,7 +327,7 @@ static int adis16334_get_freq(struct adis16400_state *st)
uint16_t t;
ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
t >>= ADIS16334_RATE_DIV_SHIFT;
@@ -359,7 +359,7 @@ static int adis16400_get_freq(struct adis16400_state *st)
uint16_t t;
ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 52851 : 1638404;
@@ -416,7 +416,7 @@ static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
}
ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
@@ -615,7 +615,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
ret = adis_read_reg_16(&st->adis,
ADIS16400_SENS_AVG,
&val16);
- if (ret < 0) {
+ if (ret) {
mutex_unlock(&indio_dev->mlock);
return ret;
}
@@ -626,12 +626,12 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val2 = (ret % 1000) * 1000;
}
mutex_unlock(&indio_dev->mlock);
- if (ret < 0)
+ if (ret)
return ret;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = st->variant->get_freq(st);
- if (ret < 0)
+ if (ret)
return ret;
*val = ret / 1000;
*val2 = (ret % 1000) * 1000;
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 6aed9e84abbf..b55812521537 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -80,7 +80,7 @@ static int adis16460_show_serial_number(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
*val = serial;
@@ -98,7 +98,7 @@ static int adis16460_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -116,7 +116,7 @@ static int adis16460_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_32(&adis16460->adis, ADIS16460_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -176,7 +176,7 @@ static int adis16460_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
unsigned int freq;
ret = adis_read_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, &t);
- if (ret < 0)
+ if (ret)
return ret;
freq = 2048000 / (t + 1);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8743b2f376e2..748f8bbf184d 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -181,7 +181,7 @@ static ssize_t adis16480_show_firmware_revision(struct file *file,
int ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_REV, &rev);
- if (ret < 0)
+ if (ret)
return ret;
len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
@@ -206,11 +206,11 @@ static ssize_t adis16480_show_firmware_date(struct file *file,
int ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_Y, &year);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_DM, &md);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n",
@@ -234,7 +234,7 @@ static int adis16480_show_serial_number(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
*val = serial;
@@ -252,7 +252,7 @@ static int adis16480_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -270,7 +270,7 @@ static int adis16480_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_32(&adis16480->adis, ADIS16480_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -353,7 +353,7 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
struct adis16480 *st = iio_priv(indio_dev);
uint16_t t;
int ret;
- unsigned freq;
+ unsigned int freq;
unsigned int reg;
if (st->clk_mode == ADIS16480_CLK_PPS)
@@ -362,7 +362,7 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
reg = ADIS16480_REG_DEC_RATE;
ret = adis_read_reg_16(&st->adis, reg, &t);
- if (ret < 0)
+ if (ret)
return ret;
/*
@@ -454,18 +454,20 @@ static int adis16480_get_calibbias(struct iio_dev *indio_dev,
case IIO_MAGN:
case IIO_PRESSURE:
ret = adis_read_reg_16(&st->adis, reg, &val16);
- *bias = sign_extend32(val16, 15);
+ if (ret == 0)
+ *bias = sign_extend32(val16, 15);
break;
case IIO_ANGL_VEL:
case IIO_ACCEL:
ret = adis_read_reg_32(&st->adis, reg, &val32);
- *bias = sign_extend32(val32, 31);
+ if (ret == 0)
+ *bias = sign_extend32(val32, 31);
break;
default:
- ret = -EINVAL;
+ ret = -EINVAL;
}
- if (ret < 0)
+ if (ret)
return ret;
return IIO_VAL_INT;
@@ -492,7 +494,7 @@ static int adis16480_get_calibscale(struct iio_dev *indio_dev,
int ret;
ret = adis_read_reg_16(&st->adis, reg, &val16);
- if (ret < 0)
+ if (ret)
return ret;
*scale = sign_extend32(val16, 15);
@@ -538,7 +540,7 @@ static int adis16480_get_filter_freq(struct iio_dev *indio_dev,
enable_mask = BIT(offset + 2);
ret = adis_read_reg_16(&st->adis, reg, &val);
- if (ret < 0)
+ if (ret)
return ret;
if (!(val & enable_mask))
@@ -564,7 +566,7 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
enable_mask = BIT(offset + 2);
ret = adis_read_reg_16(&st->adis, reg, &val);
- if (ret < 0)
+ if (ret)
return ret;
if (freq == 0) {
@@ -623,9 +625,13 @@ static int adis16480_read_raw(struct iio_dev *indio_dev,
*val2 = (st->chip_info->temp_scale % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_PRESSURE:
- *val = 0;
- *val2 = 4000; /* 40ubar = 0.004 kPa */
- return IIO_VAL_INT_PLUS_MICRO;
+ /*
+ * Max scale is 1310 mbar; the max raw value is 32767,
+ * shifted left by 16 bits to span the 32-bit range.
+ */
+ *val = 131; /* 1310mbar = 131 kPa */
+ *val2 = 32767 << 16;
+ return IIO_VAL_FRACTIONAL;
default:
return -EINVAL;
}
@@ -786,13 +792,14 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
/*
- * storing the value in rad/degree and the scale in degree
- * gives us the result in rad and better precession than
- * storing the scale directly in rad.
+ * Typically IIO_RAD_TO_DEGREE is applied in the denominator, which
+ * is exactly the same as IIO_DEGREE_TO_RAD in the numerator and
+ * gives a better approximation. That cannot be done here, however,
+ * since the result would not fit in a 32-bit variable.
*/
- .gyro_max_val = IIO_RAD_TO_DEGREE(22887),
- .gyro_max_scale = 300,
- .accel_max_val = IIO_M_S_2_TO_G(21973),
+ .gyro_max_val = 22887 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(300),
+ .accel_max_val = IIO_M_S_2_TO_G(21973 << 16),
.accel_max_scale = 18,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -802,9 +809,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16480] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(12500),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(12500 << 16),
.accel_max_scale = 10,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -814,9 +821,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16485] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(20000),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
.accel_max_scale = 5,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -826,9 +833,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16488] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(22500),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(22500 << 16),
.accel_max_scale = 18,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -838,9 +845,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_1] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 125,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(125),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -851,9 +858,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_2] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(18000),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 18000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -864,9 +871,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_3] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 2000,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(2000),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -877,9 +884,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_1] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 125,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(125),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -890,9 +897,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_2] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(18000),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 18000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -903,9 +910,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_3] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 2000,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(2000),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -919,6 +926,7 @@ static const struct iio_info adis16480_info = {
.read_raw = &adis16480_read_raw,
.write_raw = &adis16480_write_raw,
.update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
};
static int adis16480_stop_device(struct iio_dev *indio_dev)
@@ -940,7 +948,7 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
int ret;
ret = adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
- if (ret < 0)
+ if (ret)
return ret;
val &= ~ADIS16480_DRDY_EN_MSK;
@@ -1118,7 +1126,7 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, &val);
- if (ret < 0)
+ if (ret)
return ret;
pin = adis16480_of_get_ext_clk_pin(st, of_node);
@@ -1144,7 +1152,7 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
val |= mode;
ret = adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val);
- if (ret < 0)
+ if (ret)
return ret;
return clk_prepare_enable(st->ext_clk);
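The many `if (ret < 0)` to `if (ret)` conversions in the adis drivers reflect that adis_read_reg_16()/_32() return 0 on success and a negative errno on failure, so any nonzero value is an error that can be propagated directly. A tiny sketch of the convention; foo_read_reg() is a placeholder with the same 0/-errno contract:

static int foo_show_product_id(struct foo_state *st, u64 *val)
{
	u16 prod_id;
	int ret;

	ret = foo_read_reg(st, FOO_REG_PROD_ID, &prod_id);
	if (ret)	/* 0 on success, negative errno otherwise */
		return ret;

	*val = prod_id;
	return 0;
}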
diff --git a/drivers/iio/imu/fxos8700.h b/drivers/iio/imu/fxos8700.h
new file mode 100644
index 000000000000..6dfb8d7099e4
--- /dev/null
+++ b/drivers/iio/imu/fxos8700.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef FXOS8700_H_
+#define FXOS8700_H_
+
+extern const struct regmap_config fxos8700_regmap_config;
+
+int fxos8700_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi);
+
+#endif /* FXOS8700_H_ */
diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c
new file mode 100644
index 000000000000..7b47be44ea59
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_core.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU (accelerometer plus magnetometer)
+ *
+ * IIO core driver for FXOS8700, with support for I2C/SPI busses
+ *
+ * TODO: Buffer, trigger, and IRQ support
+ */
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include "fxos8700.h"
+
+/* Register Definitions */
+#define FXOS8700_STATUS 0x00
+#define FXOS8700_OUT_X_MSB 0x01
+#define FXOS8700_OUT_X_LSB 0x02
+#define FXOS8700_OUT_Y_MSB 0x03
+#define FXOS8700_OUT_Y_LSB 0x04
+#define FXOS8700_OUT_Z_MSB 0x05
+#define FXOS8700_OUT_Z_LSB 0x06
+#define FXOS8700_F_SETUP 0x09
+#define FXOS8700_TRIG_CFG 0x0a
+#define FXOS8700_SYSMOD 0x0b
+#define FXOS8700_INT_SOURCE 0x0c
+#define FXOS8700_WHO_AM_I 0x0d
+#define FXOS8700_XYZ_DATA_CFG 0x0e
+#define FXOS8700_HP_FILTER_CUTOFF 0x0f
+#define FXOS8700_PL_STATUS 0x10
+#define FXOS8700_PL_CFG 0x11
+#define FXOS8700_PL_COUNT 0x12
+#define FXOS8700_PL_BF_ZCOMP 0x13
+#define FXOS8700_PL_THS_REG 0x14
+#define FXOS8700_A_FFMT_CFG 0x15
+#define FXOS8700_A_FFMT_SRC 0x16
+#define FXOS8700_A_FFMT_THS 0x17
+#define FXOS8700_A_FFMT_COUNT 0x18
+#define FXOS8700_TRANSIENT_CFG 0x1d
+#define FXOS8700_TRANSIENT_SRC 0x1e
+#define FXOS8700_TRANSIENT_THS 0x1f
+#define FXOS8700_TRANSIENT_COUNT 0x20
+#define FXOS8700_PULSE_CFG 0x21
+#define FXOS8700_PULSE_SRC 0x22
+#define FXOS8700_PULSE_THSX 0x23
+#define FXOS8700_PULSE_THSY 0x24
+#define FXOS8700_PULSE_THSZ 0x25
+#define FXOS8700_PULSE_TMLT 0x26
+#define FXOS8700_PULSE_LTCY 0x27
+#define FXOS8700_PULSE_WIND 0x28
+#define FXOS8700_ASLP_COUNT 0x29
+#define FXOS8700_CTRL_REG1 0x2a
+#define FXOS8700_CTRL_REG2 0x2b
+#define FXOS8700_CTRL_REG3 0x2c
+#define FXOS8700_CTRL_REG4 0x2d
+#define FXOS8700_CTRL_REG5 0x2e
+#define FXOS8700_OFF_X 0x2f
+#define FXOS8700_OFF_Y 0x30
+#define FXOS8700_OFF_Z 0x31
+#define FXOS8700_M_DR_STATUS 0x32
+#define FXOS8700_M_OUT_X_MSB 0x33
+#define FXOS8700_M_OUT_X_LSB 0x34
+#define FXOS8700_M_OUT_Y_MSB 0x35
+#define FXOS8700_M_OUT_Y_LSB 0x36
+#define FXOS8700_M_OUT_Z_MSB 0x37
+#define FXOS8700_M_OUT_Z_LSB 0x38
+#define FXOS8700_CMP_X_MSB 0x39
+#define FXOS8700_CMP_X_LSB 0x3a
+#define FXOS8700_CMP_Y_MSB 0x3b
+#define FXOS8700_CMP_Y_LSB 0x3c
+#define FXOS8700_CMP_Z_MSB 0x3d
+#define FXOS8700_CMP_Z_LSB 0x3e
+#define FXOS8700_M_OFF_X_MSB 0x3f
+#define FXOS8700_M_OFF_X_LSB 0x40
+#define FXOS8700_M_OFF_Y_MSB 0x41
+#define FXOS8700_M_OFF_Y_LSB 0x42
+#define FXOS8700_M_OFF_Z_MSB 0x43
+#define FXOS8700_M_OFF_Z_LSB 0x44
+#define FXOS8700_MAX_X_MSB 0x45
+#define FXOS8700_MAX_X_LSB 0x46
+#define FXOS8700_MAX_Y_MSB 0x47
+#define FXOS8700_MAX_Y_LSB 0x48
+#define FXOS8700_MAX_Z_MSB 0x49
+#define FXOS8700_MAX_Z_LSB 0x4a
+#define FXOS8700_MIN_X_MSB 0x4b
+#define FXOS8700_MIN_X_LSB 0x4c
+#define FXOS8700_MIN_Y_MSB 0x4d
+#define FXOS8700_MIN_Y_LSB 0x4e
+#define FXOS8700_MIN_Z_MSB 0x4f
+#define FXOS8700_MIN_Z_LSB 0x50
+#define FXOS8700_TEMP 0x51
+#define FXOS8700_M_THS_CFG 0x52
+#define FXOS8700_M_THS_SRC 0x53
+#define FXOS8700_M_THS_X_MSB 0x54
+#define FXOS8700_M_THS_X_LSB 0x55
+#define FXOS8700_M_THS_Y_MSB 0x56
+#define FXOS8700_M_THS_Y_LSB 0x57
+#define FXOS8700_M_THS_Z_MSB 0x58
+#define FXOS8700_M_THS_Z_LSB 0x59
+#define FXOS8700_M_THS_COUNT 0x5a
+#define FXOS8700_M_CTRL_REG1 0x5b
+#define FXOS8700_M_CTRL_REG2 0x5c
+#define FXOS8700_M_CTRL_REG3 0x5d
+#define FXOS8700_M_INT_SRC 0x5e
+#define FXOS8700_A_VECM_CFG 0x5f
+#define FXOS8700_A_VECM_THS_MSB 0x60
+#define FXOS8700_A_VECM_THS_LSB 0x61
+#define FXOS8700_A_VECM_CNT 0x62
+#define FXOS8700_A_VECM_INITX_MSB 0x63
+#define FXOS8700_A_VECM_INITX_LSB 0x64
+#define FXOS8700_A_VECM_INITY_MSB 0x65
+#define FXOS8700_A_VECM_INITY_LSB 0x66
+#define FXOS8700_A_VECM_INITZ_MSB 0x67
+#define FXOS8700_A_VECM_INITZ_LSB 0x68
+#define FXOS8700_M_VECM_CFG 0x69
+#define FXOS8700_M_VECM_THS_MSB 0x6a
+#define FXOS8700_M_VECM_THS_LSB 0x6b
+#define FXOS8700_M_VECM_CNT 0x6c
+#define FXOS8700_M_VECM_INITX_MSB 0x6d
+#define FXOS8700_M_VECM_INITX_LSB 0x6e
+#define FXOS8700_M_VECM_INITY_MSB 0x6f
+#define FXOS8700_M_VECM_INITY_LSB 0x70
+#define FXOS8700_M_VECM_INITZ_MSB 0x71
+#define FXOS8700_M_VECM_INITZ_LSB 0x72
+#define FXOS8700_A_FFMT_THS_X_MSB 0x73
+#define FXOS8700_A_FFMT_THS_X_LSB 0x74
+#define FXOS8700_A_FFMT_THS_Y_MSB 0x75
+#define FXOS8700_A_FFMT_THS_Y_LSB 0x76
+#define FXOS8700_A_FFMT_THS_Z_MSB 0x77
+#define FXOS8700_A_FFMT_THS_Z_LSB 0x78
+#define FXOS8700_A_TRAN_INIT_MSB 0x79
+#define FXOS8700_A_TRAN_INIT_LSB_X 0x7a
+#define FXOS8700_A_TRAN_INIT_LSB_Y 0x7b
+#define FXOS8700_A_TRAN_INIT_LSB_Z 0x7d
+#define FXOS8700_TM_NVM_LOCK 0x7e
+#define FXOS8700_NVM_DATA0_35 0x80
+#define FXOS8700_NVM_DATA_BNK3 0xa4
+#define FXOS8700_NVM_DATA_BNK2 0xa5
+#define FXOS8700_NVM_DATA_BNK1 0xa6
+#define FXOS8700_NVM_DATA_BNK0 0xa7
+
+/* Bit definitions for FXOS8700_CTRL_REG1 */
+#define FXOS8700_CTRL_ODR_MSK 0x38
+#define FXOS8700_CTRL_ODR_MAX 0x00
+#define FXOS8700_CTRL_ODR_MIN GENMASK(4, 3)
+
+/* Bit definitions for FXOS8700_M_CTRL_REG1 */
+#define FXOS8700_HMS_MASK GENMASK(1, 0)
+#define FXOS8700_OS_MASK GENMASK(4, 2)
+
+/* Bit definitions for FXOS8700_M_CTRL_REG2 */
+#define FXOS8700_MAXMIN_RST BIT(2)
+#define FXOS8700_MAXMIN_DIS_THS BIT(3)
+#define FXOS8700_MAXMIN_DIS BIT(4)
+
+#define FXOS8700_ACTIVE 0x01
+#define FXOS8700_ACTIVE_MIN_USLEEP 4000 /* from table 6 in datasheet */
+
+#define FXOS8700_DEVICE_ID 0xC7
+#define FXOS8700_PRE_DEVICE_ID 0xC4
+#define FXOS8700_DATA_BUF_SIZE 3
+
+struct fxos8700_data {
+ struct regmap *regmap;
+ struct iio_trigger *trig;
+ __be16 buf[FXOS8700_DATA_BUF_SIZE] ____cacheline_aligned;
+};
+
+/* Regmap info */
+static const struct regmap_range read_range[] = {
+ {
+ .range_min = FXOS8700_STATUS,
+ .range_max = FXOS8700_A_FFMT_COUNT,
+ }, {
+ .range_min = FXOS8700_TRANSIENT_CFG,
+ .range_max = FXOS8700_A_FFMT_THS_Z_LSB,
+ },
+};
+
+static const struct regmap_range write_range[] = {
+ {
+ .range_min = FXOS8700_F_SETUP,
+ .range_max = FXOS8700_TRIG_CFG,
+ }, {
+ .range_min = FXOS8700_XYZ_DATA_CFG,
+ .range_max = FXOS8700_HP_FILTER_CUTOFF,
+ }, {
+ .range_min = FXOS8700_PL_CFG,
+ .range_max = FXOS8700_A_FFMT_CFG,
+ }, {
+ .range_min = FXOS8700_A_FFMT_THS,
+ .range_max = FXOS8700_TRANSIENT_CFG,
+ }, {
+ .range_min = FXOS8700_TRANSIENT_THS,
+ .range_max = FXOS8700_PULSE_CFG,
+ }, {
+ .range_min = FXOS8700_PULSE_THSX,
+ .range_max = FXOS8700_OFF_Z,
+ }, {
+ .range_min = FXOS8700_M_OFF_X_MSB,
+ .range_max = FXOS8700_M_OFF_Z_LSB,
+ }, {
+ .range_min = FXOS8700_M_THS_CFG,
+ .range_max = FXOS8700_M_THS_CFG,
+ }, {
+ .range_min = FXOS8700_M_THS_X_MSB,
+ .range_max = FXOS8700_M_CTRL_REG3,
+ }, {
+ .range_min = FXOS8700_A_VECM_CFG,
+ .range_max = FXOS8700_A_FFMT_THS_Z_LSB,
+ },
+};
+
+static const struct regmap_access_table driver_read_table = {
+ .yes_ranges = read_range,
+ .n_yes_ranges = ARRAY_SIZE(read_range),
+};
+
+static const struct regmap_access_table driver_write_table = {
+ .yes_ranges = write_range,
+ .n_yes_ranges = ARRAY_SIZE(write_range),
+};
+
+const struct regmap_config fxos8700_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FXOS8700_NVM_DATA_BNK0,
+ .rd_table = &driver_read_table,
+ .wr_table = &driver_write_table,
+};
+EXPORT_SYMBOL(fxos8700_regmap_config);
+
+#define FXOS8700_CHANNEL(_type, _axis) { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+enum fxos8700_accel_scale_bits {
+ MODE_2G = 0,
+ MODE_4G,
+ MODE_8G,
+};
+
+/* scan indexes follow DATA register order */
+enum fxos8700_scan_axis {
+ FXOS8700_SCAN_ACCEL_X = 0,
+ FXOS8700_SCAN_ACCEL_Y,
+ FXOS8700_SCAN_ACCEL_Z,
+ FXOS8700_SCAN_MAGN_X,
+ FXOS8700_SCAN_MAGN_Y,
+ FXOS8700_SCAN_MAGN_Z,
+ FXOS8700_SCAN_RHALL,
+ FXOS8700_SCAN_TIMESTAMP,
+};
+
+enum fxos8700_sensor {
+ FXOS8700_ACCEL = 0,
+ FXOS8700_MAGN,
+ FXOS8700_NUM_SENSORS /* must be last */
+};
+
+enum fxos8700_int_pin {
+ FXOS8700_PIN_INT1,
+ FXOS8700_PIN_INT2
+};
+
+struct fxos8700_scale {
+ u8 bits;
+ int uscale;
+};
+
+struct fxos8700_odr {
+ u8 bits;
+ int odr;
+ int uodr;
+};
+
+static const struct fxos8700_scale fxos8700_accel_scale[] = {
+ { MODE_2G, 244},
+ { MODE_4G, 488},
+ { MODE_8G, 976},
+};
+
+/*
+ * Accelerometer and magnetometer have the same ODR options, set in the
+ * CTRL_REG1 register. ODR is halved when using both sensors at once in
+ * hybrid mode.
+ */
+static const struct fxos8700_odr fxos8700_odr[] = {
+ {0x00, 800, 0},
+ {0x01, 400, 0},
+ {0x02, 200, 0},
+ {0x03, 100, 0},
+ {0x04, 50, 0},
+ {0x05, 12, 500000},
+ {0x06, 6, 250000},
+ {0x07, 1, 562500},
+};
+
+static const struct iio_chan_spec fxos8700_channels[] = {
+ FXOS8700_CHANNEL(IIO_ACCEL, X),
+ FXOS8700_CHANNEL(IIO_ACCEL, Y),
+ FXOS8700_CHANNEL(IIO_ACCEL, Z),
+ FXOS8700_CHANNEL(IIO_MAGN, X),
+ FXOS8700_CHANNEL(IIO_MAGN, Y),
+ FXOS8700_CHANNEL(IIO_MAGN, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(FXOS8700_SCAN_TIMESTAMP),
+};
+
+static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type)
+{
+ switch (iio_type) {
+ case IIO_ACCEL:
+ return FXOS8700_ACCEL;
+ case IIO_MAGN:
+ return FXOS8700_MAGN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxos8700_set_active_mode(struct fxos8700_data *data,
+ enum fxos8700_sensor t, bool mode)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, mode);
+ if (ret)
+ return ret;
+
+ usleep_range(FXOS8700_ACTIVE_MIN_USLEEP,
+ FXOS8700_ACTIVE_MIN_USLEEP + 1000);
+
+ return 0;
+}
+
+static int fxos8700_set_scale(struct fxos8700_data *data,
+ enum fxos8700_sensor t, int uscale)
+{
+ int i;
+ static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
+ struct device *dev = regmap_get_device(data->regmap);
+
+ if (t == FXOS8700_MAGN) {
+ dev_err(dev, "Magnetometer scale is locked at 1200uT\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < scale_num; i++)
+ if (fxos8700_accel_scale[i].uscale == uscale)
+ break;
+
+ if (i == scale_num)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
+ fxos8700_accel_scale[i].bits);
+}
+
+static int fxos8700_get_scale(struct fxos8700_data *data,
+ enum fxos8700_sensor t, int *uscale)
+{
+ int i, ret, val;
+ static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
+
+ if (t == FXOS8700_MAGN) {
+ *uscale = 1200; /* Magnetometer is locked at 1200uT */
+ return 0;
+ }
+
+ ret = regmap_read(data->regmap, FXOS8700_XYZ_DATA_CFG, &val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < scale_num; i++) {
+ if (fxos8700_accel_scale[i].bits == (val & 0x3)) {
+ *uscale = fxos8700_accel_scale[i].uscale;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int fxos8700_get_data(struct fxos8700_data *data, int chan_type,
+ int axis, int *val)
+{
+ u8 base, reg;
+ int ret;
+ enum fxos8700_sensor type = fxos8700_to_sensor(chan_type);
+
+ base = type ? FXOS8700_M_OUT_X_MSB : FXOS8700_OUT_X_MSB;
+
+ /* Block read 6 bytes of device output registers to avoid data loss */
+ ret = regmap_bulk_read(data->regmap, base, data->buf,
+ FXOS8700_DATA_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ /* Convert axis to buffer index */
+ reg = axis - IIO_MOD_X;
+
+ /* Convert to native endianness */
+ *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15);
+
+ return 0;
+}
+
+static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+ int odr, int uodr)
+{
+ int i, ret, val;
+ bool active_mode;
+ static const int odr_num = ARRAY_SIZE(fxos8700_odr);
+
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ active_mode = val & FXOS8700_ACTIVE;
+
+ if (active_mode) {
+ /*
+ * The device must be in standby mode to change any of the
+ * other fields within CTRL_REG1
+ */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ val & ~FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < odr_num; i++)
+ if (fxos8700_odr[i].odr == odr && fxos8700_odr[i].uodr == uodr)
+ break;
+
+ if (i >= odr_num)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MSK + FXOS8700_ACTIVE,
+ fxos8700_odr[i].bits << 3 | active_mode);
+}
+
+static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+ int *odr, int *uodr)
+{
+ int i, val, ret;
+ static const int odr_num = ARRAY_SIZE(fxos8700_odr);
+
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ val &= FXOS8700_CTRL_ODR_MSK;
+
+ for (i = 0; i < odr_num; i++)
+ if (val == fxos8700_odr[i].bits)
+ break;
+
+ if (i >= odr_num)
+ return -EINVAL;
+
+ *odr = fxos8700_odr[i].odr;
+ *uodr = fxos8700_odr[i].uodr;
+
+ return 0;
+}
+
+static int fxos8700_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct fxos8700_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = fxos8700_get_data(data, chan->type, chan->channel2, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ ret = fxos8700_get_scale(data, fxos8700_to_sensor(chan->type),
+ val2);
+ return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = fxos8700_get_odr(data, fxos8700_to_sensor(chan->type),
+ val, val2);
+ return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxos8700_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct fxos8700_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return fxos8700_set_scale(data, fxos8700_to_sensor(chan->type),
+ val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return fxos8700_set_odr(data, fxos8700_to_sensor(chan->type),
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static IIO_CONST_ATTR(in_accel_sampling_frequency_available,
+ "1.5625 6.25 12.5 50 100 200 400 800");
+static IIO_CONST_ATTR(in_magn_sampling_frequency_available,
+ "1.5625 6.25 12.5 50 100 200 400 800");
+static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976");
+static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200");
+
+static struct attribute *fxos8700_attrs[] = {
+ &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_magn_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_in_magn_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fxos8700_attrs_group = {
+ .attrs = fxos8700_attrs,
+};
+
+static const struct iio_info fxos8700_info = {
+ .read_raw = fxos8700_read_raw,
+ .write_raw = fxos8700_write_raw,
+ .attrs = &fxos8700_attrs_group,
+};
+
+static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi)
+{
+ int ret;
+ unsigned int val;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_read(data->regmap, FXOS8700_WHO_AM_I, &val);
+ if (ret) {
+ dev_err(dev, "Error reading chip id\n");
+ return ret;
+ }
+ if (val != FXOS8700_DEVICE_ID && val != FXOS8700_PRE_DEVICE_ID) {
+ dev_err(dev, "Wrong chip id, got %x expected %x or %x\n",
+ val, FXOS8700_DEVICE_ID, FXOS8700_PRE_DEVICE_ID);
+ return -ENODEV;
+ }
+
+ ret = fxos8700_set_active_mode(data, FXOS8700_ACCEL, true);
+ if (ret)
+ return ret;
+
+ ret = fxos8700_set_active_mode(data, FXOS8700_MAGN, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The device must be in standby mode to change any of the other fields
+ * within CTRL_REG1
+ */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, 0x00);
+ if (ret)
+ return ret;
+
+ /* Set max oversample ratio (OSR) and both devices active */
+ ret = regmap_write(data->regmap, FXOS8700_M_CTRL_REG1,
+ FXOS8700_HMS_MASK | FXOS8700_OS_MASK);
+ if (ret)
+ return ret;
+
+ /* Disable and reset min/max measurements and thresholds */
+ ret = regmap_write(data->regmap, FXOS8700_M_CTRL_REG2,
+ FXOS8700_MAXMIN_RST | FXOS8700_MAXMIN_DIS_THS |
+ FXOS8700_MAXMIN_DIS);
+ if (ret)
+ return ret;
+
+ /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+
+ /* Set for max full-scale range (+/-8G) */
+ return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
+}
+
+static void fxos8700_chip_uninit(void *data)
+{
+ struct fxos8700_data *fxos8700_data = data;
+
+ fxos8700_set_active_mode(fxos8700_data, FXOS8700_ACCEL, false);
+ fxos8700_set_active_mode(fxos8700_data, FXOS8700_MAGN, false);
+}
+
+int fxos8700_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi)
+{
+ struct iio_dev *indio_dev;
+ struct fxos8700_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+
+ ret = fxos8700_chip_init(data, use_spi);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, fxos8700_chip_uninit, data);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = fxos8700_channels;
+ indio_dev->num_channels = ARRAY_SIZE(fxos8700_channels);
+ indio_dev->name = name ? name : "fxos8700";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &fxos8700_info;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_GPL(fxos8700_core_probe);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 6-Axis Acc and Mag Combo Sensor driver");
+MODULE_LICENSE("GPL v2");
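fxos8700_get_data() above block-reads three big-endian 16-bit words and converts each sample with be16_to_cpu() plus sign_extend32(..., 15), since the output registers hold two's-complement values. The conversion step in isolation, as a standalone sketch:

#include <linux/bitops.h>
#include <asm/byteorder.h>

/* Convert one raw big-endian sample to a signed CPU-order value */
static int foo_sample_to_int(__be16 raw)
{
	/* be16_to_cpu() fixes byte order, sign_extend32() restores the sign */
	return sign_extend32(be16_to_cpu(raw), 15);
}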
diff --git a/drivers/iio/imu/fxos8700_i2c.c b/drivers/iio/imu/fxos8700_i2c.c
new file mode 100644
index 000000000000..3ceb76366313
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_i2c.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU, I2C bits
+ *
+ * 7-bit I2C slave address determined by SA1 and SA0 logic level
+ * inputs represented in the following table:
+ * SA1 | SA0 | Slave Address
+ * 0 | 0 | 0x1E
+ * 0 | 1 | 0x1D
+ * 1 | 0 | 0x1C
+ * 1 | 1 | 0x1F
+ */
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+
+#include "fxos8700.h"
+
+static int fxos8700_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+
+ regmap = devm_regmap_init_i2c(client, &fxos8700_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ if (id)
+ name = id->name;
+
+ return fxos8700_core_probe(&client->dev, regmap, name, false);
+}
+
+static const struct i2c_device_id fxos8700_i2c_id[] = {
+ {"fxos8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fxos8700_i2c_id);
+
+static const struct acpi_device_id fxos8700_acpi_match[] = {
+ {"FXOS8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fxos8700_acpi_match);
+
+static const struct of_device_id fxos8700_of_match[] = {
+ { .compatible = "nxp,fxos8700" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxos8700_of_match);
+
+static struct i2c_driver fxos8700_i2c_driver = {
+ .driver = {
+ .name = "fxos8700_i2c",
+ .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .of_match_table = fxos8700_of_match,
+ },
+ .probe = fxos8700_i2c_probe,
+ .id_table = fxos8700_i2c_id,
+};
+module_i2c_driver(fxos8700_i2c_driver);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 I2C driver");
+MODULE_LICENSE("GPL v2");
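
For reference, the SA1/SA0 table in the header comment above reduces to a simple lookup. This helper is purely illustrative and not part of the patch:

static u8 fxos8700_i2c_addr(bool sa1, bool sa0)
{
	/* index is SA1:SA0, values from the table in fxos8700_i2c.c */
	static const u8 addr[] = { 0x1e, 0x1d, 0x1c, 0x1f };

	return addr[(sa1 << 1) | sa0];
}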
diff --git a/drivers/iio/imu/fxos8700_spi.c b/drivers/iio/imu/fxos8700_spi.c
new file mode 100644
index 000000000000..57e7bb6444e7
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_spi.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU, SPI bits
+ */
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "fxos8700.h"
+
+static int fxos8700_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ regmap = devm_regmap_init_spi(spi, &fxos8700_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return fxos8700_core_probe(&spi->dev, regmap, id->name, true);
+}
+
+static const struct spi_device_id fxos8700_spi_id[] = {
+ {"fxos8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, fxos8700_spi_id);
+
+static const struct acpi_device_id fxos8700_acpi_match[] = {
+ {"FXOS8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fxos8700_acpi_match);
+
+static const struct of_device_id fxos8700_of_match[] = {
+ { .compatible = "nxp,fxos8700" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxos8700_of_match);
+
+static struct spi_driver fxos8700_spi_driver = {
+ .probe = fxos8700_spi_probe,
+ .id_table = fxos8700_spi_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .of_match_table = fxos8700_of_match,
+ .name = "fxos8700_spi",
+ },
+};
+module_spi_driver(fxos8700_spi_driver);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
index 70ffe0d13d8c..c103441a906b 100644
--- a/drivers/iio/imu/inv_mpu6050/Makefile
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -4,10 +4,11 @@
#
obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
-inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
+inv-mpu6050-y := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o \
+ inv_mpu_aux.o inv_mpu_magn.o
obj-$(CONFIG_INV_MPU6050_I2C) += inv-mpu6050-i2c.o
-inv-mpu6050-i2c-objs := inv_mpu_i2c.o inv_mpu_acpi.o
+inv-mpu6050-i2c-y := inv_mpu_i2c.o inv_mpu_acpi.o
obj-$(CONFIG_INV_MPU6050_SPI) += inv-mpu6050-spi.o
-inv-mpu6050-spi-objs := inv_mpu_spi.o
+inv-mpu6050-spi-y := inv_mpu_spi.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
new file mode 100644
index 000000000000..7327e5723f96
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+
+#include "inv_mpu_aux.h"
+#include "inv_mpu_iio.h"
+
+/*
+ * i2c master auxiliary bus transfer function.
+ * Requires the i2c slave operations to be set up correctly beforehand.
+ */
+static int inv_mpu_i2c_master_xfer(const struct inv_mpu6050_state *st)
+{
+ /* use 50hz frequency for xfer */
+ const unsigned int freq = 50;
+ const unsigned int period_ms = 1000 / freq;
+ uint8_t d;
+ unsigned int user_ctrl;
+ int ret;
+
+ /* set sample rate */
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(freq);
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* start i2c master */
+ user_ctrl = st->chip_config.user_ctrl | INV_MPU6050_BIT_I2C_MST_EN;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ goto error_restore_rate;
+
+ /* wait for xfer: 1 period + half-period margin */
+ msleep(period_ms + period_ms / 2);
+
+ /* stop i2c master */
+ user_ctrl = st->chip_config.user_ctrl;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ goto error_stop_i2c;
+
+ /* restore sample rate */
+ d = st->chip_config.divider;
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ goto error_restore_rate;
+
+ return 0;
+
+error_stop_i2c:
+ regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl);
+error_restore_rate:
+ regmap_write(st->map, st->reg->sample_rate_div, st->chip_config.divider);
+ return ret;
+}
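
Worked example of the timing above: with freq = 50 Hz, period_ms = 1000 / 50 = 20 ms, so the msleep() window is 20 + 20 / 2 = 30 ms, i.e. one full sample period plus a half-period margin.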
+
+/**
+ * inv_mpu_aux_init() - init i2c auxiliary bus
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_init(const struct inv_mpu6050_state *st)
+{
+ unsigned int val;
+ int ret;
+
+ /* configure i2c master */
+ val = INV_MPU6050_BITS_I2C_MST_CLK_400KHZ |
+ INV_MPU6050_BIT_WAIT_FOR_ES;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_MST_CTRL, val);
+ if (ret)
+ return ret;
+
+ /* configure i2c master delay */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV4_CTRL, 0);
+ if (ret)
+ return ret;
+
+ val = INV_MPU6050_BIT_I2C_SLV0_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV1_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV2_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV3_DLY_EN |
+ INV_MPU6050_BIT_DELAY_ES_SHADOW;
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_MST_DELAY_CTRL, val);
+}
+
+/**
+ * inv_mpu_aux_read() - read register function for i2c auxiliary bus
+ * @st: driver internal state.
+ * @addr: chip i2c address
+ * @reg: chip register address
+ * @val: buffer for storing read bytes
+ * @size: number of bytes to read
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_read(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t *val, size_t size)
+{
+ unsigned int status;
+ int ret;
+
+ if (size > 0x0F)
+ return -EINVAL;
+
+ /* setup i2c SLV0 control: i2c addr, register, enable + size */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0),
+ INV_MPU6050_BIT_I2C_SLV_RNW | addr);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0), reg);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN | size);
+ if (ret)
+ return ret;
+
+ /* do i2c xfer */
+ ret = inv_mpu_i2c_master_xfer(st);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* disable i2c slave */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* check i2c status */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK)
+ return -EIO;
+
+ /* read data in registers */
+ return regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA,
+ val, size);
+
+error_disable_i2c:
+ regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ return ret;
+}
+
+/**
+ * inv_mpu_aux_write() - write register function for i2c auxiliary bus
+ * @st: driver internal state.
+ * @addr: chip i2c address
+ * @reg: chip register address
+ * @val: 1 byte value to write
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_write(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t val)
+{
+ unsigned int status;
+ int ret;
+
+ /* setup i2c SLV0 control: i2c addr, register, value, enable + size */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0), addr);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0), reg);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(0), val);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN | 1);
+ if (ret)
+ return ret;
+
+ /* do i2c xfer */
+ ret = inv_mpu_i2c_master_xfer(st);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* disable i2c slave */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* check i2c status */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK)
+ return -EIO;
+
+ return 0;
+
+error_disable_i2c:
+ regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ return ret;
+}
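
As a usage illustration, reading a one-byte WHO_AM_I style register over the auxiliary bus looks roughly like the fragment below. The device address and register number are hypothetical (chosen to mirror the AK8963 access done later in inv_mpu_magn.c), and an inv_mpu6050_state pointer st is assumed to be in scope:

	uint8_t whoami;
	int ret;

	/* one-byte read from register 0x00 of an aux device at i2c address 0x0c */
	ret = inv_mpu_aux_read(st, 0x0c, 0x00, &whoami, sizeof(whoami));
	if (ret)
		return ret;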
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h
new file mode 100644
index 000000000000..b66997545762
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#ifndef INV_MPU_AUX_H_
+#define INV_MPU_AUX_H_
+
+#include "inv_mpu_iio.h"
+
+int inv_mpu_aux_init(const struct inv_mpu6050_state *st);
+
+int inv_mpu_aux_read(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t *val, size_t size);
+
+int inv_mpu_aux_write(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t val);
+
+#endif /* INV_MPU_AUX_H_ */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 868281b8adb0..45e77b308238 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "inv_mpu_iio.h"
+#include "inv_mpu_magn.h"
/*
* this is the gyro scale translated from dynamic range plus/minus
@@ -103,6 +104,7 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
+ .magn_fifo_enable = false,
.accl_fs = INV_MPU6050_FS_02G,
.user_ctrl = 0,
};
@@ -341,6 +343,11 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
*/
st->chip_period = NSEC_PER_MSEC;
+ /* magn chip init, noop if not present in the chip */
+ result = inv_mpu_magn_probe(st);
+ if (result)
+ goto error_power_off;
+
return inv_mpu6050_set_power_itg(st, false);
error_power_off:
@@ -420,6 +427,9 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
IIO_MOD_X, val);
break;
+ case IIO_MAGN:
+ ret = inv_mpu_magn_read(st, chan->channel2, val);
+ break;
default:
ret = -EINVAL;
break;
@@ -478,6 +488,8 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
*val2 = INV_MPU6050_TEMP_SCALE;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MAGN:
+ return inv_mpu_magn_get_scale(st, chan, val, val2);
default:
return -EINVAL;
}
@@ -719,6 +731,11 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
goto fifo_rate_fail_power_off;
+ /* update rate for magn, noop if not present in chip */
+ result = inv_mpu_magn_set_rate(st, fifo_rate);
+ if (result)
+ goto fifo_rate_fail_power_off;
+
fifo_rate_fail_power_off:
result |= inv_mpu6050_set_power_itg(st, false);
fifo_rate_fail_unlock:
@@ -804,8 +821,14 @@ inv_get_mount_matrix(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct inv_mpu6050_state *data = iio_priv(indio_dev);
+ const struct iio_mount_matrix *matrix;
+
+ if (chan->type == IIO_MAGN)
+ matrix = &data->magn_orient;
+ else
+ matrix = &data->orientation;
- return &data->orientation;
+ return matrix;
}
static const struct iio_chan_spec_ext_info inv_ext_info[] = {
@@ -873,6 +896,98 @@ static const unsigned long inv_mpu_scan_masks[] = {
0,
};
+#define INV_MPU9X50_MAGN_CHAN(_chan2, _bits, _index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+ .ext_info = inv_ext_info, \
+ }
+
+static const struct iio_chan_spec inv_mpu9250_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
+ /*
+ * Note that temperature is only available via polled reads and is
+ * not part of the buffered scan output.
+ */
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_OFFSET)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
+ },
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+
+ /* Magnetometer resolution is 16 bits */
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_X, 16, INV_MPU9X50_SCAN_MAGN_X),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Y, 16, INV_MPU9X50_SCAN_MAGN_Y),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 16, INV_MPU9X50_SCAN_MAGN_Z),
+};
+
+static const unsigned long inv_mpu9x50_scan_masks[] = {
+ /* 3-axis accel */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z),
+ /* 3-axis gyro */
+ BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ /* 3-axis magn */
+ BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 6-axis accel + gyro */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ /* 6-axis accel + magn */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 6-axis gyro + magn */
+ BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 9-axis accel + gyro + magn */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ 0,
+};
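
As an example of how these masks are applied: if userspace enables only in_accel_x and in_magn_z, the IIO core picks the first superset in this list, the 6-axis accel + magn mask, so the driver captures all six channels and the core demultiplexes the two that were requested.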
+
static const struct iio_chan_spec inv_icm20602_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
{
@@ -1034,14 +1149,14 @@ error_power_off:
return result;
}
-static int inv_mpu_core_enable_regulator(struct inv_mpu6050_state *st)
+static int inv_mpu_core_enable_regulator_vddio(struct inv_mpu6050_state *st)
{
int result;
result = regulator_enable(st->vddio_supply);
if (result) {
dev_err(regmap_get_device(st->map),
- "Failed to enable regulator: %d\n", result);
+ "Failed to enable vddio regulator: %d\n", result);
} else {
/* Give the device a little bit of time to start up. */
usleep_range(35000, 70000);
@@ -1050,21 +1165,29 @@ static int inv_mpu_core_enable_regulator(struct inv_mpu6050_state *st)
return result;
}
-static int inv_mpu_core_disable_regulator(struct inv_mpu6050_state *st)
+static int inv_mpu_core_disable_regulator_vddio(struct inv_mpu6050_state *st)
{
int result;
result = regulator_disable(st->vddio_supply);
if (result)
dev_err(regmap_get_device(st->map),
- "Failed to disable regulator: %d\n", result);
+ "Failed to disable vddio regulator: %d\n", result);
return result;
}
static void inv_mpu_core_disable_regulator_action(void *_data)
{
- inv_mpu_core_disable_regulator(_data);
+ struct inv_mpu6050_state *st = _data;
+ int result;
+
+ result = regulator_disable(st->vdd_supply);
+ if (result)
+ dev_err(regmap_get_device(st->map),
+ "Failed to disable vdd regulator: %d\n", result);
+
+ inv_mpu_core_disable_regulator_vddio(st);
}
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
@@ -1133,6 +1256,15 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return -EINVAL;
}
+ st->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(st->vdd_supply)) {
+ if (PTR_ERR(st->vdd_supply) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vdd regulator %d\n",
+ (int)PTR_ERR(st->vdd_supply));
+
+ return PTR_ERR(st->vdd_supply);
+ }
+
st->vddio_supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(st->vddio_supply)) {
if (PTR_ERR(st->vddio_supply) != -EPROBE_DEFER)
@@ -1142,9 +1274,17 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return PTR_ERR(st->vddio_supply);
}
- result = inv_mpu_core_enable_regulator(st);
- if (result)
+ result = regulator_enable(st->vdd_supply);
+ if (result) {
+ dev_err(dev, "Failed to enable vdd regulator: %d\n", result);
+ return result;
+ }
+
+ result = inv_mpu_core_enable_regulator_vddio(st);
+ if (result) {
+ regulator_disable(st->vdd_supply);
return result;
+ }
result = devm_add_action_or_reset(dev, inv_mpu_core_disable_regulator_action,
st);
@@ -1154,6 +1294,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return result;
}
+ /* fill magnetometer orientation */
+ result = inv_mpu_magn_set_orient(st);
+ if (result)
+ return result;
+
/* power is turned on inside check chip type*/
result = inv_check_and_setup_chip(st);
if (result)
@@ -1165,9 +1310,6 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return result;
}
- if (inv_mpu_bus_setup)
- inv_mpu_bus_setup(indio_dev);
-
dev_set_drvdata(dev, indio_dev);
indio_dev->dev.parent = dev;
/* name will be NULL when enumerated via ACPI */
@@ -1176,14 +1318,37 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
else
indio_dev->name = dev_name(dev);
- if (chip_type == INV_ICM20602) {
+ /* requires parent device set in indio_dev */
+ if (inv_mpu_bus_setup)
+ inv_mpu_bus_setup(indio_dev);
+
+ switch (chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /*
+ * Use magnetometer inside the chip only if there is no i2c
+ * auxiliary device in use.
+ */
+ if (!st->magn_disabled) {
+ indio_dev->channels = inv_mpu9250_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
+ indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
+ } else {
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+ indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ }
+ break;
+ case INV_ICM20602:
indio_dev->channels = inv_icm20602_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
indio_dev->available_scan_masks = inv_icm20602_scan_masks;
- } else {
+ break;
+ default:
indio_dev->channels = inv_mpu_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ break;
}
indio_dev->info = &mpu_info;
@@ -1221,7 +1386,7 @@ static int inv_mpu_resume(struct device *dev)
int result;
mutex_lock(&st->lock);
- result = inv_mpu_core_enable_regulator(st);
+ result = inv_mpu_core_enable_regulator_vddio(st);
if (result)
goto out_unlock;
@@ -1239,7 +1404,7 @@ static int inv_mpu_suspend(struct device *dev)
mutex_lock(&st->lock);
result = inv_mpu6050_set_power_itg(st, false);
- inv_mpu_core_disable_regulator(st);
+ inv_mpu_core_disable_regulator_vddio(st);
mutex_unlock(&st->lock);
return result;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 4b8b5a87398c..389cc8505e0e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -68,6 +68,56 @@ static const char *inv_mpu_match_acpi_device(struct device *dev,
return dev_name(dev);
}
+static bool inv_mpu_i2c_aux_bus(struct device *dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+
+ switch (st->chip_type) {
+ case INV_ICM20608:
+ case INV_ICM20602:
+ /* no i2c auxiliary bus on the chip */
+ return false;
+ case INV_MPU9250:
+ case INV_MPU9255:
+ if (st->magn_disabled)
+ return true;
+ else
+ return false;
+ default:
+ return true;
+ }
+}
+
+/*
+ * MPU9xxx magnetometer support requires disabling i2c auxiliary bus support.
+ * To keep backward compatibility with existing setups, do not disable the
+ * i2c auxiliary bus if it is in use: check for an i2c-gate node in the
+ * devicetree and, if one is present, mark the magnetometer as disabled.
+ * Only MPU6500 is supported by ACPI, so there is no need to check there.
+ */
+static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
+ struct device_node *mux_node;
+
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ mux_node = of_get_child_by_name(dev->of_node, "i2c-gate");
+ if (mux_node != NULL) {
+ st->magn_disabled = true;
+ dev_warn(dev, "disabling internal use of the magnetometer\n");
+ }
+ of_node_put(mux_node);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
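
For example, a board whose devicetree declares an i2c-gate child node under the MPU (to reach an external sensor through the chip's bypass mux) keeps the auxiliary bus behaviour: magn_disabled is set and the internal magnetometer channels are not registered.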
+
/**
* inv_mpu_probe() - probe function.
* @client: i2c client.
@@ -112,17 +162,12 @@ static int inv_mpu_probe(struct i2c_client *client,
}
result = inv_mpu_core_probe(regmap, client->irq, name,
- NULL, chip_type);
+ inv_mpu_magn_disable, chip_type);
if (result < 0)
return result;
st = iio_priv(dev_get_drvdata(&client->dev));
- switch (st->chip_type) {
- case INV_ICM20608:
- case INV_ICM20602:
- /* no i2c auxiliary bus on the chip */
- break;
- default:
+ if (inv_mpu_i2c_aux_bus(&client->dev)) {
/* declare i2c auxiliary bus */
st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
@@ -137,7 +182,6 @@ static int inv_mpu_probe(struct i2c_client *client,
result = inv_mpu_acpi_create_mux_client(client);
if (result)
goto out_del_mux;
- break;
}
return 0;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 51235677c534..f1fb7b6bdab1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -2,6 +2,10 @@
/*
* Copyright (C) 2012 Invensense, Inc.
*/
+
+#ifndef INV_MPU_IIO_H_
+#define INV_MPU_IIO_H_
+
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/mutex.h>
@@ -82,6 +86,7 @@ enum inv_devices {
* @accl_fs: accel full scale range.
* @accl_fifo_enable: enable accel data output
* @gyro_fifo_enable: enable gyro data output
+ * @magn_fifo_enable: enable magn data output
* @divider: chip sample rate divider (sample rate divider - 1)
*/
struct inv_mpu6050_chip_config {
@@ -90,6 +95,7 @@ struct inv_mpu6050_chip_config {
unsigned int accl_fs:2;
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
+ unsigned int magn_fifo_enable:1;
u8 divider;
u8 user_ctrl;
};
@@ -126,7 +132,11 @@ struct inv_mpu6050_hw {
* @chip_period: chip internal period estimation (~1kHz).
* @it_timestamp: timestamp from previous interrupt.
* @data_timestamp: timestamp for next data sample.
- * @vddio_supply voltage regulator for the chip.
+ * @vdd_supply: VDD voltage regulator for the chip.
+ * @vddio_supply: I/O voltage regulator for the chip.
+ * @magn_disabled: magnetometer disabled for backward compatibility reasons.
+ * @magn_raw_to_gauss: coefficient to convert mag raw value to Gauss.
+ * @magn_orient: magnetometer sensor chip orientation if available.
*/
struct inv_mpu6050_state {
struct mutex lock;
@@ -147,7 +157,11 @@ struct inv_mpu6050_state {
s64 chip_period;
s64 it_timestamp;
s64 data_timestamp;
+ struct regulator *vdd_supply;
struct regulator *vddio_supply;
+ bool magn_disabled;
+ s32 magn_raw_to_gauss[3];
+ struct iio_mount_matrix magn_orient;
};
/*register and associated bit definition*/
@@ -160,9 +174,41 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
#define INV_MPU6050_REG_FIFO_EN 0x23
+#define INV_MPU6050_BIT_SLAVE_0 0x01
+#define INV_MPU6050_BIT_SLAVE_1 0x02
+#define INV_MPU6050_BIT_SLAVE_2 0x04
#define INV_MPU6050_BIT_ACCEL_OUT 0x08
#define INV_MPU6050_BITS_GYRO_OUT 0x70
+#define INV_MPU6050_REG_I2C_MST_CTRL 0x24
+#define INV_MPU6050_BITS_I2C_MST_CLK_400KHZ 0x0D
+#define INV_MPU6050_BIT_I2C_MST_P_NSR 0x10
+#define INV_MPU6050_BIT_SLV3_FIFO_EN 0x20
+#define INV_MPU6050_BIT_WAIT_FOR_ES 0x40
+#define INV_MPU6050_BIT_MULT_MST_EN 0x80
+
+/* control I2C slaves from 0 to 3 */
+#define INV_MPU6050_REG_I2C_SLV_ADDR(_x) (0x25 + 3 * (_x))
+#define INV_MPU6050_BIT_I2C_SLV_RNW 0x80
+
+#define INV_MPU6050_REG_I2C_SLV_REG(_x) (0x26 + 3 * (_x))
+
+#define INV_MPU6050_REG_I2C_SLV_CTRL(_x) (0x27 + 3 * (_x))
+#define INV_MPU6050_BIT_SLV_GRP 0x10
+#define INV_MPU6050_BIT_SLV_REG_DIS 0x20
+#define INV_MPU6050_BIT_SLV_BYTE_SW 0x40
+#define INV_MPU6050_BIT_SLV_EN 0x80
+
+/* I2C master delay register */
+#define INV_MPU6050_REG_I2C_SLV4_CTRL 0x34
+#define INV_MPU6050_BITS_I2C_MST_DLY(_x) ((_x) & 0x1F)
+
+#define INV_MPU6050_REG_I2C_MST_STATUS 0x36
+#define INV_MPU6050_BIT_I2C_SLV0_NACK 0x01
+#define INV_MPU6050_BIT_I2C_SLV1_NACK 0x02
+#define INV_MPU6050_BIT_I2C_SLV2_NACK 0x04
+#define INV_MPU6050_BIT_I2C_SLV3_NACK 0x08
+
#define INV_MPU6050_REG_INT_ENABLE 0x38
#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
#define INV_MPU6050_BIT_DMP_INT_EN 0x02
@@ -175,6 +221,18 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_FIFO_OVERFLOW_INT 0x10
#define INV_MPU6050_BIT_RAW_DATA_RDY_INT 0x01
+#define INV_MPU6050_REG_EXT_SENS_DATA 0x49
+
+/* I2C slaves data output from 0 to 3 */
+#define INV_MPU6050_REG_I2C_SLV_DO(_x) (0x63 + (_x))
+
+#define INV_MPU6050_REG_I2C_MST_DELAY_CTRL 0x67
+#define INV_MPU6050_BIT_I2C_SLV0_DLY_EN 0x01
+#define INV_MPU6050_BIT_I2C_SLV1_DLY_EN 0x02
+#define INV_MPU6050_BIT_I2C_SLV2_DLY_EN 0x04
+#define INV_MPU6050_BIT_I2C_SLV3_DLY_EN 0x08
+#define INV_MPU6050_BIT_DELAY_ES_SHADOW 0x80
+
#define INV_MPU6050_REG_USER_CTRL 0x6A
#define INV_MPU6050_BIT_FIFO_RST 0x04
#define INV_MPU6050_BIT_DMP_RST 0x08
@@ -202,6 +260,9 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
#define INV_MPU6050_FIFO_COUNT_BYTE 2
+/* MPU9X50 9-axis magnetometer */
+#define INV_MPU9X50_BYTES_MAGN 7
+
/* ICM20602 FIFO samples include temperature readings */
#define INV_ICM20602_BYTES_PER_TEMP_SENSOR 2
@@ -229,8 +290,8 @@ struct inv_mpu6050_state {
#define INV_ICM20602_TEMP_OFFSET 8170
#define INV_ICM20602_TEMP_SCALE 3060
-/* 6 + 6 round up and plus 8 */
-#define INV_MPU6050_OUTPUT_DATA_SIZE 24
+/* 6 + 6 + 7 (for MPU9x50) = 19, rounded up to 24, plus 8 */
+#define INV_MPU6050_OUTPUT_DATA_SIZE 32
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
#define INV_MPU6050_ACTIVE_HIGH 0x00
@@ -279,6 +340,11 @@ enum inv_mpu6050_scan {
INV_MPU6050_SCAN_GYRO_Y,
INV_MPU6050_SCAN_GYRO_Z,
INV_MPU6050_SCAN_TIMESTAMP,
+
+ INV_MPU9X50_SCAN_MAGN_X = INV_MPU6050_SCAN_GYRO_Z + 1,
+ INV_MPU9X50_SCAN_MAGN_Y,
+ INV_MPU9X50_SCAN_MAGN_Z,
+ INV_MPU9X50_SCAN_TIMESTAMP,
};
/* scan element definition for ICM20602, which includes temperature */
@@ -344,3 +410,5 @@ void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type);
extern const struct dev_pm_ops inv_mpu_pmops;
+
+#endif
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
new file mode 100644
index 000000000000..02735af152c8
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+#include "inv_mpu_aux.h"
+#include "inv_mpu_iio.h"
+#include "inv_mpu_magn.h"
+
+/*
+ * MPU9250 magnetometer is an AKM AK8963 chip on I2C aux bus
+ */
+#define INV_MPU_MAGN_I2C_ADDR 0x0C
+
+#define INV_MPU_MAGN_REG_WIA 0x00
+#define INV_MPU_MAGN_BITS_WIA 0x48
+
+#define INV_MPU_MAGN_REG_ST1 0x02
+#define INV_MPU_MAGN_BIT_DRDY 0x01
+#define INV_MPU_MAGN_BIT_DOR 0x02
+
+#define INV_MPU_MAGN_REG_DATA 0x03
+
+#define INV_MPU_MAGN_REG_ST2 0x09
+#define INV_MPU_MAGN_BIT_HOFL 0x08
+#define INV_MPU_MAGN_BIT_BITM 0x10
+
+#define INV_MPU_MAGN_REG_CNTL1 0x0A
+#define INV_MPU_MAGN_BITS_MODE_PWDN 0x00
+#define INV_MPU_MAGN_BITS_MODE_SINGLE 0x01
+#define INV_MPU_MAGN_BITS_MODE_FUSE 0x0F
+#define INV_MPU_MAGN_BIT_OUTPUT_BIT 0x10
+
+#define INV_MPU_MAGN_REG_CNTL2 0x0B
+#define INV_MPU_MAGN_BIT_SRST 0x01
+
+#define INV_MPU_MAGN_REG_ASAX 0x10
+#define INV_MPU_MAGN_REG_ASAY 0x11
+#define INV_MPU_MAGN_REG_ASAZ 0x12
+
+/* Magnetometer maximum frequency */
+#define INV_MPU_MAGN_FREQ_HZ_MAX 50
+
+static bool inv_magn_supported(const struct inv_mpu6050_state *st)
+{
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* init magnetometer chip */
+static int inv_magn_init(struct inv_mpu6050_state *st)
+{
+ uint8_t val;
+ uint8_t asa[3];
+ int ret;
+
+ /* check whoami */
+ ret = inv_mpu_aux_read(st, INV_MPU_MAGN_I2C_ADDR, INV_MPU_MAGN_REG_WIA,
+ &val, sizeof(val));
+ if (ret)
+ return ret;
+ if (val != INV_MPU_MAGN_BITS_WIA)
+ return -ENODEV;
+
+ /* reset chip */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL2,
+ INV_MPU_MAGN_BIT_SRST);
+ if (ret)
+ return ret;
+
+ /* read fuse ROM data */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL1,
+ INV_MPU_MAGN_BITS_MODE_FUSE);
+ if (ret)
+ return ret;
+
+ ret = inv_mpu_aux_read(st, INV_MPU_MAGN_I2C_ADDR, INV_MPU_MAGN_REG_ASAX,
+ asa, sizeof(asa));
+ if (ret)
+ return ret;
+
+ /* switch back to power-down */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL1,
+ INV_MPU_MAGN_BITS_MODE_PWDN);
+ if (ret)
+ return ret;
+
+ /*
+ * Sensitivity adjustment and scale to Gauss
+ *
+ * Hadj = H * (((ASA - 128) * 0.5 / 128) + 1)
+ * Factor simplification:
+ * Hadj = H * ((ASA + 128) / 256)
+ *
+ * Sensor sensitivity
+ * 0.15 uT per LSB in 16-bit mode
+ * 1 uT = 0.01 G and the scale is stored in micro units (1e6)
+ * sensitivity = 0.15 uT * 0.01 * 1e6 = 1500
+ *
+ * raw_to_gauss = ((ASA + 128) / 256) * 1500
+ */
+ st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * 1500) / 256;
+ st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * 1500) / 256;
+ st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * 1500) / 256;
+
+ return 0;
+}
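
Worked example of the scaling above: for the nominal fuse value ASA = 128, raw_to_gauss = (128 + 128) * 1500 / 256 = 1500, i.e. 0.001500 G (0.15 uT) per LSB. The extreme fuse values 0 and 255 give 750 and 2244, matching the 0.5x to roughly 1.5x adjustment range of the AK8963.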
+
+/**
+ * inv_mpu_magn_probe() - probe and setup magnetometer chip
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Probe the chip and set up all needed i2c transfers.
+ * No-op if there is no magnetometer in the chip.
+ */
+int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
+{
+ int ret;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return 0;
+
+ /* configure i2c master aux port */
+ ret = inv_mpu_aux_init(st);
+ if (ret)
+ return ret;
+
+ /* check and init mag chip */
+ ret = inv_magn_init(st);
+ if (ret)
+ return ret;
+
+ /*
+ * configure mpu i2c master accesses
+ * i2c SLV0: read sensor data, 7 bytes data(6)-ST2
+ * Byte-swap the data so they are stored big-endian in odd address groups
+ */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0),
+ INV_MPU6050_BIT_I2C_SLV_RNW | INV_MPU_MAGN_I2C_ADDR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0),
+ INV_MPU_MAGN_REG_DATA);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN |
+ INV_MPU6050_BIT_SLV_BYTE_SW |
+ INV_MPU6050_BIT_SLV_GRP |
+ INV_MPU9X50_BYTES_MAGN);
+ if (ret)
+ return ret;
+
+ /* i2c SLV1: launch single measurement */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(1),
+ INV_MPU_MAGN_I2C_ADDR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(1),
+ INV_MPU_MAGN_REG_CNTL1);
+ if (ret)
+ return ret;
+
+ /* add 16 bits mode */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1),
+ INV_MPU_MAGN_BITS_MODE_SINGLE |
+ INV_MPU_MAGN_BIT_OUTPUT_BIT);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(1),
+ INV_MPU6050_BIT_SLV_EN | 1);
+}
+
+/**
+ * inv_mpu_magn_set_rate() - set magnetometer sampling rate
+ * @st: driver internal state
+ * @fifo_rate: mpu set fifo rate
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Limit the sampling frequency to the maximum value supported by the
+ * magnetometer chip, which results in duplicated data at higher FIFO rates.
+ * No-op if there is no magnetometer in the chip.
+ */
+int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate)
+{
+ uint8_t d;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return 0;
+
+ /*
+ * update i2c master delay to limit mag sampling to max frequency
+ * compute fifo_rate divider d: rate = fifo_rate / (d + 1)
+ */
+ if (fifo_rate > INV_MPU_MAGN_FREQ_HZ_MAX)
+ d = fifo_rate / INV_MPU_MAGN_FREQ_HZ_MAX - 1;
+ else
+ d = 0;
+
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_SLV4_CTRL, d);
+}
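
For instance, with fifo_rate = 200 Hz the divider is d = 200 / 50 - 1 = 3, so the magnetometer is triggered at 200 / (3 + 1) = 50 Hz and each magnetometer sample is repeated four times in the FIFO stream.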
+
+/**
+ * inv_mpu_magn_set_orient() - fill magnetometer mounting matrix
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Fill the magnetometer mounting matrix from the main chip mounting matrix,
+ * applying the required axis remapping.
+ */
+int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
+{
+ const char *orient;
+ char *str;
+ int i;
+
+ /* fill magnetometer orientation */
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /* x <- y */
+ st->magn_orient.rotation[0] = st->orientation.rotation[3];
+ st->magn_orient.rotation[1] = st->orientation.rotation[4];
+ st->magn_orient.rotation[2] = st->orientation.rotation[5];
+ /* y <- x */
+ st->magn_orient.rotation[3] = st->orientation.rotation[0];
+ st->magn_orient.rotation[4] = st->orientation.rotation[1];
+ st->magn_orient.rotation[5] = st->orientation.rotation[2];
+ /* z <- -z */
+ for (i = 0; i < 3; ++i) {
+ orient = st->orientation.rotation[6 + i];
+ /* allocate length + 2: room for a minus sign and the NUL terminator */
+ str = devm_kzalloc(regmap_get_device(st->map),
+ strlen(orient) + 2, GFP_KERNEL);
+ if (str == NULL)
+ return -ENOMEM;
+ if (strcmp(orient, "0") == 0) {
+ strcpy(str, orient);
+ } else if (orient[0] == '-') {
+ strcpy(str, &orient[1]);
+ } else {
+ str[0] = '-';
+ strcpy(&str[1], orient);
+ }
+ st->magn_orient.rotation[6 + i] = str;
+ }
+ break;
+ default:
+ st->magn_orient = st->orientation;
+ break;
+ }
+
+ return 0;
+}
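
Concretely, an identity chip mounting matrix ("1 0 0 / 0 1 0 / 0 0 1") becomes "0 1 0 / 1 0 0 / 0 0 -1" for the magnetometer: the x and y rows are swapped and the z row is negated, which matches the AK8963 axis orientation inside the MPU9250/MPU9255 package.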
+
+/**
+ * inv_mpu_magn_read() - read magnetometer data
+ * @st: driver internal state
+ * @axis: IIO modifier axis value
+ * @val: store corresponding axis value
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val)
+{
+ unsigned int user_ctrl, status;
+ __be16 data[3];
+ uint8_t addr;
+ uint8_t d;
+ unsigned int period_ms;
+ int ret;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return -ENODEV;
+
+ /* Mag data: X - Y - Z */
+ switch (axis) {
+ case IIO_MOD_X:
+ addr = 0;
+ break;
+ case IIO_MOD_Y:
+ addr = 1;
+ break;
+ case IIO_MOD_Z:
+ addr = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set sample rate to max mag freq */
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU_MAGN_FREQ_HZ_MAX);
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* start i2c master, wait for xfer, stop */
+ user_ctrl = st->chip_config.user_ctrl | INV_MPU6050_BIT_I2C_MST_EN;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ return ret;
+
+ /* need to wait 2 periods + half-period margin */
+ period_ms = 1000 / INV_MPU_MAGN_FREQ_HZ_MAX;
+ msleep(period_ms * 2 + period_ms / 2);
+ user_ctrl = st->chip_config.user_ctrl;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ return ret;
+
+ /* restore sample rate */
+ d = st->chip_config.divider;
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* check i2c status and read raw data */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK ||
+ status & INV_MPU6050_BIT_I2C_SLV1_NACK)
+ return -EIO;
+
+ ret = regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA,
+ data, sizeof(data));
+ if (ret)
+ return ret;
+
+ *val = (int16_t)be16_to_cpu(data[addr]);
+
+ return IIO_VAL_INT;
+}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
new file mode 100644
index 000000000000..b41bd0578478
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#ifndef INV_MPU_MAGN_H_
+#define INV_MPU_MAGN_H_
+
+#include <linux/kernel.h>
+
+#include "inv_mpu_iio.h"
+
+int inv_mpu_magn_probe(struct inv_mpu6050_state *st);
+
+/**
+ * inv_mpu_magn_get_scale() - get magnetometer scale value
+ * @st: driver internal state
+ *
+ * Returns IIO data format.
+ */
+static inline int inv_mpu_magn_get_scale(const struct inv_mpu6050_state *st,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2)
+{
+ *val = 0;
+ *val2 = st->magn_raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate);
+
+int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st);
+
+int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val);
+
+#endif /* INV_MPU_MAGN_H_ */
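
Tying the scale path together: with raw_to_gauss[x] = 1500 the inline helper above reports a scale of 0.001500, so a userspace reading of in_magn_x_raw = 200 corresponds to 200 * 0.0015 = 0.3 G, i.e. 30 uT.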
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 72d8c5790076..10d16ec5104b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -124,7 +124,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
- st->chip_config.gyro_fifo_enable) {
+ st->chip_config.gyro_fifo_enable ||
+ st->chip_config.magn_fifo_enable) {
result = regmap_write(st->map, st->reg->int_enable,
INV_MPU6050_BIT_DATA_RDY_EN);
if (result)
@@ -141,6 +142,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
d |= INV_MPU6050_BITS_GYRO_OUT;
if (st->chip_config.accl_fifo_enable)
d |= INV_MPU6050_BIT_ACCEL_OUT;
+ if (st->chip_config.magn_fifo_enable)
+ d |= INV_MPU6050_BIT_SLAVE_0;
result = regmap_write(st->map, st->reg->fifo_en, d);
if (result)
goto reset_fifo_fail;
@@ -187,7 +190,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
}
if (!(st->chip_config.accl_fifo_enable |
- st->chip_config.gyro_fifo_enable))
+ st->chip_config.gyro_fifo_enable |
+ st->chip_config.magn_fifo_enable))
goto end_session;
bytes_per_datum = 0;
if (st->chip_config.accl_fifo_enable)
@@ -199,6 +203,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (st->chip_type == INV_ICM20602)
bytes_per_datum += INV_ICM20602_BYTES_PER_TEMP_SENSOR;
+ if (st->chip_config.magn_fifo_enable)
+ bytes_per_datum += INV_MPU9X50_BYTES_MAGN;
+
/*
* read fifo_count register to know how many bytes are inside the FIFO
* right now
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index dd55e70b6f77..d7d951927a44 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -5,7 +5,7 @@
#include "inv_mpu_iio.h"
-static void inv_scan_query(struct iio_dev *indio_dev)
+static void inv_scan_query_mpu6050(struct iio_dev *indio_dev)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
@@ -26,6 +26,60 @@ static void inv_scan_query(struct iio_dev *indio_dev)
indio_dev->active_scan_mask);
}
+static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ inv_scan_query_mpu6050(indio_dev);
+
+ /* no magnetometer if i2c auxiliary bus is used */
+ if (st->magn_disabled)
+ return;
+
+ st->chip_config.magn_fifo_enable =
+ test_bit(INV_MPU9X50_SCAN_MAGN_X,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU9X50_SCAN_MAGN_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU9X50_SCAN_MAGN_Z,
+ indio_dev->active_scan_mask);
+}
+
+static void inv_scan_query(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ return inv_scan_query_mpu9x50(indio_dev);
+ default:
+ return inv_scan_query_mpu6050(indio_dev);
+ }
+}
+
+static unsigned int inv_compute_skip_samples(const struct inv_mpu6050_state *st)
+{
+ unsigned int gyro_skip = 0;
+ unsigned int magn_skip = 0;
+ unsigned int skip_samples;
+
+ /* gyro first sample is out of specs, skip it */
+ if (st->chip_config.gyro_fifo_enable)
+ gyro_skip = 1;
+
+ /* first magn sample is never ready, skip it */
+ if (st->chip_config.magn_fifo_enable)
+ magn_skip = 1;
+
+ /* compute first samples to skip */
+ skip_samples = gyro_skip;
+ if (magn_skip > skip_samples)
+ skip_samples = magn_skip;
+
+ return skip_samples;
+}
+
/**
* inv_mpu6050_set_enable() - enable chip functions.
* @indio_dev: Device driver instance.
@@ -34,6 +88,7 @@ static void inv_scan_query(struct iio_dev *indio_dev)
static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ uint8_t d;
int result;
if (enable) {
@@ -41,14 +96,11 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
return result;
inv_scan_query(indio_dev);
- st->skip_samples = 0;
if (st->chip_config.gyro_fifo_enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
goto error_power_off;
- /* gyro first sample is out of specs, skip it */
- st->skip_samples = 1;
}
if (st->chip_config.accl_fifo_enable) {
result = inv_mpu6050_switch_engine(st, true,
@@ -56,22 +108,32 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
goto error_gyro_off;
}
+ if (st->chip_config.magn_fifo_enable) {
+ d = st->chip_config.user_ctrl |
+ INV_MPU6050_BIT_I2C_MST_EN;
+ result = regmap_write(st->map, st->reg->user_ctrl, d);
+ if (result)
+ goto error_accl_off;
+ st->chip_config.user_ctrl = d;
+ }
+ st->skip_samples = inv_compute_skip_samples(st);
result = inv_reset_fifo(indio_dev);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
} else {
result = regmap_write(st->map, st->reg->fifo_en, 0);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
result = regmap_write(st->map, st->reg->int_enable, 0);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
- result = regmap_write(st->map, st->reg->user_ctrl,
- st->chip_config.user_ctrl);
+ d = st->chip_config.user_ctrl & ~INV_MPU6050_BIT_I2C_MST_EN;
+ result = regmap_write(st->map, st->reg->user_ctrl, d);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
+ st->chip_config.user_ctrl = d;
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_ACCL_STBY);
@@ -90,6 +152,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
return 0;
+error_magn_off:
+ /* always restore user_ctrl to disable fifo properly */
+ st->chip_config.user_ctrl &= ~INV_MPU6050_BIT_I2C_MST_EN;
+ regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl);
error_accl_off:
if (st->chip_config.accl_fifo_enable)
inv_mpu6050_switch_engine(st, false,
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 77aa0e77212d..28f59d09208a 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -12,7 +12,8 @@ config IIO_ST_LSM6DSX
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr, lsm6ds3tr-c,
- ism330dhcx and the accelerometer/gyroscope of lsm9ds1.
+ ism330dhcx, lsm6dsrx, lsm6ds0 and the accelerometer/gyroscope
+ of lsm9ds1.
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 0fe6999b8257..c605b153be41 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -12,6 +12,7 @@
#define ST_LSM6DSX_H
#include <linux/device.h>
+#include <linux/iio/iio.h>
#define ST_LSM6DS3_DEV_NAME "lsm6ds3"
#define ST_LSM6DS3H_DEV_NAME "lsm6ds3h"
@@ -25,6 +26,8 @@
#define ST_LSM6DS3TRC_DEV_NAME "lsm6ds3tr-c"
#define ST_ISM330DHCX_DEV_NAME "ism330dhcx"
#define ST_LSM9DS1_DEV_NAME "lsm9ds1-imu"
+#define ST_LSM6DS0_DEV_NAME "lsm6ds0"
+#define ST_LSM6DSRX_DEV_NAME "lsm6dsrx"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -39,6 +42,8 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DS3TRC_ID,
ST_ISM330DHCX_ID,
ST_LSM9DS1_ID,
+ ST_LSM6DS0_ID,
+ ST_LSM6DSRX_ID,
ST_LSM6DSX_MAX_ID,
};
@@ -54,6 +59,26 @@ enum st_lsm6dsx_hw_id {
* ST_LSM6DSX_TAGGED_SAMPLE_SIZE)
#define ST_LSM6DSX_SHIFT_VAL(val, mask) (((val) << __ffs(mask)) & (mask))
+#define ST_LSM6DSX_CHANNEL_ACC(chan_type, addr, mod, scan_idx) \
+{ \
+ .type = chan_type, \
+ .address = addr, \
+ .modified = 1, \
+ .channel2 = mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = scan_idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ .event_spec = &st_lsm6dsx_event, \
+ .num_event_specs = 1, \
+}
+
#define ST_LSM6DSX_CHANNEL(chan_type, addr, mod, scan_idx) \
{ \
.type = chan_type, \
@@ -81,14 +106,16 @@ struct st_lsm6dsx_sensor;
struct st_lsm6dsx_hw;
struct st_lsm6dsx_odr {
- u16 hz;
+ u32 milli_hz;
u8 val;
};
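
With this change ODR entries are stored in milli-Hz, so fractional rates such as the LSM9DS1/LSM6DS0 gyro's 14.9 Hz and 59.5 Hz can be expressed exactly as 14900 and 59500 (see the odr_avl tables below) instead of being rounded to whole Hz in a u16.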
#define ST_LSM6DSX_ODR_LIST_SIZE 6
struct st_lsm6dsx_odr_table_entry {
struct st_lsm6dsx_reg reg;
+
struct st_lsm6dsx_odr odr_avl[ST_LSM6DSX_ODR_LIST_SIZE];
+ int odr_len;
};
struct st_lsm6dsx_fs {
@@ -132,12 +159,14 @@ struct st_lsm6dsx_fifo_ops {
* @hr_timer: Hw timer resolution register info (addr + mask).
* @fifo_en: Hw timer FIFO enable register info (addr + mask).
* @decimator: Hw timer FIFO decimator register info (addr + mask).
+ * @freq_fine: Difference in % of ODR with respect to the typical value.
*/
struct st_lsm6dsx_hw_ts_settings {
struct st_lsm6dsx_reg timer_en;
struct st_lsm6dsx_reg hr_timer;
struct st_lsm6dsx_reg fifo_en;
struct st_lsm6dsx_reg decimator;
+ u8 freq_fine;
};
/**
@@ -164,6 +193,16 @@ struct st_lsm6dsx_shub_settings {
u8 batch_en;
};
+struct st_lsm6dsx_event_settings {
+ struct st_lsm6dsx_reg enable_reg;
+ struct st_lsm6dsx_reg wakeup_reg;
+ u8 wakeup_src_reg;
+ u8 wakeup_src_status_mask;
+ u8 wakeup_src_z_mask;
+ u8 wakeup_src_y_mask;
+ u8 wakeup_src_x_mask;
+};
+
enum st_lsm6dsx_ext_sensor_id {
ST_LSM6DSX_ID_MAGN,
};
@@ -207,12 +246,14 @@ struct st_lsm6dsx_ext_dev_settings {
/**
* struct st_lsm6dsx_settings - ST IMU sensor settings
* @wai: Sensor WhoAmI default value.
- * @int1_addr: Control Register address for INT1
- * @int2_addr: Control Register address for INT2
- * @reset_addr: register address for reset/reboot
+ * @reset: reset register info (addr + mask).
+ * @boot: boot register info (addr + mask).
+ * @bdu: block data update register info (addr + mask).
* @max_fifo_size: Sensor max fifo length in FIFO words.
* @id: List of hw id/device name supported by the driver configuration.
* @channels: IIO channels supported by the device.
+ * @irq_config: interrupt-related registers.
+ * @drdy_mask: register info for data-ready mask (addr + mask).
* @odr_table: Hw sensors odr table (Hz + val).
* @fs_table: Hw sensors gain table (gain + val).
* @decimator: List of decimator register info (addr + mask).
@@ -223,9 +264,9 @@ struct st_lsm6dsx_ext_dev_settings {
*/
struct st_lsm6dsx_settings {
u8 wai;
- u8 int1_addr;
- u8 int2_addr;
- u8 reset_addr;
+ struct st_lsm6dsx_reg reset;
+ struct st_lsm6dsx_reg boot;
+ struct st_lsm6dsx_reg bdu;
u16 max_fifo_size;
struct {
enum st_lsm6dsx_hw_id hw_id;
@@ -235,6 +276,17 @@ struct st_lsm6dsx_settings {
const struct iio_chan_spec *chan;
int len;
} channels[2];
+ struct {
+ struct st_lsm6dsx_reg irq1;
+ struct st_lsm6dsx_reg irq2;
+ struct st_lsm6dsx_reg irq1_func;
+ struct st_lsm6dsx_reg irq2_func;
+ struct st_lsm6dsx_reg lir;
+ struct st_lsm6dsx_reg clear_on_read;
+ struct st_lsm6dsx_reg hla;
+ struct st_lsm6dsx_reg od;
+ } irq_config;
+ struct st_lsm6dsx_reg drdy_mask;
struct st_lsm6dsx_odr_table_entry odr_table[2];
struct st_lsm6dsx_fs_table_entry fs_table[2];
struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
@@ -242,6 +294,7 @@ struct st_lsm6dsx_settings {
struct st_lsm6dsx_fifo_ops fifo_ops;
struct st_lsm6dsx_hw_ts_settings ts_settings;
struct st_lsm6dsx_shub_settings shub_settings;
+ struct st_lsm6dsx_event_settings event_settings;
};
enum st_lsm6dsx_sensor_id {
@@ -277,7 +330,7 @@ struct st_lsm6dsx_sensor {
struct st_lsm6dsx_hw *hw;
u32 gain;
- u16 odr;
+ u32 odr;
u16 watermark;
u8 sip;
@@ -301,9 +354,13 @@ struct st_lsm6dsx_sensor {
* @fifo_mode: FIFO operating mode supported by the device.
* @suspend_mask: Suspended sensor bitmask.
* @enable_mask: Enabled sensor bitmask.
+ * @ts_gain: Hw timestamp rate after internal calibration.
* @ts_sip: Total number of timestamp samples in a given pattern.
* @sip: Total number of samples (acc/gyro/ts) in a given pattern.
* @buff: Device read buffer.
+ * @irq_routing: pointer to interrupt routing configuration.
+ * @event_threshold: wakeup event threshold.
+ * @enable_event: enabled event bitmask.
* @iio_devs: Pointers to acc/gyro iio_dev instances.
* @settings: Pointer to the specific sensor settings in use.
*/
@@ -319,9 +376,14 @@ struct st_lsm6dsx_hw {
enum st_lsm6dsx_fifo_mode fifo_mode;
u8 suspend_mask;
u8 enable_mask;
+ s64 ts_gain;
u8 ts_sip;
u8 sip;
+ const struct st_lsm6dsx_reg *irq_routing;
+ u8 event_threshold;
+ u8 enable_event;
+
u8 *buff;
struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
@@ -329,6 +391,13 @@ struct st_lsm6dsx_hw {
const struct st_lsm6dsx_settings *settings;
};
+static const struct iio_event_spec st_lsm6dsx_event = {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE)
+};
+
static const unsigned long st_lsm6dsx_available_scan_masks[] = {0x7, 0x0};
extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
@@ -346,7 +415,7 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
enum st_lsm6dsx_fifo_mode fifo_mode);
int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw);
int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw);
-int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr, u8 *val);
+int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u32 odr, u8 *val);
int st_lsm6dsx_shub_probe(struct st_lsm6dsx_hw *hw, const char *name);
int st_lsm6dsx_shub_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable);
int st_lsm6dsx_set_page(struct st_lsm6dsx_hw *hw, bool enable);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index b0f3da1976e4..d416990ae309 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -14,10 +14,10 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX: The FIFO buffer can be
- * configured to store data from gyroscope and accelerometer. Each sample
- * is queued with a tag (1B) indicating data source (gyroscope, accelerometer,
- * hw timer).
+ * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/LSM6DSRX/ISM330DHCX:
+ * The FIFO buffer can be configured to store data from gyroscope and
+ * accelerometer. Each sample is queued with a tag (1B) indicating data
+ * source (gyroscope, accelerometer, hw timer).
*
* FIFO supported modes:
* - BYPASS: FIFO disabled
@@ -30,8 +30,6 @@
* Denis Ciocca <denis.ciocca@st.com>
*/
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -42,10 +40,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_HLACTIVE_ADDR 0x12
-#define ST_LSM6DSX_REG_HLACTIVE_MASK BIT(5)
-#define ST_LSM6DSX_REG_PP_OD_ADDR 0x12
-#define ST_LSM6DSX_REG_PP_OD_MASK BIT(4)
#define ST_LSM6DSX_REG_FIFO_MODE_ADDR 0x0a
#define ST_LSM6DSX_FIFO_MODE_MASK GENMASK(2, 0)
#define ST_LSM6DSX_FIFO_ODR_MASK GENMASK(6, 3)
@@ -56,7 +50,6 @@
#define ST_LSM6DSX_MAX_FIFO_ODR_VAL 0x08
-#define ST_LSM6DSX_TS_SENSITIVITY 25000UL /* 25us */
#define ST_LSM6DSX_TS_RESET_VAL 0xaa
struct st_lsm6dsx_decimator_entry {
@@ -98,7 +91,7 @@ static int st_lsm6dsx_get_decimator_val(u8 val)
}
static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
- u16 *max_odr, u16 *min_odr)
+ u32 *max_odr, u32 *min_odr)
{
struct st_lsm6dsx_sensor *sensor;
int i;
@@ -113,16 +106,17 @@ static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
if (!(hw->enable_mask & BIT(sensor->id)))
continue;
- *max_odr = max_t(u16, *max_odr, sensor->odr);
- *min_odr = min_t(u16, *min_odr, sensor->odr);
+ *max_odr = max_t(u32, *max_odr, sensor->odr);
+ *min_odr = min_t(u32, *min_odr, sensor->odr);
}
}
static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
{
- u16 max_odr, min_odr, sip = 0, ts_sip = 0;
const struct st_lsm6dsx_reg *ts_dec_reg;
struct st_lsm6dsx_sensor *sensor;
+ u16 sip = 0, ts_sip = 0;
+ u32 max_odr, min_odr;
int err = 0, i;
u8 data;
@@ -429,7 +423,7 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
*/
if (!reset_ts && ts >= 0xff0000)
reset_ts = true;
- ts *= ST_LSM6DSX_TS_SENSITIVITY;
+ ts *= hw->ts_gain;
offset += ST_LSM6DSX_SAMPLE_SIZE;
}
@@ -456,13 +450,19 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
return read_len;
}
+#define ST_LSM6DSX_INVALID_SAMPLE 0x7ffd
static int
st_lsm6dsx_push_tagged_data(struct st_lsm6dsx_hw *hw, u8 tag,
u8 *data, s64 ts)
{
+ s16 val = le16_to_cpu(*(__le16 *)data);
struct st_lsm6dsx_sensor *sensor;
struct iio_dev *iio_dev;
+ /* invalid sample during bootstrap phase */
+ if (val >= ST_LSM6DSX_INVALID_SAMPLE)
+ return -EINVAL;
+
/*
* EXT_TAG are managed in FIFO fashion so ST_LSM6DSX_EXT0_TAG
* corresponds to the first enabled channel, ST_LSM6DSX_EXT1_TAG
@@ -572,7 +572,7 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
*/
if (!reset_ts && ts >= 0xffff0000)
reset_ts = true;
- ts *= ST_LSM6DSX_TS_SENSITIVITY;
+ ts *= hw->ts_gain;
} else {
st_lsm6dsx_push_tagged_data(hw, tag, iio_buff,
ts);
@@ -592,6 +592,9 @@ int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw)
{
int err;
+ if (!hw->settings->fifo_ops.read_fifo)
+ return -ENOTSUPP;
+
mutex_lock(&hw->fifo_lock);
hw->settings->fifo_ops.read_fifo(hw);
@@ -654,25 +657,6 @@ out:
return err;
}
-static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
-{
- struct st_lsm6dsx_hw *hw = private;
-
- return hw->sip > 0 ? IRQ_WAKE_THREAD : IRQ_NONE;
-}
-
-static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
-{
- struct st_lsm6dsx_hw *hw = private;
- int count;
-
- mutex_lock(&hw->fifo_lock);
- count = hw->settings->fifo_ops.read_fifo(hw);
- mutex_unlock(&hw->fifo_lock);
-
- return count ? IRQ_HANDLED : IRQ_NONE;
-}
-
static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
@@ -702,59 +686,8 @@ static const struct iio_buffer_setup_ops st_lsm6dsx_buffer_ops = {
int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
{
- struct device_node *np = hw->dev->of_node;
- struct st_sensors_platform_data *pdata;
struct iio_buffer *buffer;
- unsigned long irq_type;
- bool irq_active_low;
- int i, err;
-
- irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
-
- switch (irq_type) {
- case IRQF_TRIGGER_HIGH:
- case IRQF_TRIGGER_RISING:
- irq_active_low = false;
- break;
- case IRQF_TRIGGER_LOW:
- case IRQF_TRIGGER_FALLING:
- irq_active_low = true;
- break;
- default:
- dev_info(hw->dev, "mode %lx unsupported\n", irq_type);
- return -EINVAL;
- }
-
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_HLACTIVE_ADDR,
- ST_LSM6DSX_REG_HLACTIVE_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_HLACTIVE_MASK,
- irq_active_low));
- if (err < 0)
- return err;
-
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "drive-open-drain")) ||
- (pdata && pdata->open_drain)) {
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_PP_OD_ADDR,
- ST_LSM6DSX_REG_PP_OD_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_PP_OD_MASK,
- 1));
- if (err < 0)
- return err;
-
- irq_type |= IRQF_SHARED;
- }
-
- err = devm_request_threaded_irq(hw->dev, hw->irq,
- st_lsm6dsx_handler_irq,
- st_lsm6dsx_handler_thread,
- irq_type | IRQF_ONESHOT,
- "lsm6dsx", hw);
- if (err) {
- dev_err(hw->dev, "failed to request trigger irq %d\n",
- hw->irq);
- return err;
- }
+ int i;
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
if (!hw->iio_devs[i])
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index fd5ebe1e1594..11b2c7bc8041 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -32,7 +32,7 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
*
- * - LSM9DS1:
+ * - LSM9DS1/LSM6DS0:
* - Accelerometer supported ODR [Hz]: 10, 50, 119, 238, 476, 952
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported ODR [Hz]: 15, 60, 119, 238, 476, 952
@@ -48,8 +48,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
@@ -58,17 +61,14 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK BIT(3)
#define ST_LSM6DSX_REG_WHOAMI_ADDR 0x0f
-#define ST_LSM6DSX_REG_RESET_MASK BIT(0)
-#define ST_LSM6DSX_REG_BOOT_MASK BIT(7)
-#define ST_LSM6DSX_REG_BDU_ADDR 0x12
-#define ST_LSM6DSX_REG_BDU_MASK BIT(6)
+
+#define ST_LSM6DSX_TS_SENSITIVITY 25000UL /* 25us */
static const struct iio_chan_spec st_lsm6dsx_acc_channels[] = {
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x28, IIO_MOD_X, 0),
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x2a, IIO_MOD_Y, 1),
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x2c, IIO_MOD_Z, 2),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x28, IIO_MOD_X, 0),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x2a, IIO_MOD_Y, 1),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x2c, IIO_MOD_Z, 2),
IIO_CHAN_SOFT_TIMESTAMP(3),
};
@@ -89,14 +89,26 @@ static const struct iio_chan_spec st_lsm6ds0_gyro_channels[] = {
static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
{
.wai = 0x68,
- .int1_addr = 0x0c,
- .int2_addr = 0x0d,
- .reset_addr = 0x22,
+ .reset = {
+ .addr = 0x22,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x22,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x22,
+ .mask = BIT(6),
+ },
.max_fifo_size = 32,
.id = {
{
.hw_id = ST_LSM9DS1_ID,
.name = ST_LSM9DS1_DEV_NAME,
+ }, {
+ .hw_id = ST_LSM6DS0_ID,
+ .name = ST_LSM6DS0_DEV_NAME,
},
},
.channels = {
@@ -115,24 +127,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x20,
.mask = GENMASK(7, 5),
},
- .odr_avl[0] = { 10, 0x01 },
- .odr_avl[1] = { 50, 0x02 },
- .odr_avl[2] = { 119, 0x03 },
- .odr_avl[3] = { 238, 0x04 },
- .odr_avl[4] = { 476, 0x05 },
- .odr_avl[5] = { 952, 0x06 },
+ .odr_avl[0] = { 10000, 0x01 },
+ .odr_avl[1] = { 50000, 0x02 },
+ .odr_avl[2] = { 119000, 0x03 },
+ .odr_avl[3] = { 238000, 0x04 },
+ .odr_avl[4] = { 476000, 0x05 },
+ .odr_avl[5] = { 952000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 5),
},
- .odr_avl[0] = { 15, 0x01 },
- .odr_avl[1] = { 60, 0x02 },
- .odr_avl[2] = { 119, 0x03 },
- .odr_avl[3] = { 238, 0x04 },
- .odr_avl[4] = { 476, 0x05 },
- .odr_avl[5] = { 952, 0x06 },
+ .odr_avl[0] = { 14900, 0x01 },
+ .odr_avl[1] = { 59500, 0x02 },
+ .odr_avl[2] = { 119000, 0x03 },
+ .odr_avl[3] = { 238000, 0x04 },
+ .odr_avl[4] = { 476000, 0x05 },
+ .odr_avl[5] = { 952000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -152,18 +166,46 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(4, 3),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
+
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
.fs_len = 3,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0c,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .hla = {
+ .addr = 0x22,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x22,
+ .mask = BIT(4),
+ },
+ },
},
{
.wai = 0x69,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 1365,
.id = {
{
@@ -187,24 +229,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -231,6 +275,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -272,12 +346,32 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x69,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 682,
.id = {
{
@@ -301,24 +395,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -345,6 +441,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -386,12 +512,32 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6a,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 682,
.id = {
{
@@ -424,24 +570,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -468,6 +616,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -509,12 +687,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6c,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -535,30 +737,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -585,6 +793,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -617,6 +859,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
},
.shub_settings = {
.page_mux = {
@@ -643,13 +886,37 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
- }
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5b,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6b,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -667,30 +934,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -717,6 +990,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -749,13 +1056,38 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
},
},
{
.wai = 0x6b,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -764,6 +1096,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
}, {
.hw_id = ST_ISM330DHCX_ID,
.name = ST_ISM330DHCX_DEV_NAME,
+ }, {
+ .hw_id = ST_LSM6DSRX_ID,
+ .name = ST_LSM6DSRX_DEV_NAME,
},
},
.channels = {
@@ -776,30 +1111,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -826,6 +1167,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -858,6 +1233,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
},
.shub_settings = {
.page_mux = {
@@ -884,6 +1260,21 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
}
},
};
@@ -967,36 +1358,37 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
return 0;
}
-int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr, u8 *val)
+int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u32 odr, u8 *val)
{
const struct st_lsm6dsx_odr_table_entry *odr_table;
int i;
odr_table = &sensor->hw->settings->odr_table[sensor->id];
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
+ for (i = 0; i < odr_table->odr_len; i++) {
/*
* ext devices can run at a different odr with respect to
* the accel sensor
*/
- if (odr_table->odr_avl[i].hz >= odr)
+ if (odr_table->odr_avl[i].milli_hz >= odr)
break;
+ }
- if (i == ST_LSM6DSX_ODR_LIST_SIZE)
+ if (i == odr_table->odr_len)
return -EINVAL;
*val = odr_table->odr_avl[i].val;
-
- return 0;
+ return odr_table->odr_avl[i].milli_hz;
}
-static u16 st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u16 odr,
- enum st_lsm6dsx_sensor_id id)
+static int
+st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u32 odr,
+ enum st_lsm6dsx_sensor_id id)
{
struct st_lsm6dsx_sensor *ref = iio_priv(hw->iio_devs[id]);
if (odr > 0) {
if (hw->enable_mask & BIT(id))
- return max_t(u16, ref->odr, odr);
+ return max_t(u32, ref->odr, odr);
else
return odr;
} else {
@@ -1004,7 +1396,8 @@ static u16 st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u16 odr,
}
}
-static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 req_odr)
+static int
+st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
{
struct st_lsm6dsx_sensor *ref_sensor = sensor;
struct st_lsm6dsx_hw *hw = sensor->hw;
@@ -1018,7 +1411,7 @@ static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 req_odr)
case ST_LSM6DSX_ID_EXT1:
case ST_LSM6DSX_ID_EXT2:
case ST_LSM6DSX_ID_ACC: {
- u16 odr;
+ u32 odr;
int i;
/*
@@ -1058,7 +1451,7 @@ int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
bool enable)
{
struct st_lsm6dsx_hw *hw = sensor->hw;
- u16 odr = enable ? sensor->odr : 0;
+ u32 odr = enable ? sensor->odr : 0;
int err;
err = st_lsm6dsx_set_odr(sensor, odr);
@@ -1084,14 +1477,15 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- delay = 1000000 / sensor->odr;
+ delay = 1000000000 / sensor->odr;
usleep_range(delay, 2 * delay);
err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
if (err < 0)
return err;
- st_lsm6dsx_sensor_set_enable(sensor, false);
+ if (!hw->enable_event)
+ st_lsm6dsx_sensor_set_enable(sensor, false);
*val = (s16)le16_to_cpu(data);
@@ -1115,8 +1509,9 @@ static int st_lsm6dsx_read_raw(struct iio_dev *iio_dev,
iio_device_release_direct_mode(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = sensor->odr;
- ret = IIO_VAL_INT;
+ *val = sensor->odr / 1000;
+ *val2 = (sensor->odr % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -1149,8 +1544,11 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SAMP_FREQ: {
u8 data;
- err = st_lsm6dsx_check_odr(sensor, val, &data);
- if (!err)
+ val = val * 1000 + val2 / 1000;
+ val = st_lsm6dsx_check_odr(sensor, val, &data);
+ if (val < 0)
+ err = val;
+ else
sensor->odr = val;
break;
}
@@ -1164,6 +1562,144 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
return err;
}
+static int st_lsm6dsx_event_setup(struct st_lsm6dsx_hw *hw, int state)
+{
+ const struct st_lsm6dsx_reg *reg;
+ unsigned int data;
+ int err;
+
+ if (!hw->settings->irq_config.irq1_func.addr)
+ return -ENOTSUPP;
+
+ reg = &hw->settings->event_settings.enable_reg;
+ if (reg->addr) {
+ data = ST_LSM6DSX_SHIFT_VAL(state, reg->mask);
+ err = st_lsm6dsx_update_bits_locked(hw, reg->addr,
+ reg->mask, data);
+ if (err < 0)
+ return err;
+ }
+
+ /* Enable wakeup interrupt */
+ data = ST_LSM6DSX_SHIFT_VAL(state, hw->irq_routing->mask);
+ return st_lsm6dsx_update_bits_locked(hw, hw->irq_routing->addr,
+ hw->irq_routing->mask, data);
+}
+
+static int st_lsm6dsx_read_event(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ *val2 = 0;
+ *val = hw->event_threshold;
+
+ return IIO_VAL_INT;
+}
+
+static int
+st_lsm6dsx_write_event(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_reg *reg;
+ unsigned int data;
+ int err;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ if (val < 0 || val > 31)
+ return -EINVAL;
+
+ reg = &hw->settings->event_settings.wakeup_reg;
+ data = ST_LSM6DSX_SHIFT_VAL(val, reg->mask);
+ err = st_lsm6dsx_update_bits_locked(hw, reg->addr,
+ reg->mask, data);
+ if (err < 0)
+ return -EINVAL;
+
+ hw->event_threshold = val;
+
+ return 0;
+}
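The threshold write above relies on ST_LSM6DSX_SHIFT_VAL to place the value inside the wakeup field; assuming that macro shifts the value to the mask's lowest set bit (as defined in st_lsm6dsx.h), the masked update reduces to the sketch below (not driver code).
/* sketch of the masked field update; ST_LSM6DSX_SHIFT_VAL is assumed to be
 * ((val) << __ffs(mask)) & (mask), i.e. the value shifted to the field LSB.
 */
static u8 wakeup_ths_field(u8 reg_val, u8 mask, u8 ths)
{
	return (reg_val & ~mask) | ((ths << __ffs(mask)) & mask);
}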
+
+static int
+st_lsm6dsx_read_event_config(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ return !!(hw->enable_event & BIT(chan->channel2));
+}
+
+static int
+st_lsm6dsx_write_event_config(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ u8 enable_event;
+ int err = 0;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ if (state) {
+ enable_event = hw->enable_event | BIT(chan->channel2);
+
+ /* do not enable events if they are already enabled */
+ if (hw->enable_event)
+ goto out;
+ } else {
+ enable_event = hw->enable_event & ~BIT(chan->channel2);
+
+ /* only turn off the sensor if no events are enabled */
+ if (enable_event)
+ goto out;
+ }
+
+ /* stop here if no changes have been made */
+ if (hw->enable_event == enable_event)
+ return 0;
+
+ err = st_lsm6dsx_event_setup(hw, state);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&hw->conf_lock);
+ err = st_lsm6dsx_sensor_set_enable(sensor, state);
+ mutex_unlock(&hw->conf_lock);
+ if (err < 0)
+ return err;
+
+out:
+ hw->enable_event = enable_event;
+
+ return 0;
+}
+
int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
@@ -1193,13 +1729,14 @@ st_lsm6dsx_sysfs_sampling_frequency_avail(struct device *dev,
char *buf)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
- enum st_lsm6dsx_sensor_id id = sensor->id;
- struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_odr_table_entry *odr_table;
int i, len = 0;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- hw->settings->odr_table[id].odr_avl[i].hz);
+ odr_table = &sensor->hw->settings->odr_table[sensor->id];
+ for (i = 0; i < odr_table->odr_len; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
+ odr_table->odr_avl[i].milli_hz / 1000,
+ odr_table->odr_avl[i].milli_hz % 1000);
buf[len - 1] = '\n';
return len;
@@ -1243,6 +1780,10 @@ static const struct iio_info st_lsm6dsx_acc_info = {
.attrs = &st_lsm6dsx_acc_attribute_group,
.read_raw = st_lsm6dsx_read_raw,
.write_raw = st_lsm6dsx_write_raw,
+ .read_event_value = st_lsm6dsx_read_event,
+ .write_event_value = st_lsm6dsx_write_event,
+ .read_event_config = st_lsm6dsx_read_event_config,
+ .write_event_config = st_lsm6dsx_write_event_config,
.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
};
@@ -1273,7 +1814,9 @@ static int st_lsm6dsx_of_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
return of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
}
-static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
+static int
+st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
+ const struct st_lsm6dsx_reg **drdy_reg)
{
int err = 0, drdy_pin;
@@ -1287,10 +1830,12 @@ static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
switch (drdy_pin) {
case 1:
- *drdy_reg = hw->settings->int1_addr;
+ hw->irq_routing = &hw->settings->irq_config.irq1_func;
+ *drdy_reg = &hw->settings->irq_config.irq1;
break;
case 2:
- *drdy_reg = hw->settings->int2_addr;
+ hw->irq_routing = &hw->settings->irq_config.irq2_func;
+ *drdy_reg = &hw->settings->irq_config.irq2;
break;
default:
dev_err(hw->dev, "unsupported data ready pin\n");
@@ -1381,51 +1926,95 @@ static int st_lsm6dsx_init_hw_timer(struct st_lsm6dsx_hw *hw)
if (err < 0)
return err;
}
+
+ /* calibrate timestamp sensitivity */
+ hw->ts_gain = ST_LSM6DSX_TS_SENSITIVITY;
+ if (ts_settings->freq_fine) {
+ err = regmap_read(hw->regmap, ts_settings->freq_fine, &val);
+ if (err < 0)
+ return err;
+
+ /*
+ * linearize the AN5192 formula:
+ * 1 / (1 + x) ~= 1 - x (Taylor’s Series)
+ * ttrim[s] = 1 / (40000 * (1 + 0.0015 * val))
+ * ttrim[ns] ~= 25000 - 37.5 * val
+ * ttrim[ns] ~= 25000 - (37500 * val) / 1000
+ */
+ hw->ts_gain -= ((s8)val * 37500) / 1000;
+ }
+
return 0;
}
static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
{
- u8 drdy_int_reg;
+ const struct st_lsm6dsx_reg *reg;
int err;
/* device sw reset */
- err = regmap_update_bits(hw->regmap, hw->settings->reset_addr,
- ST_LSM6DSX_REG_RESET_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_RESET_MASK, 1));
+ reg = &hw->settings->reset;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
msleep(50);
/* reload trimming parameter */
- err = regmap_update_bits(hw->regmap, hw->settings->reset_addr,
- ST_LSM6DSX_REG_BOOT_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_BOOT_MASK, 1));
+ reg = &hw->settings->boot;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
msleep(50);
/* enable Block Data Update */
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_BDU_ADDR,
- ST_LSM6DSX_REG_BDU_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_BDU_MASK, 1));
+ reg = &hw->settings->bdu;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
/* enable FIFO watermark interrupt */
- err = st_lsm6dsx_get_drdy_reg(hw, &drdy_int_reg);
+ err = st_lsm6dsx_get_drdy_reg(hw, &reg);
if (err < 0)
return err;
- err = regmap_update_bits(hw->regmap, drdy_int_reg,
- ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
- 1));
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
+ /* enable Latched interrupts for device events */
+ if (hw->settings->irq_config.lir.addr) {
+ reg = &hw->settings->irq_config.lir;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+
+ /* enable clear on read for latched interrupts */
+ if (hw->settings->irq_config.clear_on_read.addr) {
+ reg = &hw->settings->irq_config.clear_on_read;
+ err = regmap_update_bits(hw->regmap,
+ reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+ }
+ }
+
+ /* enable drdy-mask if available */
+ if (hw->settings->drdy_mask.addr) {
+ reg = &hw->settings->drdy_mask;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+ }
+
err = st_lsm6dsx_init_shub(hw);
if (err < 0)
return err;
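For reference, the freq_fine handling above just evaluates the linearized AN5192 expression with integer arithmetic. A minimal standalone sketch (not driver code; it only assumes the trim register holds a signed 8-bit value):
/* sketch: timestamp gain in ns from a signed FREQ_FINE trim value,
 * per ttrim[ns] ~= 25000 - 37.5 * val (37.5 kept integer as 37500 / 1000)
 */
static u32 lsm6dsx_ts_gain_ns(s8 trim)
{
	return 25000UL - ((s32)trim * 37500) / 1000;
}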
@@ -1453,7 +2042,7 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor = iio_priv(iio_dev);
sensor->id = id;
sensor->hw = hw;
- sensor->odr = hw->settings->odr_table[id].odr_avl[0].hz;
+ sensor->odr = hw->settings->odr_table[id].odr_avl[0].milli_hz;
sensor->gain = hw->settings->fs_table[id].fs_avl[0].gain;
sensor->watermark = 1;
@@ -1476,10 +2065,138 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
return iio_dev;
}
+static bool
+st_lsm6dsx_report_motion_event(struct st_lsm6dsx_hw *hw)
+{
+ const struct st_lsm6dsx_event_settings *event_settings;
+ int err, data;
+ s64 timestamp;
+
+ if (!hw->enable_event)
+ return false;
+
+ event_settings = &hw->settings->event_settings;
+ err = st_lsm6dsx_read_locked(hw, event_settings->wakeup_src_reg,
+ &data, sizeof(data));
+ if (err < 0)
+ return false;
+
+ timestamp = iio_get_time_ns(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+ if ((data & hw->settings->event_settings.wakeup_src_z_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_Z)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ if ((data & hw->settings->event_settings.wakeup_src_y_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_Y)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ if ((data & hw->settings->event_settings.wakeup_src_x_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_X)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ return data & event_settings->wakeup_src_status_mask;
+}
+
+static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
+{
+ struct st_lsm6dsx_hw *hw = private;
+ bool event;
+ int count;
+
+ event = st_lsm6dsx_report_motion_event(hw);
+
+ if (!hw->settings->fifo_ops.read_fifo)
+ return event ? IRQ_HANDLED : IRQ_NONE;
+
+ mutex_lock(&hw->fifo_lock);
+ count = hw->settings->fifo_ops.read_fifo(hw);
+ mutex_unlock(&hw->fifo_lock);
+
+ return count || event ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
+{
+ struct device_node *np = hw->dev->of_node;
+ struct st_sensors_platform_data *pdata;
+ const struct st_lsm6dsx_reg *reg;
+ unsigned long irq_type;
+ bool irq_active_low;
+ int err;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ irq_active_low = false;
+ break;
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_FALLING:
+ irq_active_low = true;
+ break;
+ default:
+ dev_info(hw->dev, "mode %lx unsupported\n", irq_type);
+ return -EINVAL;
+ }
+
+ reg = &hw->settings->irq_config.hla;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(irq_active_low,
+ reg->mask));
+ if (err < 0)
+ return err;
+
+ pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
+ if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ (pdata && pdata->open_drain)) {
+ reg = &hw->settings->irq_config.od;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+
+ irq_type |= IRQF_SHARED;
+ }
+
+ err = devm_request_threaded_irq(hw->dev, hw->irq,
+ NULL,
+ st_lsm6dsx_handler_thread,
+ irq_type | IRQF_ONESHOT,
+ "lsm6dsx", hw);
+ if (err) {
+ dev_err(hw->dev, "failed to request trigger irq %d\n",
+ hw->irq);
+ return err;
+ }
+
+ return 0;
+}
+
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
struct regmap *regmap)
{
+ struct st_sensors_platform_data *pdata = dev->platform_data;
const struct st_lsm6dsx_shub_settings *hub_settings;
+ struct device_node *np = dev->of_node;
struct st_lsm6dsx_hw *hw;
const char *name = NULL;
int i, err;
@@ -1524,6 +2241,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
}
if (hw->irq > 0) {
+ err = st_lsm6dsx_irq_setup(hw);
+ if (err < 0)
+ return err;
+
err = st_lsm6dsx_fifo_setup(hw);
if (err < 0)
return err;
@@ -1538,6 +2259,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
+ if ((np && of_property_read_bool(np, "wakeup-source")) ||
+ (pdata && pdata->wakeup_source))
+ device_init_wakeup(dev, true);
+
return 0;
}
EXPORT_SYMBOL(st_lsm6dsx_probe);
@@ -1556,6 +2281,13 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
if (!(hw->enable_mask & BIT(sensor->id)))
continue;
+ if (device_may_wakeup(dev) &&
+ sensor->id == ST_LSM6DSX_ID_ACC && hw->enable_event) {
+ /* Enable wake from IRQ */
+ enable_irq_wake(hw->irq);
+ continue;
+ }
+
if (sensor->id == ST_LSM6DSX_ID_EXT0 ||
sensor->id == ST_LSM6DSX_ID_EXT1 ||
sensor->id == ST_LSM6DSX_ID_EXT2)
@@ -1585,6 +2317,10 @@ static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
continue;
sensor = iio_priv(hw->iio_devs[i]);
+ if (device_may_wakeup(dev) &&
+ sensor->id == ST_LSM6DSX_ID_ACC && hw->enable_event)
+ disable_irq_wake(hw->irq);
+
if (!(hw->suspend_mask & BIT(sensor->id)))
continue;
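Because the ODR tables now hold milli-Hz (12500 for 12.5 Hz and so on), the sampling-frequency read and write paths above all perform the same split/recombine. A small sketch of that arithmetic, independent of the driver structures and using hypothetical helper names:
/* sketch: milli-Hz ODR <-> IIO_VAL_INT_PLUS_MICRO pair */
static void odr_mhz_to_iio(u32 odr_mhz, int *val, int *val2)
{
	*val = odr_mhz / 1000;			/* integer Hz */
	*val2 = (odr_mhz % 1000) * 1000;	/* remainder as micro-Hz */
}

static u32 odr_iio_to_mhz(int val, int val2)
{
	return val * 1000 + val2 / 1000;	/* e.g. 12 + 500000 -> 12500 */
}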
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index f52511059545..cd47ec1fedcb 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -87,6 +87,14 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,lsm9ds1-imu",
.data = (void *)ST_LSM9DS1_ID,
},
+ {
+ .compatible = "st,lsm6ds0",
+ .data = (void *)ST_LSM6DS0_ID,
+ },
+ {
+ .compatible = "st,lsm6dsrx",
+ .data = (void *)ST_LSM6DSRX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -104,6 +112,8 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DS3TRC_DEV_NAME, ST_LSM6DS3TRC_ID },
{ ST_ISM330DHCX_DEV_NAME, ST_ISM330DHCX_ID },
{ ST_LSM9DS1_DEV_NAME, ST_LSM9DS1_ID },
+ { ST_LSM6DS0_DEV_NAME, ST_LSM6DS0_ID },
+ { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index ea472cf6db7b..fa5d1001a46c 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -51,10 +51,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
.addr = 0x60,
.mask = GENMASK(3, 2),
},
- .odr_avl[0] = { 10, 0x0 },
- .odr_avl[1] = { 20, 0x1 },
- .odr_avl[2] = { 50, 0x2 },
- .odr_avl[3] = { 100, 0x3 },
+ .odr_avl[0] = { 10000, 0x0 },
+ .odr_avl[1] = { 20000, 0x1 },
+ .odr_avl[2] = { 50000, 0x2 },
+ .odr_avl[3] = { 100000, 0x3 },
+ .odr_len = 4,
},
.fs_table = {
.fs_avl[0] = {
@@ -93,11 +94,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
{
struct st_lsm6dsx_sensor *sensor;
- u16 odr;
+ u32 odr;
sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
- odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 13;
- msleep((2000U / odr) + 1);
+ odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 12500;
+ msleep((2000000U / odr) + 1);
}
/**
@@ -317,17 +318,18 @@ st_lsm6dsx_shub_write_with_mask(struct st_lsm6dsx_sensor *sensor,
static int
st_lsm6dsx_shub_get_odr_val(struct st_lsm6dsx_sensor *sensor,
- u16 odr, u16 *val)
+ u32 odr, u16 *val)
{
const struct st_lsm6dsx_ext_dev_settings *settings;
int i;
settings = sensor->ext_info.settings;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
- if (settings->odr_table.odr_avl[i].hz == odr)
+ for (i = 0; i < settings->odr_table.odr_len; i++) {
+ if (settings->odr_table.odr_avl[i].milli_hz == odr)
break;
+ }
- if (i == ST_LSM6DSX_ODR_LIST_SIZE)
+ if (i == settings->odr_table.odr_len)
return -EINVAL;
*val = settings->odr_table.odr_avl[i].val;
@@ -335,7 +337,7 @@ st_lsm6dsx_shub_get_odr_val(struct st_lsm6dsx_sensor *sensor,
}
static int
-st_lsm6dsx_shub_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
+st_lsm6dsx_shub_set_odr(struct st_lsm6dsx_sensor *sensor, u32 odr)
{
const struct st_lsm6dsx_ext_dev_settings *settings;
u16 val;
@@ -440,7 +442,7 @@ st_lsm6dsx_shub_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- delay = 1000000 / sensor->odr;
+ delay = 1000000000 / sensor->odr;
usleep_range(delay, 2 * delay);
len = min_t(int, sizeof(data), ch->scan_type.realbits >> 3);
@@ -480,8 +482,9 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
iio_device_release_direct_mode(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = sensor->odr;
- ret = IIO_VAL_INT;
+ *val = sensor->odr / 1000;
+ *val2 = (sensor->odr % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -512,6 +515,7 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SAMP_FREQ: {
u16 data;
+ val = val * 1000 + val2 / 1000;
err = st_lsm6dsx_shub_get_odr_val(sensor, val, &data);
if (!err)
sensor->odr = val;
@@ -537,12 +541,11 @@ st_lsm6dsx_shub_sampling_freq_avail(struct device *dev,
int i, len = 0;
settings = sensor->ext_info.settings;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++) {
- u16 val = settings->odr_table.odr_avl[i].hz;
+ for (i = 0; i < settings->odr_table.odr_len; i++) {
+ u32 val = settings->odr_table.odr_avl[i].milli_hz;
- if (val > 0)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- val);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
+ val / 1000, val % 1000);
}
buf[len - 1] = '\n';
@@ -607,7 +610,7 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor = iio_priv(iio_dev);
sensor->id = id;
sensor->hw = hw;
- sensor->odr = info->odr_table.odr_avl[0].hz;
+ sensor->odr = info->odr_table.odr_avl[0].milli_hz;
sensor->gain = info->fs_table.fs_avl[0].gain;
sensor->ext_info.settings = info;
sensor->ext_info.addr = i2c_addr;
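The new delay in st_lsm6dsx_shub_wait_complete() follows directly from the milli-Hz representation: one accelerometer period in ms is 1000000 / odr, and the code waits roughly two of them. A quick sketch of the same computation (hypothetical helper, not part of the driver):
/* sketch: ms to wait for ~2 accel samples, odr expressed in milli-Hz */
static unsigned int shub_wait_ms(u32 odr_mhz)
{
	return 2000000U / odr_mhz + 1;	/* 12500 mHz -> 161 ms */
}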
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 344b28dddebb..67ff36eac247 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -87,6 +87,14 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,lsm9ds1-imu",
.data = (void *)ST_LSM9DS1_ID,
},
+ {
+ .compatible = "st,lsm6ds0",
+ .data = (void *)ST_LSM6DS0_ID,
+ },
+ {
+ .compatible = "st,lsm6dsrx",
+ .data = (void *)ST_LSM6DSRX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -104,6 +112,8 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DS3TRC_DEV_NAME, ST_LSM6DS3TRC_ID },
{ ST_ISM330DHCX_DEV_NAME, ST_ISM330DHCX_ID },
{ ST_LSM9DS1_DEV_NAME, ST_LSM9DS1_ID },
+ { ST_LSM6DS0_DEV_NAME, ST_LSM6DS0_ID },
+ { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 524a686077ca..a46cdf2d8833 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1238,6 +1238,16 @@ static ssize_t iio_show_dev_name(struct device *dev,
static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
+static ssize_t iio_show_dev_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", indio_dev->label);
+}
+
+static DEVICE_ATTR(label, S_IRUGO, iio_show_dev_label, NULL);
+
static ssize_t iio_show_timestamp_clock(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1354,6 +1364,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
if (indio_dev->name)
attrcount++;
+ if (indio_dev->label)
+ attrcount++;
if (clk)
attrcount++;
@@ -1376,6 +1388,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
if (indio_dev->name)
indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
+ if (indio_dev->label)
+ indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
if (clk)
indio_dev->chan_attr_group.attrs[attrn++] = clk;
@@ -1610,7 +1624,7 @@ static const struct file_operations iio_buffer_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = iio_ioctl,
- .compat_ioctl = iio_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
@@ -1647,6 +1661,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
+ indio_dev->label = of_get_property(indio_dev->dev.of_node, "label",
+ NULL);
+
ret = iio_check_unique_scan_index(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 4a1a883dc061..9968f982fbc7 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -32,6 +32,17 @@ config ADJD_S311
This driver can also be built as a module. If so, the module
will be called adjd_s311.
+config ADUX1020
+ tristate "ADUX1020 photometric sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Analog Devices
+ ADUX1020 photometric sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adux1020.
+
config AL3320A
tristate "AL3320A ambient light sensor"
depends on I2C
@@ -496,6 +507,17 @@ config VCNL4035
To compile this driver as a module, choose M here: the
module will be called vcnl4035.
+config VEML6030
+ tristate "VEML6030 ambient light sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6030
+ ambient light sensor (ALS).
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6030.
+
config VEML6070
tristate "VEML6070 UV A light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 00d1f9b98f39..c98d1cefb861 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ACPI_ALS) += acpi-als.o
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
+obj-$(CONFIG_ADUX1020) += adux1020.o
obj-$(CONFIG_AL3320A) += al3320a.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9960) += apds9960.o
@@ -48,6 +49,7 @@ obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_VCNL4035) += vcnl4035.o
+obj-$(CONFIG_VEML6030) += veml6030.o
obj-$(CONFIG_VEML6070) += veml6070.o
obj-$(CONFIG_VL6180) += vl6180.o
obj-$(CONFIG_ZOPT2201) += zopt2201.o
diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c
new file mode 100644
index 000000000000..b07797ac10d7
--- /dev/null
+++ b/drivers/iio/light/adux1020.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * adux1020.c - Support for Analog Devices ADUX1020 photometric sensor
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * TODO: Triggered buffer support
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+#define ADUX1020_REGMAP_NAME "adux1020_regmap"
+#define ADUX1020_DRV_NAME "adux1020"
+
+/* System registers */
+#define ADUX1020_REG_CHIP_ID 0x08
+#define ADUX1020_REG_SLAVE_ADDRESS 0x09
+
+#define ADUX1020_REG_SW_RESET 0x0f
+#define ADUX1020_REG_INT_ENABLE 0x1c
+#define ADUX1020_REG_INT_POLARITY 0x1d
+#define ADUX1020_REG_PROX_TH_ON1 0x2a
+#define ADUX1020_REG_PROX_TH_OFF1 0x2b
+#define ADUX1020_REG_PROX_TYPE 0x2f
+#define ADUX1020_REG_TEST_MODES_3 0x32
+#define ADUX1020_REG_FORCE_MODE 0x33
+#define ADUX1020_REG_FREQUENCY 0x40
+#define ADUX1020_REG_LED_CURRENT 0x41
+#define ADUX1020_REG_OP_MODE 0x45
+#define ADUX1020_REG_INT_MASK 0x48
+#define ADUX1020_REG_INT_STATUS 0x49
+#define ADUX1020_REG_DATA_BUFFER 0x60
+
+/* Chip ID bits */
+#define ADUX1020_CHIP_ID_MASK GENMASK(11, 0)
+#define ADUX1020_CHIP_ID 0x03fc
+
+#define ADUX1020_SW_RESET BIT(1)
+#define ADUX1020_FIFO_FLUSH BIT(15)
+#define ADUX1020_OP_MODE_MASK GENMASK(3, 0)
+#define ADUX1020_DATA_OUT_MODE_MASK GENMASK(7, 4)
+#define ADUX1020_DATA_OUT_PROX_I FIELD_PREP(ADUX1020_DATA_OUT_MODE_MASK, 1)
+
+#define ADUX1020_MODE_INT_MASK GENMASK(7, 0)
+#define ADUX1020_INT_ENABLE 0x2094
+#define ADUX1020_INT_DISABLE 0x2090
+#define ADUX1020_PROX_INT_ENABLE 0x00f0
+#define ADUX1020_PROX_ON1_INT BIT(0)
+#define ADUX1020_PROX_OFF1_INT BIT(1)
+#define ADUX1020_FIFO_INT_ENABLE 0x7f
+#define ADUX1020_MODE_INT_DISABLE 0xff
+#define ADUX1020_MODE_INT_STATUS_MASK GENMASK(7, 0)
+#define ADUX1020_FIFO_STATUS_MASK GENMASK(15, 8)
+#define ADUX1020_INT_CLEAR 0xff
+#define ADUX1020_PROX_TYPE BIT(15)
+
+#define ADUX1020_INT_PROX_ON1 BIT(0)
+#define ADUX1020_INT_PROX_OFF1 BIT(1)
+
+#define ADUX1020_FORCE_CLOCK_ON 0x0f4f
+#define ADUX1020_FORCE_CLOCK_RESET 0x0040
+#define ADUX1020_ACTIVE_4_STATE 0x0008
+
+#define ADUX1020_PROX_FREQ_MASK GENMASK(7, 4)
+#define ADUX1020_PROX_FREQ(x) FIELD_PREP(ADUX1020_PROX_FREQ_MASK, x)
+
+#define ADUX1020_LED_CURRENT_MASK GENMASK(3, 0)
+#define ADUX1020_LED_PIREF_EN BIT(12)
+
+/* Operating modes */
+enum adux1020_op_modes {
+ ADUX1020_MODE_STANDBY,
+ ADUX1020_MODE_PROX_I,
+ ADUX1020_MODE_PROX_XY,
+ ADUX1020_MODE_GEST,
+ ADUX1020_MODE_SAMPLE,
+ ADUX1020_MODE_FORCE = 0x0e,
+ ADUX1020_MODE_IDLE = 0x0f,
+};
+
+struct adux1020_data {
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct mutex lock;
+ struct regmap *regmap;
+};
+
+struct adux1020_mode_data {
+ u8 bytes;
+ u8 buf_len;
+ u16 int_en;
+};
+
+static const struct adux1020_mode_data adux1020_modes[] = {
+ [ADUX1020_MODE_PROX_I] = {
+ .bytes = 2,
+ .buf_len = 1,
+ .int_en = ADUX1020_PROX_INT_ENABLE,
+ },
+};
+
+static const struct regmap_config adux1020_regmap_config = {
+ .name = ADUX1020_REGMAP_NAME,
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0x6F,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct reg_sequence adux1020_def_conf[] = {
+ { 0x000c, 0x000f },
+ { 0x0010, 0x1010 },
+ { 0x0011, 0x004c },
+ { 0x0012, 0x5f0c },
+ { 0x0013, 0xada5 },
+ { 0x0014, 0x0080 },
+ { 0x0015, 0x0000 },
+ { 0x0016, 0x0600 },
+ { 0x0017, 0x0000 },
+ { 0x0018, 0x2693 },
+ { 0x0019, 0x0004 },
+ { 0x001a, 0x4280 },
+ { 0x001b, 0x0060 },
+ { 0x001c, 0x2094 },
+ { 0x001d, 0x0020 },
+ { 0x001e, 0x0001 },
+ { 0x001f, 0x0100 },
+ { 0x0020, 0x0320 },
+ { 0x0021, 0x0A13 },
+ { 0x0022, 0x0320 },
+ { 0x0023, 0x0113 },
+ { 0x0024, 0x0000 },
+ { 0x0025, 0x2412 },
+ { 0x0026, 0x2412 },
+ { 0x0027, 0x0022 },
+ { 0x0028, 0x0000 },
+ { 0x0029, 0x0300 },
+ { 0x002a, 0x0700 },
+ { 0x002b, 0x0600 },
+ { 0x002c, 0x6000 },
+ { 0x002d, 0x4000 },
+ { 0x002e, 0x0000 },
+ { 0x002f, 0x0000 },
+ { 0x0030, 0x0000 },
+ { 0x0031, 0x0000 },
+ { 0x0032, 0x0040 },
+ { 0x0033, 0x0008 },
+ { 0x0034, 0xE400 },
+ { 0x0038, 0x8080 },
+ { 0x0039, 0x8080 },
+ { 0x003a, 0x2000 },
+ { 0x003b, 0x1f00 },
+ { 0x003c, 0x2000 },
+ { 0x003d, 0x2000 },
+ { 0x003e, 0x0000 },
+ { 0x0040, 0x8069 },
+ { 0x0041, 0x1f2f },
+ { 0x0042, 0x4000 },
+ { 0x0043, 0x0000 },
+ { 0x0044, 0x0008 },
+ { 0x0046, 0x0000 },
+ { 0x0048, 0x00ef },
+ { 0x0049, 0x0000 },
+ { 0x0045, 0x0000 },
+};
+
+static const int adux1020_rates[][2] = {
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 500000 },
+ { 1, 0 },
+ { 2, 0 },
+ { 5, 0 },
+ { 10, 0 },
+ { 20, 0 },
+ { 50, 0 },
+ { 100, 0 },
+ { 190, 0 },
+ { 450, 0 },
+ { 820, 0 },
+ { 1400, 0 },
+};
+
+static const int adux1020_led_currents[][2] = {
+ { 0, 25000 },
+ { 0, 40000 },
+ { 0, 55000 },
+ { 0, 70000 },
+ { 0, 85000 },
+ { 0, 100000 },
+ { 0, 115000 },
+ { 0, 130000 },
+ { 0, 145000 },
+ { 0, 160000 },
+ { 0, 175000 },
+ { 0, 190000 },
+ { 0, 205000 },
+ { 0, 220000 },
+ { 0, 235000 },
+ { 0, 250000 },
+};
+
+static int adux1020_flush_fifo(struct adux1020_data *data)
+{
+ int ret;
+
+ /* Force Idle mode */
+ ret = regmap_write(data->regmap, ADUX1020_REG_FORCE_MODE,
+ ADUX1020_ACTIVE_4_STATE);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK, ADUX1020_MODE_FORCE);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK, ADUX1020_MODE_IDLE);
+ if (ret < 0)
+ return ret;
+
+ /* Flush FIFO */
+ ret = regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_ON);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_STATUS,
+ ADUX1020_FIFO_FLUSH);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_RESET);
+}
+
+static int adux1020_read_fifo(struct adux1020_data *data, u16 *buf, u8 buf_len)
+{
+ unsigned int regval;
+ int i, ret;
+
+ /* Enable 32MHz clock */
+ ret = regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_ON);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < buf_len; i++) {
+ ret = regmap_read(data->regmap, ADUX1020_REG_DATA_BUFFER,
+ &regval);
+ if (ret < 0)
+ return ret;
+
+ buf[i] = regval;
+ }
+
+ /* Set 32MHz clock to be controlled by internal state machine */
+ return regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_RESET);
+}
+
+static int adux1020_set_mode(struct adux1020_data *data,
+ enum adux1020_op_modes mode)
+{
+ int ret;
+
+ /* Switch to standby mode before changing the mode */
+ ret = regmap_write(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_MODE_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ /* Set data out and switch to the desired mode */
+ switch (mode) {
+ case ADUX1020_MODE_PROX_I:
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_DATA_OUT_MODE_MASK,
+ ADUX1020_DATA_OUT_PROX_I);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK,
+ ADUX1020_MODE_PROX_I);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adux1020_measure(struct adux1020_data *data,
+ enum adux1020_op_modes mode,
+ u16 *val)
+{
+ unsigned int status;
+ int ret, tries = 50;
+
+ /* Disable INT pin as polling is going to be used */
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_ENABLE,
+ ADUX1020_INT_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /* Enable mode interrupt */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK,
+ adux1020_modes[mode].int_en);
+ if (ret < 0)
+ return ret;
+
+ while (tries--) {
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ status &= ADUX1020_FIFO_STATUS_MASK;
+ if (status >= adux1020_modes[mode].bytes)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0)
+ return -EIO;
+
+ ret = adux1020_read_fifo(data, val, adux1020_modes[mode].buf_len);
+ if (ret < 0)
+ return ret;
+
+ /* Clear mode interrupt */
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_STATUS,
+ (~adux1020_modes[mode].int_en));
+ if (ret < 0)
+ return ret;
+
+ /* Disable mode interrupts */
+ return regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK,
+ ADUX1020_MODE_INT_DISABLE);
+}
+
+static int adux1020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u16 buf[3];
+ int ret = -EINVAL;
+ unsigned int regval;
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = adux1020_set_mode(data, ADUX1020_MODE_PROX_I);
+ if (ret < 0)
+ goto fail;
+
+ ret = adux1020_measure(data, ADUX1020_MODE_PROX_I, buf);
+ if (ret < 0)
+ goto fail;
+
+ *val = buf[0];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ ret = regmap_read(data->regmap,
+ ADUX1020_REG_LED_CURRENT, &regval);
+ if (ret < 0)
+ goto fail;
+
+ regval = regval & ADUX1020_LED_CURRENT_MASK;
+
+ *val = adux1020_led_currents[regval][0];
+ *val2 = adux1020_led_currents[regval][1];
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = regmap_read(data->regmap, ADUX1020_REG_FREQUENCY,
+ &regval);
+ if (ret < 0)
+ goto fail;
+
+ regval = FIELD_GET(ADUX1020_PROX_FREQ_MASK, regval);
+
+ *val = adux1020_rates[regval][0];
+ *val2 = adux1020_rates[regval][1];
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+};
+
+static inline int adux1020_find_index(const int array[][2], int count, int val,
+ int val2)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (val == array[i][0] && val2 == array[i][1])
+ return i;
+
+ return -EINVAL;
+}
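adux1020_find_index() is a plain linear match over the {integer, micro} pairs; a hypothetical call resolving the 0.5 Hz proximity rate would look like this (index 2 in adux1020_rates, per the table above):
/* usage sketch: map 0.5 Hz to its ADUX1020_REG_FREQUENCY index */
int idx = adux1020_find_index(adux1020_rates, ARRAY_SIZE(adux1020_rates),
			      0, 500000);	/* -> 2, or -EINVAL if absent */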
+
+static int adux1020_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int i, ret = -EINVAL;
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (chan->type == IIO_PROXIMITY) {
+ i = adux1020_find_index(adux1020_rates,
+ ARRAY_SIZE(adux1020_rates),
+ val, val2);
+ if (i < 0) {
+ ret = i;
+ goto fail;
+ }
+
+ ret = regmap_update_bits(data->regmap,
+ ADUX1020_REG_FREQUENCY,
+ ADUX1020_PROX_FREQ_MASK,
+ ADUX1020_PROX_FREQ(i));
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type == IIO_CURRENT) {
+ i = adux1020_find_index(adux1020_led_currents,
+ ARRAY_SIZE(adux1020_led_currents),
+ val, val2);
+ if (i < 0) {
+ ret = i;
+ goto fail;
+ }
+
+ ret = regmap_update_bits(data->regmap,
+ ADUX1020_REG_LED_CURRENT,
+ ADUX1020_LED_CURRENT_MASK, i);
+ }
+ break;
+ default:
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int adux1020_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, mask;
+
+ mutex_lock(&data->lock);
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_ENABLE,
+ ADUX1020_INT_ENABLE);
+ if (ret < 0)
+ goto fail;
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_POLARITY, 0);
+ if (ret < 0)
+ goto fail;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ mask = ADUX1020_PROX_ON1_INT;
+ else
+ mask = ADUX1020_PROX_OFF1_INT;
+
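+ /*
+ * A set bit in ADUX1020_REG_INT_MASK masks (disables) the
+ * corresponding interrupt, so enabling the event clears the bit
+ * and disabling it sets the bit.
+ */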
+ if (state)
+ state = 0;
+ else
+ state = mask;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ mask, state);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * Trigger the proximity interrupt when the intensity is above
+ * or below the threshold
+ */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_PROX_TYPE,
+ ADUX1020_PROX_TYPE,
+ ADUX1020_PROX_TYPE);
+ if (ret < 0)
+ goto fail;
+
+ /* Set proximity mode */
+ ret = adux1020_set_mode(data, ADUX1020_MODE_PROX_I);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int adux1020_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, mask;
+ unsigned int regval;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ mask = ADUX1020_PROX_ON1_INT;
+ else
+ mask = ADUX1020_PROX_OFF1_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_MASK, &regval);
+ if (ret < 0)
+ return ret;
+
+ return !(regval & mask);
+}
+
+static int adux1020_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u8 reg;
+ int ret;
+ unsigned int regval;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = ADUX1020_REG_PROX_TH_ON1;
+ else
+ reg = ADUX1020_REG_PROX_TH_OFF1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = regval;
+
+ return IIO_VAL_INT;
+}
+
+static int adux1020_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u8 reg;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = ADUX1020_REG_PROX_TH_ON1;
+ else
+ reg = ADUX1020_REG_PROX_TH_OFF1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Full scale threshold value is 0-65535 */
+ if (val < 0 || val > 65535)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, reg, val);
+}
+
+static const struct iio_event_spec adux1020_proximity_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec adux1020_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .event_spec = adux1020_proximity_event,
+ .num_event_specs = ARRAY_SIZE(adux1020_proximity_event),
+ },
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .extend_name = "led",
+ .output = 1,
+ },
+};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "0.1 0.2 0.5 1 2 5 10 20 50 100 190 450 820 1400");
+
+static struct attribute *adux1020_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adux1020_attribute_group = {
+ .attrs = adux1020_attributes,
+};
+
+static const struct iio_info adux1020_info = {
+ .attrs = &adux1020_attribute_group,
+ .read_raw = adux1020_read_raw,
+ .write_raw = adux1020_write_raw,
+ .read_event_config = adux1020_read_event_config,
+ .write_event_config = adux1020_write_event_config,
+ .read_event_value = adux1020_read_thresh,
+ .write_event_value = adux1020_write_thresh,
+};
+
+static irqreturn_t adux1020_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, status;
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_STATUS, &status);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ status &= ADUX1020_MODE_INT_STATUS_MASK;
+
+ if (status & ADUX1020_INT_PROX_ON1) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (status & ADUX1020_INT_PROX_OFF1) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ regmap_update_bits(data->regmap, ADUX1020_REG_INT_STATUS,
+ ADUX1020_MODE_INT_MASK, ADUX1020_INT_CLEAR);
+
+ return IRQ_HANDLED;
+}
+
+static int adux1020_chip_init(struct adux1020_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_CHIP_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if ((val & ADUX1020_CHIP_ID_MASK) != ADUX1020_CHIP_ID) {
+ dev_err(&client->dev, "invalid chip id 0x%04x\n", val);
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "Detected ADUX1020 with chip id: 0x%04x\n", val);
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_SW_RESET,
+ ADUX1020_SW_RESET, ADUX1020_SW_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Load default configuration */
+ ret = regmap_multi_reg_write(data->regmap, adux1020_def_conf,
+ ARRAY_SIZE(adux1020_def_conf));
+ if (ret < 0)
+ return ret;
+
+ ret = adux1020_flush_fifo(data);
+ if (ret < 0)
+ return ret;
+
+ /* Use LED_IREF for proximity mode */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_LED_CURRENT,
+ ADUX1020_LED_PIREF_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Mask all interrupts */
+ return regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK, ADUX1020_MODE_INT_DISABLE);
+}
+
+static int adux1020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adux1020_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &adux1020_info;
+ indio_dev->name = ADUX1020_DRV_NAME;
+ indio_dev->channels = adux1020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adux1020_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ data = iio_priv(indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &adux1020_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "regmap initialization failed.\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ data->client = client;
+ data->indio_dev = indio_dev;
+ mutex_init(&data->lock);
+
+ ret = adux1020_chip_init(data);
+ if (ret)
+ return ret;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, adux1020_interrupt_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ ADUX1020_DRV_NAME, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "irq request error %d\n", -ret);
+ return ret;
+ }
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id adux1020_id[] = {
+ { "adux1020", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adux1020_id);
+
+static const struct of_device_id adux1020_of_match[] = {
+ { .compatible = "adi,adux1020" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adux1020_of_match);
+
+static struct i2c_driver adux1020_driver = {
+ .driver = {
+ .name = ADUX1020_DRV_NAME,
+ .of_match_table = adux1020_of_match,
+ },
+ .probe = adux1020_probe,
+ .id_table = adux1020_id,
+};
+module_i2c_driver(adux1020_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("ADUX1020 photometric sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 28347df78cff..adb5ab9e3439 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -59,9 +59,9 @@ struct bh1750_chip_info {
u16 int_time_low_mask;
u16 int_time_high_mask;
-}
+};
-static const bh1750_chip_info_tbl[] = {
+static const struct bh1750_chip_info bh1750_chip_info_tbl[] = {
[BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 },
[BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 },
[BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 },
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 1019d625adb1..90e38fcc974b 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -532,7 +532,7 @@ static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
int state)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
- int cmd, ret = -EINVAL;
+ int cmd, ret;
mutex_lock(&cm36651->lock);
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index c5263b563fc1..d85a391e50c5 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -169,17 +169,11 @@ static const struct iio_info cros_ec_light_prox_info = {
static int cros_ec_light_prox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_light_prox_state *state;
struct iio_chan_spec *channel;
int ret;
- if (!ec_dev || !ec_dev->ec_dev) {
- dev_warn(dev, "No CROS EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 7c0291c5fe76..b542e5619ead 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -240,32 +240,42 @@ static const struct iio_info tcs3414_info = {
.attrs = &tcs3414_attribute_group,
};
-static int tcs3414_buffer_preenable(struct iio_dev *indio_dev)
+static int tcs3414_buffer_postenable(struct iio_dev *indio_dev)
{
struct tcs3414_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
data->control |= TCS3414_CONTROL_ADC_EN;
- return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
data->control);
+ if (ret)
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
}
static int tcs3414_buffer_predisable(struct iio_dev *indio_dev)
{
struct tcs3414_data *data = iio_priv(indio_dev);
- int ret;
-
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- return ret;
+ int ret, ret2;
data->control &= ~TCS3414_CONTROL_ADC_EN;
- return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
data->control);
+
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (!ret)
+ ret = ret2;
+
+ return ret;
}
static const struct iio_buffer_setup_ops tcs3414_buffer_setup_ops = {
- .preenable = tcs3414_buffer_preenable,
- .postenable = &iio_triggered_buffer_postenable,
+ .postenable = tcs3414_buffer_postenable,
.predisable = tcs3414_buffer_predisable,
};
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
new file mode 100644
index 000000000000..aa25b87fca8f
--- /dev/null
+++ b/drivers/iio/light/veml6030.c
@@ -0,0 +1,908 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VEML6030 Ambient Light Sensor
+ *
+ * Copyright (c) 2019, Rishi Gupta <gupt21@gmail.com>
+ *
+ * Datasheet: https://www.vishay.com/docs/84366/veml6030.pdf
+ * Appnote-84367: https://www.vishay.com/docs/84367/designingveml6030.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+/* Device registers */
+#define VEML6030_REG_ALS_CONF 0x00
+#define VEML6030_REG_ALS_WH 0x01
+#define VEML6030_REG_ALS_WL 0x02
+#define VEML6030_REG_ALS_PSM 0x03
+#define VEML6030_REG_ALS_DATA 0x04
+#define VEML6030_REG_WH_DATA 0x05
+#define VEML6030_REG_ALS_INT 0x06
+
+/* Bit masks for specific functionality */
+#define VEML6030_ALS_IT GENMASK(9, 6)
+#define VEML6030_PSM GENMASK(2, 1)
+#define VEML6030_ALS_PERS GENMASK(5, 4)
+#define VEML6030_ALS_GAIN GENMASK(12, 11)
+#define VEML6030_PSM_EN BIT(0)
+#define VEML6030_INT_TH_LOW BIT(15)
+#define VEML6030_INT_TH_HIGH BIT(14)
+#define VEML6030_ALS_INT_EN BIT(1)
+#define VEML6030_ALS_SD BIT(0)
+
+/*
+ * The resolution depends on both gain and integration time. The
+ * cur_resolution field stores one of the resolutions mentioned in the
+ * table during startup and gets updated whenever the integration time
+ * or the gain is changed.
+ *
+ * Table 'resolution and maximum detection range' in appnote 84367
+ * is visualized as a 2D array. The cur_gain field stores the index of
+ * the gain in this table (0-3) while cur_integration_time holds the
+ * index of the integration time (0-5).
+ */
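+/*
+ * Illustration, using the values this driver caches at init: ALS gain 1/8
+ * with a 100 ms integration time corresponds to cur_resolution = 4608
+ * (0.4608 lux per count); stepping the gain up to 1/4 halves that to 2304,
+ * and stepping the integration time up to 200 ms halves it again.
+ */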
+struct veml6030_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ int cur_resolution;
+ int cur_gain;
+ int cur_integration_time;
+};
+
+/* Integration time available in seconds */
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+ "0.025 0.05 0.1 0.2 0.4 0.8");
+
+/*
+ * Scale is 1/gain. Value 0.125 is ALS gain x (1/8), 0.25 is
+ * ALS gain x (1/4), 1.0 = ALS gain x 1 and 2.0 is ALS gain x 2.
+ */
+static IIO_CONST_ATTR(in_illuminance_scale_available,
+ "0.125 0.25 1.0 2.0");
+
+static struct attribute *veml6030_attributes[] = {
+ &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group veml6030_attr_group = {
+ .attrs = veml6030_attributes,
+};
+
+/*
+ * Persistence = 1/2/4/8 x integration time
+ * Minimum time for which the light reading must stay beyond the
+ * configured threshold before the interrupt is asserted.
+ */
+static const char * const period_values[] = {
+ "0.1 0.2 0.4 0.8",
+ "0.2 0.4 0.8 1.6",
+ "0.4 0.8 1.6 3.2",
+ "0.8 1.6 3.2 6.4",
+ "0.05 0.1 0.2 0.4",
+ "0.025 0.050 0.1 0.2"
+};
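+/*
+ * The rows above are indexed by the integration time currently in use:
+ * 100 ms, 200 ms, 400 ms, 800 ms, 50 ms and 25 ms respectively (see
+ * in_illuminance_period_available_show() below).
+ */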
+
+/*
+ * Return list of valid period values in seconds corresponding to
+ * the currently active integration time.
+ */
+static ssize_t in_illuminance_period_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret, reg, x;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ ret = ((reg >> 6) & 0xF);
+ switch (ret) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ x = ret;
+ break;
+ case 8:
+ x = 4;
+ break;
+ case 12:
+ x = 5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", period_values[x]);
+}
+
+static IIO_DEVICE_ATTR_RO(in_illuminance_period_available, 0);
+
+static struct attribute *veml6030_event_attributes[] = {
+ &iio_dev_attr_in_illuminance_period_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group veml6030_event_attr_group = {
+ .attrs = veml6030_event_attributes,
+};
+
+static int veml6030_als_pwr_on(struct veml6030_data *data)
+{
+ return regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_SD, 0);
+}
+
+static int veml6030_als_shut_down(struct veml6030_data *data)
+{
+ return regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_SD, 1);
+}
+
+static void veml6030_als_shut_down_action(void *data)
+{
+ veml6030_als_shut_down(data);
+}
+
+static const struct iio_event_spec veml6030_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/* Channel number */
+enum veml6030_chan {
+ CH_ALS,
+ CH_WHITE,
+};
+
+static const struct iio_chan_spec veml6030_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .channel = CH_ALS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = veml6030_event_spec,
+ .num_event_specs = ARRAY_SIZE(veml6030_event_spec),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .channel = CH_WHITE,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static const struct regmap_config veml6030_regmap_config = {
+ .name = "veml6030_regmap",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = VEML6030_REG_ALS_INT,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int veml6030_get_intgrn_tm(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ switch ((reg >> 6) & 0xF) {
+ case 0:
+ *val2 = 100000;
+ break;
+ case 1:
+ *val2 = 200000;
+ break;
+ case 2:
+ *val2 = 400000;
+ break;
+ case 3:
+ *val2 = 800000;
+ break;
+ case 8:
+ *val2 = 50000;
+ break;
+ case 12:
+ *val2 = 25000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6030_set_intgrn_tm(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, new_int_time, int_idx;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val)
+ return -EINVAL;
+
+ switch (val2) {
+ case 25000:
+ new_int_time = 0x300;
+ int_idx = 5;
+ break;
+ case 50000:
+ new_int_time = 0x200;
+ int_idx = 4;
+ break;
+ case 100000:
+ new_int_time = 0x00;
+ int_idx = 3;
+ break;
+ case 200000:
+ new_int_time = 0x40;
+ int_idx = 2;
+ break;
+ case 400000:
+ new_int_time = 0x80;
+ int_idx = 1;
+ break;
+ case 800000:
+ new_int_time = 0xC0;
+ int_idx = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_IT, new_int_time);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't update als integration time %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Cache the current integration time and update the resolution. For
+ * every increase of the integration time to the next level, the
+ * resolution is halved, and vice versa.
+ */
+ if (data->cur_integration_time < int_idx)
+ data->cur_resolution <<= int_idx - data->cur_integration_time;
+ else if (data->cur_integration_time > int_idx)
+ data->cur_resolution >>= data->cur_integration_time - int_idx;
+
+ data->cur_integration_time = int_idx;
+
+ return ret;
+}
+
+static int veml6030_read_persistence(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg, period, x, y;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ /* integration time multiplied by 1/2/4/8 */
+ period = y * (1 << ((reg >> 4) & 0x03));
+
+ *val = period / 1000000;
+ *val2 = period % 1000000;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
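+/*
+ * Worked example, assuming a 100 ms integration time: writing a period of
+ * 0.4 s gives period = 400000 / 100000 = 4, so the ALS_PERS field is
+ * programmed with ffs(4) - 1 = 2, i.e. 4 x integration time.
+ */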
+static int veml6030_write_persistence(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, period, x, y;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ if (ret < 0)
+ return ret;
+
+ if (!val) {
+ period = val2 / y;
+ } else {
+ if ((val == 1) && (val2 == 600000))
+ period = 1600000 / y;
+ else if ((val == 3) && (val2 == 200000))
+ period = 3200000 / y;
+ else if ((val == 6) && (val2 == 400000))
+ period = 6400000 / y;
+ else
+ period = -1;
+ }
+
+ if (period <= 0 || period > 8 || hweight8(period) != 1)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_PERS, (ffs(period) - 1) << 4);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set persistence value %d\n", ret);
+
+ return ret;
+}
+
+static int veml6030_set_als_gain(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, new_gain, gain_idx;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val == 0 && val2 == 125000) {
+ new_gain = 0x1000; /* 0x02 << 11 */
+ gain_idx = 3;
+ } else if (val == 0 && val2 == 250000) {
+ new_gain = 0x1800;
+ gain_idx = 2;
+ } else if (val == 1 && val2 == 0) {
+ new_gain = 0x00;
+ gain_idx = 1;
+ } else if (val == 2 && val2 == 0) {
+ new_gain = 0x800;
+ gain_idx = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_GAIN, new_gain);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't set als gain %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Cache the currently set gain and update the resolution. For every
+ * increase of the gain to the next level, the resolution is halved,
+ * and vice versa.
+ */
+ if (data->cur_gain < gain_idx)
+ data->cur_resolution <<= gain_idx - data->cur_gain;
+ else if (data->cur_gain > gain_idx)
+ data->cur_resolution >>= data->cur_gain - gain_idx;
+
+ data->cur_gain = gain_idx;
+
+ return ret;
+}
+
+static int veml6030_get_als_gain(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ switch ((reg >> 11) & 0x03) {
+ case 0:
+ *val = 1;
+ *val2 = 0;
+ break;
+ case 1:
+ *val = 2;
+ *val2 = 0;
+ break;
+ case 2:
+ *val = 0;
+ *val2 = 125000;
+ break;
+ case 3:
+ *val = 0;
+ *val2 = 250000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6030_read_thresh(struct iio_dev *indio_dev,
+ int *val, int *val2, int dir)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (dir == IIO_EV_DIR_RISING)
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_WH, &reg);
+ else
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_WL, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als threshold value %d\n", ret);
+ return ret;
+ }
+
+ *val = reg & 0xffff;
+ return IIO_VAL_INT;
+}
+
+static int veml6030_write_thresh(struct iio_dev *indio_dev,
+ int val, int val2, int dir)
+{
+ int ret;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val > 0xFFFF || val < 0 || val2)
+ return -EINVAL;
+
+ if (dir == IIO_EV_DIR_RISING) {
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WH, val);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set high threshold %d\n", ret);
+ } else {
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WL, val);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set low threshold %d\n", ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Provide both the raw reading and the processed light reading in lux.
+ * light (in lux) = resolution * raw reading
+ */
+static int veml6030_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+ struct regmap *regmap = data->regmap;
+ struct device *dev = &data->client->dev;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = regmap_read(regmap, VEML6030_REG_ALS_DATA, &reg);
+ if (ret < 0) {
+ dev_err(dev, "can't read als data %d\n", ret);
+ return ret;
+ }
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ *val = (reg * data->cur_resolution) / 10000;
+ *val2 = (reg * data->cur_resolution) % 10000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ *val = reg;
+ return IIO_VAL_INT;
+ case IIO_INTENSITY:
+ ret = regmap_read(regmap, VEML6030_REG_WH_DATA, &reg);
+ if (ret < 0) {
+ dev_err(dev, "can't read white data %d\n", ret);
+ return ret;
+ }
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ *val = (reg * data->cur_resolution) / 10000;
+ *val2 = (reg * data->cur_resolution) % 10000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ *val = reg;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT)
+ return veml6030_get_intgrn_tm(indio_dev, val, val2);
+ return -EINVAL;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_LIGHT)
+ return veml6030_get_als_gain(indio_dev, val, val2);
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ return veml6030_set_intgrn_tm(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ return veml6030_set_als_gain(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_read_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ case IIO_EV_DIR_FALLING:
+ return veml6030_read_thresh(indio_dev, val, val2, dir);
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_EV_INFO_PERIOD:
+ return veml6030_read_persistence(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return veml6030_write_thresh(indio_dev, val, val2, dir);
+ case IIO_EV_INFO_PERIOD:
+ return veml6030_write_persistence(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_read_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ if (reg & VEML6030_ALS_INT_EN)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * The sensor must not be measuring light while the interrupt is being
+ * configured. Therefore the correct sequence to configure the interrupt
+ * functionality is:
+ * shut down -> enable/disable interrupt -> power on
+ *
+ * state = 1 enables interrupt, state = 0 disables interrupt
+ */
+static int veml6030_write_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ int ret;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (state < 0 || state > 1)
+ return -EINVAL;
+
+ ret = veml6030_als_shut_down(data);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "can't disable als to configure interrupt %d\n", ret);
+ return ret;
+ }
+
+ /* enable interrupt + power on */
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_INT_EN | VEML6030_ALS_SD, state << 1);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't enable interrupt & poweron als %d\n", ret);
+
+ return ret;
+}
+
+static const struct iio_info veml6030_info = {
+ .read_raw = veml6030_read_raw,
+ .write_raw = veml6030_write_raw,
+ .read_event_value = veml6030_read_event_val,
+ .write_event_value = veml6030_write_event_val,
+ .read_event_config = veml6030_read_interrupt_config,
+ .write_event_config = veml6030_write_interrupt_config,
+ .attrs = &veml6030_attr_group,
+ .event_attrs = &veml6030_event_attr_group,
+};
+
+static const struct iio_info veml6030_info_no_irq = {
+ .read_raw = veml6030_read_raw,
+ .write_raw = veml6030_write_raw,
+ .attrs = &veml6030_attr_group,
+};
+
+static irqreturn_t veml6030_event_handler(int irq, void *private)
+{
+ int ret, reg, evtdir;
+ struct iio_dev *indio_dev = private;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_INT, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als interrupt register %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ /* Spurious interrupt handling */
+ if (!(reg & (VEML6030_INT_TH_HIGH | VEML6030_INT_TH_LOW)))
+ return IRQ_NONE;
+
+ if (reg & VEML6030_INT_TH_HIGH)
+ evtdir = IIO_EV_DIR_RISING;
+ else
+ evtdir = IIO_EV_DIR_FALLING;
+
+ iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
+ 0, IIO_EV_TYPE_THRESH, evtdir),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set ALS gain to 1/8, integration time to 100 ms, PSM to mode 2,
+ * persistence to 1 x integration time and leave the threshold
+ * interrupt disabled by default. First shut down the sensor,
+ * update the registers and then power the sensor back on.
+ */
+static int veml6030_hw_init(struct iio_dev *indio_dev)
+{
+ int ret, val;
+ struct veml6030_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ ret = veml6030_als_shut_down(data);
+ if (ret) {
+ dev_err(&client->dev, "can't shutdown als %d\n", ret);
+ return ret;
+ }
+
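+ /*
+ * 0x1001: ALS gain 1/8 (bits 12:11 = 10b), 100 ms integration time
+ * (bits 9:6 = 0), interrupt disabled and ALS still shut down (bit 0).
+ */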
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_CONF, 0x1001);
+ if (ret) {
+ dev_err(&client->dev, "can't setup als configs %d\n", ret);
+ return ret;
+ }
+
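+ /* 0x03: PSM mode 2 (bits 2:1 = 01b) with power saving mode enabled */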
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_PSM,
+ VEML6030_PSM | VEML6030_PSM_EN, 0x03);
+ if (ret) {
+ dev_err(&client->dev, "can't setup default PSM %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WH, 0xFFFF);
+ if (ret) {
+ dev_err(&client->dev, "can't setup high threshold %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WL, 0x0000);
+ if (ret) {
+ dev_err(&client->dev, "can't setup low threshold %d\n", ret);
+ return ret;
+ }
+
+ ret = veml6030_als_pwr_on(data);
+ if (ret) {
+ dev_err(&client->dev, "can't poweron als %d\n", ret);
+ return ret;
+ }
+
+ /* Wait 4 ms to let processor & oscillator start correctly */
+ usleep_range(4000, 4002);
+
+ /* Clear stale interrupt status bits if any during start */
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_INT, &val);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "can't clear als interrupt status %d\n", ret);
+ return ret;
+ }
+
+ /* Cache currently active measurement parameters */
+ data->cur_gain = 3;
+ data->cur_resolution = 4608;
+ data->cur_integration_time = 3;
+
+ return ret;
+}
+
+static int veml6030_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct veml6030_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c adapter doesn't support plain i2c\n");
+ return -EOPNOTSUPP;
+ }
+
+ regmap = devm_regmap_init_i2c(client, &veml6030_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "can't setup regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = "veml6030";
+ indio_dev->channels = veml6030_channels;
+ indio_dev->num_channels = ARRAY_SIZE(veml6030_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, veml6030_event_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "veml6030", indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "irq %d request failed\n", client->irq);
+ return ret;
+ }
+ indio_dev->info = &veml6030_info;
+ } else {
+ indio_dev->info = &veml6030_info_no_irq;
+ }
+
+ ret = veml6030_hw_init(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev,
+ veml6030_als_shut_down_action, data);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int __maybe_unused veml6030_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_als_shut_down(data);
+ if (ret < 0)
+ dev_err(&data->client->dev, "can't suspend als %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused veml6030_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_als_pwr_on(data);
+ if (ret < 0)
+ dev_err(&data->client->dev, "can't resume als %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops veml6030_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(veml6030_runtime_suspend,
+ veml6030_runtime_resume, NULL)
+};
+
+static const struct of_device_id veml6030_of_match[] = {
+ { .compatible = "vishay,veml6030" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, veml6030_of_match);
+
+static const struct i2c_device_id veml6030_id[] = {
+ { "veml6030", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6030_id);
+
+static struct i2c_driver veml6030_driver = {
+ .driver = {
+ .name = "veml6030",
+ .of_match_table = veml6030_of_match,
+ .pm = &veml6030_pm_ops,
+ },
+ .probe = veml6030_probe,
+ .id_table = veml6030_id,
+};
+module_i2c_driver(veml6030_driver);
+
+MODULE_AUTHOR("Rishi Gupta <gupt21@gmail.com>");
+MODULE_DESCRIPTION("VEML6030 Ambient Light Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index a3a268ee2896..e68184a93a6d 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 8d0f15f27dc5..29c209cc1108 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -74,6 +74,12 @@ struct bmp280_calib {
s8 H6;
};
+static const char *const bmp280_supply_names[] = {
+ "vddd", "vdda"
+};
+
+#define BMP280_NUM_SUPPLIES ARRAY_SIZE(bmp280_supply_names)
+
struct bmp280_data {
struct device *dev;
struct mutex lock;
@@ -85,8 +91,7 @@ struct bmp280_data {
struct bmp180_calib bmp180;
struct bmp280_calib bmp280;
} calib;
- struct regulator *vddd;
- struct regulator *vdda;
+ struct regulator_bulk_data supplies[BMP280_NUM_SUPPLIES];
unsigned int start_up_time; /* in microseconds */
/* log of base 2 of oversampling rate */
@@ -148,6 +153,8 @@ static int bmp280_read_calib(struct bmp280_data *data,
{
int ret;
unsigned int tmp;
+ __le16 l16;
+ __be16 b16;
struct device *dev = data->dev;
__le16 t_buf[BMP280_COMP_TEMP_REG_COUNT / 2];
__le16 p_buf[BMP280_COMP_PRESS_REG_COUNT / 2];
@@ -207,12 +214,12 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H1 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &l16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H2 comp value\n");
return ret;
}
- calib->H2 = sign_extend32(le16_to_cpu(tmp), 15);
+ calib->H2 = sign_extend32(le16_to_cpu(l16), 15);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
if (ret < 0) {
@@ -221,20 +228,20 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H3 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &b16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H4 comp value\n");
return ret;
}
- calib->H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
- (be16_to_cpu(tmp) & 0xf), 11);
+ calib->H4 = sign_extend32(((be16_to_cpu(b16) >> 4) & 0xff0) |
+ (be16_to_cpu(b16) & 0xf), 11);
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &l16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H5 comp value\n");
return ret;
}
- calib->H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
+ calib->H5 = sign_extend32(((le16_to_cpu(l16) >> 4) & 0xfff), 11);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
if (ret < 0) {
@@ -979,6 +986,22 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
return 0;
}
+static void bmp280_pm_disable(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+}
+
+static void bmp280_regulators_disable(void *data)
+{
+ struct regulator_bulk_data *supplies = data;
+
+ regulator_bulk_disable(BMP280_NUM_SUPPLIES, supplies);
+}
+
int bmp280_common_probe(struct device *dev,
struct regmap *regmap,
unsigned int chip,
@@ -1033,27 +1056,28 @@ int bmp280_common_probe(struct device *dev,
}
/* Bring up regulators */
- data->vddd = devm_regulator_get(dev, "vddd");
- if (IS_ERR(data->vddd)) {
- dev_err(dev, "failed to get VDDD regulator\n");
- return PTR_ERR(data->vddd);
- }
- ret = regulator_enable(data->vddd);
+ regulator_bulk_set_supply_names(data->supplies,
+ bmp280_supply_names,
+ BMP280_NUM_SUPPLIES);
+
+ ret = devm_regulator_bulk_get(dev,
+ BMP280_NUM_SUPPLIES, data->supplies);
if (ret) {
- dev_err(dev, "failed to enable VDDD regulator\n");
+ dev_err(dev, "failed to get regulators\n");
return ret;
}
- data->vdda = devm_regulator_get(dev, "vdda");
- if (IS_ERR(data->vdda)) {
- dev_err(dev, "failed to get VDDA regulator\n");
- ret = PTR_ERR(data->vdda);
- goto out_disable_vddd;
- }
- ret = regulator_enable(data->vdda);
+
+ ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
if (ret) {
- dev_err(dev, "failed to enable VDDA regulator\n");
- goto out_disable_vddd;
+ dev_err(dev, "failed to enable regulators\n");
+ return ret;
}
+
+ ret = devm_add_action_or_reset(dev, bmp280_regulators_disable,
+ data->supplies);
+ if (ret)
+ return ret;
+
/* Wait to make sure we started up properly */
usleep_range(data->start_up_time, data->start_up_time + 100);
@@ -1068,17 +1092,16 @@ int bmp280_common_probe(struct device *dev,
data->regmap = regmap;
ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
if (ret < 0)
- goto out_disable_vdda;
+ return ret;
if (chip_id != chip) {
dev_err(dev, "bad chip id: expected %x got %x\n",
chip, chip_id);
- ret = -EINVAL;
- goto out_disable_vdda;
+ return -EINVAL;
}
ret = data->chip_info->chip_config(data);
if (ret < 0)
- goto out_disable_vdda;
+ return ret;
dev_set_drvdata(dev, indio_dev);
@@ -1092,14 +1115,14 @@ int bmp280_common_probe(struct device *dev,
if (ret < 0) {
dev_err(data->dev,
"failed to read calibration coefficients\n");
- goto out_disable_vdda;
+ return ret;
}
} else if (chip_id == BMP280_CHIP_ID || chip_id == BME280_CHIP_ID) {
ret = bmp280_read_calib(data, &data->calib.bmp280, chip_id);
if (ret < 0) {
dev_err(data->dev,
"failed to read calibration coefficients\n");
- goto out_disable_vdda;
+ return ret;
}
}
@@ -1111,7 +1134,7 @@ int bmp280_common_probe(struct device *dev,
if (irq > 0 || (chip_id == BMP180_CHIP_ID)) {
ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
if (ret)
- goto out_disable_vdda;
+ return ret;
}
/* Enable runtime PM */
@@ -1126,51 +1149,21 @@ int bmp280_common_probe(struct device *dev,
pm_runtime_use_autosuspend(dev);
pm_runtime_put(dev);
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(dev, bmp280_pm_disable, dev);
if (ret)
- goto out_runtime_pm_disable;
-
-
- return 0;
+ return ret;
-out_runtime_pm_disable:
- pm_runtime_get_sync(data->dev);
- pm_runtime_put_noidle(data->dev);
- pm_runtime_disable(data->dev);
-out_disable_vdda:
- regulator_disable(data->vdda);
-out_disable_vddd:
- regulator_disable(data->vddd);
- return ret;
+ return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL(bmp280_common_probe);
-int bmp280_common_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct bmp280_data *data = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- pm_runtime_get_sync(data->dev);
- pm_runtime_put_noidle(data->dev);
- pm_runtime_disable(data->dev);
- regulator_disable(data->vdda);
- regulator_disable(data->vddd);
- return 0;
-}
-EXPORT_SYMBOL(bmp280_common_remove);
-
#ifdef CONFIG_PM
static int bmp280_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmp280_data *data = iio_priv(indio_dev);
- int ret;
- ret = regulator_disable(data->vdda);
- if (ret)
- return ret;
- return regulator_disable(data->vddd);
+ return regulator_bulk_disable(BMP280_NUM_SUPPLIES, data->supplies);
}
static int bmp280_runtime_resume(struct device *dev)
@@ -1179,10 +1172,7 @@ static int bmp280_runtime_resume(struct device *dev)
struct bmp280_data *data = iio_priv(indio_dev);
int ret;
- ret = regulator_enable(data->vddd);
- if (ret)
- return ret;
- ret = regulator_enable(data->vdda);
+ ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
if (ret)
return ret;
usleep_range(data->start_up_time, data->start_up_time + 100);
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index acd9a3784fb4..3109c8e2cc11 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -38,11 +38,6 @@ static int bmp280_i2c_probe(struct i2c_client *client,
client->irq);
}
-static int bmp280_i2c_remove(struct i2c_client *client)
-{
- return bmp280_common_remove(&client->dev);
-}
-
static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
{"BMP0280", BMP280_CHIP_ID },
{"BMP0180", BMP180_CHIP_ID },
@@ -82,7 +77,6 @@ static struct i2c_driver bmp280_i2c_driver = {
.pm = &bmp280_dev_pm_ops,
},
.probe = bmp280_i2c_probe,
- .remove = bmp280_i2c_remove,
.id_table = bmp280_i2c_id,
};
module_i2c_driver(bmp280_i2c_driver);
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 9d57b7a3b134..625b86878ad8 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -86,11 +86,6 @@ static int bmp280_spi_probe(struct spi_device *spi)
spi->irq);
}
-static int bmp280_spi_remove(struct spi_device *spi)
-{
- return bmp280_common_remove(&spi->dev);
-}
-
static const struct of_device_id bmp280_of_spi_match[] = {
{ .compatible = "bosch,bmp085", },
{ .compatible = "bosch,bmp180", },
@@ -118,7 +113,6 @@ static struct spi_driver bmp280_spi_driver = {
},
.id_table = bmp280_spi_id,
.probe = bmp280_spi_probe,
- .remove = bmp280_spi_remove,
};
module_spi_driver(bmp280_spi_driver);
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index eda50ef65706..57ba0e85db91 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -112,7 +112,6 @@ int bmp280_common_probe(struct device *dev,
unsigned int chip,
const char *name,
int irq);
-int bmp280_common_remove(struct device *dev);
/* PM ops */
extern const struct dev_pm_ops bmp280_dev_pm_ops;
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 2354302375de..52f53f3123b1 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -114,6 +114,7 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev,
static const struct iio_info cros_ec_baro_info = {
.read_raw = &cros_ec_baro_read,
.write_raw = &cros_ec_baro_write,
+ .read_avail = &cros_ec_sensors_core_read_avail,
};
static int cros_ec_baro_probe(struct platform_device *pdev)
@@ -149,6 +150,8 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_FREQUENCY);
+ channel->info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
channel->scan_type.shift = 0;
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index ca6863b32a5f..bd972cec4830 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 9d0d07930236..99dfe33ee402 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -1243,6 +1243,11 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
const struct zpa2326_private *priv = iio_priv(indio_dev);
int err;
+ /* Plug our own trigger event handler. */
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err)
+ goto err;
+
if (!priv->waken) {
/*
* We were already power supplied. Just clear hardware FIFO to
@@ -1250,7 +1255,7 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
*/
err = zpa2326_clear_fifo(indio_dev, 0);
if (err)
- goto err;
+ goto err_buffer_predisable;
}
if (!iio_trigger_using_own(indio_dev) && priv->waken) {
@@ -1260,16 +1265,13 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
*/
err = zpa2326_config_oneshot(indio_dev, priv->irq);
if (err)
- goto err;
+ goto err_buffer_predisable;
}
- /* Plug our own trigger event handler. */
- err = iio_triggered_buffer_postenable(indio_dev);
- if (err)
- goto err;
-
return 0;
+err_buffer_predisable:
+ iio_triggered_buffer_predisable(indio_dev);
err:
zpa2326_err(indio_dev, "failed to enable buffering (%d)", err);
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 47af54f14756..5b369645ef49 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -136,12 +136,13 @@ static inline int lidar_write_power(struct lidar_data *data, int val)
static int lidar_read_measurement(struct lidar_data *data, u16 *reg)
{
+ __be16 value;
int ret = data->xfer(data, LIDAR_REG_DATA_HBYTE |
(data->i2c_enabled ? LIDAR_REG_DATA_WORD_READ : 0),
- (u8 *) reg, 2);
+ (u8 *) &value, 2);
if (!ret)
- *reg = be16_to_cpu(*reg);
+ *reg = be16_to_cpu(value);
return ret;
}
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 612f79c53cfc..287d288e40c2 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -675,11 +675,15 @@ out:
return IRQ_HANDLED;
}
-static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
+static int sx9500_buffer_postenable(struct iio_dev *indio_dev)
{
struct sx9500_data *data = iio_priv(indio_dev);
int ret = 0, i;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&data->mutex);
for (i = 0; i < SX9500_NUM_CHANNELS; i++)
@@ -696,6 +700,9 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
mutex_unlock(&data->mutex);
+ if (ret)
+ iio_triggered_buffer_predisable(indio_dev);
+
return ret;
}
@@ -704,8 +711,6 @@ static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
struct sx9500_data *data = iio_priv(indio_dev);
int ret = 0, i;
- iio_triggered_buffer_predisable(indio_dev);
-
mutex_lock(&data->mutex);
for (i = 0; i < SX9500_NUM_CHANNELS; i++)
@@ -722,12 +727,13 @@ static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
mutex_unlock(&data->mutex);
+ iio_triggered_buffer_predisable(indio_dev);
+
return ret;
}
static const struct iio_buffer_setup_ops sx9500_buffer_setup_ops = {
- .preenable = sx9500_buffer_preenable,
- .postenable = iio_triggered_buffer_postenable,
+ .postenable = sx9500_buffer_postenable,
.predisable = sx9500_buffer_predisable,
};
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index 737faa0901fe..e1ccb4003015 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -4,6 +4,17 @@
#
menu "Temperature sensors"
+config LTC2983
+ tristate "Analog Devices Multi-Sensor Digital Temperature Measurement System"
+ depends on SPI
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the LTC2983 Multi-Sensor
+ high accuracy digital temperature measurement system.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ltc2983.
+
config MAXIM_THERMOCOUPLE
tristate "Maxim thermocouple sensors"
depends on SPI
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index baca4776ca0d..d6b850b0cf63 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -3,6 +3,7 @@
# Makefile for industrial I/O temperature drivers
#
+obj-$(CONFIG_LTC2983) += ltc2983.o
obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
obj-$(CONFIG_MAX31856) += max31856.o
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
new file mode 100644
index 000000000000..ddf47023364b
--- /dev/null
+++ b/drivers/iio/temperature/ltc2983.c
@@ -0,0 +1,1557 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2983 Multi-Sensor Digital Temperature Measurement System
+ * driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+/* register map */
+#define LTC2983_STATUS_REG 0x0000
+#define LTC2983_TEMP_RES_START_REG 0x0010
+#define LTC2983_TEMP_RES_END_REG 0x005F
+#define LTC2983_GLOBAL_CONFIG_REG 0x00F0
+#define LTC2983_MULT_CHANNEL_START_REG 0x00F4
+#define LTC2983_MULT_CHANNEL_END_REG 0x00F7
+#define LTC2983_MUX_CONFIG_REG 0x00FF
+#define LTC2983_CHAN_ASSIGN_START_REG 0x0200
+#define LTC2983_CHAN_ASSIGN_END_REG 0x024F
+#define LTC2983_CUST_SENS_TBL_START_REG 0x0250
+#define LTC2983_CUST_SENS_TBL_END_REG 0x03CF
+
+#define LTC2983_DIFFERENTIAL_CHAN_MIN 2
+#define LTC2983_MAX_CHANNELS_NR 20
+#define LTC2983_MIN_CHANNELS_NR 1
+#define LTC2983_SLEEP 0x97
+#define LTC2983_CUSTOM_STEINHART_SIZE 24
+#define LTC2983_CUSTOM_SENSOR_ENTRY_SZ 6
+#define LTC2983_CUSTOM_STEINHART_ENTRY_SZ 4
+
+#define LTC2983_CHAN_START_ADDR(chan) \
+ (((chan - 1) * 4) + LTC2983_CHAN_ASSIGN_START_REG)
+#define LTC2983_CHAN_RES_ADDR(chan) \
+ (((chan - 1) * 4) + LTC2983_TEMP_RES_START_REG)
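+/* e.g. channel 3 maps to assignment address 0x0208 and result address 0x0018 */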
+#define LTC2983_THERMOCOUPLE_DIFF_MASK BIT(3)
+#define LTC2983_THERMOCOUPLE_SGL(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_DIFF_MASK, x)
+#define LTC2983_THERMOCOUPLE_OC_CURR_MASK GENMASK(1, 0)
+#define LTC2983_THERMOCOUPLE_OC_CURR(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_OC_CURR_MASK, x)
+#define LTC2983_THERMOCOUPLE_OC_CHECK_MASK BIT(2)
+#define LTC2983_THERMOCOUPLE_OC_CHECK(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_OC_CHECK_MASK, x)
+
+#define LTC2983_THERMISTOR_DIFF_MASK BIT(2)
+#define LTC2983_THERMISTOR_SGL(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_DIFF_MASK, x)
+#define LTC2983_THERMISTOR_R_SHARE_MASK BIT(1)
+#define LTC2983_THERMISTOR_R_SHARE(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_R_SHARE_MASK, x)
+#define LTC2983_THERMISTOR_C_ROTATE_MASK BIT(0)
+#define LTC2983_THERMISTOR_C_ROTATE(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_C_ROTATE_MASK, x)
+
+#define LTC2983_DIODE_DIFF_MASK BIT(2)
+#define LTC2983_DIODE_SGL(x) \
+ FIELD_PREP(LTC2983_DIODE_DIFF_MASK, x)
+#define LTC2983_DIODE_3_CONV_CYCLE_MASK BIT(1)
+#define LTC2983_DIODE_3_CONV_CYCLE(x) \
+ FIELD_PREP(LTC2983_DIODE_3_CONV_CYCLE_MASK, x)
+#define LTC2983_DIODE_AVERAGE_ON_MASK BIT(0)
+#define LTC2983_DIODE_AVERAGE_ON(x) \
+ FIELD_PREP(LTC2983_DIODE_AVERAGE_ON_MASK, x)
+
+#define LTC2983_RTD_4_WIRE_MASK BIT(3)
+#define LTC2983_RTD_ROTATION_MASK BIT(1)
+#define LTC2983_RTD_C_ROTATE(x) \
+ FIELD_PREP(LTC2983_RTD_ROTATION_MASK, x)
+#define LTC2983_RTD_KELVIN_R_SENSE_MASK GENMASK(3, 2)
+#define LTC2983_RTD_N_WIRES_MASK GENMASK(3, 2)
+#define LTC2983_RTD_N_WIRES(x) \
+ FIELD_PREP(LTC2983_RTD_N_WIRES_MASK, x)
+#define LTC2983_RTD_R_SHARE_MASK BIT(0)
+#define LTC2983_RTD_R_SHARE(x) \
+ FIELD_PREP(LTC2983_RTD_R_SHARE_MASK, 1)
+
+#define LTC2983_COMMON_HARD_FAULT_MASK GENMASK(31, 30)
+#define LTC2983_COMMON_SOFT_FAULT_MASK GENMASK(27, 25)
+
+#define LTC2983_STATUS_START_MASK BIT(7)
+#define LTC2983_STATUS_START(x) FIELD_PREP(LTC2983_STATUS_START_MASK, x)
+
+#define LTC2983_STATUS_CHAN_SEL_MASK GENMASK(4, 0)
+#define LTC2983_STATUS_CHAN_SEL(x) \
+ FIELD_PREP(LTC2983_STATUS_CHAN_SEL_MASK, x)
+
+#define LTC2983_TEMP_UNITS_MASK BIT(2)
+#define LTC2983_TEMP_UNITS(x) FIELD_PREP(LTC2983_TEMP_UNITS_MASK, x)
+
+#define LTC2983_NOTCH_FREQ_MASK GENMASK(1, 0)
+#define LTC2983_NOTCH_FREQ(x) FIELD_PREP(LTC2983_NOTCH_FREQ_MASK, x)
+
+#define LTC2983_RES_VALID_MASK BIT(24)
+#define LTC2983_DATA_MASK GENMASK(23, 0)
+#define LTC2983_DATA_SIGN_BIT 23
+
+#define LTC2983_CHAN_TYPE_MASK GENMASK(31, 27)
+#define LTC2983_CHAN_TYPE(x) FIELD_PREP(LTC2983_CHAN_TYPE_MASK, x)
+
+/* cold junction for thermocouples and rsense for RTDs and thermistors */
+#define LTC2983_CHAN_ASSIGN_MASK GENMASK(26, 22)
+#define LTC2983_CHAN_ASSIGN(x) FIELD_PREP(LTC2983_CHAN_ASSIGN_MASK, x)
+
+#define LTC2983_CUSTOM_LEN_MASK GENMASK(5, 0)
+#define LTC2983_CUSTOM_LEN(x) FIELD_PREP(LTC2983_CUSTOM_LEN_MASK, x)
+
+#define LTC2983_CUSTOM_ADDR_MASK GENMASK(11, 6)
+#define LTC2983_CUSTOM_ADDR(x) FIELD_PREP(LTC2983_CUSTOM_ADDR_MASK, x)
+
+#define LTC2983_THERMOCOUPLE_CFG_MASK GENMASK(21, 18)
+#define LTC2983_THERMOCOUPLE_CFG(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_CFG_MASK, x)
+#define LTC2983_THERMOCOUPLE_HARD_FAULT_MASK GENMASK(31, 29)
+#define LTC2983_THERMOCOUPLE_SOFT_FAULT_MASK GENMASK(28, 25)
+
+#define LTC2983_RTD_CFG_MASK GENMASK(21, 18)
+#define LTC2983_RTD_CFG(x) FIELD_PREP(LTC2983_RTD_CFG_MASK, x)
+#define LTC2983_RTD_EXC_CURRENT_MASK GENMASK(17, 14)
+#define LTC2983_RTD_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_RTD_EXC_CURRENT_MASK, x)
+#define LTC2983_RTD_CURVE_MASK GENMASK(13, 12)
+#define LTC2983_RTD_CURVE(x) FIELD_PREP(LTC2983_RTD_CURVE_MASK, x)
+
+#define LTC2983_THERMISTOR_CFG_MASK GENMASK(21, 19)
+#define LTC2983_THERMISTOR_CFG(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_CFG_MASK, x)
+#define LTC2983_THERMISTOR_EXC_CURRENT_MASK GENMASK(18, 15)
+#define LTC2983_THERMISTOR_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_EXC_CURRENT_MASK, x)
+
+#define LTC2983_DIODE_CFG_MASK GENMASK(26, 24)
+#define LTC2983_DIODE_CFG(x) FIELD_PREP(LTC2983_DIODE_CFG_MASK, x)
+#define LTC2983_DIODE_EXC_CURRENT_MASK GENMASK(23, 22)
+#define LTC2983_DIODE_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_DIODE_EXC_CURRENT_MASK, x)
+#define LTC2983_DIODE_IDEAL_FACTOR_MASK GENMASK(21, 0)
+#define LTC2983_DIODE_IDEAL_FACTOR(x) \
+ FIELD_PREP(LTC2983_DIODE_IDEAL_FACTOR_MASK, x)
+
+#define LTC2983_R_SENSE_VAL_MASK GENMASK(26, 0)
+#define LTC2983_R_SENSE_VAL(x) FIELD_PREP(LTC2983_R_SENSE_VAL_MASK, x)
+
+#define LTC2983_ADC_SINGLE_ENDED_MASK BIT(26)
+#define LTC2983_ADC_SINGLE_ENDED(x) \
+ FIELD_PREP(LTC2983_ADC_SINGLE_ENDED_MASK, x)
+
+enum {
+ LTC2983_SENSOR_THERMOCOUPLE = 1,
+ LTC2983_SENSOR_THERMOCOUPLE_CUSTOM = 9,
+ LTC2983_SENSOR_RTD = 10,
+ LTC2983_SENSOR_RTD_CUSTOM = 18,
+ LTC2983_SENSOR_THERMISTOR = 19,
+ LTC2983_SENSOR_THERMISTOR_STEINHART = 26,
+ LTC2983_SENSOR_THERMISTOR_CUSTOM = 27,
+ LTC2983_SENSOR_DIODE = 28,
+ LTC2983_SENSOR_SENSE_RESISTOR = 29,
+ LTC2983_SENSOR_DIRECT_ADC = 30,
+};
+
+#define to_thermocouple(_sensor) \
+ container_of(_sensor, struct ltc2983_thermocouple, sensor)
+
+#define to_rtd(_sensor) \
+ container_of(_sensor, struct ltc2983_rtd, sensor)
+
+#define to_thermistor(_sensor) \
+ container_of(_sensor, struct ltc2983_thermistor, sensor)
+
+#define to_diode(_sensor) \
+ container_of(_sensor, struct ltc2983_diode, sensor)
+
+#define to_rsense(_sensor) \
+ container_of(_sensor, struct ltc2983_rsense, sensor)
+
+#define to_adc(_sensor) \
+ container_of(_sensor, struct ltc2983_adc, sensor)
+
+struct ltc2983_data {
+ struct regmap *regmap;
+ struct spi_device *spi;
+ struct mutex lock;
+ struct completion completion;
+ struct iio_chan_spec *iio_chan;
+ struct ltc2983_sensor **sensors;
+ u32 mux_delay_config;
+ u32 filter_notch_freq;
+ u16 custom_table_size;
+ u8 num_channels;
+ u8 iio_channels;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * This buffer holds the converted temperature.
+ */
+ __be32 temp ____cacheline_aligned;
+};
+
+struct ltc2983_sensor {
+ int (*fault_handler)(const struct ltc2983_data *st, const u32 result);
+ int (*assign_chan)(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor);
+ /* specifies the sensor channel */
+ u32 chan;
+ /* sensor type */
+ u32 type;
+};
+
+struct ltc2983_custom_sensor {
+ /* raw table sensor data */
+ u8 *table;
+ size_t size;
+ /* address offset */
+ s8 offset;
+ bool is_steinhart;
+};
+
+struct ltc2983_thermocouple {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 cold_junction_chan;
+};
+
+struct ltc2983_rtd {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 r_sense_chan;
+ u32 excitation_current;
+ u32 rtd_curve;
+};
+
+struct ltc2983_thermistor {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 r_sense_chan;
+ u32 excitation_current;
+};
+
+struct ltc2983_diode {
+ struct ltc2983_sensor sensor;
+ u32 sensor_config;
+ u32 excitation_current;
+ u32 ideal_factor_value;
+};
+
+struct ltc2983_rsense {
+ struct ltc2983_sensor sensor;
+ u32 r_sense_val;
+};
+
+struct ltc2983_adc {
+ struct ltc2983_sensor sensor;
+ bool single_ended;
+};
+
+/*
+ * Convert to Q format numbers. These numbers are integers where the number
+ * of integer and fractional bits is specified. The resolution is given by
+ * 1/@resolution and tells us the number of fractional bits. For instance,
+ * a resolution of 2^-10 means we have 10 fractional bits.
+ */
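+/*
+ * All values passed in are scaled by 1000000, e.g.
+ * __convert_to_raw(1250000, 1024) = 1250000 * 1024 / 1000000 = 1280.
+ */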
+static u32 __convert_to_raw(const u64 val, const u32 resolution)
+{
+ u64 __res = val * resolution;
+
+ /* all values are multiplied by 1000000 to remove the fraction */
+ do_div(__res, 1000000);
+
+ return __res;
+}
+
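+/*
+ * Same as __convert_to_raw() but for negative values: the magnitude is
+ * converted and the raw result is negated again so the two's complement
+ * sign is preserved.
+ */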
+static u32 __convert_to_raw_sign(const u64 val, const u32 resolution)
+{
+ s64 __res = -(s32)val;
+
+ __res = __convert_to_raw(__res, resolution);
+
+ return (u32)-__res;
+}
+
+static int __ltc2983_fault_handler(const struct ltc2983_data *st,
+ const u32 result, const u32 hard_mask,
+ const u32 soft_mask)
+{
+ const struct device *dev = &st->spi->dev;
+
+ if (result & hard_mask) {
+ dev_err(dev, "Invalid conversion: Sensor HARD fault\n");
+ return -EIO;
+ } else if (result & soft_mask) {
+ /* just print a warning */
+ dev_warn(dev, "Suspicious conversion: Sensor SOFT fault\n");
+ }
+
+ return 0;
+}
+
+static int __ltc2983_chan_assign_common(const struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor,
+ u32 chan_val)
+{
+ u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan);
+ __be32 __chan_val;
+
+ chan_val |= LTC2983_CHAN_TYPE(sensor->type);
+ dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg,
+ chan_val);
+ __chan_val = cpu_to_be32(chan_val);
+ return regmap_bulk_write(st->regmap, reg, &__chan_val,
+ sizeof(__chan_val));
+}
+
+static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st,
+ struct ltc2983_custom_sensor *custom,
+ u32 *chan_val)
+{
+ u32 reg;
+ u8 mult = custom->is_steinhart ? LTC2983_CUSTOM_STEINHART_ENTRY_SZ :
+ LTC2983_CUSTOM_SENSOR_ENTRY_SZ;
+ const struct device *dev = &st->spi->dev;
+ /*
+ * custom->size holds the raw size of the table. However, when
+ * configuring the sensor channel, we must write the number of
+ * entries of the table minus 1. For steinhart sensors 0 is written
+ * since the size is constant!
+ */
+ const u8 len = custom->is_steinhart ? 0 :
+ (custom->size / LTC2983_CUSTOM_SENSOR_ENTRY_SZ) - 1;
+ /*
+ * Check if the offset was assigned already. It should be for steinhart
+ * sensors. When coming from sleep, it should be assigned for all.
+ */
+ if (custom->offset < 0) {
+ /*
+ * The size check needs to be redone here because a steinhart
+ * sensor might have been added after this check succeeded for
+ * this custom sensor, changing custom_table_size in the
+ * meantime...
+ */
+ if (st->custom_table_size + custom->size >
+ (LTC2983_CUST_SENS_TBL_END_REG -
+ LTC2983_CUST_SENS_TBL_START_REG) + 1) {
+ dev_err(dev,
+ "Not space left(%d) for new custom sensor(%zu)",
+ st->custom_table_size,
+ custom->size);
+ return -EINVAL;
+ }
+
+ custom->offset = st->custom_table_size /
+ LTC2983_CUSTOM_SENSOR_ENTRY_SZ;
+ st->custom_table_size += custom->size;
+ }
+
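+ /* translate the entry offset into a byte address inside the custom table */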
+ reg = (custom->offset * mult) + LTC2983_CUST_SENS_TBL_START_REG;
+
+ *chan_val |= LTC2983_CUSTOM_LEN(len);
+ *chan_val |= LTC2983_CUSTOM_ADDR(custom->offset);
+ dev_dbg(dev, "Assign custom sensor, reg:0x%04X, off:%d, sz:%zu",
+ reg, custom->offset,
+ custom->size);
+ /* write custom sensor table */
+ return regmap_bulk_write(st->regmap, reg, custom->table, custom->size);
+}
+
+static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
+ struct ltc2983_data *st,
+ const struct device_node *np,
+ const char *propname,
+ const bool is_steinhart,
+ const u32 resolution,
+ const bool has_signed)
+{
+ struct ltc2983_custom_sensor *new_custom;
+ u8 index, n_entries, tbl = 0;
+ struct device *dev = &st->spi->dev;
+ /*
+ * For custom steinhart, the full u32 is taken. For all the others
+ * the MSB is discarded.
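+ * Steinhart values are read as u32 from the devicetree while the
+ * other custom tables are read as u64.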
+ */
+ const u8 n_size = is_steinhart ? 4 : 3;
+ const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
+
+ n_entries = of_property_count_elems_of_size(np, propname, e_size);
+ /* n_entries must be an even number */
+ if (!n_entries || (n_entries % 2) != 0) {
+ dev_err(dev, "Number of entries either 0 or not even\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ new_custom = devm_kzalloc(dev, sizeof(*new_custom), GFP_KERNEL);
+ if (!new_custom)
+ return ERR_PTR(-ENOMEM);
+
+ new_custom->size = n_entries * n_size;
+ /* check Steinhart size */
+ if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE) {
+ dev_err(dev, "Steinhart sensors size(%zu) must be 24",
+ new_custom->size);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Check space on the table. */
+ if (st->custom_table_size + new_custom->size >
+ (LTC2983_CUST_SENS_TBL_END_REG -
+ LTC2983_CUST_SENS_TBL_START_REG) + 1) {
+ dev_err(dev, "No space left(%d) for new custom sensor(%zu)",
+ st->custom_table_size, new_custom->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* allocate the table */
+ new_custom->table = devm_kzalloc(dev, new_custom->size, GFP_KERNEL);
+ if (!new_custom->table)
+ return ERR_PTR(-ENOMEM);
+
+ for (index = 0; index < n_entries; index++) {
+ u64 temp = 0, j;
+ /*
+ * Steinhart sensors are configured with raw values in the
+ * devicetree. For the other sensors we must convert the
+ * value to raw. The odd indexes correspond to temperatures
+ * and always have a resolution of 1/1024. Temperatures also
+ * come in kelvin, so signed values are not possible.
+ */
+ if (!is_steinhart) {
+ of_property_read_u64_index(np, propname, index, &temp);
+
+ if ((index % 2) != 0)
+ temp = __convert_to_raw(temp, 1024);
+ else if (has_signed && (s64)temp < 0)
+ temp = __convert_to_raw_sign(temp, resolution);
+ else
+ temp = __convert_to_raw(temp, resolution);
+ } else {
+ of_property_read_u32_index(np, propname, index,
+ (u32 *)&temp);
+ }
+
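+ /* pack the raw value into the table, most significant byte first */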
+ for (j = 0; j < n_size; j++)
+ new_custom->table[tbl++] =
+ temp >> (8 * (n_size - j - 1));
+ }
+
+ new_custom->is_steinhart = is_steinhart;
+ /*
+ * This is done to first add all the steinhart sensors to the table,
+ * in order to maximize the table usage. If we mix adding steinhart
+ * with the other sensors, we might have to do some roundup to make
+ * sure that sensor_addr - 0x250(start address) is a multiple of 4
+ * (for steinhart), and a multiple of 6 for all the other sensors.
+ * Since steinhart sensors take a constant 24 bytes and 24 is
+ * also a multiple of 6, we guarantee that the first non-steinhart
+ * sensor will sit in a correct address without the need of filling
+ * addresses.
+ */
+ if (is_steinhart) {
+ new_custom->offset = st->custom_table_size /
+ LTC2983_CUSTOM_STEINHART_ENTRY_SZ;
+ st->custom_table_size += new_custom->size;
+ } else {
+ /* mark as unset. This is checked later in the assign phase */
+ new_custom->offset = -1;
+ }
+
+ return new_custom;
+}
+
+static int ltc2983_thermocouple_fault_handler(const struct ltc2983_data *st,
+ const u32 result)
+{
+ return __ltc2983_fault_handler(st, result,
+ LTC2983_THERMOCOUPLE_HARD_FAULT_MASK,
+ LTC2983_THERMOCOUPLE_SOFT_FAULT_MASK);
+}
+
+static int ltc2983_common_fault_handler(const struct ltc2983_data *st,
+ const u32 result)
+{
+ return __ltc2983_fault_handler(st, result,
+ LTC2983_COMMON_HARD_FAULT_MASK,
+ LTC2983_COMMON_SOFT_FAULT_MASK);
+}
+
+static int ltc2983_thermocouple_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermocouple *thermo = to_thermocouple(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(thermo->cold_junction_chan);
+ chan_val |= LTC2983_THERMOCOUPLE_CFG(thermo->sensor_config);
+
+ if (thermo->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st, thermo->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_rtd_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rtd *rtd = to_rtd(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(rtd->r_sense_chan);
+ chan_val |= LTC2983_RTD_CFG(rtd->sensor_config);
+ chan_val |= LTC2983_RTD_EXC_CURRENT(rtd->excitation_current);
+ chan_val |= LTC2983_RTD_CURVE(rtd->rtd_curve);
+
+ if (rtd->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st, rtd->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_thermistor_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermistor *thermistor = to_thermistor(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(thermistor->r_sense_chan);
+ chan_val |= LTC2983_THERMISTOR_CFG(thermistor->sensor_config);
+ chan_val |=
+ LTC2983_THERMISTOR_EXC_CURRENT(thermistor->excitation_current);
+
+ if (thermistor->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st,
+ thermistor->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_diode_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_diode *diode = to_diode(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_DIODE_CFG(diode->sensor_config);
+ chan_val |= LTC2983_DIODE_EXC_CURRENT(diode->excitation_current);
+ chan_val |= LTC2983_DIODE_IDEAL_FACTOR(diode->ideal_factor_value);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_r_sense_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rsense *rsense = to_rsense(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_R_SENSE_VAL(rsense->r_sense_val);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_adc_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_adc *adc = to_adc(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_ADC_SINGLE_ENDED(adc->single_ended);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static struct ltc2983_sensor *ltc2983_thermocouple_new(
+ const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermocouple *thermo;
+ struct device_node *phandle;
+ u32 oc_current;
+ int ret;
+
+ thermo = devm_kzalloc(&st->spi->dev, sizeof(*thermo), GFP_KERNEL);
+ if (!thermo)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ thermo->sensor_config = LTC2983_THERMOCOUPLE_SGL(1);
+
+ ret = of_property_read_u32(child, "adi,sensor-oc-current-microamp",
+ &oc_current);
+ if (!ret) {
+ switch (oc_current) {
+ case 10:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(0);
+ break;
+ case 100:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(1);
+ break;
+ case 500:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(2);
+ break;
+ case 1000:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(3);
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid open circuit current:%u", oc_current);
+ return ERR_PTR(-EINVAL);
+ }
+
+ thermo->sensor_config |= LTC2983_THERMOCOUPLE_OC_CHECK(1);
+ }
+ /* validate channel index */
+ if (!(thermo->sensor_config & LTC2983_THERMOCOUPLE_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+ "Invalid chann:%d for differential thermocouple",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ phandle = of_parse_phandle(child, "adi,cold-junction-handle", 0);
+ if (phandle) {
+ int ret;
+
+ ret = of_property_read_u32(phandle, "reg",
+ &thermo->cold_junction_chan);
+ if (ret) {
+ /*
+ * This would be caught later but we can just return
+ * the error right away.
+ */
+ dev_err(&st->spi->dev, "Property reg must be given\n");
+ of_node_put(phandle);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* check custom sensor */
+ if (sensor->type == LTC2983_SENSOR_THERMOCOUPLE_CUSTOM) {
+ const char *propname = "adi,custom-thermocouple";
+
+ thermo->custom = __ltc2983_custom_sensor_new(st, child,
+ propname, false,
+ 16384, true);
+ if (IS_ERR(thermo->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(thermo->custom);
+ }
+ }
+
+ /* set common parameters */
+ thermo->sensor.fault_handler = ltc2983_thermocouple_fault_handler;
+ thermo->sensor.assign_chan = ltc2983_thermocouple_assign_chan;
+
+ of_node_put(phandle);
+ return &thermo->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rtd *rtd;
+ int ret = 0;
+ struct device *dev = &st->spi->dev;
+ struct device_node *phandle;
+ u32 excitation_current = 0, n_wires = 0;
+
+ rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL);
+ if (!rtd)
+ return ERR_PTR(-ENOMEM);
+
+ phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
+ if (!phandle) {
+ dev_err(dev, "Property adi,rsense-handle missing or invalid");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(phandle, "reg", &rtd->r_sense_chan);
+ if (ret) {
+ dev_err(dev, "Property reg must be given\n");
+ goto fail;
+ }
+
+ ret = of_property_read_u32(child, "adi,number-of-wires", &n_wires);
+ if (!ret) {
+ switch (n_wires) {
+ case 2:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(0);
+ break;
+ case 3:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(1);
+ break;
+ case 4:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(2);
+ break;
+ case 5:
+ /* 4 wires, Kelvin Rsense */
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(3);
+ break;
+ default:
+ dev_err(dev, "Invalid number of wires:%u\n", n_wires);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (of_property_read_bool(child, "adi,rsense-share")) {
+ /* Current rotation is only available with rsense sharing */
+ if (of_property_read_bool(child, "adi,current-rotate")) {
+ if (n_wires == 2 || n_wires == 3) {
+ dev_err(dev,
+ "Rotation not allowed for 2/3 Wire RTDs");
+ ret = -EINVAL;
+ goto fail;
+ }
+ rtd->sensor_config |= LTC2983_RTD_C_ROTATE(1);
+ } else {
+ rtd->sensor_config |= LTC2983_RTD_R_SHARE(1);
+ }
+ }
+ /*
+ * RTD channel indexes are a bit more complicated to validate.
+ * For a 4-wire RTD with rotation, the channel selection cannot be
+ * >=19 since channel + 1 is used in this configuration.
+ * For 4-wire RTDs with kelvin rsense, the rsense channel cannot be
+ * <=1 since channel - 1 and channel - 2 are used.
+ */
+ if (rtd->sensor_config & LTC2983_RTD_4_WIRE_MASK) {
+ /* 4-wire */
+ u8 min = LTC2983_DIFFERENTIAL_CHAN_MIN,
+ max = LTC2983_MAX_CHANNELS_NR;
+
+ if (rtd->sensor_config & LTC2983_RTD_ROTATION_MASK)
+ max = LTC2983_MAX_CHANNELS_NR - 1;
+
+ if (((rtd->sensor_config & LTC2983_RTD_KELVIN_R_SENSE_MASK)
+ == LTC2983_RTD_KELVIN_R_SENSE_MASK) &&
+ (rtd->r_sense_chan <= min)) {
+ /* kelvin rsense */
+ dev_err(dev,
+ "Invalid rsense chan:%d to use in kelvin rsense",
+ rtd->r_sense_chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (sensor->chan < min || sensor->chan > max) {
+ dev_err(dev, "Invalid chann:%d for the rtd config",
+ sensor->chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else {
+ /* same as differential case */
+ if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+ "Invalid chann:%d for RTD", sensor->chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* check custom sensor */
+ if (sensor->type == LTC2983_SENSOR_RTD_CUSTOM) {
+ rtd->custom = __ltc2983_custom_sensor_new(st, child,
+ "adi,custom-rtd",
+ false, 2048, false);
+ if (IS_ERR(rtd->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(rtd->custom);
+ }
+ }
+
+ /* set common parameters */
+ rtd->sensor.fault_handler = ltc2983_common_fault_handler;
+ rtd->sensor.assign_chan = ltc2983_rtd_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
+ if (ret) {
+ /* default to 5uA */
+ rtd->excitation_current = 1;
+ } else {
+ switch (excitation_current) {
+ case 5:
+ rtd->excitation_current = 0x01;
+ break;
+ case 10:
+ rtd->excitation_current = 0x02;
+ break;
+ case 25:
+ rtd->excitation_current = 0x03;
+ break;
+ case 50:
+ rtd->excitation_current = 0x04;
+ break;
+ case 100:
+ rtd->excitation_current = 0x05;
+ break;
+ case 250:
+ rtd->excitation_current = 0x06;
+ break;
+ case 500:
+ rtd->excitation_current = 0x07;
+ break;
+ case 1000:
+ rtd->excitation_current = 0x08;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ of_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
+
+ of_node_put(phandle);
+ return &rtd->sensor;
+fail:
+ of_node_put(phandle);
+ return ERR_PTR(ret);
+}
+
+static struct ltc2983_sensor *ltc2983_thermistor_new(
+ const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermistor *thermistor;
+ struct device *dev = &st->spi->dev;
+ struct device_node *phandle;
+ u32 excitation_current = 0;
+ int ret = 0;
+
+ thermistor = devm_kzalloc(dev, sizeof(*thermistor), GFP_KERNEL);
+ if (!thermistor)
+ return ERR_PTR(-ENOMEM);
+
+ phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
+ if (!phandle) {
+ dev_err(dev, "Property adi,rsense-handle missing or invalid");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(phandle, "reg", &thermistor->r_sense_chan);
+ if (ret) {
+ dev_err(dev, "rsense channel must be configured...\n");
+ goto fail;
+ }
+
+ if (of_property_read_bool(child, "adi,single-ended")) {
+ thermistor->sensor_config = LTC2983_THERMISTOR_SGL(1);
+ } else if (of_property_read_bool(child, "adi,rsense-share")) {
+ /* rotation is only possible if sharing rsense */
+ if (of_property_read_bool(child, "adi,current-rotate"))
+ thermistor->sensor_config =
+ LTC2983_THERMISTOR_C_ROTATE(1);
+ else
+ thermistor->sensor_config =
+ LTC2983_THERMISTOR_R_SHARE(1);
+ }
+ /* validate channel index */
+ if (!(thermistor->sensor_config & LTC2983_THERMISTOR_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+ "Invalid chann:%d for differential thermistor",
+ sensor->chan);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* check custom sensor */
+ if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ bool steinhart = false;
+ const char *propname;
+
+ if (sensor->type == LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ steinhart = true;
+ propname = "adi,custom-steinhart";
+ } else {
+ propname = "adi,custom-thermistor";
+ }
+
+ thermistor->custom = __ltc2983_custom_sensor_new(st, child,
+ propname,
+ steinhart,
+ 64, false);
+ if (IS_ERR(thermistor->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(thermistor->custom);
+ }
+ }
+ /* set common parameters */
+ thermistor->sensor.fault_handler = ltc2983_common_fault_handler;
+ thermistor->sensor.assign_chan = ltc2983_thermistor_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-nanoamp",
+ &excitation_current);
+ if (ret) {
+ /* Auto range is not allowed for custom sensors */
+ if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART)
+ /* default to 1uA */
+ thermistor->excitation_current = 0x03;
+ else
+ /* default to auto-range */
+ thermistor->excitation_current = 0x0c;
+ } else {
+ switch (excitation_current) {
+ case 0:
+ /* auto range */
+ if (sensor->type >=
+ LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ dev_err(&st->spi->dev,
+ "Auto Range not allowed for custom sensors\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ thermistor->excitation_current = 0x0c;
+ break;
+ case 250:
+ thermistor->excitation_current = 0x01;
+ break;
+ case 500:
+ thermistor->excitation_current = 0x02;
+ break;
+ case 1000:
+ thermistor->excitation_current = 0x03;
+ break;
+ case 5000:
+ thermistor->excitation_current = 0x04;
+ break;
+ case 10000:
+ thermistor->excitation_current = 0x05;
+ break;
+ case 25000:
+ thermistor->excitation_current = 0x06;
+ break;
+ case 50000:
+ thermistor->excitation_current = 0x07;
+ break;
+ case 100000:
+ thermistor->excitation_current = 0x08;
+ break;
+ case 250000:
+ thermistor->excitation_current = 0x09;
+ break;
+ case 500000:
+ thermistor->excitation_current = 0x0a;
+ break;
+ case 1000000:
+ thermistor->excitation_current = 0x0b;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ of_node_put(phandle);
+ return &thermistor->sensor;
+fail:
+ of_node_put(phandle);
+ return ERR_PTR(ret);
+}
+
+static struct ltc2983_sensor *ltc2983_diode_new(
+ const struct device_node *child,
+ const struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_diode *diode;
+ u32 temp = 0, excitation_current = 0;
+ int ret;
+
+ diode = devm_kzalloc(&st->spi->dev, sizeof(*diode), GFP_KERNEL);
+ if (!diode)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ diode->sensor_config = LTC2983_DIODE_SGL(1);
+
+ if (of_property_read_bool(child, "adi,three-conversion-cycles"))
+ diode->sensor_config |= LTC2983_DIODE_3_CONV_CYCLE(1);
+
+ if (of_property_read_bool(child, "adi,average-on"))
+ diode->sensor_config |= LTC2983_DIODE_AVERAGE_ON(1);
+
+ /* validate channel index */
+ if (!(diode->sensor_config & LTC2983_DIODE_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+ "Invalid chann:%d for differential thermistor",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+ /* set common parameters */
+ diode->sensor.fault_handler = ltc2983_common_fault_handler;
+ diode->sensor.assign_chan = ltc2983_diode_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
+ if (!ret) {
+ switch (excitation_current) {
+ case 10:
+ diode->excitation_current = 0x00;
+ break;
+ case 20:
+ diode->excitation_current = 0x01;
+ break;
+ case 40:
+ diode->excitation_current = 0x02;
+ break;
+ case 80:
+ diode->excitation_current = 0x03;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ of_property_read_u32(child, "adi,ideal-factor-value", &temp);
+
+ /* 2^20 resolution */
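+ /* e.g. an ideality factor of 1.004 is given as 1004000 and stored as 1052770 */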
+ diode->ideal_factor_value = __convert_to_raw(temp, 1048576);
+
+ return &diode->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rsense *rsense;
+ int ret;
+ u32 temp;
+
+ rsense = devm_kzalloc(&st->spi->dev, sizeof(*rsense), GFP_KERNEL);
+ if (!rsense)
+ return ERR_PTR(-ENOMEM);
+
+ /* validate channel index */
+ if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev, "Invalid chann:%d for r_sense",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
+ if (ret) {
+ dev_err(&st->spi->dev, "Property adi,rsense-val-milli-ohms missing\n");
+ return ERR_PTR(-EINVAL);
+ }
+ /*
+ * Times 1000 because we have milli-ohms and __convert_to_raw
+ * expects scales of 1000000 which are used for all other
+ * properties.
+ * 2^10 resolution
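+ * e.g. a 2 ohms sense resistor is given as 2000 milli-ohms and
+ * stored as 2000 * 1000 * 1024 / 1000000 = 2048.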
+ */
+ rsense->r_sense_val = __convert_to_raw((u64)temp * 1000, 1024);
+
+ /* set common parameters */
+ rsense->sensor.assign_chan = ltc2983_r_sense_assign_chan;
+
+ return &rsense->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_adc *adc;
+
+ adc = devm_kzalloc(&st->spi->dev, sizeof(*adc), GFP_KERNEL);
+ if (!adc)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ adc->single_ended = true;
+
+ if (!adc->single_ended &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev, "Invalid chan:%d for differential adc\n",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+ /* set common parameters */
+ adc->sensor.assign_chan = ltc2983_adc_assign_chan;
+ adc->sensor.fault_handler = ltc2983_common_fault_handler;
+
+ return &adc->sensor;
+}
+
+static int ltc2983_chan_read(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor, int *val)
+{
+ u32 start_conversion = 0;
+ int ret;
+ unsigned long time;
+
+ start_conversion = LTC2983_STATUS_START(true);
+ start_conversion |= LTC2983_STATUS_CHAN_SEL(sensor->chan);
+ dev_dbg(&st->spi->dev, "Start conversion on chan:%d, status:%02X\n",
+ sensor->chan, start_conversion);
+ /* start conversion */
+ ret = regmap_write(st->regmap, LTC2983_STATUS_REG, start_conversion);
+ if (ret)
+ return ret;
+
+ reinit_completion(&st->completion);
+ /*
+ * wait for conversion to complete.
+ * 300 ms should be more than enough to complete the conversion.
+ * Depending on the sensor configuration, there are 2/3 conversions
+ * cycles of 82ms.
+ */
+ time = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(300));
+ if (!time) {
+ dev_warn(&st->spi->dev, "Conversion timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* read the converted data */
+ ret = regmap_bulk_read(st->regmap, LTC2983_CHAN_RES_ADDR(sensor->chan),
+ &st->temp, sizeof(st->temp));
+ if (ret)
+ return ret;
+
+ *val = __be32_to_cpu(st->temp);
+
+ if (!(LTC2983_RES_VALID_MASK & *val)) {
+ dev_err(&st->spi->dev, "Invalid conversion detected\n");
+ return -EIO;
+ }
+
+ ret = sensor->fault_handler(st, *val);
+ if (ret)
+ return ret;
+
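+ /* the conversion result is a 24-bit two's complement value in the lower bits */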
+ *val = sign_extend32((*val) & LTC2983_DATA_MASK, LTC2983_DATA_SIGN_BIT);
+ return 0;
+}
+
+static int ltc2983_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltc2983_data *st = iio_priv(indio_dev);
+ int ret;
+
+ /* sanity check */
+ if (chan->address >= st->num_channels) {
+ dev_err(&st->spi->dev, "Invalid chan address:%ld",
+ chan->address);
+ return -EINVAL;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ ret = ltc2983_chan_read(st, st->sensors[chan->address], val);
+ mutex_unlock(&st->lock);
+ return ret ?: IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* value in milli degrees */
+ *val = 1000;
+ /* 2^10 */
+ *val2 = 1024;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_VOLTAGE:
+ /* value in millivolt */
+ *val = 1000;
+ /* 2^21 */
+ *val2 = 2097152;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ltc2983_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ltc2983_data *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+ else
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static irqreturn_t ltc2983_irq_handler(int irq, void *data)
+{
+ struct ltc2983_data *st = data;
+
+ complete(&st->completion);
+ return IRQ_HANDLED;
+}
+
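+/*
+ * Build an iio_chan_spec for a temperature or voltage channel whose
+ * .address points back into the sensors array.
+ */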
+#define LTC2983_CHAN(__type, index, __address) ({ \
+ struct iio_chan_spec __chan = { \
+ .type = __type, \
+ .indexed = 1, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = __address, \
+ }; \
+ __chan; \
+})
+
+static int ltc2983_parse_dt(struct ltc2983_data *st)
+{
+ struct device_node *child;
+ struct device *dev = &st->spi->dev;
+ int ret = 0, chan = 0, channel_avail_mask = 0;
+
+ of_property_read_u32(dev->of_node, "adi,mux-delay-config-us",
+ &st->mux_delay_config);
+
+ of_property_read_u32(dev->of_node, "adi,filter-notch-freq",
+ &st->filter_notch_freq);
+
+ st->num_channels = of_get_available_child_count(dev->of_node);
+ st->sensors = devm_kcalloc(dev, st->num_channels, sizeof(*st->sensors),
+ GFP_KERNEL);
+ if (!st->sensors)
+ return -ENOMEM;
+
+ st->iio_channels = st->num_channels;
+ for_each_available_child_of_node(dev->of_node, child) {
+ struct ltc2983_sensor sensor;
+
+ ret = of_property_read_u32(child, "reg", &sensor.chan);
+ if (ret) {
+ dev_err(dev, "reg property must given for child nodes\n");
+ return ret;
+ }
+
+ /* check if we have a valid channel */
+ if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
+ sensor.chan > LTC2983_MAX_CHANNELS_NR) {
+ dev_err(dev,
+ "chan:%d must be from 1 to 20\n", sensor.chan);
+ return -EINVAL;
+ } else if (channel_avail_mask & BIT(sensor.chan)) {
+ dev_err(dev, "chan:%d already in use\n", sensor.chan);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(child, "adi,sensor-type",
+ &sensor.type);
+ if (ret) {
+ dev_err(dev,
+ "adi,sensor-type property must given for child nodes\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Create new sensor, type %u, chann %u",
+ sensor.type,
+ sensor.chan);
+
+ if (sensor.type >= LTC2983_SENSOR_THERMOCOUPLE &&
+ sensor.type <= LTC2983_SENSOR_THERMOCOUPLE_CUSTOM) {
+ st->sensors[chan] = ltc2983_thermocouple_new(child, st,
+ &sensor);
+ } else if (sensor.type >= LTC2983_SENSOR_RTD &&
+ sensor.type <= LTC2983_SENSOR_RTD_CUSTOM) {
+ st->sensors[chan] = ltc2983_rtd_new(child, st, &sensor);
+ } else if (sensor.type >= LTC2983_SENSOR_THERMISTOR &&
+ sensor.type <= LTC2983_SENSOR_THERMISTOR_CUSTOM) {
+ st->sensors[chan] = ltc2983_thermistor_new(child, st,
+ &sensor);
+ } else if (sensor.type == LTC2983_SENSOR_DIODE) {
+ st->sensors[chan] = ltc2983_diode_new(child, st,
+ &sensor);
+ } else if (sensor.type == LTC2983_SENSOR_SENSE_RESISTOR) {
+ st->sensors[chan] = ltc2983_r_sense_new(child, st,
+ &sensor);
+ /* don't add rsense to iio */
+ st->iio_channels--;
+ } else if (sensor.type == LTC2983_SENSOR_DIRECT_ADC) {
+ st->sensors[chan] = ltc2983_adc_new(child, st, &sensor);
+ } else {
+ dev_err(dev, "Unknown sensor type %d\n", sensor.type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(st->sensors[chan])) {
+ dev_err(dev, "Failed to create sensor %ld",
+ PTR_ERR(st->sensors[chan]));
+ return PTR_ERR(st->sensors[chan]);
+ }
+ /* set generic sensor parameters */
+ st->sensors[chan]->chan = sensor.chan;
+ st->sensors[chan]->type = sensor.type;
+
+ channel_avail_mask |= BIT(sensor.chan);
+ chan++;
+ }
+
+ return 0;
+}
+
+static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
+{
+ u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0;
+ int ret;
+ unsigned long time;
+
+ /* make sure the device is up */
+ time = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(250));
+
+ if (!time) {
+ dev_err(&st->spi->dev, "Device startup timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ st->iio_chan = devm_kzalloc(&st->spi->dev,
+ st->iio_channels * sizeof(*st->iio_chan),
+ GFP_KERNEL);
+
+ if (!st->iio_chan)
+ return -ENOMEM;
+
+ ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG,
+ LTC2983_NOTCH_FREQ_MASK,
+ LTC2983_NOTCH_FREQ(st->filter_notch_freq));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, LTC2983_MUX_CONFIG_REG,
+ st->mux_delay_config);
+ if (ret)
+ return ret;
+
+ for (chan = 0; chan < st->num_channels; chan++) {
+ u32 chan_type = 0, *iio_chan;
+
+ ret = st->sensors[chan]->assign_chan(st, st->sensors[chan]);
+ if (ret)
+ return ret;
+ /*
+ * The assign_iio flag is necessary for when the device is
+ * coming out of sleep. In that case, we just need to
+ * re-configure the device channels.
+ * We also don't assign iio channels for rsense.
+ */
+ if (st->sensors[chan]->type == LTC2983_SENSOR_SENSE_RESISTOR ||
+ !assign_iio)
+ continue;
+
+ /* assign iio channel */
+ if (st->sensors[chan]->type != LTC2983_SENSOR_DIRECT_ADC) {
+ chan_type = IIO_TEMP;
+ iio_chan = &iio_chan_t;
+ } else {
+ chan_type = IIO_VOLTAGE;
+ iio_chan = &iio_chan_v;
+ }
+
+ /*
+ * add chan as the iio .address so that we can directly
+ * reference the sensor given the iio_chan_spec
+ */
+ st->iio_chan[iio_idx++] = LTC2983_CHAN(chan_type, (*iio_chan)++,
+ chan);
+ }
+
+ return 0;
+}
+
+static const struct regmap_range ltc2983_reg_ranges[] = {
+ regmap_reg_range(LTC2983_STATUS_REG, LTC2983_STATUS_REG),
+ regmap_reg_range(LTC2983_TEMP_RES_START_REG, LTC2983_TEMP_RES_END_REG),
+ regmap_reg_range(LTC2983_GLOBAL_CONFIG_REG, LTC2983_GLOBAL_CONFIG_REG),
+ regmap_reg_range(LTC2983_MULT_CHANNEL_START_REG,
+ LTC2983_MULT_CHANNEL_END_REG),
+ regmap_reg_range(LTC2983_MUX_CONFIG_REG, LTC2983_MUX_CONFIG_REG),
+ regmap_reg_range(LTC2983_CHAN_ASSIGN_START_REG,
+ LTC2983_CHAN_ASSIGN_END_REG),
+ regmap_reg_range(LTC2983_CUST_SENS_TBL_START_REG,
+ LTC2983_CUST_SENS_TBL_END_REG),
+};
+
+static const struct regmap_access_table ltc2983_reg_table = {
+ .yes_ranges = ltc2983_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ltc2983_reg_ranges),
+};
+
+/*
+ * The reg_bits are actually 12 but the device needs the first *complete*
+ * byte for the command (R/W).
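+ * The read/write flag masks below end up in that first byte: 0x03 for a
+ * read transaction and 0x02 for a write transaction.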
+ */
+static const struct regmap_config ltc2983_regmap_config = {
+ .reg_bits = 24,
+ .val_bits = 8,
+ .wr_table = &ltc2983_reg_table,
+ .rd_table = &ltc2983_reg_table,
+ .read_flag_mask = GENMASK(1, 0),
+ .write_flag_mask = BIT(1),
+};
+
+static const struct iio_info ltc2983_iio_info = {
+ .read_raw = ltc2983_read_raw,
+ .debugfs_reg_access = ltc2983_reg_access,
+};
+
+static int ltc2983_probe(struct spi_device *spi)
+{
+ struct ltc2983_data *st;
+ struct iio_dev *indio_dev;
+ const char *name = spi_get_device_id(spi)->name;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->regmap = devm_regmap_init_spi(spi, &ltc2983_regmap_config);
+ if (IS_ERR(st->regmap)) {
+ dev_err(&spi->dev, "Failed to initialize regmap\n");
+ return PTR_ERR(st->regmap);
+ }
+
+ mutex_init(&st->lock);
+ init_completion(&st->completion);
+ st->spi = spi;
+ spi_set_drvdata(spi, st);
+
+ ret = ltc2983_parse_dt(st);
+ if (ret)
+ return ret;
+ /*
+ * let's request the irq now so it is used to sync the device
+ * startup in ltc2983_setup()
+ */
+ ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
+ IRQF_TRIGGER_RISING, name, st);
+ if (ret) {
+ dev_err(&spi->dev, "failed to request an irq, %d", ret);
+ return ret;
+ }
+
+ ret = ltc2983_setup(st, true);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = name;
+ indio_dev->num_channels = st->iio_channels;
+ indio_dev->channels = st->iio_chan;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ltc2983_iio_info;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static int __maybe_unused ltc2983_resume(struct device *dev)
+{
+ struct ltc2983_data *st = spi_get_drvdata(to_spi_device(dev));
+ int dummy;
+
+ /* dummy read to bring the device out of sleep */
+ regmap_read(st->regmap, LTC2983_STATUS_REG, &dummy);
+ /* we need to re-assign the channels */
+ return ltc2983_setup(st, false);
+}
+
+static int __maybe_unused ltc2983_suspend(struct device *dev)
+{
+ struct ltc2983_data *st = spi_get_drvdata(to_spi_device(dev));
+
+ return regmap_write(st->regmap, LTC2983_STATUS_REG, LTC2983_SLEEP);
+}
+
+static SIMPLE_DEV_PM_OPS(ltc2983_pm_ops, ltc2983_suspend, ltc2983_resume);
+
+static const struct spi_device_id ltc2983_id_table[] = {
+ { "ltc2983" },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, ltc2983_id_table);
+
+static const struct of_device_id ltc2983_of_match[] = {
+ { .compatible = "adi,ltc2983" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltc2983_of_match);
+
+static struct spi_driver ltc2983_driver = {
+ .driver = {
+ .name = "ltc2983",
+ .of_match_table = ltc2983_of_match,
+ .pm = &ltc2983_pm_ops,
+ },
+ .probe = ltc2983_probe,
+ .id_table = ltc2983_id_table,
+};
+
+module_spi_driver(ltc2983_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices LTC2983 SPI Temperature sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index f184ba5601d9..73ed550e3fc9 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -284,6 +284,8 @@ static int max31856_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->info = &max31856_info;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = max31856_channels;
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 2ab68282d0b6..d1360605209c 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -194,7 +194,7 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
default:
*val = 250; /* 1000 * 0.25 */
ret = IIO_VAL_INT;
- };
+ }
break;
}
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b44b1c322ec8..ade86388434f 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -83,7 +83,6 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
-source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/i40iw/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 09881bd5f12d..9a8871e21545 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,7 +11,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- nldev.o restrack.o counters.o
+ nldev.o restrack.o counters.o ib_core_uverbs.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 00fb3eacda19..d535995711c3 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -819,22 +819,16 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
struct ib_gid_table *table)
{
int i;
- bool deleted = false;
if (!table)
return;
mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (is_gid_entry_valid(table->data_vec[i])) {
+ if (is_gid_entry_valid(table->data_vec[i]))
del_gid(ib_dev, port, table, i);
- deleted = true;
- }
}
mutex_unlock(&table->lock);
-
- if (deleted)
- dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 5920c0085d35..455b3659d84b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,36 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/completion.h>
@@ -246,7 +220,7 @@ struct cm_work {
};
struct cm_timewait_info {
- struct cm_work work; /* Must be first. */
+ struct cm_work work;
struct list_head list;
struct rb_node remote_qp_node;
struct rb_node remote_id_node;
@@ -263,7 +237,7 @@ struct cm_id_private {
struct rb_node sidr_id_node;
spinlock_t lock; /* Do not acquire inside cm.lock */
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
* Protected by the cm.lock spinlock. */
int listen_sharecount;
@@ -308,7 +282,7 @@ static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
- if (atomic_dec_and_test(&cm_id_priv->refcount))
+ if (refcount_dec_and_test(&cm_id_priv->refcount))
complete(&cm_id_priv->comp);
}
@@ -365,7 +339,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
m->ah = ah;
m->retries = cm_id_priv->max_cm_retries;
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
@@ -626,7 +600,7 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
@@ -883,7 +857,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
INIT_LIST_HEAD(&cm_id_priv->prim_list);
INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
- atomic_set(&cm_id_priv->refcount, 1);
+ refcount_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
error:
@@ -1230,7 +1204,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
spin_unlock_irqrestore(&cm.lock, flags);
return ERR_PTR(-EINVAL);
}
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
++cm_id_priv->listen_sharecount;
spin_unlock_irqrestore(&cm.lock, flags);
@@ -1525,14 +1499,6 @@ static int cm_issue_rej(struct cm_port *port,
return ret;
}
-static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
- __be32 local_qpn, __be32 remote_qpn)
-{
- return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
- ((local_ca_guid == remote_ca_guid) &&
- (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
-}
-
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
return ((req_msg->alt_local_lid) ||
@@ -1895,8 +1861,8 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
NULL, 0);
goto out;
}
- atomic_inc(&listen_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&listen_cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
cm_id_priv->id.state = IB_CM_REQ_RCVD;
atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
@@ -2052,7 +2018,7 @@ static int cm_req_handler(struct cm_work *work)
return 0;
rejected:
- atomic_dec(&cm_id_priv->refcount);
+ refcount_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
free_timeinfo:
kfree(cm_id_priv->timewait_info);
@@ -2826,7 +2792,7 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
cm_local_id(timewait_info->work.local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
@@ -3434,7 +3400,7 @@ static int cm_timewait_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
int ret;
- timewait_info = (struct cm_timewait_info *)work;
+ timewait_info = container_of(work, struct cm_timewait_info, work);
spin_lock_irq(&cm.lock);
list_del(&timewait_info->list);
spin_unlock_irq(&cm.lock);
@@ -3596,8 +3562,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
goto out; /* No match. */
}
- atomic_inc(&cur_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cur_cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 3d16d614aff6..92d7260ac913 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,37 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING the madirectory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use source and binary forms, with or
- * withmodification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retathe above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHWARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
-#if !defined(CM_MSGS_H)
+#ifndef CM_MSGS_H
#define CM_MSGS_H
#include <rdma/ib_mad.h>
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d78f67623f24..25f2b70fd8ef 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1,36 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
- * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
+ * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
* Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/completion.h>
@@ -2531,7 +2504,9 @@ EXPORT_SYMBOL(rdma_set_service_type);
* This function should be called before rdma_connect() on active side,
* and on passive side before rdma_accept(). It is applicable to primary
* path only. The timeout will affect the local side of the QP, it is not
- * negotiated with remote side and zero disables the timer.
+ * negotiated with remote side and zero disables the timer. In case it is
+ * set before rdma_resolve_route, the value will also be used to determine
+ * PacketLifeTime for RoCE.
*
* Return: 0 for success
*/
@@ -2828,22 +2803,65 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
return 0;
}
-static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{
- int prio;
struct net_device *dev;
- prio = rt_tos2priority(tos);
- dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ dev = vlan_dev_real_dev(vlan_ndev);
if (dev->num_tc)
return netdev_get_prio_tc_map(dev, prio);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+struct iboe_prio_tc_map {
+ int input_prio;
+ int output_tc;
+ bool found;
+};
+
+static int get_lower_vlan_dev_tc(struct net_device *dev, void *data)
+{
+ struct iboe_prio_tc_map *map = data;
+
+ if (is_vlan_dev(dev))
+ map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
+ else if (dev->num_tc)
+ map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
+ else
+ map->output_tc = 0;
+ /* We are only interested in the first-level VLAN device, so always
+ * return 1 to stop iterating over lower-level devices.
+ */
+ map->found = true;
+ return 1;
+}
+
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+ struct iboe_prio_tc_map prio_tc_map = {};
+ int prio = rt_tos2priority(tos);
+
+ /* If VLAN device, get it directly from the VLAN netdev */
if (is_vlan_dev(ndev))
- return (vlan_dev_get_egress_qos_mask(ndev, prio) &
- VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-#endif
- return 0;
+ return get_vlan_ndev_tc(ndev, prio);
+
+ prio_tc_map.input_prio = prio;
+ rcu_read_lock();
+ netdev_walk_all_lower_dev_rcu(ndev,
+ get_lower_vlan_dev_tc,
+ &prio_tc_map);
+ rcu_read_unlock();
+ /* If a map is found from a lower device, use it; otherwise
+ * continue with the current netdevice to get the priority to tc map.
+ */
+ if (prio_tc_map.found)
+ return prio_tc_map.output_tc;
+ else if (ndev->num_tc)
+ return netdev_get_prio_tc_map(ndev, prio);
+ else
+ return 0;
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
@@ -2897,7 +2915,16 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->rate = iboe_get_rate(ndev);
dev_put(ndev);
route->path_rec->packet_life_time_selector = IB_SA_EQ;
- route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+ /* In case ACK timeout is set, use this value to calculate
+ * PacketLifeTime. As per IBTA 12.7.34,
+ * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
+ * Assuming a negligible local ACK delay, we can use
+ * PacketLifeTime = local ACK timeout/2
+ * as a reasonable approximation for RoCE networks.
+ */
+ route->path_rec->packet_life_time = id_priv->timeout_set ?
+ id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
+
if (!route->path_rec->mtu) {
ret = -EINVAL;
goto err2;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 9d07378b5b42..3645e092e1c7 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -388,4 +388,15 @@ int ib_device_set_netns_put(struct sk_buff *skb,
int rdma_nl_net_init(struct rdma_dev_net *rnet);
void rdma_nl_net_exit(struct rdma_dev_net *rnet);
+
+struct rdma_umap_priv {
+ struct vm_area_struct *vma;
+ struct list_head list;
+ struct rdma_user_mmap_entry *entry;
+};
+
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 680ad27f497d..8434ec082c3a 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -149,11 +149,18 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
struct auto_mode_param *param = &counter->mode.param;
bool match = true;
- if (!rdma_is_visible_in_pid_ns(&qp->res))
- return false;
-
- /* Ensure that counter belongs to the right PID */
- if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
+ /*
+ * Ensure that counter belongs to the right PID. This operation can
+ * race with user space which kills the process and leaves QP and
+ * counters orphans.
+ *
+ * It is not a big deal because an exited task will leave both QP and
+ * counter in the same bucket of the zombie process. Just ensure that
+ * the process is still alive before proceeding.
+ *
+ */
+ if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task) ||
+ !task_pid_nr(qp->res.task))
return false;
if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -229,9 +236,6 @@ static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
rt = &dev->res[RDMA_RESTRACK_COUNTER];
xa_lock(&rt->xa);
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
counter = container_of(res, struct rdma_counter, res);
if ((counter->device != qp->device) || (counter->port != port))
goto next;
@@ -412,9 +416,6 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
if (IS_ERR(res))
return NULL;
- if (!rdma_is_visible_in_pid_ns(res))
- goto err;
-
qp = container_of(res, struct ib_qp, res);
if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
goto err;
@@ -445,11 +446,6 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
if (IS_ERR(res))
return NULL;
- if (!rdma_is_visible_in_pid_ns(res)) {
- rdma_restrack_put(res);
- return NULL;
- }
-
counter = container_of(res, struct rdma_counter, res);
kref_get(&counter->kref);
rdma_restrack_put(res);
@@ -463,10 +459,15 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
u32 qp_num, u32 counter_id)
{
+ struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
struct ib_qp *qp;
int ret;
+ port_counter = &dev->port_data[port].port_counter;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
@@ -503,6 +504,7 @@ err:
int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
u32 qp_num, u32 *counter_id)
{
+ struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
struct ib_qp *qp;
int ret;
@@ -510,9 +512,13 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
- if (!dev->port_data[port].port_counter.hstats)
+ port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
return -EOPNOTSUPP;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 50a92442c4f7..84dd74fe13b8 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -128,17 +128,14 @@ module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
"Share device among net namespaces; default=1 (shared)");
/**
- * rdma_dev_access_netns() - Return whether a rdma device can be accessed
+ * rdma_dev_access_netns() - Return whether an rdma device can be accessed
* from a specified net namespace or not.
- * @device: Pointer to rdma device which needs to be checked
+ * @dev: Pointer to rdma device which needs to be checked
* @net: Pointer to net namespace for which access is to be checked
*
- * rdma_dev_access_netns() - Return whether a rdma device can be accessed
- * from a specified net namespace or not. When
- * rdma device is in shared mode, it ignores the
- * net namespace. When rdma device is exclusive
- * to a net namespace, rdma device net namespace is
- * checked against the specified one.
+ * When the rdma device is in shared mode, it ignores the net namespace.
+ * When the rdma device is exclusive to a net namespace, the rdma device's
+ * net namespace is checked against the specified one.
*/
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
@@ -1199,9 +1196,21 @@ static void setup_dma_device(struct ib_device *device)
WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
- /* Setup default max segment size for all IB devices */
- dma_set_max_seg_size(device->dma_device, SZ_2G);
+ if (!device->dev.dma_parms) {
+ if (parent) {
+ /*
+ * The caller did not provide DMA parameters, so
+ * 'parent' probably represents a PCI device. The
+ * PCI core sets the maximum segment size to 64 KB.
+ * Increase this parameter to 2 GB.
+ */
+ device->dev.dma_parms = parent->dma_parms;
+ dma_set_max_seg_size(device->dma_device, SZ_2G);
+ } else {
+ WARN_ON_ONCE(true);
+ }
+ }
}
/*
@@ -1317,7 +1326,9 @@ out:
/**
* ib_register_device - Register an IB device with IB core
- * @device:Device to register
+ * @device: Device to register
+ * @name: unique string device name. This may include a '%' which will
+ * cause a unique index to be added to the passed device name.
*
* Low-level drivers use ib_register_device() to register their
* devices with the IB core. All registered clients will receive a
@@ -1444,7 +1455,7 @@ out:
/**
* ib_unregister_device - Unregister an IB device
- * @device: The device to unregister
+ * @ib_dev: The device to unregister
*
* Unregister an IB device. All clients will receive a remove callback.
*
@@ -1466,7 +1477,7 @@ EXPORT_SYMBOL(ib_unregister_device);
/**
* ib_unregister_device_and_put - Unregister a device while holding a 'get'
- * device: The device to unregister
+ * @ib_dev: The device to unregister
*
* This is the same as ib_unregister_device(), except it includes an internal
* ib_device_put() that should match a 'get' obtained by the caller.
@@ -1536,7 +1547,7 @@ static void ib_unregister_work(struct work_struct *work)
/**
* ib_unregister_device_queued - Unregister a device using a work queue
- * device: The device to unregister
+ * @ib_dev: The device to unregister
*
* This schedules an asynchronous unregistration using a WQ for the device. A
* driver should use this to avoid holding locks while doing unregistration,
@@ -2366,7 +2377,7 @@ int ib_modify_device(struct ib_device *device,
struct ib_device_modify *device_modify)
{
if (!device->ops.modify_device)
- return -ENOSYS;
+ return -EOPNOTSUPP;
return device->ops.modify_device(device, device_modify_mask,
device_modify);
@@ -2397,8 +2408,12 @@ int ib_modify_port(struct ib_device *device,
rc = device->ops.modify_port(device, port_num,
port_modify_mask,
port_modify);
+ else if (rdma_protocol_roce(device, port_num) &&
+ ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
+ (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
+ rc = 0;
else
- rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
+ rc = -EOPNOTSUPP;
return rc;
}
EXPORT_SYMBOL(ib_modify_port);
@@ -2607,6 +2622,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, drain_sq);
SET_DEVICE_OP(dev_ops, enable_driver);
SET_DEVICE_OP(dev_ops, fill_res_entry);
+ SET_DEVICE_OP(dev_ops, fill_stat_entry);
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
SET_DEVICE_OP(dev_ops, get_hw_stats);
@@ -2615,9 +2631,9 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_port_immutable);
SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config);
+ SET_DEVICE_OP(dev_ops, get_vf_guid);
SET_DEVICE_OP(dev_ops, get_vf_stats);
SET_DEVICE_OP(dev_ops, init_port);
- SET_DEVICE_OP(dev_ops, invalidate_range);
SET_DEVICE_OP(dev_ops, iw_accept);
SET_DEVICE_OP(dev_ops, iw_add_ref);
SET_DEVICE_OP(dev_ops, iw_connect);
@@ -2630,6 +2646,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
+ SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
new file mode 100644
index 000000000000..f509c478b469
--- /dev/null
+++ b/drivers/infiniband/core/ib_core_uverbs.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2019 Marvell. All rights reserved.
+ */
+#include <linux/xarray.h>
+#include "uverbs.h"
+#include "core_priv.h"
+
+/**
+ * rdma_umap_priv_init() - Initialize the private data of a vma
+ *
+ * @priv: The already allocated private data
+ * @vma: The vm area struct that needs private data
+ * @entry: entry into the mmap_xa that needs to be linked with
+ * this vma
+ *
+ * Each time we map IO memory into user space this keeps track of the
+ * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space
+ * to point to the zero page and allow the hot unplug to proceed.
+ *
+ * This is necessary for cases like PCI physical hot unplug as the actual BAR
+ * memory may vanish after this and access to it from userspace could MCE.
+ *
+ * RDMA drivers supporting disassociation must have their user space designed
+ * to cope in some way with their IO pages going to the zero page.
+ *
+ */
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+
+ priv->vma = vma;
+ if (entry) {
+ kref_get(&entry->ref);
+ priv->entry = entry;
+ }
+ vma->vm_private_data = priv;
+ /* vm_ops is set up in ib_uverbs_mmap() to avoid module dependencies */
+
+ mutex_lock(&ufile->umap_lock);
+ list_add(&priv->list, &ufile->umaps);
+ mutex_unlock(&ufile->umap_lock);
+}
+EXPORT_SYMBOL(rdma_umap_priv_init);
+
+/**
+ * rdma_user_mmap_io() - Map IO memory into a process
+ *
+ * @ucontext: associated user context
+ * @vma: the vma related to the current mmap call
+ * @pfn: pfn to map
+ * @size: size to map
+ * @prot: pgprot to use in remap call
+ * @entry: mmap_entry retrieved from rdma_user_mmap_entry_get(), or NULL
+ * if mmap_entry is not used by the driver
+ *
+ * This is to be called by drivers as part of their mmap() functions if they
+ * wish to send something like PCI-E BAR memory to userspace.
+ *
+ * Return -EINVAL on wrong flags or size, -EAGAIN on failure to map, and 0
+ * on success.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ struct rdma_umap_priv *priv;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start != size)
+ return -EINVAL;
+
+ /* Driver is using this wrong, must be called by ib_uverbs_mmap */
+ if (WARN_ON(!vma->vm_file ||
+ vma->vm_file->private_data != ufile))
+ return -EINVAL;
+ lockdep_assert_held(&ufile->device->disassociate_srcu);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma, entry);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_io);
+
+/**
+ * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @pgoff: The mmap offset >> PAGE_SHIFT
+ *
+ * This function is called when a user tries to mmap with an offset (returned
+ * by rdma_user_mmap_get_offset()) it initially received from the driver. The
+ * rdma_user_mmap_entry was created by the function
+ * rdma_user_mmap_entry_insert(). This function increases the refcnt of the
+ * entry so that it won't be deleted from the xarray in the meantime.
+ *
+ * Return a reference to an entry if it exists or NULL if there is no
+ * match. rdma_user_mmap_entry_put() must be called to put the reference.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
+ unsigned long pgoff)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (pgoff > U32_MAX)
+ return NULL;
+
+ xa_lock(&ucontext->mmap_xa);
+
+ entry = xa_load(&ucontext->mmap_xa, pgoff);
+
+ /*
+ * If refcount is zero, the entry is already being deleted; driver_removed
+ * indicates that no further mmaps are possible and we are waiting for
+ * the active VMAs to be closed.
+ */
+ if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
+ !kref_get_unless_zero(&entry->ref))
+ goto err;
+
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
+ pgoff, entry->npages);
+
+ return entry;
+
+err:
+ xa_unlock(&ucontext->mmap_xa);
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff);
+
+/**
+ * rdma_user_mmap_entry_get() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @vma: the vma being mmap'd into
+ *
+ * This function is like rdma_user_mmap_entry_get_pgoff() except that it also
+ * checks that the VMA is correct.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return NULL;
+ entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff);
+ if (!entry)
+ return NULL;
+ if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
+ rdma_user_mmap_entry_put(entry);
+ return NULL;
+ }
+ return entry;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get);
+
+static void rdma_user_mmap_entry_free(struct kref *kref)
+{
+ struct rdma_user_mmap_entry *entry =
+ container_of(kref, struct rdma_user_mmap_entry, ref);
+ struct ib_ucontext *ucontext = entry->ucontext;
+ unsigned long i;
+
+ /*
+ * Erase all xarray slots occupied by this single entry; this is deferred
+ * until all VMAs are closed so that the mmap offsets remain unique.
+ */
+ xa_lock(&ucontext->mmap_xa);
+ for (i = 0; i < entry->npages; i++)
+ __xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
+ entry->start_pgoff, entry->npages);
+
+ if (ucontext->device->ops.mmap_free)
+ ucontext->device->ops.mmap_free(entry);
+}
+
+/**
+ * rdma_user_mmap_entry_put() - Drop reference to the mmap entry
+ *
+ * @entry: an entry in the mmap_xa
+ *
+ * This function is called when the mapping is closed if it was
+ * an io mapping, or when the driver is done with the entry for
+ * some other reason.
+ * It should be called after rdma_user_mmap_entry_get() once the
+ * entry is no longer needed. This function will erase the
+ * entry and free it once its refcnt reaches zero.
+ */
+void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry)
+{
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_put);
+
+/**
+ * rdma_user_mmap_entry_remove() - Drop reference to the entry and
+ * mark it as unmappable
+ *
+ * @entry: the entry to remove from the mmap_xa
+ *
+ * Drivers can call this to prevent userspace from creating more mappings for
+ * entry, however existing mmaps continue to exist and ops->mmap_free() will
+ * not be called until all user mmaps are destroyed.
+ */
+void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->driver_removed = true;
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface for implementing their mmap syscall. A database of mmap offsets
+ * is handled in the core and helper functions are provided to insert entries
+ * into the database and extract entries when the user calls mmap with the
+ * given offset. The function allocates a unique page offset that should be
+ * provided to the user; the user will use the offset to retrieve information
+ * such as the address to be mapped and how.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ XA_STATE(xas, &ucontext->mmap_xa, 0);
+ u32 xa_first, xa_last, npages;
+ int err;
+ u32 i;
+
+ if (!entry)
+ return -EINVAL;
+
+ kref_init(&entry->ref);
+ entry->ucontext = ucontext;
+
+ /*
+ * We want the whole allocation to be done without interruption from a
+ * different thread. The allocation requires finding a free range and
+ * storing the entry into it. During the xa_insert the lock could be
+ * released, possibly allowing another thread to choose the same range.
+ */
+ mutex_lock(&ufile->umap_lock);
+
+ xa_lock(&ucontext->mmap_xa);
+
+ /* We want to find an empty range */
+ npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
+ entry->npages = npages;
+ while (true) {
+ /* First find an empty index */
+ xas_find_marked(&xas, U32_MAX, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART)
+ goto err_unlock;
+
+ xa_first = xas.xa_index;
+
+ /* Is there enough room to have the range? */
+ if (check_add_overflow(xa_first, npages, &xa_last))
+ goto err_unlock;
+
+ /*
+ * Now look for the next present entry. If an entry doesn't
+ * exist, we found an empty range and can proceed.
+ */
+ xas_next_entry(&xas, xa_last - 1);
+ if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
+ break;
+ }
+
+ for (i = xa_first; i < xa_last; i++) {
+ err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL);
+ if (err)
+ goto err_undo;
+ }
+
+ /*
+ * Internally the kernel uses a page offset, in libc this is a byte
+ * offset. Drivers should not return pgoff to userspace.
+ */
+ entry->start_pgoff = xa_first;
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n",
+ entry->start_pgoff, npages);
+
+ return 0;
+
+err_undo:
+ for (; i > xa_first; i--)
+ __xa_erase(&ucontext->mmap_xa, i - 1);
+
+err_unlock:
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
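The new file above is the core side of the mmap_xa scheme. Below is a minimal, hypothetical driver-side sketch (not part of this patch; every my_* name, the PAGE_SIZE-sized mapping and the pgprot choice are illustrative assumptions) showing the intended flow: embed struct rdma_user_mmap_entry in a driver entry, insert it to obtain an offset for userspace, resolve it again in the driver's ->mmap() callback, hand it to rdma_user_mmap_io(), and free it in ->mmap_free() once the last reference is dropped.

	struct my_user_mmap_entry {
		struct rdma_user_mmap_entry rdma_entry;	/* must be embedded */
		unsigned long pfn;			/* what this entry maps */
	};

	/* Allocate an entry and report the byte offset userspace passes to mmap(). */
	static int my_create_mmap_entry(struct ib_ucontext *uctx, unsigned long pfn,
					u64 *mmap_offset)
	{
		struct my_user_mmap_entry *entry;
		int ret;

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;

		entry->pfn = pfn;
		ret = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, PAGE_SIZE);
		if (ret) {
			kfree(entry);
			return ret;
		}

		*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
		return 0;
	}

	/* The driver's ->mmap() callback resolves the offset back to the entry. */
	static int my_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
	{
		struct rdma_user_mmap_entry *rdma_entry;
		struct my_user_mmap_entry *entry;
		int ret;

		rdma_entry = rdma_user_mmap_entry_get(uctx, vma);
		if (!rdma_entry)
			return -EINVAL;
		entry = container_of(rdma_entry, struct my_user_mmap_entry,
				     rdma_entry);

		ret = rdma_user_mmap_io(uctx, vma, entry->pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);

		rdma_user_mmap_entry_put(rdma_entry);
		return ret;
	}

	/* ->mmap_free() runs when the last reference to the entry is dropped,
	 * i.e. all VMAs are closed and the entry was removed. */
	static void my_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
	{
		kfree(container_of(rdma_entry, struct my_user_mmap_entry,
				   rdma_entry));
	}

The put in my_mmap() is safe because rdma_user_mmap_io() takes its own reference on the entry through rdma_umap_priv_init() when entry is non-NULL; that reference is only dropped in rdma_umap_close() or uverbs_user_mmap_disassociate().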
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 7e2bcc72f66c..1bf87d9fd0bd 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -210,8 +210,10 @@ int iwpm_mapinfo_available(void);
/**
* iwpm_compare_sockaddr - Compare two sockaddr storage structs
+ * @a_sockaddr: first sockaddr to compare
+ * @b_sockaddr: second sockaddr to compare
*
- * Returns 0 if they are holding the same ip/tcp address info,
+ * Return: 0 if they are holding the same ip/tcp address info,
* otherwise returns 1
*/
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
@@ -272,6 +274,7 @@ void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg);
* iwpm_send_hello - Send hello response to iwpmd
*
* @nl_client: The index of the netlink client
+ * @iwpm_pid: The pid of the user space port mapper
* @abi_version: The kernel's abi_version
*
* Returns 0 on success or a negative error code
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9947d16edef2..c54db13fa9b0 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -913,9 +913,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
/* No GRH for DR SMP */
ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
- (const struct ib_mad_hdr *)smp, mad_size,
- (struct ib_mad_hdr *)mad_priv->mad,
- &mad_size, &out_mad_pkey_index);
+ (const struct ib_mad *)smp,
+ (struct ib_mad *)mad_priv->mad, &mad_size,
+ &out_mad_pkey_index);
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -1397,25 +1397,6 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
}
EXPORT_SYMBOL(ib_free_recv_mad);
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- return ERR_PTR(-EINVAL); /* XXX: for now */
-}
-EXPORT_SYMBOL(ib_redirect_mad_qp);
-
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc)
-{
- dev_err(&mad_agent->device->dev,
- "ib_process_mad_wc() not implemented yet\n");
- return 0;
-}
-EXPORT_SYMBOL(ib_process_mad_wc);
-
static int method_in_use(struct ib_mad_mgmt_method_table **method,
struct ib_mad_reg_req *mad_reg_req)
{
@@ -2340,9 +2321,9 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
if (port_priv->device->ops.process_mad) {
ret = port_priv->device->ops.process_mad(
port_priv->device, 0, port_priv->port_num, wc,
- &recv->grh, (const struct ib_mad_hdr *)recv->mad,
- recv->mad_size, (struct ib_mad_hdr *)response->mad,
- &mad_size, &resp_mad_pkey_index);
+ &recv->grh, (const struct ib_mad *)recv->mad,
+ (struct ib_mad *)response->mad, &mad_size,
+ &resp_mad_pkey_index);
if (opa)
wc->pkey_index = resp_mad_pkey_index;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index c03af08b80e7..cbf6041a5d4a 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -42,6 +42,9 @@
#include "cma_priv.h"
#include "restrack.h"
+typedef int (*res_fill_func_t)(struct sk_buff*, bool,
+ struct rdma_restrack_entry*, uint32_t);
+
/*
* Sort array elements by the netlink attribute name
*/
@@ -180,6 +183,19 @@ static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
return 0;
}
+int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
+ const char *str)
+{
+ if (put_driver_name_print_type(msg, name,
+ RDMA_NLDEV_PRINT_TYPE_UNSPEC))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
+ return -EMSGSIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_string);
+
int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
@@ -399,20 +415,34 @@ err:
static int fill_res_name_pid(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
+ int err = 0;
+
/*
* For user resources, userspace should read /proc/PID/comm to get the
* name of the task file.
*/
if (rdma_is_kernel_res(res)) {
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
- res->kern_name))
- return -EMSGSIZE;
+ err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
+ res->kern_name);
} else {
- if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
- task_pid_vnr(res->task)))
- return -EMSGSIZE;
+ pid_t pid;
+
+ pid = task_pid_vnr(res->task);
+ /*
+ * Task is dead and in zombie state.
+ * There is no need to print PID anymore.
+ */
+ if (pid)
+ /*
+ * This part is racy: the task can be killed and the PID will
+ * be zero right here, but that is ok; the next query won't
+ * return a PID. We don't promise real-time reflection
+ * of SW objects.
+ */
+ err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
}
- return 0;
+
+ return err ? -EMSGSIZE : 0;
}
static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
@@ -423,6 +453,14 @@ static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
return dev->ops.fill_res_entry(msg, res);
}
+static bool fill_stat_entry(struct ib_device *dev, struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (!dev->ops.fill_stat_entry)
+ return false;
+ return dev->ops.fill_stat_entry(msg, res);
+}
+
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
@@ -698,9 +736,6 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
rt = &counter->device->res[RDMA_RESTRACK_QP];
xa_lock(&rt->xa);
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
qp = container_of(res, struct ib_qp, res);
if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
continue;
@@ -723,8 +758,8 @@ err:
return ret;
}
-static int fill_stat_hwcounter_entry(struct sk_buff *msg,
- const char *name, u64 value)
+int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
+ u64 value)
{
struct nlattr *entry_attr;
@@ -746,6 +781,25 @@ err:
nla_nest_cancel(msg, entry_attr);
return -EMSGSIZE;
}
+EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
+
+static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct ib_device *dev = mr->pd->device;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
+ goto err;
+
+ if (fill_stat_entry(dev, msg, res))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
static int fill_stat_counter_hwcounters(struct sk_buff *msg,
struct rdma_counter *counter)
@@ -759,7 +813,7 @@ static int fill_stat_counter_hwcounters(struct sk_buff *msg,
return -EMSGSIZE;
for (i = 0; i < st->num_counters; i++)
- if (fill_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
+ if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
goto err;
nla_nest_end(msg, table_attr);
@@ -1117,8 +1171,6 @@ static int nldev_res_get_dumpit(struct sk_buff *skb,
}
struct nldev_fill_res_entry {
- int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
- struct rdma_restrack_entry *res, u32 port);
enum rdma_nldev_attr nldev_attr;
enum rdma_nldev_command nldev_cmd;
u8 flags;
@@ -1132,21 +1184,18 @@ enum nldev_res_flags {
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
[RDMA_RESTRACK_QP] = {
- .fill_res_func = fill_res_qp_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
.id = RDMA_NLDEV_ATTR_RES_LQPN,
},
[RDMA_RESTRACK_CM_ID] = {
- .fill_res_func = fill_res_cm_id_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
},
[RDMA_RESTRACK_CQ] = {
- .fill_res_func = fill_res_cq_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
.flags = NLDEV_PER_DEV,
@@ -1154,7 +1203,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_CQN,
},
[RDMA_RESTRACK_MR] = {
- .fill_res_func = fill_res_mr_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
.flags = NLDEV_PER_DEV,
@@ -1162,7 +1210,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_MRN,
},
[RDMA_RESTRACK_PD] = {
- .fill_res_func = fill_res_pd_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
.flags = NLDEV_PER_DEV,
@@ -1170,7 +1217,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_PDN,
},
[RDMA_RESTRACK_COUNTER] = {
- .fill_res_func = fill_res_counter_entry,
.nldev_cmd = RDMA_NLDEV_CMD_STAT_GET,
.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
@@ -1180,7 +1226,8 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
- enum rdma_restrack_type res_type)
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -1222,11 +1269,6 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err;
}
- if (!rdma_is_visible_in_pid_ns(res)) {
- ret = -ENOENT;
- goto err_get;
- }
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
@@ -1243,7 +1285,9 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
}
has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
- ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
+
+ ret = fill_func(msg, has_cap_net_admin, res, port);
+
rdma_restrack_put(res);
if (ret)
goto err_free;
@@ -1263,7 +1307,8 @@ err:
static int res_get_common_dumpit(struct sk_buff *skb,
struct netlink_callback *cb,
- enum rdma_restrack_type res_type)
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -1334,9 +1379,6 @@ static int res_get_common_dumpit(struct sk_buff *skb,
* objects.
*/
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
if (idx < start || !rdma_restrack_get(res))
goto next;
@@ -1351,7 +1393,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
goto msg_full;
}
- ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
+ ret = fill_func(skb, has_cap_net_admin, res, port);
+
rdma_restrack_put(res);
if (ret) {
@@ -1394,17 +1437,19 @@ err_index:
return ret;
}
-#define RES_GET_FUNCS(name, type) \
- static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
+#define RES_GET_FUNCS(name, type) \
+ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
struct netlink_callback *cb) \
- { \
- return res_get_common_dumpit(skb, cb, type); \
- } \
- static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
- struct nlmsghdr *nlh, \
+ { \
+ return res_get_common_dumpit(skb, cb, type, \
+ fill_res_##name##_entry); \
+ } \
+ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
+ struct nlmsghdr *nlh, \
struct netlink_ext_ack *extack) \
- { \
- return res_get_common_doit(skb, nlh, extack, type); \
+ { \
+ return res_get_common_doit(skb, nlh, extack, type, \
+ fill_res_##name##_entry); \
}
RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
@@ -1880,7 +1925,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
for (i = 0; i < num_cnts; i++) {
v = stats->value[i] +
rdma_counter_get_hwstat_value(device, port, i);
- if (fill_stat_hwcounter_entry(msg, stats->names[i], v)) {
+ if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
ret = -EMSGSIZE;
goto err_table;
}
@@ -1989,7 +2034,10 @@ static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
case RDMA_NLDEV_ATTR_RES_QP:
ret = stat_get_doit_qp(skb, nlh, extack, tb);
break;
-
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
default:
ret = -EINVAL;
break;
@@ -2013,7 +2061,10 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb,
case RDMA_NLDEV_ATTR_RES_QP:
ret = nldev_res_get_counter_dumpit(skb, cb);
break;
-
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
default:
ret = -EINVAL;
break;
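The hunks above export rdma_nl_stat_hwcounter_entry(), add rdma_nl_put_driver_string(), and route RDMA_NLDEV_ATTR_RES_MR stat queries through fill_stat_mr_entry(), which calls the driver's new fill_stat_entry op. A hedged sketch of what such a driver callback could look like (struct my_mr, its counter fields and the attribute names are purely illustrative assumptions, not from this patch):

	struct my_mr {
		struct ib_mr ibmr;
		u64 page_faults;
		u64 invalidations;
	};

	/* Hooked up via ib_device_ops.fill_stat_entry; called for per-MR stat dumps. */
	static int my_fill_stat_mr_entry(struct sk_buff *msg,
					 struct rdma_restrack_entry *res)
	{
		struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
		struct my_mr *mr = container_of(ibmr, struct my_mr, ibmr);

		if (rdma_nl_put_driver_string(msg, "state", "valid"))
			return -EMSGSIZE;
		if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
						 mr->page_faults))
			return -EMSGSIZE;
		if (rdma_nl_stat_hwcounter_entry(msg, "page_invalidations",
						 mr->invalidations))
			return -EMSGSIZE;

		return 0;
	}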
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index ccf4d069c25c..6c72773faf29 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -817,6 +817,7 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
rdma_restrack_del(&ucontext->res);
ib_dev->ops.dealloc_ucontext(ucontext);
+ WARN_ON(!xa_empty(&ucontext->mmap_xa));
kfree(ucontext);
ufile->ucontext = NULL;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index a07665f7ef8c..62fbb0ae9cb4 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -116,11 +116,8 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
u32 cnt = 0;
xa_lock(&rt->xa);
- xas_for_each(&xas, e, U32_MAX) {
- if (!rdma_is_visible_in_pid_ns(e))
- continue;
+ xas_for_each(&xas, e, U32_MAX)
cnt++;
- }
xa_unlock(&rt->xa);
return cnt;
}
@@ -346,18 +343,3 @@ out:
}
}
EXPORT_SYMBOL(rdma_restrack_del);
-
-bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
-{
- /*
- * 1. Kern resources should be visible in init
- * namespace only
- * 2. Present only resources visible in the current
- * namespace
- */
- if (rdma_is_kernel_res(res))
- return task_active_pid_ns(current) == &init_pid_ns;
-
- /* PID 0 means that resource is not found in current namespace */
- return task_pid_vnr(res->task);
-}
diff --git a/drivers/infiniband/core/restrack.h b/drivers/infiniband/core/restrack.h
index 7bd177cc0a61..d084e5f89849 100644
--- a/drivers/infiniband/core/restrack.h
+++ b/drivers/infiniband/core/restrack.h
@@ -27,5 +27,4 @@ int rdma_restrack_init(struct ib_device *dev);
void rdma_restrack_clean(struct ib_device *dev);
void rdma_restrack_attach_task(struct rdma_restrack_entry *res,
struct task_struct *task);
-bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res);
#endif /* _RDMA_CORE_RESTRACK_H_ */
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 5337393d4dfe..4fad732f9b3c 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -20,14 +20,17 @@ module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
/*
- * Check if the device might use memory registration. This is currently only
- * true for iWarp devices. In the future we can hopefully fine tune this based
- * on HCA driver input.
+ * Report whether memory registration should be used. Memory registration must
+ * be used for iWarp devices because of iWARP-specific limitations. Memory
+ * registration is also enabled if registering memory might yield better
+ * performance than using multiple SGE entries; see rdma_rw_io_needs_mr().
*/
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
if (rdma_protocol_iwarp(dev, port_num))
return true;
+ if (dev->attrs.max_sgl_rd)
+ return true;
if (unlikely(rdma_rw_force_mr))
return true;
return false;
@@ -35,17 +38,19 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
/*
* Check if the device will use memory registration for this RW operation.
- * We currently always use memory registrations for iWarp RDMA READs, and
- * have a debug option to force usage of MRs.
- *
- * XXX: In the future we can hopefully fine tune this based on HCA driver
- * input.
+ * For RDMA READs we must use MRs on iWarp and can optionally use them as an
+ * optimization otherwise. Additionally we have a debug option to force usage
+ * of MRs to help test this code path.
*/
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
enum dma_data_direction dir, int dma_nents)
{
- if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
- return true;
+ if (dir == DMA_FROM_DEVICE) {
+ if (rdma_protocol_iwarp(dev, port_num))
+ return true;
+ if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
+ return true;
+ }
if (unlikely(rdma_rw_force_mr))
return true;
return false;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 17fc2936c077..8917125ea16d 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1246,7 +1246,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
* @port_num: Port on the specified device.
* @rec: path record entry to use for ah attributes initialization.
* @ah_attr: address handle attributes to initialization from path record.
- * @sgid_attr: SGID attribute to consider during initialization.
+ * @gid_attr: SGID attribute to consider during initialization.
*
* When ib_init_ah_attr_from_path() returns success,
* (a) for IB link layer it optionally contains a reference to SGID attribute
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7a50cedcef1f..087682e6969e 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -481,8 +481,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (!dev->ops.process_mad)
return -ENOSYS;
- in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kzalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad) {
ret = -ENOMEM;
goto out;
@@ -497,10 +497,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (attr != IB_PMA_CLASS_PORT_INFO)
in_mad->data[41] = port_num; /* PortSelect field */
- if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY,
- port_num, NULL, NULL,
- (const struct ib_mad_hdr *)in_mad, mad_size,
- (struct ib_mad_hdr *)out_mad, &mad_size,
+ if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, port_num, NULL, NULL,
+ in_mad, out_mad, &mad_size,
&out_mad_pkey_index) &
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
@@ -1268,7 +1266,7 @@ static ssize_t node_desc_store(struct device *device,
int ret;
if (!dev->ops.modify_device)
- return -EIO;
+ return -EOPNOTSUPP;
memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 24244a2f68cc..7a3b99597ead 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -185,10 +185,9 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
- * @dmasync: flush in-flight DMA when the memory region is written
*/
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access, int dmasync)
+ size_t size, int access)
{
struct ib_ucontext *context;
struct ib_umem *umem;
@@ -199,7 +198,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
struct mm_struct *mm;
unsigned long npages;
int ret;
- unsigned long dma_attrs = 0;
struct scatterlist *sg;
unsigned int gup_flags = FOLL_WRITE;
@@ -211,9 +209,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
if (!context)
return ERR_PTR(-EIO);
- if (dmasync)
- dma_attrs |= DMA_ATTR_WRITE_BARRIER;
-
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
@@ -294,11 +289,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg_attrs(context->device,
+ umem->nmap = ib_dma_map_sg(context->device,
umem->sg_head.sgl,
umem->sg_nents,
- DMA_BIDIRECTIONAL,
- dma_attrs);
+ DMA_BIDIRECTIONAL);
if (!umem->nmap) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 163ff7ba92b7..e42d44e501fd 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -48,197 +48,33 @@
#include "uverbs.h"
-static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
+static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ const struct mmu_interval_notifier_ops *ops)
{
- mutex_lock(&umem_odp->umem_mutex);
- if (umem_odp->notifiers_count++ == 0)
- /*
- * Initialize the completion object for waiting on
- * notifiers. Since notifier_count is zero, no one should be
- * waiting right now.
- */
- reinit_completion(&umem_odp->notifier_completion);
- mutex_unlock(&umem_odp->umem_mutex);
-}
-
-static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
-{
- mutex_lock(&umem_odp->umem_mutex);
- /*
- * This sequence increase will notify the QP page fault that the page
- * that is going to be mapped in the spte could have been freed.
- */
- ++umem_odp->notifiers_seq;
- if (--umem_odp->notifiers_count == 0)
- complete_all(&umem_odp->notifier_completion);
- mutex_unlock(&umem_odp->umem_mutex);
-}
-
-static void ib_umem_notifier_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct ib_ucontext_per_mm *per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
- struct rb_node *node;
-
- down_read(&per_mm->umem_rwsem);
- if (!per_mm->mn.users)
- goto out;
-
- for (node = rb_first_cached(&per_mm->umem_tree); node;
- node = rb_next(node)) {
- struct ib_umem_odp *umem_odp =
- rb_entry(node, struct ib_umem_odp, interval_tree.rb);
-
- /*
- * Increase the number of notifiers running, to prevent any
- * further fault handling on this MR.
- */
- ib_umem_notifier_start_account(umem_odp);
- complete_all(&umem_odp->notifier_completion);
- umem_odp->umem.ibdev->ops.invalidate_range(
- umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- }
-
-out:
- up_read(&per_mm->umem_rwsem);
-}
-
-static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
- u64 start, u64 end, void *cookie)
-{
- ib_umem_notifier_start_account(item);
- item->umem.ibdev->ops.invalidate_range(item, start, end);
- return 0;
-}
-
-static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct ib_ucontext_per_mm *per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
- int rc;
-
- if (mmu_notifier_range_blockable(range))
- down_read(&per_mm->umem_rwsem);
- else if (!down_read_trylock(&per_mm->umem_rwsem))
- return -EAGAIN;
-
- if (!per_mm->mn.users) {
- up_read(&per_mm->umem_rwsem);
- /*
- * At this point users is permanently zero and visible to this
- * CPU without a lock, that fact is relied on to skip the unlock
- * in range_end.
- */
- return 0;
- }
-
- rc = rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
- range->end,
- invalidate_range_start_trampoline,
- mmu_notifier_range_blockable(range),
- NULL);
- if (rc)
- up_read(&per_mm->umem_rwsem);
- return rc;
-}
-
-static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
- u64 end, void *cookie)
-{
- ib_umem_notifier_end_account(item);
- return 0;
-}
-
-static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct ib_ucontext_per_mm *per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
-
- if (unlikely(!per_mm->mn.users))
- return;
-
- rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
- range->end,
- invalidate_range_end_trampoline, true, NULL);
- up_read(&per_mm->umem_rwsem);
-}
-
-static struct mmu_notifier *ib_umem_alloc_notifier(struct mm_struct *mm)
-{
- struct ib_ucontext_per_mm *per_mm;
-
- per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
- if (!per_mm)
- return ERR_PTR(-ENOMEM);
-
- per_mm->umem_tree = RB_ROOT_CACHED;
- init_rwsem(&per_mm->umem_rwsem);
-
- WARN_ON(mm != current->mm);
- rcu_read_lock();
- per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- return &per_mm->mn;
-}
-
-static void ib_umem_free_notifier(struct mmu_notifier *mn)
-{
- struct ib_ucontext_per_mm *per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
-
- WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
-
- put_pid(per_mm->tgid);
- kfree(per_mm);
-}
-
-static const struct mmu_notifier_ops ib_umem_notifiers = {
- .release = ib_umem_notifier_release,
- .invalidate_range_start = ib_umem_notifier_invalidate_range_start,
- .invalidate_range_end = ib_umem_notifier_invalidate_range_end,
- .alloc_notifier = ib_umem_alloc_notifier,
- .free_notifier = ib_umem_free_notifier,
-};
-
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
-{
- struct ib_ucontext_per_mm *per_mm;
- struct mmu_notifier *mn;
int ret;
umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
+
if (!umem_odp->is_implicit_odp) {
size_t page_size = 1UL << umem_odp->page_shift;
+ unsigned long start;
+ unsigned long end;
size_t pages;
- umem_odp->interval_tree.start =
- ALIGN_DOWN(umem_odp->umem.address, page_size);
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
if (check_add_overflow(umem_odp->umem.address,
(unsigned long)umem_odp->umem.length,
- &umem_odp->interval_tree.last))
+ &end))
return -EOVERFLOW;
- umem_odp->interval_tree.last =
- ALIGN(umem_odp->interval_tree.last, page_size);
- if (unlikely(umem_odp->interval_tree.last < page_size))
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
return -EOVERFLOW;
- pages = (umem_odp->interval_tree.last -
- umem_odp->interval_tree.start) >>
- umem_odp->page_shift;
+ pages = (end - start) >> umem_odp->page_shift;
if (!pages)
return -EINVAL;
- /*
- * Note that the representation of the intervals in the
- * interval tree considers the ending point as contained in
- * the interval.
- */
- umem_odp->interval_tree.last--;
-
umem_odp->page_list = kvcalloc(
pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
if (!umem_odp->page_list)
@@ -250,26 +86,13 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
ret = -ENOMEM;
goto out_page_list;
}
- }
- mn = mmu_notifier_get(&ib_umem_notifiers, umem_odp->umem.owning_mm);
- if (IS_ERR(mn)) {
- ret = PTR_ERR(mn);
- goto out_dma_list;
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm,
+ start, end - start, ops);
+ if (ret)
+ goto out_dma_list;
}
- umem_odp->per_mm = per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
-
- mutex_init(&umem_odp->umem_mutex);
- init_completion(&umem_odp->notifier_completion);
-
- if (!umem_odp->is_implicit_odp) {
- down_write(&per_mm->umem_rwsem);
- interval_tree_insert(&umem_odp->interval_tree,
- &per_mm->umem_tree);
- up_write(&per_mm->umem_rwsem);
- }
- mmgrab(umem_odp->umem.owning_mm);
return 0;
@@ -305,8 +128,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
if (!context)
return ERR_PTR(-EIO);
- if (WARN_ON_ONCE(!context->device->ops.invalidate_range))
- return ERR_PTR(-EINVAL);
umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
if (!umem_odp)
@@ -318,8 +139,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
umem_odp->is_implicit_odp = 1;
umem_odp->page_shift = PAGE_SHIFT;
- ret = ib_init_umem_odp(umem_odp);
+ umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ ret = ib_init_umem_odp(umem_odp, NULL);
if (ret) {
+ put_pid(umem_odp->tgid);
kfree(umem_odp);
return ERR_PTR(ret);
}
@@ -336,8 +159,10 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
* @addr: The starting userspace VA
* @size: The length of the userspace VA
*/
-struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root,
- unsigned long addr, size_t size)
+struct ib_umem_odp *
+ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
+ size_t size,
+ const struct mmu_interval_notifier_ops *ops)
{
/*
* Caller must ensure that root cannot be freed during the call to
@@ -360,9 +185,12 @@ struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root,
umem->writable = root->umem.writable;
umem->owning_mm = root->umem.owning_mm;
odp_data->page_shift = PAGE_SHIFT;
+ odp_data->notifier.ops = ops;
- ret = ib_init_umem_odp(odp_data);
+ odp_data->tgid = get_pid(root->tgid);
+ ret = ib_init_umem_odp(odp_data, ops);
if (ret) {
+ put_pid(odp_data->tgid);
kfree(odp_data);
return ERR_PTR(ret);
}
@@ -383,7 +211,8 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
* conjunction with MMU notifiers.
*/
struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access)
+ size_t size, int access,
+ const struct mmu_interval_notifier_ops *ops)
{
struct ib_umem_odp *umem_odp;
struct ib_ucontext *context;
@@ -398,8 +227,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
if (!context)
return ERR_PTR(-EIO);
- if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)) ||
- WARN_ON_ONCE(!context->device->ops.invalidate_range))
+ if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
return ERR_PTR(-EINVAL);
umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
@@ -411,6 +239,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
umem_odp->umem.address = addr;
umem_odp->umem.writable = ib_access_writable(access);
umem_odp->umem.owning_mm = mm = current->mm;
+ umem_odp->notifier.ops = ops;
umem_odp->page_shift = PAGE_SHIFT;
if (access & IB_ACCESS_HUGETLB) {
@@ -429,11 +258,14 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
up_read(&mm->mmap_sem);
}
- ret = ib_init_umem_odp(umem_odp);
+ umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ ret = ib_init_umem_odp(umem_odp, ops);
if (ret)
- goto err_free;
+ goto err_put_pid;
return umem_odp;
+err_put_pid:
+ put_pid(umem_odp->tgid);
err_free:
kfree(umem_odp);
return ERR_PTR(ret);
@@ -442,8 +274,6 @@ EXPORT_SYMBOL(ib_umem_odp_get);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
-
/*
* Ensure that no more pages are mapped in the umem.
*
@@ -455,28 +285,11 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
ib_umem_end(umem_odp));
mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
kvfree(umem_odp->dma_list);
kvfree(umem_odp->page_list);
+ put_pid(umem_odp->tgid);
}
-
- down_write(&per_mm->umem_rwsem);
- if (!umem_odp->is_implicit_odp) {
- interval_tree_remove(&umem_odp->interval_tree,
- &per_mm->umem_tree);
- complete_all(&umem_odp->notifier_completion);
- }
- /*
- * NOTE! mmu_notifier_unregister() can happen between a start/end
- * callback, resulting in a missing end, and thus an unbalanced
- * lock. This doesn't really matter to us since we are about to kfree
- * the memory that holds the lock, however LOCKDEP doesn't like this.
- * Thus we call the mmu_notifier_put under the rwsem and test the
- * internal users count to reliably see if we are past this point.
- */
- mmu_notifier_put(&per_mm->mn);
- up_write(&per_mm->umem_rwsem);
-
- mmdrop(umem_odp->umem.owning_mm);
kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);
@@ -501,22 +314,16 @@ EXPORT_SYMBOL(ib_umem_odp_release);
*/
static int ib_umem_odp_map_dma_single_page(
struct ib_umem_odp *umem_odp,
- int page_index,
+ unsigned int page_index,
struct page *page,
u64 access_mask,
unsigned long current_seq)
{
struct ib_device *dev = umem_odp->umem.ibdev;
dma_addr_t dma_addr;
- int remove_existing_mapping = 0;
int ret = 0;
- /*
- * Note: we avoid writing if seq is different from the initial seq, to
- * handle case of a racing notifier. This check also allows us to bail
- * early if we have a notifier running in parallel with us.
- */
- if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
+ if (mmu_interval_check_retry(&umem_odp->notifier, current_seq)) {
ret = -EAGAIN;
goto out;
}
@@ -534,28 +341,29 @@ static int ib_umem_odp_map_dma_single_page(
} else if (umem_odp->page_list[page_index] == page) {
umem_odp->dma_list[page_index] |= access_mask;
} else {
- pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem_odp->page_list[page_index], page);
- /* Better remove the mapping now, to prevent any further
- * damage. */
- remove_existing_mapping = 1;
+ /*
+ * There is a race here where we could have done:
+ *
+ * CPU0 CPU1
+ * get_user_pages()
+ * invalidate()
+ * page_fault()
+ * mutex_lock(umem_mutex)
+ * page from GUP != page in ODP
+ *
+ * It should be prevented by the retry test above as reading
+ * the seq number should be reliable under the
+ * umem_mutex. Thus something is really not working right if
+ * things get here.
+ */
+ WARN(true,
+ "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
+ umem_odp->page_list[page_index], page);
+ ret = -EAGAIN;
}
out:
put_user_page(page);
-
- if (remove_existing_mapping) {
- ib_umem_notifier_start_account(umem_odp);
- dev->ops.invalidate_range(
- umem_odp,
- ib_umem_start(umem_odp) +
- (page_index << umem_odp->page_shift),
- ib_umem_start(umem_odp) +
- ((page_index + 1) << umem_odp->page_shift));
- ib_umem_notifier_end_account(umem_odp);
- ret = -EAGAIN;
- }
-
return ret;
}
@@ -618,7 +426,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
* existing beyond the lifetime of the originating process. Presumably
* mmget_not_zero will fail in this case.
*/
- owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
+ owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
if (!owning_process || !mmget_not_zero(owning_mm)) {
ret = -EINVAL;
goto out_put_task;
@@ -762,32 +570,3 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
-
-/* @last is not a part of the interval. See comment for function
- * node_last.
- */
-int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
- u64 start, u64 last,
- umem_call_back cb,
- bool blockable,
- void *cookie)
-{
- int ret_val = 0;
- struct interval_tree_node *node, *next;
- struct ib_umem_odp *umem;
-
- if (unlikely(start == last))
- return ret_val;
-
- for (node = interval_tree_iter_first(root, start, last - 1);
- node; node = next) {
- /* TODO move the blockable decision up to the callback */
- if (!blockable)
- return -EAGAIN;
- next = interval_tree_iter_next(node, start, last - 1);
- umem = container_of(node, struct ib_umem_odp, interval_tree);
- ret_val = cb(umem, start, last, cookie) || ret_val;
- }
-
- return ret_val;
-}
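With the per_mm interval tree and mmu_notifier machinery removed above, each ODP umem now registers its own mmu_interval_notifier with ops supplied by the driver. A rough sketch of the driver side under the new interface (modelled on how an ODP-capable driver is expected to use it; the my_* names and the exact hardware teardown done in the callback are assumptions):

	static bool my_odp_invalidate(struct mmu_interval_notifier *mni,
				      const struct mmu_notifier_range *range,
				      unsigned long cur_seq)
	{
		struct ib_umem_odp *umem_odp =
			container_of(mni, struct ib_umem_odp, notifier);
		u64 start, end;

		if (!mmu_notifier_range_blockable(range))
			return false;

		mutex_lock(&umem_odp->umem_mutex);
		mmu_interval_set_seq(mni, cur_seq);

		start = max_t(u64, ib_umem_start(umem_odp), range->start);
		end = min_t(u64, ib_umem_end(umem_odp), range->end);

		/* Driver-specific: invalidate HW translations for [start, end)
		 * here, then drop the core's DMA mappings for the same range. */
		ib_umem_odp_unmap_dma_pages(umem_odp, start, end);

		mutex_unlock(&umem_odp->umem_mutex);
		return true;
	}

	static const struct mmu_interval_notifier_ops my_odp_mn_ops = {
		.invalidate = my_odp_invalidate,
	};

	/* Registration now hands the ops straight to the core: */
	static struct ib_umem_odp *my_reg_odp_umem(struct ib_udata *udata,
						   u64 start, u64 length,
						   int access_flags)
	{
		return ib_umem_odp_get(udata, start, length, access_flags,
				       &my_odp_mn_ops);
	}

The page-fault path pairs this with mmu_interval_read_begin() and the retry check visible above in ib_umem_odp_map_dma_single_page(), so that mappings which raced with an invalidation are discarded and retried.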
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 14a80fd9f464..06ed32c8662f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -252,6 +252,8 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
ucontext->closing = false;
ucontext->cleanup_retryable = false;
+ xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
+
ret = get_unused_fd_flags(O_CLOEXEC);
if (ret < 0)
goto err_free;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 61758201d9b2..269938f59d3f 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -795,6 +795,9 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
{
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
if (size < attr->ptr_attr.len) {
if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
attr->ptr_attr.len - size))
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index db98111b47f4..970d8e31dd65 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -772,6 +772,8 @@ out_unlock:
return (ret) ? : count;
}
+static const struct vm_operations_struct rdma_umap_ops;
+
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ib_uverbs_file *file = filp->private_data;
@@ -785,7 +787,7 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
ret = PTR_ERR(ucontext);
goto out;
}
-
+ vma->vm_ops = &rdma_umap_ops;
ret = ucontext->device->ops.mmap(ucontext, vma);
out:
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
@@ -793,38 +795,6 @@ out:
}
/*
- * Each time we map IO memory into user space this keeps track of the mapping.
- * When the device is hot-unplugged we 'zap' the mmaps in user space to point
- * to the zero page and allow the hot unplug to proceed.
- *
- * This is necessary for cases like PCI physical hot unplug as the actual BAR
- * memory may vanish after this and access to it from userspace could MCE.
- *
- * RDMA drivers supporting disassociation must have their user space designed
- * to cope in some way with their IO pages going to the zero page.
- */
-struct rdma_umap_priv {
- struct vm_area_struct *vma;
- struct list_head list;
-};
-
-static const struct vm_operations_struct rdma_umap_ops;
-
-static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
- struct vm_area_struct *vma)
-{
- struct ib_uverbs_file *ufile = vma->vm_file->private_data;
-
- priv->vma = vma;
- vma->vm_private_data = priv;
- vma->vm_ops = &rdma_umap_ops;
-
- mutex_lock(&ufile->umap_lock);
- list_add(&priv->list, &ufile->umaps);
- mutex_unlock(&ufile->umap_lock);
-}
-
-/*
* The VMA has been dup'd, initialize the vm_private_data with a new tracking
* struct
*/
@@ -849,7 +819,7 @@ static void rdma_umap_open(struct vm_area_struct *vma)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto out_unlock;
- rdma_umap_priv_init(priv, vma);
+ rdma_umap_priv_init(priv, vma, opriv->entry);
up_read(&ufile->hw_destroy_rwsem);
return;
@@ -880,6 +850,9 @@ static void rdma_umap_close(struct vm_area_struct *vma)
* this point.
*/
mutex_lock(&ufile->umap_lock);
+ if (priv->entry)
+ rdma_user_mmap_entry_put(priv->entry);
+
list_del(&priv->list);
mutex_unlock(&ufile->umap_lock);
kfree(priv);
@@ -931,44 +904,6 @@ static const struct vm_operations_struct rdma_umap_ops = {
.fault = rdma_umap_fault,
};
-/*
- * Map IO memory into a process. This is to be called by drivers as part of
- * their mmap() functions if they wish to send something like PCI-E BAR memory
- * to userspace.
- */
-int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- struct ib_uverbs_file *ufile = ucontext->ufile;
- struct rdma_umap_priv *priv;
-
- if (!(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- if (vma->vm_end - vma->vm_start != size)
- return -EINVAL;
-
- /* Driver is using this wrong, must be called by ib_uverbs_mmap */
- if (WARN_ON(!vma->vm_file ||
- vma->vm_file->private_data != ufile))
- return -EINVAL;
- lockdep_assert_held(&ufile->device->disassociate_srcu);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- vma->vm_page_prot = prot;
- if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
- kfree(priv);
- return -EAGAIN;
- }
-
- rdma_umap_priv_init(priv, vma);
- return 0;
-}
-EXPORT_SYMBOL(rdma_user_mmap_io);
-
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
struct rdma_umap_priv *priv, *next_priv;
@@ -1018,6 +953,11 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
zap_vma_ptes(vma, vma->vm_start,
vma->vm_end - vma->vm_start);
+
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
@@ -1139,7 +1079,7 @@ static const struct file_operations uverbs_fops = {
.release = ib_uverbs_close,
.llseek = no_llseek,
.unlocked_ioctl = ib_uverbs_ioctl,
- .compat_ioctl = ib_uverbs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static const struct file_operations uverbs_mmap_fops = {
@@ -1150,7 +1090,7 @@ static const struct file_operations uverbs_mmap_fops = {
.release = ib_uverbs_close,
.llseek = no_llseek,
.unlocked_ioctl = ib_uverbs_ioctl,
- .compat_ioctl = ib_uverbs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 35c2841a569e..dd765e176cdd 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -244,6 +244,8 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
/**
* ib_alloc_pd - Allocates an unused protection domain.
* @device: The device on which to allocate the protection domain.
+ * @flags: protection domain flags
+ * @caller: caller's build-time module name
*
* A protection domain object provides an association between QPs, shared
* receive queues, address handles, memory regions, and memory windows.
@@ -2459,6 +2461,16 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
}
EXPORT_SYMBOL(ib_set_vf_guid);
+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ if (!device->ops.get_vf_guid)
+ return -EOPNOTSUPP;
+
+ return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
+}
+EXPORT_SYMBOL(ib_get_vf_guid);
/**
* ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
* information) and set an appropriate memory region for registration.
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 433fca59febd..0aeccd984889 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
-obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index ab8779d23382..b83f1cc38c52 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_BNXT_RE
- tristate "Broadcom Netxtreme HCA support"
- depends on 64BIT
- depends on ETHERNET && NETDEVICES && PCI && INET && DCB
- select NET_VENDOR_BROADCOM
- select BNXT
- ---help---
+ tristate "Broadcom Netxtreme HCA support"
+ depends on 64BIT
+ depends on ETHERNET && NETDEVICES && PCI && INET && DCB
+ select NET_VENDOR_BROADCOM
+ select BNXT
+ ---help---
This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
RoCE HCAs. To compile this driver as a module, choose M here:
the module will be called bnxt_re.
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index e55a1666c0cd..725b2350e349 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -108,6 +108,7 @@ struct bnxt_re_sqp_entries {
#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
+#define BNXT_RE_GEN_P5_MAX_VF 64
struct bnxt_re_dev {
struct ib_device ibdev;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index b4149dc9e824..9b6ca15a183c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -191,24 +191,6 @@ int bnxt_re_query_device(struct ib_device *ibdev,
return 0;
}
-int bnxt_re_modify_device(struct ib_device *ibdev,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- switch (device_modify_mask) {
- case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
- /* Modify the GUID requires the modification of the GID table */
- /* GUID should be made as READ-ONLY */
- break;
- case IB_DEVICE_MODIFY_NODE_DESC:
- /* Node Desc should be made as READ-ONLY */
- break;
- default:
- break;
- }
- return 0;
-}
-
/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr)
@@ -855,7 +837,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes += (qplib_qp->sq.max_wqe * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -869,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(udata, ureq.qprva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
@@ -1322,7 +1304,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -2565,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->umem = ib_umem_get(udata, req.cq_va,
entries * sizeof(struct cq_base),
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
rc = PTR_ERR(cq->umem);
goto fail;
@@ -3530,7 +3512,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 31662b1ee35a..23d972da5652 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -145,9 +145,6 @@ struct bnxt_re_ucontext {
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
-int bnxt_re_modify_device(struct ib_device *ibdev,
- int device_modify_mask,
- struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 30a54f8aa42c..e7e8a0f49464 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -119,61 +119,76 @@ static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
* reserved for the function. The driver may choose to allocate fewer
* resources than the firmware maximum.
*/
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
- u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
- u32 i;
- u32 vf_pct;
- u32 num_vfs;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *attr;
+ struct bnxt_qplib_ctx *ctx;
+ int i;
- rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
- dev_attr->max_qp);
+ attr = &rdev->dev_attr;
+ ctx = &rdev->qplib_ctx;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ attr->max_qp);
+ ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
/* Use max_mr from fw since max_mrw does not get set */
- rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
- dev_attr->max_mr);
- rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
- dev_attr->max_srq);
- rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
- dev_attr->max_cq);
-
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-
- if (rdev->num_vfs) {
- /*
- * Reserve a set of resources for the PF. Divide the remaining
- * resources among the VFs
- */
- vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
- num_vfs = 100 * rdev->num_vfs;
- vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
- vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
- vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
- /*
- * The driver allows many more MRs than other resources. If the
- * firmware does also, then reserve a fixed amount for the PF
- * and divide the rest among VFs. VFs may use many MRs for NFS
- * mounts, ISER, NVME applications, etc. If the firmware
- * severely restricts the number of MRs, then let PF have
- * half and divide the rest among VFs, as for the other
- * resource types.
- */
- if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
- vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
- else
- vf_mrws = (rdev->qplib_ctx.mrw_count -
- BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
- vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
+ ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ attr->max_srq);
+ ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+}
+
+static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
+{
+ struct bnxt_qplib_vf_res *vf_res;
+ u32 mrws = 0;
+ u32 vf_pct;
+ u32 nvfs;
+
+ vf_res = &qplib_ctx->vf_res;
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ nvfs = num_vf;
+ num_vf = 100 * num_vf;
+ vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
+ vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
+ vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF and
+ * divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, ISER, NVME applications, etc. If the firmware severely
+ * restricts the number of MRs, then let PF have half and divide
+ * the rest among VFs, as for the other resource types.
+ */
+ if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
+ mrws = qplib_ctx->mrw_count * vf_pct;
+ nvfs = num_vf;
+ } else {
+ mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
}
- rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
- rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
- rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
- rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
- rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+ vf_res->max_mrw_per_vf = (mrws / nvfs);
+ vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
+}
+
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 num_vfs;
+
+ memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
+ bnxt_re_limit_pf_res(rdev);
+
+ num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
+ if (num_vfs)
+ bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
/* for handling bnxt_en callbacks later */
@@ -193,9 +208,11 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
return;
rdev->num_vfs = num_vfs;
- bnxt_re_set_resource_limits(rdev);
- bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) {
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+ }
}
static void bnxt_re_shutdown(void *p)
@@ -477,6 +494,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@ -625,7 +643,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.map_mr_sg = bnxt_re_map_mr_sg,
.mmap = bnxt_re_mmap,
.modify_ah = bnxt_re_modify_ah,
- .modify_device = bnxt_re_modify_device,
.modify_qp = bnxt_re_modify_qp,
.modify_srq = bnxt_re_modify_srq,
.poll_cq = bnxt_re_poll_cq,
@@ -895,10 +912,14 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
return 0;
}
+#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000
+#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
- 0x10000 : rdev->msix_entries[indx].db_offset;
+ (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
+ BNXT_RE_GEN_P5_PF_NQ_DB) :
+ rdev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -1270,10 +1291,10 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
return;
}
rdev->qplib_ctx.hwrm_intf_ver =
- (u64)resp.hwrm_intf_major << 48 |
- (u64)resp.hwrm_intf_minor << 32 |
- (u64)resp.hwrm_intf_build << 16 |
- resp.hwrm_intf_patch;
+ (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
+ (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
+ (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
+ le16_to_cpu(resp.hwrm_intf_patch);
}
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
@@ -1408,8 +1429,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- if (!rdev->is_virtfn)
- bnxt_re_set_resource_limits(rdev);
+
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
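
The bnxt_re_limit_vf_res() hunk above reserves BNXT_RE_PCT_RSVD_FOR_PF percent of each resource for the PF and spreads the remainder evenly across the VFs (MRs get the special casing described in the comment). A standalone sketch of that arithmetic with made-up numbers; the reserved percentage and the QP context count below are assumptions for illustration, not the driver's real limits.

/* Illustrative only: per-VF share = total * (100 - reserved_pct) / (100 * num_vf),
 * matching the "num_vf = 100 * num_vf" scaling used in bnxt_re_limit_vf_res(). */
#include <stdio.h>

int main(void)
{
	unsigned int qpc_count = 65536;	/* assumed PF-wide QP context count */
	unsigned int rsvd_pct = 25;	/* stand-in for BNXT_RE_PCT_RSVD_FOR_PF */
	unsigned int num_vf = 64;	/* BNXT_RE_GEN_P5_MAX_VF on P5 devices */
	unsigned int per_vf;

	per_vf = qpc_count * (100 - rsvd_pct) / (100 * num_vf);
	printf("%u VFs -> up to %u QPs each, %u%% kept for the PF\n",
	       num_vf, per_vf, rsvd_pct);
	return 0;
}
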
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 60c8f76aab33..5cdfa84faf85 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -494,8 +494,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
* shall setup this area for VF. Skipping the
* HW programming
*/
- if (is_virtfn || bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ if (is_virtfn)
goto skip_ctx_setup;
+ if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ goto config_vf_res;
level = ctx->qpc_tbl.level;
req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
@@ -540,6 +542,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
+config_vf_res:
req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index fbda11a7ab1a..aaa76d792185 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -186,7 +186,9 @@ struct bnxt_qplib_chip_ctx {
u8 chip_metal;
};
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
struct bnxt_qplib_res {
struct pci_dev *pdev;
@@ -203,7 +205,9 @@ struct bnxt_qplib_res {
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
- return (cctx->chip_num == CHIP_NUM_57500);
+ return (cctx->chip_num == CHIP_NUM_57508 ||
+ cctx->chip_num == CHIP_NUM_57504 ||
+ cctx->chip_num == CHIP_NUM_57502);
}
static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
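
With the qplib_res.h change above, gen-P5 detection now matches three chip IDs (57508/57504/57502) rather than the single 0x1750, and the main.c hunk keys the NQ doorbell offset off that test: 0x10000 for a P5 PF, 0x4000 for a P5 VF, otherwise the per-MSIX offset. A compact userspace sketch of the same selection logic; the MSI-X fallback value passed in main() is invented for illustration.

/* Illustrative restatement of bnxt_qplib_is_chip_gen_p5() and
 * bnxt_re_get_nqdb_offset() from the hunks above; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define CHIP_NUM_57508		0x1750
#define CHIP_NUM_57504		0x1751
#define CHIP_NUM_57502		0x1752
#define GEN_P5_PF_NQ_DB		0x10000
#define GEN_P5_VF_NQ_DB		0x4000

static bool is_chip_gen_p5(unsigned int chip_num)
{
	return chip_num == CHIP_NUM_57508 ||
	       chip_num == CHIP_NUM_57504 ||
	       chip_num == CHIP_NUM_57502;
}

static unsigned int nqdb_offset(unsigned int chip_num, bool is_vf,
				unsigned int msix_db_offset)
{
	if (is_chip_gen_p5(chip_num))
		return is_vf ? GEN_P5_VF_NQ_DB : GEN_P5_PF_NQ_DB;
	return msix_db_offset;	/* legacy chips keep the per-MSIX offset */
}

int main(void)
{
	printf("57504 PF NQ doorbell offset: 0x%x\n",
	       nqdb_offset(CHIP_NUM_57504, false, 0x2000 /* assumed */));
	return 0;
}
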
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
deleted file mode 100644
index 8c1a72bff447..000000000000
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_CXGB3
- tristate "Chelsio RDMA Driver"
- depends on CHELSIO_T3
- select GENERIC_ALLOCATOR
- ---help---
- This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
- 10GbE adapters.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module, choose M here: the module
- will be called iw_cxgb3.
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
deleted file mode 100644
index 34bb86a6ae3a..000000000000
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb3
-
-obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
-
-iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
- iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
deleted file mode 100644
index 95b22a651673..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <asm/delay.h>
-
-#include <linux/mutex.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-#include "sge_defs.h"
-
-static LIST_HEAD(rdev_list);
-static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (!strcmp(rdev->dev_name, dev_name))
- return rdev;
- return NULL;
-}
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (rdev->t3cdev_p == tdev)
- return rdev;
- return NULL;
-}
-
-int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit)
-{
- int ret;
- struct t3_cqe *cqe;
- u32 rptr;
-
- struct rdma_cq_op setup;
- setup.id = cq->cqid;
- setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
- setup.op = op;
- ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);
-
- if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
- return ret;
-
- /*
- * If the rearm returned an index other than our current index,
- * then there might be CQE's in flight (being DMA'd). We must wait
- * here for them to complete or the consumer can miss a notification.
- */
- if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
- int i=0;
-
- rptr = cq->rptr;
-
- /*
- * Keep the generation correct by bumping rptr until it
- * matches the index returned by the rearm - 1.
- */
- while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
- rptr++;
-
- /*
- * Now rptr is the index for the (last) cqe that was
- * in-flight at the time the HW rearmed the CQ. We
- * spin until that CQE is valid.
- */
- cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
- while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
- udelay(1);
- if (i++ > 1000000) {
- pr_err("%s: stalled rnic\n", rdev_p->dev_name);
- return -EIO;
- }
- }
-
- return 1;
- }
-
- return 0;
-}
-
-static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
-{
- struct rdma_cq_setup setup;
- setup.id = cqid;
- setup.base_addr = 0; /* NULL address */
- setup.size = 0; /* disaable the CQ */
- setup.credits = 0;
- setup.credit_thres = 0;
- setup.ovfl_mode = 0;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
-{
- u64 sge_cmd;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- pr_debug("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
- T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
- T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = qpid << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
-{
- struct rdma_cq_setup setup;
- int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
-
- size += 1; /* one extra page for storing cq-in-err state */
- cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
- if (!cq->cqid)
- return -ENOMEM;
- if (kernel) {
- cq->sw_queue = kzalloc(size, GFP_KERNEL);
- if (!cq->sw_queue)
- return -ENOMEM;
- }
- cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
- &(cq->dma_addr), GFP_KERNEL);
- if (!cq->queue) {
- kfree(cq->sw_queue);
- return -ENOMEM;
- }
- dma_unmap_addr_set(cq, mapping, cq->dma_addr);
- setup.id = cq->cqid;
- setup.base_addr = (u64) (cq->dma_addr);
- setup.size = 1UL << cq->size_log2;
- setup.credits = 65535;
- setup.credit_thres = 1;
- if (rdev_p->t3cdev_p->type != T3A)
- setup.ovfl_mode = 0;
- else
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
- u32 qpid;
- int i;
-
- mutex_lock(&uctx->lock);
- if (!list_empty(&uctx->qpids)) {
- entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
- entry);
- list_del(&entry->entry);
- qpid = entry->qpid;
- kfree(entry);
- } else {
- qpid = cxio_hal_get_qpid(rdev_p->rscp);
- if (!qpid)
- goto out;
- for (i = qpid+1; i & rdev_p->qpmask; i++) {
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- break;
- entry->qpid = i;
- list_add_tail(&entry->entry, &uctx->qpids);
- }
- }
-out:
- mutex_unlock(&uctx->lock);
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
- struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return;
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- entry->qpid = qpid;
- mutex_lock(&uctx->lock);
- list_add_tail(&entry->entry, &uctx->qpids);
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct list_head *pos, *nxt;
- struct cxio_qpid_list *entry;
-
- mutex_lock(&uctx->lock);
- list_for_each_safe(pos, nxt, &uctx->qpids) {
- entry = list_entry(pos, struct cxio_qpid_list, entry);
- list_del_init(&entry->entry);
- if (!(entry->qpid & rdev_p->qpmask))
- cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
- kfree(entry);
- }
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- INIT_LIST_HEAD(&uctx->qpids);
- mutex_init(&uctx->lock);
-}
-
-int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
- struct t3_wq *wq, struct cxio_ucontext *uctx)
-{
- int depth = 1UL << wq->size_log2;
- int rqsize = 1UL << wq->rq_size_log2;
-
- wq->qpid = get_qpid(rdev_p, uctx);
- if (!wq->qpid)
- return -ENOMEM;
-
- wq->rq = kcalloc(depth, sizeof(struct t3_swrq), GFP_KERNEL);
- if (!wq->rq)
- goto err1;
-
- wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
- if (!wq->rq_addr)
- goto err2;
-
- wq->sq = kcalloc(depth, sizeof(struct t3_swsq), GFP_KERNEL);
- if (!wq->sq)
- goto err3;
-
- wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- depth * sizeof(union t3_wr),
- &(wq->dma_addr), GFP_KERNEL);
- if (!wq->queue)
- goto err4;
-
- dma_unmap_addr_set(wq, mapping, wq->dma_addr);
- wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
- if (!kernel_domain)
- wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
- (wq->qpid << rdev_p->qpshift);
- wq->rdev = rdev_p;
- pr_debug("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n",
- __func__, wq->qpid, wq->doorbell, (unsigned long long)wq->udb);
- return 0;
-err4:
- kfree(wq->sq);
-err3:
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
-err2:
- kfree(wq->rq);
-err1:
- put_qpid(rdev_p, wq->qpid, uctx);
- return -ENOMEM;
-}
-
-void cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
-{
- cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
- kfree(cq->sw_queue);
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (cq->size_log2))
- * sizeof(struct t3_cqe) + 1, cq->queue,
- dma_unmap_addr(cq, mapping));
- cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
-}
-
-int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
- struct cxio_ucontext *uctx)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (wq->size_log2))
- * sizeof(union t3_wr), wq->queue,
- dma_unmap_addr(wq, mapping));
- kfree(wq->sq);
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
- kfree(wq->rq);
- put_qpid(rdev_p, wq->qpid, uctx);
- return 0;
-}
-
-static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_cqe cqe;
-
- pr_debug("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(T3_SEND) |
- V_CQE_TYPE(0) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- u32 ptr;
- int flushed = 0;
-
- pr_debug("%s wq %p cq %p\n", __func__, wq, cq);
-
- /* flush RQ */
- pr_debug("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
- wq->rq_rptr, wq->rq_wptr, count);
- ptr = wq->rq_rptr + count;
- while (ptr++ != wq->rq_wptr) {
- insert_recv_cqe(wq, cq);
- flushed++;
- }
- return flushed;
-}
-
-static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
- struct t3_swsq *sqp)
-{
- struct t3_cqe cqe;
-
- pr_debug("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(sqp->opcode) |
- V_CQE_TYPE(1) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- cqe.u.scqe.wrid_hi = sqp->sq_wptr;
-
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- __u32 ptr = wq->sq_rptr + count;
- int flushed = 0;
- struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
-
- while (ptr != wq->sq_wptr) {
- sqp->signaled = 0;
- insert_sq_cqe(wq, cq, sqp);
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- flushed++;
- }
- return flushed;
-}
-
-/*
- * Move all CQEs from the HWCQ into the SWCQ.
- */
-void cxio_flush_hw_cq(struct t3_cq *cq)
-{
- struct t3_cqe *cqe, *swcqe;
-
- pr_debug("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
- cqe = cxio_next_hw_cqe(cq);
- while (cqe) {
- pr_debug("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
- __func__, cq->rptr, cq->sw_wptr);
- swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
- *swcqe = *cqe;
- swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
- cq->sw_wptr++;
- cq->rptr++;
- cqe = cxio_next_hw_cqe(cq);
- }
-}
-
-static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
-{
- if (CQE_OPCODE(*cqe) == T3_TERMINATE)
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
- return 0;
-
- if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
- return 0;
-
- return 1;
-}
-
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if ((SQ_TYPE(*cqe) ||
- ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
- (CQE_QPID(*cqe) == wq->qpid))
- (*count)++;
- ptr++;
- }
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- pr_debug("%s count zero %d\n", __func__, *count);
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
- (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
- (*count)++;
- ptr++;
- }
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
-{
- struct rdma_cq_setup setup;
- setup.id = 0;
- setup.base_addr = 0; /* NULL address */
- setup.size = 1; /* enable the CQ */
- setup.credits = 0;
-
- /* force SGE to redirect to RspQ and interrupt */
- setup.credit_thres = 0;
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- int err;
- u64 sge_cmd, ctx0, ctx1;
- u64 base_addr;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb;
-
- skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- pr_debug("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- err = cxio_hal_init_ctrl_cq(rdev_p);
- if (err) {
- pr_debug("%s err %d initializing ctrl_cq\n", __func__, err);
- goto err;
- }
- rdev_p->ctrl_qp.workq = dma_alloc_coherent(
- &(rdev_p->rnic_info.pdev->dev),
- (1 << T3_CTRL_QP_SIZE_LOG2) *
- sizeof(union t3_wr),
- &(rdev_p->ctrl_qp.dma_addr),
- GFP_KERNEL);
- if (!rdev_p->ctrl_qp.workq) {
- pr_debug("%s dma_alloc_coherent failed\n", __func__);
- err = -ENOMEM;
- goto err;
- }
- dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
- rdev_p->ctrl_qp.dma_addr);
- rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
-
- mutex_init(&rdev_p->ctrl_qp.lock);
- init_waitqueue_head(&rdev_p->ctrl_qp.waitq);
-
- /* update HW Ctrl QP context */
- base_addr = rdev_p->ctrl_qp.dma_addr;
- base_addr >>= 12;
- ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
- V_EC_BASE_LO((u32) base_addr & 0xffff));
- ctx0 <<= 32;
- ctx0 |= V_EC_CREDITS(FW_WR_NUM);
- base_addr >>= 16;
- ctx1 = (u32) base_addr;
- base_addr >>= 32;
- ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
- V_EC_TYPE(0) | V_EC_GEN(1) |
- V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
- wqe = skb_put_zero(skb, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
- T3_CTL_QP_TID, 7, T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- wqe->ctx1 = cpu_to_be64(ctx1);
- wqe->ctx0 = cpu_to_be64(ctx0);
- pr_debug("CtrlQP dma_addr %pad workq %p size %d\n",
- &rdev_p->ctrl_qp.dma_addr, rdev_p->ctrl_qp.workq,
- 1 << T3_CTRL_QP_SIZE_LOG2);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-err:
- kfree_skb(skb);
- return err;
-}
-
-static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << T3_CTRL_QP_SIZE_LOG2)
- * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
- return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
-}
-
-/* write len bytes of data into addr (32B aligned address)
- * If data is NULL, clear len byte of memory to zero.
- * caller acquires the ctrl_qp lock before the call
- */
-static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
- u32 len, void *data)
-{
- u32 i, nr_wqe, copy_len;
- u8 *copy_data;
- u8 wr_len, utx_len; /* length in 8 byte flit */
- enum t3_wr_flags flag;
- __be64 *wqe;
- u64 utx_cmd;
- addr &= 0x7FFFFFF;
- nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
- pr_debug("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
- __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
- nr_wqe, data, addr);
- utx_len = 3; /* in 32B unit */
- for (i = 0; i < nr_wqe; i++) {
- if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2)) {
- pr_debug("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, wait for more space i %d\n",
- __func__,
- rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- !Q_FULL(rdev_p->ctrl_qp.rptr,
- rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2))) {
- pr_debug("%s ctrl_qp workq interrupted\n",
- __func__);
- return -ERESTARTSYS;
- }
- pr_debug("%s ctrl_qp wakeup, continue posting work request i %d\n",
- __func__, i);
- }
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
- flag = 0;
- if (i == (nr_wqe - 1)) {
- /* last WQE */
- flag = T3_COMPLETION_FLAG;
- if (len % 32)
- utx_len = len / 32 + 1;
- else
- utx_len = len / 32;
- }
-
- /*
- * Force a CQE to return the credit to the workq in case
- * we posted more than half the max QP size of WRs
- */
- if ((i != 0) &&
- (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
- flag = T3_COMPLETION_FLAG;
- pr_debug("%s force completion at i %d\n", __func__, i);
- }
-
- /* build the utx mem command */
- wqe += (sizeof(struct t3_bypass_wr) >> 3);
- utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
- utx_cmd <<= 32;
- utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
- *wqe = cpu_to_be64(utx_cmd);
- wqe++;
- copy_data = (u8 *) data + i * 96;
- copy_len = len > 96 ? 96 : len;
-
- /* clear memory content if data is NULL */
- if (data)
- memcpy(wqe, copy_data, copy_len);
- else
- memset(wqe, 0, copy_len);
- if (copy_len % 32)
- memset(((u8 *) wqe) + copy_len, 0,
- 32 - (copy_len % 32));
- wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
- (utx_len << 2);
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
-
- /* wptr in the WRID[31:0] */
- ((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
-
- /*
- * This must be the last write with a memory barrier
- * for the genbit
- */
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
- Q_GENBIT(rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
- wr_len, T3_SOPEOP);
- if (flag == T3_COMPLETION_FLAG)
- ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
- len -= 96;
- rdev_p->ctrl_qp.wptr++;
- }
- return 0;
-}
-
-/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
- * OUT: stag index
- * TBD: shared memory region support
- */
-static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
- u32 *stag, u8 stag_state, u32 pdid,
- enum tpt_mem_type type, enum tpt_mem_perm perm,
- u32 zbva, u64 to, u32 len, u8 page_size,
- u32 pbl_size, u32 pbl_addr)
-{
- int err;
- struct tpt_entry tpt;
- u32 stag_idx;
- u32 wptr;
-
- if (cxio_fatal_error(rdev_p))
- return -EIO;
-
- stag_state = stag_state > 0;
- stag_idx = (*stag) >> 8;
-
- if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) {
- stag_idx = cxio_hal_get_stag(rdev_p->rscp);
- if (!stag_idx)
- return -ENOMEM;
- *stag = (stag_idx << 8) | ((*stag) & 0xFF);
- }
- pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
-
- /* write TPT entry */
- if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
- else {
- tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
- V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
- V_TPT_STAG_STATE(stag_state) |
- V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
- BUG_ON(page_size >= 28);
- tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
- ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
- V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
- V_TPT_PAGE_SIZE(page_size));
- tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
- tpt.len = cpu_to_be32(len);
- tpt.va_hi = cpu_to_be32((u32) (to >> 32));
- tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
- tpt.rsvd_bind_cnt_or_pstag = 0;
- tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
- }
- err = cxio_hal_ctrl_qp_write_mem(rdev_p,
- stag_idx +
- (rdev_p->rnic_info.tpt_base >> 5),
- sizeof(tpt), &tpt);
-
- /* release the stag index to free pool */
- if (reset_tpt_entry)
- cxio_hal_put_stag(rdev_p->rscp, stag_idx);
-
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (!err)
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
- return err;
-}
-
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size)
-{
- u32 wptr;
- int err;
-
- pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
- pbl_size);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
- err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
- pbl);
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (err)
- return err;
-
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
- u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- pbl_size, pbl_addr);
-}
-
-int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
- 0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
-}
-
-int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
-{
- struct t3_rdma_init_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
- pr_debug("%s rdev_p %p\n", __func__, rdev_p);
- wqe = __skb_put(skb, sizeof(*wqe));
- wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
- wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
- V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
- wqe->wrid.id1 = 0;
- wqe->qpid = cpu_to_be32(attr->qpid);
- wqe->pdid = cpu_to_be32(attr->pdid);
- wqe->scqid = cpu_to_be32(attr->scqid);
- wqe->rcqid = cpu_to_be32(attr->rcqid);
- wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
- wqe->rq_size = cpu_to_be32(attr->rq_size);
- wqe->mpaattrs = attr->mpaattrs;
- wqe->qpcaps = attr->qpcaps;
- wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
- wqe->rqe_count = cpu_to_be16(attr->rqe_count);
- wqe->flags_rtr_type = cpu_to_be16(attr->flags |
- V_RTR_TYPE(attr->rtr_type) |
- V_CHAN(attr->chan));
- wqe->ord = cpu_to_be32(attr->ord);
- wqe->ird = cpu_to_be32(attr->ird);
- wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
- wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
- wqe->irs = cpu_to_be32(attr->irs);
- skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = ev_cb;
-}
-
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = NULL;
-}
-
-static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
-{
- static int cnt;
- struct cxio_rdev *rdev_p = NULL;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- pr_debug("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x se %0x notify %0x cqbranch %0x creditth %0x\n",
- cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
- RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
- RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
- RSPQ_CREDIT_THRESH(rsp_msg));
- pr_debug("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
- rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
- if (!rdev_p) {
- pr_debug("%s called by t3cdev %p with null ulp\n", __func__,
- t3cdev_p);
- return 0;
- }
- if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
- rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
- wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
- dev_kfree_skb_irq(skb);
- } else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
- dev_kfree_skb_irq(skb);
- else if (cxio_ev_cb)
- (*cxio_ev_cb) (rdev_p, skb);
- else
- dev_kfree_skb_irq(skb);
- cnt++;
- return 0;
-}
-
-/* Caller takes care of locking if needed */
-int cxio_rdev_open(struct cxio_rdev *rdev_p)
-{
- struct net_device *netdev_p = NULL;
- int err = 0;
- if (strlen(rdev_p->dev_name)) {
- if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
- return -EBUSY;
- }
- netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
- if (!netdev_p) {
- return -EINVAL;
- }
- dev_put(netdev_p);
- } else if (rdev_p->t3cdev_p) {
- if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
- return -EBUSY;
- }
- netdev_p = rdev_p->t3cdev_p->lldev;
- strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
- T3_MAX_DEV_NAME_LEN);
- } else {
- pr_debug("%s t3cdev_p or dev_name must be set\n", __func__);
- return -EINVAL;
- }
-
- list_add_tail(&rdev_p->entry, &rdev_list);
-
- pr_debug("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
- memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
- if (!rdev_p->t3cdev_p)
- rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
- rdev_p->t3cdev_p->ulp = (void *) rdev_p;
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
- &(rdev_p->fw_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
- pr_err("fatal firmware version mismatch: need version %u but adapter has version %u\n",
- CXIO_FW_MAJ,
- G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
- err = -EINVAL;
- goto err1;
- }
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
- &(rdev_p->rnic_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
- &(rdev_p->port_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
-
- /*
- * qpshift is the number of bits to shift the qpid left in order
- * to get the correct address of the doorbell for that qp.
- */
- cxio_init_ucontext(rdev_p, &rdev_p->uctx);
- rdev_p->qpshift = PAGE_SHIFT -
- ilog2(65536 >>
- ilog2(rdev_p->rnic_info.udbell_len >>
- PAGE_SHIFT));
- rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
- rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
- pr_debug("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
- __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
- rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
- rdev_p->rnic_info.pbl_base,
- rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
- rdev_p->rnic_info.rqt_top);
- pr_debug("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu qpnr %d qpmask 0x%x\n",
- rdev_p->rnic_info.udbell_len,
- rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
- rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);
-
- err = cxio_hal_init_ctrl_qp(rdev_p);
- if (err) {
- pr_err("%s error %d initializing ctrl_qp\n", __func__, err);
- goto err1;
- }
- err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
- 0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
- T3_MAX_NUM_PD);
- if (err) {
- pr_err("%s error %d initializing hal resources\n",
- __func__, err);
- goto err2;
- }
- err = cxio_hal_pblpool_create(rdev_p);
- if (err) {
- pr_err("%s error %d initializing pbl mem pool\n",
- __func__, err);
- goto err3;
- }
- err = cxio_hal_rqtpool_create(rdev_p);
- if (err) {
- pr_err("%s error %d initializing rqt mem pool\n",
- __func__, err);
- goto err4;
- }
- return 0;
-err4:
- cxio_hal_pblpool_destroy(rdev_p);
-err3:
- cxio_hal_destroy_resource(rdev_p->rscp);
-err2:
- cxio_hal_destroy_ctrl_qp(rdev_p);
-err1:
- rdev_p->t3cdev_p->ulp = NULL;
- list_del(&rdev_p->entry);
- return err;
-}
-
-void cxio_rdev_close(struct cxio_rdev *rdev_p)
-{
- if (rdev_p) {
- cxio_hal_pblpool_destroy(rdev_p);
- cxio_hal_rqtpool_destroy(rdev_p);
- list_del(&rdev_p->entry);
- cxio_hal_destroy_ctrl_qp(rdev_p);
- cxio_hal_destroy_resource(rdev_p->rscp);
- rdev_p->t3cdev_p->ulp = NULL;
- }
-}
-
-int __init cxio_hal_init(void)
-{
- if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
- return -ENOMEM;
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
- return 0;
-}
-
-void __exit cxio_hal_exit(void)
-{
- struct cxio_rdev *rdev, *tmp;
-
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
- list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
- cxio_rdev_close(rdev);
- cxio_hal_destroy_rhdl_resource();
-}
-
-static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_swsq *sqp;
- __u32 ptr = wq->sq_rptr;
- int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
-
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- while (count--)
- if (!sqp->signaled) {
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- } else if (sqp->complete) {
-
- /*
- * Insert this completed cqe into the swcq.
- */
- pr_debug("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
- __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
- Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
- sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
- = sqp->cqe;
- cq->sw_wptr++;
- sqp->signaled = 0;
- break;
- } else
- break;
-}
-
-static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
- struct t3_cqe *read_cqe)
-{
- read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
- read_cqe->len = wq->oldest_read->read_len;
- read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
- V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
- V_CQE_OPCODE(T3_READ_REQ) |
- V_CQE_TYPE(1));
-}
-
-/*
- * Return a ptr to the next read wr in the SWSQ or NULL.
- */
-static void advance_oldest_read(struct t3_wq *wq)
-{
-
- u32 rptr = wq->oldest_read - wq->sq + 1;
- u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
-
- while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
- wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
-
- if (wq->oldest_read->opcode == T3_READ_REQ)
- return;
- rptr++;
- }
- wq->oldest_read = NULL;
-}
-
-/*
- * cxio_poll_cq
- *
- * Caller must:
- * check the validity of the first CQE,
- * supply the wq assicated with the qpid.
- *
- * credit: cq credit to return to sge.
- * cqe_flushed: 1 iff the CQE is flushed.
- * cqe: copy of the polled CQE.
- *
- * return value:
- * 0 CQE returned,
- * -1 CQE skipped, try again.
- */
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit)
-{
- int ret = 0;
- struct t3_cqe *hw_cqe, read_cqe;
-
- *cqe_flushed = 0;
- *credit = 0;
- hw_cqe = cxio_next_cqe(cq);
-
- pr_debug("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
- CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
- CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
- CQE_WRID_LOW(*hw_cqe));
-
- /*
- * skip cqe's not affiliated with a QP.
- */
- if (wq == NULL) {
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Gotta tweak READ completions:
- * 1) the cqe doesn't contain the sq_wptr from the wr.
- * 2) opcode not reflected from the wr.
- * 3) read_len not reflected from the wr.
- * 4) cq_type is RQ_TYPE not SQ_TYPE.
- */
- if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
-
- /*
- * If this is an unsolicited read response, then the read
- * was generated by the kernel driver as part of peer-2-peer
- * connection setup. So ignore the completion.
- */
- if (!wq->oldest_read) {
- if (CQE_STATUS(*hw_cqe))
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Don't write to the HWCQ, so create a new read req CQE
- * in local memory.
- */
- create_read_req_cqe(wq, hw_cqe, &read_cqe);
- hw_cqe = &read_cqe;
- advance_oldest_read(wq);
- }
-
- /*
- * T3A: Discard TERMINATE CQEs.
- */
- if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
- ret = -1;
- wq->error = 1;
- goto skip_cqe;
- }
-
- if (CQE_STATUS(*hw_cqe) || wq->error) {
- *cqe_flushed = wq->error;
- wq->error = 1;
-
- /*
- * T3A inserts errors into the CQE. We cannot return
- * these as work completions.
- */
- /* incoming write failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
- && RQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
- /* incoming read request failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
-
- /* incoming SEND with no receive posted failures */
- if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- ret = -1;
- goto skip_cqe;
- }
- BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
- goto proc_cqe;
- }
-
- /*
- * RECV completion.
- */
- if (RQ_TYPE(*hw_cqe)) {
-
- /*
- * HW only validates 4 bits of MSN. So we must validate that
- * the MSN in the SEND is the next expected MSN. If its not,
- * then we complete this with TPT_ERR_MSN and mark the wq in
- * error.
- */
-
- if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
- wq->error = 1;
- hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
- goto proc_cqe;
- }
- goto proc_cqe;
- }
-
- /*
- * If we get here its a send completion.
- *
- * Handle out of order completion. These get stuffed
- * in the SW SQ. Then the SW SQ is walked to move any
- * now in-order completions into the SW CQ. This handles
- * 2 cases:
- * 1) reaping unsignaled WRs when the first subsequent
- * signaled WR is completed.
- * 2) out of order read completions.
- */
- if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
- struct t3_swsq *sqp;
-
- pr_debug("%s out of order completion going in swsq at idx %ld\n",
- __func__,
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe),
- wq->sq_size_log2));
- sqp = wq->sq +
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
- sqp->cqe = *hw_cqe;
- sqp->complete = 1;
- ret = -1;
- goto flush_wq;
- }
-
-proc_cqe:
- *cqe = *hw_cqe;
-
- /*
- * Reap the associated WR(s) that are freed up with this
- * completion.
- */
- if (SQ_TYPE(*hw_cqe)) {
- wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
- pr_debug("%s completing sq idx %ld\n", __func__,
- Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
- *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
- wq->sq_rptr++;
- } else {
- pr_debug("%s completing rq idx %ld\n", __func__,
- Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
- *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
- if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
- cxio_hal_pblpool_free(wq->rdev,
- wq->rq[Q_PTR2IDX(wq->rq_rptr,
- wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
- BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
- wq->rq_rptr++;
- }
-
-flush_wq:
- /*
- * Flush any completed cqes that are now in-order.
- */
- flush_completed_wrs(wq, cq);
-
-skip_cqe:
- if (SW_CQE(*hw_cqe)) {
- pr_debug("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->sw_rptr);
- ++cq->sw_rptr;
- } else {
- pr_debug("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->rptr);
- ++cq->rptr;
-
- /*
- * T3A: compute credits.
- */
- if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
- || ((cq->rptr - cq->wptr) >= 128)) {
- *credit = cq->rptr - cq->wptr;
- cq->wptr = cq->rptr;
- }
- }
- return ret;
-}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
deleted file mode 100644
index 40c029ffa425..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_HAL_H__
-#define __CXIO_HAL_H__
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/kfifo.h>
-
-#include "t3_cpl.h"
-#include "t3cdev.h"
-#include "cxgb3_ctl_defs.h"
-#include "cxio_wr.h"
-
-#define T3_CTRL_QP_ID FW_RI_SGEEC_START
-#define T3_CTL_QP_TID FW_RI_TID_START
-#define T3_CTRL_QP_SIZE_LOG2 8
-#define T3_CTRL_CQ_ID 0
-
-#define T3_MAX_NUM_RI (1<<15)
-#define T3_MAX_NUM_QP (1<<15)
-#define T3_MAX_NUM_CQ (1<<15)
-#define T3_MAX_NUM_PD (1<<15)
-#define T3_MAX_PBL_SIZE 256
-#define T3_MAX_RQ_SIZE 1024
-#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 65536
-#define T3_MAX_NUM_STAG (1<<15)
-#define T3_MAX_MR_SIZE 0x100000000ULL
-#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
-
-#define T3_STAG_UNSET 0xffffffff
-
-#define T3_MAX_DEV_NAME_LEN 32
-
-#define CXIO_FW_MAJ 7
-
-struct cxio_hal_ctrl_qp {
- u32 wptr;
- u32 rptr;
- struct mutex lock; /* for the wtpr, can sleep */
- wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
- union t3_wr *workq; /* the work request queue */
- dma_addr_t dma_addr; /* pci bus address of the workq */
- DEFINE_DMA_UNMAP_ADDR(mapping);
- void __iomem *doorbell;
-};
-
-struct cxio_hal_resource {
- struct kfifo tpt_fifo;
- spinlock_t tpt_fifo_lock;
- struct kfifo qpid_fifo;
- spinlock_t qpid_fifo_lock;
- struct kfifo cqid_fifo;
- spinlock_t cqid_fifo_lock;
- struct kfifo pdid_fifo;
- spinlock_t pdid_fifo_lock;
-};
-
-struct cxio_qpid_list {
- struct list_head entry;
- u32 qpid;
-};
-
-struct cxio_ucontext {
- struct list_head qpids;
- struct mutex lock;
-};
-
-struct cxio_rdev {
- char dev_name[T3_MAX_DEV_NAME_LEN];
- struct t3cdev *t3cdev_p;
- struct rdma_info rnic_info;
- struct adap_ports port_info;
- struct cxio_hal_resource *rscp;
- struct cxio_hal_ctrl_qp ctrl_qp;
- void *ulp;
- unsigned long qpshift;
- u32 qpnr;
- u32 qpmask;
- struct cxio_ucontext uctx;
- struct gen_pool *pbl_pool;
- struct gen_pool *rqt_pool;
- struct list_head entry;
- struct ch_embedded_info fw_info;
- u32 flags;
-#define CXIO_ERROR_FATAL 1
-};
-
-static inline int cxio_fatal_error(struct cxio_rdev *rdev_p)
-{
- return rdev_p->flags & CXIO_ERROR_FATAL;
-}
-
-static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
-{
- return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5));
-}
-
-typedef void (*cxio_hal_ev_callback_func_t) (struct cxio_rdev * rdev_p,
- struct sk_buff * skb);
-
-#define RSPQ_CQID(rsp) (be32_to_cpu(rsp->cq_ptrid) & 0xffff)
-#define RSPQ_CQPTR(rsp) ((be32_to_cpu(rsp->cq_ptrid) >> 16) & 0xffff)
-#define RSPQ_GENBIT(rsp) ((be32_to_cpu(rsp->flags) >> 16) & 1)
-#define RSPQ_OVERFLOW(rsp) ((be32_to_cpu(rsp->flags) >> 17) & 1)
-#define RSPQ_AN(rsp) ((be32_to_cpu(rsp->flags) >> 18) & 1)
-#define RSPQ_SE(rsp) ((be32_to_cpu(rsp->flags) >> 19) & 1)
-#define RSPQ_NOTIFY(rsp) ((be32_to_cpu(rsp->flags) >> 20) & 1)
-#define RSPQ_CQBRANCH(rsp) ((be32_to_cpu(rsp->flags) >> 21) & 1)
-#define RSPQ_CREDIT_THRESH(rsp) ((be32_to_cpu(rsp->flags) >> 22) & 1)
-
-struct respQ_msg_t {
- __be32 flags; /* flit 0 */
- __be32 cq_ptrid;
- __be64 rsvd; /* flit 1 */
- struct t3_cqe cqe; /* flits 2-3 */
-};
-
-enum t3_cq_opcode {
- CQ_ARM_AN = 0x2,
- CQ_ARM_SE = 0x6,
- CQ_FORCE_AN = 0x3,
- CQ_CREDIT_UPDATE = 0x7
-};
-
-int cxio_rdev_open(struct cxio_rdev *rdev);
-void cxio_rdev_close(struct cxio_rdev *rdev);
-int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
-void cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
-void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size);
-int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr);
-int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
-int cxio_allocate_stag(struct cxio_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr);
-int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
-int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
-int __init cxio_hal_init(void);
-void __exit cxio_hal_exit(void);
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_flush_hw_cq(struct t3_cq *cq);
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit);
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
deleted file mode 100644
index c6e7bc4420b6..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* Crude resource management */
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-
-static struct kfifo rhdl_fifo;
-static spinlock_t rhdl_fifo_lock;
-
-#define RANDOM_SIZE 16
-
-static int __cxio_init_resource_fifo(struct kfifo *fifo,
- spinlock_t *fifo_lock,
- u32 nr, u32 skip_low,
- u32 skip_high,
- int random)
-{
- u32 i, j, entry = 0, idx;
- u32 random_bytes;
- u32 rarray[16];
- spin_lock_init(fifo_lock);
-
- if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 0; i < skip_low + skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
- if (random) {
- j = 0;
- random_bytes = prandom_u32();
- for (i = 0; i < RANDOM_SIZE; i++)
- rarray[i] = i + skip_low;
- for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
- if (j >= RANDOM_SIZE) {
- j = 0;
- random_bytes = prandom_u32();
- }
- idx = (random_bytes >> (j * 2)) & 0xF;
- kfifo_in(fifo,
- (unsigned char *) &rarray[idx],
- sizeof(u32));
- rarray[idx] = i;
- j++;
- }
- for (i = 0; i < RANDOM_SIZE; i++)
- kfifo_in(fifo,
- (unsigned char *) &rarray[i],
- sizeof(u32));
- } else
- for (i = skip_low; i < nr - skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
-
- for (i = 0; i < skip_low + skip_high; i++)
- if (kfifo_out_locked(fifo, (unsigned char *) &entry,
- sizeof(u32), fifo_lock) != sizeof(u32))
- break;
- return 0;
-}
-
-static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 0));
-}
-
-static int cxio_init_resource_fifo_random(struct kfifo *fifo,
- spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
-
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 1));
-}
-
-static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
-{
- u32 i;
-
- spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
-
- if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
- GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 16; i < T3_MAX_NUM_QP; i++)
- if (!(i & rdev_p->qpmask))
- kfifo_in(&rdev_p->rscp->qpid_fifo,
- (unsigned char *) &i, sizeof(u32));
- return 0;
-}
-
-int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
-{
- return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1,
- 0);
-}
-
-void cxio_hal_destroy_rhdl_resource(void)
-{
- kfifo_free(&rhdl_fifo);
-}
-
-/* nr_* must be power of 2 */
-int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid)
-{
- int err = 0;
- struct cxio_hal_resource *rscp;
-
- rscp = kmalloc(sizeof(*rscp), GFP_KERNEL);
- if (!rscp)
- return -ENOMEM;
- rdev_p->rscp = rscp;
- err = cxio_init_resource_fifo_random(&rscp->tpt_fifo,
- &rscp->tpt_fifo_lock,
- nr_tpt, 1, 0);
- if (err)
- goto tpt_err;
- err = cxio_init_qpid_fifo(rdev_p);
- if (err)
- goto qpid_err;
- err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock,
- nr_cqid, 1, 0);
- if (err)
- goto cqid_err;
- err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock,
- nr_pdid, 1, 0);
- if (err)
- goto pdid_err;
- return 0;
-pdid_err:
- kfifo_free(&rscp->cqid_fifo);
-cqid_err:
- kfifo_free(&rscp->qpid_fifo);
-qpid_err:
- kfifo_free(&rscp->tpt_fifo);
-tpt_err:
- return -ENOMEM;
-}
-
-/*
- * returns 0 if no resource available
- */
-static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
-{
- u32 entry;
- if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
- return entry;
- else
-		return 0;	/* fifo empty */
-}
-
-static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
- u32 entry)
-{
- BUG_ON(
- kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)
- == 0);
-}
-
-u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
-}
-
-void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
-{
- cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
-}
-
-u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
-{
- u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
- &rscp->qpid_fifo_lock);
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
-{
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
-}
-
-u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
-}
-
-void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
-{
- cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
-}
-
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
-}
-
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
-{
- cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
-}
-
-void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
-{
- kfifo_free(&rscp->tpt_fifo);
- kfifo_free(&rscp->cqid_fifo);
- kfifo_free(&rscp->qpid_fifo);
- kfifo_free(&rscp->pdid_fifo);
- kfree(rscp);
-}
-
-/*
- * PBL Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
-
-u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
- return (u32)addr;
-}
-
-void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
- gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
-}
-
-int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned pbl_start, pbl_chunk;
-
- rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
- if (!rdev_p->pbl_pool)
- return -ENOMEM;
-
- pbl_start = rdev_p->rnic_info.pbl_base;
- pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
-
- while (pbl_start < rdev_p->rnic_info.pbl_top) {
- pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
- pbl_chunk);
- if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
- pr_debug("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
- pr_warn("%s: Failed to add all PBL chunks (%x/%x)\n",
- __func__, pbl_start,
- rdev_p->rnic_info.pbl_top - pbl_start);
- return 0;
- }
- pbl_chunk >>= 1;
- } else {
- pr_debug("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- pbl_start += pbl_chunk;
- }
- }
-
- return 0;
-}
-
-void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->pbl_pool);
-}
-
-/*
- * RQT Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_RQT_SHIFT 10  /* 1KB == min RQT size (16 entries) */
-#define RQT_CHUNK 2*1024*1024
-
-u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
- return (u32)addr;
-}
-
-void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
- gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
-}
-
-int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned long i;
- rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
- if (rdev_p->rqt_pool)
- for (i = rdev_p->rnic_info.rqt_base;
- i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
- i += RQT_CHUNK)
- gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
- return rdev_p->rqt_pool ? 0 : -ENOMEM;
-}
-
-void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->rqt_pool);
-}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.h b/drivers/infiniband/hw/cxgb3/cxio_resource.h
deleted file mode 100644
index a2703a3d882d..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_RESOURCE_H__
-#define __CXIO_RESOURCE_H__
-
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/genalloc.h>
-#include "cxio_hal.h"
-
-extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
-extern void cxio_hal_destroy_rhdl_resource(void);
-extern int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid,
- u32 nr_pdid);
-extern u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag);
-extern u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid);
-extern u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid);
-extern void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp);
-
-#define PBL_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.pbl_base )
-extern int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-
-#define RQT_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.rqt_base )
-extern int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
deleted file mode 100644
index 53aa5c36247a..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_WR_H__
-#define __CXIO_WR_H__
-
-#include <asm/io.h>
-#include <linux/pci.h>
-#include <linux/timer.h>
-#include "firmware_exports.h"
-
-#define T3_MAX_SGE 4
-#define T3_MAX_INLINE 64
-#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
-#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
-#define T3_STAG0_PAGE_SHIFT 15
-
-#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
-#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
- ((rptr)!=(wptr)) )
-#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
-#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
-#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
-#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
-
-static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
-{
- writel(((1<<31) | qpid), doorbell);
-}
-
-#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
-
-enum t3_wr_flags {
- T3_COMPLETION_FLAG = 0x01,
- T3_NOTIFY_FLAG = 0x02,
- T3_SOLICITED_EVENT_FLAG = 0x04,
- T3_READ_FENCE_FLAG = 0x08,
- T3_LOCAL_FENCE_FLAG = 0x10
-} __packed;
-
-enum t3_wr_opcode {
- T3_WR_BP = FW_WROPCODE_RI_BYPASS,
- T3_WR_SEND = FW_WROPCODE_RI_SEND,
- T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
- T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
- T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
- T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
- T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
- T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
- T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
- T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
-} __packed;
-
-enum t3_rdma_opcode {
- T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
- T3_READ_REQ,
- T3_READ_RESP,
- T3_SEND,
- T3_SEND_WITH_INV,
- T3_SEND_WITH_SE,
- T3_SEND_WITH_SE_INV,
- T3_TERMINATE,
- T3_RDMA_INIT, /* CHELSIO RI specific ... */
- T3_BIND_MW,
- T3_FAST_REGISTER,
- T3_LOCAL_INV,
- T3_QP_MOD,
- T3_BYPASS,
- T3_RDMA_READ_REQ_WITH_INV,
-} __packed;
-
-static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
-{
- switch (wrop) {
- case T3_WR_BP: return T3_BYPASS;
- case T3_WR_SEND: return T3_SEND;
- case T3_WR_WRITE: return T3_RDMA_WRITE;
- case T3_WR_READ: return T3_READ_REQ;
- case T3_WR_INV_STAG: return T3_LOCAL_INV;
- case T3_WR_BIND: return T3_BIND_MW;
- case T3_WR_INIT: return T3_RDMA_INIT;
- case T3_WR_QP_MOD: return T3_QP_MOD;
- case T3_WR_FASTREG: return T3_FAST_REGISTER;
- default: break;
- }
- return -1;
-}
-
-
-/* Work request id */
-union t3_wrid {
- struct {
- u32 hi;
- u32 low;
- } id0;
- u64 id1;
-};
-
-#define WRID(wrid) (wrid.id1)
-#define WRID_GEN(wrid) (wrid.id0.wr_gen)
-#define WRID_IDX(wrid) (wrid.id0.wr_idx)
-#define WRID_LO(wrid) (wrid.id0.wr_lo)
-
-struct fw_riwrh {
- __be32 op_seop_flags;
- __be32 gen_tid_len;
-};
-
-#define S_FW_RIWR_OP 24
-#define M_FW_RIWR_OP 0xff
-#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
-#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
-
-#define S_FW_RIWR_SOPEOP 22
-#define M_FW_RIWR_SOPEOP 0x3
-#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
-
-#define S_FW_RIWR_FLAGS 8
-#define M_FW_RIWR_FLAGS 0x3fffff
-#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
-#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
-
-#define S_FW_RIWR_TID 8
-#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
-
-#define S_FW_RIWR_LEN 0
-#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
-
-#define S_FW_RIWR_GEN 31
-#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
-
-struct t3_sge {
- __be32 stag;
- __be32 len;
- __be64 to;
-};
-
-/* If num_sgle is zero, flit 5+ contains immediate data.*/
-struct t3_send_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
-
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 rem_stag;
- __be32 plen; /* 3 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
-};
-
-#define T3_MAX_FASTREG_DEPTH 10
-#define T3_MAX_FASTREG_FRAG 10
-
-struct t3_fastreg_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 len;
- __be32 va_base_hi; /* 3 */
- __be32 va_base_lo_fbo;
- __be32 page_type_perms; /* 4 */
- __be32 reserved1;
- __be64 pbl_addrs[0]; /* 5+ */
-};
-
-/*
- * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like this.
- */
-struct t3_pbl_frag {
- struct fw_riwrh wrh; /* 0 */
- __be64 pbl_addrs[14]; /* 1..14 */
-};
-
-#define S_FR_PAGE_COUNT 24
-#define M_FR_PAGE_COUNT 0xff
-#define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
-#define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
-
-#define S_FR_PAGE_SIZE 16
-#define M_FR_PAGE_SIZE 0x1f
-#define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
-#define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
-
-#define S_FR_TYPE 8
-#define M_FR_TYPE 0x1
-#define V_FR_TYPE(x) ((x) << S_FR_TYPE)
-#define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
-
-#define S_FR_PERMS 0
-#define M_FR_PERMS 0xff
-#define V_FR_PERMS(x) ((x) << S_FR_PERMS)
-#define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
-
-struct t3_local_inv_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 reserved;
-};
-
-struct t3_rdma_write_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 stag_sink;
- __be64 to_sink; /* 3 */
- __be32 plen; /* 4 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
-};
-
-struct t3_rdma_read_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 local_inv;
- u8 reserved[2];
- __be32 rem_stag;
- __be64 rem_to; /* 3 */
- __be32 local_stag; /* 4 */
- __be32 local_len;
- __be64 local_to; /* 5 */
-};
-
-struct t3_bind_mw_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u16 reserved; /* 2 */
- u8 type;
- u8 perms;
- __be32 mr_stag;
- __be32 mw_stag; /* 3 */
- __be32 mw_len;
- __be64 mw_va; /* 4 */
- __be32 mr_pbl_addr; /* 5 */
- u8 reserved2[3];
- u8 mr_pagesz;
-};
-
-struct t3_receive_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 pagesz[T3_MAX_SGE];
- __be32 num_sgle; /* 2 */
- struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
- __be32 pbl_addr[T3_MAX_SGE];
-};
-
-struct t3_bypass_wr {
- struct fw_riwrh wrh;
- union t3_wrid wrid; /* 1 */
-};
-
-struct t3_modify_qp_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 flags; /* 2 */
- __be32 quiesce; /* 2 */
- __be32 max_ird; /* 3 */
- __be32 max_ord; /* 3 */
- __be64 sge_cmd; /* 4 */
- __be64 ctx1; /* 5 */
- __be64 ctx0; /* 6 */
-};
-
-enum t3_modify_qp_flags {
- MODQP_QUIESCE = 0x01,
- MODQP_MAX_IRD = 0x02,
- MODQP_MAX_ORD = 0x04,
- MODQP_WRITE_EC = 0x08,
- MODQP_READ_EC = 0x10,
-};
-
-
-enum t3_mpa_attrs {
- uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
- uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
- uP_RI_MPA_CRC_ENABLE = 0x4,
- uP_RI_MPA_IETF_ENABLE = 0x8
-} __packed;
-
-enum t3_qp_caps {
- uP_RI_QP_RDMA_READ_ENABLE = 0x01,
- uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
- uP_RI_QP_BIND_ENABLE = 0x04,
- uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
- uP_RI_QP_STAG0_ENABLE = 0x10
-} __packed;
-
-enum rdma_init_rtr_types {
- RTR_READ = 1,
- RTR_WRITE = 2,
- RTR_SEND = 3,
-};
-
-#define S_RTR_TYPE 2
-#define M_RTR_TYPE 0x3
-#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
-#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
-
-#define S_CHAN 4
-#define M_CHAN 0x3
-#define V_CHAN(x) ((x) << S_CHAN)
-#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
-
-struct t3_rdma_init_attr {
- u32 tid;
- u32 qpid;
- u32 pdid;
- u32 scqid;
- u32 rcqid;
- u32 rq_addr;
- u32 rq_size;
- enum t3_mpa_attrs mpaattrs;
- enum t3_qp_caps qpcaps;
- u16 tcp_emss;
- u32 ord;
- u32 ird;
- u64 qp_dma_addr;
- u32 qp_dma_size;
- enum rdma_init_rtr_types rtr_type;
- u16 flags;
- u16 rqe_count;
- u32 irs;
- u32 chan;
-};
-
-struct t3_rdma_init_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 qpid; /* 2 */
- __be32 pdid;
- __be32 scqid; /* 3 */
- __be32 rcqid;
- __be32 rq_addr; /* 4 */
- __be32 rq_size;
- u8 mpaattrs; /* 5 */
- u8 qpcaps;
- __be16 ulpdu_size;
- __be16 flags_rtr_type;
- __be16 rqe_count;
- __be32 ord; /* 6 */
- __be32 ird;
- __be64 qp_dma_addr; /* 7 */
- __be32 qp_dma_size; /* 8 */
- __be32 irs;
-};
-
-struct t3_genbit {
- u64 flit[15];
- __be64 genbit;
-};
-
-struct t3_wq_in_err {
- u64 flit[13];
- u64 err;
-};
-
-enum rdma_init_wr_flags {
- MPA_INITIATOR = (1<<0),
- PRIV_QP = (1<<1),
-};
-
-union t3_wr {
- struct t3_send_wr send;
- struct t3_rdma_write_wr write;
- struct t3_rdma_read_wr read;
- struct t3_receive_wr recv;
- struct t3_fastreg_wr fastreg;
- struct t3_pbl_frag pbl_frag;
- struct t3_local_inv_wr local_inv;
- struct t3_bind_mw_wr bind;
- struct t3_bypass_wr bypass;
- struct t3_rdma_init_wr init;
- struct t3_modify_qp_wr qp_mod;
- struct t3_genbit genbit;
- struct t3_wq_in_err wq_in_err;
- __be64 flit[16];
-};
-
-#define T3_SQ_CQE_FLIT 13
-#define T3_SQ_COOKIE_FLIT 14
-
-#define T3_RQ_COOKIE_FLIT 13
-#define T3_RQ_CQE_FLIT 14
-
-static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
-{
- return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
-}
-
-enum t3_wr_hdr_bits {
- T3_EOP = 1,
- T3_SOP = 2,
- T3_SOPEOP = T3_EOP|T3_SOP,
-};
-
-static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
- enum t3_wr_flags flags, u8 genbit, u32 tid,
- u8 len, u8 sopeop)
-{
- wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
- V_FW_RIWR_SOPEOP(sopeop) |
- V_FW_RIWR_FLAGS(flags));
- wmb();
- wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
- V_FW_RIWR_TID(tid) |
- V_FW_RIWR_LEN(len));
- /* 2nd gen bit... */
- ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
-}
-
-/*
- * T3 ULP2_TX commands
- */
-enum t3_utx_mem_op {
- T3_UTX_MEM_READ = 2,
- T3_UTX_MEM_WRITE = 3
-};
-
-/* T3 MC7 RDMA TPT entry format */
-
-enum tpt_mem_type {
- TPT_NON_SHARED_MR = 0x0,
- TPT_SHARED_MR = 0x1,
- TPT_MW = 0x2,
- TPT_MW_RELAXED_PROTECTION = 0x3
-};
-
-enum tpt_addr_type {
- TPT_ZBTO = 0,
- TPT_VATO = 1
-};
-
-enum tpt_mem_perm {
- TPT_MW_BIND = 0x10,
- TPT_LOCAL_READ = 0x8,
- TPT_LOCAL_WRITE = 0x4,
- TPT_REMOTE_READ = 0x2,
- TPT_REMOTE_WRITE = 0x1
-};
-
-struct tpt_entry {
- __be32 valid_stag_pdid;
- __be32 flags_pagesize_qpid;
-
- __be32 rsvd_pbl_addr;
- __be32 len;
- __be32 va_hi;
- __be32 va_low_or_fbo;
-
- __be32 rsvd_bind_cnt_or_pstag;
- __be32 rsvd_pbl_size;
-};
-
-#define S_TPT_VALID 31
-#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
-#define F_TPT_VALID V_TPT_VALID(1U)
-
-#define S_TPT_STAG_KEY 23
-#define M_TPT_STAG_KEY 0xFF
-#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
-#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
-
-#define S_TPT_STAG_STATE 22
-#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
-#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
-
-#define S_TPT_STAG_TYPE 20
-#define M_TPT_STAG_TYPE 0x3
-#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
-#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
-
-#define S_TPT_PDID 0
-#define M_TPT_PDID 0xFFFFF
-#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
-#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
-
-#define S_TPT_PERM 28
-#define M_TPT_PERM 0xF
-#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
-#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
-
-#define S_TPT_REM_INV_DIS 27
-#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
-#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
-
-#define S_TPT_ADDR_TYPE 26
-#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
-#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
-
-#define S_TPT_MW_BIND_ENABLE 25
-#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
-#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
-
-#define S_TPT_PAGE_SIZE 20
-#define M_TPT_PAGE_SIZE 0x1F
-#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
-#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
-
-#define S_TPT_PBL_ADDR 0
-#define M_TPT_PBL_ADDR 0x1FFFFFFF
-#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
-#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
-
-#define S_TPT_QPID 0
-#define M_TPT_QPID 0xFFFFF
-#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
-#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
-
-#define S_TPT_PSTAG 0
-#define M_TPT_PSTAG 0xFFFFFF
-#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
-#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
-
-#define S_TPT_PBL_SIZE 0
-#define M_TPT_PBL_SIZE 0xFFFFF
-#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
-#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
-
-/*
- * CQE defs
- */
-struct t3_cqe {
- __be32 header;
- __be32 len;
- union {
- struct {
- __be32 stag;
- __be32 msn;
- } rcqe;
- struct {
- u32 wrid_hi;
- u32 wrid_low;
- } scqe;
- } u;
-};
-
-#define S_CQE_OOO 31
-#define M_CQE_OOO 0x1
-#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
-#define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)
-
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0x7FFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_GENBIT 10
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
-
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))
-#define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))
-#define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))
-#define SQ_TYPE(x) (CQE_TYPE((x)))
-#define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))
-
-#define CQE_SEND_OPCODE(x)( \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
-
-#define CQE_LEN(x) (be32_to_cpu((x).len))
-
-/* used for RQ completion processing */
-#define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))
-#define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))
-
-/* used for SQ completion processing */
-#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
-
-/* generic accessor macros */
-#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
-
-#define TPT_ERR_SUCCESS 0x0
-#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
-						 /* STAG is off limit, being 0, */
- /* or STAG_key mismatch */
-#define TPT_ERR_PDID 0x2 /* PDID mismatch */
-#define TPT_ERR_QPID 0x3 /* QPID mismatch */
-#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
-#define TPT_ERR_WRAP 0x5 /* Wrap error */
-#define TPT_ERR_BOUND                     0x6	 /* base and bounds violation */
-#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
- /* shared memory region */
-#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
- /* shared memory region */
-#define TPT_ERR_ECC 0x9 /* ECC error detected */
-#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
- /* reading PSTAG for a MW */
- /* Invalidate */
-#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
- /* software error */
-#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
-#define TPT_ERR_CRC 0x10 /* CRC error */
-#define TPT_ERR_MARKER 0x11 /* Marker error */
-#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
-#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
-#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
-#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
-#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
-#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
-#define TPT_ERR_MSN 0x18 /* MSN error */
-#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
-#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
- /* or READ_REQ */
-#define TPT_ERR_MSN_GAP 0x1B
-#define TPT_ERR_MSN_RANGE 0x1C
-#define TPT_ERR_IRD_OVERFLOW 0x1D
-#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
- /* software error */
-#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
- /* mismatch) */
-
-struct t3_swsq {
- __u64 wr_id;
- struct t3_cqe cqe;
- __u32 sq_wptr;
- __be32 read_len;
- int opcode;
- int complete;
- int signaled;
-};
-
-struct t3_swrq {
- __u64 wr_id;
- __u32 pbl_addr;
-};
-
-/*
- * A T3 WQ implements both the SQ and RQ.
- */
-struct t3_wq {
- union t3_wr *queue; /* DMA accessible memory */
- dma_addr_t dma_addr; /* DMA address for HW */
- DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
- u32 error; /* 1 once we go to ERROR */
- u32 qpid;
- u32 wptr; /* idx to next available WR slot */
- u32 size_log2; /* total wq size */
- struct t3_swsq *sq; /* SW SQ */
- struct t3_swsq *oldest_read; /* tracks oldest pending read */
- u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
- u32 sq_rptr; /* pending wrs */
- u32 sq_size_log2; /* sq size */
-	struct t3_swrq *rq;		/* SW RQ (holds consumer wr_ids */
- u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
- u32 rq_rptr; /* pending wrs */
- struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
- u32 rq_size_log2; /* rq size */
- u32 rq_addr; /* rq adapter address */
- void __iomem *doorbell; /* kernel db */
- u64 udb; /* user db if any */
- struct cxio_rdev *rdev;
-};
-
-struct t3_cq {
- u32 cqid;
- u32 rptr;
- u32 wptr;
- u32 size_log2;
- dma_addr_t dma_addr;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- struct t3_cqe *queue;
- struct t3_cqe *sw_queue;
- u32 sw_rptr;
- u32 sw_wptr;
-};
-
-#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
- CQE_GENBIT(*cqe))
-
-struct t3_cq_status_page {
- u32 cq_err;
-};
-
-static inline int cxio_cq_in_error(struct t3_cq *cq)
-{
- return ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err;
-}
-
-static inline void cxio_set_cq_in_error(struct t3_cq *cq)
-{
- ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err = 1;
-}
-
-static inline void cxio_set_wq_in_error(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 1;
-}
-
-static inline void cxio_disable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 2;
-}
-
-static inline void cxio_enable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err &= ~2;
-}
-
-static inline int cxio_wq_db_enabled(struct t3_wq *wq)
-{
- return !(wq->queue->wq_in_err.err & 2);
-}
-
-static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
deleted file mode 100644
index 56a8ab6210cf..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-#include <rdma/cxgb3-abi.h>
-#include "iwch.h"
-#include "iwch_cm.h"
-
-#define DRV_VERSION "1.1"
-
-MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
-MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static void open_rnic_dev(struct t3cdev *);
-static void close_rnic_dev(struct t3cdev *);
-static void iwch_event_handler(struct t3cdev *, u32, u32);
-
-struct cxgb3_client t3c_client = {
- .name = "iw_cxgb3",
- .add = open_rnic_dev,
- .remove = close_rnic_dev,
- .handlers = t3c_handlers,
- .redirect = iwch_ep_redirect,
- .event_handler = iwch_event_handler
-};
-
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(dev_mutex);
-
-static void disable_dbs(struct iwch_dev *rnicp)
-{
- unsigned long index;
- struct iwch_qp *qhp;
-
- xa_lock_irq(&rnicp->qps);
- xa_for_each(&rnicp->qps, index, qhp)
- cxio_disable_wq_db(&qhp->wq);
- xa_unlock_irq(&rnicp->qps);
-}
-
-static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
-{
- unsigned long index;
- struct iwch_qp *qhp;
-
- xa_lock_irq(&rnicp->qps);
- xa_for_each(&rnicp->qps, index, qhp) {
- if (ring_db)
- ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
- qhp->wq.qpid);
- cxio_enable_wq_db(&qhp->wq);
- }
- xa_unlock_irq(&rnicp->qps);
-}
-
-static void iwch_db_drop_task(struct work_struct *work)
-{
- struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
- db_drop_task.work);
- enable_dbs(rnicp, 1);
-}
-
-static void rnic_init(struct iwch_dev *rnicp)
-{
- pr_debug("%s iwch_dev %p\n", __func__, rnicp);
- xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
- xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
- xa_init_flags(&rnicp->mrs, XA_FLAGS_LOCK_IRQ);
- INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
-
- rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
- rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
- rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
- rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
- rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
- rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
- rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
- rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
- rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
- rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
- rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
- rnicp->attr.can_resize_wq = 0;
- rnicp->attr.max_rdma_reads_per_qp = 8;
- rnicp->attr.max_rdma_read_resources =
- rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
- rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */
- rnicp->attr.max_rdma_read_depth =
- rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
- rnicp->attr.rq_overflow_handled = 0;
- rnicp->attr.can_modify_ird = 0;
- rnicp->attr.can_modify_ord = 0;
- rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
- rnicp->attr.stag0_value = 1;
- rnicp->attr.zbva_support = 1;
- rnicp->attr.local_invalidate_fence = 1;
- rnicp->attr.cq_overflow_detection = 1;
- return;
-}
-
-static void open_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *rnicp;
-
- pr_debug("%s t3cdev %p\n", __func__, tdev);
- pr_info_once("Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);
- rnicp = ib_alloc_device(iwch_dev, ibdev);
- if (!rnicp) {
- pr_err("Cannot allocate ib device\n");
- return;
- }
- rnicp->rdev.ulp = rnicp;
- rnicp->rdev.t3cdev_p = tdev;
-
- mutex_lock(&dev_mutex);
-
- if (cxio_rdev_open(&rnicp->rdev)) {
- mutex_unlock(&dev_mutex);
- pr_err("Unable to open CXIO rdev\n");
- ib_dealloc_device(&rnicp->ibdev);
- return;
- }
-
- rnic_init(rnicp);
-
- list_add_tail(&rnicp->entry, &dev_list);
- mutex_unlock(&dev_mutex);
-
- if (iwch_register_device(rnicp)) {
- pr_err("Unable to register device\n");
- close_rnic_dev(tdev);
- }
- pr_info("Initialized device %s\n",
- pci_name(rnicp->rdev.rnic_info.pdev));
- return;
-}
-
-static void close_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *dev, *tmp;
- pr_debug("%s t3cdev %p\n", __func__, tdev);
- mutex_lock(&dev_mutex);
- list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
- if (dev->rdev.t3cdev_p == tdev) {
- dev->rdev.flags = CXIO_ERROR_FATAL;
- synchronize_net();
- cancel_delayed_work_sync(&dev->db_drop_task);
- list_del(&dev->entry);
- iwch_unregister_device(dev);
- cxio_rdev_close(&dev->rdev);
- WARN_ON(!xa_empty(&dev->cqs));
- WARN_ON(!xa_empty(&dev->qps));
- WARN_ON(!xa_empty(&dev->mrs));
- ib_dealloc_device(&dev->ibdev);
- break;
- }
- }
- mutex_unlock(&dev_mutex);
-}
-
-static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
-{
- struct cxio_rdev *rdev = tdev->ulp;
- struct iwch_dev *rnicp;
- struct ib_event event;
- u32 portnum = port_id + 1;
- int dispatch = 0;
-
- if (!rdev)
- return;
- rnicp = rdev_to_iwch_dev(rdev);
- switch (evt) {
- case OFFLOAD_STATUS_DOWN: {
- rdev->flags = CXIO_ERROR_FATAL;
- synchronize_net();
- event.event = IB_EVENT_DEVICE_FATAL;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_DOWN: {
- event.event = IB_EVENT_PORT_ERR;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_UP: {
- event.event = IB_EVENT_PORT_ACTIVE;
- dispatch = 1;
- break;
- }
- case OFFLOAD_DB_FULL: {
- disable_dbs(rnicp);
- break;
- }
- case OFFLOAD_DB_EMPTY: {
- enable_dbs(rnicp, 1);
- break;
- }
- case OFFLOAD_DB_DROP: {
- unsigned long delay = 1000;
- unsigned short r;
-
- disable_dbs(rnicp);
- get_random_bytes(&r, 2);
- delay += r & 1023;
-
- /*
- * delay is between 1000-2023 usecs.
- */
- schedule_delayed_work(&rnicp->db_drop_task,
- usecs_to_jiffies(delay));
- break;
- }
- }
-
- if (dispatch) {
- event.device = &rnicp->ibdev;
- event.element.port_num = portnum;
- ib_dispatch_event(&event);
- }
-
- return;
-}
-
-static int __init iwch_init_module(void)
-{
- int err;
-
- err = cxio_hal_init();
- if (err)
- return err;
- err = iwch_cm_init();
- if (err)
- return err;
- cxio_register_ev_cb(iwch_ev_dispatch);
- cxgb3_register_client(&t3c_client);
- return 0;
-}
-
-static void __exit iwch_exit_module(void)
-{
- cxgb3_unregister_client(&t3c_client);
- cxio_unregister_ev_cb(iwch_ev_dispatch);
- iwch_cm_term();
- cxio_hal_exit();
-}
-
-module_init(iwch_init_module);
-module_exit(iwch_exit_module);
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
deleted file mode 100644
index 310a937bffcf..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_H__
-#define __IWCH_H__
-
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/xarray.h>
-#include <linux/workqueue.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-
-struct iwch_pd;
-struct iwch_cq;
-struct iwch_qp;
-struct iwch_mr;
-
-struct iwch_rnic_attributes {
- u32 max_qps;
- u32 max_wrs; /* Max for any SQ/RQ */
- u32 max_sge_per_wr;
- u32 max_sge_per_rdma_write_wr; /* for RDMA Write WR */
- u32 max_cqs;
- u32 max_cqes_per_cq;
- u32 max_mem_regs;
- u32 max_phys_buf_entries; /* for phys buf list */
- u32 max_pds;
-
- /*
- * The memory page sizes supported by this RNIC.
- * Bit position i in bitmap indicates page of
- * size (4k)^i. Phys block list mode unsupported.
- */
- u32 mem_pgsizes_bitmask;
- u64 max_mr_size;
- u8 can_resize_wq;
-
- /*
- * The maximum number of RDMA Reads that can be outstanding
- * per QP with this RNIC as the target.
- */
- u32 max_rdma_reads_per_qp;
-
- /*
- * The maximum number of resources used for RDMA Reads
- * by this RNIC with this RNIC as the target.
- */
- u32 max_rdma_read_resources;
-
- /*
- * The max depth per QP for initiation of RDMA Read
- * by this RNIC.
- */
- u32 max_rdma_read_qp_depth;
-
- /*
- * The maximum depth for initiation of RDMA Read
- * operations by this RNIC on all QPs
- */
- u32 max_rdma_read_depth;
- u8 rq_overflow_handled;
- u32 can_modify_ird;
- u32 can_modify_ord;
- u32 max_mem_windows;
- u32 stag0_value;
- u8 zbva_support;
- u8 local_invalidate_fence;
- u32 cq_overflow_detection;
-};
-
-struct iwch_dev {
- struct ib_device ibdev;
- struct cxio_rdev rdev;
- u32 device_cap_flags;
- struct iwch_rnic_attributes attr;
- struct xarray cqs;
- struct xarray qps;
- struct xarray mrs;
- struct list_head entry;
- struct delayed_work db_drop_task;
-};
-
-static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct iwch_dev, ibdev);
-}
-
-static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
-{
- return container_of(rdev, struct iwch_dev, rdev);
-}
-
-static inline int t3b_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3B;
-}
-
-static inline int t3a_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3A;
-}
-
-static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
-{
- return xa_load(&rhp->cqs, cqid);
-}
-
-static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
-{
- return xa_load(&rhp->qps, qpid);
-}
-
-static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
-{
- return xa_load(&rhp->mrs, mmid);
-}
-
-extern struct cxgb3_client t3c_client;
-extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
deleted file mode 100644
index 0bca72cb4d9a..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ /dev/null
@@ -1,2258 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/skbuff.h>
-#include <linux/timer.h>
-#include <linux/notifier.h>
-#include <linux/inetdevice.h>
-
-#include <net/neighbour.h>
-#include <net/netevent.h>
-#include <net/route.h>
-
-#include "tcb.h"
-#include "cxgb3_offload.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-
-static char *states[] = {
- "idle",
- "listen",
- "connecting",
- "mpa_wait_req",
- "mpa_req_sent",
- "mpa_req_rcvd",
- "mpa_rep_sent",
- "fpdu_mode",
- "aborting",
- "closing",
- "moribund",
- "dead",
- NULL,
-};
-
-int peer2peer = 0;
-module_param(peer2peer, int, 0644);
-MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
-
-static int ep_timeout_secs = 60;
-module_param(ep_timeout_secs, int, 0644);
-MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
- "in seconds (default=60)");
-
-static int mpa_rev = 1;
-module_param(mpa_rev, int, 0644);
-MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
- "1 is spec compliant. (default=1)");
-
-static int markers_enabled = 0;
-module_param(markers_enabled, int, 0644);
-MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
-
-static int crc_enabled = 1;
-module_param(crc_enabled, int, 0644);
-MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
-
-static int rcv_win = 256 * 1024;
-module_param(rcv_win, int, 0644);
-MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
-
-static int snd_win = 32 * 1024;
-module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
-
-static unsigned int nocong = 0;
-module_param(nocong, uint, 0644);
-MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
-
-static unsigned int cong_flavor = 1;
-module_param(cong_flavor, uint, 0644);
-MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-
-static struct workqueue_struct *workq;
-
-static struct sk_buff_head rxq;
-
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(struct timer_list *t);
-static void connect_reply_upcall(struct iwch_ep *ep, int status);
-
-static void start_ep_timer(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- if (timer_pending(&ep->timer)) {
- pr_debug("%s stopped / restarted timer ep %p\n", __func__, ep);
- del_timer_sync(&ep->timer);
- } else
- get_ep(&ep->com);
- ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- add_timer(&ep->timer);
-}
-
-static void stop_ep_timer(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- if (!timer_pending(&ep->timer)) {
-		WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
- __func__, ep, ep->com.state);
- return;
- }
- del_timer_sync(&ep->timer);
- put_ep(&ep->com);
-}
-
-static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = l2t_send(tdev, skb, l2e);
- if (error < 0)
- kfree_skb(skb);
- return error < 0 ? error : 0;
-}
-
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = cxgb3_ofld_send(tdev, skb);
- if (error < 0)
- kfree_skb(skb);
- return error < 0 ? error : 0;
-}
-
-static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
-{
- struct cpl_tid_release *req;
-
- skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
- if (!skb)
- return;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_cxgb3_ofld_send(tdev, skb);
- return;
-}
-
-int iwch_quiesce_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-int iwch_resume_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = 0;
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static void set_emss(struct iwch_ep *ep, u16 opt)
-{
- pr_debug("%s ep %p opt %u\n", __func__, ep, opt);
- ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
- if (G_TCPOPT_TSTAMP(opt))
- ep->emss -= 12;
- if (ep->emss < 128)
- ep->emss = 128;
- pr_debug("emss=%d\n", ep->emss);
-}
-
-static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
-{
- unsigned long flags;
- enum iwch_ep_state state;
-
- spin_lock_irqsave(&epc->lock, flags);
- state = epc->state;
- spin_unlock_irqrestore(&epc->lock, flags);
- return state;
-}
-
-static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- epc->state = new;
-}
-
-static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&epc->lock, flags);
- pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
- __state_set(epc, new);
- spin_unlock_irqrestore(&epc->lock, flags);
- return;
-}
-
-static void *alloc_ep(int size, gfp_t gfp)
-{
- struct iwch_ep_common *epc;
-
- epc = kzalloc(size, gfp);
- if (epc) {
- kref_init(&epc->kref);
- spin_lock_init(&epc->lock);
- init_waitqueue_head(&epc->waitq);
- }
- pr_debug("%s alloc ep %p\n", __func__, epc);
- return epc;
-}
-
-void __free_ep(struct kref *kref)
-{
- struct iwch_ep *ep;
- ep = container_of(container_of(kref, struct iwch_ep_common, kref),
- struct iwch_ep, com);
- pr_debug("%s ep %p state %s\n",
- __func__, ep, states[state_read(&ep->com)]);
- if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
- cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- }
- kfree(ep);
-}
-
-static void release_ep_resources(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- set_bit(RELEASE_RESOURCES, &ep->com.flags);
- put_ep(&ep->com);
-}
-
-static int status2errno(int status)
-{
- switch (status) {
- case CPL_ERR_NONE:
- return 0;
- case CPL_ERR_CONN_RESET:
- return -ECONNRESET;
- case CPL_ERR_ARP_MISS:
- return -EHOSTUNREACH;
- case CPL_ERR_CONN_TIMEDOUT:
- return -ETIMEDOUT;
- case CPL_ERR_TCAM_FULL:
- return -ENOMEM;
- case CPL_ERR_CONN_EXIST:
- return -EADDRINUSE;
- default:
- return -EIO;
- }
-}
-
-/*
- * Try and reuse skbs already allocated...
- */
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
-{
- if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
- skb_trim(skb, 0);
- skb_get(skb);
- } else {
- skb = alloc_skb(len, gfp);
- }
- return skb;
-}
-
-static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
- __be32 peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- return rt;
-}
-
-static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
-{
- int i = 0;
-
- while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
- ++i;
- return i;
-}
-
-static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
-{
- pr_debug("%s t3cdev %p\n", __func__, dev);
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for an active open.
- */
-static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- pr_err("ARP failure during connect\n");
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
- * and send it along.
- */
-static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- struct cpl_abort_req *req = cplhdr(skb);
-
- pr_debug("%s t3cdev %p\n", __func__, dev);
- req->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(dev, skb);
-}
-
-static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
-{
- struct cpl_close_con_req *req;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), gfp);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- struct cpl_abort_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(skb, sizeof(*req), gfp);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, abort_arp_failure);
- req = skb_put_zero(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- req->cmd = CPL_ABORT_SEND_RST;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_connect(struct iwch_ep *ep)
-{
- struct cpl_act_open_req *req;
- struct sk_buff *skb;
- u32 opt0h, opt0l, opt2;
- unsigned int mtu_idx;
- int wscale;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
- skb->priority = CPL_PRIORITY_SETUP;
- set_arp_failure_handler(skb, act_open_req_arp_failure);
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0h = htonl(opt0h);
- req->opt0l = htonl(opt0l);
- req->params = 0;
- req->opt2 = htonl(opt2);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
-
- pr_debug("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
-
- BUG_ON(skb_cloned(skb));
-
- mpalen = sizeof(*mpa) + ep->plen;
- if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
- kfree_skb(skb);
- skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- connect_reply_upcall(ep, -ENOMEM);
- return;
- }
- }
- skb_trim(skb, 0);
- skb_reserve(skb, sizeof(*req));
- skb_put(skb, mpalen);
- skb->priority = CPL_PRIORITY_DATA;
- mpa = (struct mpa_message *) skb->data;
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
- mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->private_data_size = htons(ep->plen);
- mpa->revision = mpa_rev;
-
- if (ep->plen)
- memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
- start_ep_timer(ep);
- state_set(&ep->com, MPA_REQ_SENT);
- return;
-}
-
-static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb_reserve(skb, sizeof(*req));
- mpa = skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = MPA_REJECT;
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb again. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(mpalen);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- skb_reserve(skb, sizeof(*req));
- mpa = skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- ep->mpa_skb = skb;
- state_set(&ep->com, MPA_REP_SENT);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_establish *req = cplhdr(skb);
- unsigned int tid = GET_TID(req);
-
- pr_debug("%s ep %p tid %d\n", __func__, ep, tid);
-
- dst_confirm(ep->dst);
-
- /* setup the hwtid for this connection */
- ep->hwtid = tid;
- cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
-
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- /* dealloc the atid */
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-
- /* start MPA negotiation */
- send_mpa_req(ep, skb);
-
- return 0;
-}
-
-static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- state_set(&ep->com, ABORTING);
- send_abort(ep, skb, gfp);
-}
-
-static void close_complete_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- if (ep->com.cm_id) {
- pr_debug("close complete delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void peer_close_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_DISCONNECT;
- if (ep->com.cm_id) {
- pr_debug("peer close delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static void peer_abort_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- event.status = -ECONNRESET;
- if (ep->com.cm_id) {
- pr_debug("abort delivered ep %p cm_id %p tid %d\n", ep,
- ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_reply_upcall(struct iwch_ep *ep, int status)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p status %d\n", __func__, ep, status);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REPLY;
- event.status = status;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
-
- if ((status == 0) || (status == -ECONNREFUSED)) {
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- }
- if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %d status %d\n", __func__, ep,
- ep->hwtid, status);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
- if (status < 0) {
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_request_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REQUEST;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.local_addr));
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- event.provider_data = ep;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (state_read(&ep->parent_ep->com) != DEAD) {
- get_ep(&ep->com);
- ep->parent_ep->com.cm_id->event_handler(
- ep->parent_ep->com.cm_id,
- &event);
- }
- put_ep(&ep->parent_ep->com);
- ep->parent_ep = NULL;
-}
-
-static void established_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_ESTABLISHED;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static int update_rx_credits(struct iwch_ep *ep, u32 credits)
-{
- struct cpl_rx_data_ack *req;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("update_rx_credits - cannot alloc skb!\n");
- return 0;
- }
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
- req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
- skb->priority = CPL_PRIORITY_ACK;
- iwch_cxgb3_ofld_send(ep->com.tdev, skb);
- return credits;
-}
-
-static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- int err;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_SENT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- err = -EINVAL;
- goto err;
- }
-
- /*
- * copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * if we don't even have the mpa message, then bail.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /* Validate MPA header. */
- if (mpa->revision != mpa_rev) {
- err = -EPROTO;
- goto err;
- }
- if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
- err = -EPROTO;
- goto err;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- err = -EPROTO;
- goto err;
- }
-
- /*
- * If plen does not account for pkt size
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- err = -EPROTO;
- goto err;
- }
-
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- if (mpa->flags & MPA_REJECT) {
- err = -ECONNREFUSED;
- goto err;
- }
-
- /*
- * If we get here we have accumulated the entire mpa
- * start reply message including private data. And
- * the MPA header is valid.
- */
- state_set(&ep->com, FPDU_MODE);
- ep->mpa_attr.initiator = 1;
- ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
- __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;
-
- /* bind QP and TID with INIT_WR */
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err;
-
- if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
- iwch_post_zb_read(ep);
- }
-
- goto out;
-err:
- abort_connection(ep, skb, GFP_KERNEL);
-out:
- connect_reply_upcall(ep, err);
- return;
-}
-
-static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_WAIT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
-
- /*
- * Copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * If we don't even have the mpa message, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /*
- * Validate MPA Header.
- */
- if (mpa->revision != mpa_rev) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- /*
- * If plen does not account for pkt size
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- /*
- * If we get here we have accumulated the entire mpa
- * start request message including private data.
- */
- ep->mpa_attr.initiator = 0;
- ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
- __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- state_set(&ep->com, MPA_REQ_RCVD);
-
- /* drive upcall */
- connect_request_upcall(ep);
- return;
-}
-
-static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_rx_data *hdr = cplhdr(skb);
- unsigned int dlen = ntohs(hdr->len);
-
- pr_debug("%s ep %p dlen %u\n", __func__, ep, dlen);
-
- skb_pull(skb, sizeof(*hdr));
- skb_trim(skb, dlen);
-
- ep->rcv_seq += dlen;
- BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
- switch (state_read(&ep->com)) {
- case MPA_REQ_SENT:
- process_mpa_reply(ep, skb);
- break;
- case MPA_REQ_WAIT:
- process_mpa_request(ep, skb);
- break;
- case MPA_REP_SENT:
- break;
- default:
- pr_err("%s Unexpected streaming data. ep %p state %d tid %d\n",
- __func__, ep, state_read(&ep->com), ep->hwtid);
-
- /*
- * The ep will timeout and inform the ULP of the failure.
- * See ep_timeout().
- */
- break;
- }
-
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Upcall from the adapter indicating data has been transmitted.
- * For us it's just the single MPA request or reply. We can now free
- * the skb holding the mpa message.
- */
-static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_wr_ack *hdr = cplhdr(skb);
- unsigned int credits = ntohs(hdr->credits);
- unsigned long flags;
- int post_zb = 0;
-
- pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
-
- if (credits == 0) {
- pr_debug("%s 0 credit ack ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- BUG_ON(credits != 1);
- dst_confirm(ep->dst);
- if (!ep->mpa_skb) {
- pr_debug("%s rdma_init wr_ack ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->mpa_attr.initiator) {
- pr_debug("%s initiator ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (peer2peer && ep->com.state == FPDU_MODE)
- post_zb = 1;
- } else {
- pr_debug("%s responder ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->com.state == MPA_REQ_RCVD) {
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- }
- }
- } else {
- pr_debug("%s lsm ack ep %p state %u freeing skb\n",
- __func__, ep, ep->com.state);
- kfree_skb(ep->mpa_skb);
- ep->mpa_skb = NULL;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (post_zb)
- iwch_post_zb_read(ep);
- return CPL_RET_BUF_DONE;
-}
-
-static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- unsigned long flags;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /*
- * We get 2 abort replies from the HW. The first one must
- * be ignored except for scribbling that we need one more.
- */
- if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case ABORTING:
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- default:
- pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Return whether a failed active open has allocated a TID
- */
-static inline int act_open_has_tid(int status)
-{
- return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
- status != CPL_ERR_ARP_MISS;
-}
-
-static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_open_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
- status2errno(rpl->status));
- connect_reply_upcall(ep, status2errno(rpl->status));
- state_set(&ep->com, DEAD);
- if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
- release_tid(ep->com.tdev, GET_TID(rpl), NULL);
- cxgb3_free_atid(ep->com.tdev, ep->atid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- put_ep(&ep->com);
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_start(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_pass_open_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("t3c_listen_start failed to alloc skb!\n");
- return -ENOMEM;
- }
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
- req->local_port = ep->com.local_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_port = 0;
- req->peer_ip = 0;
- req->peer_netmask = 0;
- req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
- req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
- req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
-
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_pass_open_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p status %d error %d\n", __func__, ep,
- rpl->status, status2errno(rpl->status));
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_stop(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_close_listserv_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->cpu_idx = 0;
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
- void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p\n", __func__, ep);
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- return CPL_RET_BUF_DONE;
-}
-
-static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
-{
- struct cpl_pass_accept_rpl *rpl;
- unsigned int mtu_idx;
- u32 opt0h, opt0l, opt2;
- int wscale;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(*rpl));
- skb_get(skb);
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
-
- rpl = cplhdr(skb);
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(opt0h);
- rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
- rpl->opt2 = htonl(opt2);
- rpl->rsvd = rpl->opt2; /* workaround for HW bug */
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-
- return;
-}
-
-static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
- struct sk_buff *skb)
-{
- pr_debug("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
- peer_ip);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(struct cpl_tid_release));
- skb_get(skb);
-
- if (tdev->type != T3A)
- release_tid(tdev, hwtid, skb);
- else {
- struct cpl_pass_accept_rpl *rpl;
-
- rpl = cplhdr(skb);
- skb->priority = CPL_PRIORITY_SETUP;
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(F_TCAM_BYPASS);
- rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
- rpl->opt2 = 0;
- rpl->rsvd = rpl->opt2;
- iwch_cxgb3_ofld_send(tdev, skb);
- }
-}
-
-static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *child_ep, *parent_ep = ctx;
- struct cpl_pass_accept_req *req = cplhdr(skb);
- unsigned int hwtid = GET_TID(req);
- struct dst_entry *dst;
- struct l2t_entry *l2t;
- struct rtable *rt;
- struct iff_mac tim;
-
- pr_debug("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
- if (state_read(&parent_ep->com) != LISTEN) {
- pr_err("%s - listening ep not in LISTEN\n", __func__);
- goto reject;
- }
-
- /*
- * Find the netdev for this connection request.
- */
- tim.mac_addr = req->dst_mac;
- tim.vlan_tag = ntohs(req->vlan_tag);
- if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
- pr_err("%s bad dst mac %pM\n", __func__, req->dst_mac);
- goto reject;
- }
-
- /* Find output route */
- rt = find_route(tdev,
- req->local_ip,
- req->peer_ip,
- req->local_port,
- req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
- if (!rt) {
- pr_err("%s - failed to find dst entry!\n", __func__);
- goto reject;
- }
- dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
- if (!l2t) {
- pr_err("%s - failed to allocate l2t entry!\n", __func__);
- dst_release(dst);
- goto reject;
- }
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- pr_err("%s - failed to allocate ep entry!\n", __func__);
- l2t_release(tdev, l2t);
- dst_release(dst);
- goto reject;
- }
- state_set(&child_ep->com, CONNECTING);
- child_ep->com.tdev = tdev;
- child_ep->com.cm_id = NULL;
- child_ep->com.local_addr.sin_family = AF_INET;
- child_ep->com.local_addr.sin_port = req->local_port;
- child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
- child_ep->com.remote_addr.sin_family = AF_INET;
- child_ep->com.remote_addr.sin_port = req->peer_port;
- child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
- get_ep(&parent_ep->com);
- child_ep->parent_ep = parent_ep;
- child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
- child_ep->l2t = l2t;
- child_ep->dst = dst;
- child_ep->hwtid = hwtid;
- timer_setup(&child_ep->timer, ep_timeout, 0);
- cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
- accept_cr(child_ep, req->peer_ip, skb);
- goto out;
-reject:
- reject_cr(tdev, hwtid, req->peer_ip, skb);
-out:
- return CPL_RET_BUF_DONE;
-}
-
-static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_pass_establish *req = cplhdr(skb);
-
- pr_debug("%s ep %p\n", __func__, ep);
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- dst_confirm(ep->dst);
- state_set(&ep->com, MPA_REQ_WAIT);
- start_ep_timer(ep);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int disconnect = 1;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- dst_confirm(ep->dst);
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- __state_set(&ep->com, CLOSING);
- break;
- case MPA_REQ_SENT:
- __state_set(&ep->com, CLOSING);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REP_SENT:
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case FPDU_MODE:
- start_ep_timer(ep);
- __state_set(&ep->com, CLOSING);
- attrs.next_state = IWCH_QP_STATE_CLOSING;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- break;
- case ABORTING:
- disconnect = 0;
- break;
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- disconnect = 0;
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- disconnect = 0;
- break;
- case DEAD:
- disconnect = 0;
- break;
- default:
- BUG_ON(1);
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (disconnect)
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
-static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct iwch_ep *ep = ctx;
- struct cpl_abort_rpl *rpl;
- struct sk_buff *rpl_skb;
- struct iwch_qp_attributes attrs;
- int ret;
- int release = 0;
- unsigned long flags;
-
- if (is_neg_adv_abort(req->status)) {
- pr_debug("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
- ep->hwtid);
- t3_l2t_send_event(ep->com.tdev, ep->l2t);
- return CPL_RET_BUF_DONE;
- }
-
- /*
- * We get 2 peer aborts from the HW. The first one must
- * be ignored except for scribbling that we need one more.
- */
- if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- pr_debug("%s ep %p state %u\n", __func__, ep, ep->com.state);
- switch (ep->com.state) {
- case CONNECTING:
- break;
- case MPA_REQ_WAIT:
- stop_ep_timer(ep);
- break;
- case MPA_REQ_SENT:
- stop_ep_timer(ep);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REP_SENT:
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MORIBUND:
- case CLOSING:
- stop_ep_timer(ep);
- /*FALLTHROUGH*/
- case FPDU_MODE:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- ret = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (ret)
- pr_err("%s - qp <- error failed!\n", __func__);
- }
- peer_abort_upcall(ep);
- break;
- case ABORTING:
- break;
- case DEAD:
- pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
- spin_unlock_irqrestore(&ep->com.lock, flags);
- return CPL_RET_BUF_DONE;
- default:
- BUG_ON(1);
- break;
- }
- dst_confirm(ep->dst);
- if (ep->com.state != ABORTING) {
- __state_set(&ep->com, DEAD);
- release = 1;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
- if (!rpl_skb) {
- pr_err("%s - cannot allocate skb!\n", __func__);
- release = 1;
- goto out;
- }
- rpl_skb->priority = CPL_PRIORITY_DATA;
- rpl = skb_put(rpl_skb, sizeof(*rpl));
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
- rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
- rpl->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
-out:
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /* The cm_id may be null if we failed to connect */
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if ((ep->com.cm_id) && (ep->com.qp)) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- case ABORTING:
- case DEAD:
- break;
- default:
- BUG_ON(1);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * T3A does 3 things when a TERM is received:
- * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
- * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcode cqe into the associated CQ.
- *
- * For (1), we save the message in the qp for later consumer consumption.
- * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
- * For (3), we toss the CQE in cxio_poll_cq().
- *
- * terminate() handles case (1)...
- */
-static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
-
- if (state_read(&ep->com) != FPDU_MODE)
- return CPL_RET_BUF_DONE;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb_pull(skb, sizeof(struct cpl_rdma_terminate));
- pr_debug("%s saving %d bytes of term msg\n", __func__, skb->len);
- skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
- skb->len);
- ep->com.qp->attr.terminate_msg_len = skb->len;
- ep->com.qp->attr.is_terminate_local = 0;
- return CPL_RET_BUF_DONE;
-}
-
-static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_rdma_ec_status *rep = cplhdr(skb);
- struct iwch_ep *ep = ctx;
-
- pr_debug("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
- rep->status);
- if (rep->status) {
- struct iwch_qp_attributes attrs;
-
- pr_err("%s BAD CLOSE - Aborting tid %u\n",
- __func__, ep->hwtid);
- stop_ep_timer(ep);
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- abort_connection(ep, NULL, GFP_KERNEL);
- }
- return CPL_RET_BUF_DONE;
-}
-
-static void ep_timeout(struct timer_list *t)
-{
- struct iwch_ep *ep = from_timer(ep, t, timer);
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int abort = 1;
-
- spin_lock_irqsave(&ep->com.lock, flags);
- pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
- switch (ep->com.state) {
- case MPA_REQ_SENT:
- __state_set(&ep->com, ABORTING);
- connect_reply_upcall(ep, -ETIMEDOUT);
- break;
- case MPA_REQ_WAIT:
- __state_set(&ep->com, ABORTING);
- break;
- case CLOSING:
- case MORIBUND:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- __state_set(&ep->com, ABORTING);
- break;
- default:
- WARN(1, "%s unexpected state ep %p state %u\n",
- __func__, ep, ep->com.state);
- abort = 0;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (abort)
- abort_connection(ep, NULL, GFP_ATOMIC);
- put_ep(&ep->com);
-}
-
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- struct iwch_ep *ep = to_ep(cm_id);
-
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-
- if (state_read(&ep->com) == DEAD) {
- put_ep(&ep->com);
- return -ECONNRESET;
- }
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- if (mpa_rev == 0)
- abort_connection(ep, NULL, GFP_KERNEL);
- else {
- send_mpa_reject(ep, pdata, pdata_len);
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- }
- put_ep(&ep->com);
- return 0;
-}
-
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- int err;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- struct iwch_ep *ep = to_ep(cm_id);
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
-
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD) {
- err = -ECONNRESET;
- goto err;
- }
-
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- BUG_ON(!qp);
-
- if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
- (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
- abort_connection(ep, NULL, GFP_KERNEL);
- err = -EINVAL;
- goto err;
- }
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = qp;
-
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ird == 0)
- ep->ird = 1;
-
- pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
-
- /* bind QP to EP and move to RTS */
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- /* bind QP and TID with INIT_WR */
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_MAX_ORD;
-
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err1;
-
- /* if needed, wait for wr_ack */
- if (iwch_rqes_posted(qp)) {
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (err)
- goto err1;
- }
-
- err = send_mpa_reply(ep, conn_param->private_data,
- conn_param->private_data_len);
- if (err)
- goto err1;
-
-
- state_set(&ep->com, FPDU_MODE);
- established_upcall(ep);
- put_ep(&ep->com);
- return 0;
-err1:
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- cm_id->rem_ref(cm_id);
-err:
- put_ep(&ep->com);
- return err;
-}
-
-static int is_loopback_dst(struct iw_cm_id *cm_id)
-{
- struct net_device *dev;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
-
- dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
- if (!dev)
- return 0;
- dev_put(dev);
- return 1;
-}
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_ep *ep;
- struct rtable *rt;
- int err = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
-
- if (cm_id->m_remote_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto out;
- }
-
- if (is_loopback_dst(cm_id)) {
- err = -ENOSYS;
- goto out;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- pr_err("%s - cannot alloc ep\n", __func__);
- err = -ENOMEM;
- goto out;
- }
- timer_setup(&ep->timer, ep_timeout, 0);
- ep->plen = conn_param->private_data_len;
- if (ep->plen)
- memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
- conn_param->private_data, ep->plen);
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ord == 0)
- ep->ord = 1;
-
- ep->com.tdev = h->rdev.t3cdev_p;
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = get_qhp(h, conn_param->qpn);
- BUG_ON(!ep->com.qp);
- pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
- ep->com.qp, cm_id);
-
- /*
- * Allocate an active TID to initiate a TCP connection.
- */
- ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->atid == -1) {
- pr_err("%s - cannot alloc atid\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- /* find a route */
- rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, IPTOS_LOWDELAY);
- if (!rt) {
- pr_err("%s - cannot find route\n", __func__);
- err = -EHOSTUNREACH;
- goto fail3;
- }
- ep->dst = &rt->dst;
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
- &raddr->sin_addr.s_addr);
- if (!ep->l2t) {
- pr_err("%s - cannot alloc l2e\n", __func__);
- err = -ENOMEM;
- goto fail4;
- }
-
- state_set(&ep->com, CONNECTING);
- ep->tos = IPTOS_LOWDELAY;
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
- sizeof(ep->com.remote_addr));
-
- /* send connect request to rnic */
- err = send_connect(ep);
- if (!err)
- goto out;
-
- l2t_release(h->rdev.t3cdev_p, ep->l2t);
-fail4:
- dst_release(ep->dst);
-fail3:
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-out:
- return err;
-}
-
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
-{
- int err = 0;
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_listen_ep *ep;
-
-
- might_sleep();
-
- if (cm_id->m_local_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto fail1;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- pr_err("%s - cannot alloc ep\n", __func__);
- err = -ENOMEM;
- goto fail1;
- }
- pr_debug("%s ep %p\n", __func__, ep);
- ep->com.tdev = h->rdev.t3cdev_p;
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
-
- /*
- * Allocate a server TID.
- */
- ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->stid == -1) {
- pr_err("%s - cannot alloc stid\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- state_set(&ep->com, LISTEN);
- err = listen_start(ep);
- if (err)
- goto fail3;
-
- /* wait for pass_open_rpl */
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (!err) {
- cm_id->provider_data = ep;
- goto out;
- }
-fail3:
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-fail1:
-out:
- return err;
-}
-
-int iwch_destroy_listen(struct iw_cm_id *cm_id)
-{
- int err;
- struct iwch_listen_ep *ep = to_listen_ep(cm_id);
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- might_sleep();
- state_set(&ep->com, DEAD);
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
- err = listen_stop(ep);
- if (err)
- goto done;
- wait_event(ep->com.waitq, ep->com.rpl_done);
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-done:
- err = ep->com.rpl_err;
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
- return err;
-}
-
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
-{
- int ret = 0;
- unsigned long flags;
- int close = 0;
- int fatal = 0;
- struct t3cdev *tdev;
- struct cxio_rdev *rdev;
-
- spin_lock_irqsave(&ep->com.lock, flags);
-
- pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
- states[ep->com.state], abrupt);
-
- tdev = (struct t3cdev *)ep->com.tdev;
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- fatal = 1;
- close_complete_upcall(ep);
- ep->com.state = DEAD;
- }
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- case MPA_REQ_SENT:
- case MPA_REQ_RCVD:
- case MPA_REP_SENT:
- case FPDU_MODE:
- close = 1;
- if (abrupt)
- ep->com.state = ABORTING;
- else {
- ep->com.state = CLOSING;
- start_ep_timer(ep);
- }
- set_bit(CLOSE_SENT, &ep->com.flags);
- break;
- case CLOSING:
- if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
- close = 1;
- if (abrupt) {
- stop_ep_timer(ep);
- ep->com.state = ABORTING;
- } else
- ep->com.state = MORIBUND;
- }
- break;
- case MORIBUND:
- case ABORTING:
- case DEAD:
- pr_debug("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
- break;
- default:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (close) {
- if (abrupt)
- ret = send_abort(ep, NULL, gfp);
- else
- ret = send_halfclose(ep, gfp);
- if (ret)
- fatal = 1;
- }
- if (fatal)
- release_ep_resources(ep);
- return ret;
-}
-
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
- struct l2t_entry *l2t)
-{
- struct iwch_ep *ep = ctx;
-
- if (ep->dst != old)
- return 0;
-
- pr_debug("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
- l2t);
- dst_hold(new);
- l2t_release(ep->com.tdev, ep->l2t);
- ep->l2t = l2t;
- dst_release(old);
- ep->dst = new;
- return 1;
-}
-
-/*
- * All the CM events are handled on a work queue to have a safe context.
- * These are the real handlers that are called from the work queue.
- */
-static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = act_establish,
- [CPL_ACT_OPEN_RPL] = act_open_rpl,
- [CPL_RX_DATA] = rx_data,
- [CPL_TX_DMA_ACK] = tx_ack,
- [CPL_ABORT_RPL_RSS] = abort_rpl,
- [CPL_ABORT_RPL] = abort_rpl,
- [CPL_PASS_OPEN_RPL] = pass_open_rpl,
- [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
- [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
- [CPL_PASS_ESTABLISH] = pass_establish,
- [CPL_PEER_CLOSE] = peer_close,
- [CPL_ABORT_REQ_RSS] = peer_abort,
- [CPL_CLOSE_CON_RPL] = close_con_rpl,
- [CPL_RDMA_TERMINATE] = terminate,
- [CPL_RDMA_EC_STATUS] = ec_status,
-};
-
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * ep was referenced in sched(), and is freed here.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
-static DECLARE_WORK(skb_work, process_work);
-
-static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep_common *epc = ctx;
-
- get_ep(epc);
-
- /*
- * Save ctx and tdev in the skb->cb area.
- */
- *((void **) skb->cb) = ctx;
- *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
-
- /*
- * Queue the skb and schedule the worker thread.
- */
- skb_queue_tail(&rxq, skb);
- queue_work(workq, &skb_work);
- return 0;
-}
-
-static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
-
- if (rpl->status != CPL_ERR_NONE) {
- pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
- rpl->status, GET_TID(rpl));
- }
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * All upcalls from the T3 Core go to sched() to schedule the
- * processing on a work queue.
- */
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = sched,
- [CPL_ACT_OPEN_RPL] = sched,
- [CPL_RX_DATA] = sched,
- [CPL_TX_DMA_ACK] = sched,
- [CPL_ABORT_RPL_RSS] = sched,
- [CPL_ABORT_RPL] = sched,
- [CPL_PASS_OPEN_RPL] = sched,
- [CPL_CLOSE_LISTSRV_RPL] = sched,
- [CPL_PASS_ACCEPT_REQ] = sched,
- [CPL_PASS_ESTABLISH] = sched,
- [CPL_PEER_CLOSE] = sched,
- [CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
- [CPL_RDMA_TERMINATE] = sched,
- [CPL_RDMA_EC_STATUS] = sched,
- [CPL_SET_TCB_RPL] = set_tcb_rpl,
-};
-
-int __init iwch_cm_init(void)
-{
- skb_queue_head_init(&rxq);
-
- workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
- if (!workq)
- return -ENOMEM;
-
- return 0;
-}
-
-void __exit iwch_cm_term(void)
-{
- flush_workqueue(workq);
- destroy_workqueue(workq);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
deleted file mode 100644
index cc7fe644d260..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _IWCH_CM_H_
-#define _IWCH_CM_H_
-
-#include <linux/inet.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/kref.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/iw_cm.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-
-#define MPA_KEY_REQ "MPA ID Req Frame"
-#define MPA_KEY_REP "MPA ID Rep Frame"
-
-#define MPA_MAX_PRIVATE_DATA 256
-#define MPA_REV 0 /* XXX - amso1100 uses rev 0 ! */
-#define MPA_REJECT 0x20
-#define MPA_CRC 0x40
-#define MPA_MARKERS 0x80
-#define MPA_FLAGS_MASK 0xE0
-
-#define put_ep(ep) { \
- pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
- __func__, __LINE__, ep, kref_read(&((ep)->kref))); \
- WARN_ON(kref_read(&((ep)->kref)) < 1); \
- kref_put(&((ep)->kref), __free_ep); \
-}
-
-#define get_ep(ep) { \
- pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
- __func__, __LINE__, ep, kref_read(&((ep)->kref))); \
- kref_get(&((ep)->kref)); \
-}
-
-struct mpa_message {
- u8 key[16];
- u8 flags;
- u8 revision;
- __be16 private_data_size;
- u8 private_data[0];
-};
-
-struct terminate_message {
- u8 layer_etype;
- u8 ecode;
- __be16 hdrct_rsvd;
- u8 len_hdrs[0];
-};
-
-#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
-
-enum iwch_layers_types {
- LAYER_RDMAP = 0x00,
- LAYER_DDP = 0x10,
- LAYER_MPA = 0x20,
- RDMAP_LOCAL_CATA = 0x00,
- RDMAP_REMOTE_PROT = 0x01,
- RDMAP_REMOTE_OP = 0x02,
- DDP_LOCAL_CATA = 0x00,
- DDP_TAGGED_ERR = 0x01,
- DDP_UNTAGGED_ERR = 0x02,
- DDP_LLP = 0x03
-};
-
-enum iwch_rdma_ecodes {
- RDMAP_INV_STAG = 0x00,
- RDMAP_BASE_BOUNDS = 0x01,
- RDMAP_ACC_VIOL = 0x02,
- RDMAP_STAG_NOT_ASSOC = 0x03,
- RDMAP_TO_WRAP = 0x04,
- RDMAP_INV_VERS = 0x05,
- RDMAP_INV_OPCODE = 0x06,
- RDMAP_STREAM_CATA = 0x07,
- RDMAP_GLOBAL_CATA = 0x08,
- RDMAP_CANT_INV_STAG = 0x09,
- RDMAP_UNSPECIFIED = 0xff
-};
-
-enum iwch_ddp_ecodes {
- DDPT_INV_STAG = 0x00,
- DDPT_BASE_BOUNDS = 0x01,
- DDPT_STAG_NOT_ASSOC = 0x02,
- DDPT_TO_WRAP = 0x03,
- DDPT_INV_VERS = 0x04,
- DDPU_INV_QN = 0x01,
- DDPU_INV_MSN_NOBUF = 0x02,
- DDPU_INV_MSN_RANGE = 0x03,
- DDPU_INV_MO = 0x04,
- DDPU_MSG_TOOBIG = 0x05,
- DDPU_INV_VERS = 0x06
-};
-
-enum iwch_mpa_ecodes {
- MPA_CRC_ERR = 0x02,
- MPA_MARKER_ERR = 0x03
-};
-
-enum iwch_ep_state {
- IDLE = 0,
- LISTEN,
- CONNECTING,
- MPA_REQ_WAIT,
- MPA_REQ_SENT,
- MPA_REQ_RCVD,
- MPA_REP_SENT,
- FPDU_MODE,
- ABORTING,
- CLOSING,
- MORIBUND,
- DEAD,
-};
-
-enum iwch_ep_flags {
- PEER_ABORT_IN_PROGRESS = 0,
- ABORT_REQ_IN_PROGRESS = 1,
- RELEASE_RESOURCES = 2,
- CLOSE_SENT = 3,
-};
-
-struct iwch_ep_common {
- struct iw_cm_id *cm_id;
- struct iwch_qp *qp;
- struct t3cdev *tdev;
- enum iwch_ep_state state;
- struct kref kref;
- spinlock_t lock;
- struct sockaddr_in local_addr;
- struct sockaddr_in remote_addr;
- wait_queue_head_t waitq;
- int rpl_done;
- int rpl_err;
- unsigned long flags;
-};
-
-struct iwch_listen_ep {
- struct iwch_ep_common com;
- unsigned int stid;
- int backlog;
-};
-
-struct iwch_ep {
- struct iwch_ep_common com;
- struct iwch_ep *parent_ep;
- struct timer_list timer;
- unsigned int atid;
- u32 hwtid;
- u32 snd_seq;
- u32 rcv_seq;
- struct l2t_entry *l2t;
- struct dst_entry *dst;
- struct sk_buff *mpa_skb;
- struct iwch_mpa_attributes mpa_attr;
- unsigned int mpa_pkt_len;
- u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
- u8 tos;
- u16 emss;
- u16 plen;
- u32 ird;
- u32 ord;
-};
-
-static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline int compute_wscale(int win)
-{
- int wscale = 0;
-
- while (wscale < 14 && (65535<<wscale) < win)
- wscale++;
- return wscale;
-}
-
-/* CM prototypes */
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
-int iwch_destroy_listen(struct iw_cm_id *cm_id);
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
-int iwch_quiesce_tid(struct iwch_ep *ep);
-int iwch_resume_tid(struct iwch_ep *ep);
-void __free_ep(struct kref *kref);
-void iwch_rearp(struct iwch_ep *ep);
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t);
-
-int __init iwch_cm_init(void);
-void __exit iwch_cm_term(void);
-extern int peer2peer;
-
-#endif /* _IWCH_CM_H_ */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
deleted file mode 100644
index a098c0140580..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "iwch_provider.h"
-#include "iwch.h"
-
-static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct iwch_qp *qhp, struct ib_wc *wc)
-{
- struct t3_wq *wq = qhp ? &qhp->wq : NULL;
- struct t3_cqe cqe;
- u32 credit = 0;
- u8 cqe_flushed;
- u64 cookie;
- int ret = 1;
-
- ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
- &credit);
- if (t3a_device(chp->rhp) && credit) {
- pr_debug("%s updating %d cq credits on id %d\n", __func__,
- credit, chp->cq.cqid);
- cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
- }
-
- if (ret) {
- ret = -EAGAIN;
- goto out;
- }
- ret = 1;
-
- wc->wr_id = cookie;
- wc->qp = qhp ? &qhp->ibqp : NULL;
- wc->vendor_err = CQE_STATUS(cqe);
- wc->wc_flags = 0;
-
- pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
- __func__,
- CQE_QPID(cqe), CQE_TYPE(cqe),
- CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
- CQE_WRID_LOW(cqe), (unsigned long long)cookie);
-
- if (CQE_TYPE(cqe) == 0) {
- if (!CQE_STATUS(cqe))
- wc->byte_len = CQE_LEN(cqe);
- else
- wc->byte_len = 0;
- wc->opcode = IB_WC_RECV;
- if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
- CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
- wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- }
- } else {
- switch (CQE_OPCODE(cqe)) {
- case T3_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case T3_READ_REQ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = CQE_LEN(cqe);
- break;
- case T3_SEND:
- case T3_SEND_WITH_SE:
- case T3_SEND_WITH_INV:
- case T3_SEND_WITH_SE_INV:
- wc->opcode = IB_WC_SEND;
- break;
- case T3_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- break;
- case T3_FAST_REGISTER:
- wc->opcode = IB_WC_REG_MR;
- break;
- default:
- pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
- CQE_OPCODE(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- goto out;
- }
- }
-
- if (cqe_flushed)
- wc->status = IB_WC_WR_FLUSH_ERR;
- else {
-
- switch (CQE_STATUS(cqe)) {
- case TPT_ERR_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case TPT_ERR_STAG:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_PDID:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_WRAP:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- case TPT_ERR_BOUND:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- case TPT_ERR_OPCODE:
- wc->status = IB_WC_FATAL_ERR;
- break;
- case TPT_ERR_SWFLUSH:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- default:
- pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
- CQE_STATUS(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- }
- }
-out:
- return ret;
-}
-
-/*
- * Get one cq entry from cxio and map it to openib.
- *
- * Returns:
- * 0 CQ empty
- * 1 cqe returned
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct ib_wc *wc)
-{
- struct iwch_qp *qhp;
- struct t3_cqe *rd_cqe;
- int ret;
-
- rd_cqe = cxio_next_cqe(&chp->cq);
-
- if (!rd_cqe)
- return 0;
-
- qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
- if (qhp) {
- spin_lock(&qhp->lock);
- ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
- spin_unlock(&qhp->lock);
- } else {
- ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
- }
- return ret;
-}
-
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- unsigned long flags;
- int npolled;
- int err = 0;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
-
- spin_lock_irqsave(&chp->lock, flags);
- for (npolled = 0; npolled < num_entries; ++npolled) {
-
- /*
- * Because T3 can post CQEs that are _not_ associated
- * with a WR, we might have to poll again after removing
- * one of these.
- */
- do {
- err = iwch_poll_cq_one(rhp, chp, wc + npolled);
- } while (err == -EAGAIN);
- if (err <= 0)
- break;
- }
- spin_unlock_irqrestore(&chp->lock, flags);
-
- if (err < 0)
- return err;
-
- return npolled;
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
deleted file mode 100644
index 9d356c1301c7..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/gfp.h>
-#include <linux/mman.h>
-#include <net/sock.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_wr.h"
-
-static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
- struct respQ_msg_t *rsp_msg,
- enum ib_event_type ib_event,
- int send_term)
-{
- struct ib_event event;
- struct iwch_qp_attributes attrs;
- struct iwch_qp *qhp;
- unsigned long flag;
-
- xa_lock(&rnicp->qps);
- qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
-
- if (!qhp) {
- pr_err("%s unaffiliated error 0x%x qpid 0x%x\n",
- __func__, CQE_STATUS(rsp_msg->cqe),
- CQE_QPID(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- return;
- }
-
- if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
- (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
- pr_debug("%s AE received after RTS - qp state %d qpid 0x%x status 0x%x\n",
- __func__,
- qhp->attr.state, qhp->wq.qpid,
- CQE_STATUS(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- return;
- }
-
- pr_err("%s - AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- __func__,
- CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
- CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
-
- atomic_inc(&qhp->refcnt);
- xa_unlock(&rnicp->qps);
-
- if (qhp->attr.state == IWCH_QP_STATE_RTS) {
- attrs.next_state = IWCH_QP_STATE_TERMINATE;
- iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (send_term)
- iwch_post_terminate(qhp, rsp_msg);
- }
-
- event.event = ib_event;
- event.device = chp->ibcq.device;
- if (ib_event == IB_EVENT_CQ_ERR)
- event.element.cq = &chp->ibcq;
- else
- event.element.qp = &qhp->ibqp;
-
- if (qhp->ibqp.event_handler)
- (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
-
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-}
-
-void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
-{
- struct iwch_dev *rnicp;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- struct iwch_cq *chp;
- struct iwch_qp *qhp;
- u32 cqid = RSPQ_CQID(rsp_msg);
- unsigned long flag;
-
- rnicp = (struct iwch_dev *) rdev_p->ulp;
- xa_lock(&rnicp->qps);
- chp = get_chp(rnicp, cqid);
- qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
- if (!chp || !qhp) {
- pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- cqid, CQE_QPID(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
- CQE_WRID_LOW(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- goto out;
- }
- iwch_qp_add_ref(&qhp->ibqp);
- atomic_inc(&chp->refcnt);
- xa_unlock(&rnicp->qps);
-
- /*
- * 1) completion of our sending a TERMINATE.
- * 2) incoming TERMINATE message.
- */
- if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
- (CQE_STATUS(rsp_msg->cqe) == 0)) {
- if (SQ_TYPE(rsp_msg->cqe)) {
- pr_debug("%s QPID 0x%x ep %p disconnecting\n",
- __func__, qhp->wq.qpid, qhp->ep);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- } else {
- pr_debug("%s post REQ_ERR AE QPID 0x%x\n", __func__,
- qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg,
- IB_EVENT_QP_REQ_ERR, 0);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- }
- goto done;
- }
-
- /* Bad incoming Read request */
- if (SQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- /* Bad incoming write */
- if (RQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- switch (CQE_STATUS(rsp_msg->cqe)) {
-
- /* Completion Events */
- case TPT_ERR_SUCCESS:
-
- /*
- * Confirm the destination entry if this is a RECV completion.
- */
- if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
- dst_confirm(qhp->ep->dst);
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- break;
-
- case TPT_ERR_STAG:
- case TPT_ERR_PDID:
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- case TPT_ERR_WRAP:
- case TPT_ERR_BOUND:
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
- break;
-
- /* Device Fatal Errors */
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
- break;
-
- /* QP Fatal Errors */
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_PBL_ADDR_BOUND:
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_OPCODE:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_RQE_ADDR_BOUND:
- case TPT_ERR_IRD_OVERFLOW:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
-
- default:
- pr_err("Unknown T3 status 0x%x QPID 0x%x\n",
- CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
- }
-done:
- if (atomic_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
- iwch_qp_rem_ref(&qhp->ibqp);
-out:
- dev_kfree_skb_irq(skb);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
deleted file mode 100644
index ce0f2741821d..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-
-static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
-{
- u32 mmid;
-
- mhp->attr.state = 1;
- mhp->attr.stag = stag;
- mmid = stag >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
- return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
-}
-
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift)
-{
- u32 stag;
- int ret;
-
- if (cxio_register_phys_mem(&rhp->rdev,
- &stag, mhp->attr.pdid,
- mhp->attr.perms,
- mhp->attr.zbva,
- mhp->attr.va_fbo,
- mhp->attr.len,
- shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr))
- return -ENOMEM;
-
- ret = iwch_finish_mem_reg(mhp, stag);
- if (ret)
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- return ret;
-}
-
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
-{
- mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
- npages << 3);
-
- if (!mhp->attr.pbl_addr)
- return -ENOMEM;
-
- mhp->attr.pbl_size = npages;
-
- return 0;
-}
-
-void iwch_free_pbl(struct iwch_mr *mhp)
-{
- cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
- mhp->attr.pbl_size << 3);
-}
-
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
-{
- return cxio_write_pbl(&mhp->rhp->rdev, pages,
- mhp->attr.pbl_addr + (offset << 3), npages);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
deleted file mode 100644
index dcf02ec02810..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ /dev/null
@@ -1,1321 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/sched/mm.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/rtnetlink.h>
-#include <linux/inetdevice.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/uverbs_ioctl.h>
-
-#include "cxio_hal.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-#include <rdma/cxgb3-abi.h>
-#include "common.h"
-
-static void iwch_dealloc_ucontext(struct ib_ucontext *context)
-{
- struct iwch_dev *rhp = to_iwch_dev(context->device);
- struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
- struct iwch_mm_entry *mm, *tmp;
-
- pr_debug("%s context %p\n", __func__, context);
- list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
- kfree(mm);
- cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
-}
-
-static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ucontext->device;
- struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
- struct iwch_dev *rhp = to_iwch_dev(ibdev);
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- cxio_init_ucontext(&rhp->rdev, &context->uctx);
- INIT_LIST_HEAD(&context->mmaps);
- spin_lock_init(&context->mmap_lock);
- return 0;
-}
-
-static void iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
-{
- struct iwch_cq *chp;
-
- pr_debug("%s ib_cq %p\n", __func__, ib_cq);
- chp = to_iwch_cq(ib_cq);
-
- xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
- atomic_dec(&chp->refcnt);
- wait_event(chp->wait, !atomic_read(&chp->refcnt));
-
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
-}
-
-static int iwch_create_cq(struct ib_cq *ibcq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ibcq->device;
- int entries = attr->cqe;
- struct iwch_dev *rhp = to_iwch_dev(ibcq->device);
- struct iwch_cq *chp = to_iwch_cq(ibcq);
- struct iwch_create_cq_resp uresp;
- struct iwch_create_cq_req ureq;
- static int warned;
- size_t resplen;
-
- pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
- if (attr->flags)
- return -EINVAL;
-
- if (udata) {
- if (!t3a_device(rhp)) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return -EFAULT;
-
- chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
- }
- }
-
- if (t3a_device(rhp)) {
-
- /*
- * T3A: Add some fluff to handle extra CQEs inserted
- * for various errors.
- * Additional CQE possibilities:
- * TERMINATE,
- * incoming RDMA WRITE Failures
- * incoming RDMA READ REQUEST FAILUREs
- * NOTE: We cannot ensure the CQ won't overflow.
- */
- entries += 16;
- }
- entries = roundup_pow_of_two(entries);
- chp->cq.size_log2 = ilog2(entries);
-
- if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata))
- return -ENOMEM;
-
- chp->rhp = rhp;
- chp->ibcq.cqe = 1 << chp->cq.size_log2;
- spin_lock_init(&chp->lock);
- spin_lock_init(&chp->comp_handler_lock);
- atomic_set(&chp->refcnt, 1);
- init_waitqueue_head(&chp->wait);
- if (xa_store_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL)) {
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
- return -ENOMEM;
- }
-
- if (udata) {
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct iwch_ucontext, ibucontext);
-
- mm = kmalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- iwch_destroy_cq(&chp->ibcq, udata);
- return -ENOMEM;
- }
- uresp.cqid = chp->cq.cqid;
- uresp.size_log2 = chp->cq.size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
- if (udata->outlen < sizeof(uresp)) {
- if (!warned++)
- pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
- mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
- sizeof(struct t3_cqe));
- resplen = sizeof(struct iwch_create_cq_resp_v0);
- } else {
- mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
- sizeof(struct t3_cqe));
- uresp.memsize = mm->len;
- uresp.reserved = 0;
- resplen = sizeof(uresp);
- }
- if (ib_copy_to_udata(udata, &uresp, resplen)) {
- kfree(mm);
- iwch_destroy_cq(&chp->ibcq, udata);
- return -EFAULT;
- }
- insert_mmap(ucontext, mm);
- }
- pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr %pad\n",
- chp->cq.cqid, chp, (1 << chp->cq.size_log2),
- &chp->cq.dma_addr);
- return 0;
-}
-
-static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- enum t3_cq_opcode cq_op;
- int err;
- unsigned long flag;
- u32 rptr;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
- if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- cq_op = CQ_ARM_SE;
- else
- cq_op = CQ_ARM_AN;
- if (chp->user_rptr_addr) {
- if (get_user(rptr, chp->user_rptr_addr))
- return -EFAULT;
- spin_lock_irqsave(&chp->lock, flag);
- chp->cq.rptr = rptr;
- } else
- spin_lock_irqsave(&chp->lock, flag);
- pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
- err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
- spin_unlock_irqrestore(&chp->lock, flag);
- if (err < 0)
- pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
- if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- err = 0;
- return err;
-}
-
-static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- int len = vma->vm_end - vma->vm_start;
- u32 key = vma->vm_pgoff << PAGE_SHIFT;
- struct cxio_rdev *rdev_p;
- int ret = 0;
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext;
- u64 addr;
-
- pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
- key, len);
-
- if (vma->vm_start & (PAGE_SIZE-1)) {
- return -EINVAL;
- }
-
- rdev_p = &(to_iwch_dev(context->device)->rdev);
- ucontext = to_iwch_ucontext(context);
-
- mm = remove_mmap(ucontext, key, len);
- if (!mm)
- return -EINVAL;
- addr = mm->addr;
- kfree(mm);
-
- if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
- (addr < (rdev_p->rnic_info.udbell_physbase +
- rdev_p->rnic_info.udbell_len))) {
-
- /*
- * Map T3 DB register.
- */
- if (vma->vm_flags & VM_READ) {
- return -EPERM;
- }
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_flags &= ~VM_MAYREAD;
- ret = io_remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- } else {
-
- /*
- * Map WQ or CQ contig dma memory...
- */
- ret = remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- }
-
- return ret;
-}
-
-static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
- cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
-}
-
-static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct iwch_pd *php = to_iwch_pd(pd);
- struct ib_device *ibdev = pd->device;
- u32 pdid;
- struct iwch_dev *rhp;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- rhp = (struct iwch_dev *) ibdev;
- pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
- if (!pdid)
- return -EINVAL;
-
- php->pdid = pdid;
- php->rhp = rhp;
- if (udata) {
- struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
-
- if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- iwch_deallocate_pd(&php->ibpd, udata);
- return -EFAULT;
- }
- }
- pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
- return 0;
-}
-
-static int iwch_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_mr *mhp;
- u32 mmid;
-
- pr_debug("%s ib_mr %p\n", __func__, ib_mr);
-
- mhp = to_iwch_mr(ib_mr);
- kfree(mhp->pages);
- rhp = mhp->rhp;
- mmid = mhp->attr.stag >> 8;
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- iwch_free_pbl(mhp);
- xa_erase_irq(&rhp->mrs, mmid);
- if (mhp->kva)
- kfree((void *) (unsigned long) mhp->kva);
- ib_umem_release(mhp->umem);
- pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
-{
- const u64 total_size = 0xffffffff;
- const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
- struct iwch_pd *php = to_iwch_pd(pd);
- struct iwch_dev *rhp = php->rhp;
- struct iwch_mr *mhp;
- __be64 *page_list;
- int shift = 26, npages, ret, i;
-
- pr_debug("%s ib_pd %p\n", __func__, pd);
-
- /*
- * T3 only supports 32 bits of size.
- */
- if (sizeof(phys_addr_t) > 4) {
- pr_warn_once("Cannot support dma_mrs on this platform\n");
- return ERR_PTR(-ENOTSUPP);
- }
-
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- npages = (total_size + (1ULL << shift) - 1) >> shift;
- if (!npages) {
- ret = -EINVAL;
- goto err;
- }
-
- page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
- if (!page_list) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < npages; i++)
- page_list[i] = cpu_to_be64((u64)i << shift);
-
- pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
- __func__, mask, shift, total_size, npages);
-
- ret = iwch_alloc_pbl(mhp, npages);
- if (ret) {
- kfree(page_list);
- goto err_pbl;
- }
-
- ret = iwch_write_pbl(mhp, page_list, npages, 0);
- kfree(page_list);
- if (ret)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
-
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = 0;
- mhp->attr.page_size = shift - 12;
-
- mhp->attr.len = (u32) total_size;
- mhp->attr.pbl_size = npages;
- ret = iwch_register_mem(rhp, php, mhp, shift);
- if (ret)
- goto err_pbl;
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- kfree(mhp);
- return ERR_PTR(ret);
-}
-
-static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
-{
- __be64 *pages;
- int shift, n, i;
- int err = 0;
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- struct iwch_reg_user_mr_resp uresp;
- struct sg_dma_page_iter sg_iter;
- pr_debug("%s ib_pd %p\n", __func__, pd);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- mhp->umem = ib_umem_get(udata, start, length, acc, 0);
- if (IS_ERR(mhp->umem)) {
- err = PTR_ERR(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
- }
-
- shift = PAGE_SHIFT;
-
- n = ib_umem_num_pages(mhp->umem);
-
- err = iwch_alloc_pbl(mhp, n);
- if (err)
- goto err;
-
- pages = (__be64 *) __get_free_page(GFP_KERNEL);
- if (!pages) {
- err = -ENOMEM;
- goto err_pbl;
- }
-
- i = n = 0;
-
- for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
- pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
- if (i == PAGE_SIZE / sizeof(*pages)) {
- err = iwch_write_pbl(mhp, pages, i, n);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
- }
-
- if (i)
- err = iwch_write_pbl(mhp, pages, i, n);
-
-pbl_done:
- free_page((unsigned long) pages);
- if (err)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = virt;
- mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
-
- err = iwch_register_mem(rhp, php, mhp, shift);
- if (err)
- goto err_pbl;
-
- if (udata && !t3a_device(rhp)) {
- uresp.pbl_addr = (mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3;
- pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
- uresp.pbl_addr);
-
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- iwch_dereg_mr(&mhp->ibmr, udata);
- err = -EFAULT;
- goto err;
- }
- }
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- ib_umem_release(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
-}
-
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mw *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
- ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
- }
- mhp->rhp = rhp;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_MW;
- mhp->attr.stag = stag;
- mmid = (stag) >> 8;
- mhp->ibmw.rkey = stag;
- if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
- }
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmw);
-}
-
-static int iwch_dealloc_mw(struct ib_mw *mw)
-{
- struct iwch_dev *rhp;
- struct iwch_mw *mhp;
- u32 mmid;
-
- mhp = to_iwch_mw(mw);
- rhp = mhp->rhp;
- mmid = (mw->rkey) >> 8;
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- xa_erase_irq(&rhp->mrs, mmid);
- pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret = -ENOMEM;
-
- if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > T3_MAX_FASTREG_DEPTH)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- goto err;
-
- mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mhp->pages)
- goto pl_err;
-
- mhp->rhp = rhp;
- ret = iwch_alloc_pbl(mhp, max_num_sg);
- if (ret)
- goto err1;
- mhp->attr.pbl_size = max_num_sg;
- ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
- if (ret)
- goto err2;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_NON_SHARED_MR;
- mhp->attr.stag = stag;
- mhp->attr.state = 1;
- mmid = (stag) >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- ret = xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL);
- if (ret)
- goto err3;
-
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmr);
-err3:
- cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-err2:
- iwch_free_pbl(mhp);
-err1:
- kfree(mhp->pages);
-pl_err:
- kfree(mhp);
-err:
- return ERR_PTR(ret);
-}
-
-static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct iwch_mr *mhp = to_iwch_mr(ibmr);
-
- if (unlikely(mhp->npages == mhp->attr.pbl_size))
- return -ENOMEM;
-
- mhp->pages[mhp->npages++] = addr;
-
- return 0;
-}
-
-static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
-{
- struct iwch_mr *mhp = to_iwch_mr(ibmr);
-
- mhp->npages = 0;
-
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
-}
-
-static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_qp_attributes attrs;
- struct iwch_ucontext *ucontext;
-
- qhp = to_iwch_qp(ib_qp);
- rhp = qhp->rhp;
-
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
- wait_event(qhp->wait, !qhp->ep);
-
- xa_erase_irq(&rhp->qps, qhp->wq.qpid);
-
- atomic_dec(&qhp->refcnt);
- wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
-
- ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
- ibucontext);
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
- pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
- ib_qp, qhp->wq.qpid, qhp);
- kfree(qhp);
- return 0;
-}
-
-static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_pd *php;
- struct iwch_cq *schp;
- struct iwch_cq *rchp;
- struct iwch_create_qp_resp uresp;
- int wqsize, sqsize, rqsize;
- struct iwch_ucontext *ucontext;
-
- pr_debug("%s ib_pd %p\n", __func__, pd);
- if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
- rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
- if (!schp || !rchp)
- return ERR_PTR(-EINVAL);
-
- /* The RQT size must be # of entries + 1 rounded up to a power of two */
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
- if (rqsize == attrs->cap.max_recv_wr)
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
-
- /* T3 doesn't support RQT depth < 16 */
- if (rqsize < 16)
- rqsize = 16;
-
- if (rqsize > T3_MAX_RQ_SIZE)
- return ERR_PTR(-EINVAL);
-
- if (attrs->cap.max_inline_data > T3_MAX_INLINE)
- return ERR_PTR(-EINVAL);
-
- /*
- * NOTE: The SQ and total WQ sizes don't need to be
- * a power of two. However, all the code assumes
- * they are. EG: Q_FREECNT() and friends.
- */
- sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
- wqsize = roundup_pow_of_two(rqsize + sqsize);
-
- /*
- * Kernel users need more wq space for fastreg WRs which can take
- * 2 WR fragments.
- */
- ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
- ibucontext);
- if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
- wqsize = roundup_pow_of_two(rqsize +
- roundup_pow_of_two(attrs->cap.max_send_wr * 2));
- pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
- wqsize, sqsize, rqsize);
- qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
- if (!qhp)
- return ERR_PTR(-ENOMEM);
- qhp->wq.size_log2 = ilog2(wqsize);
- qhp->wq.rq_size_log2 = ilog2(rqsize);
- qhp->wq.sq_size_log2 = ilog2(sqsize);
- if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- attrs->cap.max_recv_wr = rqsize - 1;
- attrs->cap.max_send_wr = sqsize;
- attrs->cap.max_inline_data = T3_MAX_INLINE;
-
- qhp->rhp = rhp;
- qhp->attr.pd = php->pdid;
- qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
- qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
- qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
- qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
- qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
- qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
- qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.next_state = IWCH_QP_STATE_IDLE;
-
- /*
- * XXX - These don't get passed in from the openib user
- * at create time. The CM sets them via a QP modify.
- * Need to fix... I think the CM should
- */
- qhp->attr.enable_rdma_read = 1;
- qhp->attr.enable_rdma_write = 1;
- qhp->attr.enable_bind = 1;
- qhp->attr.max_ord = 1;
- qhp->attr.max_ird = 1;
-
- spin_lock_init(&qhp->lock);
- init_waitqueue_head(&qhp->wait);
- atomic_set(&qhp->refcnt, 1);
-
- if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- if (udata) {
-
- struct iwch_mm_entry *mm1, *mm2;
-
- mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
- if (!mm1) {
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-ENOMEM);
- }
-
- mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
- if (!mm2) {
- kfree(mm1);
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-ENOMEM);
- }
-
- uresp.qpid = qhp->wq.qpid;
- uresp.size_log2 = qhp->wq.size_log2;
- uresp.sq_size_log2 = qhp->wq.sq_size_log2;
- uresp.rq_size_log2 = qhp->wq.rq_size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- uresp.db_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- kfree(mm1);
- kfree(mm2);
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-EFAULT);
- }
- mm1->key = uresp.key;
- mm1->addr = virt_to_phys(qhp->wq.queue);
- mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
- insert_mmap(ucontext, mm1);
- mm2->key = uresp.db_key;
- mm2->addr = qhp->wq.udb & PAGE_MASK;
- mm2->len = PAGE_SIZE;
- insert_mmap(ucontext, mm2);
- }
- qhp->ibqp.qp_num = qhp->wq.qpid;
- pr_debug(
- "%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr %pad size %d rq_addr 0x%x\n",
- __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
- qhp->wq.qpid, qhp, &qhp->wq.dma_addr, 1 << qhp->wq.size_log2,
- qhp->wq.rq_addr);
- return &qhp->ibqp;
-}
-
-static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- enum iwch_qp_attr_mask mask = 0;
- struct iwch_qp_attributes attrs = {};
-
- pr_debug("%s ib_qp %p\n", __func__, ibqp);
-
- /* iwarp does not support the RTR state */
- if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
- attr_mask &= ~IB_QP_STATE;
-
- /* Make sure we still have something left to do */
- if (!attr_mask)
- return 0;
-
- qhp = to_iwch_qp(ibqp);
- rhp = qhp->rhp;
-
- attrs.next_state = iwch_convert_state(attr->qp_state);
- attrs.enable_rdma_read = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_READ) ? 1 : 0;
- attrs.enable_rdma_write = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
- attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
-
-
- mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
- mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
- (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
-
- return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp)
-{
- pr_debug("%s ib_qp %p\n", __func__, qp);
- atomic_inc(&(to_iwch_qp(qp)->refcnt));
-}
-
-void iwch_qp_rem_ref(struct ib_qp *qp)
-{
- pr_debug("%s ib_qp %p\n", __func__, qp);
- if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
- wake_up(&(to_iwch_qp(qp)->wait));
-}
-
-static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
-{
- pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
- return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
-}
-
-
-static int iwch_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 * pkey)
-{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- *pkey = 0;
- return 0;
-}
-
-static int iwch_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct iwch_dev *dev;
-
- pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
- dev = to_iwch_dev(ibdev);
- BUG_ON(port == 0 || port > 2);
- memset(&(gid->raw[0]), 0, sizeof(gid->raw));
- memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
- return 0;
-}
-
-static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
-{
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
- char *cp, *next;
- unsigned fw_maj, fw_min, fw_mic;
-
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
-
- next = info.fw_version + 1;
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_maj);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_min);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_mic);
-
- return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
- (fw_mic & 0xffff);
-}
-
-static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
-
- struct iwch_dev *dev;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- dev = to_iwch_dev(ibdev);
- memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- props->hw_ver = dev->rdev.t3cdev_p->type;
- props->fw_ver = fw_vers_string_to_u64(dev);
- props->device_cap_flags = dev->device_cap_flags;
- props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
- props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
- props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
- props->max_mr_size = dev->attr.max_mr_size;
- props->max_qp = dev->attr.max_qps;
- props->max_qp_wr = dev->attr.max_wrs;
- props->max_send_sge = dev->attr.max_sge_per_wr;
- props->max_recv_sge = dev->attr.max_sge_per_wr;
- props->max_sge_rd = 1;
- props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_cq = dev->attr.max_cqs;
- props->max_cqe = dev->attr.max_cqes_per_cq;
- props->max_mr = dev->attr.max_mem_regs;
- props->max_pd = dev->attr.max_pds;
- props->local_ca_ack_delay = 0;
- props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
-
- return 0;
-}
-
-static int iwch_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_SNMP_TUNNEL_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
- props->max_msg_sz = -1;
-
- return 0;
-}
-
-static ssize_t hw_rev_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
-}
-static DEVICE_ATTR_RO(hw_rev);
-
-static ssize_t hca_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.driver);
-}
-static DEVICE_ATTR_RO(hca_type);
-
-static ssize_t board_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
- iwch_dev->rdev.rnic_info.pdev->device);
-}
-static DEVICE_ATTR_RO(board_id);
-
-enum counters {
- IPINRECEIVES,
- IPINHDRERRORS,
- IPINADDRERRORS,
- IPINUNKNOWNPROTOS,
- IPINDISCARDS,
- IPINDELIVERS,
- IPOUTREQUESTS,
- IPOUTDISCARDS,
- IPOUTNOROUTES,
- IPREASMTIMEOUT,
- IPREASMREQDS,
- IPREASMOKS,
- IPREASMFAILS,
- TCPACTIVEOPENS,
- TCPPASSIVEOPENS,
- TCPATTEMPTFAILS,
- TCPESTABRESETS,
- TCPCURRESTAB,
- TCPINSEGS,
- TCPOUTSEGS,
- TCPRETRANSSEGS,
- TCPINERRS,
- TCPOUTRSTS,
- TCPRTOMIN,
- TCPRTOMAX,
- NR_COUNTERS
-};
-
-static const char * const names[] = {
- [IPINRECEIVES] = "ipInReceives",
- [IPINHDRERRORS] = "ipInHdrErrors",
- [IPINADDRERRORS] = "ipInAddrErrors",
- [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
- [IPINDISCARDS] = "ipInDiscards",
- [IPINDELIVERS] = "ipInDelivers",
- [IPOUTREQUESTS] = "ipOutRequests",
- [IPOUTDISCARDS] = "ipOutDiscards",
- [IPOUTNOROUTES] = "ipOutNoRoutes",
- [IPREASMTIMEOUT] = "ipReasmTimeout",
- [IPREASMREQDS] = "ipReasmReqds",
- [IPREASMOKS] = "ipReasmOKs",
- [IPREASMFAILS] = "ipReasmFails",
- [TCPACTIVEOPENS] = "tcpActiveOpens",
- [TCPPASSIVEOPENS] = "tcpPassiveOpens",
- [TCPATTEMPTFAILS] = "tcpAttemptFails",
- [TCPESTABRESETS] = "tcpEstabResets",
- [TCPCURRESTAB] = "tcpCurrEstab",
- [TCPINSEGS] = "tcpInSegs",
- [TCPOUTSEGS] = "tcpOutSegs",
- [TCPRETRANSSEGS] = "tcpRetransSegs",
- [TCPINERRS] = "tcpInErrs",
- [TCPOUTRSTS] = "tcpOutRsts",
- [TCPRTOMIN] = "tcpRtoMin",
- [TCPRTOMAX] = "tcpRtoMax",
-};
-
-static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
- u8 port_num)
-{
- BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
-
- /* Our driver only supports device level stats */
- if (port_num != 0)
- return NULL;
-
- return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
-}
-
-static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port, int index)
-{
- struct iwch_dev *dev;
- struct tp_mib_stats m;
- int ret;
-
- if (port != 0 || !stats)
- return -ENOSYS;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- dev = to_iwch_dev(ibdev);
- ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
- if (ret)
- return -ENOSYS;
-
- stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
- stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
- stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
- stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
- stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
- stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
- stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
- stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
- stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
- stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
- stats->value[IPREASMREQDS] = m.ipReasmReqds;
- stats->value[IPREASMOKS] = m.ipReasmOKs;
- stats->value[IPREASMFAILS] = m.ipReasmFails;
- stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
- stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
- stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
- stats->value[TCPESTABRESETS] = m.tcpEstabResets;
- stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
- stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
- stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
- stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
- stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
- stats->value[TCPOUTRSTS] = m.tcpOutRsts;
- stats->value[TCPRTOMIN] = m.tcpRtoMin;
- stats->value[TCPRTOMAX] = m.tcpRtoMax;
-
- return stats->num_counters;
-}
-
-static struct attribute *iwch_class_attributes[] = {
- &dev_attr_hw_rev.attr,
- &dev_attr_hca_type.attr,
- &dev_attr_board_id.attr,
- NULL
-};
-
-static const struct attribute_group iwch_attr_group = {
- .attrs = iwch_class_attributes,
-};
-
-static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
-{
- struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
-}
-
-static const struct ib_device_ops iwch_dev_ops = {
- .owner = THIS_MODULE,
- .driver_id = RDMA_DRIVER_CXGB3,
- .uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION,
- .uverbs_no_driver_id_binding = 1,
-
- .alloc_hw_stats = iwch_alloc_stats,
- .alloc_mr = iwch_alloc_mr,
- .alloc_mw = iwch_alloc_mw,
- .alloc_pd = iwch_allocate_pd,
- .alloc_ucontext = iwch_alloc_ucontext,
- .create_cq = iwch_create_cq,
- .create_qp = iwch_create_qp,
- .dealloc_mw = iwch_dealloc_mw,
- .dealloc_pd = iwch_deallocate_pd,
- .dealloc_ucontext = iwch_dealloc_ucontext,
- .dereg_mr = iwch_dereg_mr,
- .destroy_cq = iwch_destroy_cq,
- .destroy_qp = iwch_destroy_qp,
- .get_dev_fw_str = get_dev_fw_ver_str,
- .get_dma_mr = iwch_get_dma_mr,
- .get_hw_stats = iwch_get_mib,
- .get_port_immutable = iwch_port_immutable,
- .iw_accept = iwch_accept_cr,
- .iw_add_ref = iwch_qp_add_ref,
- .iw_connect = iwch_connect,
- .iw_create_listen = iwch_create_listen,
- .iw_destroy_listen = iwch_destroy_listen,
- .iw_get_qp = iwch_get_qp,
- .iw_reject = iwch_reject_cr,
- .iw_rem_ref = iwch_qp_rem_ref,
- .map_mr_sg = iwch_map_mr_sg,
- .mmap = iwch_mmap,
- .modify_qp = iwch_ib_modify_qp,
- .poll_cq = iwch_poll_cq,
- .post_recv = iwch_post_receive,
- .post_send = iwch_post_send,
- .query_device = iwch_query_device,
- .query_gid = iwch_query_gid,
- .query_pkey = iwch_query_pkey,
- .query_port = iwch_query_port,
- .reg_user_mr = iwch_reg_user_mr,
- .req_notify_cq = iwch_arm_cq,
- INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
- INIT_RDMA_OBJ_SIZE(ib_cq, iwch_cq, ibcq),
- INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
-};
-
-static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
-{
- int ret;
- int i;
-
- for (i = 0; i < rdev->port_info.nports; i++) {
- ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
- i + 1);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-int iwch_register_device(struct iwch_dev *dev)
-{
- int err;
-
- pr_debug("%s iwch_dev %p\n", __func__, dev);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW |
- IB_DEVICE_MEM_MGT_EXTENSIONS;
-
- /* cxgb3 supports STag 0. */
- dev->ibdev.local_dma_lkey = 0;
-
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
- dev->ibdev.node_type = RDMA_NODE_RNIC;
- BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
- memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
- dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
- dev->ibdev.num_comp_vectors = 1;
- dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
-
- memcpy(dev->ibdev.iw_ifname, dev->rdev.t3cdev_p->lldev->name,
- sizeof(dev->ibdev.iw_ifname));
-
- rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
- ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
- err = set_netdevs(&dev->ibdev, &dev->rdev);
- if (err)
- return err;
-
- return ib_register_device(&dev->ibdev, "cxgb3_%d");
-}
-
-void iwch_unregister_device(struct iwch_dev *dev)
-{
- pr_debug("%s iwch_dev %p\n", __func__, dev);
- ib_unregister_device(&dev->ibdev);
- return;
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
deleted file mode 100644
index 8adbe9658935..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_PROVIDER_H__
-#define __IWCH_PROVIDER_H__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <rdma/ib_verbs.h>
-#include <asm/types.h>
-#include "t3cdev.h"
-#include "iwch.h"
-#include "cxio_wr.h"
-#include "cxio_hal.h"
-
-struct iwch_pd {
- struct ib_pd ibpd;
- u32 pdid;
- struct iwch_dev *rhp;
-};
-
-static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct iwch_pd, ibpd);
-}
-
-struct tpt_attributes {
- u32 stag;
- u32 state:1;
- u32 type:2;
- u32 rsvd:1;
- enum tpt_mem_perm perms;
- u32 remote_invaliate_disable:1;
- u32 zbva:1;
- u32 mw_bind_enable:1;
- u32 page_size:5;
-
- u32 pdid;
- u32 qpid;
- u32 pbl_addr;
- u32 len;
- u64 va_fbo;
- u32 pbl_size;
-};
-
-struct iwch_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
- u64 *pages;
- u32 npages;
-};
-
-typedef struct iwch_mw iwch_mw_handle;
-
-static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct iwch_mr, ibmr);
-}
-
-struct iwch_mw {
- struct ib_mw ibmw;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
-};
-
-static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct iwch_mw, ibmw);
-}
-
-struct iwch_cq {
- struct ib_cq ibcq;
- struct iwch_dev *rhp;
- struct t3_cq cq;
- spinlock_t lock;
- spinlock_t comp_handler_lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- u32 __user *user_rptr_addr;
-};
-
-static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct iwch_cq, ibcq);
-}
-
-enum IWCH_QP_FLAGS {
- QP_QUIESCED = 0x01
-};
-
-struct iwch_mpa_attributes {
- u8 initiator;
- u8 recv_marker_enabled;
- u8 xmit_marker_enabled; /* iWARP: enable inbound Read Resp. */
- u8 crc_enabled;
- u8 version; /* 0 or 1 */
-};
-
-struct iwch_qp_attributes {
- u32 scq;
- u32 rcq;
- u32 sq_num_entries;
- u32 rq_num_entries;
- u32 sq_max_sges;
- u32 sq_max_sges_rdma_write;
- u32 rq_max_sges;
- u32 state;
- u8 enable_rdma_read;
- u8 enable_rdma_write; /* enable inbound Read Resp. */
- u8 enable_bind;
- u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
- /*
-	 * Next QP state. If the current state is specified, only the
- * QP attributes will be modified.
- */
- u32 max_ord;
- u32 max_ird;
- u32 pd; /* IN */
- u32 next_state;
- char terminate_buffer[52];
- u32 terminate_msg_len;
- u8 is_terminate_local;
- struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
- struct iwch_ep *llp_stream_handle;
- char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
- u32 stream_msg_buf_len; /* Only on Idle -> RTS */
-};
-
-struct iwch_qp {
- struct ib_qp ibqp;
- struct iwch_dev *rhp;
- struct iwch_ep *ep;
- struct iwch_qp_attributes attr;
- struct t3_wq wq;
- spinlock_t lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- enum IWCH_QP_FLAGS flags;
-};
-
-static inline int qp_quiesced(struct iwch_qp *qhp)
-{
- return qhp->flags & QP_QUIESCED;
-}
-
-static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct iwch_qp, ibqp);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp);
-void iwch_qp_rem_ref(struct ib_qp *qp);
-
-struct iwch_ucontext {
- struct ib_ucontext ibucontext;
- struct cxio_ucontext uctx;
- u32 key;
- spinlock_t mmap_lock;
- struct list_head mmaps;
-};
-
-static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
-{
- return container_of(c, struct iwch_ucontext, ibucontext);
-}
-
-struct iwch_mm_entry {
- struct list_head entry;
- u64 addr;
- u32 key;
- unsigned len;
-};
-
-static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
- u32 key, unsigned len)
-{
- struct list_head *pos, *nxt;
- struct iwch_mm_entry *mm;
-
- spin_lock(&ucontext->mmap_lock);
- list_for_each_safe(pos, nxt, &ucontext->mmaps) {
-
- mm = list_entry(pos, struct iwch_mm_entry, entry);
- if (mm->key == key && mm->len == len) {
- list_del_init(&mm->entry);
- spin_unlock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, key,
- (unsigned long long)mm->addr, mm->len);
- return mm;
- }
- }
- spin_unlock(&ucontext->mmap_lock);
- return NULL;
-}
-
-static inline void insert_mmap(struct iwch_ucontext *ucontext,
- struct iwch_mm_entry *mm)
-{
- spin_lock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, mm->key, (unsigned long long)mm->addr, mm->len);
- list_add_tail(&mm->entry, &ucontext->mmaps);
- spin_unlock(&ucontext->mmap_lock);
-}
-
-enum iwch_qp_attr_mask {
- IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
- IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
- IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
- IWCH_QP_ATTR_MAX_ORD = 1 << 11,
- IWCH_QP_ATTR_MAX_IRD = 1 << 12,
- IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
- IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
- IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
- IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_MAX_ORD |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_STREAM_MSG_BUFFER |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
-};
-
-int iwch_modify_qp(struct iwch_dev *rhp,
- struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal);
-
-enum iwch_qp_state {
- IWCH_QP_STATE_IDLE,
- IWCH_QP_STATE_RTS,
- IWCH_QP_STATE_ERROR,
- IWCH_QP_STATE_TERMINATE,
- IWCH_QP_STATE_CLOSING,
- IWCH_QP_STATE_TOT
-};
-
-static inline int iwch_convert_state(enum ib_qp_state ib_state)
-{
- switch (ib_state) {
- case IB_QPS_RESET:
- case IB_QPS_INIT:
- return IWCH_QP_STATE_IDLE;
- case IB_QPS_RTS:
- return IWCH_QP_STATE_RTS;
- case IB_QPS_SQD:
- return IWCH_QP_STATE_CLOSING;
- case IB_QPS_SQE:
- return IWCH_QP_STATE_TERMINATE;
- case IB_QPS_ERR:
- return IWCH_QP_STATE_ERROR;
- default:
- return -1;
- }
-}
-
-static inline u32 iwch_ib_to_tpt_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
- (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
- TPT_LOCAL_READ;
-}
-
-static inline u32 iwch_ib_to_tpt_bind_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
-}
-
-enum iwch_mmid_state {
- IWCH_STAG_STATE_VALID,
- IWCH_STAG_STATE_INVALID
-};
-
-enum iwch_qp_query_flags {
- IWCH_QP_QUERY_CONTEXT_NONE = 0x0, /* No ctx; Only attrs */
- IWCH_QP_QUERY_CONTEXT_GET = 0x1, /* Get ctx + attrs */
- IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2, /* Not Supported */
-
- /*
- * Quiesce QP context; Consumer
- * will NOT replay outstanding WR
- */
- IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
- IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
- IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
-};
-
-u16 iwch_rqes_posted(struct iwch_qp *qhp);
-int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
-int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_ep *ep);
-int iwch_register_device(struct iwch_dev *dev);
-void iwch_unregister_device(struct iwch_dev *dev);
-void stop_read_rep_timer(struct iwch_qp *qhp);
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift);
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
-void iwch_free_pbl(struct iwch_mr *mhp);
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
-
-#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
deleted file mode 100644
index c649faad63f9..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ /dev/null
@@ -1,1082 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-
-#define NO_SUPPORT -1
-
-static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
-
- switch (wr->opcode) {
- case IB_WR_SEND:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE;
- else
- wqe->send.rdmaop = T3_SEND;
- wqe->send.rem_stag = 0;
- break;
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
- else
- wqe->send.rdmaop = T3_SEND_WITH_INV;
- wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
- break;
- default:
- return -EINVAL;
- }
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->send.reserved[0] = 0;
- wqe->send.reserved[1] = 0;
- wqe->send.reserved[2] = 0;
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen)
- return -EMSGSIZE;
-
- plen += wr->sg_list[i].length;
- wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 4 + ((wr->num_sge) << 1);
- wqe->send.plen = cpu_to_be32(plen);
- return 0;
-}
-
-static int build_rdma_write(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->write.rdmaop = T3_RDMA_WRITE;
- wqe->write.reserved[0] = 0;
- wqe->write.reserved[1] = 0;
- wqe->write.reserved[2] = 0;
- wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
- wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
-
- if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
- plen = 4;
- wqe->write.sgl[0].stag = wr->ex.imm_data;
- wqe->write.sgl[0].len = cpu_to_be32(0);
- wqe->write.num_sgle = cpu_to_be32(0);
- *flit_cnt = 6;
- } else {
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen) {
- return -EMSGSIZE;
- }
- plen += wr->sg_list[i].length;
- wqe->write.sgl[i].stag =
- cpu_to_be32(wr->sg_list[i].lkey);
- wqe->write.sgl[i].len =
- cpu_to_be32(wr->sg_list[i].length);
- wqe->write.sgl[i].to =
- cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 5 + ((wr->num_sge) << 1);
- }
- wqe->write.plen = cpu_to_be32(plen);
- return 0;
-}
-
-static int build_rdma_read(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- if (wr->num_sge > 1)
- return -EINVAL;
- wqe->read.rdmaop = T3_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
- wqe->read.local_inv = 1;
- else
- wqe->read.local_inv = 0;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(rdma_wr(wr)->rkey);
- wqe->read.rem_to = cpu_to_be64(rdma_wr(wr)->remote_addr);
- wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
- wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
- wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
- *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
- return 0;
-}
-
-static int build_memreg(union t3_wr *wqe, const struct ib_reg_wr *wr,
- u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
-{
- struct iwch_mr *mhp = to_iwch_mr(wr->mr);
- int i;
- __be64 *p;
-
- if (mhp->npages > T3_MAX_FASTREG_DEPTH)
- return -EINVAL;
- *wr_cnt = 1;
- wqe->fastreg.stag = cpu_to_be32(wr->key);
- wqe->fastreg.len = cpu_to_be32(mhp->ibmr.length);
- wqe->fastreg.va_base_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
- wqe->fastreg.va_base_lo_fbo =
- cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
- wqe->fastreg.page_type_perms = cpu_to_be32(
- V_FR_PAGE_COUNT(mhp->npages) |
- V_FR_PAGE_SIZE(ilog2(wr->mr->page_size) - 12) |
- V_FR_TYPE(TPT_VATO) |
- V_FR_PERMS(iwch_ib_to_tpt_access(wr->access)));
- p = &wqe->fastreg.pbl_addrs[0];
- for (i = 0; i < mhp->npages; i++, p++) {
-
- /* If we need a 2nd WR, then set it up */
- if (i == T3_MAX_FASTREG_FRAG) {
- *wr_cnt = 2;
- wqe = (union t3_wr *)(wq->queue +
- Q_PTR2IDX((wq->wptr+1), wq->size_log2));
- build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
- Q_GENBIT(wq->wptr + 1, wq->size_log2),
- 0, 1 + mhp->npages - T3_MAX_FASTREG_FRAG,
- T3_EOP);
-
- p = &wqe->pbl_frag.pbl_addrs[0];
- }
- *p = cpu_to_be64((u64)mhp->pages[i]);
- }
- *flit_cnt = 5 + mhp->npages;
- if (*flit_cnt > 15)
- *flit_cnt = 15;
- return 0;
-}
-
-static int build_inv_stag(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
- wqe->local_inv.reserved = 0;
- *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
- return 0;
-}
-
-static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
- u32 num_sgle, u32 * pbl_addr, u8 * page_size)
-{
- int i;
- struct iwch_mr *mhp;
- u64 offset;
- for (i = 0; i < num_sgle; i++) {
-
- mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
- if (!mhp) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (!mhp->attr.state) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (mhp->attr.zbva) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
-
- if (sg_list[i].addr < mhp->attr.va_fbo) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) <
- sg_list[i].addr) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) >
- mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- offset = sg_list[i].addr - mhp->attr.va_fbo;
- offset += mhp->attr.va_fbo &
- ((1UL << (12 + mhp->attr.page_size)) - 1);
- pbl_addr[i] = ((mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3) +
- (offset >> (12 + mhp->attr.page_size));
- page_size[i] = mhp->attr.page_size;
- }
- return 0;
-}
-
-static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- const struct ib_recv_wr *wr)
-{
- int i, err = 0;
- u32 pbl_addr[T3_MAX_SGE];
- u8 page_size[T3_MAX_SGE];
-
- err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
- page_size);
- if (err)
- return err;
- wqe->recv.pagesz[0] = page_size[0];
- wqe->recv.pagesz[1] = page_size[1];
- wqe->recv.pagesz[2] = page_size[2];
- wqe->recv.pagesz[3] = page_size[3];
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
- for (i = 0; i < wr->num_sge; i++) {
- wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
-
- /* to in the WQE == the offset into the page */
- wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
- ((1UL << (12 + page_size[i])) - 1));
-
-		/* pbl_addr is the adapter's address in the PBL */
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = 0;
- return 0;
-}
-
-static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- const struct ib_recv_wr *wr)
-{
- int i;
- u32 pbl_addr;
- u32 pbl_offset;
-
-
- /*
- * The T3 HW requires the PBL in the HW recv descriptor to reference
- * a PBL entry. So we allocate the max needed PBL memory here and pass
-	 * it to the uP in the recv WR. The uP will build the PBL and set up
- * the HW recv descriptor.
- */
- pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
- if (!pbl_addr)
- return -ENOMEM;
-
- /*
- * Compute the 8B aligned offset.
- */
- pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
-
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
-
- for (i = 0; i < wr->num_sge; i++) {
-
- /*
- * Use a 128MB page size. This and an imposed 128MB
-		 * sge length limit allow us to require only a 2-entry HW
-		 * PBL for each SGE. This restriction is acceptable since
-		 * it is not possible to allocate 128MB of contiguous
- * DMA coherent memory!
- */
- if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
- return -EINVAL;
- wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;
-
- /*
- * T3 restricts a recv to all zero-stag or all non-zero-stag.
- */
- if (wr->sg_list[i].lkey != 0)
- return -EINVAL;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
- pbl_offset += 2;
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.pagesz[i] = 0;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
- return 0;
-}
-
-int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- int err = 0;
- u8 uninitialized_var(t3_wr_flit_cnt);
- enum t3_wr_opcode t3_wr_opcode = 0;
- enum t3_wr_flags t3_wr_flags;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
- struct t3_swsq *sqp;
- int wr_cnt = 1;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
- qhp->wq.sq_size_log2);
- if (num_wrs == 0) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (num_wrs == 0) {
- err = -ENOMEM;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- t3_wr_flags = 0;
- if (wr->send_flags & IB_SEND_SOLICITED)
- t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
- if (wr->send_flags & IB_SEND_SIGNALED)
- t3_wr_flags |= T3_COMPLETION_FLAG;
- sqp = qhp->wq.sq +
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
- switch (wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_READ_FENCE_FLAG;
- t3_wr_opcode = T3_WR_SEND;
- err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- t3_wr_opcode = T3_WR_WRITE;
- err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_READ_WITH_INV:
- t3_wr_opcode = T3_WR_READ;
- t3_wr_flags = 0; /* T3 reads are always signaled */
- err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
- if (err)
- break;
- sqp->read_len = wqe->read.local_len;
- if (!qhp->wq.oldest_read)
- qhp->wq.oldest_read = sqp;
- break;
- case IB_WR_REG_MR:
- t3_wr_opcode = T3_WR_FASTREG;
- err = build_memreg(wqe, reg_wr(wr), &t3_wr_flit_cnt,
- &wr_cnt, &qhp->wq);
- break;
- case IB_WR_LOCAL_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
- t3_wr_opcode = T3_WR_INV_STAG;
- err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
- break;
- default:
- pr_debug("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
- err = -EINVAL;
- }
- if (err)
- break;
- wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
- sqp->wr_id = wr->wr_id;
- sqp->opcode = wr2opcode(t3_wr_opcode);
- sqp->sq_wptr = qhp->wq.sq_wptr;
- sqp->complete = 0;
- sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);
-
- build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, t3_wr_flit_cnt,
- (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
- pr_debug("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
- __func__, (unsigned long long)wr->wr_id, idx,
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
- sqp->opcode);
- wr = wr->next;
- num_wrs--;
- qhp->wq.wptr += wr_cnt;
- ++(qhp->wq.sq_wptr);
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- int err = 0;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2) - 1;
- if (!wr) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (wr->num_sge > T3_MAX_SGE) {
- err = -EINVAL;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- if (num_wrs)
- if (wr->sg_list[0].lkey)
- err = build_rdma_recv(qhp, wqe, wr);
- else
- err = build_zero_stag_recv(qhp, wqe, wr);
- else
- err = -ENOMEM;
-
- if (err)
- break;
-
- build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
- pr_debug("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x wqe %p\n",
- __func__, (unsigned long long)wr->wr_id,
- idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
- ++(qhp->wq.rq_wptr);
- ++(qhp->wq.wptr);
- wr = wr->next;
- num_wrs--;
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
- u8 *layer_type, u8 *ecode)
-{
- int status = TPT_ERR_INTERNAL_ERR;
- int tagged = 0;
- int opcode = -1;
- int rqtype = 0;
- int send_inv = 0;
-
- if (rsp_msg) {
- status = CQE_STATUS(rsp_msg->cqe);
- opcode = CQE_OPCODE(rsp_msg->cqe);
- rqtype = RQ_TYPE(rsp_msg->cqe);
- send_inv = (opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV);
- tagged = (opcode == T3_RDMA_WRITE) ||
- (rqtype && (opcode == T3_READ_RESP));
- }
-
- switch (status) {
- case TPT_ERR_STAG:
- if (send_inv) {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_INV_STAG;
- }
- break;
- case TPT_ERR_PDID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- if ((opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV))
- *ecode = RDMAP_CANT_INV_STAG;
- else
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_QPID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_ACCESS:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_ACC_VIOL;
- break;
- case TPT_ERR_WRAP:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_TO_WRAP;
- break;
- case TPT_ERR_BOUND:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_BASE_BOUNDS;
- }
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- break;
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_OUT_OF_RQE:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_NOBUF;
- break;
- case TPT_ERR_PBL_ADDR_BOUND:
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- break;
- case TPT_ERR_CRC:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_CRC_ERR;
- break;
- case TPT_ERR_MARKER:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_MARKER_ERR;
- break;
- case TPT_ERR_PDU_LEN_ERR:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_MSG_TOOBIG;
- break;
- case TPT_ERR_DDP_VERSION:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_INV_VERS;
- } else {
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_VERS;
- }
- break;
- case TPT_ERR_RDMA_VERSION:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_VERS;
- break;
- case TPT_ERR_OPCODE:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_OPCODE;
- break;
- case TPT_ERR_DDP_QUEUE_NUM:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_QN;
- break;
- case TPT_ERR_MSN:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_RANGE;
- break;
- case TPT_ERR_TBIT:
- *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_MO:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MO;
- break;
- default:
- *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- }
-}
-
-int iwch_post_zb_read(struct iwch_ep *ep)
-{
- union t3_wr *wqe;
- struct sk_buff *skb;
- u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
-
- pr_debug("%s enter\n", __func__);
- skb = alloc_skb(40, GFP_KERNEL);
- if (!skb) {
- pr_err("%s cannot send zb_read!!\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr));
- wqe->read.rdmaop = T3_READ_REQ;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(1);
- wqe->read.rem_to = cpu_to_be64(1);
- wqe->read.local_stag = cpu_to_be32(1);
- wqe->read.local_len = cpu_to_be32(0);
- wqe->read.local_to = cpu_to_be64(1);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
- V_FW_RIWR_LEN(flit_cnt));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * This posts a TERMINATE with layer=RDMA, type=catastrophic.
- */
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
-{
- union t3_wr *wqe;
- struct terminate_message *term;
- struct sk_buff *skb;
-
- pr_debug("%s %d\n", __func__, __LINE__);
- skb = alloc_skb(40, GFP_ATOMIC);
- if (!skb) {
- pr_err("%s cannot send TERMINATE!\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, 40);
- wqe->send.rdmaop = T3_TERMINATE;
-
- /* immediate data length */
- wqe->send.plen = htonl(4);
-
- /* immediate data starts here. */
- term = (struct terminate_message *)wqe->send.sgl;
- build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
- V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * Assumes qhp lock is held.
- */
-static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
- struct iwch_cq *schp)
- __releases(&qhp->lock)
- __acquires(&qhp->lock)
-{
- int count;
- int flushed;
-
- lockdep_assert_held(&qhp->lock);
-
- pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
- /* take a ref on the qhp since we must release the lock */
- atomic_inc(&qhp->refcnt);
- spin_unlock(&qhp->lock);
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&rchp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&rchp->cq);
- cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
- flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&rchp->lock);
- if (flushed) {
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- }
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&schp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&schp->cq);
- cxio_count_scqes(&schp->cq, &qhp->wq, &count);
- flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&schp->lock);
- if (flushed) {
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
-
- /* deref */
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-
- spin_lock(&qhp->lock);
-}
-
-static void flush_qp(struct iwch_qp *qhp)
-{
- struct iwch_cq *rchp, *schp;
-
- rchp = get_chp(qhp->rhp, qhp->attr.rcq);
- schp = get_chp(qhp->rhp, qhp->attr.scq);
-
- if (qhp->ibqp.uobject) {
- cxio_set_wq_in_error(&qhp->wq);
- cxio_set_cq_in_error(&rchp->cq);
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- if (schp != rchp) {
- cxio_set_cq_in_error(&schp->cq);
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq,
- schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
- return;
- }
- __flush_qp(qhp, rchp, schp);
-}
-
-
-/*
- * Return count of RECV WRs posted
- */
-u16 iwch_rqes_posted(struct iwch_qp *qhp)
-{
- union t3_wr *wqe = qhp->wq.queue;
- u16 count = 0;
-
- while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
- count++;
- wqe++;
- }
- pr_debug("%s qhp %p count %u\n", __func__, qhp, count);
- return count;
-}
-
-static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs)
-{
- struct t3_rdma_init_attr init_attr;
- int ret;
-
- init_attr.tid = qhp->ep->hwtid;
- init_attr.qpid = qhp->wq.qpid;
- init_attr.pdid = qhp->attr.pd;
- init_attr.scqid = qhp->attr.scq;
- init_attr.rcqid = qhp->attr.rcq;
- init_attr.rq_addr = qhp->wq.rq_addr;
- init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
- init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
- qhp->attr.mpa_attr.recv_marker_enabled |
- (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
- (qhp->attr.mpa_attr.crc_enabled << 2);
-
- init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
- uP_RI_QP_RDMA_WRITE_ENABLE |
- uP_RI_QP_BIND_ENABLE;
- if (!qhp->ibqp.uobject)
- init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
- uP_RI_QP_FAST_REGISTER_ENABLE;
-
- init_attr.tcp_emss = qhp->ep->emss;
- init_attr.ord = qhp->attr.max_ord;
- init_attr.ird = qhp->attr.max_ird;
- init_attr.qp_dma_addr = qhp->wq.dma_addr;
- init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
- init_attr.rqe_count = iwch_rqes_posted(qhp);
- init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
- init_attr.chan = qhp->ep->l2t->smt_idx;
- if (peer2peer) {
- init_attr.rtr_type = RTR_READ;
- if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
- init_attr.ord = 1;
- if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
- init_attr.ird = 1;
- } else
- init_attr.rtr_type = 0;
- init_attr.irs = qhp->ep->rcv_seq;
- pr_debug("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d flags 0x%x qpcaps 0x%x\n",
- __func__,
- init_attr.rq_addr, init_attr.rq_size,
- init_attr.flags, init_attr.qpcaps);
- ret = cxio_rdma_init(&rhp->rdev, &init_attr);
- pr_debug("%s ret %d\n", __func__, ret);
- return ret;
-}
-
-int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal)
-{
- int ret = 0;
- struct iwch_qp_attributes newattr = qhp->attr;
- unsigned long flag;
- int disconnect = 0;
- int terminate = 0;
- int abort = 0;
- int free = 0;
- struct iwch_ep *ep = NULL;
-
- pr_debug("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
- qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
- (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
-
- spin_lock_irqsave(&qhp->lock, flag);
-
- /* Process attr changes if in IDLE */
- if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
- if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
- ret = -EIO;
- goto out;
- }
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
- newattr.enable_rdma_read = attrs->enable_rdma_read;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
- newattr.enable_rdma_write = attrs->enable_rdma_write;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
- newattr.enable_bind = attrs->enable_bind;
- if (mask & IWCH_QP_ATTR_MAX_ORD) {
- if (attrs->max_ord >
- rhp->attr.max_rdma_read_qp_depth) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ord = attrs->max_ord;
- }
- if (mask & IWCH_QP_ATTR_MAX_IRD) {
- if (attrs->max_ird >
- rhp->attr.max_rdma_reads_per_qp) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ird = attrs->max_ird;
- }
- qhp->attr = newattr;
- }
-
- if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
- goto out;
- if (qhp->attr.state == attrs->next_state)
- goto out;
-
- switch (qhp->attr.state) {
- case IWCH_QP_STATE_IDLE:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_RTS:
- if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
- ret = -EINVAL;
- goto out;
- }
- if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.mpa_attr = attrs->mpa_attr;
- qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
- qhp->ep = qhp->attr.llp_stream_handle;
- qhp->attr.state = IWCH_QP_STATE_RTS;
-
- /*
- * Ref the endpoint here and deref when we
- * disassociate the endpoint from the QP. This
- * happens in CLOSING->IDLE transition or *->ERROR
- * transition.
- */
- get_ep(&qhp->ep->com);
- spin_unlock_irqrestore(&qhp->lock, flag);
- ret = rdma_init(rhp, qhp, mask, attrs);
- spin_lock_irqsave(&qhp->lock, flag);
- if (ret)
- goto err;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- flush_qp(qhp);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_RTS:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_CLOSING:
- BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
- qhp->attr.state = IWCH_QP_STATE_CLOSING;
- if (!internal) {
- abort=0;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- break;
- case IWCH_QP_STATE_TERMINATE:
- qhp->attr.state = IWCH_QP_STATE_TERMINATE;
- if (qhp->ibqp.uobject)
- cxio_set_wq_in_error(&qhp->wq);
- if (!internal)
- terminate = 1;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- if (!internal) {
- abort=1;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- goto err;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_CLOSING:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- switch (attrs->next_state) {
- case IWCH_QP_STATE_IDLE:
- flush_qp(qhp);
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.llp_stream_handle = NULL;
- put_ep(&qhp->ep->com);
- qhp->ep = NULL;
- wake_up(&qhp->wait);
- break;
- case IWCH_QP_STATE_ERROR:
- goto err;
- default:
- ret = -EINVAL;
- goto err;
- }
- break;
- case IWCH_QP_STATE_ERROR:
- if (attrs->next_state != IWCH_QP_STATE_IDLE) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
- !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- break;
- case IWCH_QP_STATE_TERMINATE:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- goto err;
- break;
- default:
- pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
- ret = -EINVAL;
- goto err;
- break;
- }
- goto out;
-err:
- pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
- qhp->wq.qpid);
-
- /* disassociate the LLP connection */
- qhp->attr.llp_stream_handle = NULL;
- ep = qhp->ep;
- qhp->ep = NULL;
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- free=1;
- wake_up(&qhp->wait);
- BUG_ON(!ep);
- flush_qp(qhp);
-out:
- spin_unlock_irqrestore(&qhp->lock, flag);
-
- if (terminate)
- iwch_post_terminate(qhp, NULL);
-
- /*
- * If disconnect is 1, then we need to initiate a disconnect
- * on the EP. This can be a normal close (RTS->CLOSING) or
- * an abnormal close (RTS/CLOSING->ERROR).
- */
- if (disconnect) {
- iwch_ep_disconnect(ep, abort, GFP_KERNEL);
- put_ep(&ep->com);
- }
-
- /*
- * If free is 1, then we've disassociated the EP from the QP
- * and we need to dereference the EP.
- */
- if (free)
- put_ep(&ep->com);
-
- pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
- return ret;
-}
diff --git a/drivers/infiniband/hw/cxgb3/tcb.h b/drivers/infiniband/hw/cxgb3/tcb.h
deleted file mode 100644
index c702dc199e18..000000000000
--- a/drivers/infiniband/hw/cxgb3/tcb.h
+++ /dev/null
@@ -1,632 +0,0 @@
-/*
- * Copyright (c) 2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _TCB_DEFS_H
-#define _TCB_DEFS_H
-
-#define W_TCB_T_STATE 0
-#define S_TCB_T_STATE 0
-#define M_TCB_T_STATE 0xfULL
-#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
-
-#define W_TCB_TIMER 0
-#define S_TCB_TIMER 4
-#define M_TCB_TIMER 0x1ULL
-#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
-
-#define W_TCB_DACK_TIMER 0
-#define S_TCB_DACK_TIMER 5
-#define M_TCB_DACK_TIMER 0x1ULL
-#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
-
-#define W_TCB_DEL_FLAG 0
-#define S_TCB_DEL_FLAG 6
-#define M_TCB_DEL_FLAG 0x1ULL
-#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
-
-#define W_TCB_L2T_IX 0
-#define S_TCB_L2T_IX 7
-#define M_TCB_L2T_IX 0x7ffULL
-#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
-
-#define W_TCB_SMAC_SEL 0
-#define S_TCB_SMAC_SEL 18
-#define M_TCB_SMAC_SEL 0x3ULL
-#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
-
-#define W_TCB_TOS 0
-#define S_TCB_TOS 20
-#define M_TCB_TOS 0x3fULL
-#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
-
-#define W_TCB_MAX_RT 0
-#define S_TCB_MAX_RT 26
-#define M_TCB_MAX_RT 0xfULL
-#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
-
-#define W_TCB_T_RXTSHIFT 0
-#define S_TCB_T_RXTSHIFT 30
-#define M_TCB_T_RXTSHIFT 0xfULL
-#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
-
-#define W_TCB_T_DUPACKS 1
-#define S_TCB_T_DUPACKS 2
-#define M_TCB_T_DUPACKS 0xfULL
-#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
-
-#define W_TCB_T_MAXSEG 1
-#define S_TCB_T_MAXSEG 6
-#define M_TCB_T_MAXSEG 0xfULL
-#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
-
-#define W_TCB_T_FLAGS1 1
-#define S_TCB_T_FLAGS1 10
-#define M_TCB_T_FLAGS1 0xffffffffULL
-#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
-
-#define W_TCB_T_MIGRATION 1
-#define S_TCB_T_MIGRATION 20
-#define M_TCB_T_MIGRATION 0x1ULL
-#define V_TCB_T_MIGRATION(x) ((x) << S_TCB_T_MIGRATION)
-
-#define W_TCB_T_FLAGS2 2
-#define S_TCB_T_FLAGS2 10
-#define M_TCB_T_FLAGS2 0x7fULL
-#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
-
-#define W_TCB_SND_SCALE 2
-#define S_TCB_SND_SCALE 17
-#define M_TCB_SND_SCALE 0xfULL
-#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
-
-#define W_TCB_RCV_SCALE 2
-#define S_TCB_RCV_SCALE 21
-#define M_TCB_RCV_SCALE 0xfULL
-#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
-
-#define W_TCB_SND_UNA_RAW 2
-#define S_TCB_SND_UNA_RAW 25
-#define M_TCB_SND_UNA_RAW 0x7ffffffULL
-#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
-
-#define W_TCB_SND_NXT_RAW 3
-#define S_TCB_SND_NXT_RAW 20
-#define M_TCB_SND_NXT_RAW 0x7ffffffULL
-#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
-
-#define W_TCB_RCV_NXT 4
-#define S_TCB_RCV_NXT 15
-#define M_TCB_RCV_NXT 0xffffffffULL
-#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
-
-#define W_TCB_RCV_ADV 5
-#define S_TCB_RCV_ADV 15
-#define M_TCB_RCV_ADV 0xffffULL
-#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
-
-#define W_TCB_SND_MAX_RAW 5
-#define S_TCB_SND_MAX_RAW 31
-#define M_TCB_SND_MAX_RAW 0x7ffffffULL
-#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
-
-#define W_TCB_SND_CWND 6
-#define S_TCB_SND_CWND 26
-#define M_TCB_SND_CWND 0x7ffffffULL
-#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
-
-#define W_TCB_SND_SSTHRESH 7
-#define S_TCB_SND_SSTHRESH 21
-#define M_TCB_SND_SSTHRESH 0x7ffffffULL
-#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
-
-#define W_TCB_T_RTT_TS_RECENT_AGE 8
-#define S_TCB_T_RTT_TS_RECENT_AGE 16
-#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
-#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
-
-#define W_TCB_T_RTSEQ_RECENT 9
-#define S_TCB_T_RTSEQ_RECENT 16
-#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
-#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
-
-#define W_TCB_T_SRTT 10
-#define S_TCB_T_SRTT 16
-#define M_TCB_T_SRTT 0xffffULL
-#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
-
-#define W_TCB_T_RTTVAR 11
-#define S_TCB_T_RTTVAR 0
-#define M_TCB_T_RTTVAR 0xffffULL
-#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
-
-#define W_TCB_TS_LAST_ACK_SENT_RAW 11
-#define S_TCB_TS_LAST_ACK_SENT_RAW 16
-#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
-#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
-
-#define W_TCB_DIP 12
-#define S_TCB_DIP 11
-#define M_TCB_DIP 0xffffffffULL
-#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
-
-#define W_TCB_SIP 13
-#define S_TCB_SIP 11
-#define M_TCB_SIP 0xffffffffULL
-#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
-
-#define W_TCB_DP 14
-#define S_TCB_DP 11
-#define M_TCB_DP 0xffffULL
-#define V_TCB_DP(x) ((x) << S_TCB_DP)
-
-#define W_TCB_SP 14
-#define S_TCB_SP 27
-#define M_TCB_SP 0xffffULL
-#define V_TCB_SP(x) ((x) << S_TCB_SP)
-
-#define W_TCB_TIMESTAMP 15
-#define S_TCB_TIMESTAMP 11
-#define M_TCB_TIMESTAMP 0xffffffffULL
-#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
-
-#define W_TCB_TIMESTAMP_OFFSET 16
-#define S_TCB_TIMESTAMP_OFFSET 11
-#define M_TCB_TIMESTAMP_OFFSET 0xfULL
-#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
-
-#define W_TCB_TX_MAX 16
-#define S_TCB_TX_MAX 15
-#define M_TCB_TX_MAX 0xffffffffULL
-#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
-
-#define W_TCB_TX_HDR_PTR_RAW 17
-#define S_TCB_TX_HDR_PTR_RAW 15
-#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
-
-#define W_TCB_TX_LAST_PTR_RAW 18
-#define S_TCB_TX_LAST_PTR_RAW 0
-#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
-
-#define W_TCB_TX_COMPACT 18
-#define S_TCB_TX_COMPACT 17
-#define M_TCB_TX_COMPACT 0x1ULL
-#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
-
-#define W_TCB_RX_COMPACT 18
-#define S_TCB_RX_COMPACT 18
-#define M_TCB_RX_COMPACT 0x1ULL
-#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
-
-#define W_TCB_RCV_WND 18
-#define S_TCB_RCV_WND 19
-#define M_TCB_RCV_WND 0x7ffffffULL
-#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)
-
-#define W_TCB_RX_HDR_OFFSET 19
-#define S_TCB_RX_HDR_OFFSET 14
-#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
-#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)
-
-#define W_TCB_RX_FRAG0_START_IDX_RAW 20
-#define S_TCB_RX_FRAG0_START_IDX_RAW 9
-#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)
-
-#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
-#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
-#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
-#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
-
-#define W_TCB_RX_FRAG0_LEN 21
-#define S_TCB_RX_FRAG0_LEN 31
-#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)
-
-#define W_TCB_RX_FRAG1_LEN 22
-#define S_TCB_RX_FRAG1_LEN 26
-#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)
-
-#define W_TCB_NEWRENO_RECOVER 23
-#define S_TCB_NEWRENO_RECOVER 21
-#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
-#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)
-
-#define W_TCB_PDU_HAVE_LEN 24
-#define S_TCB_PDU_HAVE_LEN 16
-#define M_TCB_PDU_HAVE_LEN 0x1ULL
-#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)
-
-#define W_TCB_PDU_LEN 24
-#define S_TCB_PDU_LEN 17
-#define M_TCB_PDU_LEN 0xffffULL
-#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
-
-#define W_TCB_RX_QUIESCE 25
-#define S_TCB_RX_QUIESCE 1
-#define M_TCB_RX_QUIESCE 0x1ULL
-#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)
-
-#define W_TCB_RX_PTR_RAW 25
-#define S_TCB_RX_PTR_RAW 2
-#define M_TCB_RX_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)
-
-#define W_TCB_CPU_NO 25
-#define S_TCB_CPU_NO 19
-#define M_TCB_CPU_NO 0x7fULL
-#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)
-
-#define W_TCB_ULP_TYPE 25
-#define S_TCB_ULP_TYPE 26
-#define M_TCB_ULP_TYPE 0xfULL
-#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
-
-#define W_TCB_RX_FRAG1_PTR_RAW 25
-#define S_TCB_RX_FRAG1_PTR_RAW 30
-#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
-#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
-#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
-
-#define W_TCB_RX_FRAG2_PTR_RAW 27
-#define S_TCB_RX_FRAG2_PTR_RAW 10
-#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_LEN_RAW 27
-#define S_TCB_RX_FRAG2_LEN_RAW 27
-#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_PTR_RAW 28
-#define S_TCB_RX_FRAG3_PTR_RAW 22
-#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)
-
-#define W_TCB_RX_FRAG3_LEN_RAW 29
-#define S_TCB_RX_FRAG3_LEN_RAW 7
-#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
-#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
-#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
-
-#define W_TCB_PDU_HDR_LEN 30
-#define S_TCB_PDU_HDR_LEN 29
-#define M_TCB_PDU_HDR_LEN 0xffULL
-#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
-
-#define W_TCB_SLUSH1 31
-#define S_TCB_SLUSH1 5
-#define M_TCB_SLUSH1 0x7ffffULL
-#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)
-
-#define W_TCB_ULP_RAW 31
-#define S_TCB_ULP_RAW 24
-#define M_TCB_ULP_RAW 0xffULL
-#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
-
-#define W_TCB_DDP_RDMAP_VERSION 25
-#define S_TCB_DDP_RDMAP_VERSION 30
-#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
-#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)
-
-#define W_TCB_MARKER_ENABLE_RX 25
-#define S_TCB_MARKER_ENABLE_RX 31
-#define M_TCB_MARKER_ENABLE_RX 0x1ULL
-#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)
-
-#define W_TCB_MARKER_ENABLE_TX 26
-#define S_TCB_MARKER_ENABLE_TX 0
-#define M_TCB_MARKER_ENABLE_TX 0x1ULL
-#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)
-
-#define W_TCB_CRC_ENABLE 26
-#define S_TCB_CRC_ENABLE 1
-#define M_TCB_CRC_ENABLE 0x1ULL
-#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)
-
-#define W_TCB_IRS_ULP 26
-#define S_TCB_IRS_ULP 2
-#define M_TCB_IRS_ULP 0x1ffULL
-#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
-
-#define W_TCB_ISS_ULP 26
-#define S_TCB_ISS_ULP 11
-#define M_TCB_ISS_ULP 0x1ffULL
-#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
-
-#define W_TCB_TX_PDU_LEN 26
-#define S_TCB_TX_PDU_LEN 20
-#define M_TCB_TX_PDU_LEN 0x3fffULL
-#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
-
-#define W_TCB_TX_PDU_OUT 27
-#define S_TCB_TX_PDU_OUT 2
-#define M_TCB_TX_PDU_OUT 0x1ULL
-#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)
-
-#define W_TCB_CQ_IDX_SQ 27
-#define S_TCB_CQ_IDX_SQ 3
-#define M_TCB_CQ_IDX_SQ 0xffffULL
-#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
-
-#define W_TCB_CQ_IDX_RQ 27
-#define S_TCB_CQ_IDX_RQ 19
-#define M_TCB_CQ_IDX_RQ 0xffffULL
-#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
-
-#define W_TCB_QP_ID 28
-#define S_TCB_QP_ID 3
-#define M_TCB_QP_ID 0xffffULL
-#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
-
-#define W_TCB_PD_ID 28
-#define S_TCB_PD_ID 19
-#define M_TCB_PD_ID 0xffffULL
-#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
-
-#define W_TCB_STAG 29
-#define S_TCB_STAG 3
-#define M_TCB_STAG 0xffffffffULL
-#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
-
-#define W_TCB_RQ_START 30
-#define S_TCB_RQ_START 3
-#define M_TCB_RQ_START 0x3ffffffULL
-#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
-
-#define W_TCB_RQ_MSN 30
-#define S_TCB_RQ_MSN 29
-#define M_TCB_RQ_MSN 0x3ffULL
-#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)
-
-#define W_TCB_RQ_MAX_OFFSET 31
-#define S_TCB_RQ_MAX_OFFSET 7
-#define M_TCB_RQ_MAX_OFFSET 0xfULL
-#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
-
-#define W_TCB_RQ_WRITE_PTR 31
-#define S_TCB_RQ_WRITE_PTR 11
-#define M_TCB_RQ_WRITE_PTR 0x3ffULL
-#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
-
-#define W_TCB_INB_WRITE_PERM 31
-#define S_TCB_INB_WRITE_PERM 21
-#define M_TCB_INB_WRITE_PERM 0x1ULL
-#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)
-
-#define W_TCB_INB_READ_PERM 31
-#define S_TCB_INB_READ_PERM 22
-#define M_TCB_INB_READ_PERM 0x1ULL
-#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)
-
-#define W_TCB_ORD_L_BIT_VLD 31
-#define S_TCB_ORD_L_BIT_VLD 23
-#define M_TCB_ORD_L_BIT_VLD 0x1ULL
-#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
-
-#define W_TCB_RDMAP_OPCODE 31
-#define S_TCB_RDMAP_OPCODE 24
-#define M_TCB_RDMAP_OPCODE 0xfULL
-#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
-
-#define W_TCB_TX_FLUSH 31
-#define S_TCB_TX_FLUSH 28
-#define M_TCB_TX_FLUSH 0x1ULL
-#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
-
-#define W_TCB_TX_OOS_RXMT 31
-#define S_TCB_TX_OOS_RXMT 29
-#define M_TCB_TX_OOS_RXMT 0x1ULL
-#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
-
-#define W_TCB_TX_OOS_TXMT 31
-#define S_TCB_TX_OOS_TXMT 30
-#define M_TCB_TX_OOS_TXMT 0x1ULL
-#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
-
-#define W_TCB_SLUSH_AUX2 31
-#define S_TCB_SLUSH_AUX2 31
-#define M_TCB_SLUSH_AUX2 0x1ULL
-#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)
-
-#define W_TCB_RX_FRAG1_PTR_RAW2 25
-#define S_TCB_RX_FRAG1_PTR_RAW2 30
-#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)
-
-#define W_TCB_RX_DDP_FLAGS 26
-#define S_TCB_RX_DDP_FLAGS 15
-#define M_TCB_RX_DDP_FLAGS 0x3ffULL
-#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)
-
-#define W_TCB_SLUSH_AUX3 26
-#define S_TCB_SLUSH_AUX3 31
-#define M_TCB_SLUSH_AUX3 0x1ffULL
-#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)
-
-#define W_TCB_RX_DDP_BUF0_OFFSET 27
-#define S_TCB_RX_DDP_BUF0_OFFSET 8
-#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
-
-#define W_TCB_RX_DDP_BUF0_LEN 27
-#define S_TCB_RX_DDP_BUF0_LEN 30
-#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)
-
-#define W_TCB_RX_DDP_BUF1_OFFSET 28
-#define S_TCB_RX_DDP_BUF1_OFFSET 20
-#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
-
-#define W_TCB_RX_DDP_BUF1_LEN 29
-#define S_TCB_RX_DDP_BUF1_LEN 10
-#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
-
-#define W_TCB_RX_DDP_BUF0_TAG 30
-#define S_TCB_RX_DDP_BUF0_TAG 0
-#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
-
-#define W_TCB_RX_DDP_BUF1_TAG 31
-#define S_TCB_RX_DDP_BUF1_TAG 0
-#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-
-#define S_TF_DACK 10
-#define V_TF_DACK(x) ((x) << S_TF_DACK)
-
-#define S_TF_NAGLE 11
-#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
-
-#define S_TF_RECV_SCALE 12
-#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)
-
-#define S_TF_RECV_TSTMP 13
-#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)
-
-#define S_TF_RECV_SACK 14
-#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)
-
-#define S_TF_TURBO 15
-#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
-
-#define S_TF_KEEPALIVE 16
-#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
-
-#define S_TF_TCAM_BYPASS 17
-#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)
-
-#define S_TF_CORE_FIN 18
-#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
-
-#define S_TF_CORE_MORE 19
-#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
-
-#define S_TF_MIGRATING 20
-#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
-
-#define S_TF_ACTIVE_OPEN 21
-#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)
-
-#define S_TF_ASK_MODE 22
-#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)
-
-#define S_TF_NON_OFFLOAD 23
-#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
-
-#define S_TF_MOD_SCHD 24
-#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)
-
-#define S_TF_MOD_SCHD_REASON0 25
-#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)
-
-#define S_TF_MOD_SCHD_REASON1 26
-#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)
-
-#define S_TF_MOD_SCHD_RX 27
-#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)
-
-#define S_TF_CORE_PUSH 28
-#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
-
-#define S_TF_RCV_COALESCE_ENABLE 29
-#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
-
-#define S_TF_RCV_COALESCE_PUSH 30
-#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
-
-#define S_TF_RCV_COALESCE_LAST_PSH 31
-#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
-
-#define S_TF_RCV_COALESCE_HEARTBEAT 32
-#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)
-
-#define S_TF_HALF_CLOSE 33
-#define V_TF_HALF_CLOSE(x) ((x) << S_TF_HALF_CLOSE)
-
-#define S_TF_DACK_MSS 34
-#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
-
-#define S_TF_CCTRL_SEL0 35
-#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
-
-#define S_TF_CCTRL_SEL1 36
-#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
-
-#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
-#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)
-
-#define S_TF_TX_PACE_AUTO 38
-#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
-
-#define S_TF_PEER_FIN_HELD 39
-#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)
-
-#define S_TF_CORE_URG 40
-#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
-
-#define S_TF_RDMA_ERROR 41
-#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)
-
-#define S_TF_SSWS_DISABLED 42
-#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
-
-#define S_TF_DUPACK_COUNT_ODD 43
-#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)
-
-#define S_TF_TX_CHANNEL 44
-#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)
-
-#define S_TF_RX_CHANNEL 45
-#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
-
-#define S_TF_TX_PACE_FIXED 46
-#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
-
-#define S_TF_RDMA_FLM_ERROR 47
-#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)
-
-#define S_TF_RX_FLOW_CONTROL_DISABLE 48
-#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
-
-#endif /* _TCB_DEFS_H */
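The W_/S_/M_/V_ macro families deleted above follow the usual Chelsio TCB convention: S_ is a field's bit offset, M_ its width mask, V_ places a value at that offset, and W_ appears to name the 32-bit TCB word the field lives in. A minimal, runnable userspace sketch of the same pack/unpack idiom, with an invented field name rather than a real TCB field:

#include <stdint.h>
#include <stdio.h>

/* Invented example field: 6 bits wide, starting at bit 10. */
#define S_EXAMPLE_FIELD         10              /* bit offset inside the word */
#define M_EXAMPLE_FIELD         0x3fULL         /* mask for a 6-bit wide field */
#define V_EXAMPLE_FIELD(x)      ((uint64_t)(x) << S_EXAMPLE_FIELD)
#define G_EXAMPLE_FIELD(w)      (((w) >> S_EXAMPLE_FIELD) & M_EXAMPLE_FIELD)

int main(void)
{
        uint64_t word = 0;

        word |= V_EXAMPLE_FIELD(0x2a & M_EXAMPLE_FIELD);        /* pack */
        printf("word=%#llx field=%#llx\n",
               (unsigned long long)word,
               (unsigned long long)G_EXAMPLE_FIELD(word));      /* unpack */
        return 0;
}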
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 347dc242fb88..ee1182f9b627 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3379,7 +3379,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
@@ -3401,7 +3401,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
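The two retargeted gotos above are the usual staged-unwind fix: once a later setup step has succeeded, a failure must jump to the label that also releases that step's resources. A small self-contained sketch of the idiom, with hypothetical resources rather than the c4iw endpoint and route objects:

#include <stdlib.h>
#include <stdio.h>

static int setup_two_stages(int fail_at)
{
        void *stage1, *stage2;
        int err = 0;

        stage1 = malloc(16);
        if (!stage1)
                return -1;

        if (fail_at == 1) {             /* failure before stage 2 exists */
                err = -1;
                goto out_free_stage1;
        }

        stage2 = malloc(16);
        if (!stage2) {
                err = -1;
                goto out_free_stage1;
        }

        if (fail_at == 2) {             /* failure after stage 2 exists */
                err = -1;
                goto out_free_stage2;   /* must unwind stage 2 as well */
        }

        free(stage2);
        free(stage1);
        return 0;

out_free_stage2:
        free(stage2);
out_free_stage1:
        free(stage1);
        return err;
}

int main(void)
{
        printf("%d %d %d\n", setup_two_stages(0), setup_two_stages(1),
               setup_two_stages(2));
        return 0;
}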
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 35c284af574d..fe3a7e8561df 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(udata, start, length, acc, 0);
+ mhp->umem = ib_umem_get(udata, start, length, acc);
if (IS_ERR(mhp->umem))
goto err_free_skb;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index d373ac0fe2cb..ba83d942997c 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -305,7 +305,10 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
+ int ret = 0;
pr_debug("ibdev %p\n", ibdev);
+ ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width);
props->port_cap_flags =
IB_PORT_CM_SUP |
@@ -315,11 +318,9 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
props->max_msg_sz = -1;
- return 0;
+ return ret;
}
static ssize_t hw_rev_show(struct device *dev,
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 2283e432693e..aa7396a1588a 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -60,8 +60,6 @@ struct efa_dev {
u64 mem_bar_len;
u64 db_bar_addr;
u64 db_bar_len;
- u8 addr[EFA_GID_SIZE];
- u32 mtu;
int admin_msix_vector_idx;
struct efa_irq admin_irq;
@@ -71,8 +69,6 @@ struct efa_dev {
struct efa_ucontext {
struct ib_ucontext ibucontext;
- struct xarray mmap_xa;
- u32 mmap_xa_page;
u16 uarn;
};
@@ -91,6 +87,7 @@ struct efa_cq {
struct efa_ucontext *ucontext;
dma_addr_t dma_addr;
void *cpu_addr;
+ struct rdma_user_mmap_entry *mmap_entry;
size_t size;
u16 cq_idx;
};
@@ -101,6 +98,13 @@ struct efa_qp {
void *rq_cpu_addr;
size_t rq_size;
enum ib_qp_state state;
+
+ /* Used for saving mmap_xa entries */
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *llq_desc_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_mmap_entry;
+
u32 qp_handle;
u32 max_send_wr;
u32 max_recv_wr;
@@ -147,6 +151,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
int efa_mmap(struct ib_ucontext *ibucontext,
struct vm_area_struct *vma);
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int efa_create_ah(struct ib_ah *ibah,
struct rdma_ah_attr *ah_attr,
u32 flags,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 2be0469d545f..e96bcb16bd2b 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -362,9 +362,13 @@ struct efa_admin_reg_mr_cmd {
/*
* permissions
- * 0 : local_write_enable - Write permissions: value
- * of 1 needed for RQ buffers and for RDMA write
- * 7:1 : reserved1 - remote access flags, etc
+ * 0 : local_write_enable - Local write permissions:
+ * must be set for RQ buffers and buffers posted for
+ * RDMA Read requests
+ * 1 : reserved1 - MBZ
+ * 2 : remote_read_enable - Remote read permissions:
+ * must be set to enable RDMA read from the region
+ * 7:3 : reserved2 - MBZ
*/
u8 permissions;
@@ -558,6 +562,16 @@ struct efa_admin_feature_device_attr_desc {
/* Indicates how many bits are used virtual address access */
u8 virt_addr_width;
+
+ /*
+ * 0 : rdma_read - If set, RDMA Read is supported on
+ * TX queues
+ * 31:1 : reserved - MBZ
+ */
+ u32 device_caps;
+
+ /* Max RDMA transfer size in bytes */
+ u32 max_rdma_size;
};
struct efa_admin_feature_queue_attr_desc {
@@ -604,6 +618,9 @@ struct efa_admin_feature_queue_attr_desc {
/* The maximum size of LLQ in bytes */
u32 max_llq_size;
+
+ /* Maximum number of SGEs for a single RDMA read WQE */
+ u16 max_wr_rdma_sges;
};
struct efa_admin_feature_aenq_desc {
@@ -618,6 +635,7 @@ struct efa_admin_feature_network_attr_desc {
/* Raw address data in network byte order */
u8 addr[16];
+ /* max packet payload size in bytes */
u32 mtu;
};
@@ -780,6 +798,8 @@ struct efa_admin_mmio_req_read_less_resp {
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT 7
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT 2
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
@@ -791,4 +811,7 @@ struct efa_admin_mmio_req_read_less_resp {
/* get_set_feature_common_desc */
#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+/* feature_device_attr_desc */
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+
#endif /* _EFA_ADMIN_CMDS_H_ */
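The new device_caps word above is a plain feature bit field: bit 0 advertises RDMA read support and the remaining bits are reserved, which is why the driver later tests it against the BIT(0) mask. A hedged userspace sketch of that capability test (the cap bit mirrors the diff; the surrounding struct and helper are invented):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                          (1U << (n))
#define EXAMPLE_DEVICE_CAPS_RDMA_READ   BIT(0)  /* mirrors bit 0 above */

struct example_dev_attr {
        uint32_t device_caps;
        uint32_t max_rdma_size;
};

static int example_rdma_read_supported(const struct example_dev_attr *attr)
{
        return !!(attr->device_caps & EXAMPLE_DEVICE_CAPS_RDMA_READ);
}

int main(void)
{
        struct example_dev_attr attr = {
                .device_caps = EXAMPLE_DEVICE_CAPS_RDMA_READ,
                .max_rdma_size = 1 << 20,
        };

        printf("rdma read supported: %d, max transfer: %u bytes\n",
               example_rdma_read_supported(&attr), attr.max_rdma_size);
        return 0;
}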
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 3c412bc5b94f..0778f4f7dccd 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -317,6 +317,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
struct efa_admin_acq_entry *comp,
size_t comp_size_in_bytes)
{
+ struct efa_admin_aq_entry *aqe;
struct efa_comp_ctx *comp_ctx;
u16 queue_size_mask;
u16 cmd_id;
@@ -350,7 +351,9 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
reinit_completion(&comp_ctx->wait_event);
- memcpy(&aq->sq.entries[pi], cmd, cmd_size_in_bytes);
+ aqe = &aq->sq.entries[pi];
+ memset(aqe, 0, sizeof(*aqe));
+ memcpy(aqe, cmd, cmd_size_in_bytes);
aq->sq.pc++;
atomic64_inc(&aq->stats.submitted_cmd);
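The hunk above stops copying a possibly short command straight into the submission-queue slot and instead zeroes the whole slot first, so bytes beyond cmd_size_in_bytes are deterministic rather than stale data left over from an earlier command. The same pattern in a standalone sketch (struct layout and sizes are illustrative only):

#include <string.h>
#include <stdio.h>

struct example_sq_entry {       /* fixed-size slot in a submission queue */
        unsigned char data[64];
};

static void submit_cmd(struct example_sq_entry *slot,
                       const void *cmd, size_t cmd_size)
{
        memset(slot, 0, sizeof(*slot));         /* clear stale bytes first */
        memcpy(slot, cmd, cmd_size);            /* then copy the short command */
}

int main(void)
{
        struct example_sq_entry slot;
        unsigned char small_cmd[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        memset(&slot, 0xff, sizeof(slot));      /* pretend the slot was reused */
        submit_cmd(&slot, small_cmd, sizeof(small_cmd));

        printf("byte 8 after submit: %u (was 255 before)\n", slot.data[8]);
        return 0;
}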
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index c079f1332082..e20bd84a1014 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -230,8 +230,7 @@ int efa_com_register_mr(struct efa_com_dev *edev,
mr_cmd.flags |= params->page_shift &
EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
mr_cmd.iova = params->iova;
- mr_cmd.permissions |= params->permissions &
- EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
+ mr_cmd.permissions = params->permissions;
if (params->inline_pbl) {
memcpy(mr_cmd.pbl.inline_pbl_array,
@@ -423,28 +422,6 @@ static int efa_com_get_feature(struct efa_com_dev *edev,
return efa_com_get_feature_ex(edev, get_resp, feature_id, 0, 0);
}
-int efa_com_get_network_attr(struct efa_com_dev *edev,
- struct efa_com_get_network_attr_result *result)
-{
- struct efa_admin_get_feature_resp resp;
- int err;
-
- err = efa_com_get_feature(edev, &resp,
- EFA_ADMIN_NETWORK_ATTR);
- if (err) {
- ibdev_err_ratelimited(edev->efa_dev,
- "Failed to get network attributes %d\n",
- err);
- return err;
- }
-
- memcpy(result->addr, resp.u.network_attr.addr,
- sizeof(resp.u.network_attr.addr));
- result->mtu = resp.u.network_attr.mtu;
-
- return 0;
-}
-
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result)
{
@@ -467,6 +444,8 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->phys_addr_width = resp.u.device_attr.phys_addr_width;
result->virt_addr_width = resp.u.device_attr.virt_addr_width;
result->db_bar = resp.u.device_attr.db_bar;
+ result->max_rdma_size = resp.u.device_attr.max_rdma_size;
+ result->device_caps = resp.u.device_attr.device_caps;
if (result->admin_api_version < 1) {
ibdev_err_ratelimited(
@@ -500,6 +479,19 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->max_ah = resp.u.queue_attr.max_ah;
result->max_llq_size = resp.u.queue_attr.max_llq_size;
result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
+ result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get network attributes %d\n",
+ err);
+ return err;
+ }
+
+ memcpy(result->addr, resp.u.network_attr.addr,
+ sizeof(resp.u.network_attr.addr));
+ result->mtu = resp.u.network_attr.mtu;
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 7f6c13052f49..31db5a0cbd5b 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -100,14 +100,11 @@ struct efa_com_destroy_ah_params {
u16 pdn;
};
-struct efa_com_get_network_attr_result {
- u8 addr[EFA_GID_SIZE];
- u32 mtu;
-};
-
struct efa_com_get_device_attr_result {
+ u8 addr[EFA_GID_SIZE];
u64 page_size_cap;
u64 max_mr_pages;
+ u32 mtu;
u32 fw_version;
u32 admin_api_version;
u32 device_version;
@@ -124,9 +121,12 @@ struct efa_com_get_device_attr_result {
u32 max_pd;
u32 max_ah;
u32 max_llq_size;
+ u32 max_rdma_size;
+ u32 device_caps;
u16 sub_cqs_per_cq;
u16 max_sq_sge;
u16 max_rq_sge;
+ u16 max_wr_rdma_sge;
u8 db_bar;
};
@@ -181,12 +181,7 @@ struct efa_com_reg_mr_params {
* address mapping
*/
u8 page_shift;
- /*
- * permissions
- * 0: local_write_enable - Write permissions: value of 1 needed
- * for RQ buffers and for RDMA write:1: reserved1 - remote
- * access flags, etc
- */
+ /* see permissions field of struct efa_admin_reg_mr_cmd */
u8 permissions;
u8 inline_pbl;
u8 indirect;
@@ -271,8 +266,6 @@ int efa_com_create_ah(struct efa_com_dev *edev,
struct efa_com_create_ah_result *result);
int efa_com_destroy_ah(struct efa_com_dev *edev,
struct efa_com_destroy_ah_params *params);
-int efa_com_get_network_attr(struct efa_com_dev *edev,
- struct efa_com_get_network_attr_result *result);
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result);
int efa_com_get_hw_hints(struct efa_com_dev *edev,
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 83858f7e83d0..faf3ff1bca2a 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -30,15 +30,6 @@ MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-static void efa_update_network_attr(struct efa_dev *dev,
- struct efa_com_get_network_attr_result *network_attr)
-{
- memcpy(dev->addr, network_attr->addr, sizeof(network_attr->addr));
- dev->mtu = network_attr->mtu;
-
- dev_dbg(&dev->pdev->dev, "Full address %pI6\n", dev->addr);
-}
-
/* This handler will called for unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
struct efa_admin_aenq_entry *aenq_e)
@@ -217,6 +208,7 @@ static const struct ib_device_ops efa_dev_ops = {
.get_link_layer = efa_port_link_layer,
.get_port_immutable = efa_get_port_immutable,
.mmap = efa_mmap,
+ .mmap_free = efa_mmap_free,
.modify_qp = efa_modify_qp,
.query_device = efa_query_device,
.query_gid = efa_query_gid,
@@ -233,7 +225,6 @@ static const struct ib_device_ops efa_dev_ops = {
static int efa_ib_device_add(struct efa_dev *dev)
{
- struct efa_com_get_network_attr_result network_attr;
struct efa_com_get_hw_hints_result hw_hints;
struct pci_dev *pdev = dev->pdev;
int err;
@@ -249,12 +240,6 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
return err;
- err = efa_com_get_network_attr(&dev->edev, &network_attr);
- if (err)
- goto err_release_doorbell_bar;
-
- efa_update_network_attr(dev, &network_attr);
-
err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
if (err)
goto err_release_doorbell_bar;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 4edae89e8e3c..c9d294caa27a 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -13,10 +13,6 @@
#include "efa.h"
-#define EFA_MMAP_FLAG_SHIFT 56
-#define EFA_MMAP_PAGE_MASK GENMASK(EFA_MMAP_FLAG_SHIFT - 1, 0)
-#define EFA_MMAP_INVALID U64_MAX
-
enum {
EFA_MMAP_DMA_PAGE = 0,
EFA_MMAP_IO_WC,
@@ -27,20 +23,12 @@ enum {
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-struct efa_mmap_entry {
- void *obj;
+struct efa_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
u64 address;
- u64 length;
- u32 mmap_page;
u8 mmap_flag;
};
-static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
-{
- return ((u64)efa->mmap_flag << EFA_MMAP_FLAG_SHIFT) |
- ((u64)efa->mmap_page << PAGE_SHIFT);
-}
-
#define EFA_DEFINE_STATS(op) \
op(EFA_TX_BYTES, "tx_bytes") \
op(EFA_TX_PKTS, "tx_pkts") \
@@ -82,8 +70,6 @@ static const char *const efa_stats_names[] = {
#define EFA_CHUNK_USED_SIZE \
((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
-#define EFA_SUPPORTED_ACCESS_FLAGS IB_ACCESS_LOCAL_WRITE
-
struct pbl_chunk {
dma_addr_t dma_addr;
u64 *buf;
@@ -147,6 +133,17 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah)
return container_of(ibah, struct efa_ah, ibah);
}
+static inline struct efa_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
+}
+
+static inline bool is_rdma_read_cap(struct efa_dev *dev)
+{
+ return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
+}
+
#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
FIELD_SIZEOF(typeof(x), fld) <= (sz))
@@ -172,106 +169,6 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
return addr;
}
-/*
- * This is only called when the ucontext is destroyed and there can be no
- * concurrent query via mmap or allocate on the xarray, thus we can be sure no
- * other thread is using the entry pointer. We also know that all the BAR
- * pages have either been zap'd or munmaped at this point. Normal pages are
- * refcounted and will be freed at the proper time.
- */
-static void mmap_entries_remove_free(struct efa_dev *dev,
- struct efa_ucontext *ucontext)
-{
- struct efa_mmap_entry *entry;
- unsigned long mmap_page;
-
- xa_for_each(&ucontext->mmap_xa, mmap_page, entry) {
- xa_erase(&ucontext->mmap_xa, mmap_page);
-
- ibdev_dbg(
- &dev->ibdev,
- "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
- entry->obj, get_mmap_key(entry), entry->address,
- entry->length);
- if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
- /* DMA mapping is already gone, now free the pages */
- free_pages_exact(phys_to_virt(entry->address),
- entry->length);
- kfree(entry);
- }
-}
-
-static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev,
- struct efa_ucontext *ucontext,
- u64 key, u64 len)
-{
- struct efa_mmap_entry *entry;
- u64 mmap_page;
-
- mmap_page = (key & EFA_MMAP_PAGE_MASK) >> PAGE_SHIFT;
- if (mmap_page > U32_MAX)
- return NULL;
-
- entry = xa_load(&ucontext->mmap_xa, mmap_page);
- if (!entry || get_mmap_key(entry) != key || entry->length != len)
- return NULL;
-
- ibdev_dbg(&dev->ibdev,
- "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
- entry->obj, key, entry->address, entry->length);
-
- return entry;
-}
-
-/*
- * Note this locking scheme cannot support removal of entries, except during
- * ucontext destruction when the core code guarentees no concurrency.
- */
-static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
- void *obj, u64 address, u64 length, u8 mmap_flag)
-{
- struct efa_mmap_entry *entry;
- u32 next_mmap_page;
- int err;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return EFA_MMAP_INVALID;
-
- entry->obj = obj;
- entry->address = address;
- entry->length = length;
- entry->mmap_flag = mmap_flag;
-
- xa_lock(&ucontext->mmap_xa);
- if (check_add_overflow(ucontext->mmap_xa_page,
- (u32)(length >> PAGE_SHIFT),
- &next_mmap_page))
- goto err_unlock;
-
- entry->mmap_page = ucontext->mmap_xa_page;
- ucontext->mmap_xa_page = next_mmap_page;
- err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
- GFP_KERNEL);
- if (err)
- goto err_unlock;
-
- xa_unlock(&ucontext->mmap_xa);
-
- ibdev_dbg(
- &dev->ibdev,
- "mmap: obj[0x%p] addr[%#llx], len[%#llx], key[%#llx] inserted\n",
- entry->obj, entry->address, entry->length, get_mmap_key(entry));
-
- return get_mmap_key(entry);
-
-err_unlock:
- xa_unlock(&ucontext->mmap_xa);
- kfree(entry);
- return EFA_MMAP_INVALID;
-
-}
-
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata)
@@ -306,12 +203,17 @@ int efa_query_device(struct ib_device *ibdev,
dev_attr->max_rq_depth);
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
+ props->max_sge_rd = dev_attr->max_wr_rdma_sge;
if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
resp.max_rq_sge = dev_attr->max_rq_sge;
resp.max_sq_wr = dev_attr->max_sq_depth;
resp.max_rq_wr = dev_attr->max_rq_depth;
+ resp.max_rdma_size = dev_attr->max_rdma_size;
+
+ if (is_rdma_read_cap(dev))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
@@ -338,9 +240,9 @@ int efa_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1;
props->active_speed = IB_SPEED_EDR;
props->active_width = IB_WIDTH_4X;
- props->max_mtu = ib_mtu_int_to_enum(dev->mtu);
- props->active_mtu = ib_mtu_int_to_enum(dev->mtu);
- props->max_msg_sz = dev->mtu;
+ props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->max_msg_sz = dev->dev_attr.mtu;
props->max_vl_num = 1;
return 0;
@@ -401,7 +303,7 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
{
struct efa_dev *dev = to_edev(ibdev);
- memcpy(gid->raw, dev->addr, sizeof(dev->addr));
+ memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
return 0;
}
@@ -485,8 +387,19 @@ static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
return efa_com_destroy_qp(&dev->edev, &params);
}
+static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
+ struct efa_qp *qp)
+{
+ rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
+}
+
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
+ struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct efa_ucontext, ibucontext);
struct efa_dev *dev = to_edev(ibqp->pd->device);
struct efa_qp *qp = to_eqp(ibqp);
int err;
@@ -505,61 +418,101 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
DMA_TO_DEVICE);
}
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
kfree(qp);
return 0;
}
+static struct rdma_user_mmap_entry*
+efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ u64 address, size_t length,
+ u8 mmap_flag, u64 *offset)
+{
+ struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int err;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_flag = mmap_flag;
+
+ err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
+ length);
+ if (err) {
+ kfree(entry);
+ return NULL;
+ }
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
static int qp_mmap_entries_setup(struct efa_qp *qp,
struct efa_dev *dev,
struct efa_ucontext *ucontext,
struct efa_com_create_qp_params *params,
struct efa_ibv_create_qp_resp *resp)
{
- /*
- * Once an entry is inserted it might be mmapped, hence cannot be
- * cleaned up until dealloc_ucontext.
- */
- resp->sq_db_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->db_bar_addr + resp->sq_db_offset,
- PAGE_SIZE, EFA_MMAP_IO_NC);
- if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
+ size_t length;
+ u64 address;
+
+ address = dev->db_bar_addr + resp->sq_db_offset;
+ qp->sq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address,
+ PAGE_SIZE, EFA_MMAP_IO_NC,
+ &resp->sq_db_mmap_key);
+ if (!qp->sq_db_mmap_entry)
return -ENOMEM;
resp->sq_db_offset &= ~PAGE_MASK;
- resp->llq_desc_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->mem_bar_addr + resp->llq_desc_offset,
- PAGE_ALIGN(params->sq_ring_size_in_bytes +
- (resp->llq_desc_offset & ~PAGE_MASK)),
- EFA_MMAP_IO_WC);
- if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = dev->mem_bar_addr + resp->llq_desc_offset;
+ length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
+ (resp->llq_desc_offset & ~PAGE_MASK));
+
+ qp->llq_desc_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, length,
+ EFA_MMAP_IO_WC,
+ &resp->llq_desc_mmap_key);
+ if (!qp->llq_desc_mmap_entry)
+ goto err_remove_mmap;
resp->llq_desc_offset &= ~PAGE_MASK;
if (qp->rq_size) {
- resp->rq_db_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->db_bar_addr + resp->rq_db_offset,
- PAGE_SIZE, EFA_MMAP_IO_NC);
- if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = dev->db_bar_addr + resp->rq_db_offset;
+
+ qp->rq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, PAGE_SIZE,
+ EFA_MMAP_IO_NC,
+ &resp->rq_db_mmap_key);
+ if (!qp->rq_db_mmap_entry)
+ goto err_remove_mmap;
resp->rq_db_offset &= ~PAGE_MASK;
- resp->rq_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- virt_to_phys(qp->rq_cpu_addr),
- qp->rq_size, EFA_MMAP_DMA_PAGE);
- if (resp->rq_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = virt_to_phys(qp->rq_cpu_addr);
+ qp->rq_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, qp->rq_size,
+ EFA_MMAP_DMA_PAGE,
+ &resp->rq_mmap_key);
+ if (!qp->rq_mmap_entry)
+ goto err_remove_mmap;
resp->rq_mmap_size = qp->rq_size;
}
return 0;
+
+err_remove_mmap:
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
+
+ return -ENOMEM;
}
static int efa_qp_validate_cap(struct efa_dev *dev,
@@ -634,7 +587,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_ibv_create_qp_resp resp = {};
struct efa_ibv_create_qp cmd = {};
- bool rq_entry_inserted = false;
struct efa_ucontext *ucontext;
struct efa_qp *qp;
int err;
@@ -742,7 +694,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
if (err)
goto err_destroy_qp;
- rq_entry_inserted = true;
qp->qp_handle = create_qp_resp.qp_handle;
qp->ibqp.qp_num = create_qp_resp.qp_num;
qp->ibqp.qp_type = init_attr->qp_type;
@@ -759,7 +710,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
ibdev_dbg(&dev->ibdev,
"Failed to copy udata for qp[%u]\n",
create_qp_resp.qp_num);
- goto err_destroy_qp;
+ goto err_remove_mmap_entries;
}
}
@@ -767,13 +718,16 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
return &qp->ibqp;
+err_remove_mmap_entries:
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
err_destroy_qp:
efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
if (qp->rq_size) {
dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
DMA_TO_DEVICE);
- if (!rq_entry_inserted)
+
+ if (!qp->rq_mmap_entry)
free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
}
err_free_qp:
@@ -897,16 +851,18 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
efa_destroy_cq_idx(dev, cq->cq_idx);
dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
}
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
struct efa_ibv_create_cq_resp *resp)
{
resp->q_mmap_size = cq->size;
- resp->q_mmap_key = mmap_entry_insert(dev, cq->ucontext, cq,
- virt_to_phys(cq->cpu_addr),
- cq->size, EFA_MMAP_DMA_PAGE);
- if (resp->q_mmap_key == EFA_MMAP_INVALID)
+ cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+ virt_to_phys(cq->cpu_addr),
+ cq->size, EFA_MMAP_DMA_PAGE,
+ &resp->q_mmap_key);
+ if (!cq->mmap_entry)
return -ENOMEM;
return 0;
@@ -924,7 +880,6 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct efa_dev *dev = to_edev(ibdev);
struct efa_ibv_create_cq cmd = {};
struct efa_cq *cq = to_ecq(ibcq);
- bool cq_entry_inserted = false;
int entries = attr->cqe;
int err;
@@ -1013,15 +968,13 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_destroy_cq;
}
- cq_entry_inserted = true;
-
if (udata->outlen) {
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
ibdev_dbg(ibdev,
"Failed to copy udata for create_cq\n");
- goto err_destroy_cq;
+ goto err_remove_mmap;
}
}
@@ -1030,13 +983,16 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
+err_remove_mmap:
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
- if (!cq_entry_inserted)
+ if (!cq->mmap_entry)
free_pages_exact(cq->cpu_addr, cq->size);
+
err_out:
atomic64_inc(&dev->stats.sw_stats.create_cq_err);
return err;
@@ -1396,6 +1352,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
struct efa_com_reg_mr_params params = {};
struct efa_com_reg_mr_result result = {};
struct pbl_context pbl;
+ int supp_access_flags;
unsigned int pg_sz;
struct efa_mr *mr;
int inline_size;
@@ -1409,10 +1366,14 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- if (access_flags & ~EFA_SUPPORTED_ACCESS_FLAGS) {
+ supp_access_flags =
+ IB_ACCESS_LOCAL_WRITE |
+ (is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
+
+ if (access_flags & ~supp_access_flags) {
ibdev_dbg(&dev->ibdev,
"Unsupported access flags[%#x], supported[%#x]\n",
- access_flags, EFA_SUPPORTED_ACCESS_FLAGS);
+ access_flags, supp_access_flags);
err = -EOPNOTSUPP;
goto err_out;
}
@@ -1423,7 +1384,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
@@ -1434,7 +1395,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
params.pd = to_epd(ibpd)->pdn;
params.iova = virt_addr;
params.mr_length_in_bytes = length;
- params.permissions = access_flags & 0x1;
+ params.permissions = access_flags;
pg_sz = ib_umem_find_best_pgsz(mr->umem,
dev->dev_attr.page_size_cap,
@@ -1556,7 +1517,6 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
goto err_out;
ucontext->uarn = result.uarn;
- xa_init(&ucontext->mmap_xa);
resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
@@ -1585,38 +1545,56 @@ void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
struct efa_ucontext *ucontext = to_eucontext(ibucontext);
struct efa_dev *dev = to_edev(ibucontext->device);
- mmap_entries_remove_free(dev, ucontext);
efa_dealloc_uar(dev, ucontext->uarn);
}
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ /* DMA mapping is already gone, now free the pages */
+ if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
+ free_pages_exact(phys_to_virt(entry->address),
+ entry->rdma_entry.npages * PAGE_SIZE);
+ kfree(entry);
+}
+
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
- struct vm_area_struct *vma, u64 key, u64 length)
+ struct vm_area_struct *vma)
{
- struct efa_mmap_entry *entry;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct efa_user_mmap_entry *entry;
unsigned long va;
+ int err = 0;
u64 pfn;
- int err;
- entry = mmap_entry_get(dev, ucontext, key, length);
- if (!entry) {
- ibdev_dbg(&dev->ibdev, "key[%#llx] does not have valid entry\n",
- key);
+ rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev,
+ "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
return -EINVAL;
}
+ entry = to_emmap(rdma_entry);
ibdev_dbg(&dev->ibdev,
- "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n",
- entry->address, length, entry->mmap_flag);
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag);
pfn = entry->address >> PAGE_SHIFT;
switch (entry->mmap_flag) {
case EFA_MMAP_IO_NC:
- err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
- pgprot_noncached(vma->vm_page_prot));
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
break;
case EFA_MMAP_IO_WC:
- err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
- pgprot_writecombine(vma->vm_page_prot));
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
break;
case EFA_MMAP_DMA_PAGE:
for (va = vma->vm_start; va < vma->vm_end;
@@ -1633,12 +1611,13 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
if (err) {
ibdev_dbg(
&dev->ibdev,
- "Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n",
- entry->address, length, entry->mmap_flag, err);
- return err;
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag, err);
}
- return 0;
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
}
int efa_mmap(struct ib_ucontext *ibucontext,
@@ -1646,26 +1625,13 @@ int efa_mmap(struct ib_ucontext *ibucontext,
{
struct efa_ucontext *ucontext = to_eucontext(ibucontext);
struct efa_dev *dev = to_edev(ibucontext->device);
- u64 length = vma->vm_end - vma->vm_start;
- u64 key = vma->vm_pgoff << PAGE_SHIFT;
+ size_t length = vma->vm_end - vma->vm_start;
ibdev_dbg(&dev->ibdev,
- "start %#lx, end %#lx, length = %#llx, key = %#llx\n",
- vma->vm_start, vma->vm_end, length, key);
-
- if (length % PAGE_SIZE != 0 || !(vma->vm_flags & VM_SHARED)) {
- ibdev_dbg(&dev->ibdev,
- "length[%#llx] is not page size aligned[%#lx] or VM_SHARED is not set [%#lx]\n",
- length, PAGE_SIZE, vma->vm_flags);
- return -EINVAL;
- }
-
- if (vma->vm_flags & VM_EXEC) {
- ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
- return -EPERM;
- }
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
- return __efa_mmap(dev, ucontext, vma, key, length);
+ return __efa_mmap(dev, ucontext, vma);
}
static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
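The efa_verbs.c rework above replaces the driver's private mmap xarray with entries embedded in the core's rdma_user_mmap_entry and recovered through container_of(), as to_emmap() does. The embedding trick itself is plain C; a small runnable sketch with invented type names stands in for the kernel structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct core_entry {             /* stands in for the core framework's struct */
        unsigned long pgoff;
};

struct driver_entry {           /* driver wrapper embedding the core struct */
        struct core_entry core;
        unsigned long long address;
        unsigned char mmap_flag;
};

static struct driver_entry *to_driver(struct core_entry *core)
{
        return container_of(core, struct driver_entry, core);
}

int main(void)
{
        struct driver_entry ent = {
                .core = { .pgoff = 4 },
                .address = 0xfee00000ULL,
                .mmap_flag = 1,
        };
        struct core_entry *core = &ent.core;    /* what the core hands back */

        printf("address=%#llx flag=%u\n",
               to_driver(core)->address, to_driver(core)->mmap_flag);
        return 0;
}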
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index f9a7e9d29c8b..7c5e3fb22413 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1138,7 +1138,7 @@ static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
/* adjust flag if this fd is not able to cache */
- if (!fd->handler)
+ if (!fd->use_mn)
cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
cinfo.num_active = hfi1_count_active_units();
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index fa45350a9a1d..fc10d65fc3e1 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1444,7 +1444,7 @@ struct hfi1_filedata {
/* for cpu affinity; -1 if none */
int rec_cpu_num;
u32 tid_n_pinned;
- struct mmu_rb_handler *handler;
+ bool use_mn;
struct tid_rb_node **entry_to_rb;
spinlock_t tid_lock; /* protect tid_[limit,used] counters */
u32 tid_limit;
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index d8ff063a5419..a51bcd2b4391 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -4915,16 +4915,11 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
*/
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
- switch (in_mad->base_version) {
+ switch (in_mad->mad_hdr.base_version) {
case OPA_MGMT_BASE_VERSION:
- if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
- dev_err(ibdev->dev.parent, "invalid in_mad_size\n");
- return IB_MAD_RESULT_FAILURE;
- }
return hfi1_process_opa_mad(ibdev, mad_flags, port,
in_wc, in_grh,
(struct opa_mad *)in_mad,
@@ -4932,10 +4927,8 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
out_mad_size,
out_mad_pkey_index);
case IB_MGMT_BASE_VERSION:
- return hfi1_process_ib_mad(ibdev, mad_flags, port,
- in_wc, in_grh,
- (const struct ib_mad *)in_mad,
- (struct ib_mad *)out_mad);
+ return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc,
+ in_grh, in_mad, out_mad);
default:
break;
}
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index cbf7faa5038c..36593f2efe26 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -634,7 +634,7 @@ static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
u32 config_data, const char *message)
{
u8 i;
- int ret = HCMD_SUCCESS;
+ int ret;
for (i = 0; i < 4; i++) {
ret = load_8051_config(ppd->dd, field_id, i, config_data);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 3592a9ec155e..f05742ac0949 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -59,11 +59,11 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
struct tid_user_buf *tbuf,
u32 rcventry, struct tid_group *grp,
u16 pageidx, unsigned int npages);
-static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
struct tid_rb_node *tnode);
-static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
-static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
+static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
struct tid_group *grp,
unsigned int start, u16 count,
@@ -73,10 +73,8 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
-static struct mmu_rb_ops tid_rb_ops = {
- .insert = tid_rb_insert,
- .remove = tid_rb_remove,
- .invalidate = tid_rb_invalidate
+static const struct mmu_interval_notifier_ops tid_mn_ops = {
+ .invalidate = tid_rb_invalidate,
};
/*
@@ -87,7 +85,6 @@ static struct mmu_rb_ops tid_rb_ops = {
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
struct hfi1_ctxtdata *uctxt)
{
- struct hfi1_devdata *dd = uctxt->dd;
int ret = 0;
spin_lock_init(&fd->tid_lock);
@@ -109,20 +106,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
fd->entry_to_rb = NULL;
return -ENOMEM;
}
-
- /*
- * Register MMU notifier callbacks. If the registration
- * fails, continue without TID caching for this context.
- */
- ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
- dd->pport->hfi1_wq,
- &fd->handler);
- if (ret) {
- dd_dev_info(dd,
- "Failed MMU notifier registration %d\n",
- ret);
- ret = 0;
- }
+ fd->use_mn = true;
}
/*
@@ -139,7 +123,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
* init.
*/
spin_lock(&fd->tid_lock);
- if (uctxt->subctxt_cnt && fd->handler) {
+ if (uctxt->subctxt_cnt && fd->use_mn) {
u16 remainder;
fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
@@ -158,18 +142,10 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- /*
- * The notifier would have been removed when the process'es mm
- * was freed.
- */
- if (fd->handler) {
- hfi1_mmu_rb_unregister(fd->handler);
- } else {
- if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
- unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
- if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
- unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
- }
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
kfree(fd->invalid_tids);
fd->invalid_tids = NULL;
@@ -201,7 +177,7 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
if (mapped) {
pci_unmap_single(dd->pcidev, node->dma_addr,
- node->mmu.len, PCI_DMA_FROMDEVICE);
+ node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
pages = &node->pages[idx];
} else {
pages = &tidbuf->pages[idx];
@@ -777,8 +753,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
return -EFAULT;
}
- node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE);
- node->mmu.len = npages * PAGE_SIZE;
+ node->fdata = fd;
node->phys = page_to_phys(pages[0]);
node->npages = npages;
node->rcventry = rcventry;
@@ -787,23 +762,35 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
node->freed = false;
memcpy(node->pages, pages, sizeof(struct page *) * npages);
- if (!fd->handler)
- ret = tid_rb_insert(fd, &node->mmu);
- else
- ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);
-
- if (ret) {
- hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
- node->rcventry, node->mmu.addr, node->phys, ret);
- pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- kfree(node);
- return -EFAULT;
+ if (fd->use_mn) {
+ ret = mmu_interval_notifier_insert(
+ &node->notifier, fd->mm,
+ tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
+ &tid_mn_ops);
+ if (ret)
+ goto out_unmap;
+ /*
+ * FIXME: This is in the wrong order, the notifier should be
+ * established before the pages are pinned by pin_rcv_pages.
+ */
+ mmu_interval_read_begin(&node->notifier);
}
+ fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
+
hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
- node->mmu.addr, node->phys, phys);
+ node->notifier.interval_tree.start, node->phys,
+ phys);
return 0;
+
+out_unmap:
+ hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
+ node->rcventry, node->notifier.interval_tree.start,
+ node->phys, ret);
+ pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ kfree(node);
+ return -EFAULT;
}
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
@@ -833,10 +820,9 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
if (grp)
*grp = node->grp;
- if (!fd->handler)
- cacheless_tid_rb_remove(fd, node);
- else
- hfi1_mmu_rb_remove(fd->handler, &node->mmu);
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(&node->notifier);
+ cacheless_tid_rb_remove(fd, node);
return 0;
}
@@ -847,7 +833,8 @@ static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
struct hfi1_devdata *dd = uctxt->dd;
trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
- node->npages, node->mmu.addr, node->phys,
+ node->npages,
+ node->notifier.interval_tree.start, node->phys,
node->dma_addr);
/*
@@ -894,30 +881,29 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
if (!node || node->rcventry != rcventry)
continue;
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(
+ &node->notifier);
cacheless_tid_rb_remove(fd, node);
}
}
}
}
-/*
- * Always return 0 from this function. A non-zero return indicates that the
- * remove operation will be called and that memory should be unpinned.
- * However, the driver cannot unpin out from under PSM. Instead, retain the
- * memory (by returning 0) and inform PSM that the memory is going away. PSM
- * will call back later when it has removed the memory from its list.
- */
-static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
+static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct hfi1_filedata *fdata = arg;
- struct hfi1_ctxtdata *uctxt = fdata->uctxt;
struct tid_rb_node *node =
- container_of(mnode, struct tid_rb_node, mmu);
+ container_of(mni, struct tid_rb_node, notifier);
+ struct hfi1_filedata *fdata = node->fdata;
+ struct hfi1_ctxtdata *uctxt = fdata->uctxt;
if (node->freed)
- return 0;
+ return true;
- trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
+ trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
+ node->notifier.interval_tree.start,
node->rcventry, node->npages, node->dma_addr);
node->freed = true;
@@ -946,18 +932,7 @@ static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
fdata->invalid_tid_idx++;
}
spin_unlock(&fdata->invalid_lock);
- return 0;
-}
-
-static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
-{
- struct hfi1_filedata *fdata = arg;
- struct tid_rb_node *tnode =
- container_of(node, struct tid_rb_node, mmu);
- u32 base = fdata->uctxt->expected_base;
-
- fdata->entry_to_rb[tnode->rcventry - base] = tnode;
- return 0;
+ return true;
}
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
@@ -968,12 +943,3 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
fdata->entry_to_rb[tnode->rcventry - base] = NULL;
clear_tid_node(fdata, tnode);
}
-
-static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
-{
- struct hfi1_filedata *fdata = arg;
- struct tid_rb_node *tnode =
- container_of(node, struct tid_rb_node, mmu);
-
- cacheless_tid_rb_remove(fdata, tnode);
-}
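The hfi1 conversion above drops the three-callback mmu_rb ops table in favour of a single invalidate hook per registered interval, with the owning tid_rb_node recovered from the notifier via container_of. A hedged userspace analogue of that ops-table-with-one-callback shape (names are invented and this is not the kernel mmu_interval_notifier API, only its general structure):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier;

struct notifier_ops {
        /* return true when the invalidated range has been handled */
        bool (*invalidate)(struct notifier *n, unsigned long start,
                           unsigned long len);
};

struct notifier {
        const struct notifier_ops *ops;
        unsigned long start;
        unsigned long len;
};

struct tid_node {               /* driver object embedding the notifier */
        struct notifier notifier;
        bool freed;
};

static bool tid_invalidate(struct notifier *n, unsigned long start,
                           unsigned long len)
{
        struct tid_node *node = container_of(n, struct tid_node, notifier);

        (void)start;
        (void)len;
        if (node->freed)
                return true;
        node->freed = true;     /* mark the cached range as gone */
        return true;
}

static const struct notifier_ops tid_ops = {
        .invalidate = tid_invalidate,
};

int main(void)
{
        struct tid_node node = {
                .notifier = { .ops = &tid_ops, .start = 0x1000, .len = 0x2000 },
        };

        node.notifier.ops->invalidate(&node.notifier, 0x1000, 0x1000);
        printf("freed=%d\n", node.freed);
        return 0;
}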
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 43b105de1d54..6257eee083a1 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -65,7 +65,8 @@ struct tid_user_buf {
};
struct tid_rb_node {
- struct mmu_rb_node mmu;
+ struct mmu_interval_notifier notifier;
+ struct hfi1_filedata *fdata;
unsigned long phys;
struct tid_group *grp;
u32 rcventry;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index ae9582ddbc8f..b0e9bf7cd150 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -330,9 +330,8 @@ void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
/*
* The PSN_MASK and PSN_SHIFT allow for
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index d602b698b57e..4921c1e40ccd 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,23 +1,34 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HNS
- bool "HNS RoCE Driver"
+ tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on (HNS_DSAF && HNS_ENET) || HNS3
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
is used in Hisilicon Hip06 and more further ICT SoC based on
platform device.
+ To compile HIP06 or HIP08 driver as module, choose M here.
+
config INFINIBAND_HNS_HIP06
- tristate "Hisilicon Hip06 Family RoCE support"
+ bool "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
+ depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoC. These RoCE engines are platform devices.
+ To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
+ module will be called hns-roce-hw-v1
+
config INFINIBAND_HNS_HIP08
- tristate "Hisilicon Hip08 Family RoCE support"
+ bool "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
+ depends on INFINIBAND_HNS=m || HNS3=y
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
+
+ To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
+ module will be called hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 449a2d81319d..e105945b94a1 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,8 +9,12 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
+ifdef CONFIG_INFINIBAND_HNS_HIP06
hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
+endif
+ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
+endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 90e08c0c332d..8a522e14ef62 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -46,32 +46,32 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
struct hns_roce_ah *ah = to_hr_ah(ibah);
- u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ u16 vlan_id = 0xffff;
bool vlan_en = false;
int ret;
gid_attr = ah_attr->grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan_tag, NULL);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
if (ret)
return ret;
/* Get mac address */
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
- if (vlan_tag < VLAN_CFI_MASK) {
+ if (vlan_id < VLAN_N_VID) {
vlan_en = true;
- vlan_tag |= (rdma_ah_get_sl(ah_attr) &
+ vlan_id |= (rdma_ah_get_sl(ah_attr) &
HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
}
ah->av.port = rdma_ah_get_port_num(ah_attr);
ah->av.gid_index = grh->sgid_index;
- ah->av.vlan = vlan_tag;
+ ah->av.vlan_id = vlan_id;
ah->av.vlan_en = vlan_en;
- dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
- ah->av.vlan);
+ dev_dbg(dev, "gid_index = 0x%x,vlan_id = 0x%x\n", ah->av.gid_index,
+ ah->av.vlan_id);
if (rdma_ah_get_static_rate(ah_attr))
ah->av.stat_rate = IB_RATE_10_GBPS;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 8c063c598d2a..da574c26e063 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -55,7 +55,7 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
bitmap->last = 0;
*obj |= bitmap->top;
} else {
- ret = -1;
+ ret = -EINVAL;
}
spin_unlock(&bitmap->lock);
@@ -100,7 +100,7 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
}
*obj |= bitmap->top;
} else {
- ret = -1;
+ ret = -EINVAL;
}
spin_unlock(&bitmap->lock);
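The two hunks above replace a bare -1 return with -EINVAL, bringing the bitmap allocator in line with the convention of returning negative errno values so callers can propagate a meaningful code. A trivial userspace sketch of the convention (errno constants come from <errno.h>; the allocator itself is invented):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Invented allocator: hand out ids 0..max-1, then fail with -EINVAL. */
static int example_alloc_id(unsigned int *next, unsigned int max)
{
        if (*next >= max)
                return -EINVAL;         /* negative errno, not a bare -1 */
        return (int)(*next)++;
}

int main(void)
{
        unsigned int next = 0;
        int id;

        while ((id = example_alloc_id(&next, 2)) >= 0)
                printf("got id %d\n", id);

        printf("allocation failed: %s\n", strerror(-id));
        return 0;
}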
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 2b6ac646ca9a..1915bacaded0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -115,12 +115,12 @@ enum {
enum {
/* TPT commands */
- HNS_ROCE_CMD_SW2HW_MPT = 0xd,
- HNS_ROCE_CMD_HW2SW_MPT = 0xf,
+ HNS_ROCE_CMD_CREATE_MPT = 0xd,
+ HNS_ROCE_CMD_DESTROY_MPT = 0xf,
/* CQ commands */
- HNS_ROCE_CMD_SW2HW_CQ = 0x16,
- HNS_ROCE_CMD_HW2SW_CQ = 0x17,
+ HNS_ROCE_CMD_CREATE_CQC = 0x16,
+ HNS_ROCE_CMD_DESTROY_CQC = 0x17,
/* QP/EE commands */
HNS_ROCE_CMD_RST2INIT_QP = 0x19,
@@ -129,14 +129,14 @@ enum {
HNS_ROCE_CMD_RTS2RTS_QP = 0x1c,
HNS_ROCE_CMD_2ERR_QP = 0x1e,
HNS_ROCE_CMD_RTS2SQD_QP = 0x1f,
- HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
HNS_ROCE_CMD_2RST_QP = 0x21,
HNS_ROCE_CMD_QUERY_QP = 0x22,
- HNS_ROCE_CMD_SW2HW_SRQ = 0x70,
+ HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
+ HNS_ROCE_CMD_CREATE_SRQ = 0x70,
HNS_ROCE_CMD_MODIFY_SRQC = 0x72,
HNS_ROCE_CMD_QUERY_SRQC = 0x73,
- HNS_ROCE_CMD_HW2SW_SRQ = 0x74,
+ HNS_ROCE_CMD_DESTROY_SRQ = 0x74,
};
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 22541d19cd09..af1d8823b3f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,8 @@
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
-{
- struct ib_cq *ibcq = &hr_cq->ib_cq;
-
- ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
- enum hns_roce_event event_type)
-{
- struct hns_roce_dev *hr_dev;
- struct ib_event event;
- struct ib_cq *ibcq;
-
- ibcq = &hr_cq->ib_cq;
- hr_dev = to_hr_dev(ibcq->device);
-
- if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(hr_dev->dev,
- "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
- event_type, hr_cq->cqn);
- return;
- }
-
- if (ibcq->event_handler) {
- event.device = ibcq->device;
- event.event = IB_EVENT_CQ_ERR;
- event.element.cq = ibcq;
- ibcq->event_handler(&event, ibcq->cq_context);
- }
-}
-
-static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
- HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
- struct hns_roce_mtt *hr_mtt,
- struct hns_roce_cq *hr_cq, int vector)
+static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_hem_table *mtt_table;
@@ -101,35 +58,32 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
else
mtt_table = &hr_dev->mr_table.mtt_table;
- mtts = hns_roce_table_find(hr_dev, mtt_table,
- hr_mtt->first_seg, &dma_handle);
- if (!mtts) {
- dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
- return -EINVAL;
- }
+ mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
+ &dma_handle);
- if (vector >= hr_dev->caps.num_comp_vectors) {
- dev_err(dev, "CQ alloc.Invalid vector.\n");
+ if (!mtts) {
+ dev_err(dev, "Failed to find mtt for CQ buf.\n");
return -EINVAL;
}
- hr_cq->vector = vector;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
- if (ret == -1) {
- dev_err(dev, "CQ alloc.Failed to alloc index.\n");
- return -ENOMEM;
+ if (ret) {
+ dev_err(dev, "Num of CQ out of range.\n");
+ return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+ dev_err(dev,
+ "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
+ ret, hr_cq->cqn);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "CQ alloc failed xa_store.\n");
+ dev_err(dev, "Failed to xa_store CQ.\n");
goto err_put;
}
@@ -140,14 +94,16 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
goto err_xa;
}
- hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
- nent, vector);
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
/* Send mailbox to hw */
- ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
+ HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
+ dev_err(dev,
+ "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
+ ret, hr_cq->cqn);
goto err_xa;
}
@@ -170,24 +126,17 @@ err_out:
return ret;
}
-static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
int ret;
- ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
+ HNS_ROCE_CMD_DESTROY_CQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
- dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
+ dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
xa_erase(&cq_table->array, hr_cq->cqn);
@@ -204,103 +153,91 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
-static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
- struct ib_udata *udata,
- struct hns_roce_cq_buf *buf,
- struct ib_umem **umem, u64 buf_addr, int cqe)
+static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct hns_roce_ib_create_cq ucmd,
+ struct ib_udata *udata)
{
- int ret;
- u32 page_shift;
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
+ struct ib_umem **umem = &hr_cq->umem;
u32 npages;
+ int ret;
- *umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
- IB_ACCESS_LOCAL_WRITE, 1);
+ *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
-
- if (hr_dev->caps.cqe_buf_pg_sz) {
- npages = (ib_umem_page_count(*umem) +
- (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.cqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
- &buf->hr_mtt);
- } else {
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
- PAGE_SHIFT, &buf->hr_mtt);
- }
+ mtt->mtt_type = MTT_TYPE_WQE;
+
+ npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
+ 1 << hr_dev->caps.cqe_buf_pg_sz);
+ ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
ib_umem_release(*umem);
return ret;
}
-static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, u32 nent)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
int ret;
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- (1 << page_shift) * 2, &buf->hr_buf,
- page_shift);
+ ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
+ buf, buf->page_shift);
if (ret)
goto out;
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+ mtt->mtt_type = MTT_TYPE_WQE;
- ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
- buf->hr_buf.page_shift, &buf->hr_mtt);
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
+ ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
- hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, buf->size, buf);
+
out:
return ret;
}
-static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, int cqe)
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
- hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
static int create_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp,
- int cq_entries)
+ struct hns_roce_ib_create_cq_resp *resp)
{
struct hns_roce_ib_create_cq ucmd;
struct device *dev = hr_dev->dev;
@@ -314,9 +251,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
}
/* Get user space address, write it into mtt table */
- ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
- &hr_cq->umem, ucmd.buf_addr,
- cq_entries);
+ ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
return ret;
@@ -337,17 +272,16 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
return ret;
}
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, int cq_entries)
+ struct hns_roce_cq *hr_cq)
{
struct device *dev = hr_dev->dev;
- struct hns_roce_uar *uar;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
@@ -361,15 +295,14 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
}
/* Init mtt table and write buff address to mtt table */
- ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+ ret = alloc_cq_buf(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_db;
}
- uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * uar->index;
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
return 0;
@@ -392,64 +325,69 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev,
(udata->outlen >= sizeof(*resp)))
hns_roce_db_unmap_user(context, &hr_cq->db);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
- struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct device *dev = hr_dev->dev;
int vector = attr->comp_vector;
- int cq_entries = attr->cqe;
+ u32 cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
+ dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
- if (hr_dev->caps.min_cqes)
- cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ if (vector >= hr_dev->caps.num_comp_vectors) {
+ dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
+ vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
- cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
- hr_cq->ib_cq.cqe = cq_entries - 1;
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ cq_entries = roundup_pow_of_two(cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+ hr_cq->cq_depth = cq_entries;
+ hr_cq->vector = vector;
+ hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
if (udata) {
- ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
+ ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
if (ret) {
dev_err(dev, "Create cq failed in user mode!\n");
goto err_cq;
}
} else {
- ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
+ ret = create_kernel_cq(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Create cq failed in kernel mode!\n");
goto err_cq;
}
}
- /* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
- hr_cq, vector);
+ ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
if (ret) {
- dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
+ dev_err(dev, "Alloc CQ failed(%d).\n", ret);
goto err_dbmap;
}
@@ -462,11 +400,6 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
if (!udata && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
- /* Get created cq handler and carry out event */
- hr_cq->comp = hns_roce_ib_cq_comp;
- hr_cq->event = hns_roce_ib_cq_event;
- hr_cq->cq_depth = cq_entries;
-
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -477,7 +410,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
return 0;
err_cqc:
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
err_dbmap:
if (udata)
@@ -489,7 +422,7 @@ err_cq:
return ret;
}
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -499,8 +432,8 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
return;
}
- hns_roce_free_cq(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_free_cqc(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (udata) {
@@ -512,7 +445,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
&hr_cq->db);
} else {
/* Free the buff of stored cq */
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
@@ -520,38 +453,57 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_cq *ibcq;
- cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
- if (!cq) {
- dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+ cqn);
return;
}
- ++cq->arm_sn;
- cq->comp(cq);
+ ++hr_cq->arm_sn;
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->comp_handler)
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
}
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
- struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_event event;
+ struct ib_cq *ibcq;
- cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+ return;
+ }
- if (!cq) {
- dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+ dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+ event_type, cqn);
return;
}
- cq->event(cq, (enum hns_roce_event)event_type);
+ atomic_inc(&hr_cq->refcount);
+
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
}
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
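Note on the CQ sizing done in hns_roce_create_cq() above: the requested entry count is clamped to the device minimum, rounded up to a power of two, and the buffer size is derived from the per-entry size, with ib_cq.cqe kept as depth - 1 for index masking. A minimal userspace sketch of that arithmetic; the helper stands in for the kernel's roundup_pow_of_two() and the numeric caps values are illustrative, not taken from the driver:

#include <stdio.h>

/* stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow_of_two(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int min_cqes = 64;      /* illustrative, like caps.min_cqes    */
        unsigned int cq_entry_sz = 32;   /* illustrative, like caps.cq_entry_sz */
        unsigned int requested = 100;    /* attr->cqe from the caller           */

        unsigned int depth = requested < min_cqes ? min_cqes : requested;

        depth = roundup_pow_of_two(depth);              /* cq_depth            */
        printf("cq_depth=%u cqe_index_mask=%u buf_size=%u\n",
               depth, depth - 1, depth * cq_entry_sz);  /* 128, 127, 4096      */
        return 0;
}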
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index c00714c2f16a..10af6958ab69 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -31,7 +31,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
refcount_set(&page->refcount, 1);
page->user_virt = page_addr;
- page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 96d1302abde1..5617434cbfb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -45,7 +45,7 @@
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
-#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
+#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
@@ -53,8 +53,6 @@
#define BA_BYTE_LEN 8
-#define BITS_PER_BYTE 8
-
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20
@@ -426,7 +424,6 @@ struct hns_roce_wq {
u64 *wrid; /* Work request ID */
spinlock_t lock;
int wqe_cnt; /* WQE num */
- u32 max_post;
int max_gs;
int offset;
int wqe_shift; /* WQE size */
@@ -451,6 +448,7 @@ struct hns_roce_buf {
struct hns_roce_buf_list *page_list;
int nbufs;
u32 npages;
+ u32 size;
int page_shift;
};
@@ -482,22 +480,14 @@ struct hns_roce_db {
int order;
};
-struct hns_roce_cq_buf {
- struct hns_roce_buf hr_buf;
- struct hns_roce_mtt hr_mtt;
-};
-
struct hns_roce_cq {
struct ib_cq ib_cq;
- struct hns_roce_cq_buf hr_buf;
+ struct hns_roce_buf buf;
+ struct hns_roce_mtt mtt;
struct hns_roce_db db;
u8 db_en;
spinlock_t lock;
struct ib_umem *umem;
- void (*comp)(struct hns_roce_cq *cq);
- void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);
-
- struct hns_roce_uar *uar;
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
@@ -521,9 +511,8 @@ struct hns_roce_idx_que {
struct hns_roce_srq {
struct ib_srq ibsrq;
- void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
unsigned long srqn;
- int max;
+ u32 wqe_cnt;
int max_gs;
int wqe_shift;
void __iomem *db_reg_l;
@@ -539,8 +528,8 @@ struct hns_roce_srq {
spinlock_t lock;
int head;
int tail;
- u16 wqe_ctr;
struct mutex mutex;
+ void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
struct hns_roce_uar_table {
@@ -582,7 +571,7 @@ struct hns_roce_av {
u8 tclass;
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[ETH_ALEN];
- u16 vlan;
+ u16 vlan_id;
bool vlan_en;
};
@@ -695,10 +684,6 @@ struct hns_roce_qp {
struct hns_roce_rinl_buf rq_inl_buf;
};
-struct hns_roce_sqp {
- struct hns_roce_qp hr_qp;
-};
-
struct hns_roce_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
@@ -821,8 +806,8 @@ struct hns_roce_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int num_cqs;
- int max_cqes;
- int min_cqes;
+ u32 max_cqes;
+ u32 min_cqes;
u32 min_wqes;
int reserved_cqs;
int reserved_srqs;
@@ -953,7 +938,7 @@ struct hns_roce_hw {
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
- dma_addr_t dma_handle, int nent, u32 vector);
+ dma_addr_t dma_handle);
int (*set_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj, int step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev,
@@ -1092,11 +1077,6 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
-static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
-{
- return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
-}
-
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
__raw_writeq(*(u64 *) val, dest);
@@ -1198,9 +1178,9 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index);
+int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
@@ -1257,12 +1237,11 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata);
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
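The renamed HNS_ROCE_ALIGN_UP macro above rounds a byte count up to the next multiple of its second argument, which the QP code uses to page-align SQ/RQ/SGE regions. A small standalone check of that arithmetic (values are illustrative):

#include <assert.h>
#include <stdio.h>

#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

int main(void)
{
        /* queue byte counts rounded up to a 4 KiB page */
        assert(HNS_ROCE_ALIGN_UP(1, 4096) == 4096);
        assert(HNS_ROCE_ALIGN_UP(4096, 4096) == 4096);
        assert(HNS_ROCE_ALIGN_UP(4097, 4096) == 8192);
        printf("align-up checks passed\n");
        return 0;
}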
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5f74bf55f471..2a2b2112f886 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -732,7 +732,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
if (!cq)
return -ENOMEM;
- ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL);
+ ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
if (ret) {
dev_err(dev, "Create cq for reserved loop qp failed!");
goto alloc_cq_failed;
@@ -868,7 +868,7 @@ alloc_pd_failed:
kfree(pd);
alloc_mem_failed:
- hns_roce_ib_destroy_cq(cq, NULL);
+ hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
kfree(cq);
return ret;
@@ -897,7 +897,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
i, ret);
}
- hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
+ hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
kfree(&free_mr->mr_free_cq->ib_cq);
hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
kfree(&free_mr->mr_free_pd->ibpd);
@@ -1114,9 +1114,10 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
free_mr = &priv->free_mr;
if (mr->enabled) {
- if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
- & (hr_dev->caps.num_mtpts - 1)))
- dev_warn(dev, "HW2SW_MPT failed!\n");
+ if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1)))
+ dev_warn(dev, "DESTROY_MPT failed!\n");
}
mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
@@ -1979,8 +1980,7 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
- n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -1989,7 +1989,7 @@ static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
- !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
+ !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
}
static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
@@ -2072,8 +2072,7 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle, int nent,
- u32 vector)
+ u64 *mtts, dma_addr_t dma_handle)
{
struct hns_roce_cq_context *cq_context = NULL;
struct hns_roce_buf_list *tptr_buf;
@@ -2108,9 +2107,9 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->cqc_byte_12,
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
- ilog2((unsigned int)nent));
+ ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
- CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
+ CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
@@ -3644,10 +3643,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
- if (hr_qp->ibqp.qp_type == IB_QPT_RC)
- kfree(hr_qp);
- else
- kfree(hr_to_hr_sqp(hr_qp));
+ kfree(hr_qp);
return 0;
}
@@ -3658,10 +3654,9 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct device *dev = &hr_dev->pdev->dev;
u32 cqe_cnt_ori;
u32 cqe_cnt_cur;
- u32 cq_buf_size;
int wait_time = 0;
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
/*
* Before freeing cq buffer, we need to ensure that the outstanding CQE
@@ -3686,13 +3681,12 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
wait_time++;
}
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (!udata) {
/* Free the buff of stored cq */
- cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
- hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
}
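On the get_sw_cqe() change above: with cq_depth a power of two, the term !!(n & cq_depth) toggles once per lap of the consumer index around the ring, and the XOR returns the CQE only when the owner bit stored in it differs from that parity. A minimal sketch of how the parity behaves, assuming an illustrative depth of 8:

#include <stdio.h>

int main(void)
{
        unsigned int cq_depth = 8;      /* power of two, illustrative */
        unsigned int n;

        /*
         * The parity value flips each time the free-running consumer index
         * crosses a multiple of cq_depth; hardware toggles the CQE owner bit
         * on each lap, so comparing the two tells old entries from new ones.
         */
        for (n = 0; n < 3 * cq_depth; n++) {
                int parity = !!(n & cq_depth);

                printf("cons_index=%2u slot=%u parity=%d\n",
                       n, n & (cq_depth - 1), parity);
        }
        return 0;
}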
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index e82567fcdeb7..cb8071a3e0d5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -389,7 +389,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_VLAN_M,
V2_UD_SEND_WQE_BYTE_36_VLAN_S,
- le16_to_cpu(ah->av.vlan));
+ ah->av.vlan_id);
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
@@ -2447,8 +2447,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
- n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2457,7 +2456,7 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
- !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+ !!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
@@ -2550,8 +2549,7 @@ static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle, int nent,
- u32 vector)
+ u64 *mtts, dma_addr_t dma_handle)
{
struct hns_roce_v2_cq_context *cq_context;
@@ -2563,9 +2561,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
- V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+ V2_CQC_BYTE_4_SHIFT_S,
+ ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
- V2_CQC_BYTE_4_CEQN_S, vector);
+ V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
@@ -4061,8 +4060,8 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
const struct ib_gid_attr *gid_attr = NULL;
int is_roce_protocol;
+ u16 vlan_id = 0xffff;
bool is_udp = false;
- u16 vlan = 0xffff;
u8 ib_port;
u8 hr_port;
int ret;
@@ -4074,7 +4073,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
if (is_roce_protocol) {
gid_attr = attr->ah_attr.grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
if (ret)
return ret;
@@ -4083,7 +4082,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
IB_GID_TYPE_ROCE_UDP_ENCAP);
}
- if (vlan < VLAN_CFI_MASK) {
+ if (vlan_id < VLAN_N_VID) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
@@ -4095,7 +4094,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
}
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, vlan);
+ V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
@@ -4650,16 +4649,14 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
{
struct hns_roce_cq *send_cq, *recv_cq;
struct ib_device *ibdev = &hr_dev->ib_dev;
- int ret;
+ int ret = 0;
if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
- if (ret) {
+ if (ret)
ibdev_err(ibdev, "modify QP to Reset failed.\n");
- return ret;
- }
}
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
@@ -4715,7 +4712,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
kfree(hr_qp->rq_inl_buf.wqe_list);
}
- return 0;
+ return ret;
}
static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
@@ -4725,16 +4722,11 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
int ret;
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
- if (ret) {
+ if (ret)
ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
- return ret;
- }
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- kfree(hr_to_hr_sqp(hr_qp));
- else
- kfree(hr_qp);
+ kfree(hr_qp);
return 0;
}
@@ -4951,10 +4943,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
struct hns_roce_dev *hr_dev = eq->hr_dev;
- __le32 doorbell[2];
-
- doorbell[0] = 0;
- doorbell[1] = 0;
+ __le32 doorbell[2] = {};
if (eq->type_flag == HNS_ROCE_AEQ) {
roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
@@ -6047,7 +6036,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
hr_dev->caps.srqwqe_hop_num));
roce_set_field(srq_context->byte_4_srqn_srqst,
SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->max));
+ ilog2(srq->wqe_cnt));
roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
SRQC_BYTE_4_SRQN_S, srq->srqn);
@@ -6092,11 +6081,11 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- hr_dev->caps.idx_ba_pg_sz);
+ hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- hr_dev->caps.idx_buf_pg_sz);
+ hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
srq_context->idx_nxt_blk_addr =
cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
@@ -6133,7 +6122,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
int ret;
if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->max)
+ if (srq_attr->srq_limit >= srq->wqe_cnt)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -6193,7 +6182,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
SRQC_BYTE_8_SRQ_LIMIT_WL_S);
attr->srq_limit = limit_wl;
- attr->max_wr = srq->max - 1;
+ attr->max_wr = srq->wqe_cnt - 1;
attr->max_sge = srq->max_gs;
memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
@@ -6246,7 +6235,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
spin_lock_irqsave(&srq->lock, flags);
- ind = srq->head & (srq->max - 1);
+ ind = srq->head & (srq->wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->max_gs)) {
@@ -6261,7 +6250,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
break;
}
- wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
+ wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
if (wqe_idx < 0) {
ret = -ENOMEM;
*bad_wr = wr;
@@ -6285,7 +6274,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->max - 1);
+ ind = (ind + 1) & (srq->wqe_cnt - 1);
}
if (likely(nreq)) {
@@ -6380,12 +6369,14 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
-static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
int i;
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
@@ -6410,8 +6401,6 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
priv->handle = handle;
-
- return 0;
}
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
@@ -6429,14 +6418,7 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_kzalloc;
}
- hr_dev->pci_dev = handle->pdev;
- hr_dev->dev = &handle->pdev->dev;
-
- ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
- if (ret) {
- dev_err(hr_dev->dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
- }
+ hns_roce_hw_v2_get_cfg(hr_dev, handle);
ret = hns_roce_init(hr_dev);
if (ret) {
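The SRQ post path above (ind = srq->head & (srq->wqe_cnt - 1), then advancing with the same mask) relies on wqe_cnt being a power of two so the slot index wraps without a modulo. A minimal sketch of that ring-index arithmetic with illustrative values:

#include <stdio.h>

int main(void)
{
        unsigned int wqe_cnt = 8;       /* power of two, illustrative  */
        unsigned int head = 13;         /* free-running producer count */
        int i;

        /* slot index wraps via the mask because wqe_cnt is a power of two */
        for (i = 0; i < 4; i++) {
                unsigned int ind = (head + i) & (wqe_cnt - 1);

                printf("post #%d -> slot %u\n", i, ind);        /* 5, 6, 7, 0 */
        }
        return 0;
}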
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 43219d2f7de0..76a14db7028d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -87,8 +87,8 @@
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
-#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
-#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
+#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
+#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b5d196c119ee..854ef6e74788 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -111,7 +111,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
- dev_err(dev, "port(%d) can't find netdev\n", port);
+ dev_err(dev, "Can't find netdev on port(%u)!\n", port);
return -ENODEV;
}
@@ -253,7 +253,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
net_dev = hr_dev->iboe.netdevs[port];
if (!net_dev) {
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_err(dev, "find netdev %d failed!\r\n", port);
+ dev_err(dev, "Find netdev %u failed!\n", port);
return -EINVAL;
}
@@ -301,12 +301,6 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
return 0;
}
-static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
@@ -359,7 +353,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
return rdma_user_mmap_io(context, vma,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
/* vm_pgoff: 1 -- TPTR */
case 1:
@@ -372,7 +367,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
return rdma_user_mmap_io(context, vma,
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
hr_dev->tptr_size,
- vma->vm_page_prot);
+ vma->vm_page_prot,
+ NULL);
default:
return -EINVAL;
@@ -423,14 +419,14 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.alloc_pd = hns_roce_alloc_pd,
.alloc_ucontext = hns_roce_alloc_ucontext,
.create_ah = hns_roce_create_ah,
- .create_cq = hns_roce_ib_create_cq,
+ .create_cq = hns_roce_create_cq,
.create_qp = hns_roce_create_qp,
.dealloc_pd = hns_roce_dealloc_pd,
.dealloc_ucontext = hns_roce_dealloc_ucontext,
.del_gid = hns_roce_del_gid,
.dereg_mr = hns_roce_dereg_mr,
.destroy_ah = hns_roce_destroy_ah,
- .destroy_cq = hns_roce_ib_destroy_cq,
+ .destroy_cq = hns_roce_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
.fill_res_entry = hns_roce_fill_res_entry,
.get_dma_mr = hns_roce_get_dma_mr,
@@ -438,7 +434,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
.modify_device = hns_roce_modify_device,
- .modify_port = hns_roce_modify_port,
.modify_qp = hns_roce_modify_qp,
.query_ah = hns_roce_query_ah,
.query_device = hns_roce_query_device,
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 5f8416ba09a9..9ad19170c3f9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -48,21 +48,21 @@ unsigned long key_to_hw_index(u32 key)
return (key << 24) | (key >> 8);
}
-static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
+static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
- HNS_ROCE_CMD_SW2HW_MPT,
+ HNS_ROCE_CMD_CREATE_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
+int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
- mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
+ mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
@@ -83,7 +83,7 @@ static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
}
}
spin_unlock(&buddy->lock);
- return -1;
+ return -EINVAL;
found:
clear_bit(*seg, buddy->bits[o]);
@@ -206,13 +206,14 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
}
ret = hns_roce_buddy_alloc(buddy, order, seg);
- if (ret == -1)
- return -1;
+ if (ret)
+ return ret;
- if (hns_roce_table_get_range(hr_dev, table, *seg,
- *seg + (1 << order) - 1)) {
+ ret = hns_roce_table_get_range(hr_dev, table, *seg,
+ *seg + (1 << order) - 1);
+ if (ret) {
hns_roce_buddy_free(buddy, *seg, order);
- return -1;
+ return ret;
}
return 0;
@@ -578,7 +579,7 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
/* Allocate a key for mr from mr_table */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
mr->iova = iova; /* MR va starting addr */
@@ -707,10 +708,11 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
int ret;
if (mr->enabled) {
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
- & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
}
if (mr->size != ~0ULL) {
@@ -763,10 +765,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
goto err_page;
}
@@ -1143,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
@@ -1228,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
}
ib_umem_release(mr->umem);
- mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
@@ -1308,9 +1310,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
if (ret)
goto free_cmd_mbox;
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
- dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
mr->enabled = 0;
@@ -1332,9 +1334,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
goto free_cmd_mbox;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
- dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
ib_umem_release(mr->umem);
goto free_cmd_mbox;
}
@@ -1448,10 +1450,11 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
int ret;
if (mw->enabled) {
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
- & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mw->rkey) &
+ (hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
key_to_hw_index(mw->rkey));
@@ -1487,10 +1490,10 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
+ dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
goto err_page;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 912b89b4da34..780c780fdb22 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -96,7 +96,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
/* Using bitmap to manager UAR index */
ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index bd78ff90d998..a6565b674801 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -318,7 +318,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
* hr_qp->rq.max_gs);
}
- cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
+ cap->max_recv_wr = hr_qp->rq.wqe_cnt;
cap->max_recv_sge = hr_qp->rq.max_gs;
return 0;
@@ -332,9 +332,8 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
u8 max_sq_stride = ilog2(roundup_sq_stride);
/* Sanity check SQ size before proceeding */
- if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
- ucmd->log_sq_stride > max_sq_stride ||
- ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+ if (ucmd->log_sq_stride > max_sq_stride ||
+ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
return -EINVAL;
}
@@ -358,13 +357,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
u32 max_cnt;
int ret;
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
+ hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
+ return -EINVAL;
+
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
return ret;
}
- hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
max_cnt = max(1U, cap->max_send_sge);
@@ -391,37 +393,37 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to page_szie */
if (hr_dev->caps.max_sq_sg <= 2) {
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt = ex_sge_num ?
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
- HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift), page_size) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
if (ex_sge_num) {
- hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+ hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
page_size);
hr_qp->rq.offset = hr_qp->sge.offset +
- HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift),
page_size);
} else {
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+ hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
page_size);
@@ -591,24 +593,24 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
- size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+ size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
(u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
- size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+ size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size);
}
hr_qp->rq.offset = size;
- size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+ size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
page_size);
hr_qp->buff_size = size;
/* Get wr and sge number which send */
- cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
+ cap->max_send_wr = hr_qp->sq.wqe_cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
/* We don't support inline sends for kernel QPs (yet) */
@@ -743,7 +745,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
- hr_qp->buff_size, 0, 0);
+ hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
@@ -1017,7 +1019,6 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
int ret;
@@ -1030,7 +1031,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
hr_qp);
if (ret) {
- ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n",
+ ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
kfree(hr_qp);
return ERR_PTR(ret);
@@ -1047,11 +1048,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
- hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
- if (!hr_sqp)
+ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+ if (!hr_qp)
return ERR_PTR(-ENOMEM);
- hr_qp = &hr_sqp->hr_qp;
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
@@ -1066,7 +1066,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp->ibqp.qp_num, hr_qp);
if (ret) {
ibdev_err(ibdev, "Create GSI QP failed!\n");
- kfree(hr_sqp);
+ kfree(hr_qp);
return ERR_PTR(ret);
}
@@ -1289,7 +1289,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
u32 cur;
cur = hr_wq->head - hr_wq->tail;
- if (likely(cur + nreq < hr_wq->max_post))
+ if (likely(cur + nreq < hr_wq->wqe_cnt))
return false;
hr_cq = to_hr_cq(ib_cq);
@@ -1297,7 +1297,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
cur = hr_wq->head - hr_wq->tail;
spin_unlock(&hr_cq->lock);
- return cur + nreq >= hr_wq->max_post;
+ return cur + nreq >= hr_wq->wqe_cnt;
}
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
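The new SQ size check above computes 1 << ucmd->log_sq_bb_count only after verifying the shift cannot overflow the destination, then bounds the result against the device's max_wqes, instead of casting and comparing afterwards. A minimal userspace sketch of that validation; the helper below is a local stand-in for the kernel's check_shl_overflow(), and the numbers are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for check_shl_overflow(): returns true when the shift overflows */
static bool shl_overflows(uint32_t val, unsigned int shift, uint32_t *res)
{
        if (shift >= 32 || val > (UINT32_MAX >> shift))
                return true;
        *res = val << shift;
        return false;
}

int main(void)
{
        uint32_t max_wqes = 32768;          /* illustrative, like caps.max_wqes */
        unsigned int log_sq_bb_count = 40;  /* hostile user-space input         */
        uint32_t wqe_cnt;

        if (shl_overflows(1, log_sq_bb_count, &wqe_cnt) || wqe_cnt > max_wqes)
                printf("rejected: log_sq_bb_count=%u\n", log_sq_bb_count);
        else
                printf("sq.wqe_cnt=%u\n", wqe_cnt);
        return 0;
}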
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 0a31d0a3d657..06871731ac43 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -98,11 +98,15 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
- if (!table_attr)
+ if (!table_attr) {
+ ret = -EMSGSIZE;
goto err;
+ }
- if (hns_roce_fill_cq(msg, context))
+ if (hns_roce_fill_cq(msg, context)) {
+ ret = -EMSGSIZE;
goto err_cancel_table;
+ }
nla_nest_end(msg, table_attr);
kfree(context);
@@ -113,7 +117,7 @@ err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
kfree(context);
- return -EMSGSIZE;
+ return ret;
}
int hns_roce_fill_res_entry(struct sk_buff *msg,
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 43ea2c13b212..7113ebfdb4f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -59,21 +59,21 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
}
}
-static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
- HNS_ROCE_CMD_SW2HW_SRQ,
+ HNS_ROCE_CMD_CREATE_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
+ mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
@@ -95,8 +95,7 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
srq->mtt.first_seg,
&dma_handle_wqe);
if (!mtts_wqe) {
- dev_err(hr_dev->dev,
- "SRQ alloc.Failed to find srq buf addr.\n");
+ dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n");
return -EINVAL;
}
@@ -106,13 +105,14 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
&dma_handle_idx);
if (!mtts_idx) {
dev_err(hr_dev->dev,
- "SRQ alloc.Failed to find idx que buf addr.\n");
+ "Failed to find mtt for srq idx queue buf.\n");
return -EINVAL;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
- if (ret == -1) {
- dev_err(hr_dev->dev, "SRQ alloc.Failed to alloc index.\n");
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Failed to alloc a bit from srq bitmap.\n");
return -ENOMEM;
}
@@ -134,7 +134,7 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
mtts_wqe, mtts_idx, dma_handle_wqe,
dma_handle_idx);
- ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
+ ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
goto err_xa;
@@ -160,9 +160,9 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
- ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
+ ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
if (ret)
- dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN %06lx\n",
+ dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
ret, srq->srqn);
xa_erase(&srq_table->xa, srq->srqn);
@@ -180,22 +180,23 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct hns_roce_ib_create_srq ucmd;
- u32 page_shift;
- u32 npages;
+ struct hns_roce_buf *buf;
int ret;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
- npages = (ib_umem_page_count(srq->umem) +
- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
+ buf = &srq->buf;
+ buf->npages = (ib_umem_page_count(srq->umem) +
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz);
+ buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
+ &srq->mtt);
if (ret)
goto err_user_buf;
@@ -205,16 +206,19 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
- srq->idx_que.buf_size, 0, 0);
+ srq->idx_que.buf_size, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_user_srq_mtt;
}
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem),
- PAGE_SHIFT, &srq->idx_que.mtt);
-
+ buf = &srq->idx_que.idx_buf;
+ buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
+ 1 << hr_dev->caps.idx_buf_pg_sz);
+ buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
+ &srq->idx_que.mtt);
if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
goto err_user_idx_mtt;
@@ -251,7 +255,7 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
- idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
if (!idx_que->bitmap)
return -ENOMEM;
@@ -277,7 +281,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
return -ENOMEM;
srq->head = 0;
- srq->tail = srq->max - 1;
+ srq->tail = srq->wqe_cnt - 1;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt);
@@ -308,7 +312,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
if (ret)
goto err_kernel_idx_buf;
- srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
+ srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_kernel_idx_buf;
@@ -354,7 +358,7 @@ static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
}
int hns_roce_create_srq(struct ib_srq *ib_srq,
- struct ib_srq_init_attr *srq_init_attr,
+ struct ib_srq_init_attr *init_attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
@@ -366,24 +370,24 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */
- if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
- srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
+ if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
+ init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return -EINVAL;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
- srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
- srq->max_gs = srq_init_attr->attr.max_sge;
+ srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+ srq->max_gs = init_attr->attr.max_sge;
srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
srq->wqe_shift = ilog2(srq_desc_size);
- srq_buf_size = srq->max * srq_desc_size;
+ srq_buf_size = srq->wqe_cnt * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
- srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
+ srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
@@ -401,8 +405,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
}
}
- cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
- to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
+ cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_hr_cq(init_attr->ext.cq)->cqn : 0;
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
@@ -449,7 +453,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
} else {
kvfree(srq->wrid);
- hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
+ hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
&srq->buf);
}
ib_umem_release(srq->idx_que.umem);
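The umem setup above groups the pinned PAGE_SIZE pages into larger driver pages: npages = DIV_ROUND_UP(umem_pages, 1 << extra_pg_sz) and page_shift = PAGE_SHIFT + extra_pg_sz. A short check of that arithmetic with illustrative values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int page_shift_base = 12;  /* 4 KiB PAGE_SHIFT                */
        unsigned int extra_pg_sz = 2;       /* illustrative caps.*_buf_pg_sz   */
        unsigned int umem_pages = 37;       /* 4 KiB pages pinned for the buf  */

        /* group the pinned 4 KiB pages into larger MTT pages */
        unsigned int npages = DIV_ROUND_UP(umem_pages, 1u << extra_pg_sz);
        unsigned int page_shift = page_shift_base + extra_pg_sz;

        printf("npages=%u page_shift=%u (page size %u bytes)\n",
               npages, page_shift, 1u << page_shift);   /* 10, 14, 16384 */
        return 0;
}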
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 2d6a378e8560..bb78d3280acc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2079,9 +2079,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
if (!dst || dst->error) {
if (dst) {
- dst_release(dst);
i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return rc;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index cd9ee1664a69..86375947bc67 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1763,7 +1763,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(udata, start, length, acc, 0);
+ region = ib_umem_get(udata, start, length, acc);
if (IS_ERR(region))
return (struct ib_mr *)region;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a7d238d312f0..306b21281fa2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -145,7 +145,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
int n;
*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 0f390351cef0..714f9df5bf39 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -64,7 +64,7 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 57079110af9b..abe68708d6d6 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -966,7 +966,6 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
mutex_unlock(&dev->counters_table[port_num - 1].mutex);
if (stats_avail) {
- memset(out_mad->data, 0, sizeof out_mad->data);
switch (counter_stats.counter_mode & 0xf) {
case 0:
edit_counter(&counter_stats,
@@ -984,38 +983,31 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
/* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
* queries, should be called only by VFs and for that specific purpose
*/
if (link == IB_LINK_LAYER_INFINIBAND) {
if (mlx4_is_slave(dev->dev) &&
- (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
- in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
- in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+ in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+ in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
+ return iboe_process_mad(ibdev, mad_flags, port_num,
+ in_wc, in_grh, in, out);
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in, out);
}
if (link == IB_LINK_LAYER_ETHERNET)
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ in_grh, in, out);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2f1e38b891..0b5dc1d5928f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -256,6 +256,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
int hw_update = 0;
int i;
struct gid_entry *gids = NULL;
+ u16 vlan_id = 0xffff;
+ u8 mac[ETH_ALEN];
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -266,12 +268,16 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
if (!context)
return -EINVAL;
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
port_gid_table = &iboe->gids[attr->port_num - 1];
spin_lock_bh(&iboe->lock);
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
if (!memcmp(&port_gid_table->gids[i].gid,
&attr->gid, sizeof(attr->gid)) &&
- port_gid_table->gids[i].gid_type == attr->gid_type) {
+ port_gid_table->gids[i].gid_type == attr->gid_type &&
+ port_gid_table->gids[i].vlan_id == vlan_id) {
found = i;
break;
}
@@ -291,6 +297,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
memcpy(&port_gid_table->gids[free].gid,
&attr->gid, sizeof(attr->gid));
port_gid_table->gids[free].gid_type = attr->gid_type;
+ port_gid_table->gids[free].vlan_id = vlan_id;
port_gid_table->gids[free].ctx->real_index = free;
port_gid_table->gids[free].ctx->refcount = 1;
hw_update = 1;
@@ -1146,7 +1153,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return rdma_user_mmap_io(context, vma,
to_mucontext(context)->uar.pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
case 1:
if (dev->dev->caps.bf_reg_size == 0)
@@ -1155,7 +1163,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
context, vma,
to_mucontext(context)->uar.pfn +
dev->dev->caps.num_uars,
- PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
+ NULL);
case 3: {
struct mlx4_clock_params params;
@@ -1171,7 +1180,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
params.bar) +
params.offset) >>
PAGE_SHIFT,
- PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
+ NULL);
}
default:
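The mmap hunks above (and the mlx5 ones later in this diff) append one extra argument to rdma_user_mmap_io(), passed as NULL at every converted call site; treating it as an optional mmap-entry pointer is an assumption here, since only the NULL is visible in the patch. A hedged sketch of a converted handler, with an invented function name:

	static int example_mmap_doorbell_page(struct ib_ucontext *context,
					      struct vm_area_struct *vma,
					      unsigned long pfn)
	{
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		/* the trailing NULL matches every converted call site in this patch */
		return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);
	}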
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index eb53bb4c0c91..d188573187fa 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -508,6 +508,7 @@ struct gid_entry {
union ib_gid gid;
enum ib_gid_type gid_type;
struct gid_cache_context *ctx;
+ u16 vlan_id;
};
struct mlx4_port_gid_table {
@@ -786,11 +787,10 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 6ae503cfc526..dfa17bcdcdbc 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(udata, start, length, access_flags, 0);
+ return ib_umem_get(udata, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bd4aa04416c6..85f57b76e446 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
- qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
+ qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1110,8 +1110,7 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (err)
goto err;
- qp->umem =
- ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0);
+ qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 848db7264cc9..8dcf6e3d9ae2 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -110,7 +110,7 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 9924be8384d8..d0a043ccbe58 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
- cong.o
+ cong.o restrack.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 45f48cde6b9d..dd8d24ee8e1d 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -423,9 +423,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_cqe64 *cqe64;
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
- struct mlx5_sig_err_cqe *sig_err_cqe;
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
uint8_t opcode;
uint32_t qpn;
u16 wqe_ctr;
@@ -519,27 +516,29 @@ repoll:
}
}
break;
- case MLX5_CQE_SIG_ERR:
- sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+ case MLX5_CQE_SIG_ERR: {
+ struct mlx5_sig_err_cqe *sig_err_cqe =
+ (struct mlx5_sig_err_cqe *)cqe64;
+ struct mlx5_core_sig_ctx *sig;
- xa_lock(&dev->mdev->priv.mkey_table);
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ xa_lock(&dev->sig_mrs);
+ sig = xa_load(&dev->sig_mrs,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- mr = to_mibmr(mmkey);
- get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
- mr->sig->sig_err_exists = true;
- mr->sig->sigerr_count++;
+ get_sig_err_item(sig_err_cqe, &sig->err_item);
+ sig->sig_err_exists = true;
+ sig->sigerr_count++;
mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
- cq->mcq.cqn, mr->sig->err_item.key,
- mr->sig->err_item.err_type,
- mr->sig->err_item.sig_err_offset,
- mr->sig->err_item.expected,
- mr->sig->err_item.actual);
+ cq->mcq.cqn, sig->err_item.key,
+ sig->err_item.err_type,
+ sig->err_item.sig_err_offset,
+ sig->err_item.expected,
+ sig->err_item.actual);
- xa_unlock(&dev->mdev->priv.mkey_table);
+ xa_unlock(&dev->sig_mrs);
goto repoll;
}
+ }
return 0;
}
@@ -710,7 +709,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->buf.umem =
ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1111,7 +1110,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
umem = ib_umem_get(udata, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
return err;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index d609f4659afb..9d0a18cf9e5e 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -100,6 +100,7 @@ struct devx_obj {
struct mlx5_ib_devx_mr devx_mr;
struct mlx5_core_dct core_dct;
struct mlx5_core_cq core_cq;
+ u32 flow_counter_bulk_size;
};
struct list_head event_sub; /* holds devx_event_subscription entries */
};
@@ -192,15 +193,20 @@ bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
}
}
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
+bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
struct devx_obj *devx_obj = obj;
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
+
+ if (offset && offset >= devx_obj->flow_counter_bulk_size)
+ return false;
+
*counter_id = MLX5_GET(dealloc_flow_counter_in,
devx_obj->dinbox,
flow_counter_id);
+ *counter_id += offset;
return true;
}
@@ -1265,8 +1271,8 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
mkey->pd = MLX5_GET(mkc, mkc, pd);
devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
- return xa_err(xa_store(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
+ return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
+ GFP_KERNEL));
}
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1345,9 +1351,9 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
* the mmkey, we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+ xa_erase(&obj->ib_dev->odp_mkeys,
mlx5_base_mkey(obj->devx_mr.mmkey.key));
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->odp_srcu);
}
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
@@ -1463,6 +1469,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (err)
goto obj_free;
+ if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
+ u8 bulk = MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk);
+ obj->flow_counter_bulk_size = 128UL * bulk;
+ }
+
uobj->object = obj;
INIT_LIST_HEAD(&obj->event_sub);
obj->ib_dev = dev;
@@ -2121,7 +2134,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
+ obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
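The flow-counter hunks above record the bulk allocation size on the DEVX object (128 counters per flow_counter_bulk unit) and let a rule select an offset into that bulk. A self-contained sketch of the same base-plus-offset resolution, with hypothetical names:

	static bool example_resolve_counter_id(u32 base_id, u32 bulk_size,
					       u32 offset, u32 *counter_id)
	{
		/* offset 0 is always legal; anything else must fall inside the bulk */
		if (offset && offset >= bulk_size)
			return false;

		*counter_id = base_id + offset;
		return true;
	}

With flow_counter_bulk = 2 the object covers 256 counter ids, so offsets 0 through 255 resolve to base_id + offset and larger offsets are rejected, mirroring the check added to mlx5_ib_devx_is_flow_counter().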
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 8f4e5f22b84c..12737c509aa2 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -64,7 +64,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index b198ff10cde9..dbee17d22d50 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -85,6 +85,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
int len, ret, i;
u32 counter_id = 0;
+ u32 *offset_attr;
+ u32 offset = 0;
if (!capable(CAP_NET_RAW))
return -EPERM;
@@ -151,8 +153,27 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
if (len) {
devx_obj = arr_flow_actions[0]->object;
- if (!mlx5_ib_devx_is_flow_counter(devx_obj, &counter_id))
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {
+
+ int num_offsets = uverbs_attr_ptr_get_array_size(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ sizeof(u32));
+
+ if (num_offsets != 1)
+ return -EINVAL;
+
+ offset_attr = uverbs_attr_get_alloced_ptr(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
+ offset = *offset_attr;
+ }
+
+ if (!mlx5_ib_devx_is_flow_counter(devx_obj, offset,
+ &counter_id))
return -EINVAL;
+
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
@@ -598,7 +619,11 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
MLX5_IB_OBJECT_DEVX_OBJ,
UVERBS_ACCESS_READ, 1, 1,
- UA_OPTIONAL));
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
+ UA_OPTIONAL,
+ UA_ALLOC_AND_COPY));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 4950df3f71b6..ac4d8d1b9a07 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -263,7 +263,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
},
.sq_sig_type = gsi->sq_sig_type,
.qp_type = IB_QPT_UD,
- .create_flags = mlx5_ib_create_qp_sqpn_qp1(),
+ .create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
};
return ib_create_qp(pd, &init_attr);
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index 649a3364f838..4f0edd4832bd 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -201,3 +201,27 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
return -EINVAL;
}
+
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 1, vf+1, rep);
+ if (err)
+ goto ex;
+
+ port_guid->guid = rep->port_guid;
+ node_guid->guid = rep->node_guid;
+ex:
+ kfree(rep);
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 348c1df69cdc..14e0c17de6a9 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -74,58 +74,6 @@ static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
port);
}
-static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in_mad, struct ib_mad *out_mad)
-{
- u16 slid;
- int err;
-
- slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
- return IB_MAD_RESULT_SUCCESS;
-
- /* Don't process SMInfo queries -- the SMA can't handle them.
- */
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
- return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
- return IB_MAD_RESULT_SUCCESS;
- } else {
- return IB_MAD_RESULT_SUCCESS;
- }
-
- err = mlx5_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
- if (err)
- return IB_MAD_RESULT_FAILURE;
-
- /* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
- /* no response for trap repress */
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
void *out)
{
@@ -271,30 +219,66 @@ done:
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
- int ret;
+ u8 mgmt_class = in->mad_hdr.mgmt_class;
+ u8 method = in->mad_hdr.method;
+ u16 slid;
+ int err;
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
+ be16_to_cpu(IB_LID_PERMISSIVE);
- memset(out_mad->data, 0, sizeof(out_mad->data));
+ if (method == IB_MGMT_METHOD_TRAP && !slid)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
- } else {
- ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
- in_mad, out_mad);
+ switch (mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET &&
+ method != IB_MGMT_METHOD_TRAP_REPRESS)
+ return IB_MAD_RESULT_SUCCESS;
+
+ /* Don't process SMInfo queries -- the SMA can't handle them.
+ */
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
+ method == IB_MGMT_METHOD_GET)
+ return process_pma_cmd(dev, port_num, in, out);
+ /* fallthrough */
+ case MLX5_IB_VENDOR_CLASS1:
+ /* fallthrough */
+ case MLX5_IB_VENDOR_CLASS2:
+ case IB_MGMT_CLASS_CONG_MGMT: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ default:
+ return IB_MAD_RESULT_SUCCESS;
}
- return ret;
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ /* set return bit in status of directed route responses */
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
+
+ if (method == IB_MGMT_METHOD_TRAP_REPRESS)
+ /* no response for trap repress */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
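The reworked handler above returns the usual ib_mad result mask: IB_MAD_RESULT_SUCCESS, possibly combined with IB_MAD_RESULT_REPLY (a response MAD should be sent) or IB_MAD_RESULT_CONSUMED (handled, no response), or IB_MAD_RESULT_FAILURE on error. A small illustration of how a caller would interpret that mask; the helper name is made up and not part of the patch:

	static bool example_mad_wants_reply(int ret)
	{
		if (ret & IB_MAD_RESULT_FAILURE)
			return false;
		if (!(ret & IB_MAD_RESULT_SUCCESS))
			return false;
		/* SUCCESS | CONSUMED means handled with no response MAD */
		return !!(ret & IB_MAD_RESULT_REPLY);
	}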
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 46ea4f0b9b51..51100350b688 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -67,6 +67,7 @@
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem_odp.h>
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -693,21 +694,6 @@ static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
get_atomic_caps(dev, atomic_size_qp, props);
}
-static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
- struct ib_device_attr *props)
-{
- u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
-
- get_atomic_caps(dev, atomic_size_qp, props);
-}
-
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
-{
- struct ib_device_attr props = {};
-
- get_atomic_caps_dc(dev, &props);
- return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
-}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -844,8 +830,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
if (uhw->outlen && uhw->outlen < resp_len)
return -EINVAL;
- else
- resp.response_length = resp_len;
+
+ resp.response_length = resp_len;
if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
@@ -1011,6 +997,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
props->max_pi_fast_reg_page_list_len =
props->max_fast_reg_page_list_len / 2;
+ props->max_sgl_rd =
+ MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
@@ -1161,8 +1149,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
- resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ else
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
resp.striding_rq_caps.supported_qpts =
@@ -1808,7 +1802,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
return -EINVAL;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ if (dev->wc_support)
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
@@ -2168,7 +2162,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
- prot);
+ prot, NULL);
if (err) {
mlx5_ib_err(dev,
"rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
@@ -2210,7 +2204,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
PAGE_SHIFT) +
page_idx;
return rdma_user_mmap_io(context, vma, pfn, map_size,
- pgprot_writecombine(vma->vm_page_prot));
+ pgprot_writecombine(vma->vm_page_prot),
+ NULL);
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2248,7 +2243,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
PAGE_SHIFT;
return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -5705,11 +5701,10 @@ static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
- if (!dev->delay_drop.dbg)
+ if (!dev->delay_drop.dir_debugfs)
return;
- debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
- kfree(dev->delay_drop.dbg);
- dev->delay_drop.dbg = NULL;
+ debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
+ dev->delay_drop.dir_debugfs = NULL;
}
static void cancel_delay_drop(struct mlx5_ib_dev *dev)
@@ -5760,52 +5755,22 @@ static const struct file_operations fops_delay_drop_timeout = {
.read = delay_drop_timeout_read,
};
-static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
+static void delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_ib_dbg_delay_drop *dbg;
+ struct dentry *root;
if (!mlx5_debugfs_root)
- return 0;
-
- dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
- if (!dbg)
- return -ENOMEM;
-
- dev->delay_drop.dbg = dbg;
-
- dbg->dir_debugfs =
- debugfs_create_dir("delay_drop",
- dev->mdev->priv.dbg_root);
- if (!dbg->dir_debugfs)
- goto out_debugfs;
-
- dbg->events_cnt_debugfs =
- debugfs_create_atomic_t("num_timeout_events", 0400,
- dbg->dir_debugfs,
- &dev->delay_drop.events_cnt);
- if (!dbg->events_cnt_debugfs)
- goto out_debugfs;
-
- dbg->rqs_cnt_debugfs =
- debugfs_create_atomic_t("num_rqs", 0400,
- dbg->dir_debugfs,
- &dev->delay_drop.rqs_cnt);
- if (!dbg->rqs_cnt_debugfs)
- goto out_debugfs;
-
- dbg->timeout_debugfs =
- debugfs_create_file("timeout", 0600,
- dbg->dir_debugfs,
- &dev->delay_drop,
- &fops_delay_drop_timeout);
- if (!dbg->timeout_debugfs)
- goto out_debugfs;
+ return;
- return 0;
+ root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root);
+ dev->delay_drop.dir_debugfs = root;
-out_debugfs:
- delay_drop_debugfs_cleanup(dev);
- return -ENOMEM;
+ debugfs_create_atomic_t("num_timeout_events", 0400, root,
+ &dev->delay_drop.events_cnt);
+ debugfs_create_atomic_t("num_rqs", 0400, root,
+ &dev->delay_drop.rqs_cnt);
+ debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
+ &fops_delay_drop_timeout);
}
static void init_delay_drop(struct mlx5_ib_dev *dev)
@@ -5821,8 +5786,7 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
atomic_set(&dev->delay_drop.rqs_cnt, 0);
atomic_set(&dev->delay_drop.events_cnt, 0);
- if (delay_drop_debugfs_init(dev))
- mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
+ delay_drop_debugfs_init(dev);
}
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
@@ -6140,11 +6104,10 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- srcu_barrier(&dev->mr_srcu);
- cleanup_srcu_struct(&dev->mr_srcu);
- }
+ WARN_ON(!xa_empty(&dev->odp_mkeys));
+ cleanup_srcu_struct(&dev->odp_srcu);
+ WARN_ON(!xa_empty(&dev->sig_mrs));
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
}
@@ -6196,15 +6159,15 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
+ xa_init(&dev->odp_mkeys);
+ xa_init(&dev->sig_mrs);
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- err = init_srcu_struct(&dev->mr_srcu);
- if (err)
- goto err_mp;
- }
+ err = init_srcu_struct(&dev->odp_srcu);
+ if (err)
+ goto err_mp;
return 0;
@@ -6264,6 +6227,9 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
.drain_rq = mlx5_ib_drain_rq,
.drain_sq = mlx5_ib_drain_sq,
+ .enable_driver = mlx5_ib_enable_driver,
+ .fill_res_entry = mlx5_ib_fill_res_entry,
+ .fill_stat_entry = mlx5_ib_fill_stat_entry,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mlx5_ib_get_dma_mr,
.get_link_layer = mlx5_ib_port_link_layer,
@@ -6310,6 +6276,7 @@ static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
.get_vf_config = mlx5_ib_get_vf_config,
+ .get_vf_guid = mlx5_ib_get_vf_guid,
.get_vf_stats = mlx5_ib_get_vf_stats,
.set_vf_guid = mlx5_ib_set_vf_guid,
.set_vf_link_state = mlx5_ib_set_vf_link_state,
@@ -6705,6 +6672,18 @@ static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
}
}
+int mlx5_ib_enable_driver(struct ib_device *dev)
+{
+ struct mlx5_ib_dev *mdev = to_mdev(dev);
+ int ret;
+
+ ret = mlx5_ib_test_wc(mdev);
+ mlx5_ib_dbg(mdev, "Write-Combining %s",
+ mdev->wc_support ? "supported" : "not supported");
+
+ return ret;
+}
+
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage)
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b5aece786b36..048f4e974a61 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -34,6 +34,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
+#include <linux/jiffies.h>
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
@@ -216,3 +217,201 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
*offset = buf_off >> ilog2(off_size);
return 0;
}
+
+#define WR_ID_BF 0xBF
+#define WR_ID_END 0xBAD
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ bool signaled)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ struct mlx5_bf *bf = &qp->bf;
+ __be32 mmio_wqe[16] = {};
+ unsigned long flags;
+ unsigned int idx;
+ int i;
+
+ if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ return -EIO;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+
+ memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
+ ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
+ (qp->trans_qp.base.mqp.qpn << 8));
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
+ qp->sq.wqe_head[idx] = qp->sq.head + 1;
+ qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+ qp->sq.head++;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell
+ */
+ wmb();
+ for (i = 0; i < 8; i++)
+ mlx5_write64(&mmio_wqe[i * 2],
+ bf->bfreg->map + bf->offset + i * 8);
+
+ bf->offset ^= bf->buf_size;
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return 0;
+}
+
+static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
+{
+ int ret;
+ struct ib_wc wc = {};
+ unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+
+ do {
+ ret = ib_poll_cq(cq, 1, &wc);
+ if (ret < 0 || wc.status)
+ return ret < 0 ? ret : -EINVAL;
+ if (ret)
+ break;
+ } while (!time_after(jiffies, end));
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ if (wc.wr_id != WR_ID_BF)
+ ret = 0;
+
+ return ret;
+}
+
+static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
+{
+ int err, i;
+
+ for (i = 0; i < TEST_WC_NUM_WQES; i++) {
+ err = post_send_nop(dev, qp, WR_ID_BF, false);
+ if (err)
+ return err;
+ }
+
+ return post_send_nop(dev, qp, WR_ID_END, true);
+}
+
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
+{
+ struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
+ int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
+ struct ib_qp_init_attr qp_init_attr = {
+ .cap = { .max_send_wr = TEST_WC_NUM_WQES },
+ .qp_type = IB_QPT_UD,
+ .sq_sig_type = IB_SIGNAL_REQ_WR,
+ .create_flags = MLX5_IB_QP_CREATE_WC_TEST,
+ };
+ struct ib_qp_attr qp_attr = { .port_num = 1 };
+ struct ib_device *ibdev = &dev->ib_dev;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev->mdev, bf))
+ return 0;
+
+ if (!dev->mdev->roce.roce_en &&
+ port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
+ if (mlx5_core_is_pf(dev->mdev))
+ dev->wc_support = true;
+ return 0;
+ }
+
+ ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
+ if (ret)
+ goto print_err;
+
+ if (!dev->wc_bfreg.wc)
+ goto out1;
+
+ pd = ib_alloc_pd(ibdev, 0);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto out1;
+ }
+
+ cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out2;
+ }
+
+ qp_init_attr.recv_cq = cq;
+ qp_init_attr.send_cq = cq;
+ qp = ib_create_qp(pd, &qp_init_attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto out3;
+ }
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = ib_modify_qp(qp, &qp_attr,
+ IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
+ IB_QP_QKEY);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret)
+ goto out4;
+
+ ret = test_wc_do_send(dev, qp);
+ if (ret < 0)
+ goto out4;
+
+ ret = test_wc_poll_cq_result(dev, cq);
+ if (ret > 0) {
+ dev->wc_support = true;
+ ret = 0;
+ }
+
+out4:
+ ib_destroy_qp(qp);
+out3:
+ ib_destroy_cq(cq);
+out2:
+ ib_dealloc_pd(pd);
+out1:
+ mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
+print_err:
+ if (ret)
+ mlx5_ib_err(
+ dev,
+ "Error %d while trying to test write-combining support\n",
+ ret);
+ return ret;
+}
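test_wc_poll_cq_result() above bounds its completion wait with a jiffies deadline instead of sleeping on a CQ event. A stripped-down sketch of that same time-bounded polling pattern, independent of the write-combining test specifics; the function name and timeout parameter are illustrative only:

	#include <linux/jiffies.h>

	static int example_poll_one_completion(struct ib_cq *cq,
					       unsigned long timeout_ms)
	{
		unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);
		struct ib_wc wc = {};
		int ret;

		do {
			ret = ib_poll_cq(cq, 1, &wc);	/* number of completions, or < 0 */
			if (ret < 0)
				return ret;
			if (ret)
				return wc.status == IB_WC_SUCCESS ? 0 : -EIO;
		} while (!time_after(jiffies, end));

		return -ETIMEDOUT;
	}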
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 1a98ee2e01c4..5986953ec2fa 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -247,12 +247,8 @@ struct mlx5_ib_flow_db {
* These flags are intended for internal use by the mlx5_ib driver, and they
* rely on the range reserved for that use in the ib_qp_create_flags enum.
*/
-
-/* Create a UD QP whose source QP number is 1 */
-static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
-{
- return IB_QP_CREATE_RESERVED_START;
-}
+#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
+#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
struct wr_list {
u16 opcode;
@@ -295,6 +291,7 @@ enum mlx5_ib_wq_flags {
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
struct mlx5_ib_rwq {
struct ib_wq ibwq;
@@ -585,6 +582,9 @@ struct mlx5_ib_dm {
IB_ACCESS_REMOTE_READ |\
IB_ZERO_BASED)
+#define mlx5_update_odp_stats(mr, counter_name, value) \
+ atomic64_add(value, &((mr)->odp_stats.counter_name))
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
void *descs;
@@ -606,7 +606,6 @@ struct mlx5_ib_mr {
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
- unsigned int live;
void *descs_alloc;
int access_flags; /* Needed for rereg MR */
@@ -618,10 +617,18 @@ struct mlx5_ib_mr {
u64 data_iova;
u64 pi_iova;
- atomic_t num_leaf_free;
- wait_queue_head_t q_leaf_free;
+ /* For ODP and implicit */
+ atomic_t num_deferred_work;
+ struct xarray implicit_children;
+ union {
+ struct rcu_head rcu;
+ struct list_head elm;
+ struct work_struct work;
+ } odp_destroy;
+ struct ib_odp_counters odp_stats;
+ bool is_odp_implicit;
+
struct mlx5_async_work cb_work;
- atomic_t num_pending_prefetch;
};
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -792,13 +799,6 @@ enum {
MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};
-struct mlx5_ib_dbg_delay_drop {
- struct dentry *dir_debugfs;
- struct dentry *rqs_cnt_debugfs;
- struct dentry *events_cnt_debugfs;
- struct dentry *timeout_debugfs;
-};
-
struct mlx5_ib_delay_drop {
struct mlx5_ib_dev *dev;
struct work_struct delay_drop_work;
@@ -808,7 +808,7 @@ struct mlx5_ib_delay_drop {
bool activate;
atomic_t events_cnt;
atomic_t rqs_cnt;
- struct mlx5_ib_dbg_delay_drop *dbg;
+ struct dentry *dir_debugfs;
};
enum mlx5_ib_stages {
@@ -957,7 +957,11 @@ struct mlx5_ib_dev {
/* serialize update of capability mask
*/
struct mutex cap_mask_mutex;
- bool ib_active;
+ u8 ib_active:1;
+ u8 fill_delay:1;
+ u8 is_rep:1;
+ u8 lag_active:1;
+ u8 wc_support:1;
struct umr_common umrc;
/* sync used page count stats
*/
@@ -966,7 +970,6 @@ struct mlx5_ib_dev {
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
struct mutex slow_path_mutex;
- int fill_delay;
struct ib_odp_caps odp_caps;
u64 odp_max_size;
struct mlx5_ib_pf_eq odp_pf_eq;
@@ -975,7 +978,9 @@ struct mlx5_ib_dev {
* Sleepable RCU that prevents destruction of MRs while they are still
* being used by a page fault handler.
*/
- struct srcu_struct mr_srcu;
+ struct srcu_struct odp_srcu;
+ struct xarray odp_mkeys;
+
u32 null_mkey;
struct mlx5_ib_flow_db *flow_db;
/* protect resources needed as part of reset flow */
@@ -984,11 +989,10 @@ struct mlx5_ib_dev {
/* Array with num_ports elements */
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg wc_bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
const struct mlx5_ib_profile *profile;
- bool is_rep;
- int lag_active;
struct mlx5_ib_lb_state lb;
u8 umr_fence;
@@ -999,6 +1003,8 @@ struct mlx5_ib_dev {
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
struct mlx5_devx_event_table devx_event_table;
+
+ struct xarray sig_mrs;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1162,6 +1168,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1179,9 +1186,8 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
@@ -1223,6 +1229,8 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
+
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
@@ -1235,7 +1243,6 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
@@ -1251,8 +1258,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
- unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
size_t nentries, struct mlx5_ib_mr *mr, int flags);
@@ -1282,11 +1287,10 @@ mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
{
return -EOPNOTSUPP;
}
-static inline void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp,
- unsigned long start,
- unsigned long end){};
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
+
/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
@@ -1300,6 +1304,9 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
@@ -1334,6 +1341,10 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
+int mlx5_ib_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
+int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
@@ -1349,7 +1360,7 @@ struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_flow_act *flow_act, u32 counter_id,
void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id);
+bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
@@ -1491,4 +1502,7 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
return true;
}
+
+int mlx5_ib_enable_driver(struct ib_device *dev);
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 7019c12005f4..ea8bfc3e2d8d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -50,7 +50,6 @@ enum {
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
@@ -59,13 +58,9 @@ static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- /* Wait until all page fault handlers using the mr complete. */
- synchronize_srcu(&dev->mr_srcu);
-
- return err;
+ return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
@@ -94,8 +89,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct xarray *mkeys = &dev->mdev->priv.mkey_table;
- int err;
spin_lock_irqsave(&ent->lock, flags);
ent->pending--;
@@ -122,13 +115,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
ent->size++;
spin_unlock_irqrestore(&ent->lock, flags);
- xa_lock_irqsave(mkeys, flags);
- err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_ATOMIC));
- xa_unlock_irqrestore(mkeys, flags);
- if (err)
- pr_err("Error inserting to mkey tree. 0x%x\n", -err);
-
if (!completion_done(&ent->compl))
complete(&ent->compl);
}
@@ -218,9 +204,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- synchronize_srcu(&dev->mr_srcu);
-
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
@@ -428,7 +411,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
ent = &cache->ent[entry];
@@ -511,7 +494,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
c = order2idx(dev, mr->order);
WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
- if (unreg_umr(dev, mr)) {
+ if (mlx5_mr_cache_invalidate(mr)) {
mr->allocated_from_cache = false;
destroy_mkey(dev, mr);
ent = &cache->ent[c];
@@ -555,10 +538,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- synchronize_srcu(&dev->mr_srcu);
-#endif
-
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
@@ -679,6 +658,20 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
return 0;
}
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+ struct ib_pd *pd)
+{
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+}
+
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -702,16 +695,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
MLX5_SET(mkc, mkc, length64, 1);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, 0);
+ set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -764,7 +749,8 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (access_flags & IB_ACCESS_ON_DEMAND) {
struct ib_umem_odp *odp;
- odp = ib_umem_odp_get(udata, start, length, access_flags);
+ odp = ib_umem_odp_get(udata, start, length, access_flags,
+ &mlx5_mn_ops);
if (IS_ERR(odp)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
PTR_ERR(odp));
@@ -779,7 +765,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (order)
*order = ilog2(roundup_pow_of_two(*ncont));
} else {
- u = ib_umem_get(udata, start, length, access_flags, 0);
+ u = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(u)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
return PTR_ERR(u);
@@ -1169,16 +1155,8 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
MLX5_SET64(mkc, mkc, len, length);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, start_addr);
+ set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -1337,10 +1315,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (is_odp_mr(mr)) {
to_ib_umem_odp(mr->umem)->private = mr;
- atomic_set(&mr->num_pending_prefetch, 0);
+ atomic_set(&mr->num_deferred_work, 0);
+ err = xa_err(xa_store(&dev->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
+ GFP_KERNEL));
+ if (err) {
+ dereg_mr(dev, mr);
+ return ERR_PTR(err);
+ }
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- smp_store_release(&mr->live, 1);
return &mr->ibmr;
error:
@@ -1348,22 +1331,29 @@ error:
return ERR_PTR(err);
}
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+/**
+ * mlx5_mr_cache_invalidate - Fence all DMA on the MR
+ * @mr: The MR to fence
+ *
+ * Upon return the NIC will not be doing any DMA to the pages under the MR,
+ * and any DMA inprogress will be completed. Failure of this function
+ * indicates the HW has failed catastrophically.
+ */
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
{
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_umr_wr umrwr = {};
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
- umrwr.pd = dev->umrc.pd;
+ umrwr.pd = mr->dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
umrwr.ignore_free_state = 1;
- return mlx5_ib_post_send_wait(dev, &umrwr);
+ return mlx5_ib_post_send_wait(mr->dev, &umrwr);
}
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
@@ -1447,7 +1437,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* UMR can't be used - MKey needs to be replaced.
*/
if (mr->allocated_from_cache)
- err = unreg_umr(dev, mr);
+ err = mlx5_mr_cache_invalidate(mr);
else
err = destroy_mkey(dev, mr);
if (err)
@@ -1560,6 +1550,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
+ xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
kfree(mr->sig);
mr->sig = NULL;
}
@@ -1575,54 +1566,20 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int npages = mr->npages;
struct ib_umem *umem = mr->umem;
- if (is_odp_mr(mr)) {
- struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
-
- /* Prevent new page faults and
- * prefetch requests from succeeding
- */
- WRITE_ONCE(mr->live, 0);
-
- /* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&dev->mr_srcu);
-
- /* dequeue pending prefetch requests for the mr */
- if (atomic_read(&mr->num_pending_prefetch))
- flush_workqueue(system_unbound_wq);
- WARN_ON(atomic_read(&mr->num_pending_prefetch));
-
- /* Destroy all page mappings */
- if (!umem_odp->is_implicit_odp)
- mlx5_ib_invalidate_range(umem_odp,
- ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- else
- mlx5_ib_free_implicit_mr(mr);
- /*
- * We kill the umem before the MR for ODP,
- * so that there will not be any invalidations in
- * flight, looking at the *mr struct.
- */
- ib_umem_odp_release(umem_odp);
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
-
- /* Avoid double-freeing the umem. */
- umem = NULL;
- }
+ /* Stop all DMA */
+ if (is_odp_mr(mr))
+ mlx5_ib_fence_odp_mr(mr);
+ else
+ clean_mr(dev, mr);
- clean_mr(dev, mr);
+ if (mr->allocated_from_cache)
+ mlx5_mr_cache_free(dev, mr);
+ else
+ kfree(mr);
- /*
- * We should unregister the DMA address from the HCA before
- * remove the DMA mapping.
- */
- mlx5_mr_cache_free(dev, mr);
ib_umem_release(umem);
- if (umem)
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
- if (!mr->allocated_from_cache)
- kfree(mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1634,6 +1591,11 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
}
+ if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
+ mlx5_ib_free_implicit_mr(mmr);
+ return 0;
+ }
+
dereg_mr(to_mdev(ibmr->device), mmr);
return 0;
@@ -1797,8 +1759,15 @@ static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
if (err)
goto err_free_mtt_mr;
+ err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, GFP_KERNEL));
+ if (err)
+ goto err_free_descs;
return 0;
+err_free_descs:
+ destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
err_free_mtt_mr:
dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
mr->mtt_mr = NULL;
@@ -1951,9 +1920,19 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
}
}
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ err = xa_err(xa_store(&dev->odp_mkeys,
+ mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
+ GFP_KERNEL));
+ if (err)
+ goto free_mkey;
+ }
+
kfree(in);
return &mw->ibmw;
+free_mkey:
+ mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
kfree(mw);
kfree(in);
@@ -1967,13 +1946,12 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
int err;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- xa_erase_irq(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mmw->mmkey.key));
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
/*
* pagefault_single_data_segment() may be accessing mmw under
* SRCU if the user bound an ODP MR to this MW.
*/
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->odp_srcu);
}
err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
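The mr.c hunks above replace the old mdev-wide mkey_table with two per-device xarrays, odp_mkeys and sig_mrs, both keyed by mlx5_base_mkey(). A hedged sketch of the lookup half of that pattern, mirroring the xa_store()/xa_erase() calls shown in the hunks; the helper name is invented:

	static struct mlx5_core_mkey *
	example_find_odp_mkey(struct mlx5_ib_dev *dev, u32 key)
	{
		/*
		 * Writers use xa_store()/xa_erase() with the same mlx5_base_mkey()
		 * key; readers in the page-fault path hold dev->odp_srcu, per the
		 * synchronize_srcu(&dev->odp_srcu) calls in the hunks above.
		 */
		return xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
	}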
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3f9478d19376..f924250f80c2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -93,182 +93,185 @@ struct mlx5_pagefault {
static u64 mlx5_imr_ksm_entries;
-static int check_parent(struct ib_umem_odp *odp,
- struct mlx5_ib_mr *parent)
+void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *imr, int flags)
{
- struct mlx5_ib_mr *mr = odp->private;
-
- return mr && mr->parent == parent && !odp->dying;
-}
-
-static struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
-{
- if (WARN_ON(!mr || !is_odp_mr(mr)))
- return NULL;
-
- return to_ib_umem_odp(mr->umem)->per_mm;
-}
-
-static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
-{
- struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
- struct ib_ucontext_per_mm *per_mm = odp->per_mm;
- struct rb_node *rb;
-
- down_read(&per_mm->umem_rwsem);
- while (1) {
- rb = rb_next(&odp->interval_tree.rb);
- if (!rb)
- goto not_found;
- odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (check_parent(odp, parent))
- goto end;
- }
-not_found:
- odp = NULL;
-end:
- up_read(&per_mm->umem_rwsem);
- return odp;
-}
-
-static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
- struct mlx5_ib_mr *parent)
-{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
- struct ib_umem_odp *odp;
- struct rb_node *rb;
-
- down_read(&per_mm->umem_rwsem);
- odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
- if (!odp)
- goto end;
-
- while (1) {
- if (check_parent(odp, parent))
- goto end;
- rb = rb_next(&odp->interval_tree.rb);
- if (!rb)
- goto not_found;
- odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (ib_umem_start(odp) > start + length)
- goto not_found;
- }
-not_found:
- odp = NULL;
-end:
- up_read(&per_mm->umem_rwsem);
- return odp;
-}
-
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
- size_t nentries, struct mlx5_ib_mr *mr, int flags)
-{
- struct ib_pd *pd = mr->ibmr.pd;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct ib_umem_odp *odp;
- unsigned long va;
- int i;
+ struct mlx5_klm *end = pklm + nentries;
if (flags & MLX5_IB_UPD_XLT_ZAP) {
- for (i = 0; i < nentries; i++, pklm++) {
+ for (; pklm != end; pklm++, idx++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = cpu_to_be32(dev->null_mkey);
+ pklm->key = cpu_to_be32(imr->dev->null_mkey);
pklm->va = 0;
}
return;
}
/*
- * The locking here is pretty subtle. Ideally the implicit children
- * list would be protected by the umem_mutex, however that is not
+ * The locking here is pretty subtle. Ideally the implicit_children
+ * xarray would be protected by the umem_mutex, however that is not
* possible. Instead this uses a weaker update-then-lock pattern:
*
* srcu_read_lock()
- * <change children list>
+ * xa_store()
* mutex_lock(umem_mutex)
* mlx5_ib_update_xlt()
* mutex_unlock(umem_mutex)
* destroy lkey
*
- * ie any change the children list must be followed by the locked
- * update_xlt before destroying.
+	 * i.e. any change to the xarray must be followed by the locked
+	 * update_xlt before destroying.
*
* The umem_mutex provides the acquire/release semantic needed to make
- * the children list visible to a racing thread. While SRCU is not
+ * the xa_store() visible to a racing thread. While SRCU is not
* technically required, using it gives consistent use of the SRCU
- * locking around the children list.
+ * locking around the xarray.
*/
- lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
- lockdep_assert_held(&mr->dev->mr_srcu);
+ lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
+ lockdep_assert_held(&imr->dev->odp_srcu);
- odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
- nentries * MLX5_IMR_MTT_SIZE, mr);
+ for (; pklm != end; pklm++, idx++) {
+ struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
- for (i = 0; i < nentries; i++, pklm++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- va = (offset + i) * MLX5_IMR_MTT_SIZE;
- if (odp && ib_umem_start(odp) == va) {
- struct mlx5_ib_mr *mtt = odp->private;
-
+ if (mtt) {
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
- odp = odp_next(odp);
+ pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
} else {
- pklm->key = cpu_to_be32(dev->null_mkey);
+ pklm->key = cpu_to_be32(imr->dev->null_mkey);
+ pklm->va = 0;
}
- mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
- i, va, be32_to_cpu(pklm->key));
}
}
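
The conversion above replaces the interval rb-tree walk (odp_lookup()/odp_next()) with direct xarray indexing. For reference, a minimal sketch of the xarray calls this series leans on — struct child, the children array and the index are illustrative only, not driver code:

#include <linux/xarray.h>

struct child { int dummy; };

static DEFINE_XARRAY(children);

static int publish_child(unsigned long idx, struct child *c)
{
	struct child *old;

	/* publish: succeeds only if the slot is still empty */
	old = xa_cmpxchg(&children, idx, NULL, c, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);	/* allocation failure */
	if (old)
		return -EEXIST;		/* somebody else published first */

	/* lookup is a plain load instead of a tree walk */
	WARN_ON(xa_load(&children, idx) != c);

	/* unpublish; returns the entry that was removed */
	xa_erase(&children, idx);
	return 0;
}
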
-static void mr_leaf_free_action(struct work_struct *work)
+static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ /* Ensure mlx5_ib_invalidate_range() will not touch the MR any more */
+ mutex_lock(&odp->umem_mutex);
+ if (odp->npages) {
+ mlx5_mr_cache_invalidate(mr);
+ ib_umem_odp_unmap_dma_pages(odp, ib_umem_start(odp),
+ ib_umem_end(odp));
+ WARN_ON(odp->npages);
+ }
+ odp->private = NULL;
+ mutex_unlock(&odp->umem_mutex);
+
+ if (!mr->allocated_from_cache) {
+ mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey);
+ WARN_ON(mr->descs);
+ }
+}
+
+/*
+ * This must be called after the mr has been removed from implicit_children
+ * and the SRCU synchronized. NOTE: the MR does not necessarily have to be
+ * empty here; parallel page faults could have raced with the free process
+ * and added pages to it.
+ */
+static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
{
- struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
- int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
- struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
+ struct mlx5_ib_mr *imr = mr->parent;
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
int srcu_key;
- mr->parent = NULL;
- synchronize_srcu(&mr->dev->mr_srcu);
+	/* implicit child MRs are not allowed to have deferred work */
+ WARN_ON(atomic_read(&mr->num_deferred_work));
- if (smp_load_acquire(&imr->live)) {
- srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
+ if (need_imr_xlt) {
+ srcu_key = srcu_read_lock(&mr->dev->odp_srcu);
mutex_lock(&odp_imr->umem_mutex);
- mlx5_ib_update_xlt(imr, idx, 1, 0,
+ mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
- srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
+ srcu_read_unlock(&mr->dev->odp_srcu, srcu_key);
}
- ib_umem_odp_release(odp);
+
+ dma_fence_odp_mr(mr);
+
+ mr->parent = NULL;
mlx5_mr_cache_free(mr->dev, mr);
+ ib_umem_odp_release(odp);
+ atomic_dec(&imr->num_deferred_work);
+}
+
+static void free_implicit_child_mr_work(struct work_struct *work)
+{
+ struct mlx5_ib_mr *mr =
+ container_of(work, struct mlx5_ib_mr, odp_destroy.work);
- if (atomic_dec_and_test(&imr->num_leaf_free))
- wake_up(&imr->q_leaf_free);
+ free_implicit_child_mr(mr, true);
}
-void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
- unsigned long end)
+static void free_implicit_child_mr_rcu(struct rcu_head *head)
{
+ struct mlx5_ib_mr *mr =
+ container_of(head, struct mlx5_ib_mr, odp_destroy.rcu);
+
+	/* Freeing an MR is a sleeping operation, so bounce to a work queue */
+ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+ queue_work(system_unbound_wq, &mr->odp_destroy.work);
+}
+
+static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ struct mlx5_ib_mr *imr = mr->parent;
+
+ xa_lock(&imr->implicit_children);
+ /*
+	 * This can race with mlx5_ib_free_implicit_mr(); the first one to
+	 * reach the xa lock wins the race and destroys the MR.
+ */
+ if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) !=
+ mr)
+ goto out_unlock;
+
+ atomic_inc(&imr->num_deferred_work);
+ call_srcu(&mr->dev->odp_srcu, &mr->odp_destroy.rcu,
+ free_implicit_child_mr_rcu);
+
+out_unlock:
+ xa_unlock(&imr->implicit_children);
+}
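
destroy_unused_implicit_child_mr() combines two common kernel patterns: only the thread whose __xa_cmpxchg() removes the entry under xa_lock owns the destruction, and the SRCU callback merely bounces to a workqueue because the real teardown sleeps. A sketch of that second pattern, with hypothetical obj/my_srcu names that are not driver code:

#include <linux/srcu.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct obj {
	struct rcu_head rcu;
	struct work_struct work;
};

static void obj_free_work(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	/* process context: may take mutexes, issue commands, sleep */
	kfree(o);
}

static void obj_free_rcu(struct rcu_head *head)
{
	struct obj *o = container_of(head, struct obj, rcu);

	/* SRCU callbacks must not block, so defer the sleeping teardown */
	INIT_WORK(&o->work, obj_free_work);
	queue_work(system_unbound_wq, &o->work);
}

/* after unpublishing o from every lookup structure:		*/
/*	call_srcu(&my_srcu, &o->rcu, obj_free_rcu);		*/
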
+
+static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(mni, struct ib_umem_odp, notifier);
struct mlx5_ib_mr *mr;
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
+ u64 invalidations = 0;
+ unsigned long start;
+ unsigned long end;
int in_block = 0;
u64 addr;
- if (!umem_odp) {
- pr_err("invalidation called on NULL umem or non-ODP umem\n");
- return;
- }
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+ mutex_lock(&umem_odp->umem_mutex);
+ mmu_interval_set_seq(mni, cur_seq);
+ /*
+	 * If npages is zero then umem_odp->private may not be set up yet.
+	 * Setup does not complete until after the first page is mapped for DMA.
+ */
+ if (!umem_odp->npages)
+ goto out;
mr = umem_odp->private;
- if (!mr || !mr->ibmr.pd)
- return;
-
- start = max_t(u64, ib_umem_start(umem_odp), start);
- end = min_t(u64, ib_umem_end(umem_odp), end);
+ start = max_t(u64, ib_umem_start(umem_odp), range->start);
+ end = min_t(u64, ib_umem_end(umem_odp), range->end);
/*
* Iteration one - zap the HW's MTTs. The notifiers_count ensures that
@@ -276,7 +279,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
 * overwrite the same MTTs. Concurrent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
- mutex_lock(&umem_odp->umem_mutex);
for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
@@ -291,6 +293,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
blk_start_idx = idx;
in_block = 1;
}
+
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
} else {
u64 umr_offset = idx & umr_block_mask;
@@ -308,6 +313,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+
+ mlx5_update_odp_stats(mr, invalidations, invalidations);
+
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -316,16 +324,17 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
- if (unlikely(!umem_odp->npages && mr->parent &&
- !umem_odp->dying)) {
- WRITE_ONCE(mr->live, 0);
- umem_odp->dying = 1;
- atomic_inc(&mr->parent->num_leaf_free);
- schedule_work(&umem_odp->work);
- }
+ if (unlikely(!umem_odp->npages && mr->parent))
+ destroy_unused_implicit_child_mr(mr);
+out:
mutex_unlock(&umem_odp->umem_mutex);
+ return true;
}
+const struct mmu_interval_notifier_ops mlx5_mn_ops = {
+ .invalidate = mlx5_ib_invalidate_range,
+};
+
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
struct ib_odp_caps *caps = &dev->odp_caps;
@@ -390,8 +399,6 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
!MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
-
- return;
}
static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
@@ -416,257 +423,226 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
wq_num, err);
}
-static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
- struct ib_umem_odp *umem_odp,
- bool ksm, int access_flags)
+static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ unsigned long idx)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem_odp *odp;
struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *ret;
int err;
- mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
- MLX5_IMR_MTT_CACHE_ENTRY);
+ odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
+ idx * MLX5_IMR_MTT_SIZE,
+ MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
+ ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY);
if (IS_ERR(mr))
- return mr;
-
- mr->ibmr.pd = pd;
-
- mr->dev = dev;
- mr->access_flags = access_flags;
- mr->mmkey.iova = 0;
- mr->umem = &umem_odp->umem;
-
- if (ksm) {
- err = mlx5_ib_update_xlt(mr, 0,
- mlx5_imr_ksm_entries,
- MLX5_KSM_PAGE_SHIFT,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ZAP |
- MLX5_IB_UPD_XLT_ENABLE);
-
- } else {
- err = mlx5_ib_update_xlt(mr, 0,
- MLX5_IMR_MTT_ENTRIES,
- PAGE_SHIFT,
- MLX5_IB_UPD_XLT_ZAP |
- MLX5_IB_UPD_XLT_ENABLE |
- MLX5_IB_UPD_XLT_ATOMIC);
- }
-
- if (err)
- goto fail;
+ goto out_umem;
+ mr->ibmr.pd = imr->ibmr.pd;
+ mr->access_flags = imr->access_flags;
+ mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
-
- mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
- mr->mmkey.key, dev->mdev, mr);
-
- return mr;
-
-fail:
- mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
- mlx5_mr_cache_free(dev, mr);
-
- return ERR_PTR(err);
-}
-
-static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
- u64 io_virt, size_t bcnt)
-{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
- struct ib_umem_odp *odp, *result = NULL;
- struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
- u64 addr = io_virt & MLX5_IMR_MTT_MASK;
- int nentries = 0, start_idx = 0, ret;
- struct mlx5_ib_mr *mtt;
-
- mutex_lock(&odp_mr->umem_mutex);
- odp = odp_lookup(addr, 1, mr);
-
- mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
- io_virt, bcnt, addr, odp);
-
-next_mr:
- if (likely(odp)) {
- if (nentries)
- nentries++;
- } else {
- odp = ib_umem_odp_alloc_child(odp_mr, addr, MLX5_IMR_MTT_SIZE);
- if (IS_ERR(odp)) {
- mutex_unlock(&odp_mr->umem_mutex);
- return ERR_CAST(odp);
- }
-
- mtt = implicit_mr_alloc(mr->ibmr.pd, odp, 0,
- mr->access_flags);
- if (IS_ERR(mtt)) {
- mutex_unlock(&odp_mr->umem_mutex);
- ib_umem_odp_release(odp);
- return ERR_CAST(mtt);
- }
-
- odp->private = mtt;
- mtt->umem = &odp->umem;
- mtt->mmkey.iova = addr;
- mtt->parent = mr;
- INIT_WORK(&odp->work, mr_leaf_free_action);
-
- smp_store_release(&mtt->live, 1);
-
- if (!nentries)
- start_idx = addr >> MLX5_IMR_MTT_SHIFT;
- nentries++;
- }
-
- /* Return first odp if region not covered by single one */
- if (likely(!result))
- result = odp;
-
- addr += MLX5_IMR_MTT_SIZE;
- if (unlikely(addr < io_virt + bcnt)) {
- odp = odp_next(odp);
- if (odp && ib_umem_start(odp) != addr)
- odp = NULL;
- goto next_mr;
+ mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->parent = imr;
+ odp->private = mr;
+
+ err = mlx5_ib_update_xlt(mr, 0,
+ MLX5_IMR_MTT_ENTRIES,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out_mr;
}
- if (unlikely(nentries)) {
- ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ATOMIC);
- if (ret) {
- mlx5_ib_err(dev, "Failed to update PAS\n");
- result = ERR_PTR(ret);
+ /*
+	 * Once the store to either xarray completes, any error unwind has to
+	 * use synchronize_srcu(). Avoid this with xa_reserve().
+ */
+ ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
+ GFP_KERNEL);
+ if (unlikely(ret)) {
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ goto out_mr;
}
+ /*
+ * Another thread beat us to creating the child mr, use
+ * theirs.
+ */
+ goto out_mr;
}
- mutex_unlock(&odp_mr->umem_mutex);
- return result;
+ mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr);
+ return mr;
+
+out_mr:
+ mlx5_mr_cache_free(imr->dev, mr);
+out_umem:
+ ib_umem_odp_release(odp);
+ return ret;
}
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags)
{
- struct mlx5_ib_mr *imr;
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *imr;
+ int err;
umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
- imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
+ imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY);
if (IS_ERR(imr)) {
- ib_umem_odp_release(umem_odp);
- return ERR_CAST(imr);
+ err = PTR_ERR(imr);
+ goto out_umem;
}
+ imr->ibmr.pd = &pd->ibpd;
+ imr->access_flags = access_flags;
+ imr->mmkey.iova = 0;
imr->umem = &umem_odp->umem;
- init_waitqueue_head(&imr->q_leaf_free);
- atomic_set(&imr->num_leaf_free, 0);
- atomic_set(&imr->num_pending_prefetch, 0);
- smp_store_release(&imr->live, 1);
+ imr->ibmr.lkey = imr->mmkey.key;
+ imr->ibmr.rkey = imr->mmkey.key;
+ imr->umem = &umem_odp->umem;
+ imr->is_odp_implicit = true;
+ atomic_set(&imr->num_deferred_work, 0);
+ xa_init(&imr->implicit_children);
+
+ err = mlx5_ib_update_xlt(imr, 0,
+ mlx5_imr_ksm_entries,
+ MLX5_KSM_PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err)
+ goto out_mr;
+ err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
+ &imr->mmkey, GFP_KERNEL));
+ if (err)
+ goto out_mr;
+
+ mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
return imr;
+out_mr:
+ mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
+ mlx5_mr_cache_free(dev, imr);
+out_umem:
+ ib_umem_odp_release(umem_odp);
+ return ERR_PTR(err);
}
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
- struct rb_node *node;
+ struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct mlx5_ib_dev *dev = imr->dev;
+ struct list_head destroy_list;
+ struct mlx5_ib_mr *mtt;
+ struct mlx5_ib_mr *tmp;
+ unsigned long idx;
- down_read(&per_mm->umem_rwsem);
- for (node = rb_first_cached(&per_mm->umem_tree); node;
- node = rb_next(node)) {
- struct ib_umem_odp *umem_odp =
- rb_entry(node, struct ib_umem_odp, interval_tree.rb);
- struct mlx5_ib_mr *mr = umem_odp->private;
+ INIT_LIST_HEAD(&destroy_list);
- if (mr->parent != imr)
- continue;
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
+ /*
+	 * This stops the SRCU-protected page fault path from touching either
+ * the imr or any children. The page fault path can only reach the
+ * children xarray via the imr.
+ */
+ synchronize_srcu(&dev->odp_srcu);
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
+ xa_lock(&imr->implicit_children);
+ xa_for_each (&imr->implicit_children, idx, mtt) {
+ __xa_erase(&imr->implicit_children, idx);
+ list_add(&mtt->odp_destroy.elm, &destroy_list);
+ }
+ xa_unlock(&imr->implicit_children);
- if (umem_odp->dying) {
- mutex_unlock(&umem_odp->umem_mutex);
- continue;
- }
+ /*
+ * num_deferred_work can only be incremented inside the odp_srcu, or
+ * under xa_lock while the child is in the xarray. Thus at this point
+ * it is only decreasing, and all work holding it is now on the wq.
+ */
+ if (atomic_read(&imr->num_deferred_work)) {
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&imr->num_deferred_work));
+ }
+
+ /*
+ * Fence the imr before we destroy the children. This allows us to
+ * skip updating the XLT of the imr during destroy of the child mkey
+ * the imr points to.
+ */
+ mlx5_mr_cache_invalidate(imr);
- umem_odp->dying = 1;
- atomic_inc(&imr->num_leaf_free);
- schedule_work(&umem_odp->work);
- mutex_unlock(&umem_odp->umem_mutex);
+ list_for_each_entry_safe (mtt, tmp, &destroy_list, odp_destroy.elm)
+ free_implicit_child_mr(mtt, false);
+
+ mlx5_mr_cache_free(dev, imr);
+ ib_umem_odp_release(odp_imr);
+}
+
+/**
+ * mlx5_ib_fence_odp_mr - Stop all access to the ODP MR
+ * @mr: MR to fence
+ *
+ * On return no parallel threads will be touching this MR and no DMA will be
+ * active.
+ */
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
+{
+ /* Prevent new page faults and prefetch requests from succeeding */
+ xa_erase(&mr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
+
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&mr->dev->odp_srcu);
+
+ if (atomic_read(&mr->num_deferred_work)) {
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&mr->num_deferred_work));
}
- up_read(&per_mm->umem_rwsem);
- wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
+ dma_fence_odp_mr(mr);
}
-#define MLX5_PF_FLAGS_PREFETCH BIT(0)
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
-static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- u64 io_virt, size_t bcnt, u32 *bytes_mapped,
- u32 flags)
+static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
+ u64 user_va, size_t bcnt, u32 *bytes_mapped,
+ u32 flags)
{
- int npages = 0, current_seq, page_shift, ret, np;
- struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
+ int page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
+ unsigned long current_seq;
u64 access_mask;
u64 start_idx, page_mask;
- struct ib_umem_odp *odp;
- size_t size;
-
- if (odp_mr->is_implicit_odp) {
- odp = implicit_mr_get_data(mr, io_virt, bcnt);
-
- if (IS_ERR(odp))
- return PTR_ERR(odp);
- mr = odp->private;
- } else {
- odp = odp_mr;
- }
-
-next_mr:
- size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
page_shift = odp->page_shift;
page_mask = ~(BIT(page_shift) - 1);
- start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+ start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
- if (prefetch && !downgrade && !odp->umem.writable) {
- /* prefetch with write-access must
- * be supported by the MR
- */
- ret = -EINVAL;
- goto out;
- }
-
if (odp->umem.writable && !downgrade)
access_mask |= ODP_WRITE_ALLOWED_BIT;
- current_seq = READ_ONCE(odp->notifiers_seq);
- /*
- * Ensure the sequence number is valid for some time before we call
- * gup.
- */
- smp_rmb();
+ current_seq = mmu_interval_read_begin(&odp->notifier);
- ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask,
- current_seq);
-
- if (ret < 0)
- goto out;
-
- np = ret;
+ np = ib_umem_odp_map_dma_pages(odp, user_va, bcnt, access_mask,
+ current_seq);
+ if (np < 0)
+ return np;
mutex_lock(&odp->umem_mutex);
- if (!ib_umem_mmu_notifier_retry(odp, current_seq)) {
+ if (!mmu_interval_read_retry(&odp->notifier, current_seq)) {
/*
* No need to check whether the MTTs really belong to
* this MR, since ib_umem_odp_map_dma_pages already
@@ -681,53 +657,127 @@ next_mr:
if (ret < 0) {
if (ret != -EAGAIN)
- mlx5_ib_err(dev, "Failed to update mkey page tables\n");
+ mlx5_ib_err(mr->dev,
+ "Failed to update mkey page tables\n");
goto out;
}
if (bytes_mapped) {
u32 new_mappings = (np << page_shift) -
- (io_virt - round_down(io_virt, 1 << page_shift));
- *bytes_mapped += min_t(u32, new_mappings, size);
+ (user_va - round_down(user_va, 1 << page_shift));
+
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
}
- npages += np << (page_shift - PAGE_SHIFT);
- bcnt -= size;
+ return np << (page_shift - PAGE_SHIFT);
+
+out:
+ return ret;
+}
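
pagefault_real_mr() now follows the standard mmu_interval_notifier collision-retry protocol: sample the sequence with mmu_interval_read_begin(), map pages outside the lock, then commit only if mmu_interval_read_retry() confirms no invalidation ran in between, under the same lock the invalidate callback takes (umem_mutex here); the driver propagates -EAGAIN to its caller instead of looping in place. A generic sketch, with driver_lock, map_pages() and program_device() as placeholders for umem_mutex, ib_umem_odp_map_dma_pages() and mlx5_ib_update_xlt():

static DEFINE_MUTEX(driver_lock);

static void map_pages(void) { }		/* placeholder: pin/map user pages */
static void program_device(void) { }	/* placeholder: write device page table */

static int fault_range(struct mmu_interval_notifier *mni)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(mni);

	map_pages();			/* may sleep; can race with invalidation */

	mutex_lock(&driver_lock);
	if (mmu_interval_read_retry(mni, seq)) {
		/* an invalidation ran while we were mapping: start over */
		mutex_unlock(&driver_lock);
		goto again;
	}
	program_device();		/* commit to the device page tables */
	mutex_unlock(&driver_lock);
	return 0;
}
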
+
+static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
+ struct ib_umem_odp *odp_imr, u64 user_va,
+ size_t bcnt, u32 *bytes_mapped, u32 flags)
+{
+ unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
+ unsigned long upd_start_idx = end_idx + 1;
+ unsigned long upd_len = 0;
+ unsigned long npages = 0;
+ int err;
+ int ret;
- if (unlikely(bcnt)) {
- struct ib_umem_odp *next;
+ if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
+ mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
+ return -EFAULT;
- io_virt += size;
- next = odp_next(odp);
- if (unlikely(!next || ib_umem_start(next) != io_virt)) {
- mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
- io_virt, next);
- return -EAGAIN;
+ /* Fault each child mr that intersects with our interval. */
+ while (bcnt) {
+ unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
+ struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *mtt;
+ u64 len;
+
+ mtt = xa_load(&imr->implicit_children, idx);
+ if (unlikely(!mtt)) {
+ mtt = implicit_get_child_mr(imr, idx);
+ if (IS_ERR(mtt)) {
+ ret = PTR_ERR(mtt);
+ goto out;
+ }
+ upd_start_idx = min(upd_start_idx, idx);
+ upd_len = idx - upd_start_idx + 1;
}
- odp = next;
- mr = odp->private;
- goto next_mr;
+
+ umem_odp = to_ib_umem_odp(mtt->umem);
+ len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
+ user_va;
+
+ ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
+ bytes_mapped, flags);
+ if (ret < 0)
+ goto out;
+ user_va += len;
+ bcnt -= len;
+ npages += ret;
}
- return npages;
+ ret = npages;
+ /*
+	 * Any time implicit_children is changed we must perform an
+	 * update of the xlt before exiting to ensure the HW and
+	 * implicit_children remain synchronized.
+ */
out:
- if (ret == -EAGAIN) {
- unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
- if (!wait_for_completion_timeout(&odp->notifier_completion,
- timeout)) {
- mlx5_ib_warn(
- dev,
- "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
- current_seq, odp->notifiers_seq,
- odp->notifiers_count);
- }
- }
+ if (likely(!upd_len))
+ return ret;
+ /*
+	 * Note that this is not strictly ordered: the KSM is updated after
+	 * implicit_children, so a parallel page fault could see an MR that
+	 * is not yet visible in the KSM. This is similar to a parallel page
+	 * fault seeing an MR that is being concurrently removed
+ * from the KSM. Both of these improbable situations are resolved
+ * safely by resuming the HW and then taking another page fault. The
+ * next pagefault handler will see the new information.
+ */
+ mutex_lock(&odp_imr->umem_mutex);
+ err = mlx5_ib_update_xlt(imr, upd_start_idx, upd_len, 0,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ if (err) {
+ mlx5_ib_err(imr->dev, "Failed to update PAS\n");
+ return err;
+ }
return ret;
}
+/*
+ * Returns:
+ * -EFAULT: The io_virt/bcnt range is not within the MR, it covers pages that are
+ * not accessible, or the MR is no longer valid.
+ * -EAGAIN/-ENOMEM: The operation should be retried
+ *
+ * -EINVAL/others: General internal malfunction
+ * >0: Number of pages mapped
+ */
+static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ if (!odp->is_implicit_odp) {
+ if (unlikely(io_virt < ib_umem_start(odp) ||
+ ib_umem_end(odp) - io_virt < bcnt))
+ return -EFAULT;
+ return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+ }
+ return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+}
+
struct pf_frame {
struct pf_frame *next;
u32 key;
@@ -775,10 +825,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
struct ib_pd *pd, u32 key,
u64 io_virt, size_t bcnt,
u32 *bytes_committed,
- u32 *bytes_mapped, u32 flags)
+ u32 *bytes_mapped)
{
int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
- bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
struct pf_frame *head = NULL, *frame;
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -787,58 +836,49 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
size_t offset;
int ndescs;
- srcu_key = srcu_read_lock(&dev->mr_srcu);
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
next_mr:
- mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
+ if (!mmkey) {
+ mlx5_ib_dbg(
+ dev,
+ "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+ /*
+ * The user could specify a SGL with multiple lkeys and only
+ * some of them are ODP. Treat the non-ODP ones as fully
+ * faulted.
+ */
+ ret = 0;
+ goto srcu_unlock;
+ }
if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
goto srcu_unlock;
}
- if (prefetch && mmkey->type != MLX5_MKEY_MR) {
- mlx5_ib_dbg(dev, "prefetch is allowed only for MR\n");
- ret = -EINVAL;
- goto srcu_unlock;
- }
-
switch (mmkey->type) {
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
- mlx5_ib_dbg(dev, "got dead MR\n");
- ret = -EFAULT;
- goto srcu_unlock;
- }
-
- if (prefetch) {
- if (!is_odp_mr(mr) ||
- mr->ibmr.pd != pd) {
- mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
- is_odp_mr(mr) ? "MR is not ODP" :
- "PD is not of the MR");
- ret = -EINVAL;
- goto srcu_unlock;
- }
- }
-
- if (!is_odp_mr(mr)) {
- mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
- key);
- if (bytes_mapped)
- *bytes_mapped += bcnt;
- ret = 0;
- goto srcu_unlock;
- }
- ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped, flags);
+ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
if (ret < 0)
goto srcu_unlock;
+ /*
+		 * When prefetching a page, a page fault is generated
+		 * in order to bring the page into main memory.
+		 * In the current flow, page faults are being counted.
+ */
+ mlx5_update_odp_stats(mr, faults, ret);
+
npages += ret;
ret = 0;
break;
@@ -928,7 +968,7 @@ srcu_unlock:
}
kfree(out);
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
*bytes_committed = 0;
return ret ? ret : npages;
}
@@ -1009,7 +1049,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
ret = pagefault_single_data_segment(dev, NULL, key,
io_virt, bcnt,
&pfault->bytes_committed,
- bytes_mapped, 0);
+ bytes_mapped);
if (ret < 0)
break;
npages += ret;
@@ -1292,8 +1332,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
}
ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
- &pfault->bytes_committed, NULL,
- 0);
+ &pfault->bytes_committed, NULL);
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
@@ -1320,8 +1359,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
ret = pagefault_single_data_segment(dev, NULL, rkey, address,
prefetch_len,
- &bytes_committed, NULL,
- 0);
+ &bytes_committed, NULL);
if (ret < 0 && ret != -EAGAIN) {
mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
ret, pfault->token, address, prefetch_len);
@@ -1581,7 +1619,6 @@ void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
.advise_mr = mlx5_ib_advise_mr,
- .invalidate_range = mlx5_ib_invalidate_range,
};
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
@@ -1624,114 +1661,128 @@ int mlx5_ib_odp_init(void)
struct prefetch_mr_work {
struct work_struct work;
- struct ib_pd *pd;
u32 pf_flags;
u32 num_sge;
- struct ib_sge sg_list[0];
+ struct {
+ u64 io_virt;
+ struct mlx5_ib_mr *mr;
+ size_t length;
+ } frags[];
};
-static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
- struct ib_sge *sg_list, u32 num_sge,
- u32 from)
+static void destroy_prefetch_work(struct prefetch_mr_work *work)
{
u32 i;
- int srcu_key;
-
- srcu_key = srcu_read_lock(&dev->mr_srcu);
- for (i = from; i < num_sge; ++i) {
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
-
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(sg_list[i].lkey));
- mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- atomic_dec(&mr->num_pending_prefetch);
- }
-
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
+ for (i = 0; i < work->num_sge; ++i)
+ atomic_dec(&work->frags[i].mr->num_deferred_work);
+ kvfree(work);
}
-static bool num_pending_prefetch_inc(struct ib_pd *pd,
- struct ib_sge *sg_list, u32 num_sge)
+static struct mlx5_ib_mr *
+get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- bool ret = true;
- u32 i;
+ struct mlx5_core_mkey *mmkey;
+ struct ib_umem_odp *odp;
+ struct mlx5_ib_mr *mr;
- for (i = 0; i < num_sge; ++i) {
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
+ lockdep_assert_held(&dev->odp_srcu);
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(sg_list[i].lkey));
- if (!mmkey || mmkey->key != sg_list[i].lkey) {
- ret = false;
- break;
- }
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
+ if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ return NULL;
- if (mmkey->type != MLX5_MKEY_MR) {
- ret = false;
- break;
- }
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ if (mr->ibmr.pd != pd)
+ return NULL;
- if (!smp_load_acquire(&mr->live)) {
- ret = false;
- break;
- }
+ odp = to_ib_umem_odp(mr->umem);
- if (mr->ibmr.pd != pd) {
- ret = false;
- break;
- }
+ /* prefetch with write-access must be supported by the MR */
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ !odp->umem.writable)
+ return NULL;
- atomic_inc(&mr->num_pending_prefetch);
- }
+ return mr;
+}
- if (!ret)
- num_pending_prefetch_dec(dev, sg_list, i, 0);
+static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
+{
+ struct prefetch_mr_work *work =
+ container_of(w, struct prefetch_mr_work, work);
+ u32 bytes_mapped = 0;
+ u32 i;
- return ret;
+ for (i = 0; i < work->num_sge; ++i)
+ pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+ work->frags[i].length, &bytes_mapped,
+ work->pf_flags);
+
+ destroy_prefetch_work(work);
}
-static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
- struct ib_sge *sg_list, u32 num_sge)
+static bool init_prefetch_work(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct prefetch_mr_work *work,
+ struct ib_sge *sg_list, u32 num_sge)
{
u32 i;
- int ret = 0;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
+ INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
+ work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) {
- struct ib_sge *sg = &sg_list[i];
- int bytes_committed = 0;
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr =
+ get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!work->frags[i].mr) {
+			work->num_sge = i;
+			destroy_prefetch_work(work);
+ return false;
+ }
- ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
- sg->length,
- &bytes_committed, NULL,
- pf_flags);
- if (ret < 0)
- break;
+		/* Keep the MR pointer valid outside the SRCU read lock */
+ atomic_inc(&work->frags[i].mr->num_deferred_work);
}
-
- return ret < 0 ? ret : 0;
+ work->num_sge = num_sge;
+ return true;
}
-static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
+static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct ib_sge *sg_list,
+ u32 num_sge)
{
- struct prefetch_mr_work *w =
- container_of(work, struct prefetch_mr_work, work);
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ u32 bytes_mapped = 0;
+ int srcu_key;
+ int ret = 0;
+ u32 i;
+
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
- if (ib_device_try_get(w->pd->device)) {
- mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
- w->num_sge);
- ib_device_put(w->pd->device);
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!mr) {
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
+ &bytes_mapped, pf_flags);
+ if (ret < 0)
+ goto out;
}
+ ret = 0;
- num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
- w->num_sge, 0);
- kvfree(w);
+out:
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return ret;
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1739,43 +1790,27 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
+ u32 pf_flags = 0;
struct prefetch_mr_work *work;
- bool valid_req;
int srcu_key;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
- return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
+ return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
num_sge);
- work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
+ work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
if (!work)
return -ENOMEM;
- memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
-
- /* It is guaranteed that the pd when work is executed is the pd when
- * work was queued since pd can't be destroyed while it holds MRs and
- * destroying a MR leads to flushing the workquque
- */
- work->pd = pd;
- work->pf_flags = pf_flags;
- work->num_sge = num_sge;
-
- INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
-
- srcu_key = srcu_read_lock(&dev->mr_srcu);
-
- valid_req = num_pending_prefetch_inc(pd, sg_list, num_sge);
- if (valid_req)
- queue_work(system_unbound_wq, &work->work);
- else
- kvfree(work);
-
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
-
- return valid_req ? 0 : -EINVAL;
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return -EINVAL;
+ }
+ queue_work(system_unbound_wq, &work->work);
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5fd071c05944..7e51870e9e01 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -749,7 +749,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
{
int err;
- *umem = ib_umem_get(udata, addr, size, 0, 0);
+ *umem = ib_umem_get(udata, addr, size, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -806,7 +806,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0, 0);
+ rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
@@ -1041,11 +1041,14 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO |
IB_QP_CREATE_NETIF_QP |
- mlx5_ib_create_qp_sqpn_qp1()))
+ MLX5_IB_QP_CREATE_SQPN_QP1 |
+ MLX5_IB_QP_CREATE_WC_TEST))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
+ else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
+ qp->bf.bfreg = &dev->wc_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
@@ -1104,7 +1107,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
MLX5_SET(qpc, qpc, fre, 1);
MLX5_SET(qpc, qpc, rlky, 1);
- if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
+ if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
MLX5_SET(qpc, qpc, deth_sqpn, 1);
qp->flags |= MLX5_IB_QP_SQPN_QP1;
}
@@ -2140,7 +2143,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EINVAL;
}
if (init_attr->create_flags &
- mlx5_ib_create_qp_sqpn_qp1()) {
+ MLX5_IB_QP_CREATE_SQPN_QP1) {
mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
return -EINVAL;
}
@@ -5330,7 +5333,6 @@ out:
* we hit doorbell */
wmb();
- /* currently we support only regular doorbells */
mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
@@ -5825,7 +5827,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
+ qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
@@ -5957,12 +5959,21 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+ /*
+		 * In firmware, the number of strides in each WQE is:
+		 * "512 * 2^single_wqe_log_num_of_strides"
+		 * Values 3 to 8 are accepted as 10 to 15, and 9 to 18 are
+		 * accepted as 0 to 9.
+ */
+ static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
+ 2, 3, 4, 5, 6, 7, 8, 9 };
MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
MLX5_SET(wq, wq, log_wqe_stride_size,
rwq->single_stride_log_num_of_bytes -
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
- MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ fw_map[rwq->log_num_strides -
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
}
MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
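
As a worked example of the fw_map translation above: a requested single_wqe_log_num_of_strides of 3 is programmed into the WQ context as 10, 8 as 15, 9 (the legacy minimum) as 0, and 18 as 9.
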
@@ -6037,6 +6048,19 @@ static int set_user_rq_size(struct mlx5_ib_dev *dev,
return 0;
}
+static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
+{
+ if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
+ (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ return true;
+}
+
static int prepare_user_rq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata,
@@ -6084,14 +6108,16 @@ static int prepare_user_rq(struct ib_pd *pd,
MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
return -EINVAL;
}
- if ((ucmd.single_wqe_log_num_of_strides >
- MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
- (ucmd.single_wqe_log_num_of_strides <
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
- mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
- ucmd.single_wqe_log_num_of_strides,
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
- MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ if (!log_of_strides_valid(dev,
+ ucmd.single_wqe_log_num_of_strides)) {
+ mlx5_ib_dbg(
+ dev,
+ "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
return -EINVAL;
}
rwq->single_stride_log_num_of_bytes =
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
new file mode 100644
index 000000000000..8f6c04f12531
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <uapi/rdma/rdma_netlink.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/restrack.h>
+#include "mlx5_ib.h"
+
+static int fill_stat_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+
+ if (!table_attr)
+ goto err;
+
+ if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
+ atomic64_read(&mr->odp_stats.faults)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations",
+ atomic64_read(&mr->odp_stats.invalidations)))
+ goto err_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ if (mr->is_odp_implicit) {
+ if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
+ goto err;
+ } else {
+ if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+int mlx5_ib_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_MR)
+ return fill_res_mr_entry(msg, res);
+
+ return 0;
+}
+
+int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_MR)
+ return fill_stat_mr_entry(msg, res);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4e7fde86c96b..62939df3c692 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index bfd4eebc1182..599794c5a78f 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -576,14 +576,10 @@ enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mthca_create_agents(struct mthca_dev *dev);
void mthca_free_agents(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7ad517da4917..99aa8183a7f2 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -196,30 +196,19 @@ static void forward_trap(struct mthca_dev *dev,
}
}
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int err;
u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
- slid == 0) {
- forward_trap(to_mdev(ibdev), port_num, in_mad);
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) {
+ forward_trap(to_mdev(ibdev), port_num, in);
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
@@ -229,40 +218,39 @@ int mthca_process_mad(struct ib_device *ibdev,
* Only handle PMA and Mellanox vendor-specific class gets and
* sets for other classes.
*/
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
return IB_MAD_RESULT_SUCCESS;
/*
* Don't process SMInfo queries or vendor-specific
* MADs -- the SMA can't handle them.
*/
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
- ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+ ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
IB_SMP_ATTR_VENDOR_MASK))
return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+ } else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET)
return IB_MAD_RESULT_SUCCESS;
} else
return IB_MAD_RESULT_SUCCESS;
- if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+ in->mad_hdr.method == IB_MGMT_METHOD_SET &&
+ in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
prev_lid = ib_lid_cpu16(pattr.lid);
- err = mthca_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
+ err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
if (err == -EBADMSG)
return IB_MAD_RESULT_SUCCESS;
else if (err) {
@@ -270,16 +258,16 @@ int mthca_process_mad(struct ib_device *ibdev,
return IB_MAD_RESULT_FAILURE;
}
- if (!out_mad->mad_hdr.status) {
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
- node_desc_override(ibdev, out_mad);
+ if (!out->mad_hdr.status) {
+ smp_snoop(ibdev, port_num, in, prev_lid);
+ node_desc_override(ibdev, out);
}
/* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
/* no response for trap repress */
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 23554d8bf241..33002530fee7 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -880,9 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, acc,
- ucmd.mr_attrs & MTHCA_MR_DMASYNC);
-
+ mr->umem = ib_umem_get(udata, start, length, acc);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 8d3e36d548aa..2b7f00ac41b0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -247,35 +247,20 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-int ocrdma_process_mad(struct ib_device *ibdev,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
- int status;
+ int status = IB_MAD_RESULT_SUCCESS;
struct ocrdma_dev *dev;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_PERF_MGMT:
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
dev = get_ocrdma_dev(ibdev);
- if (!ocrdma_pma_counters(dev, out_mad))
- status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
- else
- status = IB_MAD_RESULT_SUCCESS;
- break;
- default:
- status = IB_MAD_RESULT_SUCCESS;
- break;
+ ocrdma_pma_counters(dev, out);
+ status |= IB_MAD_RESULT_REPLY;
}
+
return status;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 64cb82c08664..9780afcde780 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -56,12 +56,9 @@ int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int ocrdma_process_mad(struct ib_device *,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
#endif /* __OCRDMA_AH_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index c15cfc6cef81..d8c47d24d6d6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -166,7 +166,6 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.get_port_immutable = ocrdma_port_immutable,
.map_mr_sg = ocrdma_map_mr_sg,
.mmap = ocrdma_mmap,
- .modify_port = ocrdma_modify_port,
.modify_qp = ocrdma_modify_qp,
.poll_cq = ocrdma_poll_cq,
.post_recv = ocrdma_post_recv,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 6ef89c226ad8..c2e0d0fa44be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -2034,7 +2034,7 @@ struct ocrdma_rx_stats {
};
struct ocrdma_rx_qp_err_stats {
- u32 nak_invalid_requst_errors;
+ u32 nak_invalid_request_errors;
u32 nak_remote_operation_errors;
u32 nak_count_remote_access_errors;
u32 local_length_errors;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index a902942adb5d..5f831e3bdbad 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -423,8 +423,8 @@ static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
pcur = stats;
- pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
- (u64)rx_qp_err_stats->nak_invalid_requst_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_request_errors",
+ (u64)rx_qp_err_stats->nak_invalid_request_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
(u64)rx_qp_err_stats->nak_remote_operation_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
@@ -670,12 +670,10 @@ err:
return -EFAULT;
}
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad)
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad)
{
struct ib_pma_portcounters *pma_cnt;
- memset(out_mad->data, 0, sizeof out_mad->data);
pma_cnt = (void *)(out_mad->data + 40);
ocrdma_update_stats(dev);
@@ -683,7 +681,6 @@ int ocrdma_pma_counters(struct ocrdma_dev *dev,
pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
- return 0;
}
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index bba1fec4f11f..98feca26ac55 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -69,7 +69,6 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
void ocrdma_add_port_stats(struct ocrdma_dev *dev);
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad);
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad);
#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e8267e590772..9bc1ca6f6f9e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -190,12 +190,6 @@ int ocrdma_query_port(struct ib_device *ibdev,
return 0;
}
-int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
unsigned long len)
{
@@ -875,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(udata, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 32488da1b752..3a5010881be5 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -54,8 +54,6 @@ int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props,
struct ib_udata *uhw);
int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
enum rdma_protocol_type
ocrdma_query_protocol(struct ib_device *device, u8 port_num);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index dc71b6e16a07..dcdc85a1ab25 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -212,7 +212,7 @@ static const struct ib_device_ops qedr_dev_ops = {
.get_link_layer = qedr_link_layer,
.map_mr_sg = qedr_map_mr_sg,
.mmap = qedr_mmap,
- .modify_port = qedr_modify_port,
+ .mmap_free = qedr_mmap_free,
.modify_qp = qedr_modify_qp,
.modify_srq = qedr_modify_srq,
.poll_cq = qedr_poll_cq,
@@ -357,9 +357,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
return -ENOMEM;
spin_lock_init(&dev->sgid_lock);
+ xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
if (IS_IWARP(dev)) {
- xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
+ xa_init(&dev->qps);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
}
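
The hunk above moves SRQ xarray initialization out of the iWARP-only branch and switches the QP xarray to a plain xa_init(), since QP entries are now only inserted and erased from process context. A minimal sketch of the two initialization styles, assuming a hypothetical driver-private structure (the demo_* names are illustrative, not from the patch):

```c
#include <linux/xarray.h>

struct demo_dev {
	struct xarray qps;	/* looked up and modified in process context */
	struct xarray srqs;	/* may be touched from IRQ context */
};

static void demo_init_tables(struct demo_dev *dev)
{
	/* plain init: the internal lock is taken without disabling IRQs */
	xa_init(&dev->qps);

	/* IRQ-safe init: users take xa_lock_irq()/xa_lock_irqsave() */
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
}

static int demo_store_qp(struct demo_dev *dev, u32 qpn, void *qp)
{
	/* fails with -EBUSY if the index is already occupied */
	return xa_insert(&dev->qps, qpn, qp, GFP_KERNEL);
}
```
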
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 0cfd849b13d6..5488dbd59d3c 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -40,6 +40,7 @@
#include <linux/qed/qed_rdma_if.h>
#include <linux/qed/qede_rdma.h>
#include <linux/qed/roce_common.h>
+#include <linux/completion.h>
#include "qedr_hsi_rdma.h"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
@@ -230,14 +231,16 @@ struct qedr_ucontext {
struct qedr_dev *dev;
struct qedr_pd *pd;
void __iomem *dpi_addr;
+ struct rdma_user_mmap_entry *db_mmap_entry;
u64 dpi_phys_addr;
u32 dpi_size;
u16 dpi;
+ bool db_rec;
+};
- struct list_head mm_head;
-
- /* Lock to protect mm list */
- struct mutex mm_list_lock;
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
};
union db_prod64 {
@@ -265,6 +268,13 @@ struct qedr_userq {
struct qedr_pbl *pbl_tbl;
u64 buf_addr;
size_t buf_len;
+
+ /* doorbell recovery */
+ void __iomem *db_addr;
+ struct qedr_user_db_rec *db_rec_data;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ void __iomem *db_rec_db2_addr;
+ union db_prod32 db_rec_db2_data;
};
struct qedr_cq {
@@ -300,19 +310,6 @@ struct qedr_pd {
struct qedr_ucontext *uctx;
};
-struct qedr_mm {
- struct {
- u64 phy_addr;
- unsigned long len;
- } key;
- struct list_head entry;
-};
-
-union db_prod32 {
- struct rdma_pwm_val16_data data;
- u32 raw;
-};
-
struct qedr_qp_hwq_info {
/* WQE Elements */
struct qed_chain pbl;
@@ -377,10 +374,20 @@ enum qedr_qp_err_bitmap {
QEDR_QP_ERR_RQ_PBL_FULL = 32,
};
+enum qedr_qp_create_type {
+ QEDR_QP_CREATE_NONE,
+ QEDR_QP_CREATE_USER,
+ QEDR_QP_CREATE_KERNEL,
+};
+
+enum qedr_iwarp_cm_flags {
+ QEDR_IWARP_CM_WAIT_FOR_CONNECT = BIT(0),
+ QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
+};
+
struct qedr_qp {
struct ib_qp ibqp; /* must be first */
struct qedr_dev *dev;
- struct qedr_iw_ep *ep;
struct qedr_qp_hwq_info sq;
struct qedr_qp_hwq_info rq;
@@ -395,6 +402,7 @@ struct qedr_qp {
u32 id;
struct qedr_pd *pd;
enum ib_qp_type qp_type;
+ enum qedr_qp_create_type create_type;
struct qed_rdma_qp *qed_qp;
u32 qp_id;
u16 icid;
@@ -437,8 +445,11 @@ struct qedr_qp {
/* Relevant to qps created from user space only (applications) */
struct qedr_userq usq;
struct qedr_userq urq;
- atomic_t refcnt;
- bool destroyed;
+
+ /* synchronization objects used with iwarp ep */
+ struct kref refcnt;
+ struct completion iwarp_cm_comp;
+ unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};
struct qedr_ah {
@@ -476,6 +487,18 @@ struct qedr_mr {
u32 npages;
};
+struct qedr_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ struct qedr_dev *dev;
+ union {
+ u64 io_address;
+ void *address;
+ };
+ size_t length;
+ u16 dpi;
+ u8 mmap_flag;
+};
+
#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
@@ -531,7 +554,7 @@ struct qedr_iw_ep {
struct iw_cm_id *cm_id;
struct qedr_qp *qp;
void *qed_context;
- u8 during_connect;
+ struct kref refcnt;
};
static inline
@@ -574,4 +597,11 @@ static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct qedr_srq, ibsrq);
}
+
+static inline struct qedr_user_mmap_entry *
+get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct qedr_user_mmap_entry,
+ rdma_entry);
+}
#endif
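
The new struct qedr_user_mmap_entry wraps the core's rdma_user_mmap_entry so that, when the core hands a generic entry back to the driver, the private data can be recovered with container_of(), exactly as get_qedr_mmap_entry() does above. A minimal sketch of the same wrapping pattern with a made-up structure (the demo_* layout is an assumption, only the embedding idea is taken from the patch):

```c
#include <linux/kernel.h>	/* container_of() */
#include <rdma/ib_verbs.h>	/* struct rdma_user_mmap_entry */

struct demo_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* core-managed part */
	void *address;				/* driver-private part */
	u8 mmap_flag;
};

static inline struct demo_mmap_entry *
to_demo_entry(struct rdma_user_mmap_entry *rdma_entry)
{
	/* recover the wrapper from the embedded core entry */
	return container_of(rdma_entry, struct demo_mmap_entry, rdma_entry);
}
```
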
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 22881d4442b9..792eecd206b6 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -79,6 +79,27 @@ qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
}
}
+static void qedr_iw_free_qp(struct kref *ref)
+{
+ struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
+
+ kfree(qp);
+}
+
+static void
+qedr_iw_free_ep(struct kref *ref)
+{
+ struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);
+
+ if (ep->qp)
+ kref_put(&ep->qp->refcnt, qedr_iw_free_qp);
+
+ if (ep->cm_id)
+ ep->cm_id->rem_ref(ep->cm_id);
+
+ kfree(ep);
+}
+
static void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
@@ -93,6 +114,7 @@ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
ep->dev = dev;
ep->qed_context = params->ep_context;
+ kref_init(&ep->refcnt);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
@@ -141,12 +163,10 @@ qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
- if (ep->cm_id) {
+ if (ep->cm_id)
qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
- ep->cm_id->rem_ref(ep->cm_id);
- ep->cm_id = NULL;
- }
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
@@ -186,11 +206,13 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
struct qedr_qp *qp = ep->qp;
struct iw_cm_event event;
- if (qp->destroyed) {
- kfree(dwork);
- qedr_iw_qp_rem_ref(&qp->ibqp);
- return;
- }
+ /* The qp won't be released until we release the ep.
+ * The ep's refcnt was increased before this work was
+ * scheduled, so it is safe to access the qp here.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ goto out;
memset(&event, 0, sizeof(event));
event.status = dwork->status;
@@ -204,7 +226,6 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
else
qp_params.new_state = QED_ROCE_QP_STATE_SQD;
- kfree(dwork);
if (ep->cm_id)
ep->cm_id->event_handler(ep->cm_id, &event);
@@ -214,7 +235,10 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
- qedr_iw_qp_rem_ref(&qp->ibqp);
+ complete(&ep->qp->iwarp_cm_comp);
+out:
+ kfree(dwork);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
@@ -224,13 +248,17 @@ qedr_iw_disconnect_event(void *context,
struct qedr_discon_work *work;
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
struct qedr_dev *dev = ep->dev;
- struct qedr_qp *qp = ep->qp;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return;
- qedr_iw_qp_add_ref(&qp->ibqp);
+ /* We can't get a close event before disconnect, but since
+ * we're deferring the handling to a workqueue we need to make
+ * sure close won't free the ep meanwhile, so take a reference.
+ */
+ kref_get(&ep->refcnt);
+
work->ep = ep;
work->event = params->event;
work->status = params->status;
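
The disconnect path above takes a reference on the ep before queueing the worker, and the worker drops it when it is done, so a concurrent close cannot free the ep while the work is pending. A minimal, self-contained sketch of that ordering, using hypothetical demo_* names rather than the driver's own types:

```c
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ep {
	struct kref refcnt;
	struct work_struct work;
};

static void demo_ep_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_ep, refcnt));
}

static void demo_worker(struct work_struct *work)
{
	struct demo_ep *ep = container_of(work, struct demo_ep, work);

	/* ... handle the event; ep is guaranteed to be alive here ... */

	kref_put(&ep->refcnt, demo_ep_release);	/* drop the work's reference */
}

static void demo_schedule(struct demo_ep *ep)
{
	kref_get(&ep->refcnt);		/* keep ep alive until the work runs */
	INIT_WORK(&ep->work, demo_worker);
	schedule_work(&ep->work);
}
```
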
@@ -252,16 +280,30 @@ qedr_iw_passive_complete(void *context,
if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
DP_DEBUG(dev, QEDR_MSG_IWARP,
"PASSIVE connection refused releasing ep...\n");
- kfree(ep);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
return;
}
+ complete(&ep->qp->iwarp_cm_comp);
qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
if (params->status < 0)
qedr_iw_close_event(context, params);
}
+static void
+qedr_iw_active_complete(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ complete(&ep->qp->iwarp_cm_comp);
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);
+
+ if (params->status < 0)
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+}
+
static int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
@@ -288,27 +330,15 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
qedr_iw_mpa_reply(context, params);
break;
case QED_IWARP_EVENT_PASSIVE_COMPLETE:
- ep->during_connect = 0;
qedr_iw_passive_complete(context, params);
break;
-
case QED_IWARP_EVENT_ACTIVE_COMPLETE:
- ep->during_connect = 0;
- qedr_iw_issue_event(context,
- params,
- IW_CM_EVENT_CONNECT_REPLY);
- if (params->status < 0) {
- struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
-
- ep->cm_id->rem_ref(ep->cm_id);
- ep->cm_id = NULL;
- }
+ qedr_iw_active_complete(context, params);
break;
case QED_IWARP_EVENT_DISCONNECT:
qedr_iw_disconnect_event(context, params);
break;
case QED_IWARP_EVENT_CLOSE:
- ep->during_connect = 0;
qedr_iw_close_event(context, params);
break;
case QED_IWARP_EVENT_RQ_EMPTY:
@@ -451,10 +481,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
if ((!dst) || dst->error) {
if (dst) {
- dst_release(dst);
DP_ERR(dev,
"ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return -EINVAL;
}
@@ -476,6 +506,19 @@ qedr_addr6_resolve(struct qedr_dev *dev,
return rc;
}
+static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
+{
+ struct qedr_qp *qp;
+
+ xa_lock(&dev->qps);
+ qp = xa_load(&dev->qps, qpn);
+ if (qp)
+ kref_get(&qp->refcnt);
+ xa_unlock(&dev->qps);
+
+ return qp;
+}
+
int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct qedr_dev *dev = get_qedr_dev(cm_id->device);
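
qedr_iw_load_qp() above looks the QP up and takes a reference while still holding the xarray lock, so the QP cannot disappear between the lookup and the kref_get(). A minimal sketch of that lookup pattern, assuming a kref-counted object stored in an xarray (demo_* names are illustrative):

```c
#include <linux/kref.h>
#include <linux/xarray.h>

struct demo_qp {
	struct kref refcnt;
	/* ... */
};

static struct demo_qp *demo_load_qp(struct xarray *qps, u32 qpn)
{
	struct demo_qp *qp;

	xa_lock(qps);			/* prevents removal during the lookup */
	qp = xa_load(qps, qpn);
	if (qp)
		kref_get(&qp->refcnt);	/* reference taken before dropping the lock */
	xa_unlock(qps);

	return qp;			/* caller must kref_put() when done */
}
```
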
@@ -491,10 +534,6 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int rc = 0;
int i;
- qp = xa_load(&dev->qps, conn_param->qpn);
- if (unlikely(!qp))
- return -EINVAL;
-
laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
@@ -516,8 +555,15 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -ENOMEM;
ep->dev = dev;
+ kref_init(&ep->refcnt);
+
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
+ if (!qp) {
+ rc = -EINVAL;
+ goto err;
+ }
+
ep->qp = qp;
- qp->ep = ep;
cm_id->add_ref(cm_id);
ep->cm_id = cm_id;
@@ -580,16 +626,20 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
in_params.qp = qp->qed_qp;
memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
- ep->during_connect = 1;
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ goto err; /* QP already being destroyed */
+
rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
- if (rc)
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
goto err;
+ }
return rc;
err:
- cm_id->rem_ref(cm_id);
- kfree(ep);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
return rc;
}
@@ -677,18 +727,17 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct qedr_dev *dev = ep->dev;
struct qedr_qp *qp;
struct qed_iwarp_accept_in params;
- int rc;
+ int rc = 0;
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
- qp = xa_load(&dev->qps, conn_param->qpn);
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
if (!qp) {
DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
return -EINVAL;
}
ep->qp = qp;
- qp->ep = ep;
cm_id->add_ref(cm_id);
ep->cm_id = cm_id;
@@ -700,15 +749,21 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
params.ird = conn_param->ird;
params.ord = conn_param->ord;
- ep->during_connect = 1;
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ goto err; /* QP already destroyed */
+
rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
- if (rc)
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
goto err;
+ }
return rc;
+
err:
- ep->during_connect = 0;
- cm_id->rem_ref(cm_id);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+
return rc;
}
@@ -731,17 +786,14 @@ void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
- atomic_inc(&qp->refcnt);
+ kref_get(&qp->refcnt);
}
void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
- if (atomic_dec_and_test(&qp->refcnt)) {
- xa_erase_irq(&qp->dev->qps, qp->qp_id);
- kfree(qp);
- }
+ kref_put(&qp->refcnt, qedr_iw_free_qp);
}
struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6f3ce86019b7..4cd292966aa9 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -51,6 +51,7 @@
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
+#include "qedr_iw_cm.h"
#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
#define RDMA_MAX_SGE_PER_SRQ (4)
@@ -58,6 +59,11 @@
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+enum {
+ QEDR_USER_MMAP_IO_WC = 0,
+ QEDR_USER_MMAP_PHYS_PAGE,
+};
+
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
size_t len)
{
@@ -250,78 +256,31 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
return 0;
}
-int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
-static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
- unsigned long len)
-{
- struct qedr_mm *mm;
-
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm)
- return -ENOMEM;
-
- mm->key.phy_addr = phy_addr;
- /* This function might be called with a length which is not a multiple
- * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
- * forces this granularity by increasing the requested size if needed.
- * When qedr_mmap is called, it will search the list with the updated
- * length as a key. To prevent search failures, the length is rounded up
- * in advance to PAGE_SIZE.
- */
- mm->key.len = roundup(len, PAGE_SIZE);
- INIT_LIST_HEAD(&mm->entry);
-
- mutex_lock(&uctx->mm_list_lock);
- list_add(&mm->entry, &uctx->mm_head);
- mutex_unlock(&uctx->mm_list_lock);
-
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
- (unsigned long long)mm->key.phy_addr,
- (unsigned long)mm->key.len, uctx);
-
- return 0;
-}
-
-static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
- unsigned long len)
-{
- bool found = false;
- struct qedr_mm *mm;
-
- mutex_lock(&uctx->mm_list_lock);
- list_for_each_entry(mm, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
- continue;
-
- found = true;
- break;
- }
- mutex_unlock(&uctx->mm_list_lock);
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
- mm->key.phy_addr, mm->key.len, uctx, found);
-
- return found;
-}
-
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
struct ib_device *ibdev = uctx->device;
int rc;
struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
struct qedr_alloc_ucontext_resp uresp = {};
+ struct qedr_alloc_ucontext_req ureq = {};
struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qed_rdma_add_user_out_params oparams;
+ struct qedr_user_mmap_entry *entry;
if (!udata)
return -EFAULT;
+ if (udata->inlen) {
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (rc) {
+ DP_ERR(dev, "Problem copying data from user space\n");
+ return -EFAULT;
+ }
+
+ ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
+ }
+
rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
if (rc) {
DP_ERR(dev,
@@ -334,13 +293,29 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
ctx->dpi_addr = oparams.dpi_addr;
ctx->dpi_phys_addr = oparams.dpi_phys_addr;
ctx->dpi_size = oparams.dpi_size;
- INIT_LIST_HEAD(&ctx->mm_head);
- mutex_init(&ctx->mm_list_lock);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ entry->io_address = ctx->dpi_phys_addr;
+ entry->length = ctx->dpi_size;
+ entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
+ entry->dpi = ctx->dpi;
+ entry->dev = dev;
+ rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
+ ctx->dpi_size);
+ if (rc) {
+ kfree(entry);
+ goto err;
+ }
+ ctx->db_mmap_entry = &entry->rdma_entry;
uresp.dpm_enabled = dev->user_dpm_enabled;
uresp.wids_enabled = 1;
uresp.wid_count = oparams.wid_count;
- uresp.db_pa = ctx->dpi_phys_addr;
+ uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
uresp.db_size = ctx->dpi_size;
uresp.max_send_wr = dev->attr.max_sqe;
uresp.max_recv_wr = dev->attr.max_rqe;
@@ -352,82 +327,92 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
- return rc;
+ goto err;
ctx->dev = dev;
- rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
- if (rc)
- return rc;
-
DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
&ctx->ibucontext);
return 0;
+
+err:
+ if (!ctx->db_mmap_entry)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
+ else
+ rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
+
+ return rc;
}
void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
- struct qedr_mm *mm, *tmp;
DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
uctx);
- uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
- list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
- mm->key.phy_addr, mm->key.len, uctx);
- list_del(&mm->entry);
- kfree(mm);
- }
+ rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
}
-int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
- struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
- struct qedr_dev *dev = get_qedr_dev(context->device);
- unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long len = (vma->vm_end - vma->vm_start);
- unsigned long dpi_start;
+ struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
+ struct qedr_dev *dev = entry->dev;
- dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
+ if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
+ free_page((unsigned long)entry->address);
+ else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
- DP_DEBUG(dev, QEDR_MSG_INIT,
- "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
- (void *)vma->vm_start, (void *)vma->vm_end,
- (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
+ kfree(entry);
+}
- if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
- DP_ERR(dev,
- "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
- (void *)vma->vm_start, (void *)vma->vm_end);
- return -EINVAL;
- }
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
+{
+ struct ib_device *dev = ucontext->device;
+ size_t length = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct qedr_user_mmap_entry *entry;
+ int rc = 0;
+ u64 pfn;
- if (!qedr_search_mmap(ucontext, phys_addr, len)) {
- DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
- vma->vm_pgoff);
- return -EINVAL;
- }
+ ibdev_dbg(dev,
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
- if (phys_addr < dpi_start ||
- ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
- DP_ERR(dev,
- "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
- (void *)phys_addr, (void *)dpi_start,
- ucontext->dpi_size);
+ rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
return -EINVAL;
}
-
- if (vma->vm_flags & VM_READ) {
- DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
- return -EINVAL;
+ entry = get_qedr_mmap_entry(rdma_entry);
+ ibdev_dbg(dev,
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->io_address, length, entry->mmap_flag);
+
+ switch (entry->mmap_flag) {
+ case QEDR_USER_MMAP_IO_WC:
+ pfn = entry->io_address >> PAGE_SHIFT;
+ rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case QEDR_USER_MMAP_PHYS_PAGE:
+ rc = vm_insert_page(vma, vma->vm_start,
+ virt_to_page(entry->address));
+ break;
+ default:
+ rc = -EINVAL;
}
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
- vma->vm_page_prot);
+ if (rc)
+ ibdev_dbg(dev,
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->io_address, length, entry->mmap_flag, rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
}
int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
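
The rewritten qedr_mmap() above no longer walks a private list: it asks the core for the entry that owns the requested pgoff, maps it according to its type, and always drops the lookup reference before returning. A condensed sketch of that flow under the same assumptions (entry layout mirrors the one added in qedr.h, error handling trimmed, names hypothetical):

```c
#include <linux/mm.h>
#include <rdma/ib_verbs.h>

enum { DEMO_MMAP_IO_WC, DEMO_MMAP_PHYS_PAGE };

struct demo_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 io_address;		/* valid for DEMO_MMAP_IO_WC */
	void *address;		/* valid for DEMO_MMAP_PHYS_PAGE */
	u8 mmap_flag;
};

static int demo_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	size_t len = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct demo_mmap_entry *entry;
	int rc;

	rdma_entry = rdma_user_mmap_entry_get(uctx, vma);	/* validates pgoff */
	if (!rdma_entry)
		return -EINVAL;
	entry = container_of(rdma_entry, struct demo_mmap_entry, rdma_entry);

	if (entry->mmap_flag == DEMO_MMAP_IO_WC)		/* doorbell BAR */
		rc = rdma_user_mmap_io(uctx, vma, entry->io_address >> PAGE_SHIFT,
				       len, pgprot_writecombine(vma->vm_page_prot),
				       rdma_entry);
	else							/* recovery page */
		rc = vm_insert_page(vma, vma->vm_start,
				    virt_to_page(entry->address));

	rdma_user_mmap_entry_put(rdma_entry);	/* drop the lookup reference */
	return rc;
}
```
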
@@ -657,16 +642,50 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
}
}
+static int qedr_db_recovery_add(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return 0;
+ }
+
+ return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
+ db_width, db_space);
+}
+
+static void qedr_db_recovery_del(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return;
+ }
+
+ /* Ignore the return code; there is not much we can do about a
+ * failure here, and the error is already logged inside the call.
+ */
+ dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
+}
+
static int qedr_copy_cq_uresp(struct qedr_dev *dev,
- struct qedr_cq *cq, struct ib_udata *udata)
+ struct qedr_cq *cq, struct ib_udata *udata,
+ u32 db_offset)
{
struct qedr_create_cq_uresp uresp;
int rc;
memset(&uresp, 0, sizeof(uresp));
- uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ uresp.db_offset = db_offset;
uresp.icid = cq->icid;
+ if (cq->q.db_mmap_entry)
+ uresp.db_rec_addr =
+ rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
@@ -694,10 +713,58 @@ static inline int qedr_align_cq_entries(int entries)
return aligned_size / QEDR_CQE_SIZE;
}
+static int qedr_init_user_db_rec(struct ib_udata *udata,
+ struct qedr_dev *dev, struct qedr_userq *q,
+ bool requires_db_rec)
+{
+ struct qedr_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ struct qedr_user_mmap_entry *entry;
+ int rc;
+
+ /* Skip for a non-doorbell user queue (SRQ) or a lib lacking doorbell recovery support */
+ if (requires_db_rec == 0 || !uctx->db_rec)
+ return 0;
+
+ /* Allocate a page for doorbell recovery, add to mmap */
+ q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
+ if (!q->db_rec_data) {
+ DP_ERR(dev, "get_zeroed_page failed\n");
+ return -ENOMEM;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto err_free_db_data;
+
+ entry->address = q->db_rec_data;
+ entry->length = PAGE_SIZE;
+ entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
+ rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
+ &entry->rdma_entry,
+ PAGE_SIZE);
+ if (rc)
+ goto err_free_entry;
+
+ q->db_mmap_entry = &entry->rdma_entry;
+
+ return 0;
+
+err_free_entry:
+ kfree(entry);
+
+err_free_db_data:
+ free_page((unsigned long)q->db_rec_data);
+ q->db_rec_data = NULL;
+ return -ENOMEM;
+}
+
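
qedr_init_user_db_rec() above allocates one zeroed kernel page per user queue, wraps it in an mmap entry, and later reports the resulting offset back through the uresp struct so user space can mmap it for doorbell recovery. A minimal sketch of that allocate-and-expose step, with hypothetical demo_* names and no driver specifics:

```c
#include <linux/gfp.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct demo_db_rec_entry {
	struct rdma_user_mmap_entry rdma_entry;
	void *page;
};

static int demo_add_db_rec_page(struct ib_ucontext *uctx, u64 *mmap_offset)
{
	struct demo_db_rec_entry *entry;
	int rc;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* one zeroed kernel page that user space will mmap and update */
	entry->page = (void *)get_zeroed_page(GFP_USER);
	if (!entry->page) {
		kfree(entry);
		return -ENOMEM;
	}

	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, PAGE_SIZE);
	if (rc) {
		free_page((unsigned long)entry->page);
		kfree(entry);
		return rc;
	}

	/* this offset is what the driver reports back in its uresp struct */
	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
	return 0;
}
```
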
static inline int qedr_init_user_queue(struct ib_udata *udata,
struct qedr_dev *dev,
struct qedr_userq *q, u64 buf_addr,
- size_t buf_len, int access, int dmasync,
+ size_t buf_len, bool requires_db_rec,
+ int access,
int alloc_and_init)
{
u32 fw_pages;
@@ -705,7 +772,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync);
+ q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -735,7 +802,8 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
}
}
- return 0;
+ /* mmap the user address used to store doorbell data for recovery */
+ return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
err0:
ib_umem_release(q->umem);
@@ -821,6 +889,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int entries = attr->cqe;
struct qedr_cq *cq = get_qedr_cq(ibcq);
int chain_entries;
+ u32 db_offset;
int page_cnt;
u64 pbl_ptr;
u16 icid;
@@ -840,8 +909,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
chain_entries = qedr_align_cq_entries(entries);
chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+ /* calc db offset. user will add DPI base, kernel will add db addr */
+ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+
if (udata) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
DP_ERR(dev,
"create cq: problem copying data from user space\n");
goto err0;
@@ -856,7 +929,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->cq_type = QEDR_CQ_TYPE_USER;
rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
- ureq.len, IB_ACCESS_LOCAL_WRITE, 1,
+ ureq.len, true, IB_ACCESS_LOCAL_WRITE,
1);
if (rc)
goto err0;
@@ -865,6 +938,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
page_cnt = cq->q.pbl_info.num_pbes;
cq->ibcq.cqe = chain_entries;
+ cq->q.db_addr = ctx->dpi_addr + db_offset;
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
@@ -876,7 +950,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
sizeof(union rdma_cqe),
&cq->pbl, NULL);
if (rc)
- goto err1;
+ goto err0;
page_cnt = qed_chain_get_page_cnt(&cq->pbl);
pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
@@ -888,21 +962,28 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
if (rc)
- goto err2;
+ goto err1;
cq->icid = icid;
cq->sig = QEDR_CQ_MAGIC_NUMBER;
spin_lock_init(&cq->cq_lock);
if (udata) {
- rc = qedr_copy_cq_uresp(dev, cq, udata);
+ rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
+ if (rc)
+ goto err2;
+
+ rc = qedr_db_recovery_add(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data,
+ DB_REC_WIDTH_64B,
+ DB_REC_USER);
if (rc)
- goto err3;
+ goto err2;
+
} else {
/* Generate doorbell address. */
- cq->db_addr = dev->db_addr +
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
cq->db.data.icid = cq->icid;
+ cq->db_addr = dev->db_addr + db_offset;
cq->db.data.params = DB_AGG_CMD_SET <<
RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
@@ -912,6 +993,11 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->latest_cqe = NULL;
consume_cqe(cq);
cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ if (rc)
+ goto err2;
}
DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -920,18 +1006,19 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
-err3:
+err2:
destroy_iparams.icid = cq->icid;
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
&destroy_oparams);
-err2:
- if (udata)
- qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
- else
- dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
- if (udata)
+ if (udata) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
+ if (cq->q.db_mmap_entry)
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ } else {
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
err0:
return -EINVAL;
}
@@ -962,8 +1049,10 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
cq->destroyed = 1;
/* GSIs CQs are handled by driver, so they don't exist in the FW */
- if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
return;
+ }
iparams.icid = cq->icid;
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
@@ -972,6 +1061,14 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
if (udata) {
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
+
+ if (cq->q.db_rec_data) {
+ qedr_db_recovery_del(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ }
+ } else {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
}
/* We don't want the IRQ handler to handle a non-existing CQ so we
@@ -1136,8 +1233,8 @@ static int qedr_copy_srq_uresp(struct qedr_dev *dev,
}
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
- struct qedr_create_qp_uresp *uresp,
- struct qedr_qp *qp)
+ struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
{
/* iWARP requires two doorbells per RQ. */
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
@@ -1150,6 +1247,9 @@ static void qedr_copy_rq_uresp(struct qedr_dev *dev,
}
uresp->rq_icid = qp->icid;
+ if (qp->urq.db_mmap_entry)
+ uresp->rq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
}
static void qedr_copy_sq_uresp(struct qedr_dev *dev,
@@ -1163,22 +1263,26 @@ static void qedr_copy_sq_uresp(struct qedr_dev *dev,
uresp->sq_icid = qp->icid;
else
uresp->sq_icid = qp->icid + 1;
+
+ if (qp->usq.db_mmap_entry)
+ uresp->sq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
}
static int qedr_copy_qp_uresp(struct qedr_dev *dev,
- struct qedr_qp *qp, struct ib_udata *udata)
+ struct qedr_qp *qp, struct ib_udata *udata,
+ struct qedr_create_qp_uresp *uresp)
{
- struct qedr_create_qp_uresp uresp;
int rc;
- memset(&uresp, 0, sizeof(uresp));
- qedr_copy_sq_uresp(dev, &uresp, qp);
- qedr_copy_rq_uresp(dev, &uresp, qp);
+ memset(uresp, 0, sizeof(*uresp));
+ qedr_copy_sq_uresp(dev, uresp, qp);
+ qedr_copy_rq_uresp(dev, uresp, qp);
- uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
- uresp.qp_id = qp->qp_id;
+ uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp->qp_id = qp->qp_id;
- rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
if (rc)
DP_ERR(dev,
"create qp: failed a copy to user space with qp icid=0x%x.\n",
@@ -1193,7 +1297,10 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs)
{
spin_lock_init(&qp->q_lock);
- atomic_set(&qp->refcnt, 1);
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ kref_init(&qp->refcnt);
+ init_completion(&qp->iwarp_cm_comp);
+ }
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
@@ -1222,16 +1329,35 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->sq.max_sges, qp->sq_cq->icid);
}
-static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
+ int rc;
+
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid + 1;
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
if (!qp->srq) {
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ qedr_db_recovery_del(dev, qp->sq.db,
+ &qp->sq.db_data);
}
+
+ return rc;
}
static int qedr_check_srq_params(struct qedr_dev *dev,
@@ -1279,19 +1405,19 @@ static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
static int qedr_init_srq_user_params(struct ib_udata *udata,
struct qedr_srq *srq,
struct qedr_create_srq_ureq *ureq,
- int access, int dmasync)
+ int access)
{
struct scatterlist *sg;
int rc;
rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
- ureq->srq_len, access, dmasync, 1);
+ ureq->srq_len, false, access, 1);
if (rc)
return rc;
srq->prod_umem =
ib_umem_get(udata, ureq->prod_pair_addr,
- sizeof(struct rdma_srq_producers), access, dmasync);
+ sizeof(struct rdma_srq_producers), access);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -1381,13 +1507,14 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
hw_srq->max_sges = init_attr->attr.max_sge;
if (udata) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
DP_ERR(dev,
"create srq: problem copying data from user space\n");
goto err0;
}
- rc = qedr_init_srq_user_params(udata, srq, &ureq, 0, 0);
+ rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
if (rc)
goto err0;
@@ -1570,13 +1697,39 @@ qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
&qp->urq.pbl_info, FW_PAGE_SHIFT);
}
-static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
+static void qedr_cleanup_user(struct qedr_dev *dev,
+ struct qedr_ucontext *ctx,
+ struct qedr_qp *qp)
{
ib_umem_release(qp->usq.umem);
qp->usq.umem = NULL;
ib_umem_release(qp->urq.umem);
qp->urq.umem = NULL;
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ kfree(qp->urq.pbl_tbl);
+ }
+
+ if (qp->usq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
+ }
+
+ if (qp->urq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
+ }
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data);
}
static int qedr_create_user_qp(struct qedr_dev *dev,
@@ -1588,27 +1741,30 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
struct qed_rdma_create_qp_in_params in_params;
struct qed_rdma_create_qp_out_params out_params;
struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_create_qp_uresp uresp;
+ struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
+ qp->create_type = QEDR_QP_CREATE_USER;
memset(&ureq, 0, sizeof(ureq));
- rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
+ rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen));
if (rc) {
DP_ERR(dev, "Problem copying data from user space\n");
return rc;
}
- /* SQ - read access only (0), dma sync not required (0) */
+ /* SQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
- ureq.sq_len, 0, 0, alloc_and_init);
+ ureq.sq_len, true, 0, alloc_and_init);
if (rc)
return rc;
if (!qp->srq) {
- /* RQ - read access only (0), dma sync not required (0) */
+ /* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
- ureq.rq_len, 0, 0, alloc_and_init);
+ ureq.rq_len, true, 0, alloc_and_init);
if (rc)
return rc;
}
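
The copies from udata above are clamped to min(sizeof(ureq), udata->inlen) so an older library that passes a shorter request struct keeps working, with the missing tail left zero-initialized. A minimal sketch of that ABI-tolerant copy, using a hypothetical request layout (the field names are not from the patch):

```c
#include <linux/kernel.h>	/* min() */
#include <linux/string.h>
#include <linux/types.h>
#include <rdma/ib_verbs.h>

struct demo_create_req {
	__aligned_u64 addr;
	__u32 len;
	__u32 flags;		/* newer field: old libraries never send it */
};

static int demo_copy_req(struct ib_udata *udata, struct demo_create_req *req)
{
	/* zero first so fields an old library did not send read as 0 */
	memset(req, 0, sizeof(*req));

	if (!udata->inlen)
		return 0;

	/* never read past what user space actually provided */
	return ib_copy_from_udata(req, udata, min(sizeof(*req), udata->inlen));
}
```
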
@@ -1638,29 +1794,76 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- rc = qedr_copy_qp_uresp(dev, qp, udata);
+ rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
+ if (rc)
+ goto err;
+
+ /* db offset was calculated in copy_qp_uresp, now set in the user q */
+ ctx = pd->uctx;
+ qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+ /* Calculate the db_rec_db2 data here since it is constant, so
+ * there is no need to pass it back from user space.
+ */
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+ qp->urq.db_rec_db2_data.data.value =
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+ }
+
+ rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
if (rc)
goto err;
+ rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
qedr_qp_user_print(dev, qp);
- return 0;
+ return rc;
err:
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
if (rc)
DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
err1:
- qedr_cleanup_user(dev, qp);
+ qedr_cleanup_user(dev, ctx, qp);
return rc;
}
-static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
+ int rc;
+
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid;
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
@@ -1668,6 +1871,19 @@ static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
qp->rq.iwarp_db2_data.data.icid = qp->icid;
qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ return rc;
}
static int
@@ -1715,8 +1931,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- qedr_set_roce_db_info(dev, qp);
- return rc;
+ return qedr_set_roce_db_info(dev, qp);
}
static int
@@ -1774,8 +1989,7 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- qedr_set_iwarp_db_info(dev, qp);
- return rc;
+ return qedr_set_iwarp_db_info(dev, qp);
err:
dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -1790,6 +2004,20 @@ static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
kfree(qp->rqe_wr_id);
+
+ /* The GSI qp is not registered with the db recovery mechanism, so there is nothing to delete */
+ if (qp->qp_type == IB_QPT_GSI)
+ return;
+
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
+
+ if (!qp->srq) {
+ qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data);
+ }
}
static int qedr_create_kernel_qp(struct qedr_dev *dev,
@@ -1805,6 +2033,7 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_entries;
memset(&in_params, 0, sizeof(in_params));
+ qp->create_type = QEDR_QP_CREATE_KERNEL;
/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
* the ring. The ring should allow at least a single WR, even if the
@@ -1918,7 +2147,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
- rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
+ rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
if (rc)
goto err;
}
@@ -2429,7 +2658,10 @@ err:
static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
struct ib_udata *udata)
{
- int rc = 0;
+ struct qedr_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ int rc;
if (qp->qp_type != IB_QPT_GSI) {
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -2437,8 +2669,8 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
return rc;
}
- if (udata)
- qedr_cleanup_user(dev, qp);
+ if (qp->create_type == QEDR_QP_CREATE_USER)
+ qedr_cleanup_user(dev, ctx, qp);
else
qedr_cleanup_kernel(dev, qp);
@@ -2467,34 +2699,44 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
}
} else {
- /* Wait for the connect/accept to complete */
- if (qp->ep) {
- int wait_count = 1;
-
- while (qp->ep->during_connect) {
- DP_DEBUG(dev, QEDR_MSG_QP,
- "Still in during connect/accept\n");
-
- msleep(100);
- if (wait_count++ > 200) {
- DP_NOTICE(dev,
- "during connect timeout\n");
- break;
- }
- }
- }
+ /* If connection establishment has started, the WAIT_FOR_CONNECT
+ * bit will be set and we need to wait for the establishment
+ * to complete before destroying the qp.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
+
+ /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
+ * bit will be on, and we need to wait for the disconnect to
+ * complete before continuing. We can use the same completion,
+ * iwarp_cm_comp, since this is the only place that waits for
+ * this completion and it is sequential. In addition,
+ * disconnect can't occur before the connection is fully
+ * established, therefore if WAIT_FOR_DISCONNECT is on it
+ * means WAIT_FOR_CONNECT is also on and the completion for
+ * CONNECT already occurred.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
}
if (qp->qp_type == IB_QPT_GSI)
qedr_destroy_gsi_qp(dev);
+ /* We need to remove the entry from the xarray before we release
+ * the qp_id, to avoid a race where the qp_id is reallocated and
+ * the subsequent xa_insert fails.
+ */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ xa_erase(&dev->qps, qp->qp_id);
+
qedr_free_qp_resources(dev, qp, udata);
- if (atomic_dec_and_test(&qp->refcnt) &&
- rdma_protocol_iwarp(&dev->ibdev, 1)) {
- xa_erase_irq(&dev->qps, qp->qp_id);
- kfree(qp);
- }
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+
return 0;
}
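
The destroy path above and the connect/accept paths race on the same per-QP flag: whichever side sets WAIT_FOR_CONNECT first owns the wait, and the other side signals the completion. A compact sketch of that handshake, assuming a hypothetical object with one flag bit and one completion (this is the general pattern, not the driver's exact code):

```c
#include <linux/bitops.h>
#include <linux/completion.h>

#define DEMO_WAIT_FOR_CONNECT	0

struct demo_qp {
	unsigned long cm_flags;
	struct completion cm_comp;
};

static void demo_qp_init(struct demo_qp *qp)
{
	qp->cm_flags = 0;
	init_completion(&qp->cm_comp);
}

/* connect side: claim the bit before starting the async operation */
static int demo_start_connect(struct demo_qp *qp)
{
	if (test_and_set_bit(DEMO_WAIT_FOR_CONNECT, &qp->cm_flags))
		return -EINVAL;	/* destroy already started, bail out */
	/* ... kick off the asynchronous connect here ... */
	return 0;
}

/* event side: establishment finished (or failed), release the waiter */
static void demo_connect_done(struct demo_qp *qp)
{
	complete(&qp->cm_comp);
}

/* destroy side: if a connect ever started, wait until it has completed */
static void demo_destroy(struct demo_qp *qp)
{
	if (test_and_set_bit(DEMO_WAIT_FOR_CONNECT, &qp->cm_flags))
		wait_for_completion(&qp->cm_comp);
	/* ... safe to tear the QP down now ... */
}
```
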
@@ -2597,7 +2839,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(udata, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
@@ -2673,8 +2915,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
- if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
- qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+ if (mr->type != QEDR_MR_DMA)
+ free_mr_info(dev, &mr->info);
/* it could be user registered memory. */
ib_umem_release(mr->umem);
@@ -4106,19 +4348,10 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *mad_hdr,
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
- size_t *out_mad_size, u16 *out_mad_pkey_index)
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
{
- struct qedr_dev *dev = get_qedr_dev(ibdev);
-
- DP_DEBUG(dev, QEDR_MSG_GSI,
- "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
- mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
- mad_hdr->class_specific, mad_hdr->class_version,
- mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
return IB_MAD_RESULT_SUCCESS;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 9aaa90283d6e..18027844eb87 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -35,8 +35,6 @@
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata);
int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int qedr_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid);
@@ -46,7 +44,8 @@ int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
-int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma);
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
@@ -93,10 +92,9 @@ int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
- size_t *out_mad_size, u16 *out_mad_pkey_index);
+ const struct ib_grh *in_grh, const struct ib_mad *in_mad,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 531d8a1db2c3..ca5ea734e3d0 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1417,7 +1417,6 @@ static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
*
* The exact combo of LEDs if on is true is determined by looking
* at the ibcstatus.
-
* These LEDs indicate the physical and logical state of IB link.
* For this chip (at least with recommended board pinouts), LED1
* is Yellow (logical state) and LED2 is Green (physical state),
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index f92faf5ec369..79bb83222e8d 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2098,8 +2098,6 @@ static int cc_get_classportinfo(struct ib_cc_mad *ccp,
struct ib_cc_classportinfo_attr *p =
(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->base_version = 1;
p->class_version = 1;
p->cap_mask = 0;
@@ -2120,8 +2118,6 @@ static int cc_get_congestion_info(struct ib_cc_mad *ccp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->congestion_info = 0;
p->control_table_cap = ppd->cc_max_table_entries;
@@ -2138,8 +2134,6 @@ static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_cc_congestion_entry_shadow *entries;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
entries = ppd->congestion_entries_shadow->entries;
@@ -2176,8 +2170,6 @@ static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
goto bail;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
max_cct_block =
@@ -2296,18 +2288,11 @@ bail:
return reply_failure((struct ib_smp *) ccp);
}
-static int check_cc_key(struct qib_ibport *ibp,
- struct ib_cc_mad *ccp, int mad_flags)
-{
- return 0;
-}
-
static int process_cc(struct ib_device *ibdev, int mad_flags,
u8 port, const struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
- struct qib_ibport *ibp = to_iport(ibdev, port);
int ret;
*out_mad = *in_mad;
@@ -2318,10 +2303,6 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
goto bail;
}
- ret = check_cc_key(ibp, ccp, mad_flags);
- if (ret)
- goto bail;
-
switch (ccp->method) {
case IB_MGMT_METHOD_GET:
switch (ccp->attr_id) {
@@ -2405,28 +2386,21 @@ bail:
*/
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int ret;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
- switch (in_mad->mad_hdr.mgmt_class) {
+ switch (in->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ ret = process_subn(ibdev, mad_flags, port, in, out);
goto bail;
case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port, in_mad, out_mad);
+ ret = process_perf(ibdev, port, in, out);
goto bail;
case IB_MGMT_CLASS_CONG_MGMT:
@@ -2435,7 +2409,7 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
- ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
+ ret = process_cc(ibdev, mad_flags, port, in, out);
goto bail;
default:
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3926be78036e..568b21eb6ea1 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->show)
+ return -EIO;
+
return pattr->show(ppd, buf);
}
@@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->store)
+ return -EIO;
+
return pattr->store(ppd, buf, len);
}
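
The two hunks above make the port sysfs wrappers reject attributes that lack a show or store callback instead of dereferencing a NULL pointer; -EIO matches what the generic sysfs wrappers return in that case. A minimal sketch of the same guard for a custom kobject attribute (demo_* names are illustrative):

```c
#include <linux/kobject.h>
#include <linux/sysfs.h>

struct demo_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj, char *buf);
	ssize_t (*store)(struct kobject *kobj, const char *buf, size_t len);
};

static ssize_t demo_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct demo_attr *dattr = container_of(attr, struct demo_attr, attr);

	if (!dattr->show)		/* write-only attribute */
		return -EIO;
	return dattr->show(kobj, buf);
}

static ssize_t demo_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct demo_attr *dattr = container_of(attr, struct demo_attr, attr);

	if (!dattr->store)		/* read-only attribute */
		return -EIO;
	return dattr->store(kobj, buf, len);
}

static const struct sysfs_ops demo_sysfs_ops = {
	.show	= demo_attr_show,
	.store	= demo_attr_store,
};
```
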
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 17bdf8acee2f..8bf414b47b96 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -245,9 +245,8 @@ void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 7800e6930502..a26a4fd86bf4 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -136,7 +136,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
goto err_cq;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index 8f9749d54688..86a6c054ea26 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -58,7 +58,8 @@
#define PVRDMA_ROCEV1_VERSION 17
#define PVRDMA_ROCEV2_VERSION 18
#define PVRDMA_PPN64_VERSION 19
-#define PVRDMA_VERSION PVRDMA_PPN64_VERSION
+#define PVRDMA_QPHANDLE_VERSION 20
+#define PVRDMA_VERSION PVRDMA_QPHANDLE_VERSION
#define PVRDMA_BOARD_ID 1
#define PVRDMA_REV_ID 1
@@ -581,6 +582,17 @@ struct pvrdma_cmd_create_qp_resp {
u32 max_inline_data;
};
+struct pvrdma_cmd_create_qp_resp_v2 {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 qpn;
+ u32 qp_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
struct pvrdma_cmd_modify_qp {
struct pvrdma_cmd_hdr hdr;
u32 qp_handle;
@@ -663,6 +675,7 @@ union pvrdma_cmd_resp {
struct pvrdma_cmd_create_cq_resp create_cq_resp;
struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
struct pvrdma_cmd_create_qp_resp create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 create_qp_resp_v2;
struct pvrdma_cmd_query_qp_resp query_qp_resp;
struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
struct pvrdma_cmd_create_srq_resp create_srq_resp;
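
PVRDMA_QPHANDLE_VERSION bumps the device/driver ABI so that create-QP responses can carry a qp_handle separate from the qpn; both layouts live in the response union and the driver picks one based on the negotiated dsr_version, as the pvrdma_qp.c hunks further below show. A small sketch of that version-keyed selection (simplified, with hypothetical field names):

```c
#include <linux/types.h>

#define DEMO_QPHANDLE_VERSION	20

struct demo_resp_v1 { u32 qpn; };
struct demo_resp_v2 { u32 qpn; u32 qp_handle; };

union demo_resp {
	struct demo_resp_v1 v1;
	struct demo_resp_v2 v2;
};

static void demo_read_resp(u32 dsr_version, const union demo_resp *rsp,
			   u32 *qpn, u32 *qp_handle)
{
	if (dsr_version >= DEMO_QPHANDLE_VERSION) {
		*qpn = rsp->v2.qpn;
		*qp_handle = rsp->v2.qp_handle;	/* device-private handle */
	} else {
		*qpn = rsp->v1.qpn;
		*qp_handle = rsp->v1.qpn;	/* old devices reuse the qpn */
	}
}
```
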
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index f3a3d22ee8d7..c61e665ff261 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(udata, start, length, access_flags, 0);
+ umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index bca6a58a442e..f15809c28f67 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -52,6 +52,9 @@
#include "pvrdma.h"
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp);
+
static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
struct pvrdma_cq **recv_cq)
{
@@ -195,7 +198,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
struct pvrdma_create_qp ucmd;
+ struct pvrdma_create_qp_resp qp_resp = {};
unsigned long flags;
int ret;
bool is_srq = !!init_attr->srq;
@@ -260,10 +265,19 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
goto err_qp;
}
+ /* Does userspace support receiving both the qpn and the qp handle? */
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
+ udata->outlen < sizeof(qp_resp)) {
+ dev_warn(&dev->pdev->dev,
+ "create queuepair not supported\n");
+ ret = -EOPNOTSUPP;
+ goto err_qp;
+ }
+
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
- ucmd.rbuf_size, 0, 0);
+ ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -275,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
}
qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
- ucmd.sbuf_size, 0, 0);
+ ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
ib_umem_release(qp->rumem);
@@ -379,13 +393,33 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
}
/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
- qp->qp_handle = resp->qpn;
qp->port = init_attr->port_num;
- qp->ibqp.qp_num = resp->qpn;
+
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
+ qp->ibqp.qp_num = resp_v2->qpn;
+ qp->qp_handle = resp_v2->qp_handle;
+ } else {
+ qp->ibqp.qp_num = resp->qpn;
+ qp->qp_handle = resp->qpn;
+ }
+
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+ if (udata) {
+ qp_resp.qpn = qp->ibqp.qp_num;
+ qp_resp.qp_handle = qp->qp_handle;
+
+ if (ib_copy_to_udata(udata, &qp_resp,
+ min(udata->outlen, sizeof(qp_resp)))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back udata\n");
+ __pvrdma_destroy_qp(dev, qp);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
return &qp->ibqp;
err_pdir:
@@ -400,27 +434,15 @@ err_qp:
return ERR_PTR(ret);
}
-static void pvrdma_free_qp(struct pvrdma_qp *qp)
+static void _pvrdma_free_qp(struct pvrdma_qp *qp)
{
+ unsigned long flags;
struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
- struct pvrdma_cq *scq;
- struct pvrdma_cq *rcq;
- unsigned long flags, scq_flags, rcq_flags;
-
- /* In case cq is polling */
- get_cqs(qp, &scq, &rcq);
- pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-
- _pvrdma_flush_cqe(qp, scq);
- if (scq != rcq)
- _pvrdma_flush_cqe(qp, rcq);
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
dev->qp_tbl[qp->qp_handle] = NULL;
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
- pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-
if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
wait_for_completion(&qp->free);
@@ -435,34 +457,71 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
atomic_dec(&dev->num_qps);
}
-/**
- * pvrdma_destroy_qp - destroy a queue pair
- * @qp: the queue pair to destroy
- * @udata: user data or null for kernel object
- *
- * @return: 0 on success.
- */
-int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+static void pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_cq *scq;
+ struct pvrdma_cq *rcq;
+ unsigned long scq_flags, rcq_flags;
+
+ /* In case cq is polling */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ /*
+ * We're now unlocking the CQs before clearing out the qp handle; this
+ * should still be safe. We have destroyed the backend QP and flushed
+ * the CQEs, so there should be no other completions for this QP.
+ */
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_free_qp(qp);
+}
+
+static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
+ u32 qp_handle)
{
- struct pvrdma_qp *vqp = to_vqp(qp);
union pvrdma_cmd_req req;
struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
int ret;
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
- cmd->qp_handle = vqp->qp_handle;
+ cmd->qp_handle = qp_handle;
- ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
if (ret < 0)
- dev_warn(&to_vdev(qp->device)->pdev->dev,
+ dev_warn(&dev->pdev->dev,
"destroy queuepair failed, error: %d\n", ret);
+}
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
+ *
+ * @return: always 0.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+ struct pvrdma_qp *vqp = to_vqp(qp);
+
+ _pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
pvrdma_free_qp(vqp);
return 0;
}
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp)
+{
+ _pvrdma_destroy_qp_work(dev, qp->qp_handle);
+ _pvrdma_free_qp(qp);
+}
+
/**
* pvrdma_modify_qp - modify queue pair attributes
* @ibqp: the queue pair
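
Taken together, the pvrdma_qp.c hunks above make create-QP consume the v2 command response, which carries both the guest-visible QPN and the device QP handle, and report both values back to userspace. The compatibility handling is the interesting part; the following is a condensed, hypothetical sketch of that pattern (struct and helper names are simplified for illustration, this is not the literal driver code):

#include <rdma/ib_verbs.h>

/* Hypothetical response layout: old userspace only knows about qpn,
 * newer userspace also consumes qp_handle. */
struct drv_create_qp_resp {
	__u32 qpn;
	__u32 qp_handle;
};

static int drv_copy_create_qp_resp(struct ib_udata *udata,
				   u32 qpn, u32 qp_handle)
{
	struct drv_create_qp_resp resp = {
		.qpn = qpn,
		.qp_handle = qp_handle,
	};

	/* Never copy more than userspace left room for, so an old
	 * consumer with a shorter response struct keeps working. */
	return ib_copy_to_udata(udata, &resp,
				min(udata->outlen, sizeof(resp)));
}

The driver additionally rejects the combination of a new-style device and an old userspace (the udata->outlen check before the umem setup above), since in that case the qp_handle could never be returned.
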
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 36cdfbdbd325..98c8be71d91d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
goto err_srq;
}
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index fe99da0ff060..ee02c6176007 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -129,7 +129,6 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
* rvt_destory_ah - Destory an address handle
* @ibah: address handle
* @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
- * @udata: user data or NULL for kernel object
*
* Return: 0 on success
*/
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index a85571a4cf57..13d7f66eadab 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -552,7 +552,6 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
/**
* rvt_driver_cq_init - Init cq resources on behalf of driver
- * @rdi: rvt dev structure
*
* Return: 0 on success
*/
@@ -568,7 +567,6 @@ int rvt_driver_cq_init(void)
/**
* rvt_cq_exit - tear down cq reources
- * @rdi: rvt dev structure
*/
void rvt_cq_exit(void)
{
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index a6a39f01dca3..b9a76bf74857 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
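
This call site (like the pvrdma, rxe and siw ones elsewhere in the series) reflects ib_umem_get() dropping its unused trailing dmasync argument, leaving udata, address, length and access flags. A minimal sketch of the usual pin/release pairing with the four-argument form; struct drv_mr and the helper names are invented for illustration:

#include <rdma/ib_umem.h>

struct drv_mr {
	struct ib_umem *umem;
	/* ... driver-specific MR state ... */
};

static int drv_pin_user_mr(struct drv_mr *mr, struct ib_udata *udata,
			   u64 start, u64 length, int access)
{
	struct ib_umem *umem;

	umem = ib_umem_get(udata, start, length, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	mr->umem = umem;
	return 0;
}

static void drv_unpin_user_mr(struct drv_mr *mr)
{
	ib_umem_release(mr->umem);
}
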
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 0b0a241c57ff..3cdf75d0c7a4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2563,10 +2563,9 @@ void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
- * rvt_add_rnr_timer - add/start an rnr timer
- * @qp - the QP
- * @aeth - aeth of RNR timeout, simulated aeth for loopback
- * add an rnr timer on the QP
+ * rvt_add_rnr_timer - add/start an rnr timer on the QP
+ * @qp: the QP
+ * @aeth: aeth of RNR timeout, simulated aeth for loopback
*/
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
@@ -2583,7 +2582,7 @@ EXPORT_SYMBOL(rvt_add_rnr_timer);
/**
* rvt_stop_rc_timers - stop all timers
- * @qp - the QP
+ * @qp: the QP
* stop any pending timers
*/
void rvt_stop_rc_timers(struct rvt_qp *qp)
@@ -2617,7 +2616,7 @@ static void rvt_stop_rnr_timer(struct rvt_qp *qp)
/**
* rvt_del_timers_sync - wait for any timeout routines to exit
- * @qp - the QP
+ * @qp: the QP
*/
void rvt_del_timers_sync(struct rvt_qp *qp)
{
@@ -2626,7 +2625,7 @@ void rvt_del_timers_sync(struct rvt_qp *qp)
}
EXPORT_SYMBOL(rvt_del_timers_sync);
-/**
+/*
* This is called from s_timer for missing responses.
*/
static void rvt_rc_timeout(struct timer_list *t)
@@ -2676,12 +2675,13 @@ EXPORT_SYMBOL(rvt_rc_rnr_retry);
* rvt_qp_iter_init - initial for QP iteration
* @rdi: rvt devinfo
* @v: u64 value
+ * @cb: user-defined callback
*
* This returns an iterator suitable for iterating QPs
* in the system.
*
- * The @cb is a user defined callback and @v is a 64
- * bit value passed to and relevant for processing in the
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
* @cb. An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp.
*
@@ -2712,7 +2712,7 @@ EXPORT_SYMBOL(rvt_qp_iter_init);
/**
* rvt_qp_iter_next - return the next QP in iter
- * @iter - the iterator
+ * @iter: the iterator
*
* Fine grained QP iterator suitable for use
* with debugfs seq_file mechanisms.
@@ -2775,14 +2775,14 @@ EXPORT_SYMBOL(rvt_qp_iter_next);
/**
* rvt_qp_iter - iterate all QPs
- * @rdi - rvt devinfo
- * @v - a 64 bit value
- * @cb - a callback
+ * @rdi: rvt devinfo
+ * @v: a 64-bit value
+ * @cb: a callback
*
* This provides a way for iterating all QPs.
*
- * The @cb is a user defined callback and @v is a 64
- * bit value passed to and relevant for processing in the
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
* cb. An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp.
*
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 18da1e1ea979..986265ad6e79 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -683,9 +683,10 @@ EXPORT_SYMBOL(rvt_unregister_device);
/**
* rvt_init_port - init internal data for driver port
- * @rdi: rvt dev strut
+ * @rdi: rvt_dev_info struct
* @port: rvt port
* @port_index: 0 based index of ports, different from IB core port num
+ * @pkey_table: pkey_table for @port
*
* Keep track of a list of ports. No need to have a detach port.
* They persist until the driver goes away.
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index a8c11b5e1e94..0946a301a5c5 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,12 +77,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
{
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
- rxe->attr.fw_ver = RXE_FW_VER;
rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
- rxe->attr.vendor_id = RXE_VENDOR_ID;
- rxe->attr.vendor_part_id = RXE_VENDOR_PART_ID;
- rxe->attr.hw_ver = RXE_HW_VER;
rxe->attr.max_qp = RXE_MAX_QP;
rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
@@ -94,22 +90,13 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_mr = RXE_MAX_MR;
rxe->attr.max_pd = RXE_MAX_PD;
rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
- rxe->attr.max_ee_rd_atom = RXE_MAX_EE_RD_ATOM;
rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
- rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
rxe->attr.atomic_cap = IB_ATOMIC_HCA;
- rxe->attr.max_ee = RXE_MAX_EE;
- rxe->attr.max_rdd = RXE_MAX_RDD;
- rxe->attr.max_mw = RXE_MAX_MW;
- rxe->attr.max_raw_ipv6_qp = RXE_MAX_RAW_IPV6_QP;
- rxe->attr.max_raw_ethy_qp = RXE_MAX_RAW_ETHY_QP;
rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
rxe->attr.max_ah = RXE_MAX_AH;
- rxe->attr.max_fmr = RXE_MAX_FMR;
- rxe->attr.max_map_per_fmr = RXE_MAX_MAP_PER_FMR;
rxe->attr.max_srq = RXE_MAX_SRQ;
rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index ea6a819b7167..35a2baf2f364 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(udata, start, length, access, 0);
+ umem = ib_umem_get(udata, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index fe5207386700..353c6668249e 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -60,12 +60,8 @@ static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
/* default/initial rxe device parameter settings */
enum rxe_device_param {
- RXE_FW_VER = 0,
RXE_MAX_MR_SIZE = -1ull,
RXE_PAGE_SIZE_CAP = 0xfffff000,
- RXE_VENDOR_ID = 0,
- RXE_VENDOR_PART_ID = 0,
- RXE_HW_VER = 0,
RXE_MAX_QP = 0x10000,
RXE_MAX_QP_WR = 0x4000,
RXE_MAX_INLINE_DATA = 400,
@@ -87,21 +83,12 @@ enum rxe_device_param {
RXE_MAX_MR = 256 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
- RXE_MAX_EE_RD_ATOM = 0,
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
- RXE_MAX_EE_INIT_RD_ATOM = 0,
- RXE_MAX_EE = 0,
- RXE_MAX_RDD = 0,
- RXE_MAX_MW = 0,
- RXE_MAX_RAW_IPV6_QP = 0,
- RXE_MAX_RAW_ETHY_QP = 0,
RXE_MAX_MCAST_GRP = 8192,
RXE_MAX_MCAST_QP_ATTACH = 56,
RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
RXE_MAX_AH = 100,
- RXE_MAX_FMR = 0,
- RXE_MAX_MAP_PER_FMR = 0,
RXE_MAX_SRQ = 960,
RXE_MAX_SRQ_WR = 0x4000,
RXE_MIN_SRQ_WR = 1,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 623129f27f5a..9dd4bd7aea92 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -106,6 +106,10 @@ static int rxe_modify_device(struct ib_device *dev,
{
struct rxe_dev *rxe = to_rdev(dev);
+ if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC))
+ return -EOPNOTSUPP;
+
if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
@@ -1171,6 +1175,9 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
dev->dev.dma_ops = &dma_virt_ops;
+ dev->dev.dma_parms = &rxe->dma_parms;
+ rxe->dma_parms = (struct device_dma_parameters)
+ { .max_segment_size = SZ_2G };
dma_coerce_mask_and_coherent(&dev->dev,
dma_get_required_mask(&dev->dev));
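
The two rxe_verbs.c hunks above add input validation to modify_device and give the soft device a dma_parms structure so the DMA layer sees a 2 GiB maximum segment size. Below is a hedged sketch of the same pattern for a generic software RDMA device; soft_dev and its helpers are invented names, while the IB_DEVICE_MODIFY_* flags and struct device_dma_parameters are the real core definitions used in the patch:

#include <rdma/ib_verbs.h>
#include <linux/sizes.h>

struct soft_dev {
	struct ib_device ib_dev;
	struct device_dma_parameters dma_parms;
};

static int soft_modify_device(struct ib_device *dev, int mask,
			      struct ib_device_modify *attr)
{
	/* Refuse mask bits the driver does not implement. */
	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	/* ... apply the supported modifications ... */
	return 0;
}

static void soft_init_dma_parms(struct soft_dev *sdev)
{
	sdev->ib_dev.dev.dma_parms = &sdev->dma_parms;
	sdev->dma_parms = (struct device_dma_parameters)
		{ .max_segment_size = SZ_2G };
}
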
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 5c4b2239129c..95834206c80c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -384,6 +384,7 @@ struct rxe_port {
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
+ struct device_dma_parameters dma_parms;
int max_ucontext;
int max_inline_data;
struct mutex usdev_lock;
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index dba4535494ab..b939f489cd46 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -70,6 +70,7 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
+ struct device_dma_parameters dma_parms;
struct net_device *netdev;
struct siw_dev_cap attrs;
@@ -98,18 +99,9 @@ struct siw_device {
struct work_struct netdev_down;
};
-struct siw_uobj {
- void *addr;
- u32 size;
-};
-
struct siw_ucontext {
struct ib_ucontext base_ucontext;
struct siw_device *sdev;
-
- /* xarray of user mappable objects */
- struct xarray xa;
- u32 uobj_nextkey;
};
/*
@@ -149,8 +141,6 @@ struct siw_pbl {
struct siw_pble pbe[1];
};
-struct siw_mr;
-
/*
* Generic memory representation for registered siw memory.
* Memory lookup always via higher 24 bit of STag (STag index).
@@ -220,7 +210,7 @@ struct siw_cq {
u32 cq_get;
u32 num_cqe;
bool kernel_verbs;
- u32 xa_cq_index; /* mmap information for CQE array */
+ struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
u32 id; /* For debugging only */
};
@@ -263,7 +253,7 @@ struct siw_srq {
u32 rq_put;
u32 rq_get;
u32 num_rqe; /* max # of wqe's allowed */
- u32 xa_srq_index; /* mmap information for SRQ array */
+ struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
char armed; /* inform user if limit hit */
char kernel_verbs; /* '1' if kernel client */
};
@@ -477,8 +467,8 @@ struct siw_qp {
u8 layer : 4, etype : 4;
u8 ecode;
} term_info;
- u32 xa_sq_index; /* mmap information for SQE array */
- u32 xa_rq_index; /* mmap information for RQE array */
+ struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
+ struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
struct rcu_head rcu;
};
@@ -503,6 +493,11 @@ struct iwarp_msg_info {
int (*rx_data)(struct siw_qp *qp);
};
+struct siw_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ void *address;
+};
+
/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
@@ -607,6 +602,12 @@ static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
return container_of(base_mr, struct siw_mr, base_mr);
}
+static inline struct siw_user_mmap_entry *
+to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
+{
+ return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
+}
+
static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
struct siw_qp *qp;
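
siw now tracks user-mappable queue memory with the core rdma_user_mmap_entry machinery instead of a private xarray of siw_uobj keys. The pattern, sketched generically below, is to embed the core entry in a driver wrapper, recover the wrapper with container_of(), and kfree() it from the mmap_free callback; the drv_ names are placeholders, the embedded type is the real core one:

#include <rdma/ib_verbs.h>
#include <linux/slab.h>

struct drv_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* core bookkeeping */
	void *address;				/* driver payload */
};

static inline struct drv_user_mmap_entry *
to_drv_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct drv_user_mmap_entry,
			    rdma_entry);
}

static void drv_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	kfree(to_drv_mmap_entry(rdma_entry));
}
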
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 8c1931a57f4a..3bccfef40e7e 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1373,22 +1373,8 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
rv = -EINVAL;
goto error;
}
- if (v4)
- siw_dbg_qp(qp,
- "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
- pd_len,
- &((struct sockaddr_in *)(laddr))->sin_addr,
- ntohs(((struct sockaddr_in *)(laddr))->sin_port),
- &((struct sockaddr_in *)(raddr))->sin_addr,
- ntohs(((struct sockaddr_in *)(raddr))->sin_port));
- else
- siw_dbg_qp(qp,
- "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
- pd_len,
- &((struct sockaddr_in6 *)(laddr))->sin6_addr,
- ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
- &((struct sockaddr_in6 *)(raddr))->sin6_addr,
- ntohs(((struct sockaddr_in6 *)(raddr))->sin6_port));
+ siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
+ raddr);
rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
@@ -1867,14 +1853,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
- if (addr_family == AF_INET)
- siw_dbg(id->device, "Listen at laddr %pI4 %u\n",
- &(((struct sockaddr_in *)laddr)->sin_addr),
- ((struct sockaddr_in *)laddr)->sin_port);
- else
- siw_dbg(id->device, "Listen at laddr %pI6 %u\n",
- &(((struct sockaddr_in6 *)laddr)->sin6_addr),
- ((struct sockaddr_in6 *)laddr)->sin6_port);
+ siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
return 0;
@@ -1935,7 +1914,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
/*
* siw_create_listen - Create resources for a listener's IWCM ID @id
*
- * Listens on the socket addresses id->local_addr and id->remote_addr.
+ * Listens on the socket address id->local_addr.
*
* If the listener's @id provides a specific local IP address, at most one
* listening socket is created and associated with @id.
@@ -1959,7 +1938,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
*/
if (id->local_addr.ss_family == AF_INET) {
struct in_device *in_dev = in_dev_get(dev);
- struct sockaddr_in s_laddr, *s_raddr;
+ struct sockaddr_in s_laddr;
const struct in_ifaddr *ifa;
if (!in_dev) {
@@ -1967,12 +1946,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
goto out;
}
memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
- s_raddr = (struct sockaddr_in *)&id->remote_addr;
- siw_dbg(id->device,
- "laddr %pI4:%d, raddr %pI4:%d\n",
- &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
- &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
+ siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
rtnl_lock();
in_dev_for_each_ifa_rtnl(ifa, in_dev) {
@@ -1992,17 +1967,13 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
} else if (id->local_addr.ss_family == AF_INET6) {
struct inet6_dev *in6_dev = in6_dev_get(dev);
struct inet6_ifaddr *ifp;
- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
- *s_raddr = &to_sockaddr_in6(id->remote_addr);
+ struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr);
if (!in6_dev) {
rv = -ENODEV;
goto out;
}
- siw_dbg(id->device,
- "laddr %pI6:%d, raddr %pI6:%d\n",
- &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
- &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
+ siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
rtnl_lock();
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 05a92f997f60..c147f0613d95 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_netlink.h>
@@ -248,24 +249,6 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
return NULL;
}
-static void siw_verbs_sq_flush(struct ib_qp *base_qp)
-{
- struct siw_qp *qp = to_siw_qp(base_qp);
-
- down_write(&qp->state_lock);
- siw_sq_flush(qp);
- up_write(&qp->state_lock);
-}
-
-static void siw_verbs_rq_flush(struct ib_qp *base_qp)
-{
- struct siw_qp *qp = to_siw_qp(base_qp);
-
- down_write(&qp->state_lock);
- siw_rq_flush(qp);
- up_write(&qp->state_lock);
-}
-
static const struct ib_device_ops siw_device_ops = {
.owner = THIS_MODULE,
.uverbs_abi_ver = SIW_ABI_VERSION,
@@ -284,8 +267,6 @@ static const struct ib_device_ops siw_device_ops = {
.destroy_cq = siw_destroy_cq,
.destroy_qp = siw_destroy_qp,
.destroy_srq = siw_destroy_srq,
- .drain_rq = siw_verbs_rq_flush,
- .drain_sq = siw_verbs_sq_flush,
.get_dma_mr = siw_get_dma_mr,
.get_port_immutable = siw_get_port_immutable,
.iw_accept = siw_accept,
@@ -298,6 +279,7 @@ static const struct ib_device_ops siw_device_ops = {
.iw_rem_ref = siw_qp_put_ref,
.map_mr_sg = siw_map_mr_sg,
.mmap = siw_mmap,
+ .mmap_free = siw_mmap_free,
.modify_qp = siw_verbs_modify_qp,
.modify_srq = siw_modify_srq,
.poll_cq = siw_poll_cq,
@@ -350,15 +332,19 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->netdev = netdev;
if (netdev->type != ARPHRD_LOOPBACK) {
- memcpy(&base_dev->node_guid, netdev->dev_addr, 6);
+ addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+ netdev->dev_addr);
} else {
/*
* The loopback device does not have a HW address,
* but connection mangagement lib expects gid != 0
*/
- size_t gidlen = min_t(size_t, strlen(base_dev->name), 6);
+ size_t len = min_t(size_t, strlen(base_dev->name), 6);
+ char addr[6] = { };
- memcpy(&base_dev->node_guid, base_dev->name, gidlen);
+ memcpy(addr, base_dev->name, len);
+ addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+ addr);
}
base_dev->uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -397,6 +383,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
base_dev->phys_port_cnt = 1;
base_dev->dev.parent = parent;
base_dev->dev.dma_ops = &dma_virt_ops;
+ base_dev->dev.dma_parms = &sdev->dma_parms;
+ sdev->dma_parms = (struct device_dma_parameters)
+ { .max_segment_size = SZ_2G };
base_dev->num_comp_vectors = num_possible_cpus();
ib_set_device_ops(base_dev, &siw_device_ops);
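
With this hunk siw derives its 64-bit node GUID from the interface MAC via addrconf_addr_eui48(), matching what rxe already does, and synthesizes a pseudo MAC from the device name for loopback. As far as I can tell that helper performs the standard EUI-48 to modified EUI-64 expansion; a small stand-alone sketch of that mapping:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Expand a 48-bit MAC into a modified EUI-64: insert FF-FE in the
 * middle and flip the universal/local bit. This mirrors what
 * addrconf_addr_eui48() is understood to do in the kernel. */
static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
	memcpy(eui, mac, 3);
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	memcpy(eui + 5, mac + 3, 3);
	eui[0] ^= 0x02;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	uint8_t eui[8];
	int i;

	mac_to_eui64(eui, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
	return 0;
}
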
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index b18a677832e1..5fd6d6499b3d 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -34,44 +34,19 @@ static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
[IB_QPS_ERR] = "ERR"
};
-static u32 siw_create_uobj(struct siw_ucontext *uctx, void *vaddr, u32 size)
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
- struct siw_uobj *uobj;
- struct xa_limit limit = XA_LIMIT(0, SIW_UOBJ_MAX_KEY);
- u32 key;
+ struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);
- uobj = kzalloc(sizeof(*uobj), GFP_KERNEL);
- if (!uobj)
- return SIW_INVAL_UOBJ_KEY;
-
- if (xa_alloc_cyclic(&uctx->xa, &key, uobj, limit, &uctx->uobj_nextkey,
- GFP_KERNEL) < 0) {
- kfree(uobj);
- return SIW_INVAL_UOBJ_KEY;
- }
- uobj->size = PAGE_ALIGN(size);
- uobj->addr = vaddr;
-
- return key;
-}
-
-static struct siw_uobj *siw_get_uobj(struct siw_ucontext *uctx,
- unsigned long off, u32 size)
-{
- struct siw_uobj *uobj = xa_load(&uctx->xa, off);
-
- if (uobj && uobj->size == size)
- return uobj;
-
- return NULL;
+ kfree(entry);
}
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
struct siw_ucontext *uctx = to_siw_ctx(ctx);
- struct siw_uobj *uobj;
- unsigned long off = vma->vm_pgoff;
- int size = vma->vm_end - vma->vm_start;
+ size_t size = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct siw_user_mmap_entry *entry;
int rv = -EINVAL;
/*
@@ -79,18 +54,25 @@ int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
*/
if (vma->vm_start & (PAGE_SIZE - 1)) {
pr_warn("siw: mmap not page aligned\n");
- goto out;
+ return -EINVAL;
}
- uobj = siw_get_uobj(uctx, off, size);
- if (!uobj) {
- siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %u\n",
- off, size);
+ rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
+ if (!rdma_entry) {
+ siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
+ vma->vm_pgoff, size);
+ return -EINVAL;
+ }
+ entry = to_siw_mmap_entry(rdma_entry);
+
+ rv = remap_vmalloc_range(vma, entry->address, 0);
+ if (rv) {
+ pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
+ size);
goto out;
}
- rv = remap_vmalloc_range(vma, uobj->addr, 0);
- if (rv)
- pr_warn("remap_vmalloc_range failed: %lu, %u\n", off, size);
out:
+ rdma_user_mmap_entry_put(rdma_entry);
+
return rv;
}
@@ -105,8 +87,6 @@ int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
rv = -ENOMEM;
goto err_out;
}
- xa_init_flags(&ctx->xa, XA_FLAGS_ALLOC);
- ctx->uobj_nextkey = 0;
ctx->sdev = sdev;
uresp.dev_id = sdev->vendor_part_id;
@@ -135,19 +115,7 @@ err_out:
void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
struct siw_ucontext *uctx = to_siw_ctx(base_ctx);
- void *entry;
- unsigned long index;
- /*
- * Make sure all user mmap objects are gone. Since QP, CQ
- * and SRQ destroy routines destroy related objects, nothing
- * should be found here.
- */
- xa_for_each(&uctx->xa, index, entry) {
- kfree(xa_erase(&uctx->xa, index));
- pr_warn("siw: dropping orphaned uobj at %lu\n", index);
- }
- xa_destroy(&uctx->xa);
atomic_dec(&uctx->sdev->num_ctx);
}
@@ -293,6 +261,33 @@ void siw_qp_put_ref(struct ib_qp *base_qp)
siw_qp_put(to_siw_qp(base_qp));
}
+static struct rdma_user_mmap_entry *
+siw_mmap_entry_insert(struct siw_ucontext *uctx,
+ void *address, size_t length,
+ u64 *offset)
+{
+ struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int rv;
+
+ *offset = SIW_INVAL_UOBJ_KEY;
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+
+ rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
+ &entry->rdma_entry,
+ length);
+ if (rv) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
/*
* siw_create_qp()
*
@@ -317,6 +312,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
+ size_t length;
siw_dbg(base_dev, "create new QP\n");
@@ -380,8 +376,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->orq_lock);
qp->kernel_verbs = !udata;
- qp->xa_sq_index = SIW_INVAL_UOBJ_KEY;
- qp->xa_rq_index = SIW_INVAL_UOBJ_KEY;
rv = siw_qp_add(sdev, qp);
if (rv)
@@ -458,22 +452,27 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
uresp.qp_id = qp_id(qp);
if (qp->sendq) {
- qp->xa_sq_index =
- siw_create_uobj(uctx, qp->sendq,
- num_sqe * sizeof(struct siw_sqe));
+ length = num_sqe * sizeof(struct siw_sqe);
+ qp->sq_entry =
+ siw_mmap_entry_insert(uctx, qp->sendq,
+ length, &uresp.sq_key);
+ if (!qp->sq_entry) {
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
}
+
if (qp->recvq) {
- qp->xa_rq_index =
- siw_create_uobj(uctx, qp->recvq,
- num_rqe * sizeof(struct siw_rqe));
- }
- if (qp->xa_sq_index == SIW_INVAL_UOBJ_KEY ||
- qp->xa_rq_index == SIW_INVAL_UOBJ_KEY) {
- rv = -ENOMEM;
- goto err_out_xa;
+ length = num_rqe * sizeof(struct siw_rqe);
+ qp->rq_entry =
+ siw_mmap_entry_insert(uctx, qp->recvq,
+ length, &uresp.rq_key);
+ if (!qp->rq_entry) {
+ uresp.sq_key = SIW_INVAL_UOBJ_KEY;
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
}
- uresp.sq_key = qp->xa_sq_index << PAGE_SHIFT;
- uresp.rq_key = qp->xa_rq_index << PAGE_SHIFT;
if (udata->outlen < sizeof(uresp)) {
rv = -EINVAL;
@@ -501,11 +500,10 @@ err_out:
kfree(siw_base_qp);
if (qp) {
- if (qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
- if (qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_rq_index));
-
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
vfree(qp->sendq);
vfree(qp->recvq);
kfree(qp);
@@ -618,10 +616,10 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
qp->attrs.flags |= SIW_QP_IN_DESTROY;
qp->rx_stream.rx_suspend = 1;
- if (uctx && qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
- if (uctx && qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_rq_index));
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
down_write(&qp->state_lock);
@@ -685,6 +683,47 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
return bytes;
}
+/* Complete SQ WRs without processing */
+static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct siw_sqe sqe = {};
+ int rv = 0;
+
+ while (wr) {
+ sqe.id = wr->wr_id;
+ sqe.opcode = wr->opcode;
+ rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
+/* Complete RQ WRs without processing */
+static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct siw_rqe rqe = {};
+ int rv = 0;
+
+ while (wr) {
+ rqe.id = wr->wr_id;
+ rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
/*
* siw_post_send()
*
@@ -703,26 +742,54 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
unsigned long flags;
int rv = 0;
+ if (wr && !qp->kernel_verbs) {
+ siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
/*
* Try to acquire QP state lock. Must be non-blocking
* to accommodate kernel clients needs.
*/
if (!down_read_trylock(&qp->state_lock)) {
- *bad_wr = wr;
- siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state);
- return -ENOTCONN;
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_sq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
}
if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. SQ is guaranteed to
+ * be empty, so the WR completes in order.
+ *
+ * Typically triggered by ib_drain_sq().
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
up_read(&qp->state_lock);
- *bad_wr = wr;
- siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state);
- return -ENOTCONN;
- }
- if (wr && !qp->kernel_verbs) {
- siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
- up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ return rv;
}
spin_lock_irqsave(&qp->sq_lock, flags);
@@ -917,24 +984,54 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
*bad_wr = wr;
return -EOPNOTSUPP; /* what else from errno.h? */
}
+ if (!qp->kernel_verbs) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
/*
* Try to acquire QP state lock. Must be non-blocking
* to accommodate kernel clients needs.
*/
if (!down_read_trylock(&qp->state_lock)) {
- *bad_wr = wr;
- return -ENOTCONN;
- }
- if (!qp->kernel_verbs) {
- siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
- up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_rq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
}
if (qp->attrs.state > SIW_QP_STATE_RTS) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. RQ is guaranteed to
+ * be empty, so the WR completes in order.
+ *
+ * Typically triggered by ib_drain_rq().
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ return rv;
}
/*
* Serialize potentially multiple producers.
@@ -991,8 +1088,8 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
siw_cq_flush(cq);
- if (ctx && cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
atomic_dec(&sdev->num_cq);
@@ -1029,7 +1126,6 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
size = roundup_pow_of_two(size);
cq->base_cq.cqe = size;
cq->num_cqe = size;
- cq->xa_cq_index = SIW_INVAL_UOBJ_KEY;
if (!udata) {
cq->kernel_verbs = 1;
@@ -1055,16 +1151,17 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
+ size_t length = size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl);
- cq->xa_cq_index =
- siw_create_uobj(ctx, cq->queue,
- size * sizeof(struct siw_cqe) +
- sizeof(struct siw_cq_ctrl));
- if (cq->xa_cq_index == SIW_INVAL_UOBJ_KEY) {
+ cq->cq_entry =
+ siw_mmap_entry_insert(ctx, cq->queue,
+ length, &uresp.cq_key);
+ if (!cq->cq_entry) {
rv = -ENOMEM;
goto err_out;
}
- uresp.cq_key = cq->xa_cq_index << PAGE_SHIFT;
+
uresp.cq_id = cq->id;
uresp.num_cqe = size;
@@ -1085,8 +1182,8 @@ err_out:
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- if (cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
vfree(cq->queue);
}
atomic_dec(&sdev->num_cq);
@@ -1490,7 +1587,6 @@ int siw_create_srq(struct ib_srq *base_srq,
}
srq->max_sge = attrs->max_sge;
srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
- srq->xa_srq_index = SIW_INVAL_UOBJ_KEY;
srq->limit = attrs->srq_limit;
if (srq->limit)
srq->armed = 1;
@@ -1509,15 +1605,16 @@ int siw_create_srq(struct ib_srq *base_srq,
}
if (udata) {
struct siw_uresp_create_srq uresp = {};
+ size_t length = srq->num_rqe * sizeof(struct siw_rqe);
- srq->xa_srq_index = siw_create_uobj(
- ctx, srq->recvq, srq->num_rqe * sizeof(struct siw_rqe));
-
- if (srq->xa_srq_index == SIW_INVAL_UOBJ_KEY) {
+ srq->srq_entry =
+ siw_mmap_entry_insert(ctx, srq->recvq,
+ length, &uresp.srq_key);
+ if (!srq->srq_entry) {
rv = -ENOMEM;
goto err_out;
}
- uresp.srq_key = srq->xa_srq_index;
+
uresp.num_rqe = srq->num_rqe;
if (udata->outlen < sizeof(uresp)) {
@@ -1536,8 +1633,8 @@ int siw_create_srq(struct ib_srq *base_srq,
err_out:
if (srq->recvq) {
- if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
}
atomic_dec(&sdev->num_srq);
@@ -1623,9 +1720,8 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
-
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
atomic_dec(&sdev->num_srq);
}
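
The post_send/post_recv changes above exist so that the generic ib_drain_sq()/ib_drain_rq() helpers work now that siw no longer overrides drain_sq/drain_rq: the core drain code moves the QP to ERROR and posts a marker WR, and the driver must complete such WRs with a flush error instead of rejecting them. A condensed, hypothetical sketch of that flush path (drv_qp and drv_complete_flushed are assumed driver helpers, not siw symbols):

#include <rdma/ib_verbs.h>

struct drv_qp;	/* hypothetical driver QP */
int drv_complete_flushed(struct drv_qp *qp, u64 wr_id,
			 enum ib_wr_opcode opcode);

/* Complete every WR on the chain with IB_WC_WR_FLUSH_ERR without
 * executing it; only legal because the QP is in its final ERROR
 * state and its queues are already empty. */
static int drv_flush_posted_wrs(struct drv_qp *qp,
				const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int rv = 0;

	while (wr) {
		rv = drv_complete_flushed(qp, wr->wr_id, wr->opcode);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}
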
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 1910869281cb..1a731989fad6 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -83,6 +83,7 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata);
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ac0583ff280d..e5f438ab716c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2019,6 +2019,15 @@ static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}
+static int ipoib_get_vf_guid(struct net_device *dev, int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
+}
+
static int ipoib_get_vf_stats(struct net_device *dev, int vf,
struct ifla_vf_stats *vf_stats)
{
@@ -2045,6 +2054,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_set_vf_link_state = ipoib_set_vf_link_state,
.ndo_get_vf_config = ipoib_get_vf_config,
.ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_get_vf_guid = ipoib_get_vf_guid,
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
.ndo_get_stats64 = ipoib_get_stats,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 2e72fc5af157..3690e28cc7ea 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -646,13 +646,14 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
if (ib_conn->pi_support) {
u32 sig_caps = ib_dev->attrs.sig_prot_cap;
+ shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
- shost->virt_boundary_mask = ~MASK_4K;
+ shost->virt_boundary_mask = SZ_4K - 1;
if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
@@ -785,7 +786,7 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
* iscsi_iser_ep_connect() - Initiate iSER connection establishment
* @shost: scsi_host
* @dst_addr: destination address
- * @non-blocking: indicate if routine can block
+ * @non_blocking: indicate if routine can block
*
* Allocate an iscsi endpoint, an iser_conn structure and bind them.
* After that start RDMA connection establishment via rdma_cm. We
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 52ce63592dcf..029c00163442 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -96,16 +96,12 @@
#define iser_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
-#define SHIFT_4K 12
-#define SIZE_4K (1ULL << SHIFT_4K)
-#define MASK_4K (~(SIZE_4K-1))
-
/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS 1024
#define ISCSI_ISER_DEF_SG_TABLESIZE \
- ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> SHIFT_4K)
+ ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
/* Maximum support is 16MB I/O size */
-#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> SHIFT_4K)
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
@@ -232,15 +228,16 @@ enum iser_desc_type {
* @iser_header: iser header
* @iscsi_header: iscsi header
* @type: command/control/dataout
- * @dam_addr: header buffer dma_address
+ * @dma_addr: header buffer dma_address
* @tx_sg: sg[0] points to iser/iscsi headers
* sg[1] optionally points to either of immediate data
* unsolicited data-out or control
* @num_sge: number sges used on this TX task
+ * @cqe: completion handler
* @mapped: Is the task header mapped
- * reg_wr: registration WR
- * send_wr: send WR
- * inv_wr: invalidate WR
+ * @reg_wr: registration WR
+ * @send_wr: send WR
+ * @inv_wr: invalidate WR
*/
struct iser_tx_desc {
struct iser_ctrl iser_header;
@@ -267,6 +264,7 @@ struct iser_tx_desc {
* @data: received data segment
* @dma_addr: receive buffer dma address
* @rx_sg: ib_sge of receive buffer
+ * @cqe: completion handler
* @pad: for sense data TODO: Modify to maximum sense length supported
*/
struct iser_rx_desc {
@@ -283,9 +281,9 @@ struct iser_rx_desc {
* struct iser_login_desc - iSER login descriptor
*
* @req: pointer to login request buffer
- * @resp: pointer to login response buffer
+ * @rsp: pointer to login response buffer
* @req_dma: DMA address of login request buffer
- * @rsp_dma: DMA address of login response buffer
+ * @rsp_dma: DMA address of login response buffer
* @sge: IB sge for login post recv
* @cqe: completion handler
*/
@@ -315,12 +313,12 @@ struct iser_comp {
};
/**
- * struct iser_device - Memory registration operations
+ * struct iser_reg_ops - Memory registration operations
* per-device registration schemes
*
* @alloc_reg_res: Allocate registration resources
* @free_reg_res: Free registration resources
- * @fast_reg_mem: Register memory buffers
+ * @reg_mem: Register memory buffers
* @unreg_mem: Un-register memory buffers
* @reg_desc_get: Get a registration descriptor for pool
* @reg_desc_put: Get a registration descriptor to pool
@@ -369,7 +367,7 @@ struct iser_device {
};
/**
- * struct iser_reg_resources - Fast registration recources
+ * struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
* @fmr_pool: pool of fmrs
@@ -402,7 +400,7 @@ struct iser_fr_desc {
};
/**
- * struct iser_fr_pool: connection fast registration pool
+ * struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
* @lock: protects fmr/fastreg pool
@@ -427,6 +425,7 @@ struct iser_fr_pool {
* @comp: iser completion context
* @fr_pool: connection fast registration poool
* @pi_support: Indicate device T10-PI support
+ * @reg_cqe: completion handler
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -467,6 +466,7 @@ struct ib_conn {
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
* @pages_per_mr: maximum pages available for registration
+ * @snd_w_inv: connection uses remote invalidation
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -525,7 +525,7 @@ struct iser_page_vec {
};
/**
- * struct iser_global: iSER global context
+ * struct iser_global - iSER global context
*
* @device_list_mutex: protects device_list
* @device_list: iser devices global list
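
The private SHIFT_4K/SIZE_4K/MASK_4K constants are dropped in favour of the generic SZ_4K and ilog2(), and the virt_boundary_mask assignment in iscsi_iser.c becomes SZ_4K - 1, which is numerically identical to the old ~MASK_4K. The resulting scatter-gather table sizes are unchanged; a tiny stand-alone check of the arithmetic (SECTOR_SIZE assumed to be 512, as in the kernel):

#include <stdio.h>

#define SECTOR_SIZE		512u
#define SZ_4K			4096u
#define ISER_DEF_MAX_SECTORS	1024u

int main(void)
{
	unsigned int shift = 12;	/* ilog2(SZ_4K) */
	unsigned int def_sg = (ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> shift;
	unsigned int max_sg = (32768u * SECTOR_SIZE) >> shift;

	/* Expect 128 and 4096, matching the old SHIFT_4K definitions. */
	printf("default sg_tablesize = %u, max sg_tablesize = %u\n",
	       def_sg, max_sg);
	return 0;
}
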
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5cbb4b3a0566..4a7045bb0831 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -358,6 +358,8 @@ static inline bool iser_signal_comp(u8 sig_count)
/**
* iser_send_command - send command PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
*/
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task)
@@ -429,6 +431,9 @@ send_command_error:
/**
* iser_send_data_out - send data out PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
+ * @hdr: pointer to the LLD's iSCSI message header
*/
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2cc89a9b9e9b..0f74dc6d12fa 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -170,7 +170,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
dev = iser_task->iser_conn->ib_conn.device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
- if (data->dma_nents == 0) {
+ if (unlikely(data->dma_nents == 0)) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
@@ -237,7 +237,7 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
int ret, plen;
page_vec->npages = 0;
- page_vec->fake_mr.page_size = SIZE_4K;
+ page_vec->fake_mr.page_size = SZ_4K;
plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
mem->dma_nents, NULL, iser_set_page);
if (unlikely(plen < mem->dma_nents)) {
@@ -451,7 +451,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
if (unlikely(n != mem->dma_nents)) {
iser_err("failed to map sg (%d/%d)\n",
n, mem->dma_nents);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a6548de0e218..1f4a37a3c2b3 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -58,12 +58,12 @@ static void iser_event_handler(struct ib_event_handler *handler,
dev_name(&event->device->dev), event->element.port_num);
}
-/**
+/*
* iser_create_device_ib_res - creates Protection Domain (PD), Completion
* Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
- * the adapator.
+ * the adaptor.
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
@@ -124,9 +124,9 @@ comps_err:
return -1;
}
-/**
+/*
* iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
- * CQ and PD created with the device associated with the adapator.
+ * CQ and PD created with the device associated with the adaptor.
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
@@ -149,8 +149,11 @@ static void iser_free_device_ib_res(struct iser_device *device)
/**
* iser_alloc_fmr_pool - Creates FMR pool and page_vector
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
*
- * returns 0 on success, or errno code on failure
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -180,7 +183,7 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
page_vec->pages = (u64 *)(page_vec + 1);
- params.page_shift = SHIFT_4K;
+ params.page_shift = ilog2(SZ_4K);
params.max_pages_per_fmr = size;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
@@ -215,6 +218,7 @@ err_frpl:
/**
* iser_free_fmr_pool - releases the FMR pool and page vec
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
@@ -295,7 +299,11 @@ static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
/**
* iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
* for fast registration work requests.
- * returns 0 on success, or errno code on failure
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
+ *
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -332,6 +340,7 @@ err:
/**
* iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
@@ -355,10 +364,10 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
fr_pool->size - i);
}
-/**
+/*
* iser_create_ib_conn_res - Queue-Pair (QP)
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
@@ -436,7 +445,7 @@ out_err:
return ret;
}
-/**
+/*
* based on the resolved device node GUID see if there already allocated
* device for this device. If there's no such, create one.
*/
@@ -487,9 +496,9 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex);
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
enum iser_conn_state comp,
enum iser_conn_state exch)
@@ -561,7 +570,8 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
}
/**
- * Frees all conn objects and deallocs conn descriptor
+ * iser_conn_release - Frees all conn objects and deallocs conn descriptor
+ * @iser_conn: iSER connection context
*/
void iser_conn_release(struct iser_conn *iser_conn)
{
@@ -595,7 +605,10 @@ void iser_conn_release(struct iser_conn *iser_conn)
}
/**
- * triggers start of the disconnect procedures and wait for them to be done
+ * iser_conn_terminate - triggers start of the disconnect procedures and
+ * waits for them to be done
+ * @iser_conn: iSER connection context
+ *
* Called with state mutex held
*/
int iser_conn_terminate(struct iser_conn *iser_conn)
@@ -632,9 +645,9 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
return 1;
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn;
@@ -670,7 +683,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
else
max_num_sg = attr->max_fast_reg_page_list_len;
- sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
+ sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
sup_sg_tablesize =
min_t(
@@ -684,9 +697,9 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
@@ -732,9 +745,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
}
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
@@ -1019,7 +1032,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
ib_conn->post_recv_buf_count += count;
ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
- if (ib_ret) {
+ if (unlikely(ib_ret)) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
ib_conn->post_recv_buf_count -= count;
} else
@@ -1030,9 +1043,12 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
/**
- * iser_start_send - Initiate a Send DTO operation
+ * iser_post_send - Initiate a Send DTO operation
+ * @ib_conn: connection RDMA resources
+ * @tx_desc: iSER TX descriptor
+ * @signal: true to send work request as SIGNALED
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal)
@@ -1060,7 +1076,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
first_wr = wr;
ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
- if (ib_ret)
+ if (unlikely(ib_ret))
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
ib_ret, wr->opcode);
@@ -1081,7 +1097,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
ret = ib_check_mr_status(desc->rsc.sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
- pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ iser_err("ib_check_mr_status failed, ret %d\n", ret);
/* Not a lot we can do, return ambiguous guard error */
*sector = 0;
return 0x1;
@@ -1093,7 +1109,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
sector_div(sector_off, sector_size + 8);
*sector = scsi_get_lba(iser_task->sc) + sector_off;
- pr_err("PI error found type %d at sector %llx "
+ iser_err("PI error found type %d at sector %llx "
"expected %x vs actual %x\n",
mr_status.sig_err.err_type,
(unsigned long long)*sector,
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index 43ac61ffef4a..6dbc08e1a6a6 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -70,7 +70,7 @@
struct opa_vnic_adapter;
-/**
+/*
* struct __opa_vesw_info - OPA vnic virtual switch info
*
* Same as opa_vesw_info without bitwise attribute.
@@ -96,7 +96,7 @@ struct __opa_vesw_info {
u8 rsvd4[2];
} __packed;
-/**
+/*
* struct __opa_per_veswport_info - OPA vnic per port info
*
* Same as opa_per_veswport_info without bitwise attribute.
@@ -136,7 +136,7 @@ struct __opa_per_veswport_info {
u8 rsvd3[8];
} __packed;
-/**
+/*
* struct __opa_veswport_info - OPA vnic port info
*
* Same as opa_veswport_info without bitwise attribute.
@@ -146,7 +146,7 @@ struct __opa_veswport_info {
struct __opa_per_veswport_info vport;
};
-/**
+/*
* struct __opa_veswport_trap - OPA vnic trap info
*
* Same as opa_veswport_trap without bitwise attribute.
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b5960351bec0..b7f7a5f7bd98 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -174,9 +174,9 @@ static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
int tmo = *(int *)kp->arg;
if (tmo >= 0)
- return sprintf(buffer, "%d", tmo);
+ return sprintf(buffer, "%d\n", tmo);
else
- return sprintf(buffer, "off");
+ return sprintf(buffer, "off\n");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
@@ -352,11 +352,11 @@ static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
init_completion(&ch->done);
ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
- (struct sockaddr *)&target->rdma_cm.src : NULL,
- (struct sockaddr *)&target->rdma_cm.dst,
+ &target->rdma_cm.src.sa : NULL,
+ &target->rdma_cm.dst.sa,
SRP_PATH_REC_TIMEOUT_MS);
if (ret) {
- pr_err("No route available from %pIS to %pIS (%d)\n",
+ pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
&target->rdma_cm.src, &target->rdma_cm.dst, ret);
goto out;
}
@@ -366,7 +366,7 @@ static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
ret = ch->status;
if (ret) {
- pr_err("Resolving address %pIS failed (%d)\n",
+ pr_err("Resolving address %pISpsc failed (%d)\n",
&target->rdma_cm.dst, ret);
goto out;
}
@@ -552,6 +552,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
+ const struct ib_device_attr *attr = &dev->dev->attrs;
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
@@ -583,12 +584,14 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->cap.max_send_wr = m * target->queue_size;
init_attr->cap.max_recv_wr = target->queue_size + 1;
init_attr->cap.max_recv_sge = 1;
- init_attr->cap.max_send_sge = SRP_MAX_SGE;
+ init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr->qp_type = IB_QPT_RC;
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
+ ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
+
if (target->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
qp = ch->rdma_cm.cm_id->qp;
@@ -1362,7 +1365,8 @@ static void srp_terminate_io(struct srp_rport *rport)
}
/* Calculate maximum initiator to target information unit length. */
-static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
+ uint32_t max_it_iu_size)
{
uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
sizeof(struct srp_indirect_buf) +
@@ -1372,6 +1376,11 @@ static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
srp_max_imm_data);
+ if (max_it_iu_size)
+ max_iu_len = min(max_iu_len, max_it_iu_size);
+
+ pr_debug("max_iu_len = %d\n", max_iu_len);
+
return max_iu_len;
}
@@ -1389,7 +1398,8 @@ static int srp_rport_reconnect(struct srp_rport *rport)
struct srp_target_port *target = rport->lld_data;
struct srp_rdma_ch *ch;
uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
- srp_use_imm_data);
+ srp_use_imm_data,
+ target->max_it_iu_size);
int i, j, ret = 0;
bool multich = false;
@@ -1838,7 +1848,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
return -EIO;
if (ch->use_imm_data &&
- count <= SRP_MAX_IMM_SGE &&
+ count <= ch->max_imm_sge &&
SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
scmnd->sc_data_direction == DMA_TO_DEVICE) {
struct srp_imm_buf *buf;
@@ -2538,7 +2548,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
- ch->use_imm_data);
+ ch->use_imm_data,
+ target->max_it_iu_size);
WARN_ON_ONCE(ch->max_it_iu_len >
be32_to_cpu(lrsp->max_it_iu_len));
@@ -3411,6 +3422,7 @@ enum {
SRP_OPT_IP_SRC = 1 << 15,
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
+ SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
};
static unsigned int srp_opt_mandatory[] = {
@@ -3443,6 +3455,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
+ { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
{ SRP_OPT_ERR, NULL }
};
@@ -3736,6 +3749,14 @@ static int srp_parse_options(struct net *net, const char *buf,
target->tl_retry_count = token;
break;
+ case SRP_OPT_MAX_IT_IU_SIZE:
+ if (match_int(args, &token) || token < 0) {
+ pr_warn("bad maximum initiator to target IU size '%s'\n", p);
+ goto out;
+ }
+ target->max_it_iu_size = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -3887,7 +3908,9 @@ static ssize_t srp_create_target(struct device *dev,
target->mr_per_cmd = mr_per_cmd;
target->indirect_size = target->sg_tablesize *
sizeof (struct srp_direct_buf);
- max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data);
+ max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+ srp_use_imm_data,
+ target->max_it_iu_size);
INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
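
The srp_create_target path above now feeds the new max_it_iu_size= login option into srp_max_it_iu_len(), which clamps the computed initiator-to-target IU length. The clamp itself is simple; a minimal stand-alone sketch, with the base length left symbolic since it depends on driver-internal structure sizes:

#include <stdint.h>

/* A value of 0 means no limit was requested on the login string. */
static uint32_t clamp_it_iu_len(uint32_t computed_len,
				uint32_t max_it_iu_size)
{
	if (max_it_iu_size && computed_len > max_it_iu_size)
		return max_it_iu_size;
	return computed_len;
}
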
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index b2861cd2087a..5359ece561ca 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,6 +161,7 @@ struct srp_rdma_ch {
};
uint32_t max_it_iu_len;
uint32_t max_ti_iu_len;
+ u8 max_imm_sge;
bool use_imm_data;
/* Everything above this point is used in the hot path of
@@ -209,6 +210,7 @@ struct srp_target_port {
u32 ch_count;
u32 lkey;
enum srp_target_state state;
+ uint32_t max_it_iu_size;
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
@@ -245,11 +247,13 @@ struct srp_target_port {
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
+ struct sockaddr sa;
struct sockaddr_storage ss;
} src;
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
+ struct sockaddr sa;
struct sockaddr_storage ss;
} dst;
bool src_specified;
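The extra "sa" member added to the src/dst unions above lets callers hand the address to generic struct sockaddr consumers without a cast. The same idiom in a standalone form (user-space, illustrative only, not kernel code):

/* Standalone illustration of the union idiom above. */
#include <netinet/in.h>
#include <sys/socket.h>

union inet_addr {
	struct sockaddr_in	ip4;
	struct sockaddr_in6	ip6;
	struct sockaddr		sa;	/* generic view, avoids casts */
	struct sockaddr_storage	ss;	/* guarantees size/alignment */
};

static sa_family_t addr_family(const union inet_addr *addr)
{
	return addr->sa.sa_family;
}

int main(void)
{
	union inet_addr a = { .ip4 = { .sin_family = AF_INET } };

	return addr_family(&a) == AF_INET ? 0 : 1;
}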
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e25c70a56be6..23c782e3d49a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -556,34 +556,41 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_port_attr port_attr;
int ret;
- memset(&port_modify, 0, sizeof(port_modify));
- port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- port_modify.clr_port_cap_mask = 0;
-
- ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
- if (ret)
- goto err_mod_port;
-
ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
if (ret)
- goto err_query_port;
+ return ret;
sport->sm_lid = port_attr.sm_lid;
sport->lid = port_attr.lid;
ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
if (ret)
- goto err_query_port;
+ return ret;
- sport->port_guid_wwn.priv = sport;
- srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+ sport->port_guid_id.wwn.priv = sport;
+ srpt_format_guid(sport->port_guid_id.name,
+ sizeof(sport->port_guid_id.name),
&sport->gid.global.interface_id);
- sport->port_gid_wwn.priv = sport;
- snprintf(sport->port_gid, sizeof(sport->port_gid),
+ sport->port_gid_id.wwn.priv = sport;
+ snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
+ if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
+ return 0;
+
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ port_modify.clr_port_cap_mask = 0;
+
+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+ if (ret) {
+ pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port, ret);
+ return 0;
+ }
+
if (!sport->mad_agent) {
memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -599,23 +606,14 @@ static int srpt_refresh_port(struct srpt_port *sport)
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(sport->mad_agent)) {
- ret = PTR_ERR(sport->mad_agent);
+ pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ PTR_ERR(sport->mad_agent));
sport->mad_agent = NULL;
- goto err_query_port;
}
}
return 0;
-
-err_query_port:
-
- port_modify.set_port_cap_mask = 0;
- port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-
-err_mod_port:
-
- return ret;
}
/**
@@ -1364,9 +1362,11 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx, u64 tag,
int status)
{
+ struct se_cmd *cmd = &ioctx->cmd;
struct srp_rsp *srp_rsp;
const u8 *sense_data;
int sense_data_len, max_sense_len;
+ u32 resid = cmd->residual_count;
/*
* The lowest bit of all SAM-3 status codes is zero (see also
@@ -1388,6 +1388,28 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
srp_rsp->tag = tag;
srp_rsp->status = status;
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an underflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an underflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an overflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an overflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ }
+
if (sense_data_len) {
BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
@@ -1931,41 +1953,22 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
-{
- struct srpt_nexus *nexus;
- struct srpt_rdma_ch *ch2;
- bool res = true;
-
- rcu_read_lock();
- list_for_each_entry(nexus, &sport->nexus_list, entry) {
- list_for_each_entry(ch2, &nexus->ch_list, list) {
- if (ch2 == ch) {
- res = false;
- goto done;
- }
- }
- }
-done:
- rcu_read_unlock();
-
- return res;
-}
-
/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
+ DECLARE_COMPLETION_ONSTACK(closed);
struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
+ ch->closed = &closed;
+
mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
mutex_unlock(&sport->mutex);
- while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
- 5 * HZ) == 0)
+ while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
@@ -2045,10 +2048,17 @@ static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
__srpt_close_all_ch(sport);
}
+static void srpt_drop_sport_ref(struct srpt_port *sport)
+{
+ if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
+ complete(sport->freed_channels);
+}
+
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+ srpt_drop_sport_ref(ch->sport);
kfree_rcu(ch, rcu);
}
@@ -2092,6 +2102,9 @@ static void srpt_release_channel_work(struct work_struct *w)
list_del_rcu(&ch->list);
mutex_unlock(&sport->mutex);
+ if (ch->closed)
+ complete(ch->closed);
+
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2106,8 +2119,6 @@ static void srpt_release_channel_work(struct work_struct *w)
kmem_cache_destroy(ch->req_buf_cache);
- wake_up(&sport->ch_releaseQ);
-
kref_put(&ch->kref, srpt_free_ch);
}
@@ -2144,6 +2155,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
char i_port_id[36];
u32 it_iu_len;
int i, tag_num, tag_size, ret;
+ struct srpt_tpg *stpg;
WARN_ON_ONCE(irqs_disabled());
@@ -2296,23 +2308,38 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
be64_to_cpu(*(__be64 *)nexus->i_port_id),
be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
- pr_debug("registering session %s\n", ch->sess_name);
+ pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
+ i_port_id);
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
- if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_setup_session(&sport->port_guid_tpg, tag_num,
+
+ mutex_lock(&sport->port_guid_id.mutex);
+ list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
- if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ }
+ mutex_unlock(&sport->port_guid_id.mutex);
+
+ mutex_lock(&sport->port_gid_id.mutex);
+ list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
- /* Retry without leading "0x" */
- if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->port_gid_id.mutex);
+
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
ret = PTR_ERR(ch->sess);
@@ -2325,6 +2352,12 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
goto destroy_ib;
}
+ /*
+ * Once a session has been created, destruction of srpt_rdma_ch objects
+ * will decrement sport->refcount. Hence increment sport->refcount now.
+ */
+ atomic_inc(&sport->refcount);
+
mutex_lock(&sport->mutex);
if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
@@ -2505,6 +2538,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
struct srpt_device *sdev;
struct srp_login_req req;
const struct srp_login_req_rdma *req_rdma;
+ struct sa_path_rec *path_rec = cm_id->route.path_rec;
char src_addr[40];
sdev = ib_get_client_data(cm_id->device, &srpt_client);
@@ -2530,7 +2564,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
&cm_id->route.addr.src_addr);
return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
- cm_id->route.path_rec->pkey, &req, src_addr);
+ path_rec ? path_rec->pkey : 0, &req, src_addr);
}
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
@@ -2906,39 +2940,29 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
-static bool srpt_ch_list_empty(struct srpt_port *sport)
-{
- struct srpt_nexus *nexus;
- bool res = true;
-
- rcu_read_lock();
- list_for_each_entry(nexus, &sport->nexus_list, entry)
- if (!list_empty(&nexus->ch_list))
- res = false;
- rcu_read_unlock();
-
- return res;
-}
-
/**
* srpt_release_sport - disable login and wait for associated channels
* @sport: SRPT HCA port.
*/
static int srpt_release_sport(struct srpt_port *sport)
{
+ DECLARE_COMPLETION_ONSTACK(c);
struct srpt_nexus *nexus, *next_n;
struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
+ sport->freed_channels = &c;
+
mutex_lock(&sport->mutex);
srpt_set_enabled(sport, false);
mutex_unlock(&sport->mutex);
- while (wait_event_timeout(sport->ch_releaseQ,
- srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
- pr_info("%s_%d: waiting for session unregistration ...\n",
- dev_name(&sport->sdev->device->dev), sport->port);
+ while (atomic_read(&sport->refcount) > 0 &&
+ wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ atomic_read(&sport->refcount));
rcu_read_lock();
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2975,10 +2999,10 @@ static struct se_wwn *__srpt_lookup_wwn(const char *name)
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
- if (strcmp(sport->port_guid, name) == 0)
- return &sport->port_guid_wwn;
- if (strcmp(sport->port_gid, name) == 0)
- return &sport->port_gid_wwn;
+ if (strcmp(sport->port_guid_id.name, name) == 0)
+ return &sport->port_guid_id.wwn;
+ if (strcmp(sport->port_gid_id.name, name) == 0)
+ return &sport->port_gid_id.wwn;
}
}
@@ -3147,7 +3171,6 @@ static void srpt_add_one(struct ib_device *device)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
INIT_LIST_HEAD(&sport->nexus_list);
- init_waitqueue_head(&sport->ch_releaseQ);
mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
@@ -3156,6 +3179,10 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
+ mutex_init(&sport->port_guid_id.mutex);
+ INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
+ mutex_init(&sport->port_gid_id.mutex);
+ INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
@@ -3258,14 +3285,23 @@ static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
return tpg->se_tpg_wwn->priv;
}
+static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
+{
+ struct srpt_port *sport = wwn->priv;
+
+ if (wwn == &sport->port_guid_id.wwn)
+ return &sport->port_guid_id;
+ if (wwn == &sport->port_gid_id.wwn)
+ return &sport->port_gid_id;
+ WARN_ON_ONCE(true);
+ return NULL;
+}
+
static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
- struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
- WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
- tpg != &sport->port_gid_tpg);
- return tpg == &sport->port_guid_tpg ? sport->port_guid :
- sport->port_gid;
+ return stpg->sport_id->name;
}
static u16 srpt_get_tag(struct se_portal_group *tpg)
@@ -3721,19 +3757,25 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
const char *name)
{
- struct srpt_port *sport = wwn->priv;
- struct se_portal_group *tpg;
- int res;
-
- WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
- wwn != &sport->port_gid_wwn);
- tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
- &sport->port_gid_tpg;
- res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
- if (res)
+ struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
+ struct srpt_tpg *stpg;
+ int res = -ENOMEM;
+
+ stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
+ if (!stpg)
return ERR_PTR(res);
+ stpg->sport_id = sport_id;
+ res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
+ if (res) {
+ kfree(stpg);
+ return ERR_PTR(res);
+ }
- return tpg;
+ mutex_lock(&sport_id->mutex);
+ list_add_tail(&stpg->entry, &sport_id->tpg_list);
+ mutex_unlock(&sport_id->mutex);
+
+ return &stpg->tpg;
}
/**
@@ -3742,10 +3784,17 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
+ struct srpt_port_id *sport_id = stpg->sport_id;
struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+ mutex_lock(&sport_id->mutex);
+ list_del(&stpg->entry);
+ mutex_unlock(&sport_id->mutex);
+
sport->enabled = false;
core_tpg_deregister(tpg);
+ kfree(stpg);
}
/**
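The ib_srpt.c changes above replace the ch_releaseQ waitqueue with an on-stack completion that is signalled when the last per-port reference is dropped. A condensed sketch of that shutdown pattern; the "foo" structure and names are illustrative and not part of this patch.

/* Illustrative only: the last reference drop signals a completion supplied
 * by the teardown path, mirroring srpt_drop_sport_ref()/srpt_release_sport().
 */
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct foo {
	atomic_t		refcount;
	struct completion	*freed;	/* set only during teardown */
};

static void foo_put(struct foo *f)
{
	if (atomic_dec_return(&f->refcount) == 0 && f->freed)
		complete(f->freed);
}

static void foo_shutdown(struct foo *f)
{
	DECLARE_COMPLETION_ONSTACK(done);

	f->freed = &done;
	/* stop creating new users here, then wait for the existing ones */
	while (atomic_read(&f->refcount) > 0 &&
	       wait_for_completion_timeout(&done, 5 * HZ) == 0)
		pr_info("foo: still waiting for %d users\n",
			atomic_read(&f->refcount));
}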
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ee9f20e9177a..2e1a69840857 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -264,6 +264,8 @@ enum rdma_ch_state {
* @zw_cqe: Zero-length write CQE.
* @rcu: RCU head.
* @kref: kref for this channel.
+ * @closed: Completion object that will be signaled as soon as a new
+ * channel object with the same identity can be created.
* @rq_size: IB receive queue size.
* @max_rsp_size: Maximum size of an RSP response message in bytes.
* @sq_wr_avail: number of work requests available in the send queue.
@@ -306,6 +308,7 @@ struct srpt_rdma_ch {
struct ib_cqe zw_cqe;
struct rcu_head rcu;
struct kref kref;
+ struct completion *closed;
int rq_size;
u32 max_rsp_size;
atomic_t sq_wr_avail;
@@ -361,24 +364,52 @@ struct srpt_port_attrib {
};
/**
+ * struct srpt_tpg - information about a single "target portal group"
+ * @entry: Entry in @sport_id->tpg_list.
+ * @sport_id: Port name this TPG is associated with.
+ * @tpg: LIO TPG data structure.
+ *
+ * Zero or more target portal groups are associated with each port name
+ * (srpt_port_id). With each TPG an ACL list is associated.
+ */
+struct srpt_tpg {
+ struct list_head entry;
+ struct srpt_port_id *sport_id;
+ struct se_portal_group tpg;
+};
+
+/**
+ * struct srpt_port_id - information about an RDMA port name
+ * @mutex: Protects @tpg_list changes.
+ * @tpg_list: TPGs associated with the RDMA port name.
+ * @wwn: WWN associated with the RDMA port name.
+ * @name: ASCII representation of the port name.
+ *
+ * Multiple sysfs directories can be associated with a single RDMA port. This
+ * data structure represents a single (port, name) pair.
+ */
+struct srpt_port_id {
+ struct mutex mutex;
+ struct list_head tpg_list;
+ struct se_wwn wwn;
+ char name[64];
+};
+
+/**
* struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
- * @port_guid: ASCII representation of Port GUID
- * @port_gid: ASCII representation of Port GID
* @port: one-based port number.
* @sm_lid: cached value of the port's sm_lid.
* @lid: cached value of the port's lid.
* @gid: cached value of the port's gid.
- * @port_acl_lock spinlock for port_acl_list:
* @work: work structure for refreshing the aforementioned cached values.
- * @port_guid_tpg: TPG associated with target port GUID.
- * @port_guid_wwn: WWN associated with target port GUID.
- * @port_gid_tpg: TPG associated with target port GID.
- * @port_gid_wwn: WWN associated with target port GID.
+ * @port_guid_id: target port GUID
+ * @port_gid_id: target port GID
* @port_attrib: Port attributes that can be accessed through configfs.
- * @ch_releaseQ: Enables waiting for removal from nexus_list.
+ * @refcount: Number of objects associated with this port.
+ * @freed_channels: Completion that will be signaled once @refcount becomes 0.
* @mutex: Protects nexus_list.
* @nexus_list: Nexus list. See also srpt_nexus.entry.
*/
@@ -386,19 +417,16 @@ struct srpt_port {
struct srpt_device *sdev;
struct ib_mad_agent *mad_agent;
bool enabled;
- u8 port_guid[24];
- u8 port_gid[64];
u8 port;
u32 sm_lid;
u32 lid;
union ib_gid gid;
struct work_struct work;
- struct se_portal_group port_guid_tpg;
- struct se_wwn port_guid_wwn;
- struct se_portal_group port_gid_tpg;
- struct se_wwn port_gid_wwn;
+ struct srpt_port_id port_guid_id;
+ struct srpt_port_id port_gid_id;
struct srpt_port_attrib port_attrib;
- wait_queue_head_t ch_releaseQ;
+ atomic_t refcount;
+ struct completion *freed_channels;
struct mutex mutex;
struct list_head nexus_list;
};
diff --git a/drivers/input/input-poller.c b/drivers/input/input-poller.c
index 1b3d28964bb2..7d6b4e8879f1 100644
--- a/drivers/input/input-poller.c
+++ b/drivers/input/input-poller.c
@@ -123,6 +123,15 @@ void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval)
}
EXPORT_SYMBOL(input_set_max_poll_interval);
+int input_get_poll_interval(struct input_dev *dev)
+{
+ if (!dev->poller)
+ return -EINVAL;
+
+ return dev->poller->poll_interval;
+}
+EXPORT_SYMBOL(input_get_poll_interval);
+
/* SYSFS interface */
static ssize_t input_dev_get_poll_interval(struct device *dev,
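The exported input_get_poll_interval() added above gives drivers read access to the currently configured interval. A short usage sketch; the "my_poll" name is illustrative only.

/* Illustrative caller of the new accessor. */
#include <linux/device.h>
#include <linux/input.h>

static void my_poll(struct input_dev *input)
{
	/* Interval in ms, or -EINVAL if polling was never set up. */
	int interval = input_get_poll_interval(input);

	if (interval > 0)
		dev_dbg(&input->dev, "sampling every %d ms\n", interval);

	/* ... sample hardware and report events here ... */
}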
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 312b854b5506..940b744639c7 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -334,7 +334,6 @@ config JOYSTICK_MAPLE
config JOYSTICK_PSXPAD_SPI
tristate "PlayStation 1/2 joypads via SPI interface"
depends on SPI
- select INPUT_POLLDEV
help
Say Y here if you wish to connect PlayStation 1/2 joypads
via SPI interface.
diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c
index 7eee1b0e360f..a32656064f39 100644
--- a/drivers/input/joystick/psxpad-spi.c
+++ b/drivers/input/joystick/psxpad-spi.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
@@ -60,7 +59,7 @@ static const u8 PSX_CMD_ENABLE_MOTOR[] = {
struct psxpad {
struct spi_device *spi;
- struct input_polled_dev *pdev;
+ struct input_dev *idev;
char phys[0x20];
bool motor1enable;
bool motor2enable;
@@ -140,8 +139,7 @@ static void psxpad_set_motor_level(struct psxpad *pad,
static int psxpad_spi_play_effect(struct input_dev *idev,
void *data, struct ff_effect *effect)
{
- struct input_polled_dev *pdev = input_get_drvdata(idev);
- struct psxpad *pad = pdev->private;
+ struct psxpad *pad = input_get_drvdata(idev);
switch (effect->type) {
case FF_RUMBLE:
@@ -158,10 +156,9 @@ static int psxpad_spi_init_ff(struct psxpad *pad)
{
int err;
- input_set_capability(pad->pdev->input, EV_FF, FF_RUMBLE);
+ input_set_capability(pad->idev, EV_FF, FF_RUMBLE);
- err = input_ff_create_memless(pad->pdev->input, NULL,
- psxpad_spi_play_effect);
+ err = input_ff_create_memless(pad->idev, NULL, psxpad_spi_play_effect);
if (err) {
dev_err(&pad->spi->dev,
"input_ff_create_memless() failed: %d\n", err);
@@ -189,24 +186,25 @@ static inline int psxpad_spi_init_ff(struct psxpad *pad)
}
#endif /* CONFIG_JOYSTICK_PSXPAD_SPI_FF */
-static void psxpad_spi_poll_open(struct input_polled_dev *pdev)
+static int psxpad_spi_poll_open(struct input_dev *input)
{
- struct psxpad *pad = pdev->private;
+ struct psxpad *pad = input_get_drvdata(input);
pm_runtime_get_sync(&pad->spi->dev);
+
+ return 0;
}
-static void psxpad_spi_poll_close(struct input_polled_dev *pdev)
+static void psxpad_spi_poll_close(struct input_dev *input)
{
- struct psxpad *pad = pdev->private;
+ struct psxpad *pad = input_get_drvdata(input);
pm_runtime_put_sync(&pad->spi->dev);
}
-static void psxpad_spi_poll(struct input_polled_dev *pdev)
+static void psxpad_spi_poll(struct input_dev *input)
{
- struct psxpad *pad = pdev->private;
- struct input_dev *input = pdev->input;
+ struct psxpad *pad = input_get_drvdata(input);
u8 b_rsp3, b_rsp4;
int err;
@@ -284,7 +282,6 @@ static void psxpad_spi_poll(struct input_polled_dev *pdev)
static int psxpad_spi_probe(struct spi_device *spi)
{
struct psxpad *pad;
- struct input_polled_dev *pdev;
struct input_dev *idev;
int err;
@@ -292,31 +289,26 @@ static int psxpad_spi_probe(struct spi_device *spi)
if (!pad)
return -ENOMEM;
- pdev = input_allocate_polled_device();
- if (!pdev) {
+ idev = devm_input_allocate_device(&spi->dev);
+ if (!idev) {
dev_err(&spi->dev, "failed to allocate input device\n");
return -ENOMEM;
}
/* input poll device settings */
- pad->pdev = pdev;
+ pad->idev = idev;
pad->spi = spi;
- pdev->private = pad;
- pdev->open = psxpad_spi_poll_open;
- pdev->close = psxpad_spi_poll_close;
- pdev->poll = psxpad_spi_poll;
- /* poll interval is about 60fps */
- pdev->poll_interval = 16;
- pdev->poll_interval_min = 8;
- pdev->poll_interval_max = 32;
-
/* input device settings */
- idev = pdev->input;
+ input_set_drvdata(idev, pad);
+
idev->name = "PlayStation 1/2 joypad";
snprintf(pad->phys, sizeof(pad->phys), "%s/input", dev_name(&spi->dev));
idev->id.bustype = BUS_SPI;
+ idev->open = psxpad_spi_poll_open;
+ idev->close = psxpad_spi_poll_close;
+
/* key/value map settings */
input_set_abs_params(idev, ABS_X, 0, 255, 0, 0);
input_set_abs_params(idev, ABS_Y, 0, 255, 0, 0);
@@ -354,11 +346,23 @@ static int psxpad_spi_probe(struct spi_device *spi)
/* pad settings */
psxpad_set_motor_level(pad, 0, 0);
+
+ err = input_setup_polling(idev, psxpad_spi_poll);
+ if (err) {
+ dev_err(&spi->dev, "failed to set up polling: %d\n", err);
+ return err;
+ }
+
+ /* poll interval is about 60fps */
+ input_set_poll_interval(idev, 16);
+ input_set_min_poll_interval(idev, 8);
+ input_set_max_poll_interval(idev, 32);
+
/* register input poll device */
- err = input_register_polled_device(pdev);
+ err = input_register_device(idev);
if (err) {
dev_err(&spi->dev,
- "failed to register input poll device: %d\n", err);
+ "failed to register input device: %d\n", err);
return err;
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 8911bc2ec42a..61f4eb63eec1 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -16,7 +16,6 @@ if INPUT_KEYBOARD
config KEYBOARD_ADC
tristate "ADC Ladder Buttons"
depends on IIO
- select INPUT_POLLDEV
help
This driver implements support for buttons connected
to an ADC using a resistor ladder.
@@ -168,14 +167,14 @@ config KEYBOARD_QT1050
the module will be called qt1050
config KEYBOARD_QT1070
- tristate "Atmel AT42QT1070 Touch Sensor Chip"
- depends on I2C
- help
- Say Y here if you want to use Atmel AT42QT1070 QTouch
- Sensor chip as input device.
+ tristate "Atmel AT42QT1070 Touch Sensor Chip"
+ depends on I2C
+ help
+ Say Y here if you want to use Atmel AT42QT1070 QTouch
+ Sensor chip as input device.
- To compile this driver as a module, choose M here:
- the module will be called qt1070
+ To compile this driver as a module, choose M here:
+ the module will be called qt1070
config KEYBOARD_QT2160
tristate "Atmel AT42QT2160 Touch Sensor Chip"
@@ -191,7 +190,6 @@ config KEYBOARD_CLPS711X
tristate "CLPS711X Keypad support"
depends on OF_GPIO && (ARCH_CLPS711X || COMPILE_TEST)
select INPUT_MATRIXKMAP
- select INPUT_POLLDEV
help
Say Y here to enable the matrix keypad on the Cirrus Logic
CLPS711X CPUs.
@@ -250,7 +248,6 @@ config KEYBOARD_GPIO
config KEYBOARD_GPIO_POLLED
tristate "Polled GPIO buttons"
depends on GPIOLIB
- select INPUT_POLLDEV
help
This driver implements support for buttons connected
to GPIO pins that are not capable of generating interrupts.
@@ -342,7 +339,6 @@ config KEYBOARD_HIL
config KEYBOARD_HP6XX
tristate "HP Jornada 6xx keyboard"
depends on SH_HP6XX
- select INPUT_POLLDEV
help
Say Y here if you have a HP Jornada 620/660/680/690 and want to
support the built-in keyboard.
@@ -469,6 +465,16 @@ config KEYBOARD_IMX
To compile this driver as a module, choose M here: the
module will be called imx_keypad.
+config KEYBOARD_IMX_SC_KEY
+ tristate "IMX SCU Key Driver"
+ depends on IMX_SCU
+ help
+ This is the system controller key driver for NXP i.MX SoCs with
+ system controller inside.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx_sc_key.
+
config KEYBOARD_NEWTON
tristate "Newton keyboard"
select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 9510325c0c5d..f5b17524adf2 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IPAQ_MICRO) += ipaq-micro-keys.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
+obj-$(CONFIG_KEYBOARD_IMX_SC_KEY) += imx_sc_key.o
obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c
index 9885fd56f5f9..6d5be48d1b3d 100644
--- a/drivers/input/keyboard/adc-keys.c
+++ b/drivers/input/keyboard/adc-keys.c
@@ -9,7 +9,6 @@
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -30,9 +29,9 @@ struct adc_keys_state {
const struct adc_keys_button *map;
};
-static void adc_keys_poll(struct input_polled_dev *dev)
+static void adc_keys_poll(struct input_dev *input)
{
- struct adc_keys_state *st = dev->private;
+ struct adc_keys_state *st = input_get_drvdata(input);
int i, value, ret;
u32 diff, closest = 0xffffffff;
int keycode = 0;
@@ -55,12 +54,12 @@ static void adc_keys_poll(struct input_polled_dev *dev)
keycode = 0;
if (st->last_key && st->last_key != keycode)
- input_report_key(dev->input, st->last_key, 0);
+ input_report_key(input, st->last_key, 0);
if (keycode)
- input_report_key(dev->input, keycode, 1);
+ input_report_key(input, keycode, 1);
- input_sync(dev->input);
+ input_sync(input);
st->last_key = keycode;
}
@@ -108,7 +107,6 @@ static int adc_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct adc_keys_state *st;
- struct input_polled_dev *poll_dev;
struct input_dev *input;
enum iio_chan_type type;
int i, value;
@@ -145,19 +143,13 @@ static int adc_keys_probe(struct platform_device *pdev)
if (error)
return error;
- poll_dev = devm_input_allocate_polled_device(dev);
- if (!poll_dev) {
+ input = devm_input_allocate_device(dev);
+ if (!input) {
dev_err(dev, "failed to allocate input device\n");
return -ENOMEM;
}
- if (!device_property_read_u32(dev, "poll-interval", &value))
- poll_dev->poll_interval = value;
-
- poll_dev->poll = adc_keys_poll;
- poll_dev->private = st;
-
- input = poll_dev->input;
+ input_set_drvdata(input, st);
input->name = pdev->name;
input->phys = "adc-keys/input0";
@@ -174,7 +166,17 @@ static int adc_keys_probe(struct platform_device *pdev)
if (device_property_read_bool(dev, "autorepeat"))
__set_bit(EV_REP, input->evbit);
- error = input_register_polled_device(poll_dev);
+
+ error = input_setup_polling(input, adc_keys_poll);
+ if (error) {
+ dev_err(dev, "Unable to set up polling: %d\n", error);
+ return error;
+ }
+
+ if (!device_property_read_u32(dev, "poll-interval", &value))
+ input_set_poll_interval(input, value);
+
+ error = input_register_device(input);
if (error) {
dev_err(dev, "Unable to register input device: %d\n", error);
return error;
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 4f96a4a99e5b..e7d58e7f0257 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -857,70 +857,35 @@ static void adp5589_report_switch_state(struct adp5589_kpad *kpad)
input_sync(kpad->input);
}
-static int adp5589_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adp5589_keypad_add(struct adp5589_kpad *kpad, unsigned int revid)
{
- struct adp5589_kpad *kpad;
+ struct i2c_client *client = kpad->client;
const struct adp5589_kpad_platform_data *pdata =
dev_get_platdata(&client->dev);
struct input_dev *input;
- unsigned int revid;
- int ret, i;
+ unsigned int i;
int error;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
- return -EIO;
- }
-
- if (!pdata) {
- dev_err(&client->dev, "no platform data?\n");
- return -EINVAL;
- }
-
- kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
- if (!kpad)
- return -ENOMEM;
-
- switch (id->driver_data) {
- case ADP5585_02:
- kpad->support_row5 = true;
- /* fall through */
- case ADP5585_01:
- kpad->is_adp5585 = true;
- kpad->var = &const_adp5585;
- break;
- case ADP5589:
- kpad->support_row5 = true;
- kpad->var = &const_adp5589;
- break;
- }
-
if (!((pdata->keypad_en_mask & kpad->var->row_mask) &&
(pdata->keypad_en_mask >> kpad->var->col_shift)) ||
!pdata->keymap) {
dev_err(&client->dev, "no rows, cols or keymap from pdata\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
if (pdata->keymapsize != kpad->var->keymapsize) {
dev_err(&client->dev, "invalid keymapsize\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
if (!pdata->gpimap && pdata->gpimapsize) {
dev_err(&client->dev, "invalid gpimap from pdata\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
if (pdata->gpimapsize > kpad->var->gpimapsize_max) {
dev_err(&client->dev, "invalid gpimapsize\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
for (i = 0; i < pdata->gpimapsize; i++) {
@@ -929,41 +894,27 @@ static int adp5589_probe(struct i2c_client *client,
if (pin < kpad->var->gpi_pin_base ||
pin > kpad->var->gpi_pin_end) {
dev_err(&client->dev, "invalid gpi pin data\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
if ((1 << (pin - kpad->var->gpi_pin_row_base)) &
pdata->keypad_en_mask) {
dev_err(&client->dev, "invalid gpi row/col data\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
}
if (!client->irq) {
dev_err(&client->dev, "no IRQ?\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ if (!input)
+ return -ENOMEM;
- kpad->client = client;
kpad->input = input;
- ret = adp5589_read(client, ADP5589_5_ID);
- if (ret < 0) {
- error = ret;
- goto err_free_input;
- }
-
- revid = (u8) ret & ADP5589_5_DEVICE_ID_MASK;
-
input->name = client->name;
input->phys = "adp5589-keys/input0";
input->dev.parent = &client->dev;
@@ -1015,30 +966,99 @@ static int adp5589_probe(struct i2c_client *client,
goto err_unreg_dev;
}
+ device_init_wakeup(&client->dev, 1);
+
+ return 0;
+
+err_unreg_dev:
+ input_unregister_device(input);
+ input = NULL;
+err_free_input:
+ input_free_device(input);
+
+ return error;
+}
+
+static void adp5589_keypad_remove(struct adp5589_kpad *kpad)
+{
+ if (kpad->input) {
+ free_irq(kpad->client->irq, kpad);
+ input_unregister_device(kpad->input);
+ }
+}
+
+static int adp5589_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adp5589_kpad *kpad;
+ const struct adp5589_kpad_platform_data *pdata =
+ dev_get_platdata(&client->dev);
+ unsigned int revid;
+ int error, ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+ return -EIO;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data?\n");
+ return -EINVAL;
+ }
+
+ kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
+ if (!kpad)
+ return -ENOMEM;
+
+ kpad->client = client;
+
+ switch (id->driver_data) {
+ case ADP5585_02:
+ kpad->support_row5 = true;
+ /* fall through */
+ case ADP5585_01:
+ kpad->is_adp5585 = true;
+ kpad->var = &const_adp5585;
+ break;
+ case ADP5589:
+ kpad->support_row5 = true;
+ kpad->var = &const_adp5589;
+ break;
+ }
+
+ ret = adp5589_read(client, ADP5589_5_ID);
+ if (ret < 0) {
+ error = ret;
+ goto err_free_mem;
+ }
+
+ revid = (u8) ret & ADP5589_5_DEVICE_ID_MASK;
+
+ if (pdata->keymapsize) {
+ error = adp5589_keypad_add(kpad, revid);
+ if (error)
+ goto err_free_mem;
+ }
+
error = adp5589_setup(kpad);
if (error)
- goto err_free_irq;
+ goto err_keypad_remove;
if (kpad->gpimapsize)
adp5589_report_switch_state(kpad);
error = adp5589_gpio_add(kpad);
if (error)
- goto err_free_irq;
+ goto err_keypad_remove;
- device_init_wakeup(&client->dev, 1);
i2c_set_clientdata(client, kpad);
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
-err_free_irq:
- free_irq(client->irq, kpad);
-err_unreg_dev:
- input_unregister_device(input);
- input = NULL;
-err_free_input:
- input_free_device(input);
+err_keypad_remove:
+ adp5589_keypad_remove(kpad);
err_free_mem:
kfree(kpad);
@@ -1050,8 +1070,7 @@ static int adp5589_remove(struct i2c_client *client)
struct adp5589_kpad *kpad = i2c_get_clientdata(client);
adp5589_write(client, kpad->var->reg(ADP5589_GENERAL_CFG), 0);
- free_irq(client->irq, kpad);
- input_unregister_device(kpad->input);
+ adp5589_keypad_remove(kpad);
adp5589_gpio_remove(kpad);
kfree(kpad);
@@ -1064,6 +1083,9 @@ static int adp5589_suspend(struct device *dev)
struct adp5589_kpad *kpad = dev_get_drvdata(dev);
struct i2c_client *client = kpad->client;
+ if (!kpad->input)
+ return 0;
+
disable_irq(client->irq);
if (device_may_wakeup(&client->dev))
@@ -1077,6 +1099,9 @@ static int adp5589_resume(struct device *dev)
struct adp5589_kpad *kpad = dev_get_drvdata(dev);
struct i2c_client *client = kpad->client;
+ if (!kpad->input)
+ return 0;
+
if (device_may_wakeup(&client->dev))
disable_irq_wake(client->irq);
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index c4a5c07a4b98..019dd6ed2c29 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -6,7 +6,6 @@
*/
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
@@ -30,10 +29,10 @@ struct clps711x_keypad_data {
struct clps711x_gpio_data *gpio_data;
};
-static void clps711x_keypad_poll(struct input_polled_dev *dev)
+static void clps711x_keypad_poll(struct input_dev *input)
{
- const unsigned short *keycodes = dev->input->keycode;
- struct clps711x_keypad_data *priv = dev->private;
+ const unsigned short *keycodes = input->keycode;
+ struct clps711x_keypad_data *priv = input_get_drvdata(input);
bool sync = false;
int col, row;
@@ -61,14 +60,14 @@ static void clps711x_keypad_poll(struct input_polled_dev *dev)
if (state) {
set_bit(col, data->last_state);
- input_event(dev->input, EV_MSC,
- MSC_SCAN, code);
+ input_event(input,
+ EV_MSC, MSC_SCAN, code);
} else {
clear_bit(col, data->last_state);
}
if (keycodes[code])
- input_report_key(dev->input,
+ input_report_key(input,
keycodes[code], state);
sync = true;
}
@@ -80,7 +79,7 @@ static void clps711x_keypad_poll(struct input_polled_dev *dev)
}
if (sync)
- input_sync(dev->input);
+ input_sync(input);
}
static int clps711x_keypad_probe(struct platform_device *pdev)
@@ -88,7 +87,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
struct clps711x_keypad_data *priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
u32 poll_interval;
int i, err;
@@ -125,53 +124,43 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
if (err)
return err;
- poll_dev = input_allocate_polled_device();
- if (!poll_dev)
+ input = devm_input_allocate_device(dev);
+ if (!input)
return -ENOMEM;
- poll_dev->private = priv;
- poll_dev->poll = clps711x_keypad_poll;
- poll_dev->poll_interval = poll_interval;
- poll_dev->input->name = pdev->name;
- poll_dev->input->dev.parent = dev;
- poll_dev->input->id.bustype = BUS_HOST;
- poll_dev->input->id.vendor = 0x0001;
- poll_dev->input->id.product = 0x0001;
- poll_dev->input->id.version = 0x0100;
+ input_set_drvdata(input, priv);
+
+ input->name = pdev->name;
+ input->dev.parent = dev;
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
err = matrix_keypad_build_keymap(NULL, NULL, priv->row_count,
CLPS711X_KEYPAD_COL_COUNT,
- NULL, poll_dev->input);
+ NULL, input);
if (err)
- goto out_err;
+ return err;
- input_set_capability(poll_dev->input, EV_MSC, MSC_SCAN);
+ input_set_capability(input, EV_MSC, MSC_SCAN);
if (of_property_read_bool(np, "autorepeat"))
- __set_bit(EV_REP, poll_dev->input->evbit);
-
- platform_set_drvdata(pdev, poll_dev);
+ __set_bit(EV_REP, input->evbit);
/* Set all columns to low */
regmap_update_bits(priv->syscon, SYSCON_OFFSET, SYSCON1_KBDSCAN_MASK,
SYSCON1_KBDSCAN(1));
- err = input_register_polled_device(poll_dev);
- if (err)
- goto out_err;
-
- return 0;
-out_err:
- input_free_polled_device(poll_dev);
- return err;
-}
+ err = input_setup_polling(input, clps711x_keypad_poll);
+ if (err)
+ return err;
-static int clps711x_keypad_remove(struct platform_device *pdev)
-{
- struct input_polled_dev *poll_dev = platform_get_drvdata(pdev);
+ input_set_poll_interval(input, poll_interval);
- input_unregister_polled_device(poll_dev);
- input_free_polled_device(poll_dev);
+ err = input_register_device(input);
+ if (err)
+ return err;
return 0;
}
@@ -188,7 +177,6 @@ static struct platform_driver clps711x_keypad_driver = {
.of_match_table = clps711x_keypad_of_match,
},
.probe = clps711x_keypad_probe,
- .remove = clps711x_keypad_remove,
};
module_platform_driver(clps711x_keypad_driver);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 8d4d9786cc74..2b71c5a51f90 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -226,8 +226,6 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
{
struct cros_ec_keyb *ckdev = container_of(nb, struct cros_ec_keyb,
notifier);
- uint8_t mkbp_event_type = ckdev->ec->event_data.event_type &
- EC_MKBP_EVENT_TYPE_MASK;
u32 val;
unsigned int ev_type;
@@ -239,7 +237,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
if (queued_during_suspend && !device_may_wakeup(ckdev->dev))
return NOTIFY_OK;
- switch (mkbp_event_type) {
+ switch (ckdev->ec->event_data.event_type) {
case EC_MKBP_EVENT_KEY_MATRIX:
pm_wakeup_event(ckdev->dev, 0);
@@ -266,7 +264,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_SWITCH:
pm_wakeup_event(ckdev->dev, 0);
- if (mkbp_event_type == EC_MKBP_EVENT_BUTTON) {
+ if (ckdev->ec->event_data.event_type == EC_MKBP_EVENT_BUTTON) {
val = get_unaligned_le32(
&ckdev->ec->event_data.data.buttons);
ev_type = EV_KEY;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 1373dc5b0765..1f56d53454b2 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -494,10 +494,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
spin_lock_init(&bdata->lock);
if (child) {
- bdata->gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL,
- child,
- GPIOD_IN,
- desc);
+ bdata->gpiod = devm_fwnode_gpiod_get(dev, child,
+ NULL, GPIOD_IN, desc);
if (IS_ERR(bdata->gpiod)) {
error = PTR_ERR(bdata->gpiod);
if (error == -ENOENT) {
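Both gpio_keys.c above and gpio_keys_polled.c below move from devm_fwnode_get_gpiod_from_child() to devm_fwnode_gpiod_get(). As far as I can tell the new helper is the index-0 convenience wrapper around devm_fwnode_gpiod_get_index(); a rough sketch of that relationship (see include/linux/gpio/consumer.h for the real definitions):

/* Rough sketch, not the actual gpiolib source. */
static inline struct gpio_desc *
devm_fwnode_gpiod_get(struct device *dev, struct fwnode_handle *fwnode,
		      const char *con_id, enum gpiod_flags flags,
		      const char *label)
{
	return devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0, flags,
					   label);
}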
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 465eecfa6b3f..6eb0a2f3f9de 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
@@ -34,7 +33,7 @@ struct gpio_keys_button_data {
};
struct gpio_keys_polled_dev {
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct device *dev;
const struct gpio_keys_platform_data *pdata;
unsigned long rel_axis_seen[BITS_TO_LONGS(REL_CNT)];
@@ -42,12 +41,11 @@ struct gpio_keys_polled_dev {
struct gpio_keys_button_data data[0];
};
-static void gpio_keys_button_event(struct input_polled_dev *dev,
+static void gpio_keys_button_event(struct input_dev *input,
const struct gpio_keys_button *button,
int state)
{
- struct gpio_keys_polled_dev *bdev = dev->private;
- struct input_dev *input = dev->input;
+ struct gpio_keys_polled_dev *bdev = input_get_drvdata(input);
unsigned int type = button->type ?: EV_KEY;
if (type == EV_REL) {
@@ -66,7 +64,7 @@ static void gpio_keys_button_event(struct input_polled_dev *dev,
}
}
-static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
+static void gpio_keys_polled_check_state(struct input_dev *input,
const struct gpio_keys_button *button,
struct gpio_keys_button_data *bdata)
{
@@ -74,10 +72,10 @@ static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
state = gpiod_get_value_cansleep(bdata->gpiod);
if (state < 0) {
- dev_err(dev->input->dev.parent,
+ dev_err(input->dev.parent,
"failed to get gpio state: %d\n", state);
} else {
- gpio_keys_button_event(dev, button, state);
+ gpio_keys_button_event(input, button, state);
if (state != bdata->last_state) {
bdata->count = 0;
@@ -86,11 +84,10 @@ static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
}
}
-static void gpio_keys_polled_poll(struct input_polled_dev *dev)
+static void gpio_keys_polled_poll(struct input_dev *input)
{
- struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_polled_dev *bdev = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
- struct input_dev *input = dev->input;
int i;
memset(bdev->rel_axis_seen, 0, sizeof(bdev->rel_axis_seen));
@@ -101,10 +98,10 @@ static void gpio_keys_polled_poll(struct input_polled_dev *dev)
if (bdata->count < bdata->threshold) {
bdata->count++;
- gpio_keys_button_event(dev, &pdata->buttons[i],
+ gpio_keys_button_event(input, &pdata->buttons[i],
bdata->last_state);
} else {
- gpio_keys_polled_check_state(dev, &pdata->buttons[i],
+ gpio_keys_polled_check_state(input, &pdata->buttons[i],
bdata);
}
}
@@ -122,18 +119,20 @@ static void gpio_keys_polled_poll(struct input_polled_dev *dev)
input_sync(input);
}
-static void gpio_keys_polled_open(struct input_polled_dev *dev)
+static int gpio_keys_polled_open(struct input_dev *input)
{
- struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_polled_dev *bdev = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
if (pdata->enable)
pdata->enable(bdev->dev);
+
+ return 0;
}
-static void gpio_keys_polled_close(struct input_polled_dev *dev)
+static void gpio_keys_polled_close(struct input_dev *input)
{
- struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_polled_dev *bdev = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
if (pdata->disable)
@@ -232,7 +231,6 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
struct fwnode_handle *child = NULL;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
struct gpio_keys_polled_dev *bdev;
- struct input_polled_dev *poll_dev;
struct input_dev *input;
int error;
int i;
@@ -255,19 +253,13 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
return -ENOMEM;
}
- poll_dev = devm_input_allocate_polled_device(dev);
- if (!poll_dev) {
- dev_err(dev, "no memory for polled device\n");
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "no memory for input device\n");
return -ENOMEM;
}
- poll_dev->private = bdev;
- poll_dev->poll = gpio_keys_polled_poll;
- poll_dev->poll_interval = pdata->poll_interval;
- poll_dev->open = gpio_keys_polled_open;
- poll_dev->close = gpio_keys_polled_close;
-
- input = poll_dev->input;
+ input_set_drvdata(input, bdev);
input->name = pdata->name ?: pdev->name;
input->phys = DRV_NAME"/input0";
@@ -277,6 +269,9 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0100;
+ input->open = gpio_keys_polled_open;
+ input->close = gpio_keys_polled_close;
+
__set_bit(EV_KEY, input->evbit);
if (pdata->rep)
__set_bit(EV_REP, input->evbit);
@@ -300,10 +295,9 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
return -EINVAL;
}
- bdata->gpiod = devm_fwnode_get_gpiod_from_child(dev,
- NULL, child,
- GPIOD_IN,
- button->desc);
+ bdata->gpiod = devm_fwnode_gpiod_get(dev, child,
+ NULL, GPIOD_IN,
+ button->desc);
if (IS_ERR(bdata->gpiod)) {
error = PTR_ERR(bdata->gpiod);
if (error != -EPROBE_DEFER)
@@ -353,11 +347,19 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
fwnode_handle_put(child);
- bdev->poll_dev = poll_dev;
+ bdev->input = input;
bdev->dev = dev;
bdev->pdata = pdata;
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input, gpio_keys_polled_poll);
+ if (error) {
+ dev_err(dev, "unable to set up polling, err=%d\n", error);
+ return error;
+ }
+
+ input_set_poll_interval(input, pdata->poll_interval);
+
+ error = input_register_device(input);
if (error) {
dev_err(dev, "unable to register polled device, err=%d\n",
error);
@@ -366,7 +368,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
/* report initial state of the buttons */
for (i = 0; i < pdata->nbuttons; i++)
- gpio_keys_polled_check_state(poll_dev, &pdata->buttons[i],
+ gpio_keys_polled_check_state(input, &pdata->buttons[i],
&bdev->data[i]);
input_sync(input);
diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c
new file mode 100644
index 000000000000..53799527dc75
--- /dev/null
+++ b/drivers/input/keyboard/imx_sc_key.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define DEBOUNCE_TIME 30
+#define REPEAT_INTERVAL 60
+
+#define SC_IRQ_BUTTON 1
+#define SC_IRQ_GROUP_WAKE 3
+
+#define IMX_SC_MISC_FUNC_GET_BUTTON_STATUS 18
+
+struct imx_key_drv_data {
+ u32 keycode;
+ bool keystate; /* true: pressed, false: released */
+ struct delayed_work check_work;
+ struct input_dev *input;
+ struct imx_sc_ipc *key_ipc_handle;
+ struct notifier_block key_notifier;
+};
+
+struct imx_sc_msg_key {
+ struct imx_sc_rpc_msg hdr;
+ u32 state;
+};
+
+static int imx_sc_key_notify(struct notifier_block *nb,
+ unsigned long event, void *group)
+{
+ struct imx_key_drv_data *priv =
+ container_of(nb,
+ struct imx_key_drv_data,
+ key_notifier);
+
+ if ((event & SC_IRQ_BUTTON) && (*(u8 *)group == SC_IRQ_GROUP_WAKE)) {
+ schedule_delayed_work(&priv->check_work,
+ msecs_to_jiffies(DEBOUNCE_TIME));
+ pm_wakeup_event(priv->input->dev.parent, 0);
+ }
+
+ return 0;
+}
+
+static void imx_sc_check_for_events(struct work_struct *work)
+{
+ struct imx_key_drv_data *priv =
+ container_of(work,
+ struct imx_key_drv_data,
+ check_work.work);
+ struct input_dev *input = priv->input;
+ struct imx_sc_msg_key msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ bool state;
+ int error;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_GET_BUTTON_STATUS;
+ hdr->size = 1;
+
+ error = imx_scu_call_rpc(priv->key_ipc_handle, &msg, true);
+ if (error) {
+ dev_err(&input->dev, "read imx sc key failed, error %d\n", error);
+ return;
+ }
+
+ state = (bool)msg.state;
+
+ if (state ^ priv->keystate) {
+ priv->keystate = state;
+ input_event(input, EV_KEY, priv->keycode, state);
+ input_sync(input);
+ if (!priv->keystate)
+ pm_relax(priv->input->dev.parent);
+ }
+
+ if (state)
+ schedule_delayed_work(&priv->check_work,
+ msecs_to_jiffies(REPEAT_INTERVAL));
+}
+
+static int imx_sc_key_probe(struct platform_device *pdev)
+{
+ struct imx_key_drv_data *priv;
+ struct input_dev *input;
+ int error;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ error = imx_scu_get_handle(&priv->key_ipc_handle);
+ if (error)
+ return error;
+
+ if (device_property_read_u32(&pdev->dev, "linux,keycodes",
+ &priv->keycode)) {
+ dev_err(&pdev->dev, "missing linux,keycodes property\n");
+ return -EINVAL;
+ }
+
+ INIT_DELAYED_WORK(&priv->check_work, imx_sc_check_for_events);
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input) {
+ dev_err(&pdev->dev, "failed to allocate the input device\n");
+ return -ENOMEM;
+ }
+
+ input->name = pdev->name;
+ input->phys = "imx-sc-key/input0";
+ input->id.bustype = BUS_HOST;
+
+ input_set_capability(input, EV_KEY, priv->keycode);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ return error;
+ }
+
+ priv->input = input;
+ platform_set_drvdata(pdev, priv);
+
+ error = imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON,
+ true);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable scu group irq\n");
+ return error;
+ }
+
+ priv->key_notifier.notifier_call = imx_sc_key_notify;
+ error = imx_scu_irq_register_notifier(&priv->key_notifier);
+ if (error) {
+ imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON,
+ false);
+ dev_err(&pdev->dev, "failed to register scu notifier\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static int imx_sc_key_remove(struct platform_device *pdev)
+{
+ struct imx_key_drv_data *priv = platform_get_drvdata(pdev);
+
+ imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false);
+ imx_scu_irq_unregister_notifier(&priv->key_notifier);
+ cancel_delayed_work_sync(&priv->check_work);
+
+ return 0;
+}
+
+static const struct of_device_id imx_sc_key_ids[] = {
+ { .compatible = "fsl,imx-sc-key" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_sc_key_ids);
+
+static struct platform_driver imx_sc_key_driver = {
+ .driver = {
+ .name = "imx-sc-key",
+ .of_match_table = imx_sc_key_ids,
+ },
+ .probe = imx_sc_key_probe,
+ .remove = imx_sc_key_remove,
+};
+module_platform_driver(imx_sc_key_driver);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("i.MX System Controller Key Driver");
+MODULE_LICENSE("GPL v2");
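One detail of the new imx_sc_key driver worth calling out: the SCU notifier marks a wakeup event and the delayed work keeps rescheduling while the button stays pressed, calling pm_relax() only once it is released. A condensed sketch of that pairing; the function names are illustrative, not part of the driver.

/* Illustrative pairing of pm_wakeup_event()/pm_relax() around a debounced,
 * self-rescheduling check, mirroring imx_sc_key_notify() and
 * imx_sc_check_for_events() above.
 */
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/pm_wakeup.h>
#include <linux/workqueue.h>

static void button_irq_notify(struct device *dev, struct delayed_work *work)
{
	pm_wakeup_event(dev, 0);	/* hold the system awake while pending */
	schedule_delayed_work(work, msecs_to_jiffies(30));
}

static void button_check(struct device *dev, struct delayed_work *work,
			 bool pressed)
{
	if (pressed)			/* poll again until released */
		schedule_delayed_work(work, msecs_to_jiffies(60));
	else
		pm_relax(dev);		/* release the wakeup source */
}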
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 4232aa876d2e..7e35081393be 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -15,7 +15,6 @@
#include <linux/device.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -64,7 +63,7 @@ static const unsigned short jornada_scancodes[] = {
#define JORNADA_SCAN_SIZE 18
struct jornadakbd {
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
unsigned short keymap[ARRAY_SIZE(jornada_scancodes)];
unsigned char length;
unsigned char old_scan[JORNADA_SCAN_SIZE];
@@ -73,7 +72,7 @@ struct jornadakbd {
static void jornada_parse_kbd(struct jornadakbd *jornadakbd)
{
- struct input_dev *input_dev = jornadakbd->poll_dev->input;
+ struct input_dev *input_dev = jornadakbd->input;
unsigned short *keymap = jornadakbd->keymap;
unsigned int sync_me = 0;
unsigned int i, j;
@@ -167,9 +166,9 @@ static void jornada_scan_keyb(unsigned char *s)
*s++ = __raw_readb(PHDR);
}
-static void jornadakbd680_poll(struct input_polled_dev *dev)
+static void jornadakbd680_poll(struct input_dev *input)
{
- struct jornadakbd *jornadakbd = dev->private;
+ struct jornadakbd *jornadakbd = input_get_drvdata(input);
jornada_scan_keyb(jornadakbd->new_scan);
jornada_parse_kbd(jornadakbd);
@@ -179,7 +178,6 @@ static void jornadakbd680_poll(struct input_polled_dev *dev)
static int jornada680kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
- struct input_polled_dev *poll_dev;
struct input_dev *input_dev;
int i, error;
@@ -188,29 +186,24 @@ static int jornada680kbd_probe(struct platform_device *pdev)
if (!jornadakbd)
return -ENOMEM;
- poll_dev = devm_input_allocate_polled_device(&pdev->dev);
- if (!poll_dev) {
- dev_err(&pdev->dev, "failed to allocate polled input device\n");
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
return -ENOMEM;
}
- jornadakbd->poll_dev = poll_dev;
+ jornadakbd->input = input_dev;
memcpy(jornadakbd->keymap, jornada_scancodes,
sizeof(jornadakbd->keymap));
- poll_dev->private = jornadakbd;
- poll_dev->poll = jornadakbd680_poll;
- poll_dev->poll_interval = 50; /* msec */
-
- input_dev = poll_dev->input;
+ input_set_drvdata(input_dev, jornadakbd);
input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
input_dev->name = "HP Jornada 680 keyboard";
input_dev->phys = "jornadakbd/input0";
input_dev->keycode = jornadakbd->keymap;
input_dev->keycodesize = sizeof(unsigned short);
input_dev->keycodemax = ARRAY_SIZE(jornada_scancodes);
- input_dev->dev.parent = &pdev->dev;
input_dev->id.bustype = BUS_HOST;
for (i = 0; i < 128; i++)
@@ -220,9 +213,17 @@ static int jornada680kbd_probe(struct platform_device *pdev)
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- error = input_register_polled_device(jornadakbd->poll_dev);
+ error = input_setup_polling(input_dev, jornadakbd680_poll);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set up polling\n");
+ return error;
+ }
+
+ input_set_poll_interval(input_dev, 50 /* msec */);
+
+ error = input_register_device(input_dev);
if (error) {
- dev_err(&pdev->dev, "failed to register polled input device\n");
+ dev_err(&pdev->dev, "failed to register input device\n");
return error;
}
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index ee80de44ce3f..40d6e5087cde 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -54,6 +54,9 @@
/* MPR121 has 12 keys */
#define MPR121_MAX_KEY_COUNT 12
+#define MPR121_MIN_POLL_INTERVAL 10
+#define MPR121_MAX_POLL_INTERVAL 200
+
struct mpr121_touchkey {
struct i2c_client *client;
struct input_dev *input_dev;
@@ -115,11 +118,11 @@ static struct regulator *mpr121_vdd_supply_init(struct device *dev)
return vdd_supply;
}
-static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
+static void mpr_touchkey_report(struct input_dev *dev)
{
- struct mpr121_touchkey *mpr121 = dev_id;
- struct i2c_client *client = mpr121->client;
+ struct mpr121_touchkey *mpr121 = input_get_drvdata(dev);
struct input_dev *input = mpr121->input_dev;
+ struct i2c_client *client = mpr121->client;
unsigned long bit_changed;
unsigned int key_num;
int reg;
@@ -127,14 +130,14 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
if (reg < 0) {
dev_err(&client->dev, "i2c read error [%d]\n", reg);
- goto out;
+ return;
}
reg <<= 8;
reg |= i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_0_ADDR);
if (reg < 0) {
dev_err(&client->dev, "i2c read error [%d]\n", reg);
- goto out;
+ return;
}
reg &= TOUCH_STATUS_MASK;
@@ -155,8 +158,14 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
}
input_sync(input);
+}
+
+static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
+{
+ struct mpr121_touchkey *mpr121 = dev_id;
+
+ mpr_touchkey_report(mpr121->input_dev);
-out:
return IRQ_HANDLED;
}
@@ -229,14 +238,10 @@ static int mpr_touchkey_probe(struct i2c_client *client,
int vdd_uv;
struct mpr121_touchkey *mpr121;
struct input_dev *input_dev;
+ u32 poll_interval = 0;
int error;
int i;
- if (!client->irq) {
- dev_err(dev, "irq number should not be zero\n");
- return -EINVAL;
- }
-
vdd_supply = mpr121_vdd_supply_init(dev);
if (IS_ERR(vdd_supply))
return PTR_ERR(vdd_supply);
@@ -274,6 +279,7 @@ static int mpr_touchkey_probe(struct i2c_client *client,
if (device_property_read_bool(dev, "autorepeat"))
__set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input_dev, mpr121);
input_dev->keycode = mpr121->keycodes;
input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
@@ -288,13 +294,40 @@ static int mpr_touchkey_probe(struct i2c_client *client,
return error;
}
- error = devm_request_threaded_irq(dev, client->irq, NULL,
- mpr_touchkey_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev->driver->name, mpr121);
- if (error) {
- dev_err(dev, "Failed to register interrupt\n");
- return error;
+ device_property_read_u32(dev, "poll-interval", &poll_interval);
+
+ if (client->irq) {
+ error = devm_request_threaded_irq(dev, client->irq, NULL,
+ mpr_touchkey_interrupt,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev->driver->name, mpr121);
+ if (error) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return error;
+ }
+ } else if (poll_interval) {
+ if (poll_interval < MPR121_MIN_POLL_INTERVAL)
+ return -EINVAL;
+
+ if (poll_interval > MPR121_MAX_POLL_INTERVAL)
+ return -EINVAL;
+
+ error = input_setup_polling(input_dev, mpr_touchkey_report);
+ if (error) {
+ dev_err(dev, "Failed to setup polling\n");
+ return error;
+ }
+
+ input_set_poll_interval(input_dev, poll_interval);
+ input_set_min_poll_interval(input_dev,
+ MPR121_MIN_POLL_INTERVAL);
+ input_set_max_poll_interval(input_dev,
+ MPR121_MAX_POLL_INTERVAL);
+ } else {
+ dev_err(dev,
+ "invalid IRQ number and polling not configured\n");
+ return -EINVAL;
}
error = input_register_device(input_dev);
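
The mpr121 hunk above keeps the interrupt path but adds a polled fallback selected by a firmware "poll-interval" property. The following is a hedged sketch of that selection logic only, with invented example_* names and placeholder bounds; it is not the driver's code.

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/property.h>

#define EXAMPLE_MIN_POLL_MS	10
#define EXAMPLE_MAX_POLL_MS	200

static irqreturn_t example_isr(int irq, void *dev_id)	/* hypothetical */
{
	return IRQ_HANDLED;
}

static void example_report(struct input_dev *input)	/* hypothetical */
{
}

static int example_setup_events(struct i2c_client *client,
				struct input_dev *input)
{
	u32 poll_ms = 0;
	int error;

	device_property_read_u32(&client->dev, "poll-interval", &poll_ms);

	/* prefer the interrupt when one is wired up */
	if (client->irq)
		return devm_request_threaded_irq(&client->dev, client->irq,
						 NULL, example_isr,
						 IRQF_TRIGGER_FALLING |
						 IRQF_ONESHOT,
						 "example", client);

	/* otherwise fall back to polling at the firmware-provided rate */
	if (!poll_ms || poll_ms < EXAMPLE_MIN_POLL_MS ||
	    poll_ms > EXAMPLE_MAX_POLL_MS)
		return -EINVAL;

	error = input_setup_polling(input, example_report);
	if (error)
		return error;

	input_set_poll_interval(input, poll_ms);
	input_set_min_poll_interval(input, EXAMPLE_MIN_POLL_MS);
	input_set_max_poll_interval(input, EXAMPLE_MAX_POLL_MS);

	return 0;
}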
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7d9ae394e597..7e2e658d551c 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -100,7 +100,6 @@ config INPUT_ATMEL_CAPTOUCH
config INPUT_BMA150
tristate "BMA150/SMB380 acceleration sensor support"
depends on I2C
- select INPUT_POLLDEV
help
Say Y here if you have Bosch Sensortec's BMA150 or SMB380
acceleration sensor hooked to an I2C bus.
@@ -246,7 +245,6 @@ config INPUT_MC13783_PWRBUTTON
config INPUT_MMA8450
tristate "MMA8450 - Freescale's 3-Axis, 8/12-bit Digital Accelerometer"
depends on I2C
- select INPUT_POLLDEV
help
Say Y here if you want to support Freescale's MMA8450 Accelerometer
through I2C interface.
@@ -257,7 +255,6 @@ config INPUT_MMA8450
config INPUT_APANEL
tristate "Fujitsu Lifebook Application Panel buttons"
depends on X86 && I2C && LEDS_CLASS
- select INPUT_POLLDEV
select CHECK_SIGNATURE
help
Say Y here for support of the Application Panel buttons, used on
@@ -291,7 +288,6 @@ config INPUT_GPIO_BEEPER
config INPUT_GPIO_DECODER
tristate "Polled GPIO Decoder Input driver"
depends on GPIOLIB || COMPILE_TEST
- select INPUT_POLLDEV
help
Say Y here if you want the driver to read the status of multiple GPIO
lines and report the encoded value as an absolute integer to
@@ -327,7 +323,6 @@ config INPUT_IXP4XX_BEEPER
config INPUT_COBALT_BTNS
tristate "Cobalt button interface"
depends on MIPS_COBALT
- select INPUT_POLLDEV
help
Say Y here if you want to support MIPS Cobalt button interface.
@@ -347,7 +342,6 @@ config INPUT_CPCAP_PWRBUTTON
config INPUT_WISTRON_BTNS
tristate "x86 Wistron laptop button interface"
depends on X86_32
- select INPUT_POLLDEV
select INPUT_SPARSEKMAP
select NEW_LEDS
select LEDS_CLASS
@@ -410,13 +404,6 @@ config INPUT_KXTJ9
To compile this driver as a module, choose M here: the module will
be called kxtj9.
-config INPUT_KXTJ9_POLLED_MODE
- bool "Enable polling mode support"
- depends on INPUT_KXTJ9
- select INPUT_POLLDEV
- help
- Say Y here if you need accelerometer to work in polling mode.
-
config INPUT_POWERMATE
tristate "Griffin PowerMate and Contour Jog support"
depends on USB_ARCH_HAS_HCD
@@ -546,7 +533,6 @@ config INPUT_UINPUT
config INPUT_SGI_BTNS
tristate "SGI Indy/O2 volume button interface"
depends on SGI_IP22 || SGI_IP32
- select INPUT_POLLDEV
help
Say Y here if you want to support SGI Indy/O2 volume button interface.
@@ -637,7 +623,6 @@ config INPUT_RB532_BUTTON
tristate "Mikrotik Routerboard 532 button interface"
depends on MIKROTIK_RB532
depends on GPIOLIB
- select INPUT_POLLDEV
help
Say Y here if you want support for the S1 button built into
Mikrotik's Routerboard 532.
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 53ec40d1b90d..7276657ad7ca 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/leds.h>
@@ -51,19 +51,28 @@ static enum apanel_chip device_chip[APANEL_DEV_MAX];
#define MAX_PANEL_KEYS 12
struct apanel {
- struct input_polled_dev *ipdev;
+ struct input_dev *idev;
struct i2c_client *client;
unsigned short keymap[MAX_PANEL_KEYS];
- u16 nkeys;
+ u16 nkeys;
struct led_classdev mail_led;
};
+static const unsigned short apanel_keymap[MAX_PANEL_KEYS] = {
+ [0] = KEY_MAIL,
+ [1] = KEY_WWW,
+ [2] = KEY_PROG2,
+ [3] = KEY_PROG1,
-static int apanel_probe(struct i2c_client *, const struct i2c_device_id *);
+ [8] = KEY_FORWARD,
+ [9] = KEY_REWIND,
+ [10] = KEY_STOPCD,
+ [11] = KEY_PLAYPAUSE,
+};
static void report_key(struct input_dev *input, unsigned keycode)
{
- pr_debug(APANEL ": report key %#x\n", keycode);
+ dev_dbg(input->dev.parent, "report key %#x\n", keycode);
input_report_key(input, keycode, 1);
input_sync(input);
@@ -79,10 +88,9 @@ static void report_key(struct input_dev *input, unsigned keycode)
* CD keys:
* Forward (0x100), Rewind (0x200), Stop (0x400), Pause (0x800)
*/
-static void apanel_poll(struct input_polled_dev *ipdev)
+static void apanel_poll(struct input_dev *idev)
{
- struct apanel *ap = ipdev->private;
- struct input_dev *idev = ipdev->input;
+ struct apanel *ap = input_get_drvdata(idev);
u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
s32 data;
int i;
@@ -112,126 +120,93 @@ static int mail_led_set(struct led_classdev *led,
return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
}
-static int apanel_remove(struct i2c_client *client)
-{
- struct apanel *ap = i2c_get_clientdata(client);
-
- if (device_chip[APANEL_DEV_LED] != CHIP_NONE)
- led_classdev_unregister(&ap->mail_led);
-
- input_unregister_polled_device(ap->ipdev);
- input_free_polled_device(ap->ipdev);
-
- return 0;
-}
-
-static void apanel_shutdown(struct i2c_client *client)
-{
- apanel_remove(client);
-}
-
-static const struct i2c_device_id apanel_id[] = {
- { "fujitsu_apanel", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, apanel_id);
-
-static struct i2c_driver apanel_driver = {
- .driver = {
- .name = APANEL,
- },
- .probe = &apanel_probe,
- .remove = &apanel_remove,
- .shutdown = &apanel_shutdown,
- .id_table = apanel_id,
-};
-
-static struct apanel apanel = {
- .keymap = {
- [0] = KEY_MAIL,
- [1] = KEY_WWW,
- [2] = KEY_PROG2,
- [3] = KEY_PROG1,
-
- [8] = KEY_FORWARD,
- [9] = KEY_REWIND,
- [10] = KEY_STOPCD,
- [11] = KEY_PLAYPAUSE,
-
- },
- .mail_led = {
- .name = "mail:blue",
- .brightness_set_blocking = mail_led_set,
- },
-};
-
-/* NB: Only one panel on the i2c. */
static int apanel_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct apanel *ap;
- struct input_polled_dev *ipdev;
struct input_dev *idev;
u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
- int i, err = -ENOMEM;
+ int i, err;
- ap = &apanel;
+ ap = devm_kzalloc(&client->dev, sizeof(*ap), GFP_KERNEL);
+ if (!ap)
+ return -ENOMEM;
- ipdev = input_allocate_polled_device();
- if (!ipdev)
- goto out1;
+ idev = devm_input_allocate_device(&client->dev);
+ if (!idev)
+ return -ENOMEM;
- ap->ipdev = ipdev;
+ ap->idev = idev;
ap->client = client;
i2c_set_clientdata(client, ap);
err = i2c_smbus_write_word_data(client, cmd, 0);
if (err) {
- dev_warn(&client->dev, APANEL ": smbus write error %d\n",
- err);
- goto out3;
+ dev_warn(&client->dev, "smbus write error %d\n", err);
+ return err;
}
- ipdev->poll = apanel_poll;
- ipdev->poll_interval = POLL_INTERVAL_DEFAULT;
- ipdev->private = ap;
+ input_set_drvdata(idev, ap);
- idev = ipdev->input;
idev->name = APANEL_NAME " buttons";
idev->phys = "apanel/input0";
idev->id.bustype = BUS_HOST;
- idev->dev.parent = &client->dev;
-
- set_bit(EV_KEY, idev->evbit);
+ memcpy(ap->keymap, apanel_keymap, sizeof(apanel_keymap));
idev->keycode = ap->keymap;
idev->keycodesize = sizeof(ap->keymap[0]);
idev->keycodemax = (device_chip[APANEL_DEV_CDBTN] != CHIP_NONE) ? 12 : 4;
+ set_bit(EV_KEY, idev->evbit);
for (i = 0; i < idev->keycodemax; i++)
if (ap->keymap[i])
set_bit(ap->keymap[i], idev->keybit);
- err = input_register_polled_device(ipdev);
+ err = input_setup_polling(idev, apanel_poll);
if (err)
- goto out3;
+ return err;
+
+ input_set_poll_interval(idev, POLL_INTERVAL_DEFAULT);
+
+ err = input_register_device(idev);
+ if (err)
+ return err;
if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
- err = led_classdev_register(&client->dev, &ap->mail_led);
+ ap->mail_led.name = "mail:blue";
+ ap->mail_led.brightness_set_blocking = mail_led_set;
+ err = devm_led_classdev_register(&client->dev, &ap->mail_led);
if (err)
- goto out4;
+ return err;
}
return 0;
-out4:
- input_unregister_polled_device(ipdev);
-out3:
- input_free_polled_device(ipdev);
-out1:
- return err;
}
+static void apanel_shutdown(struct i2c_client *client)
+{
+ struct apanel *ap = i2c_get_clientdata(client);
+
+ if (device_chip[APANEL_DEV_LED] != CHIP_NONE)
+ led_set_brightness(&ap->mail_led, LED_OFF);
+}
+
+static const struct i2c_device_id apanel_id[] = {
+ { "fujitsu_apanel", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, apanel_id);
+
+static struct i2c_driver apanel_driver = {
+ .driver = {
+ .name = APANEL,
+ },
+ .probe = apanel_probe,
+ .shutdown = apanel_shutdown,
+ .id_table = apanel_id,
+};
+
/* Scan the system ROM for the signature "FJKEYINF" */
static __init const void __iomem *bios_signature(const void __iomem *bios)
{
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 735d3a46f44b..a9d984da95f3 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -123,7 +122,6 @@
struct bma150_data {
struct i2c_client *client;
- struct input_polled_dev *input_polled;
struct input_dev *input;
u8 mode;
};
@@ -336,13 +334,16 @@ static irqreturn_t bma150_irq_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static void bma150_poll(struct input_polled_dev *dev)
+static void bma150_poll(struct input_dev *input)
{
- bma150_report_xyz(dev->private);
+ struct bma150_data *bma150 = input_get_drvdata(input);
+
+ bma150_report_xyz(bma150);
}
-static int bma150_open(struct bma150_data *bma150)
+static int bma150_open(struct input_dev *input)
{
+ struct bma150_data *bma150 = input_get_drvdata(input);
int error;
error = pm_runtime_get_sync(&bma150->client->dev);
@@ -362,44 +363,18 @@ static int bma150_open(struct bma150_data *bma150)
return 0;
}
-static void bma150_close(struct bma150_data *bma150)
+static void bma150_close(struct input_dev *input)
{
+ struct bma150_data *bma150 = input_get_drvdata(input);
+
pm_runtime_put_sync(&bma150->client->dev);
if (bma150->mode != BMA150_MODE_SLEEP)
bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static int bma150_irq_open(struct input_dev *input)
-{
- struct bma150_data *bma150 = input_get_drvdata(input);
-
- return bma150_open(bma150);
-}
-
-static void bma150_irq_close(struct input_dev *input)
-{
- struct bma150_data *bma150 = input_get_drvdata(input);
-
- bma150_close(bma150);
-}
-
-static void bma150_poll_open(struct input_polled_dev *ipoll_dev)
-{
- struct bma150_data *bma150 = ipoll_dev->private;
-
- bma150_open(bma150);
-}
-
-static void bma150_poll_close(struct input_polled_dev *ipoll_dev)
-{
- struct bma150_data *bma150 = ipoll_dev->private;
-
- bma150_close(bma150);
-}
-
static int bma150_initialize(struct bma150_data *bma150,
- const struct bma150_cfg *cfg)
+ const struct bma150_cfg *cfg)
{
int error;
@@ -439,84 +414,14 @@ static int bma150_initialize(struct bma150_data *bma150,
return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static void bma150_init_input_device(struct bma150_data *bma150,
- struct input_dev *idev)
-{
- idev->name = BMA150_DRIVER;
- idev->phys = BMA150_DRIVER "/input0";
- idev->id.bustype = BUS_I2C;
- idev->dev.parent = &bma150->client->dev;
-
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
- input_set_abs_params(idev, ABS_Y, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
- input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
-}
-
-static int bma150_register_input_device(struct bma150_data *bma150)
-{
- struct input_dev *idev;
- int error;
-
- idev = input_allocate_device();
- if (!idev)
- return -ENOMEM;
-
- bma150_init_input_device(bma150, idev);
-
- idev->open = bma150_irq_open;
- idev->close = bma150_irq_close;
- input_set_drvdata(idev, bma150);
-
- bma150->input = idev;
-
- error = input_register_device(idev);
- if (error) {
- input_free_device(idev);
- return error;
- }
-
- return 0;
-}
-
-static int bma150_register_polled_device(struct bma150_data *bma150)
-{
- struct input_polled_dev *ipoll_dev;
- int error;
-
- ipoll_dev = input_allocate_polled_device();
- if (!ipoll_dev)
- return -ENOMEM;
-
- ipoll_dev->private = bma150;
- ipoll_dev->open = bma150_poll_open;
- ipoll_dev->close = bma150_poll_close;
- ipoll_dev->poll = bma150_poll;
- ipoll_dev->poll_interval = BMA150_POLL_INTERVAL;
- ipoll_dev->poll_interval_min = BMA150_POLL_MIN;
- ipoll_dev->poll_interval_max = BMA150_POLL_MAX;
-
- bma150_init_input_device(bma150, ipoll_dev->input);
-
- bma150->input_polled = ipoll_dev;
- bma150->input = ipoll_dev->input;
-
- error = input_register_polled_device(ipoll_dev);
- if (error) {
- input_free_polled_device(ipoll_dev);
- return error;
- }
-
- return 0;
-}
-
static int bma150_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
const struct bma150_platform_data *pdata =
dev_get_platdata(&client->dev);
const struct bma150_cfg *cfg;
struct bma150_data *bma150;
+ struct input_dev *idev;
int chip_id;
int error;
@@ -531,7 +436,7 @@ static int bma150_probe(struct i2c_client *client,
return -EINVAL;
}
- bma150 = kzalloc(sizeof(struct bma150_data), GFP_KERNEL);
+ bma150 = devm_kzalloc(&client->dev, sizeof(*bma150), GFP_KERNEL);
if (!bma150)
return -ENOMEM;
@@ -544,7 +449,7 @@ static int bma150_probe(struct i2c_client *client,
dev_err(&client->dev,
"IRQ GPIO conf. error %d, error %d\n",
client->irq, error);
- goto err_free_mem;
+ return error;
}
}
cfg = &pdata->cfg;
@@ -554,14 +459,42 @@ static int bma150_probe(struct i2c_client *client,
error = bma150_initialize(bma150, cfg);
if (error)
- goto err_free_mem;
+ return error;
- if (client->irq > 0) {
- error = bma150_register_input_device(bma150);
+ idev = devm_input_allocate_device(&bma150->client->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ input_set_drvdata(idev, bma150);
+ bma150->input = idev;
+
+ idev->name = BMA150_DRIVER;
+ idev->phys = BMA150_DRIVER "/input0";
+ idev->id.bustype = BUS_I2C;
+
+ idev->open = bma150_open;
+ idev->close = bma150_close;
+
+ input_set_abs_params(idev, ABS_X, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+ input_set_abs_params(idev, ABS_Y, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+ input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+
+ if (client->irq <= 0) {
+ error = input_setup_polling(idev, bma150_poll);
if (error)
- goto err_free_mem;
+ return error;
+
+ input_set_poll_interval(idev, BMA150_POLL_INTERVAL);
+ input_set_min_poll_interval(idev, BMA150_POLL_MIN);
+ input_set_max_poll_interval(idev, BMA150_POLL_MAX);
+ }
+
+ error = input_register_device(idev);
+ if (error)
+ return error;
- error = request_threaded_irq(client->irq,
+ if (client->irq > 0) {
+ error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, bma150_irq_thread,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
BMA150_DRIVER, bma150);
@@ -569,13 +502,8 @@ static int bma150_probe(struct i2c_client *client,
dev_err(&client->dev,
"irq request failed %d, error %d\n",
client->irq, error);
- input_unregister_device(bma150->input);
- goto err_free_mem;
+ return error;
}
- } else {
- error = bma150_register_polled_device(bma150);
- if (error)
- goto err_free_mem;
}
i2c_set_clientdata(client, bma150);
@@ -583,33 +511,16 @@ static int bma150_probe(struct i2c_client *client,
pm_runtime_enable(&client->dev);
return 0;
-
-err_free_mem:
- kfree(bma150);
- return error;
}
static int bma150_remove(struct i2c_client *client)
{
- struct bma150_data *bma150 = i2c_get_clientdata(client);
-
pm_runtime_disable(&client->dev);
- if (client->irq > 0) {
- free_irq(client->irq, bma150);
- input_unregister_device(bma150->input);
- } else {
- input_unregister_polled_device(bma150->input_polled);
- input_free_polled_device(bma150->input_polled);
- }
-
- kfree(bma150);
-
return 0;
}
-#ifdef CONFIG_PM
-static int bma150_suspend(struct device *dev)
+static int __maybe_unused bma150_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct bma150_data *bma150 = i2c_get_clientdata(client);
@@ -617,14 +528,13 @@ static int bma150_suspend(struct device *dev)
return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static int bma150_resume(struct device *dev)
+static int __maybe_unused bma150_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct bma150_data *bma150 = i2c_get_clientdata(client);
return bma150_set_mode(bma150, BMA150_MODE_NORMAL);
}
-#endif
static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
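
The bma150 hunks fold the four irq/polled open-close wrappers into one pair of callbacks on the input_dev itself. A small sketch of that idea follows; accel_* is a made-up name and the pm_runtime handling only mirrors what the driver does above.

#include <linux/input.h>
#include <linux/pm_runtime.h>

struct accel_data {
	struct device *dev;		/* hypothetical driver state */
};

static int accel_open(struct input_dev *input)
{
	struct accel_data *accel = input_get_drvdata(input);
	int error;

	/* power up whenever userspace opens the node, polled or not */
	error = pm_runtime_get_sync(accel->dev);
	if (error < 0 && error != -ENOSYS) {
		pm_runtime_put_noidle(accel->dev);
		return error;
	}

	return 0;
}

static void accel_close(struct input_dev *input)
{
	struct accel_data *accel = input_get_drvdata(input);

	pm_runtime_put_sync(accel->dev);
}

static void accel_wire_up(struct input_dev *input, struct accel_data *accel)
{
	input_set_drvdata(input, accel);
	input->open = accel_open;
	input->close = accel_close;
}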
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index bcf6174bbd5d..b1624f5414ee 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -4,7 +4,8 @@
*
* Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*/
-#include <linux/input-polldev.h>
+#include <linux/input.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,16 +27,14 @@ static const unsigned short cobalt_map[] = {
};
struct buttons_dev {
- struct input_polled_dev *poll_dev;
unsigned short keymap[ARRAY_SIZE(cobalt_map)];
int count[ARRAY_SIZE(cobalt_map)];
void __iomem *reg;
};
-static void handle_buttons(struct input_polled_dev *dev)
+static void handle_buttons(struct input_dev *input)
{
- struct buttons_dev *bdev = dev->private;
- struct input_dev *input = dev->input;
+ struct buttons_dev *bdev = input_get_drvdata(input);
uint32_t status;
int i;
@@ -62,29 +61,33 @@ static void handle_buttons(struct input_polled_dev *dev)
static int cobalt_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
- struct input_polled_dev *poll_dev;
struct input_dev *input;
struct resource *res;
int error, i;
- bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL);
- poll_dev = input_allocate_polled_device();
- if (!bdev || !poll_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EBUSY;
+
+ bdev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!bdev->reg)
+ return -ENOMEM;
memcpy(bdev->keymap, cobalt_map, sizeof(bdev->keymap));
- poll_dev->private = bdev;
- poll_dev->poll = handle_buttons;
- poll_dev->poll_interval = BUTTONS_POLL_INTERVAL;
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input_set_drvdata(input, bdev);
- input = poll_dev->input;
input->name = "Cobalt buttons";
input->phys = "cobalt/input0";
input->id.bustype = BUS_HOST;
- input->dev.parent = &pdev->dev;
input->keycode = bdev->keymap;
input->keycodemax = ARRAY_SIZE(bdev->keymap);
@@ -96,39 +99,16 @@ static int cobalt_buttons_probe(struct platform_device *pdev)
__set_bit(bdev->keymap[i], input->keybit);
__clear_bit(KEY_RESERVED, input->keybit);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- error = -EBUSY;
- goto err_free_mem;
- }
-
- bdev->poll_dev = poll_dev;
- bdev->reg = ioremap(res->start, resource_size(res));
- dev_set_drvdata(&pdev->dev, bdev);
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input, handle_buttons);
if (error)
- goto err_iounmap;
-
- return 0;
+ return error;
- err_iounmap:
- iounmap(bdev->reg);
- err_free_mem:
- input_free_polled_device(poll_dev);
- kfree(bdev);
- return error;
-}
+ input_set_poll_interval(input, BUTTONS_POLL_INTERVAL);
-static int cobalt_buttons_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct buttons_dev *bdev = dev_get_drvdata(dev);
-
- input_unregister_polled_device(bdev->poll_dev);
- input_free_polled_device(bdev->poll_dev);
- iounmap(bdev->reg);
- kfree(bdev);
+ error = input_register_device(input);
+ if (error)
+ return error;
return 0;
}
@@ -141,7 +121,6 @@ MODULE_ALIAS("platform:Cobalt buttons");
static struct platform_driver cobalt_buttons_driver = {
.probe = cobalt_buttons_probe,
- .remove = cobalt_buttons_remove,
.driver = {
.name = "Cobalt buttons",
},
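
The cobalt hunks also swap the hand-rolled ioremap()/iounmap() pair for a devm mapping, which is what lets the remove() callback go away. A sketch of that fragment, with an invented example_* helper:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* hypothetical probe helper */
static void __iomem *example_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/* unmapped automatically when the device is unbound */
	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}

devm_platform_ioremap_resource() is the shorter modern spelling of the same request-and-map step.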
diff --git a/drivers/input/misc/gpio_decoder.c b/drivers/input/misc/gpio_decoder.c
index 1dca526e6f1a..145826a1a9a1 100644
--- a/drivers/input/misc/gpio_decoder.c
+++ b/drivers/input/misc/gpio_decoder.c
@@ -17,14 +17,12 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
struct gpio_decoder {
- struct input_polled_dev *poll_dev;
struct gpio_descs *input_gpios;
struct device *dev;
u32 axis;
@@ -53,15 +51,15 @@ static int gpio_decoder_get_gpios_state(struct gpio_decoder *decoder)
return ret;
}
-static void gpio_decoder_poll_gpios(struct input_polled_dev *poll_dev)
+static void gpio_decoder_poll_gpios(struct input_dev *input)
{
- struct gpio_decoder *decoder = poll_dev->private;
+ struct gpio_decoder *decoder = input_get_drvdata(input);
int state;
state = gpio_decoder_get_gpios_state(decoder);
if (state >= 0 && state != decoder->last_stable) {
- input_report_abs(poll_dev->input, decoder->axis, state);
- input_sync(poll_dev->input);
+ input_report_abs(input, decoder->axis, state);
+ input_sync(input);
decoder->last_stable = state;
}
}
@@ -70,20 +68,23 @@ static int gpio_decoder_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_decoder *decoder;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
u32 max;
int err;
- decoder = devm_kzalloc(dev, sizeof(struct gpio_decoder), GFP_KERNEL);
+ decoder = devm_kzalloc(dev, sizeof(*decoder), GFP_KERNEL);
if (!decoder)
return -ENOMEM;
+ decoder->dev = dev;
device_property_read_u32(dev, "linux,axis", &decoder->axis);
+
decoder->input_gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
if (IS_ERR(decoder->input_gpios)) {
dev_err(dev, "unable to acquire input gpios\n");
return PTR_ERR(decoder->input_gpios);
}
+
if (decoder->input_gpios->ndescs < 2) {
dev_err(dev, "not enough gpios found\n");
return -EINVAL;
@@ -92,22 +93,25 @@ static int gpio_decoder_probe(struct platform_device *pdev)
if (device_property_read_u32(dev, "decoder-max-value", &max))
max = (1U << decoder->input_gpios->ndescs) - 1;
- decoder->dev = dev;
- poll_dev = devm_input_allocate_polled_device(decoder->dev);
- if (!poll_dev)
+ input = devm_input_allocate_device(dev);
+ if (!input)
return -ENOMEM;
- poll_dev->private = decoder;
- poll_dev->poll = gpio_decoder_poll_gpios;
- decoder->poll_dev = poll_dev;
+ input_set_drvdata(input, decoder);
- poll_dev->input->name = pdev->name;
- poll_dev->input->id.bustype = BUS_HOST;
- input_set_abs_params(poll_dev->input, decoder->axis, 0, max, 0, 0);
+ input->name = pdev->name;
+ input->id.bustype = BUS_HOST;
+ input_set_abs_params(input, decoder->axis, 0, max, 0, 0);
+
+ err = input_setup_polling(input, gpio_decoder_poll_gpios);
+ if (err) {
+ dev_err(dev, "failed to set up polling\n");
+ return err;
+ }
- err = input_register_polled_device(poll_dev);
+ err = input_register_device(input);
if (err) {
- dev_err(dev, "failed to register polled device\n");
+ dev_err(dev, "failed to register input device\n");
return err;
}
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index abca895a6156..199bc17ddb1d 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -53,28 +53,10 @@ MODULE_LICENSE("Dual BSD/GPL");
#define RTC_VERSION "1.10d"
-static DEFINE_MUTEX(hp_sdc_rtc_mutex);
static unsigned long epoch = 2000;
static struct semaphore i8042tregs;
-static hp_sdc_irqhook hp_sdc_rtc_isr;
-
-static struct fasync_struct *hp_sdc_rtc_async_queue;
-
-static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
-
-static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-
-static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg);
-
-static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
-
-static int hp_sdc_rtc_open(struct inode *inode, struct file *file);
-static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on);
-
static void hp_sdc_rtc_isr (int irq, void *dev_id,
uint8_t status, uint8_t data)
{
@@ -283,151 +265,6 @@ static inline int hp_sdc_rtc_read_ct(struct timespec64 *res) {
return 0;
}
-
-#if 0 /* not used yet */
-/* Set the i8042 real-time clock */
-static int hp_sdc_rtc_set_rt (struct timeval *setto)
-{
- uint32_t tenms;
- unsigned int days;
- hp_sdc_transaction t;
- uint8_t tseq[11] = {
- HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT,
- HP_SDC_CMD_SET_RTMS, 3, 0, 0, 0,
- HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT,
- HP_SDC_CMD_SET_RTD, 2, 0, 0
- };
-
- t.endidx = 10;
-
- if (0xffff < setto->tv_sec / 86400) return -1;
- days = setto->tv_sec / 86400;
- if (0xffff < setto->tv_usec / 1000000 / 86400) return -1;
- days += ((setto->tv_sec % 86400) + setto->tv_usec / 1000000) / 86400;
- if (days > 0xffff) return -1;
-
- if (0xffffff < setto->tv_sec) return -1;
- tenms = setto->tv_sec * 100;
- if (0xffffff < setto->tv_usec / 10000) return -1;
- tenms += setto->tv_usec / 10000;
- if (tenms > 0xffffff) return -1;
-
- tseq[3] = (uint8_t)(tenms & 0xff);
- tseq[4] = (uint8_t)((tenms >> 8) & 0xff);
- tseq[5] = (uint8_t)((tenms >> 16) & 0xff);
-
- tseq[9] = (uint8_t)(days & 0xff);
- tseq[10] = (uint8_t)((days >> 8) & 0xff);
-
- t.seq = tseq;
-
- if (hp_sdc_enqueue_transaction(&t)) return -1;
- return 0;
-}
-
-/* Set the i8042 fast handshake timer */
-static int hp_sdc_rtc_set_fhs (struct timeval *setto)
-{
- uint32_t tenms;
- hp_sdc_transaction t;
- uint8_t tseq[5] = {
- HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT,
- HP_SDC_CMD_SET_FHS, 2, 0, 0
- };
-
- t.endidx = 4;
-
- if (0xffff < setto->tv_sec) return -1;
- tenms = setto->tv_sec * 100;
- if (0xffff < setto->tv_usec / 10000) return -1;
- tenms += setto->tv_usec / 10000;
- if (tenms > 0xffff) return -1;
-
- tseq[3] = (uint8_t)(tenms & 0xff);
- tseq[4] = (uint8_t)((tenms >> 8) & 0xff);
-
- t.seq = tseq;
-
- if (hp_sdc_enqueue_transaction(&t)) return -1;
- return 0;
-}
-
-
-/* Set the i8042 match timer (a.k.a. alarm) */
-#define hp_sdc_rtc_set_mt (setto) \
- hp_sdc_rtc_set_i8042timer(setto, HP_SDC_CMD_SET_MT)
-
-/* Set the i8042 delay timer */
-#define hp_sdc_rtc_set_dt (setto) \
- hp_sdc_rtc_set_i8042timer(setto, HP_SDC_CMD_SET_DT)
-
-/* Set the i8042 cycle timer (a.k.a. periodic) */
-#define hp_sdc_rtc_set_ct (setto) \
- hp_sdc_rtc_set_i8042timer(setto, HP_SDC_CMD_SET_CT)
-
-/* Set one of the i8042 3-byte wide timers */
-static int hp_sdc_rtc_set_i8042timer (struct timeval *setto, uint8_t setcmd)
-{
- uint32_t tenms;
- hp_sdc_transaction t;
- uint8_t tseq[6] = {
- HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT,
- 0, 3, 0, 0, 0
- };
-
- t.endidx = 6;
-
- if (0xffffff < setto->tv_sec) return -1;
- tenms = setto->tv_sec * 100;
- if (0xffffff < setto->tv_usec / 10000) return -1;
- tenms += setto->tv_usec / 10000;
- if (tenms > 0xffffff) return -1;
-
- tseq[1] = setcmd;
- tseq[3] = (uint8_t)(tenms & 0xff);
- tseq[4] = (uint8_t)((tenms >> 8) & 0xff);
- tseq[5] = (uint8_t)((tenms >> 16) & 0xff);
-
- t.seq = tseq;
-
- if (hp_sdc_enqueue_transaction(&t)) {
- return -1;
- }
- return 0;
-}
-#endif
-
-static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos) {
- ssize_t retval;
-
- if (count < sizeof(unsigned long))
- return -EINVAL;
-
- retval = put_user(68, (unsigned long __user *)buf);
- return retval;
-}
-
-static __poll_t hp_sdc_rtc_poll(struct file *file, poll_table *wait)
-{
- unsigned long l;
-
- l = 0;
- if (l != 0)
- return EPOLLIN | EPOLLRDNORM;
- return 0;
-}
-
-static int hp_sdc_rtc_open(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on)
-{
- return fasync_helper (fd, filp, on, &hp_sdc_rtc_async_queue);
-}
-
static int hp_sdc_rtc_proc_show(struct seq_file *m, void *v)
{
#define YN(bit) ("no")
@@ -507,182 +344,6 @@ static int hp_sdc_rtc_proc_show(struct seq_file *m, void *v)
#undef NY
}
-static int hp_sdc_rtc_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
-#if 1
- return -EINVAL;
-#else
-
- struct rtc_time wtime;
- struct timeval ttime;
- int use_wtime = 0;
-
- /* This needs major work. */
-
- switch (cmd) {
-
- case RTC_AIE_OFF: /* Mask alarm int. enab. bit */
- case RTC_AIE_ON: /* Allow alarm interrupts. */
- case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
- case RTC_PIE_ON: /* Allow periodic ints */
- case RTC_UIE_ON: /* Allow ints for RTC updates. */
- case RTC_UIE_OFF: /* Allow ints for RTC updates. */
- {
- /* We cannot mask individual user timers and we
- cannot tell them apart when they occur, so it
- would be disingenuous to succeed these IOCTLs */
- return -EINVAL;
- }
- case RTC_ALM_READ: /* Read the present alarm time */
- {
- if (hp_sdc_rtc_read_mt(&ttime)) return -EFAULT;
- if (hp_sdc_rtc_read_bbrtc(&wtime)) return -EFAULT;
-
- wtime.tm_hour = ttime.tv_sec / 3600; ttime.tv_sec %= 3600;
- wtime.tm_min = ttime.tv_sec / 60; ttime.tv_sec %= 60;
- wtime.tm_sec = ttime.tv_sec;
-
- break;
- }
- case RTC_IRQP_READ: /* Read the periodic IRQ rate. */
- {
- return put_user(hp_sdc_rtc_freq, (unsigned long *)arg);
- }
- case RTC_IRQP_SET: /* Set periodic IRQ rate. */
- {
- /*
- * The max we can do is 100Hz.
- */
-
- if ((arg < 1) || (arg > 100)) return -EINVAL;
- ttime.tv_sec = 0;
- ttime.tv_usec = 1000000 / arg;
- if (hp_sdc_rtc_set_ct(&ttime)) return -EFAULT;
- hp_sdc_rtc_freq = arg;
- return 0;
- }
- case RTC_ALM_SET: /* Store a time into the alarm */
- {
- /*
- * This expects a struct hp_sdc_rtc_time. Writing 0xff means
- * "don't care" or "match all" for PC timers. The HP SDC
- * does not support that perk, but it could be emulated fairly
- * easily. Only the tm_hour, tm_min and tm_sec are used.
- * We could do it with 10ms accuracy with the HP SDC, if the
- * rtc interface left us a way to do that.
- */
- struct hp_sdc_rtc_time alm_tm;
-
- if (copy_from_user(&alm_tm, (struct hp_sdc_rtc_time*)arg,
- sizeof(struct hp_sdc_rtc_time)))
- return -EFAULT;
-
- if (alm_tm.tm_hour > 23) return -EINVAL;
- if (alm_tm.tm_min > 59) return -EINVAL;
- if (alm_tm.tm_sec > 59) return -EINVAL;
-
- ttime.sec = alm_tm.tm_hour * 3600 +
- alm_tm.tm_min * 60 + alm_tm.tm_sec;
- ttime.usec = 0;
- if (hp_sdc_rtc_set_mt(&ttime)) return -EFAULT;
- return 0;
- }
- case RTC_RD_TIME: /* Read the time/date from RTC */
- {
- if (hp_sdc_rtc_read_bbrtc(&wtime)) return -EFAULT;
- break;
- }
- case RTC_SET_TIME: /* Set the RTC */
- {
- struct rtc_time hp_sdc_rtc_tm;
- unsigned char mon, day, hrs, min, sec, leap_yr;
- unsigned int yrs;
-
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
- if (copy_from_user(&hp_sdc_rtc_tm, (struct rtc_time *)arg,
- sizeof(struct rtc_time)))
- return -EFAULT;
-
- yrs = hp_sdc_rtc_tm.tm_year + 1900;
- mon = hp_sdc_rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
- day = hp_sdc_rtc_tm.tm_mday;
- hrs = hp_sdc_rtc_tm.tm_hour;
- min = hp_sdc_rtc_tm.tm_min;
- sec = hp_sdc_rtc_tm.tm_sec;
-
- if (yrs < 1970)
- return -EINVAL;
-
- leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
-
- if ((mon > 12) || (day == 0))
- return -EINVAL;
- if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
- return -EINVAL;
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
- if ((yrs -= epoch) > 255) /* They are unsigned */
- return -EINVAL;
-
-
- return 0;
- }
- case RTC_EPOCH_READ: /* Read the epoch. */
- {
- return put_user (epoch, (unsigned long *)arg);
- }
- case RTC_EPOCH_SET: /* Set the epoch. */
- {
- /*
- * There were no RTC clocks before 1900.
- */
- if (arg < 1900)
- return -EINVAL;
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
-
- epoch = arg;
- return 0;
- }
- default:
- return -EINVAL;
- }
- return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
-#endif
-}
-
-static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&hp_sdc_rtc_mutex);
- ret = hp_sdc_rtc_ioctl(file, cmd, arg);
- mutex_unlock(&hp_sdc_rtc_mutex);
-
- return ret;
-}
-
-
-static const struct file_operations hp_sdc_rtc_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hp_sdc_rtc_read,
- .poll = hp_sdc_rtc_poll,
- .unlocked_ioctl = hp_sdc_rtc_unlocked_ioctl,
- .open = hp_sdc_rtc_open,
- .fasync = hp_sdc_rtc_fasync,
-};
-
-static struct miscdevice hp_sdc_rtc_dev = {
- .minor = RTC_MINOR,
- .name = "rtc_HIL",
- .fops = &hp_sdc_rtc_fops
-};
-
static int __init hp_sdc_rtc_init(void)
{
int ret;
@@ -696,8 +357,6 @@ static int __init hp_sdc_rtc_init(void)
if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
return ret;
- if (misc_register(&hp_sdc_rtc_dev) != 0)
- printk(KERN_INFO "Could not register misc. dev for i8042 rtc\n");
proc_create_single("driver/rtc", 0, NULL, hp_sdc_rtc_proc_show);
@@ -710,7 +369,6 @@ static int __init hp_sdc_rtc_init(void)
static void __exit hp_sdc_rtc_exit(void)
{
remove_proc_entry ("driver/rtc", NULL);
- misc_deregister(&hp_sdc_rtc_dev);
hp_sdc_release_timer_irq(hp_sdc_rtc_isr);
printk(KERN_INFO "HP i8042 SDC + MSM-58321 RTC support unloaded\n");
}
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index db01c4a33914..52313c6e3fb3 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input/kxtj9.h>
-#include <linux/input-polldev.h>
#define NAME "kxtj9"
#define G_MAX 8000
@@ -71,9 +70,6 @@ struct kxtj9_data {
struct i2c_client *client;
struct kxtj9_platform_data pdata;
struct input_dev *input_dev;
-#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
- struct input_polled_dev *poll_dev;
-#endif
unsigned int last_poll_interval;
u8 shift;
u8 ctrl_reg1;
@@ -282,50 +278,6 @@ static void kxtj9_input_close(struct input_dev *dev)
kxtj9_disable(tj9);
}
-static void kxtj9_init_input_device(struct kxtj9_data *tj9,
- struct input_dev *input_dev)
-{
- __set_bit(EV_ABS, input_dev->evbit);
- input_set_abs_params(input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
- input_set_abs_params(input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
- input_set_abs_params(input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);
-
- input_dev->name = "kxtj9_accel";
- input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &tj9->client->dev;
-}
-
-static int kxtj9_setup_input_device(struct kxtj9_data *tj9)
-{
- struct input_dev *input_dev;
- int err;
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- dev_err(&tj9->client->dev, "input device allocate failed\n");
- return -ENOMEM;
- }
-
- tj9->input_dev = input_dev;
-
- input_dev->open = kxtj9_input_open;
- input_dev->close = kxtj9_input_close;
- input_set_drvdata(input_dev, tj9);
-
- kxtj9_init_input_device(tj9, input_dev);
-
- err = input_register_device(tj9->input_dev);
- if (err) {
- dev_err(&tj9->client->dev,
- "unable to register input polled device %s: %d\n",
- tj9->input_dev->name, err);
- input_free_device(tj9->input_dev);
- return err;
- }
-
- return 0;
-}
-
/*
* When IRQ mode is selected, we need to provide an interface to allow the user
* to change the output data rate of the part. For consistency, we are using
@@ -391,12 +343,10 @@ static struct attribute_group kxtj9_attribute_group = {
.attrs = kxtj9_attributes
};
-
-#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
-static void kxtj9_poll(struct input_polled_dev *dev)
+static void kxtj9_poll(struct input_dev *input)
{
- struct kxtj9_data *tj9 = dev->private;
- unsigned int poll_interval = dev->poll_interval;
+ struct kxtj9_data *tj9 = input_get_drvdata(input);
+ unsigned int poll_interval = input_get_poll_interval(input);
kxtj9_report_acceleration_data(tj9);
@@ -406,72 +356,14 @@ static void kxtj9_poll(struct input_polled_dev *dev)
}
}
-static void kxtj9_polled_input_open(struct input_polled_dev *dev)
-{
- struct kxtj9_data *tj9 = dev->private;
-
- kxtj9_enable(tj9);
-}
-
-static void kxtj9_polled_input_close(struct input_polled_dev *dev)
-{
- struct kxtj9_data *tj9 = dev->private;
-
- kxtj9_disable(tj9);
-}
-
-static int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
-{
- int err;
- struct input_polled_dev *poll_dev;
- poll_dev = input_allocate_polled_device();
-
- if (!poll_dev) {
- dev_err(&tj9->client->dev,
- "Failed to allocate polled device\n");
- return -ENOMEM;
- }
-
- tj9->poll_dev = poll_dev;
- tj9->input_dev = poll_dev->input;
-
- poll_dev->private = tj9;
- poll_dev->poll = kxtj9_poll;
- poll_dev->open = kxtj9_polled_input_open;
- poll_dev->close = kxtj9_polled_input_close;
-
- kxtj9_init_input_device(tj9, poll_dev->input);
-
- err = input_register_polled_device(poll_dev);
- if (err) {
- dev_err(&tj9->client->dev,
- "Unable to register polled device, err=%d\n", err);
- input_free_polled_device(poll_dev);
- return err;
- }
-
- return 0;
-}
-
-static void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
-{
- input_unregister_polled_device(tj9->poll_dev);
- input_free_polled_device(tj9->poll_dev);
-}
-
-#else
-
-static inline int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
+static void kxtj9_platform_exit(void *data)
{
- return -ENOSYS;
-}
+ struct kxtj9_data *tj9 = data;
-static inline void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
-{
+ if (tj9->pdata.exit)
+ tj9->pdata.exit();
}
-#endif
-
static int kxtj9_verify(struct kxtj9_data *tj9)
{
int retval;
@@ -494,11 +386,12 @@ out:
}
static int kxtj9_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
const struct kxtj9_platform_data *pdata =
dev_get_platdata(&client->dev);
struct kxtj9_data *tj9;
+ struct input_dev *input_dev;
int err;
if (!i2c_check_functionality(client->adapter,
@@ -512,7 +405,7 @@ static int kxtj9_probe(struct i2c_client *client,
return -EINVAL;
}
- tj9 = kzalloc(sizeof(*tj9), GFP_KERNEL);
+ tj9 = devm_kzalloc(&client->dev, sizeof(*tj9), GFP_KERNEL);
if (!tj9) {
dev_err(&client->dev,
"failed to allocate memory for module data\n");
@@ -525,13 +418,17 @@ static int kxtj9_probe(struct i2c_client *client,
if (pdata->init) {
err = pdata->init();
if (err < 0)
- goto err_free_mem;
+ return err;
}
+ err = devm_add_action_or_reset(&client->dev, kxtj9_platform_exit, tj9);
+ if (err)
+ return err;
+
err = kxtj9_verify(tj9);
if (err < 0) {
dev_err(&client->dev, "device not recognized\n");
- goto err_pdata_exit;
+ return err;
}
i2c_set_clientdata(client, tj9);
@@ -539,67 +436,63 @@ static int kxtj9_probe(struct i2c_client *client,
tj9->ctrl_reg1 = tj9->pdata.res_12bit | tj9->pdata.g_range;
tj9->last_poll_interval = tj9->pdata.init_interval;
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev) {
+ dev_err(&client->dev, "input device allocate failed\n");
+ return -ENOMEM;
+ }
+
+ input_set_drvdata(input_dev, tj9);
+ tj9->input_dev = input_dev;
+
+ input_dev->name = "kxtj9_accel";
+ input_dev->id.bustype = BUS_I2C;
+
+ input_dev->open = kxtj9_input_open;
+ input_dev->close = kxtj9_input_close;
+
+ input_set_abs_params(input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
+ input_set_abs_params(input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
+ input_set_abs_params(input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);
+
+ if (client->irq <= 0) {
+ err = input_setup_polling(input_dev, kxtj9_poll);
+ if (err)
+ return err;
+ }
+
+ err = input_register_device(input_dev);
+ if (err) {
+ dev_err(&client->dev,
+ "unable to register input polled device %s: %d\n",
+ input_dev->name, err);
+ return err;
+ }
+
if (client->irq) {
/* If in irq mode, populate INT_CTRL_REG1 and enable DRDY. */
tj9->int_ctrl |= KXTJ9_IEN | KXTJ9_IEA | KXTJ9_IEL;
tj9->ctrl_reg1 |= DRDYE;
- err = kxtj9_setup_input_device(tj9);
- if (err)
- goto err_pdata_exit;
-
- err = request_threaded_irq(client->irq, NULL, kxtj9_isr,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "kxtj9-irq", tj9);
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, kxtj9_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ "kxtj9-irq", tj9);
if (err) {
dev_err(&client->dev, "request irq failed: %d\n", err);
- goto err_destroy_input;
+ return err;
}
- err = sysfs_create_group(&client->dev.kobj, &kxtj9_attribute_group);
+ err = devm_device_add_group(&client->dev,
+ &kxtj9_attribute_group);
if (err) {
dev_err(&client->dev, "sysfs create failed: %d\n", err);
- goto err_free_irq;
+ return err;
}
-
- } else {
- err = kxtj9_setup_polled_device(tj9);
- if (err)
- goto err_pdata_exit;
}
return 0;
-
-err_free_irq:
- free_irq(client->irq, tj9);
-err_destroy_input:
- input_unregister_device(tj9->input_dev);
-err_pdata_exit:
- if (tj9->pdata.exit)
- tj9->pdata.exit();
-err_free_mem:
- kfree(tj9);
- return err;
-}
-
-static int kxtj9_remove(struct i2c_client *client)
-{
- struct kxtj9_data *tj9 = i2c_get_clientdata(client);
-
- if (client->irq) {
- sysfs_remove_group(&client->dev.kobj, &kxtj9_attribute_group);
- free_irq(client->irq, tj9);
- input_unregister_device(tj9->input_dev);
- } else {
- kxtj9_teardown_polled_device(tj9);
- }
-
- if (tj9->pdata.exit)
- tj9->pdata.exit();
-
- kfree(tj9);
-
- return 0;
}
static int __maybe_unused kxtj9_suspend(struct device *dev)
@@ -647,7 +540,6 @@ static struct i2c_driver kxtj9_driver = {
.pm = &kxtj9_pm_ops,
},
.probe = kxtj9_probe,
- .remove = kxtj9_remove,
.id_table = kxtj9_id,
};
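
The kxtj9 change above leans on devm_add_action_or_reset() so the board-supplied pdata->exit() hook still runs without keeping a remove() method. A sketch of that mechanism with invented example_* names:

#include <linux/device.h>

struct example_pdata {
	void (*exit)(void);		/* hypothetical board callback */
};

static void example_platform_exit(void *data)
{
	struct example_pdata *pdata = data;

	if (pdata->exit)
		pdata->exit();
}

static int example_register_exit(struct device *dev,
				 struct example_pdata *pdata)
{
	/* runs on probe failure and again at unbind, in reverse order */
	return devm_add_action_or_reset(dev, example_platform_exit, pdata);
}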
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 49f5242bc54c..1b5a5e19230a 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -10,7 +10,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/of_device.h>
#define MMA8450_DRV_NAME "mma8450"
@@ -39,15 +39,8 @@
#define MMA8450_CTRL_REG1 0x38
#define MMA8450_CTRL_REG2 0x39
-/* mma8450 status */
-struct mma8450 {
- struct i2c_client *client;
- struct input_polled_dev *idev;
-};
-
-static int mma8450_read(struct mma8450 *m, unsigned off)
+static int mma8450_read(struct i2c_client *c, unsigned int off)
{
- struct i2c_client *c = m->client;
int ret;
ret = i2c_smbus_read_byte_data(c, off);
@@ -59,9 +52,8 @@ static int mma8450_read(struct mma8450 *m, unsigned off)
return ret;
}
-static int mma8450_write(struct mma8450 *m, unsigned off, u8 v)
+static int mma8450_write(struct i2c_client *c, unsigned int off, u8 v)
{
- struct i2c_client *c = m->client;
int error;
error = i2c_smbus_write_byte_data(c, off, v);
@@ -75,10 +67,9 @@ static int mma8450_write(struct mma8450 *m, unsigned off, u8 v)
return 0;
}
-static int mma8450_read_block(struct mma8450 *m, unsigned off,
+static int mma8450_read_block(struct i2c_client *c, unsigned int off,
u8 *buf, size_t size)
{
- struct i2c_client *c = m->client;
int err;
err = i2c_smbus_read_i2c_block_data(c, off, size, buf);
@@ -92,21 +83,21 @@ static int mma8450_read_block(struct mma8450 *m, unsigned off,
return 0;
}
-static void mma8450_poll(struct input_polled_dev *dev)
+static void mma8450_poll(struct input_dev *input)
{
- struct mma8450 *m = dev->private;
+ struct i2c_client *c = input_get_drvdata(input);
int x, y, z;
int ret;
u8 buf[6];
- ret = mma8450_read(m, MMA8450_STATUS);
+ ret = mma8450_read(c, MMA8450_STATUS);
if (ret < 0)
return;
if (!(ret & MMA8450_STATUS_ZXYDR))
return;
- ret = mma8450_read_block(m, MMA8450_OUT_X_LSB, buf, sizeof(buf));
+ ret = mma8450_read_block(c, MMA8450_OUT_X_LSB, buf, sizeof(buf));
if (ret < 0)
return;
@@ -114,41 +105,42 @@ static void mma8450_poll(struct input_polled_dev *dev)
y = ((int)(s8)buf[3] << 4) | (buf[2] & 0xf);
z = ((int)(s8)buf[5] << 4) | (buf[4] & 0xf);
- input_report_abs(dev->input, ABS_X, x);
- input_report_abs(dev->input, ABS_Y, y);
- input_report_abs(dev->input, ABS_Z, z);
- input_sync(dev->input);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_Z, z);
+ input_sync(input);
}
/* Initialize the MMA8450 chip */
-static void mma8450_open(struct input_polled_dev *dev)
+static int mma8450_open(struct input_dev *input)
{
- struct mma8450 *m = dev->private;
+ struct i2c_client *c = input_get_drvdata(input);
int err;
/* enable all events from X/Y/Z, no FIFO */
- err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07);
+ err = mma8450_write(c, MMA8450_XYZ_DATA_CFG, 0x07);
if (err)
- return;
+ return err;
/*
* Sleep mode poll rate - 50Hz
* System output data rate - 400Hz
* Full scale selection - Active, +/- 2G
*/
- err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01);
- if (err < 0)
- return;
+ err = mma8450_write(c, MMA8450_CTRL_REG1, 0x01);
+ if (err)
+ return err;
msleep(MODE_CHANGE_DELAY_MS);
+ return 0;
}
-static void mma8450_close(struct input_polled_dev *dev)
+static void mma8450_close(struct input_dev *input)
{
- struct mma8450 *m = dev->private;
+ struct i2c_client *c = input_get_drvdata(input);
- mma8450_write(m, MMA8450_CTRL_REG1, 0x00);
- mma8450_write(m, MMA8450_CTRL_REG2, 0x01);
+ mma8450_write(c, MMA8450_CTRL_REG1, 0x00);
+ mma8450_write(c, MMA8450_CTRL_REG2, 0x01);
}
/*
@@ -157,38 +149,37 @@ static void mma8450_close(struct input_polled_dev *dev)
static int mma8450_probe(struct i2c_client *c,
const struct i2c_device_id *id)
{
- struct input_polled_dev *idev;
- struct mma8450 *m;
+ struct input_dev *input;
int err;
- m = devm_kzalloc(&c->dev, sizeof(*m), GFP_KERNEL);
- if (!m)
+ input = devm_input_allocate_device(&c->dev);
+ if (!input)
return -ENOMEM;
- idev = devm_input_allocate_polled_device(&c->dev);
- if (!idev)
- return -ENOMEM;
+ input_set_drvdata(input, c);
+
+ input->name = MMA8450_DRV_NAME;
+ input->id.bustype = BUS_I2C;
+
+ input->open = mma8450_open;
+ input->close = mma8450_close;
- m->client = c;
- m->idev = idev;
+ input_set_abs_params(input, ABS_X, -2048, 2047, 32, 32);
+ input_set_abs_params(input, ABS_Y, -2048, 2047, 32, 32);
+ input_set_abs_params(input, ABS_Z, -2048, 2047, 32, 32);
- idev->private = m;
- idev->input->name = MMA8450_DRV_NAME;
- idev->input->id.bustype = BUS_I2C;
- idev->poll = mma8450_poll;
- idev->poll_interval = POLL_INTERVAL;
- idev->poll_interval_max = POLL_INTERVAL_MAX;
- idev->open = mma8450_open;
- idev->close = mma8450_close;
+ err = input_setup_polling(input, mma8450_poll);
+ if (err) {
+ dev_err(&c->dev, "failed to set up polling\n");
+ return err;
+ }
- __set_bit(EV_ABS, idev->input->evbit);
- input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32);
- input_set_abs_params(idev->input, ABS_Y, -2048, 2047, 32, 32);
- input_set_abs_params(idev->input, ABS_Z, -2048, 2047, 32, 32);
+ input_set_poll_interval(input, POLL_INTERVAL);
+ input_set_max_poll_interval(input, POLL_INTERVAL_MAX);
- err = input_register_polled_device(idev);
+ err = input_register_device(input);
if (err) {
- dev_err(&c->dev, "failed to register polled input device\n");
+ dev_err(&c->dev, "failed to register input device\n");
return err;
}
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index 4412055f8761..190a80e1e2c1 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -5,7 +5,7 @@
* Copyright (C) 2009 Phil Sutter <n0-1@freewrt.org>
*/
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
@@ -46,56 +46,42 @@ static bool rb532_button_pressed(void)
return !val;
}
-static void rb532_button_poll(struct input_polled_dev *poll_dev)
+static void rb532_button_poll(struct input_dev *input)
{
- input_report_key(poll_dev->input, RB532_BTN_KSYM,
- rb532_button_pressed());
- input_sync(poll_dev->input);
+ input_report_key(input, RB532_BTN_KSYM, rb532_button_pressed());
+ input_sync(input);
}
static int rb532_button_probe(struct platform_device *pdev)
{
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
int error;
- poll_dev = input_allocate_polled_device();
- if (!poll_dev)
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
return -ENOMEM;
- poll_dev->poll = rb532_button_poll;
- poll_dev->poll_interval = RB532_BTN_RATE;
+ input->name = "rb532 button";
+ input->phys = "rb532/button0";
+ input->id.bustype = BUS_HOST;
- poll_dev->input->name = "rb532 button";
- poll_dev->input->phys = "rb532/button0";
- poll_dev->input->id.bustype = BUS_HOST;
- poll_dev->input->dev.parent = &pdev->dev;
+ input_set_capability(input, EV_KEY, RB532_BTN_KSYM);
- dev_set_drvdata(&pdev->dev, poll_dev);
-
- input_set_capability(poll_dev->input, EV_KEY, RB532_BTN_KSYM);
-
- error = input_register_polled_device(poll_dev);
- if (error) {
- input_free_polled_device(poll_dev);
+ error = input_setup_polling(input, rb532_button_poll);
+ if (error)
return error;
- }
- return 0;
-}
+ input_set_poll_interval(input, RB532_BTN_RATE);
-static int rb532_button_remove(struct platform_device *pdev)
-{
- struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev);
-
- input_unregister_polled_device(poll_dev);
- input_free_polled_device(poll_dev);
+ error = input_register_device(input);
+ if (error)
+ return error;
return 0;
}
static struct platform_driver rb532_button_driver = {
.probe = rb532_button_probe,
- .remove = rb532_button_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 0fee6ddf3602..0657d785b3cc 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2008 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*/
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -45,15 +45,13 @@ static const unsigned short sgi_map[] = {
};
struct buttons_dev {
- struct input_polled_dev *poll_dev;
unsigned short keymap[ARRAY_SIZE(sgi_map)];
int count[ARRAY_SIZE(sgi_map)];
};
-static void handle_buttons(struct input_polled_dev *dev)
+static void handle_buttons(struct input_dev *input)
{
- struct buttons_dev *bdev = dev->private;
- struct input_dev *input = dev->input;
+ struct buttons_dev *bdev = input_get_drvdata(input);
u8 status;
int i;
@@ -80,28 +78,24 @@ static void handle_buttons(struct input_polled_dev *dev)
static int sgi_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
- struct input_polled_dev *poll_dev;
struct input_dev *input;
int error, i;
- bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL);
- poll_dev = input_allocate_polled_device();
- if (!bdev || !poll_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
memcpy(bdev->keymap, sgi_map, sizeof(bdev->keymap));
- poll_dev->private = bdev;
- poll_dev->poll = handle_buttons;
- poll_dev->poll_interval = BUTTONS_POLL_INTERVAL;
+ input_set_drvdata(input, bdev);
- input = poll_dev->input;
input->name = "SGI buttons";
input->phys = "sgi/input0";
input->id.bustype = BUS_HOST;
- input->dev.parent = &pdev->dev;
input->keycode = bdev->keymap;
input->keycodemax = ARRAY_SIZE(bdev->keymap);
@@ -113,35 +107,21 @@ static int sgi_buttons_probe(struct platform_device *pdev)
__set_bit(bdev->keymap[i], input->keybit);
__clear_bit(KEY_RESERVED, input->keybit);
- bdev->poll_dev = poll_dev;
- platform_set_drvdata(pdev, bdev);
-
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input, handle_buttons);
if (error)
- goto err_free_mem;
+ return error;
- return 0;
+ input_set_poll_interval(input, BUTTONS_POLL_INTERVAL);
- err_free_mem:
- input_free_polled_device(poll_dev);
- kfree(bdev);
- return error;
-}
-
-static int sgi_buttons_remove(struct platform_device *pdev)
-{
- struct buttons_dev *bdev = platform_get_drvdata(pdev);
-
- input_unregister_polled_device(bdev->poll_dev);
- input_free_polled_device(bdev->poll_dev);
- kfree(bdev);
+ error = input_register_device(input);
+ if (error)
+ return error;
return 0;
}
static struct platform_driver sgi_buttons_driver = {
.probe = sgi_buttons_probe,
- .remove = sgi_buttons_remove,
.driver = {
.name = "sgibtns",
},
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 7ce6cc60d4d2..80dfd72a02d3 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -8,7 +8,7 @@
#include <linux/io.h>
#include <linux/dmi.h>
#include <linux/init.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -1030,7 +1030,7 @@ static int __init select_keymap(void)
/* Input layer interface */
-static struct input_polled_dev *wistron_idev;
+static struct input_dev *wistron_idev;
static unsigned long jiffies_last_press;
static bool wifi_enabled;
static bool bluetooth_enabled;
@@ -1114,7 +1114,7 @@ static inline void wistron_led_resume(void)
static void handle_key(u8 code)
{
const struct key_entry *key =
- sparse_keymap_entry_from_scancode(wistron_idev->input, code);
+ sparse_keymap_entry_from_scancode(wistron_idev, code);
if (key) {
switch (key->type) {
@@ -1133,14 +1133,14 @@ static void handle_key(u8 code)
break;
default:
- sparse_keymap_report_entry(wistron_idev->input,
- key, 1, true);
+ sparse_keymap_report_entry(wistron_idev, key, 1, true);
break;
}
jiffies_last_press = jiffies;
- } else
+ } else {
printk(KERN_NOTICE
"wistron_btns: Unknown key code %02X\n", code);
+ }
}
static void poll_bios(bool discard)
@@ -1158,21 +1158,23 @@ static void poll_bios(bool discard)
}
}
-static void wistron_flush(struct input_polled_dev *dev)
+static int wistron_flush(struct input_dev *dev)
{
/* Flush stale event queue */
poll_bios(true);
+
+ return 0;
}
-static void wistron_poll(struct input_polled_dev *dev)
+static void wistron_poll(struct input_dev *dev)
{
poll_bios(false);
/* Increase poll frequency if user is currently pressing keys (< 2s ago) */
if (time_before(jiffies, jiffies_last_press + 2 * HZ))
- dev->poll_interval = POLL_INTERVAL_BURST;
+ input_set_poll_interval(dev, POLL_INTERVAL_BURST);
else
- dev->poll_interval = POLL_INTERVAL_DEFAULT;
+ input_set_poll_interval(dev, POLL_INTERVAL_DEFAULT);
}
static int wistron_setup_keymap(struct input_dev *dev,
@@ -1208,35 +1210,37 @@ static int wistron_setup_keymap(struct input_dev *dev,
static int setup_input_dev(void)
{
- struct input_dev *input_dev;
int error;
- wistron_idev = input_allocate_polled_device();
+ wistron_idev = input_allocate_device();
if (!wistron_idev)
return -ENOMEM;
+ wistron_idev->name = "Wistron laptop buttons";
+ wistron_idev->phys = "wistron/input0";
+ wistron_idev->id.bustype = BUS_HOST;
+ wistron_idev->dev.parent = &wistron_device->dev;
+
wistron_idev->open = wistron_flush;
- wistron_idev->poll = wistron_poll;
- wistron_idev->poll_interval = POLL_INTERVAL_DEFAULT;
- input_dev = wistron_idev->input;
- input_dev->name = "Wistron laptop buttons";
- input_dev->phys = "wistron/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->dev.parent = &wistron_device->dev;
+ error = sparse_keymap_setup(wistron_idev, keymap, wistron_setup_keymap);
+ if (error)
+ goto err_free_dev;
- error = sparse_keymap_setup(input_dev, keymap, wistron_setup_keymap);
+ error = input_setup_polling(wistron_idev, wistron_poll);
if (error)
goto err_free_dev;
- error = input_register_polled_device(wistron_idev);
+ input_set_poll_interval(wistron_idev, POLL_INTERVAL_DEFAULT);
+
+ error = input_register_device(wistron_idev);
if (error)
goto err_free_dev;
return 0;
err_free_dev:
- input_free_polled_device(wistron_idev);
+ input_free_device(wistron_idev);
return error;
}
@@ -1285,8 +1289,7 @@ static int wistron_probe(struct platform_device *dev)
static int wistron_remove(struct platform_device *dev)
{
wistron_led_remove();
- input_unregister_polled_device(wistron_idev);
- input_free_polled_device(wistron_idev);
+ input_unregister_device(wistron_idev);
bios_detach();
return 0;
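One detail worth noting in the wistron conversion: with input_setup_polling() the poll interval is no longer a struct field the driver can assign to, so the burst/normal rate switching has to go through input_set_poll_interval(), which may be called from the poll handler itself. A reduced sketch of that technique (interval values here are assumed for illustration, hardware scanning is elided):

#include <linux/input.h>
#include <linux/jiffies.h>

#define POLL_INTERVAL_DEFAULT	500	/* ms; assumed for illustration */
#define POLL_INTERVAL_BURST	100	/* ms; assumed for illustration */

static unsigned long jiffies_last_press;

static void example_poll(struct input_dev *dev)
{
	/* ... scan the hardware, updating jiffies_last_press on a press ... */

	/* Poll faster while a key was pressed within the last two seconds. */
	if (time_before(jiffies, jiffies_last_press + 2 * HZ))
		input_set_poll_interval(dev, POLL_INTERVAL_BURST);
	else
		input_set_poll_interval(dev, POLL_INTERVAL_DEFAULT);
}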
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 652c38e3c0b5..d8b6a5dab190 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -92,14 +92,14 @@ config MOUSE_PS2_SYNAPTICS_SMBUS
If unsure, say Y.
config MOUSE_PS2_CYPRESS
- bool "Cypress PS/2 mouse protocol extension" if EXPERT
- default y
- depends on MOUSE_PS2
- help
- Say Y here if you have a Cypress PS/2 Trackpad connected to
- your system.
+ bool "Cypress PS/2 mouse protocol extension" if EXPERT
+ default y
+ depends on MOUSE_PS2
+ help
+ Say Y here if you have a Cypress PS/2 Trackpad connected to
+ your system.
- If unsure, say Y.
+ If unsure, say Y.
config MOUSE_PS2_LIFEBOOK
bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
@@ -381,7 +381,6 @@ config MOUSE_VSXXXAA
config MOUSE_GPIO
tristate "GPIO mouse"
depends on GPIOLIB || COMPILE_TEST
- select INPUT_POLLDEV
help
This driver simulates a mouse on GPIO lines of various CPUs (and some
other chips).
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 461436f6f087..23507fce3a2b 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/gpio/consumer.h>
#include <linux/property.h>
#include <linux/of.h>
@@ -43,10 +43,9 @@ struct gpio_mouse {
* Timer function which is run every scan_ms ms when the device is opened.
 * The dev input variable is set to the input_dev pointer.

*/
-static void gpio_mouse_scan(struct input_polled_dev *dev)
+static void gpio_mouse_scan(struct input_dev *input)
{
- struct gpio_mouse *gpio = dev->private;
- struct input_dev *input = dev->input;
+ struct gpio_mouse *gpio = input_get_drvdata(input);
int x, y;
if (gpio->bleft)
@@ -71,18 +70,17 @@ static int gpio_mouse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_mouse *gmouse;
- struct input_polled_dev *input_poll;
struct input_dev *input;
- int ret;
+ int error;
gmouse = devm_kzalloc(dev, sizeof(*gmouse), GFP_KERNEL);
if (!gmouse)
return -ENOMEM;
/* Assign some default scanning time */
- ret = device_property_read_u32(dev, "scan-interval-ms",
- &gmouse->scan_ms);
- if (ret || gmouse->scan_ms == 0) {
+ error = device_property_read_u32(dev, "scan-interval-ms",
+ &gmouse->scan_ms);
+ if (error || gmouse->scan_ms == 0) {
dev_warn(dev, "invalid scan time, set to 50 ms\n");
gmouse->scan_ms = 50;
}
@@ -112,23 +110,14 @@ static int gpio_mouse_probe(struct platform_device *pdev)
if (IS_ERR(gmouse->bright))
return PTR_ERR(gmouse->bright);
- input_poll = devm_input_allocate_polled_device(dev);
- if (!input_poll) {
- dev_err(dev, "not enough memory for input device\n");
+ input = devm_input_allocate_device(dev);
+ if (!input)
return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, input_poll);
-
- /* set input-polldev handlers */
- input_poll->private = gmouse;
- input_poll->poll = gpio_mouse_scan;
- input_poll->poll_interval = gmouse->scan_ms;
- input = input_poll->input;
input->name = pdev->name;
input->id.bustype = BUS_HOST;
- input->dev.parent = &pdev->dev;
+
+ input_set_drvdata(input, gmouse);
input_set_capability(input, EV_REL, REL_X);
input_set_capability(input, EV_REL, REL_Y);
@@ -139,10 +128,16 @@ static int gpio_mouse_probe(struct platform_device *pdev)
if (gmouse->bright)
input_set_capability(input, EV_KEY, BTN_RIGHT);
- ret = input_register_polled_device(input_poll);
- if (ret) {
+ error = input_setup_polling(input, gpio_mouse_scan);
+ if (error)
+ return error;
+
+ input_set_poll_interval(input, gmouse->scan_ms);
+
+ error = input_register_device(input);
+ if (error) {
dev_err(dev, "could not register input device\n");
- return ret;
+ return error;
}
dev_dbg(dev, "%d ms scan time, buttons: %s%s%s\n",
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 56fae3472114..1ae6f8bba9ae 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -172,6 +172,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
+ "LEN0091", /* X1 Carbon 6 */
"LEN0092", /* X1 Carbon 6 */
"LEN0093", /* T480 */
"LEN0096", /* X280 */
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 897105b9a98b..0bc01cfc2b51 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -81,11 +81,6 @@ static const char * const rmi_f54_report_type_names[] = {
= "Full Raw Capacitance RX Offset Removed",
};
-struct rmi_f54_reports {
- int start;
- int size;
-};
-
struct f54_data {
struct rmi_function *fn;
@@ -98,7 +93,6 @@ struct f54_data {
enum rmi_f54_report_type report_type;
u8 *report_data;
int report_size;
- struct rmi_f54_reports standard_report[2];
bool is_busy;
struct mutex status_mutex;
@@ -116,6 +110,7 @@ struct f54_data {
struct video_device vdev;
struct vb2_queue queue;
struct mutex lock;
+ u32 sequence;
int input;
enum rmi_f54_report_type inputs[F54_MAX_REPORT_TYPE];
};
@@ -290,6 +285,7 @@ static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
static void rmi_f54_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct f54_data *f54 = vb2_get_drv_priv(vb->vb2_queue);
u16 *ptr;
enum vb2_buffer_state state;
@@ -298,6 +294,7 @@ static void rmi_f54_buffer_queue(struct vb2_buffer *vb)
mutex_lock(&f54->status_mutex);
+ vb2_set_plane_payload(vb, 0, 0);
reptype = rmi_f54_get_reptype(f54, f54->input);
if (reptype == F54_REPORT_NONE) {
state = VB2_BUF_STATE_ERROR;
@@ -344,14 +341,25 @@ static void rmi_f54_buffer_queue(struct vb2_buffer *vb)
data_done:
mutex_unlock(&f54->data_mutex);
done:
+ vb->timestamp = ktime_get_ns();
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = f54->sequence++;
vb2_buffer_done(vb, state);
mutex_unlock(&f54->status_mutex);
}
+static void rmi_f54_stop_streaming(struct vb2_queue *q)
+{
+ struct f54_data *f54 = vb2_get_drv_priv(q);
+
+ f54->sequence = 0;
+}
+
/* V4L2 structures */
static const struct vb2_ops rmi_f54_queue_ops = {
.queue_setup = rmi_f54_queue_setup,
.buf_queue = rmi_f54_buffer_queue,
+ .stop_streaming = rmi_f54_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
@@ -363,7 +371,6 @@ static const struct vb2_queue rmi_f54_queue = {
.ops = &rmi_f54_queue_ops,
.mem_ops = &vb2_vmalloc_memops,
.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
- .min_buffers_needed = 1,
};
static int rmi_f54_vidioc_querycap(struct file *file, void *priv,
@@ -516,13 +523,10 @@ static void rmi_f54_work(struct work_struct *work)
struct f54_data *f54 = container_of(work, struct f54_data, work.work);
struct rmi_function *fn = f54->fn;
u8 fifo[2];
- struct rmi_f54_reports *report;
int report_size;
u8 command;
- u8 *data;
int error;
- data = f54->report_data;
report_size = rmi_f54_get_report_size(f54);
if (report_size == 0) {
dev_err(&fn->dev, "Bad report size, report type=%d\n",
@@ -530,8 +534,6 @@ static void rmi_f54_work(struct work_struct *work)
error = -EINVAL;
goto error; /* retry won't help */
}
- f54->standard_report[0].size = report_size;
- report = f54->standard_report;
mutex_lock(&f54->data_mutex);
@@ -556,28 +558,23 @@ static void rmi_f54_work(struct work_struct *work)
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
- report_size = 0;
- for (; report->size; report++) {
- fifo[0] = report->start & 0xff;
- fifo[1] = (report->start >> 8) & 0xff;
- error = rmi_write_block(fn->rmi_dev,
- fn->fd.data_base_addr + F54_FIFO_OFFSET,
- fifo, sizeof(fifo));
- if (error) {
- dev_err(&fn->dev, "Failed to set fifo start offset\n");
- goto abort;
- }
+ fifo[0] = 0;
+ fifo[1] = 0;
+ error = rmi_write_block(fn->rmi_dev,
+ fn->fd.data_base_addr + F54_FIFO_OFFSET,
+ fifo, sizeof(fifo));
+ if (error) {
+ dev_err(&fn->dev, "Failed to set fifo start offset\n");
+ goto abort;
+ }
- error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
- F54_REPORT_DATA_OFFSET, data,
- report->size);
- if (error) {
- dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
- __func__, report->size, error);
- goto abort;
- }
- data += report->size;
- report_size += report->size;
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+ F54_REPORT_DATA_OFFSET, f54->report_data,
+ report_size);
+ if (error) {
+ dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+ __func__, report_size, error);
+ goto abort;
}
abort:
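The rmi_f54 hunks above are largely about making the V4L2 diagnostics node well behaved: every completed vb2 buffer is given a timestamp, field and sequence number, and the sequence counter is reset when streaming stops. A condensed sketch of that bookkeeping, using a hypothetical "example" state structure that mirrors the u32 sequence field this patch adds to f54_data:

#include <linux/ktime.h>
#include <media/videobuf2-v4l2.h>

/* Hypothetical per-queue state; the real driver keeps this in f54_data. */
struct example_queue_state {
	u32 sequence;
};

/* Complete a buffer with the metadata the V4L2 core expects. */
static void example_buffer_done(struct vb2_buffer *vb,
				enum vb2_buffer_state state)
{
	struct example_queue_state *st = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vb->timestamp = ktime_get_ns();
	vbuf->field = V4L2_FIELD_NONE;
	vbuf->sequence = st->sequence++;
	vb2_buffer_done(vb, state);
}

static void example_stop_streaming(struct vb2_queue *q)
{
	struct example_queue_state *st = vb2_get_drv_priv(q);

	/* Restart numbering for the next streaming session. */
	st->sequence = 0;
}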
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index e4c0d9a055b9..51c339182017 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -39,16 +39,16 @@ config TABLET_USB_AIPTEK
module will be called aiptek.
config TABLET_USB_GTCO
- tristate "GTCO CalComp/InterWrite USB Support"
- depends on USB && INPUT
- help
- Say Y here if you want to use the USB version of the GTCO
- CalComp/InterWrite Tablet. Make sure to say Y to "Mouse support"
- (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support"
- (CONFIG_INPUT_EVDEV) as well.
-
- To compile this driver as a module, choose M here: the
- module will be called gtco.
+ tristate "GTCO CalComp/InterWrite USB Support"
+ depends on USB && INPUT
+ help
+ Say Y here if you want to use the USB version of the GTCO
+ CalComp/InterWrite Tablet. Make sure to say Y to "Mouse support"
+ (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support"
+ (CONFIG_INPUT_EVDEV) as well.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gtco.
config TABLET_USB_HANWANG
tristate "Hanwang Art Master III tablet support (USB)"
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 46ad9090493b..c071f7c407b6 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -633,7 +633,7 @@ config TOUCHSCREEN_HP600
depends on SH_HP6XX && SH_ADC
help
Say Y here if you have a HP Jornada 620/660/680/690 and want to
- support the built-in touchscreen.
+ support the built-in touchscreen.
To compile this driver as a module, choose M here: the
module will be called hp680_ts_input.
@@ -700,7 +700,6 @@ config TOUCHSCREEN_EDT_FT5X06
config TOUCHSCREEN_RASPBERRYPI_FW
tristate "Raspberry Pi's firmware base touch screen support"
depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
- select INPUT_POLLDEV
help
Say Y here if you have the official Raspberry Pi 7 inch screen on
your system.
@@ -1038,7 +1037,6 @@ config TOUCHSCREEN_TS4800
depends on HAS_IOMEM && OF
depends on SOC_IMX51 || COMPILE_TEST
select MFD_SYSCON
- select INPUT_POLLDEV
help
Say Y here if you have a touchscreen on a TS-4800 board.
@@ -1210,7 +1208,6 @@ config TOUCHSCREEN_SUR40
tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
depends on USB && MEDIA_USB_SUPPORT && HAS_DMA
depends on VIDEO_V4L2
- select INPUT_POLLDEV
select VIDEOBUF2_DMA_SG
help
Say Y here if you want support for the Samsung SUR40 touchscreen
@@ -1246,7 +1243,6 @@ config TOUCHSCREEN_SX8654
config TOUCHSCREEN_TPS6507X
tristate "TPS6507x based touchscreens"
depends on I2C
- select INPUT_POLLDEV
help
Say Y here if you have a TPS6507x based touchscreen
controller.
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index 28644f372bd8..c0d5c2413356 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -13,7 +13,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
-#define AR1021_TOCUH_PKG_SIZE 5
+#define AR1021_TOUCH_PKG_SIZE 5
#define AR1021_MAX_X 4095
#define AR1021_MAX_Y 4095
@@ -25,7 +25,7 @@
struct ar1021_i2c {
struct i2c_client *client;
struct input_dev *input;
- u8 data[AR1021_TOCUH_PKG_SIZE];
+ u8 data[AR1021_TOUCH_PKG_SIZE];
};
static irqreturn_t ar1021_i2c_irq(int irq, void *dev_id)
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 24c4b691b1c9..ae60442efda0 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3156,6 +3156,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)
mutex_unlock(&input_dev->mutex);
+ disable_irq(data->irq);
+
return 0;
}
@@ -3168,6 +3170,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
if (!input_dev)
return 0;
+ enable_irq(data->irq);
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index 0e40897949bb..aa829725ded7 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -9,7 +9,6 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 5525f1fb1526..d61731c0037d 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -28,6 +28,7 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <asm/unaligned.h>
+#include <linux/regulator/consumer.h>
#define WORK_REGISTER_THRESHOLD 0x00
#define WORK_REGISTER_REPORT_RATE 0x08
@@ -88,6 +89,7 @@ struct edt_ft5x06_ts_data {
struct touchscreen_properties prop;
u16 num_x;
u16 num_y;
+ struct regulator *vcc;
struct gpio_desc *reset_gpio;
struct gpio_desc *wake_gpio;
@@ -1036,6 +1038,13 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
}
}
+static void edt_ft5x06_disable_regulator(void *arg)
+{
+ struct edt_ft5x06_ts_data *data = arg;
+
+ regulator_disable(data->vcc);
+}
+
static int edt_ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1064,6 +1073,27 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->max_support_points = chip_data->max_support_points;
+ tsdata->vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(tsdata->vcc)) {
+ error = PTR_ERR(tsdata->vcc);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to request regulator: %d\n", error);
+ return error;
+ }
+
+ error = regulator_enable(tsdata->vcc);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to enable vcc: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(&client->dev,
+ edt_ft5x06_disable_regulator,
+ tsdata);
+ if (error)
+ return error;
+
tsdata->reset_gpio = devm_gpiod_get_optional(&client->dev,
"reset", GPIOD_OUT_HIGH);
if (IS_ERR(tsdata->reset_gpio)) {
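The edt-ft5x06 hunk adds a vcc supply and uses devm_add_action_or_reset() so the regulator is switched off again on any later probe failure and on unbind, without needing a remove() counterpart. The shape of that pattern, reduced to its essentials (the supply name "vcc" follows the driver, everything else is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void example_disable_regulator(void *data)
{
	regulator_disable(data);
}

static int example_enable_vcc(struct device *dev)
{
	struct regulator *vcc;
	int error;

	vcc = devm_regulator_get(dev, "vcc");
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);

	error = regulator_enable(vcc);
	if (error)
		return error;

	/*
	 * From here on the regulator is disabled automatically, in reverse
	 * order relative to other devm resources, when probe fails later or
	 * when the device is unbound.
	 */
	return devm_add_action_or_reset(dev, example_disable_regulator, vcc);
}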
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index e9006407c9bc..4a17096e83e1 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -1,54 +1,54 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/delay.h>
-#include <linux/workqueue.h>
-#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
-#define ILI210X_TOUCHES 2
-#define ILI251X_TOUCHES 10
-#define DEFAULT_POLL_PERIOD 20
+#define ILI2XXX_POLL_PERIOD 20
+
+#define ILI210X_DATA_SIZE 64
+#define ILI211X_DATA_SIZE 43
+#define ILI251X_DATA_SIZE1 31
+#define ILI251X_DATA_SIZE2 20
/* Touchscreen commands */
#define REG_TOUCHDATA 0x10
#define REG_PANEL_INFO 0x20
-#define REG_FIRMWARE_VERSION 0x40
#define REG_CALIBRATE 0xcc
-struct firmware_version {
- u8 id;
- u8 major;
- u8 minor;
-} __packed;
-
-enum ili2xxx_model {
- MODEL_ILI210X,
- MODEL_ILI251X,
+struct ili2xxx_chip {
+ int (*read_reg)(struct i2c_client *client, u8 reg,
+ void *buf, size_t len);
+ int (*get_touch_data)(struct i2c_client *client, u8 *data);
+ bool (*parse_touch_data)(const u8 *data, unsigned int finger,
+ unsigned int *x, unsigned int *y);
+ bool (*continue_polling)(const u8 *data, bool touch);
+ unsigned int max_touches;
+ unsigned int resolution;
+ bool has_calibrate_reg;
};
struct ili210x {
struct i2c_client *client;
struct input_dev *input;
- unsigned int poll_period;
- struct delayed_work dwork;
struct gpio_desc *reset_gpio;
struct touchscreen_properties prop;
- enum ili2xxx_model model;
- unsigned int max_touches;
+ const struct ili2xxx_chip *chip;
+ bool stop;
};
-static int ili210x_read_reg(struct i2c_client *client, u8 reg, void *buf,
- size_t len)
+static int ili210x_read_reg(struct i2c_client *client,
+ u8 reg, void *buf, size_t len)
{
- struct ili210x *priv = i2c_get_clientdata(client);
- struct i2c_msg msg[2] = {
+ struct i2c_msg msg[] = {
{
.addr = client->addr,
.flags = 0,
@@ -62,151 +62,223 @@ static int ili210x_read_reg(struct i2c_client *client, u8 reg, void *buf,
.buf = buf,
}
};
+ int error, ret;
- if (priv->model == MODEL_ILI251X) {
- if (i2c_transfer(client->adapter, msg, 1) != 1) {
- dev_err(&client->dev, "i2c transfer failed\n");
- return -EIO;
- }
-
- usleep_range(5000, 5500);
-
- if (i2c_transfer(client->adapter, msg + 1, 1) != 1) {
- dev_err(&client->dev, "i2c transfer failed\n");
- return -EIO;
- }
- } else {
- if (i2c_transfer(client->adapter, msg, 2) != 2) {
- dev_err(&client->dev, "i2c transfer failed\n");
- return -EIO;
- }
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+ return error;
}
return 0;
}
-static int ili210x_read(struct i2c_client *client, void *buf, size_t len)
+static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
{
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- };
+ return ili210x_read_reg(client, REG_TOUCHDATA,
+ data, ILI210X_DATA_SIZE);
+}
+
+static bool ili210x_touchdata_to_coords(const u8 *touchdata,
+ unsigned int finger,
+ unsigned int *x, unsigned int *y)
+{
+ if (touchdata[0] & BIT(finger))
+ return false;
+
+ *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
+ *y = get_unaligned_be16(touchdata + 1 + (finger * 4) + 2);
- if (i2c_transfer(client->adapter, &msg, 1) != 1) {
- dev_err(&client->dev, "i2c transfer failed\n");
+ return true;
+}
+
+static bool ili210x_check_continue_polling(const u8 *data, bool touch)
+{
+ return data[0] & 0xf3;
+}
+
+static const struct ili2xxx_chip ili210x_chip = {
+ .read_reg = ili210x_read_reg,
+ .get_touch_data = ili210x_read_touch_data,
+ .parse_touch_data = ili210x_touchdata_to_coords,
+ .continue_polling = ili210x_check_continue_polling,
+ .max_touches = 2,
+ .has_calibrate_reg = true,
+};
+
+static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
+{
+ s16 sum = 0;
+ int error;
+ int ret;
+ int i;
+
+ ret = i2c_master_recv(client, data, ILI211X_DATA_SIZE);
+ if (ret != ILI211X_DATA_SIZE) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+ /* This chip uses custom checksum at the end of data */
+ for (i = 0; i < ILI211X_DATA_SIZE - 1; i++)
+ sum = (sum + data[i]) & 0xff;
+
+ if ((-sum & 0xff) != data[ILI211X_DATA_SIZE - 1]) {
+ dev_err(&client->dev,
+ "CRC error (crc=0x%02x expected=0x%02x)\n",
+ sum, data[ILI211X_DATA_SIZE - 1]);
return -EIO;
}
return 0;
}
-static bool ili210x_touchdata_to_coords(struct ili210x *priv, u8 *touchdata,
+static bool ili211x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
unsigned int *x, unsigned int *y)
{
- if (finger >= ILI210X_TOUCHES)
- return false;
+ u32 data;
- if (touchdata[0] & BIT(finger))
+ data = get_unaligned_be32(touchdata + 1 + (finger * 4) + 0);
+ if (data == 0xffffffff) /* Finger up */
return false;
- *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
- *y = get_unaligned_be16(touchdata + 1 + (finger * 4) + 2);
+ *x = ((touchdata[1 + (finger * 4) + 0] & 0xf0) << 4) |
+ touchdata[1 + (finger * 4) + 1];
+ *y = ((touchdata[1 + (finger * 4) + 0] & 0x0f) << 8) |
+ touchdata[1 + (finger * 4) + 2];
return true;
}
-static bool ili251x_touchdata_to_coords(struct ili210x *priv, u8 *touchdata,
+static bool ili211x_decline_polling(const u8 *data, bool touch)
+{
+ return false;
+}
+
+static const struct ili2xxx_chip ili211x_chip = {
+ .read_reg = ili210x_read_reg,
+ .get_touch_data = ili211x_read_touch_data,
+ .parse_touch_data = ili211x_touchdata_to_coords,
+ .continue_polling = ili211x_decline_polling,
+ .max_touches = 10,
+ .resolution = 2048,
+};
+
+static int ili251x_read_reg(struct i2c_client *client,
+ u8 reg, void *buf, size_t len)
+{
+ int error;
+ int ret;
+
+ ret = i2c_master_send(client, &reg, 1);
+ if (ret == 1) {
+ usleep_range(5000, 5500);
+
+ ret = i2c_master_recv(client, buf, len);
+ if (ret == len)
+ return 0;
+ }
+
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+ return error;
+}
+
+static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
+{
+ int error;
+
+ error = ili251x_read_reg(client, REG_TOUCHDATA,
+ data, ILI251X_DATA_SIZE1);
+ if (!error && data[0] == 2) {
+ error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
+ ILI251X_DATA_SIZE2);
+ if (error >= 0)
+ error = error == ILI251X_DATA_SIZE2 ? 0 : -EIO;
+ }
+
+ return error;
+}
+
+static bool ili251x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
unsigned int *x, unsigned int *y)
{
- if (finger >= ILI251X_TOUCHES)
- return false;
+ u16 val;
- *x = get_unaligned_be16(touchdata + 1 + (finger * 5) + 0);
- if (!(*x & BIT(15))) /* Touch indication */
+ val = get_unaligned_be16(touchdata + 1 + (finger * 5) + 0);
+ if (!(val & BIT(15))) /* Touch indication */
return false;
- *x &= 0x3fff;
+ *x = val & 0x3fff;
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
return true;
}
+static bool ili251x_check_continue_polling(const u8 *data, bool touch)
+{
+ return touch;
+}
+
+static const struct ili2xxx_chip ili251x_chip = {
+ .read_reg = ili251x_read_reg,
+ .get_touch_data = ili251x_read_touch_data,
+ .parse_touch_data = ili251x_touchdata_to_coords,
+ .continue_polling = ili251x_check_continue_polling,
+ .max_touches = 10,
+ .has_calibrate_reg = true,
+};
+
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
{
struct input_dev *input = priv->input;
int i;
- bool contact = false, touch = false;
+ bool contact = false, touch;
unsigned int x = 0, y = 0;
- for (i = 0; i < priv->max_touches; i++) {
- if (priv->model == MODEL_ILI210X) {
- touch = ili210x_touchdata_to_coords(priv, touchdata,
- i, &x, &y);
- } else if (priv->model == MODEL_ILI251X) {
- touch = ili251x_touchdata_to_coords(priv, touchdata,
- i, &x, &y);
- if (touch)
- contact = true;
- }
+ for (i = 0; i < priv->chip->max_touches; i++) {
+ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
- if (!touch)
- continue;
- touchscreen_report_pos(input, &priv->prop, x, y,
- true);
+ if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
+ touchscreen_report_pos(input, &priv->prop, x, y, true);
+ contact = true;
+ }
}
input_mt_report_pointer_emulation(input, false);
input_sync(input);
- if (priv->model == MODEL_ILI210X)
- contact = touchdata[0] & 0xf3;
-
return contact;
}
-static void ili210x_work(struct work_struct *work)
+static irqreturn_t ili210x_irq(int irq, void *irq_data)
{
- struct ili210x *priv = container_of(work, struct ili210x,
- dwork.work);
+ struct ili210x *priv = irq_data;
struct i2c_client *client = priv->client;
- u8 touchdata[64] = { 0 };
+ const struct ili2xxx_chip *chip = priv->chip;
+ u8 touchdata[ILI210X_DATA_SIZE] = { 0 };
+ bool keep_polling;
bool touch;
- int error = -EINVAL;
-
- if (priv->model == MODEL_ILI210X) {
- error = ili210x_read_reg(client, REG_TOUCHDATA,
- touchdata, sizeof(touchdata));
- } else if (priv->model == MODEL_ILI251X) {
- error = ili210x_read_reg(client, REG_TOUCHDATA,
- touchdata, 31);
- if (!error && touchdata[0] == 2)
- error = ili210x_read(client, &touchdata[31], 20);
- }
-
- if (error) {
- dev_err(&client->dev,
- "Unable to get touchdata, err = %d\n", error);
- return;
- }
-
- touch = ili210x_report_events(priv, touchdata);
-
- if (touch)
- schedule_delayed_work(&priv->dwork,
- msecs_to_jiffies(priv->poll_period));
-}
+ int error;
-static irqreturn_t ili210x_irq(int irq, void *irq_data)
-{
- struct ili210x *priv = irq_data;
+ do {
+ error = chip->get_touch_data(client, touchdata);
+ if (error) {
+ dev_err(&client->dev,
+ "Unable to get touch data: %d\n", error);
+ break;
+ }
- schedule_delayed_work(&priv->dwork, 0);
+ touch = ili210x_report_events(priv, touchdata);
+ keep_polling = chip->continue_polling(touchdata, touch);
+ if (keep_polling)
+ msleep(ILI2XXX_POLL_PERIOD);
+ } while (!priv->stop && keep_polling);
return IRQ_HANDLED;
}
@@ -242,8 +314,19 @@ static struct attribute *ili210x_attributes[] = {
NULL,
};
+static umode_t ili210x_calibrate_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+
+ return priv->chip->has_calibrate_reg;
+}
+
static const struct attribute_group ili210x_attr_group = {
.attrs = ili210x_attributes,
+ .is_visible = ili210x_calibrate_visible,
};
static void ili210x_power_down(void *data)
@@ -253,28 +336,35 @@ static void ili210x_power_down(void *data)
gpiod_set_value_cansleep(reset_gpio, 1);
}
-static void ili210x_cancel_work(void *data)
+static void ili210x_stop(void *data)
{
struct ili210x *priv = data;
- cancel_delayed_work_sync(&priv->dwork);
+ /* Tell ISR to quit even if there is a contact. */
+ priv->stop = true;
}
static int ili210x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ const struct ili2xxx_chip *chip;
struct ili210x *priv;
struct gpio_desc *reset_gpio;
struct input_dev *input;
- struct firmware_version firmware;
- enum ili2xxx_model model;
int error;
-
- model = (enum ili2xxx_model)id->driver_data;
+ unsigned int max_xy;
dev_dbg(dev, "Probing for ILI210X I2C Touschreen driver");
+ chip = device_get_match_data(dev);
+ if (!chip && id)
+ chip = (const struct ili2xxx_chip *)id->driver_data;
+ if (!chip) {
+ dev_err(&client->dev, "unknown device model\n");
+ return -ENODEV;
+ }
+
if (client->irq <= 0) {
dev_err(dev, "No IRQ!\n");
return -EINVAL;
@@ -305,49 +395,39 @@ static int ili210x_i2c_probe(struct i2c_client *client,
priv->client = client;
priv->input = input;
- priv->poll_period = DEFAULT_POLL_PERIOD;
- INIT_DELAYED_WORK(&priv->dwork, ili210x_work);
priv->reset_gpio = reset_gpio;
- priv->model = model;
- if (model == MODEL_ILI210X)
- priv->max_touches = ILI210X_TOUCHES;
- if (model == MODEL_ILI251X)
- priv->max_touches = ILI251X_TOUCHES;
-
+ priv->chip = chip;
i2c_set_clientdata(client, priv);
- /* Get firmware version */
- error = ili210x_read_reg(client, REG_FIRMWARE_VERSION,
- &firmware, sizeof(firmware));
- if (error) {
- dev_err(dev, "Failed to get firmware version, err: %d\n",
- error);
- return error;
- }
-
/* Setup input device */
input->name = "ILI210x Touchscreen";
input->id.bustype = BUS_I2C;
- input->dev.parent = dev;
/* Multi touch */
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, 0xffff, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 0xffff, 0, 0);
+ max_xy = (chip->resolution ?: SZ_64K) - 1;
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
touchscreen_parse_properties(input, true, &priv->prop);
- input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
- error = devm_add_action(dev, ili210x_cancel_work, priv);
- if (error)
+ error = input_mt_init_slots(input, priv->chip->max_touches,
+ INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(dev, "Unable to set up slots, err: %d\n", error);
return error;
+ }
- error = devm_request_irq(dev, client->irq, ili210x_irq, 0,
- client->name, priv);
+ error = devm_request_threaded_irq(dev, client->irq, NULL, ili210x_irq,
+ IRQF_ONESHOT, client->name, priv);
if (error) {
dev_err(dev, "Unable to request touchscreen IRQ, err: %d\n",
error);
return error;
}
+ error = devm_add_action_or_reset(dev, ili210x_stop, priv);
+ if (error)
+ return error;
+
error = devm_device_add_group(dev, &ili210x_attr_group);
if (error) {
dev_err(dev, "Unable to create sysfs attributes, err: %d\n",
@@ -361,56 +441,28 @@ static int ili210x_i2c_probe(struct i2c_client *client,
return error;
}
- device_init_wakeup(dev, 1);
-
- dev_dbg(dev,
- "ILI210x initialized (IRQ: %d), firmware version %d.%d.%d",
- client->irq, firmware.id, firmware.major, firmware.minor);
-
return 0;
}
-static int __maybe_unused ili210x_i2c_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(client->irq);
-
- return 0;
-}
-
-static int __maybe_unused ili210x_i2c_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(client->irq);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(ili210x_i2c_pm,
- ili210x_i2c_suspend, ili210x_i2c_resume);
-
static const struct i2c_device_id ili210x_i2c_id[] = {
- { "ili210x", MODEL_ILI210X },
- { "ili251x", MODEL_ILI251X },
+ { "ili210x", (long)&ili210x_chip },
+ { "ili2117", (long)&ili211x_chip },
+ { "ili251x", (long)&ili251x_chip },
{ }
};
MODULE_DEVICE_TABLE(i2c, ili210x_i2c_id);
static const struct of_device_id ili210x_dt_ids[] = {
- { .compatible = "ilitek,ili210x", .data = (void *)MODEL_ILI210X },
- { .compatible = "ilitek,ili251x", .data = (void *)MODEL_ILI251X },
- { },
+ { .compatible = "ilitek,ili210x", .data = &ili210x_chip },
+ { .compatible = "ilitek,ili2117", .data = &ili211x_chip },
+ { .compatible = "ilitek,ili251x", .data = &ili251x_chip },
+ { }
};
MODULE_DEVICE_TABLE(of, ili210x_dt_ids);
static struct i2c_driver ili210x_ts_driver = {
.driver = {
.name = "ili210x_i2c",
- .pm = &ili210x_i2c_pm,
.of_match_table = ili210x_dt_ids,
},
.id_table = ili210x_i2c_id,
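The ili210x rewrite replaces the model enum with a per-chip operations structure that is looked up either from the OF match table or, as a fallback, from the legacy I2C id table. The lookup idiom, stripped down to its skeleton (struct and table shapes mirror the driver, but the names, compatibles and contents here are illustrative only):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>

struct example_chip {
	unsigned int max_touches;
	/* ... per-chip ops elided ... */
};

static const struct example_chip example_chip_a = { .max_touches = 2 };
static const struct example_chip example_chip_b = { .max_touches = 10 };

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,chip-a", .data = &example_chip_a },
	{ .compatible = "vendor,chip-b", .data = &example_chip_b },
	{ }
};

static const struct i2c_device_id example_i2c_ids[] = {
	{ "chip-a", (unsigned long)&example_chip_a },
	{ "chip-b", (unsigned long)&example_chip_b },
	{ }
};

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	const struct example_chip *chip;

	/* Prefer firmware-provided match data, fall back to the i2c id. */
	chip = device_get_match_data(&client->dev);
	if (!chip && id)
		chip = (const struct example_chip *)id->driver_data;
	if (!chip)
		return -ENODEV;

	/* ... size input ranges, MT slots etc. from chip->max_touches ... */
	return 0;
}

Keeping both tables pointing at the same chip descriptors is what lets the driver drop the CONFIG_OF-only parsing helpers while still working when instantiated by i2c id alone.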
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index a5ab774da4cc..69c6d559eeb0 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -446,8 +446,7 @@ static int mms114_probe(struct i2c_client *client,
data->client = client;
data->input_dev = input_dev;
- /* FIXME: switch to device_get_match_data() when available */
- match_data = of_device_get_match_data(&client->dev);
+ match_data = device_get_match_data(&client->dev);
if (!match_data)
return -EINVAL;
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index e146dfa257b1..9aa098577350 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -5,22 +5,73 @@
* Copyright (C) 2010-2011 Pixcir, Inc.
*/
+#include <asm/unaligned.h>
#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
#include <linux/of_device.h>
-#include <linux/platform_data/pixcir_i2c_ts.h>
-#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */
+/*
+ * Register map
+ */
+#define PIXCIR_REG_POWER_MODE 51
+#define PIXCIR_REG_INT_MODE 52
+
+/*
+ * Power modes:
+ * active: max scan speed
+ * idle: lower scan speed with automatic transition to active on touch
+ * halt: datasheet says sleep but this is more like halt as the chip
+ * clocks are cut and it can only be brought out of this mode
+ * using the RESET pin.
+ */
+enum pixcir_power_mode {
+ PIXCIR_POWER_ACTIVE,
+ PIXCIR_POWER_IDLE,
+ PIXCIR_POWER_HALT,
+};
+
+#define PIXCIR_POWER_MODE_MASK 0x03
+#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodicaly
+ * diff coordinates: interrupt is asserted when coordinates change
+ * level on touch: interrupt level asserted during touch
+ * pulse on touch: interrupt pulse asserted during touch
+ *
+ */
+enum pixcir_int_mode {
+ PIXCIR_INT_PERIODICAL,
+ PIXCIR_INT_DIFF_COORD,
+ PIXCIR_INT_LEVEL_TOUCH,
+ PIXCIR_INT_PULSE_TOUCH,
+};
+
+#define PIXCIR_INT_MODE_MASK 0x03
+#define PIXCIR_INT_ENABLE (1UL << 3)
+#define PIXCIR_INT_POL_HIGH (1UL << 2)
+
+/**
+ * struct pixcir_i2c_chip_data - chip related data
+ * @max_fingers: Max number of fingers reported simultaneously by h/w
+ * @has_hw_ids: Hardware supports finger tracking IDs
+ *
+ */
+struct pixcir_i2c_chip_data {
+ u8 max_fingers;
+ bool has_hw_ids;
+};
+
struct pixcir_i2c_ts_data {
struct i2c_client *client;
struct input_dev *input;
@@ -30,7 +81,6 @@ struct pixcir_i2c_ts_data {
struct gpio_desc *gpio_wake;
const struct pixcir_i2c_chip_data *chip;
struct touchscreen_properties prop;
- int max_fingers; /* Max fingers supported in this instance */
bool running;
};
@@ -54,7 +104,7 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
memset(report, 0, sizeof(struct pixcir_report_data));
i = chip->has_hw_ids ? 1 : 0;
- readsize = 2 + tsdata->max_fingers * (4 + i);
+ readsize = 2 + tsdata->chip->max_fingers * (4 + i);
if (readsize > sizeof(rdbuf))
readsize = sizeof(rdbuf);
@@ -75,8 +125,8 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
}
touch = rdbuf[0] & 0x7;
- if (touch > tsdata->max_fingers)
- touch = tsdata->max_fingers;
+ if (touch > tsdata->chip->max_fingers)
+ touch = tsdata->chip->max_fingers;
report->num_touches = touch;
bufptr = &rdbuf[2];
@@ -192,7 +242,7 @@ static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_POWER_MODE);
if (ret < 0) {
- dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't read reg %d : %d\n",
__func__, PIXCIR_REG_POWER_MODE, ret);
return ret;
}
@@ -205,7 +255,7 @@ static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_POWER_MODE, ret);
if (ret < 0) {
- dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't write reg %d : %d\n",
__func__, PIXCIR_REG_POWER_MODE, ret);
return ret;
}
@@ -231,7 +281,7 @@ static int pixcir_set_int_mode(struct pixcir_i2c_ts_data *ts,
ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_INT_MODE);
if (ret < 0) {
- dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't read reg %d : %d\n",
__func__, PIXCIR_REG_INT_MODE, ret);
return ret;
}
@@ -246,7 +296,7 @@ static int pixcir_set_int_mode(struct pixcir_i2c_ts_data *ts,
ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_INT_MODE, ret);
if (ret < 0) {
- dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't write reg %d : %d\n",
__func__, PIXCIR_REG_INT_MODE, ret);
return ret;
}
@@ -264,7 +314,7 @@ static int pixcir_int_enable(struct pixcir_i2c_ts_data *ts, bool enable)
ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_INT_MODE);
if (ret < 0) {
- dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't read reg %d : %d\n",
__func__, PIXCIR_REG_INT_MODE, ret);
return ret;
}
@@ -276,7 +326,7 @@ static int pixcir_int_enable(struct pixcir_i2c_ts_data *ts, bool enable)
ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_INT_MODE, ret);
if (ret < 0) {
- dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't write reg %d : %d\n",
__func__, PIXCIR_REG_INT_MODE, ret);
return ret;
}
@@ -412,31 +462,9 @@ unlock:
static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
-#ifdef CONFIG_OF
-static const struct of_device_id pixcir_of_match[];
-
-static int pixcir_parse_dt(struct device *dev,
- struct pixcir_i2c_ts_data *tsdata)
-{
- tsdata->chip = of_device_get_match_data(dev);
- if (!tsdata->chip)
- return -EINVAL;
-
- return 0;
-}
-#else
-static int pixcir_parse_dt(struct device *dev,
- struct pixcir_i2c_ts_data *tsdata)
-{
- return -EINVAL;
-}
-#endif
-
static int pixcir_i2c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct pixcir_ts_platform_data *pdata =
- dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct pixcir_i2c_ts_data *tsdata;
struct input_dev *input;
@@ -446,19 +474,11 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
if (!tsdata)
return -ENOMEM;
- if (pdata) {
- tsdata->chip = &pdata->chip;
- } else if (dev->of_node) {
- error = pixcir_parse_dt(dev, tsdata);
- if (error)
- return error;
- } else {
- dev_err(dev, "platform data not defined\n");
- return -EINVAL;
- }
-
- if (!tsdata->chip->max_fingers) {
- dev_err(dev, "Invalid max_fingers in chip data\n");
+ tsdata->chip = device_get_match_data(dev);
+ if (!tsdata->chip && id)
+ tsdata->chip = (const void *)id->driver_data;
+ if (!tsdata->chip) {
+ dev_err(dev, "can't locate chip data\n");
return -EINVAL;
}
@@ -475,30 +495,17 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
input->id.bustype = BUS_I2C;
input->open = pixcir_input_open;
input->close = pixcir_input_close;
- input->dev.parent = dev;
-
- if (pdata) {
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
- } else {
- input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
- input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
- touchscreen_parse_properties(input, true, &tsdata->prop);
- if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
- !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
- dev_err(dev, "Touchscreen size is not specified\n");
- return -EINVAL;
- }
- }
- tsdata->max_fingers = tsdata->chip->max_fingers;
- if (tsdata->max_fingers > PIXCIR_MAX_SLOTS) {
- tsdata->max_fingers = PIXCIR_MAX_SLOTS;
- dev_info(dev, "Limiting maximum fingers to %d\n",
- tsdata->max_fingers);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+ touchscreen_parse_properties(input, true, &tsdata->prop);
+ if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
+ !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
+ dev_err(dev, "Touchscreen size is not specified\n");
+ return -EINVAL;
}
- error = input_mt_init_slots(input, tsdata->max_fingers,
+ error = input_mt_init_slots(input, tsdata->chip->max_fingers,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
dev_err(dev, "Error initializing Multi-Touch slots\n");
@@ -510,7 +517,9 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
tsdata->gpio_attb = devm_gpiod_get(dev, "attb", GPIOD_IN);
if (IS_ERR(tsdata->gpio_attb)) {
error = PTR_ERR(tsdata->gpio_attb);
- dev_err(dev, "Failed to request ATTB gpio: %d\n", error);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request ATTB gpio: %d\n",
+ error);
return error;
}
@@ -518,7 +527,9 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
GPIOD_OUT_LOW);
if (IS_ERR(tsdata->gpio_reset)) {
error = PTR_ERR(tsdata->gpio_reset);
- dev_err(dev, "Failed to request RESET gpio: %d\n", error);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request RESET gpio: %d\n",
+ error);
return error;
}
@@ -574,14 +585,6 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
return 0;
}
-static const struct i2c_device_id pixcir_i2c_ts_id[] = {
- { "pixcir_ts", 0 },
- { "pixcir_tangoc", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
-
-#ifdef CONFIG_OF
static const struct pixcir_i2c_chip_data pixcir_ts_data = {
.max_fingers = 2,
/* no hw id support */
@@ -592,6 +595,14 @@ static const struct pixcir_i2c_chip_data pixcir_tangoc_data = {
.has_hw_ids = true,
};
+static const struct i2c_device_id pixcir_i2c_ts_id[] = {
+ { "pixcir_ts", (unsigned long) &pixcir_ts_data },
+ { "pixcir_tangoc", (unsigned long) &pixcir_tangoc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
+
+#ifdef CONFIG_OF
static const struct of_device_id pixcir_of_match[] = {
{ .compatible = "pixcir,pixcir_ts", .data = &pixcir_ts_data },
{ .compatible = "pixcir,pixcir_tangoc", .data = &pixcir_tangoc_data },
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index 69881265d121..0e2e08f3f433 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -16,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/input/mt.h>
-#include <linux/input-polldev.h>
#include <linux/input/touchscreen.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
@@ -34,7 +33,7 @@
struct rpi_ts {
struct platform_device *pdev;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct touchscreen_properties prop;
void __iomem *fw_regs_va;
@@ -57,10 +56,9 @@ struct rpi_ts_regs {
} point[RPI_TS_MAX_SUPPORTED_POINTS];
};
-static void rpi_ts_poll(struct input_polled_dev *dev)
+static void rpi_ts_poll(struct input_dev *input)
{
- struct input_dev *input = dev->input;
- struct rpi_ts *ts = dev->private;
+ struct rpi_ts *ts = input_get_drvdata(input);
struct rpi_ts_regs regs;
int modified_ids = 0;
long released_ids;
@@ -123,10 +121,9 @@ static int rpi_ts_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct device_node *fw_node;
struct rpi_firmware *fw;
- struct input_dev *input;
struct rpi_ts *ts;
u32 touchbuf;
int error;
@@ -160,7 +157,6 @@ static int rpi_ts_probe(struct platform_device *pdev)
return error;
}
-
touchbuf = (u32)ts->fw_regs_phys;
error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
&touchbuf, sizeof(touchbuf));
@@ -170,19 +166,17 @@ static int rpi_ts_probe(struct platform_device *pdev)
return error;
}
- poll_dev = devm_input_allocate_polled_device(dev);
- if (!poll_dev) {
+ input = devm_input_allocate_device(dev);
+ if (!input) {
dev_err(dev, "Failed to allocate input device\n");
return -ENOMEM;
}
- ts->poll_dev = poll_dev;
- input = poll_dev->input;
+
+ ts->input = input;
+ input_set_drvdata(input, ts);
input->name = "raspberrypi-ts";
input->id.bustype = BUS_HOST;
- poll_dev->poll_interval = RPI_TS_POLL_INTERVAL;
- poll_dev->poll = rpi_ts_poll;
- poll_dev->private = ts;
input_set_abs_params(input, ABS_MT_POSITION_X, 0,
RPI_TS_DEFAULT_WIDTH, 0, 0);
@@ -197,7 +191,15 @@ static int rpi_ts_probe(struct platform_device *pdev)
return error;
}
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input, rpi_ts_poll);
+ if (error) {
+ dev_err(dev, "could not set up polling mode, %d\n", error);
+ return error;
+ }
+
+ input_set_poll_interval(input, RPI_TS_POLL_INTERVAL);
+
+ error = input_register_device(input);
if (error) {
dev_err(dev, "could not register input device, %d\n", error);
return error;
@@ -214,10 +216,10 @@ MODULE_DEVICE_TABLE(of, rpi_ts_match);
static struct platform_driver rpi_ts_driver = {
.driver = {
- .name = "raspberrypi-ts",
+ .name = "raspberrypi-ts",
.of_match_table = rpi_ts_match,
},
- .probe = rpi_ts_probe,
+ .probe = rpi_ts_probe,
};
module_platform_driver(rpi_ts_driver);
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index b346e7cafd62..82920ff46f72 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 1139714e72e2..63b29c7279e2 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -14,23 +14,19 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/input/touchscreen.h>
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
-struct st1232_ts_finger {
- u16 x;
- u16 y;
- u8 t;
- bool is_valid;
-};
+#define ST_TS_MAX_FINGERS 10
struct st_chip_info {
bool have_z;
@@ -50,81 +46,89 @@ struct st1232_ts_data {
const struct st_chip_info *chip_info;
int read_buf_len;
u8 *read_buf;
- struct st1232_ts_finger *finger;
};
static int st1232_ts_read_data(struct st1232_ts_data *ts)
{
- struct st1232_ts_finger *finger = ts->finger;
struct i2c_client *client = ts->client;
- struct i2c_msg msg[2];
- int error;
- int i, y;
u8 start_reg = ts->chip_info->start_reg;
- u8 *buf = ts->read_buf;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(start_reg),
+ .buf = &start_reg,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD | I2C_M_DMA_SAFE,
+ .len = ts->read_buf_len,
+ .buf = ts->read_buf,
+ }
+ };
+ int ret;
- /* read touchscreen data */
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &start_reg;
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg))
+ return ret < 0 ? ret : -EIO;
- msg[1].addr = ts->client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = ts->read_buf_len;
- msg[1].buf = buf;
+ return 0;
+}
- error = i2c_transfer(client->adapter, msg, 2);
- if (error < 0)
- return error;
+static int st1232_ts_parse_and_report(struct st1232_ts_data *ts)
+{
+ struct input_dev *input = ts->input_dev;
+ struct input_mt_pos pos[ST_TS_MAX_FINGERS];
+ u8 z[ST_TS_MAX_FINGERS];
+ int slots[ST_TS_MAX_FINGERS];
+ int n_contacts = 0;
+ int i;
+
+ for (i = 0; i < ts->chip_info->max_fingers; i++) {
+ u8 *buf = &ts->read_buf[i * 4];
+
+ if (buf[0] & BIT(7)) {
+ unsigned int x = ((buf[0] & 0x70) << 4) | buf[1];
+ unsigned int y = ((buf[0] & 0x07) << 8) | buf[2];
- for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
- finger[i].is_valid = buf[i + y] >> 7;
- if (finger[i].is_valid) {
- finger[i].x = ((buf[i + y] & 0x0070) << 4) |
- buf[i + y + 1];
- finger[i].y = ((buf[i + y] & 0x0007) << 8) |
- buf[i + y + 2];
+ touchscreen_set_mt_pos(&pos[n_contacts],
+ &ts->prop, x, y);
/* st1232 includes a z-axis / touch strength */
if (ts->chip_info->have_z)
- finger[i].t = buf[i + 6];
+ z[n_contacts] = ts->read_buf[i + 6];
+
+ n_contacts++;
}
}
- return 0;
+ input_mt_assign_slots(input, slots, pos, n_contacts, 0);
+ for (i = 0; i < n_contacts; i++) {
+ input_mt_slot(input, slots[i]);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, pos[i].x);
+ input_report_abs(input, ABS_MT_POSITION_Y, pos[i].y);
+ if (ts->chip_info->have_z)
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, z[i]);
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+
+ return n_contacts;
}
static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
{
struct st1232_ts_data *ts = dev_id;
- struct st1232_ts_finger *finger = ts->finger;
- struct input_dev *input_dev = ts->input_dev;
- int count = 0;
- int i, ret;
-
- ret = st1232_ts_read_data(ts);
- if (ret < 0)
- goto end;
-
- /* multi touch protocol */
- for (i = 0; i < ts->chip_info->max_fingers; i++) {
- if (!finger[i].is_valid)
- continue;
-
- if (ts->chip_info->have_z)
- input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
- finger[i].t);
+ int count;
+ int error;
- touchscreen_report_pos(input_dev, &ts->prop,
- finger[i].x, finger[i].y, true);
- input_mt_sync(input_dev);
- count++;
- }
+ error = st1232_ts_read_data(ts);
+ if (error)
+ goto out;
- /* SYN_MT_REPORT only if no contact */
+ count = st1232_ts_parse_and_report(ts);
if (!count) {
- input_mt_sync(input_dev);
if (ts->low_latency_req.dev) {
dev_pm_qos_remove_request(&ts->low_latency_req);
ts->low_latency_req.dev = NULL;
@@ -136,10 +140,7 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
DEV_PM_QOS_RESUME_LATENCY, 100);
}
- /* SYN_REPORT */
- input_sync(input_dev);
-
-end:
+out:
return IRQ_HANDLED;
}
@@ -149,6 +150,11 @@ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
gpiod_set_value_cansleep(ts->reset_gpio, !poweron);
}
+static void st1232_ts_power_off(void *data)
+{
+ st1232_ts_power(data, false);
+}
+
static const struct st_chip_info st1232_chip_info = {
.have_z = true,
.max_x = 0x31f, /* 800 - 1 */
@@ -172,7 +178,6 @@ static int st1232_ts_probe(struct i2c_client *client,
{
const struct st_chip_info *match;
struct st1232_ts_data *ts;
- struct st1232_ts_finger *finger;
struct input_dev *input_dev;
int error;
@@ -199,11 +204,6 @@ static int st1232_ts_probe(struct i2c_client *client,
return -ENOMEM;
ts->chip_info = match;
- ts->finger = devm_kcalloc(&client->dev,
- ts->chip_info->max_fingers, sizeof(*finger),
- GFP_KERNEL);
- if (!ts->finger)
- return -ENOMEM;
/* allocate a buffer according to the number of registers to read */
ts->read_buf_len = ts->chip_info->max_fingers * 4;
@@ -229,14 +229,15 @@ static int st1232_ts_probe(struct i2c_client *client,
st1232_ts_power(ts, true);
+ error = devm_add_action_or_reset(&client->dev, st1232_ts_power_off, ts);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to install power off action: %d\n", error);
+ return error;
+ }
+
input_dev->name = "st1232-touchscreen";
input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
-
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
- __set_bit(EV_SYN, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(EV_ABS, input_dev->evbit);
if (ts->chip_info->have_z)
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
@@ -249,6 +250,14 @@ static int st1232_ts_probe(struct i2c_client *client,
touchscreen_parse_properties(input_dev, true, &ts->prop);
+ error = input_mt_init_slots(input_dev, ts->chip_info->max_fingers,
+ INPUT_MT_DIRECT | INPUT_MT_TRACK |
+ INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&client->dev, "failed to initialize MT slots\n");
+ return error;
+ }
+
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, st1232_ts_irq_handler,
IRQF_ONESHOT,
@@ -266,16 +275,6 @@ static int st1232_ts_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, ts);
- device_init_wakeup(&client->dev, 1);
-
- return 0;
-}
-
-static int st1232_ts_remove(struct i2c_client *client)
-{
- struct st1232_ts_data *ts = i2c_get_clientdata(client);
-
- st1232_ts_power(ts, false);
return 0;
}
@@ -285,12 +284,10 @@ static int __maybe_unused st1232_ts_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct st1232_ts_data *ts = i2c_get_clientdata(client);
- if (device_may_wakeup(&client->dev)) {
- enable_irq_wake(client->irq);
- } else {
- disable_irq(client->irq);
+ disable_irq(client->irq);
+
+ if (!device_may_wakeup(&client->dev))
st1232_ts_power(ts, false);
- }
return 0;
}
@@ -300,12 +297,10 @@ static int __maybe_unused st1232_ts_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct st1232_ts_data *ts = i2c_get_clientdata(client);
- if (device_may_wakeup(&client->dev)) {
- disable_irq_wake(client->irq);
- } else {
+ if (!device_may_wakeup(&client->dev))
st1232_ts_power(ts, true);
- enable_irq(client->irq);
- }
+
+ enable_irq(client->irq);
return 0;
}
@@ -329,7 +324,6 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
- .remove = st1232_ts_remove,
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
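The st1232 rework switches from the old anonymous input_mt_sync() protocol to tracked slots: contacts are gathered into an input_mt_pos array, the input core assigns persistent slot numbers, and input_mt_sync_frame() releases any slot that was not refreshed in the current frame. In outline (finger counting and coordinate decoding are device specific; the names below are illustrative):

#include <linux/input.h>
#include <linux/input/mt.h>

#define EXAMPLE_MAX_FINGERS	10

/* During probe the device must be set up for tracked slots. */
static int example_init_mt(struct input_dev *input)
{
	return input_mt_init_slots(input, EXAMPLE_MAX_FINGERS,
				   INPUT_MT_DIRECT | INPUT_MT_TRACK |
				   INPUT_MT_DROP_UNUSED);
}

static void example_report(struct input_dev *input,
			   const struct input_mt_pos *pos, int n_contacts)
{
	int slots[EXAMPLE_MAX_FINGERS];
	int i;

	/* Let the input core map contacts onto persistent slot IDs. */
	input_mt_assign_slots(input, slots, pos, n_contacts, 0);

	for (i = 0; i < n_contacts; i++) {
		input_mt_slot(input, slots[i]);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
		input_report_abs(input, ABS_MT_POSITION_X, pos[i].x);
		input_report_abs(input, ABS_MT_POSITION_Y, pos[i].y);
	}

	/* Releases any slot not touched this frame, then emits SYN_REPORT. */
	input_mt_sync_frame(input);
	input_sync(input);
}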
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 3fd3e862269b..1dd47dda71cd 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -27,7 +27,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/printk.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/usb/input.h>
#include <linux/videodev2.h>
@@ -206,7 +206,7 @@ struct sur40_state {
struct usb_device *usbdev;
struct device *dev;
- struct input_polled_dev *input;
+ struct input_dev *input;
struct v4l2_device v4l2;
struct video_device vdev;
@@ -370,6 +370,10 @@ static int sur40_init(struct sur40_state *dev)
goto error;
result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12);
+ if (result < 0)
+ goto error;
+
+ result = 0;
/*
* Discard the result buffer - no known data inside except
@@ -381,22 +385,22 @@ error:
}
/*
- * Callback routines from input_polled_dev
+ * Callback routines from input_dev
*/
/* Enable the device, polling will now start. */
-static void sur40_open(struct input_polled_dev *polldev)
+static int sur40_open(struct input_dev *input)
{
- struct sur40_state *sur40 = polldev->private;
+ struct sur40_state *sur40 = input_get_drvdata(input);
dev_dbg(sur40->dev, "open\n");
- sur40_init(sur40);
+ return sur40_init(sur40);
}
/* Disable device, polling has stopped. */
-static void sur40_close(struct input_polled_dev *polldev)
+static void sur40_close(struct input_dev *input)
{
- struct sur40_state *sur40 = polldev->private;
+ struct sur40_state *sur40 = input_get_drvdata(input);
dev_dbg(sur40->dev, "close\n");
/*
@@ -448,10 +452,9 @@ static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input)
}
/* core function: poll for new input data */
-static void sur40_poll(struct input_polled_dev *polldev)
+static void sur40_poll(struct input_dev *input)
{
- struct sur40_state *sur40 = polldev->private;
- struct input_dev *input = polldev->input;
+ struct sur40_state *sur40 = input_get_drvdata(input);
int result, bulk_read, need_blobs, packet_blobs, i;
u32 uninitialized_var(packet_id);
@@ -613,10 +616,9 @@ err_poll:
}
/* Initialize input device parameters. */
-static void sur40_input_setup(struct input_dev *input_dev)
+static int sur40_input_setup_events(struct input_dev *input_dev)
{
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(EV_ABS, input_dev->evbit);
+ int error;
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, SENSOR_RES_X, 0, 0);
@@ -637,8 +639,14 @@ static void sur40_input_setup(struct input_dev *input_dev)
input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- input_mt_init_slots(input_dev, MAX_CONTACTS,
- INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ error = input_mt_init_slots(input_dev, MAX_CONTACTS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(input_dev->dev.parent, "failed to set up slots\n");
+ return error;
+ }
+
+ return 0;
}
/* Check candidate USB interface. */
@@ -649,7 +657,7 @@ static int sur40_probe(struct usb_interface *interface,
struct sur40_state *sur40;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
int error;
/* Check if we really have the right interface. */
@@ -670,8 +678,8 @@ static int sur40_probe(struct usb_interface *interface,
if (!sur40)
return -ENOMEM;
- poll_dev = input_allocate_polled_device();
- if (!poll_dev) {
+ input = input_allocate_device();
+ if (!input) {
error = -ENOMEM;
goto err_free_dev;
}
@@ -681,26 +689,33 @@ static int sur40_probe(struct usb_interface *interface,
spin_lock_init(&sur40->qlock);
mutex_init(&sur40->lock);
- /* Set up polled input device control structure */
- poll_dev->private = sur40;
- poll_dev->poll_interval = POLL_INTERVAL;
- poll_dev->open = sur40_open;
- poll_dev->poll = sur40_poll;
- poll_dev->close = sur40_close;
-
/* Set up regular input device structure */
- sur40_input_setup(poll_dev->input);
-
- poll_dev->input->name = DRIVER_LONG;
- usb_to_input_id(usbdev, &poll_dev->input->id);
+ input->name = DRIVER_LONG;
+ usb_to_input_id(usbdev, &input->id);
usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys));
strlcat(sur40->phys, "/input0", sizeof(sur40->phys));
- poll_dev->input->phys = sur40->phys;
- poll_dev->input->dev.parent = &interface->dev;
+ input->phys = sur40->phys;
+ input->dev.parent = &interface->dev;
+
+ input->open = sur40_open;
+ input->close = sur40_close;
+
+ error = sur40_input_setup_events(input);
+ if (error)
+ goto err_free_input;
+
+ input_set_drvdata(input, sur40);
+ error = input_setup_polling(input, sur40_poll);
+ if (error) {
+ dev_err(&interface->dev, "failed to set up polling\n");
+ goto err_free_input;
+ }
+
+ input_set_poll_interval(input, POLL_INTERVAL);
sur40->usbdev = usbdev;
sur40->dev = &interface->dev;
- sur40->input = poll_dev;
+ sur40->input = input;
/* use the bulk-in endpoint tested above */
sur40->bulk_in_size = usb_endpoint_maxp(endpoint);
@@ -709,11 +724,11 @@ static int sur40_probe(struct usb_interface *interface,
if (!sur40->bulk_in_buffer) {
dev_err(&interface->dev, "Unable to allocate input buffer.");
error = -ENOMEM;
- goto err_free_polldev;
+ goto err_free_input;
}
/* register the polled input device */
- error = input_register_polled_device(poll_dev);
+ error = input_register_device(input);
if (error) {
dev_err(&interface->dev,
"Unable to register polled input device.");
@@ -796,8 +811,8 @@ err_unreg_v4l2:
v4l2_device_unregister(&sur40->v4l2);
err_free_buffer:
kfree(sur40->bulk_in_buffer);
-err_free_polldev:
- input_free_polled_device(sur40->input);
+err_free_input:
+ input_free_device(input);
err_free_dev:
kfree(sur40);
@@ -813,8 +828,7 @@ static void sur40_disconnect(struct usb_interface *interface)
video_unregister_device(&sur40->vdev);
v4l2_device_unregister(&sur40->v4l2);
- input_unregister_polled_device(sur40->input);
- input_free_polled_device(sur40->input);
+ input_unregister_device(sur40->input);
kfree(sur40->bulk_in_buffer);
kfree(sur40);
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 75170a7439b1..357a3108f2e5 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -17,7 +17,6 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/platform_device.h>
#include <linux/mfd/tps6507x.h>
#include <linux/input/tps6507x-ts.h>
@@ -40,7 +39,7 @@ struct ts_event {
struct tps6507x_ts {
struct device *dev;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct tps6507x_dev *mfd;
char phys[32];
struct ts_event tc;
@@ -148,10 +147,9 @@ static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc)
return ret;
}
-static void tps6507x_ts_poll(struct input_polled_dev *poll_dev)
+static void tps6507x_ts_poll(struct input_dev *input_dev)
{
- struct tps6507x_ts *tsc = poll_dev->private;
- struct input_dev *input_dev = poll_dev->input;
+ struct tps6507x_ts *tsc = input_get_drvdata(input_dev);
bool pendown;
s32 ret;
@@ -205,7 +203,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
const struct tps6507x_board *tps_board;
const struct touchscreen_init_data *init_data;
struct tps6507x_ts *tsc;
- struct input_polled_dev *poll_dev;
struct input_dev *input_dev;
int error;
@@ -240,23 +237,16 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
snprintf(tsc->phys, sizeof(tsc->phys),
"%s/input0", dev_name(tsc->dev));
- poll_dev = devm_input_allocate_polled_device(&pdev->dev);
- if (!poll_dev) {
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
dev_err(tsc->dev, "Failed to allocate polled input device.\n");
return -ENOMEM;
}
- tsc->poll_dev = poll_dev;
-
- poll_dev->private = tsc;
- poll_dev->poll = tps6507x_ts_poll;
- poll_dev->poll_interval = init_data ?
- init_data->poll_period : TSC_DEFAULT_POLL_PERIOD;
-
- input_dev = poll_dev->input;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ tsc->input = input_dev;
+ input_set_drvdata(input_dev, tsc);
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0);
@@ -275,7 +265,15 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
if (error)
return error;
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input_dev, tps6507x_ts_poll);
+ if (error)
+ return error;
+
+ input_set_poll_interval(input_dev,
+ init_data ? init_data->poll_period :
+ TSC_DEFAULT_POLL_PERIOD);
+
+ error = input_register_device(input_dev);
if (error)
return error;
diff --git a/drivers/input/touchscreen/ts4800-ts.c b/drivers/input/touchscreen/ts4800-ts.c
index 5b4f5362c67b..6cf66aadc10e 100644
--- a/drivers/input/touchscreen/ts4800-ts.c
+++ b/drivers/input/touchscreen/ts4800-ts.c
@@ -10,7 +10,6 @@
#include <linux/bitops.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -33,7 +32,7 @@
#define Y_OFFSET 0x2
struct ts4800_ts {
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct device *dev;
char phys[32];
@@ -46,22 +45,26 @@ struct ts4800_ts {
int debounce;
};
-static void ts4800_ts_open(struct input_polled_dev *dev)
+static int ts4800_ts_open(struct input_dev *input_dev)
{
- struct ts4800_ts *ts = dev->private;
- int ret;
+ struct ts4800_ts *ts = input_get_drvdata(input_dev);
+ int error;
ts->pendown = false;
ts->debounce = DEBOUNCE_COUNT;
- ret = regmap_update_bits(ts->regmap, ts->reg, ts->bit, ts->bit);
- if (ret)
- dev_warn(ts->dev, "Failed to enable touchscreen\n");
+ error = regmap_update_bits(ts->regmap, ts->reg, ts->bit, ts->bit);
+ if (error) {
+ dev_warn(ts->dev, "Failed to enable touchscreen: %d\n", error);
+ return error;
+ }
+
+ return 0;
}
-static void ts4800_ts_close(struct input_polled_dev *dev)
+static void ts4800_ts_close(struct input_dev *input_dev)
{
- struct ts4800_ts *ts = dev->private;
+ struct ts4800_ts *ts = input_get_drvdata(input_dev);
int ret;
ret = regmap_update_bits(ts->regmap, ts->reg, ts->bit, 0);
@@ -70,10 +73,9 @@ static void ts4800_ts_close(struct input_polled_dev *dev)
}
-static void ts4800_ts_poll(struct input_polled_dev *dev)
+static void ts4800_ts_poll(struct input_dev *input_dev)
{
- struct input_dev *input_dev = dev->input;
- struct ts4800_ts *ts = dev->private;
+ struct ts4800_ts *ts = input_get_drvdata(input_dev);
u16 last_x = readw(ts->base + X_OFFSET);
u16 last_y = readw(ts->base + Y_OFFSET);
bool pendown = last_x & PENDOWN_MASK;
@@ -146,7 +148,7 @@ static int ts4800_parse_dt(struct platform_device *pdev,
static int ts4800_ts_probe(struct platform_device *pdev)
{
- struct input_polled_dev *poll_dev;
+ struct input_dev *input_dev;
struct ts4800_ts *ts;
int error;
@@ -162,32 +164,38 @@ static int ts4800_ts_probe(struct platform_device *pdev)
if (IS_ERR(ts->base))
return PTR_ERR(ts->base);
- poll_dev = devm_input_allocate_polled_device(&pdev->dev);
- if (!poll_dev)
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
return -ENOMEM;
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&pdev->dev));
- ts->poll_dev = poll_dev;
+ ts->input = input_dev;
ts->dev = &pdev->dev;
- poll_dev->private = ts;
- poll_dev->poll_interval = POLL_INTERVAL;
- poll_dev->open = ts4800_ts_open;
- poll_dev->close = ts4800_ts_close;
- poll_dev->poll = ts4800_ts_poll;
+ input_set_drvdata(input_dev, ts);
+
+ input_dev->name = "TS-4800 Touchscreen";
+ input_dev->phys = ts->phys;
+
+ input_dev->open = ts4800_ts_open;
+ input_dev->close = ts4800_ts_close;
+
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
- poll_dev->input->name = "TS-4800 Touchscreen";
- poll_dev->input->phys = ts->phys;
+ error = input_setup_polling(input_dev, ts4800_ts_poll);
+ if (error) {
+ dev_err(&pdev->dev, "Unable to set up polling: %d\n", error);
+ return error;
+ }
- input_set_capability(poll_dev->input, EV_KEY, BTN_TOUCH);
- input_set_abs_params(poll_dev->input, ABS_X, 0, MAX_12BIT, 0, 0);
- input_set_abs_params(poll_dev->input, ABS_Y, 0, MAX_12BIT, 0, 0);
+ input_set_poll_interval(input_dev, POLL_INTERVAL);
- error = input_register_polled_device(poll_dev);
+ error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev,
- "Unabled to register polled input device (%d)\n",
- error);
+ "Unable to register input device: %d\n", error);
return error;
}
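The sur40, tps6507x-ts and ts4800-ts conversions above all follow the same pattern: drop <linux/input-polldev.h>, allocate a regular input device, attach the driver state with input_set_drvdata(), and let the input core drive polling through input_setup_polling() and input_set_poll_interval(). Below is a minimal sketch of that pattern for a hypothetical "foo" platform device; the foo_* names, FOO_POLL_MS and the reported capability are illustrative assumptions, not part of any patch shown here.

/*
 * Minimal sketch of the input_polled_dev -> input_dev conversion pattern,
 * assuming a hypothetical "foo" platform device. The foo_* names and
 * FOO_POLL_MS are illustrative only.
 */
#include <linux/device.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define FOO_POLL_MS	20	/* assumed poll interval, in ms */

struct foo_ts {
	struct input_dev *input;
	bool pen_down;			/* would be read from the hardware */
};

static void foo_poll(struct input_dev *input)
{
	struct foo_ts *foo = input_get_drvdata(input);

	/* a real driver would read its controller registers here */
	input_report_key(input, BTN_TOUCH, foo->pen_down);
	input_sync(input);
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_ts *foo;
	struct input_dev *input;
	int error;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* devm-managed, so error paths need no explicit input_free_device() */
	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	foo->input = input;
	input->name = "foo touchscreen (sketch)";
	input_set_drvdata(input, foo);
	input_set_capability(input, EV_KEY, BTN_TOUCH);

	/* the input core now owns the poll work, replacing input_polled_dev */
	error = input_setup_polling(input, foo_poll);
	if (error)
		return error;

	input_set_poll_interval(input, FOO_POLL_MS);

	return input_register_device(input);
}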
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index f017af8c2aa3..1afc6bde2891 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <asm/unaligned.h>
#define WACOM_CMD_QUERY0 0x04
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 6ab4012a059a..c49afbea3458 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_MSM8974
+ tristate "Qualcomm MSM8974 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8974-based
+ platforms.
+
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 67dafb783dec..9adf9e380545 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+qnoc-msm8974-objs := msm8974.o
qnoc-qcs404-objs := qcs404.o
qnoc-sdm845-objs := sdm845.o
icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
new file mode 100644
index 000000000000..ce599a0c83d9
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ *
+ * Based on MSM bus code from downstream MSM kernel sources.
+ * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
+ *
+ * Based on qcs404.c
+ * Copyright (C) 2019 Linaro Ltd
+ *
+ * Here's a rough representation that shows the various buses that form the
+ * Network On Chip (NOC) for the msm8974:
+ *
+ * Multimedia Subsystem (MMSS)
+ * |----------+-----------------------------------+-----------|
+ * | |
+ * | |
+ * Config | Bus Interface | Memory Controller
+ * |------------+-+-----------| |------------+-+-----------|
+ * | |
+ * | |
+ * | System |
+ * |--------------+-+---------------------------------+-+-------------|
+ * | |
+ * | |
+ * Peripheral | On Chip | Memory (OCMEM)
+ * |------------+-------------| |------------+-------------|
+ */
+
+#include <dt-bindings/interconnect/qcom,msm8974.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+
+enum {
+ MSM8974_BIMC_MAS_AMPSS_M0 = 1,
+ MSM8974_BIMC_MAS_AMPSS_M1,
+ MSM8974_BIMC_MAS_MSS_PROC,
+ MSM8974_BIMC_TO_MNOC,
+ MSM8974_BIMC_TO_SNOC,
+ MSM8974_BIMC_SLV_EBI_CH0,
+ MSM8974_BIMC_SLV_AMPSS_L2,
+ MSM8974_CNOC_MAS_RPM_INST,
+ MSM8974_CNOC_MAS_RPM_DATA,
+ MSM8974_CNOC_MAS_RPM_SYS,
+ MSM8974_CNOC_MAS_DEHR,
+ MSM8974_CNOC_MAS_QDSS_DAP,
+ MSM8974_CNOC_MAS_SPDM,
+ MSM8974_CNOC_MAS_TIC,
+ MSM8974_CNOC_SLV_CLK_CTL,
+ MSM8974_CNOC_SLV_CNOC_MSS,
+ MSM8974_CNOC_SLV_SECURITY,
+ MSM8974_CNOC_SLV_TCSR,
+ MSM8974_CNOC_SLV_TLMM,
+ MSM8974_CNOC_SLV_CRYPTO_0_CFG,
+ MSM8974_CNOC_SLV_CRYPTO_1_CFG,
+ MSM8974_CNOC_SLV_IMEM_CFG,
+ MSM8974_CNOC_SLV_MESSAGE_RAM,
+ MSM8974_CNOC_SLV_BIMC_CFG,
+ MSM8974_CNOC_SLV_BOOT_ROM,
+ MSM8974_CNOC_SLV_PMIC_ARB,
+ MSM8974_CNOC_SLV_SPDM_WRAPPER,
+ MSM8974_CNOC_SLV_DEHR_CFG,
+ MSM8974_CNOC_SLV_MPM,
+ MSM8974_CNOC_SLV_QDSS_CFG,
+ MSM8974_CNOC_SLV_RBCPR_CFG,
+ MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG,
+ MSM8974_CNOC_TO_SNOC,
+ MSM8974_CNOC_SLV_CNOC_ONOC_CFG,
+ MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG,
+ MSM8974_CNOC_SLV_CNOC_MNOC_CFG,
+ MSM8974_CNOC_SLV_PNOC_CFG,
+ MSM8974_CNOC_SLV_SNOC_MPU_CFG,
+ MSM8974_CNOC_SLV_SNOC_CFG,
+ MSM8974_CNOC_SLV_EBI1_DLL_CFG,
+ MSM8974_CNOC_SLV_PHY_APU_CFG,
+ MSM8974_CNOC_SLV_EBI1_PHY_CFG,
+ MSM8974_CNOC_SLV_RPM,
+ MSM8974_CNOC_SLV_SERVICE_CNOC,
+ MSM8974_MNOC_MAS_GRAPHICS_3D,
+ MSM8974_MNOC_MAS_JPEG,
+ MSM8974_MNOC_MAS_MDP_PORT0,
+ MSM8974_MNOC_MAS_VIDEO_P0,
+ MSM8974_MNOC_MAS_VIDEO_P1,
+ MSM8974_MNOC_MAS_VFE,
+ MSM8974_MNOC_TO_CNOC,
+ MSM8974_MNOC_TO_BIMC,
+ MSM8974_MNOC_SLV_CAMERA_CFG,
+ MSM8974_MNOC_SLV_DISPLAY_CFG,
+ MSM8974_MNOC_SLV_OCMEM_CFG,
+ MSM8974_MNOC_SLV_CPR_CFG,
+ MSM8974_MNOC_SLV_CPR_XPU_CFG,
+ MSM8974_MNOC_SLV_MISC_CFG,
+ MSM8974_MNOC_SLV_MISC_XPU_CFG,
+ MSM8974_MNOC_SLV_VENUS_CFG,
+ MSM8974_MNOC_SLV_GRAPHICS_3D_CFG,
+ MSM8974_MNOC_SLV_MMSS_CLK_CFG,
+ MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG,
+ MSM8974_MNOC_SLV_MNOC_MPU_CFG,
+ MSM8974_MNOC_SLV_ONOC_MPU_CFG,
+ MSM8974_MNOC_SLV_SERVICE_MNOC,
+ MSM8974_OCMEM_NOC_TO_OCMEM_VNOC,
+ MSM8974_OCMEM_MAS_JPEG_OCMEM,
+ MSM8974_OCMEM_MAS_MDP_OCMEM,
+ MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM,
+ MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM,
+ MSM8974_OCMEM_MAS_VFE_OCMEM,
+ MSM8974_OCMEM_MAS_CNOC_ONOC_CFG,
+ MSM8974_OCMEM_SLV_SERVICE_ONOC,
+ MSM8974_OCMEM_VNOC_TO_SNOC,
+ MSM8974_OCMEM_VNOC_TO_OCMEM_NOC,
+ MSM8974_OCMEM_VNOC_MAS_GFX3D,
+ MSM8974_OCMEM_SLV_OCMEM,
+ MSM8974_PNOC_MAS_PNOC_CFG,
+ MSM8974_PNOC_MAS_SDCC_1,
+ MSM8974_PNOC_MAS_SDCC_3,
+ MSM8974_PNOC_MAS_SDCC_4,
+ MSM8974_PNOC_MAS_SDCC_2,
+ MSM8974_PNOC_MAS_TSIF,
+ MSM8974_PNOC_MAS_BAM_DMA,
+ MSM8974_PNOC_MAS_BLSP_2,
+ MSM8974_PNOC_MAS_USB_HSIC,
+ MSM8974_PNOC_MAS_BLSP_1,
+ MSM8974_PNOC_MAS_USB_HS,
+ MSM8974_PNOC_TO_SNOC,
+ MSM8974_PNOC_SLV_SDCC_1,
+ MSM8974_PNOC_SLV_SDCC_3,
+ MSM8974_PNOC_SLV_SDCC_2,
+ MSM8974_PNOC_SLV_SDCC_4,
+ MSM8974_PNOC_SLV_TSIF,
+ MSM8974_PNOC_SLV_BAM_DMA,
+ MSM8974_PNOC_SLV_BLSP_2,
+ MSM8974_PNOC_SLV_USB_HSIC,
+ MSM8974_PNOC_SLV_BLSP_1,
+ MSM8974_PNOC_SLV_USB_HS,
+ MSM8974_PNOC_SLV_PDM,
+ MSM8974_PNOC_SLV_PERIPH_APU_CFG,
+ MSM8974_PNOC_SLV_PNOC_MPU_CFG,
+ MSM8974_PNOC_SLV_PRNG,
+ MSM8974_PNOC_SLV_SERVICE_PNOC,
+ MSM8974_SNOC_MAS_LPASS_AHB,
+ MSM8974_SNOC_MAS_QDSS_BAM,
+ MSM8974_SNOC_MAS_SNOC_CFG,
+ MSM8974_SNOC_TO_BIMC,
+ MSM8974_SNOC_TO_CNOC,
+ MSM8974_SNOC_TO_PNOC,
+ MSM8974_SNOC_TO_OCMEM_VNOC,
+ MSM8974_SNOC_MAS_CRYPTO_CORE0,
+ MSM8974_SNOC_MAS_CRYPTO_CORE1,
+ MSM8974_SNOC_MAS_LPASS_PROC,
+ MSM8974_SNOC_MAS_MSS,
+ MSM8974_SNOC_MAS_MSS_NAV,
+ MSM8974_SNOC_MAS_OCMEM_DMA,
+ MSM8974_SNOC_MAS_WCSS,
+ MSM8974_SNOC_MAS_QDSS_ETR,
+ MSM8974_SNOC_MAS_USB3,
+ MSM8974_SNOC_SLV_AMPSS,
+ MSM8974_SNOC_SLV_LPASS,
+ MSM8974_SNOC_SLV_USB3,
+ MSM8974_SNOC_SLV_WCSS,
+ MSM8974_SNOC_SLV_OCIMEM,
+ MSM8974_SNOC_SLV_SNOC_OCMEM,
+ MSM8974_SNOC_SLV_SERVICE_SNOC,
+ MSM8974_SNOC_SLV_QDSS_STM,
+};
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+#define to_msm8974_icc_provider(_provider) \
+ container_of(_provider, struct msm8974_icc_provider, provider)
+
+static const struct clk_bulk_data msm8974_icc_bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+/**
+ * struct msm8974_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct msm8974_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+#define MSM8974_ICC_MAX_LINKS 3
+
+/**
+ * struct msm8974_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM ID for devices that are bus masters
+ * @slv_rpm_id: RPM ID for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct msm8974_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[MSM8974_ICC_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct msm8974_icc_desc {
+ struct msm8974_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct msm8974_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(mas_ampss_m0, MSM8974_BIMC_MAS_AMPSS_M0, 8, 0, -1);
+DEFINE_QNODE(mas_ampss_m1, MSM8974_BIMC_MAS_AMPSS_M1, 8, 0, -1);
+DEFINE_QNODE(mas_mss_proc, MSM8974_BIMC_MAS_MSS_PROC, 8, 1, -1);
+DEFINE_QNODE(bimc_to_mnoc, MSM8974_BIMC_TO_MNOC, 8, 2, -1, MSM8974_BIMC_SLV_EBI_CH0);
+DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC, MSM8974_BIMC_SLV_EBI_CH0, MSM8974_BIMC_MAS_AMPSS_M0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0);
+DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1);
+
+static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
+ [BIMC_MAS_AMPSS_M0] = &mas_ampss_m0,
+ [BIMC_MAS_AMPSS_M1] = &mas_ampss_m1,
+ [BIMC_MAS_MSS_PROC] = &mas_mss_proc,
+ [BIMC_TO_MNOC] = &bimc_to_mnoc,
+ [BIMC_TO_SNOC] = &bimc_to_snoc,
+ [BIMC_SLV_EBI_CH0] = &slv_ebi_ch0,
+ [BIMC_SLV_AMPSS_L2] = &slv_ampss_l2,
+};
+
+static struct msm8974_icc_desc msm8974_bimc = {
+ .nodes = msm8974_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_bimc_nodes),
+};
+
+DEFINE_QNODE(mas_rpm_inst, MSM8974_CNOC_MAS_RPM_INST, 8, 45, -1);
+DEFINE_QNODE(mas_rpm_data, MSM8974_CNOC_MAS_RPM_DATA, 8, 46, -1);
+DEFINE_QNODE(mas_rpm_sys, MSM8974_CNOC_MAS_RPM_SYS, 8, 47, -1);
+DEFINE_QNODE(mas_dehr, MSM8974_CNOC_MAS_DEHR, 8, 48, -1);
+DEFINE_QNODE(mas_qdss_dap, MSM8974_CNOC_MAS_QDSS_DAP, 8, 49, -1);
+DEFINE_QNODE(mas_spdm, MSM8974_CNOC_MAS_SPDM, 8, 50, -1);
+DEFINE_QNODE(mas_tic, MSM8974_CNOC_MAS_TIC, 8, 51, -1);
+DEFINE_QNODE(slv_clk_ctl, MSM8974_CNOC_SLV_CLK_CTL, 8, -1, 47);
+DEFINE_QNODE(slv_cnoc_mss, MSM8974_CNOC_SLV_CNOC_MSS, 8, -1, 48);
+DEFINE_QNODE(slv_security, MSM8974_CNOC_SLV_SECURITY, 8, -1, 49);
+DEFINE_QNODE(slv_tcsr, MSM8974_CNOC_SLV_TCSR, 8, -1, 50);
+DEFINE_QNODE(slv_tlmm, MSM8974_CNOC_SLV_TLMM, 8, -1, 51);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8974_CNOC_SLV_CRYPTO_0_CFG, 8, -1, 52);
+DEFINE_QNODE(slv_crypto_1_cfg, MSM8974_CNOC_SLV_CRYPTO_1_CFG, 8, -1, 53);
+DEFINE_QNODE(slv_imem_cfg, MSM8974_CNOC_SLV_IMEM_CFG, 8, -1, 54);
+DEFINE_QNODE(slv_message_ram, MSM8974_CNOC_SLV_MESSAGE_RAM, 8, -1, 55);
+DEFINE_QNODE(slv_bimc_cfg, MSM8974_CNOC_SLV_BIMC_CFG, 8, -1, 56);
+DEFINE_QNODE(slv_boot_rom, MSM8974_CNOC_SLV_BOOT_ROM, 8, -1, 57);
+DEFINE_QNODE(slv_pmic_arb, MSM8974_CNOC_SLV_PMIC_ARB, 8, -1, 59);
+DEFINE_QNODE(slv_spdm_wrapper, MSM8974_CNOC_SLV_SPDM_WRAPPER, 8, -1, 60);
+DEFINE_QNODE(slv_dehr_cfg, MSM8974_CNOC_SLV_DEHR_CFG, 8, -1, 61);
+DEFINE_QNODE(slv_mpm, MSM8974_CNOC_SLV_MPM, 8, -1, 62);
+DEFINE_QNODE(slv_qdss_cfg, MSM8974_CNOC_SLV_QDSS_CFG, 8, -1, 63);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8974_CNOC_SLV_RBCPR_CFG, 8, -1, 64);
+DEFINE_QNODE(slv_rbcpr_qdss_apu_cfg, MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG, 8, -1, 65);
+DEFINE_QNODE(cnoc_to_snoc, MSM8974_CNOC_TO_SNOC, 8, 52, 75);
+DEFINE_QNODE(slv_cnoc_onoc_cfg, MSM8974_CNOC_SLV_CNOC_ONOC_CFG, 8, -1, 68);
+DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG, 8, -1, 58);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_CFG, 8, -1, 66);
+DEFINE_QNODE(slv_pnoc_cfg, MSM8974_CNOC_SLV_PNOC_CFG, 8, -1, 69);
+DEFINE_QNODE(slv_snoc_mpu_cfg, MSM8974_CNOC_SLV_SNOC_MPU_CFG, 8, -1, 67);
+DEFINE_QNODE(slv_snoc_cfg, MSM8974_CNOC_SLV_SNOC_CFG, 8, -1, 70);
+DEFINE_QNODE(slv_ebi1_dll_cfg, MSM8974_CNOC_SLV_EBI1_DLL_CFG, 8, -1, 71);
+DEFINE_QNODE(slv_phy_apu_cfg, MSM8974_CNOC_SLV_PHY_APU_CFG, 8, -1, 72);
+DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73);
+DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74);
+DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76);
+
+static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
+ [CNOC_MAS_RPM_INST] = &mas_rpm_inst,
+ [CNOC_MAS_RPM_DATA] = &mas_rpm_data,
+ [CNOC_MAS_RPM_SYS] = &mas_rpm_sys,
+ [CNOC_MAS_DEHR] = &mas_dehr,
+ [CNOC_MAS_QDSS_DAP] = &mas_qdss_dap,
+ [CNOC_MAS_SPDM] = &mas_spdm,
+ [CNOC_MAS_TIC] = &mas_tic,
+ [CNOC_SLV_CLK_CTL] = &slv_clk_ctl,
+ [CNOC_SLV_CNOC_MSS] = &slv_cnoc_mss,
+ [CNOC_SLV_SECURITY] = &slv_security,
+ [CNOC_SLV_TCSR] = &slv_tcsr,
+ [CNOC_SLV_TLMM] = &slv_tlmm,
+ [CNOC_SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [CNOC_SLV_CRYPTO_1_CFG] = &slv_crypto_1_cfg,
+ [CNOC_SLV_IMEM_CFG] = &slv_imem_cfg,
+ [CNOC_SLV_MESSAGE_RAM] = &slv_message_ram,
+ [CNOC_SLV_BIMC_CFG] = &slv_bimc_cfg,
+ [CNOC_SLV_BOOT_ROM] = &slv_boot_rom,
+ [CNOC_SLV_PMIC_ARB] = &slv_pmic_arb,
+ [CNOC_SLV_SPDM_WRAPPER] = &slv_spdm_wrapper,
+ [CNOC_SLV_DEHR_CFG] = &slv_dehr_cfg,
+ [CNOC_SLV_MPM] = &slv_mpm,
+ [CNOC_SLV_QDSS_CFG] = &slv_qdss_cfg,
+ [CNOC_SLV_RBCPR_CFG] = &slv_rbcpr_cfg,
+ [CNOC_SLV_RBCPR_QDSS_APU_CFG] = &slv_rbcpr_qdss_apu_cfg,
+ [CNOC_TO_SNOC] = &cnoc_to_snoc,
+ [CNOC_SLV_CNOC_ONOC_CFG] = &slv_cnoc_onoc_cfg,
+ [CNOC_SLV_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg,
+ [CNOC_SLV_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
+ [CNOC_SLV_PNOC_CFG] = &slv_pnoc_cfg,
+ [CNOC_SLV_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg,
+ [CNOC_SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [CNOC_SLV_EBI1_DLL_CFG] = &slv_ebi1_dll_cfg,
+ [CNOC_SLV_PHY_APU_CFG] = &slv_phy_apu_cfg,
+ [CNOC_SLV_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg,
+ [CNOC_SLV_RPM] = &slv_rpm,
+ [CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc,
+};
+
+static struct msm8974_icc_desc msm8974_cnoc = {
+ .nodes = msm8974_cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes),
+};
+
+DEFINE_QNODE(mas_graphics_3d, MSM8974_MNOC_MAS_GRAPHICS_3D, 16, 6, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_jpeg, MSM8974_MNOC_MAS_JPEG, 16, 7, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_mdp_port0, MSM8974_MNOC_MAS_MDP_PORT0, 16, 8, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_video_p0, MSM8974_MNOC_MAS_VIDEO_P0, 16, 9, -1);
+DEFINE_QNODE(mas_video_p1, MSM8974_MNOC_MAS_VIDEO_P1, 16, 10, -1);
+DEFINE_QNODE(mas_vfe, MSM8974_MNOC_MAS_VFE, 16, 11, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mnoc_to_cnoc, MSM8974_MNOC_TO_CNOC, 16, 4, -1);
+DEFINE_QNODE(mnoc_to_bimc, MSM8974_MNOC_TO_BIMC, 16, -1, 16, MSM8974_BIMC_TO_MNOC);
+DEFINE_QNODE(slv_camera_cfg, MSM8974_MNOC_SLV_CAMERA_CFG, 16, -1, 3);
+DEFINE_QNODE(slv_display_cfg, MSM8974_MNOC_SLV_DISPLAY_CFG, 16, -1, 4);
+DEFINE_QNODE(slv_ocmem_cfg, MSM8974_MNOC_SLV_OCMEM_CFG, 16, -1, 5);
+DEFINE_QNODE(slv_cpr_cfg, MSM8974_MNOC_SLV_CPR_CFG, 16, -1, 6);
+DEFINE_QNODE(slv_cpr_xpu_cfg, MSM8974_MNOC_SLV_CPR_XPU_CFG, 16, -1, 7);
+DEFINE_QNODE(slv_misc_cfg, MSM8974_MNOC_SLV_MISC_CFG, 16, -1, 8);
+DEFINE_QNODE(slv_misc_xpu_cfg, MSM8974_MNOC_SLV_MISC_XPU_CFG, 16, -1, 9);
+DEFINE_QNODE(slv_venus_cfg, MSM8974_MNOC_SLV_VENUS_CFG, 16, -1, 10);
+DEFINE_QNODE(slv_graphics_3d_cfg, MSM8974_MNOC_SLV_GRAPHICS_3D_CFG, 16, -1, 11);
+DEFINE_QNODE(slv_mmss_clk_cfg, MSM8974_MNOC_SLV_MMSS_CLK_CFG, 16, -1, 12);
+DEFINE_QNODE(slv_mmss_clk_xpu_cfg, MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG, 16, -1, 13);
+DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14);
+DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15);
+DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17);
+
+static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
+ [MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d,
+ [MNOC_MAS_JPEG] = &mas_jpeg,
+ [MNOC_MAS_MDP_PORT0] = &mas_mdp_port0,
+ [MNOC_MAS_VIDEO_P0] = &mas_video_p0,
+ [MNOC_MAS_VIDEO_P1] = &mas_video_p1,
+ [MNOC_MAS_VFE] = &mas_vfe,
+ [MNOC_TO_CNOC] = &mnoc_to_cnoc,
+ [MNOC_TO_BIMC] = &mnoc_to_bimc,
+ [MNOC_SLV_CAMERA_CFG] = &slv_camera_cfg,
+ [MNOC_SLV_DISPLAY_CFG] = &slv_display_cfg,
+ [MNOC_SLV_OCMEM_CFG] = &slv_ocmem_cfg,
+ [MNOC_SLV_CPR_CFG] = &slv_cpr_cfg,
+ [MNOC_SLV_CPR_XPU_CFG] = &slv_cpr_xpu_cfg,
+ [MNOC_SLV_MISC_CFG] = &slv_misc_cfg,
+ [MNOC_SLV_MISC_XPU_CFG] = &slv_misc_xpu_cfg,
+ [MNOC_SLV_VENUS_CFG] = &slv_venus_cfg,
+ [MNOC_SLV_GRAPHICS_3D_CFG] = &slv_graphics_3d_cfg,
+ [MNOC_SLV_MMSS_CLK_CFG] = &slv_mmss_clk_cfg,
+ [MNOC_SLV_MMSS_CLK_XPU_CFG] = &slv_mmss_clk_xpu_cfg,
+ [MNOC_SLV_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
+ [MNOC_SLV_ONOC_MPU_CFG] = &slv_onoc_mpu_cfg,
+ [MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc,
+};
+
+static struct msm8974_icc_desc msm8974_mnoc = {
+ .nodes = msm8974_mnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes),
+};
+
+DEFINE_QNODE(ocmem_noc_to_ocmem_vnoc, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC, 16, 54, 78, MSM8974_OCMEM_SLV_OCMEM);
+DEFINE_QNODE(mas_jpeg_ocmem, MSM8974_OCMEM_MAS_JPEG_OCMEM, 16, 13, -1);
+DEFINE_QNODE(mas_mdp_ocmem, MSM8974_OCMEM_MAS_MDP_OCMEM, 16, 14, -1);
+DEFINE_QNODE(mas_video_p0_ocmem, MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM, 16, 15, -1);
+DEFINE_QNODE(mas_video_p1_ocmem, MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM, 16, 16, -1);
+DEFINE_QNODE(mas_vfe_ocmem, MSM8974_OCMEM_MAS_VFE_OCMEM, 16, 17, -1);
+DEFINE_QNODE(mas_cnoc_onoc_cfg, MSM8974_OCMEM_MAS_CNOC_ONOC_CFG, 16, 12, -1);
+DEFINE_QNODE(slv_service_onoc, MSM8974_OCMEM_SLV_SERVICE_ONOC, 16, -1, 19);
+DEFINE_QNODE(slv_ocmem, MSM8974_OCMEM_SLV_OCMEM, 16, -1, 18);
+
+/* Virtual NoC is needed for connection to OCMEM */
+DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC);
+DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80);
+DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
+
+static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
+ [OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc,
+ [OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem,
+ [OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem,
+ [OCMEM_MAS_VIDEO_P0_OCMEM] = &mas_video_p0_ocmem,
+ [OCMEM_MAS_VIDEO_P1_OCMEM] = &mas_video_p1_ocmem,
+ [OCMEM_MAS_VFE_OCMEM] = &mas_vfe_ocmem,
+ [OCMEM_MAS_CNOC_ONOC_CFG] = &mas_cnoc_onoc_cfg,
+ [OCMEM_SLV_SERVICE_ONOC] = &slv_service_onoc,
+ [OCMEM_VNOC_TO_SNOC] = &ocmem_vnoc_to_snoc,
+ [OCMEM_VNOC_TO_OCMEM_NOC] = &ocmem_vnoc_to_onoc,
+ [OCMEM_VNOC_MAS_GFX3D] = &mas_v_ocmem_gfx3d,
+ [OCMEM_SLV_OCMEM] = &slv_ocmem,
+};
+
+static struct msm8974_icc_desc msm8974_onoc = {
+ .nodes = msm8974_onoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_onoc_nodes),
+};
+
+DEFINE_QNODE(mas_pnoc_cfg, MSM8974_PNOC_MAS_PNOC_CFG, 8, 43, -1);
+DEFINE_QNODE(mas_sdcc_1, MSM8974_PNOC_MAS_SDCC_1, 8, 33, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_3, MSM8974_PNOC_MAS_SDCC_3, 8, 34, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_4, MSM8974_PNOC_MAS_SDCC_4, 8, 36, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_2, MSM8974_PNOC_MAS_SDCC_2, 8, 35, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_tsif, MSM8974_PNOC_MAS_TSIF, 8, 37, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_bam_dma, MSM8974_PNOC_MAS_BAM_DMA, 8, 38, -1);
+DEFINE_QNODE(mas_blsp_2, MSM8974_PNOC_MAS_BLSP_2, 8, 39, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_usb_hsic, MSM8974_PNOC_MAS_USB_HSIC, 8, 40, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_blsp_1, MSM8974_PNOC_MAS_BLSP_1, 8, 41, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_usb_hs, MSM8974_PNOC_MAS_USB_HS, 8, 42, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(pnoc_to_snoc, MSM8974_PNOC_TO_SNOC, 8, 44, 45, MSM8974_SNOC_TO_PNOC, MSM8974_PNOC_SLV_PRNG);
+DEFINE_QNODE(slv_sdcc_1, MSM8974_PNOC_SLV_SDCC_1, 8, -1, 31);
+DEFINE_QNODE(slv_sdcc_3, MSM8974_PNOC_SLV_SDCC_3, 8, -1, 32);
+DEFINE_QNODE(slv_sdcc_2, MSM8974_PNOC_SLV_SDCC_2, 8, -1, 33);
+DEFINE_QNODE(slv_sdcc_4, MSM8974_PNOC_SLV_SDCC_4, 8, -1, 34);
+DEFINE_QNODE(slv_tsif, MSM8974_PNOC_SLV_TSIF, 8, -1, 35);
+DEFINE_QNODE(slv_bam_dma, MSM8974_PNOC_SLV_BAM_DMA, 8, -1, 36);
+DEFINE_QNODE(slv_blsp_2, MSM8974_PNOC_SLV_BLSP_2, 8, -1, 37);
+DEFINE_QNODE(slv_usb_hsic, MSM8974_PNOC_SLV_USB_HSIC, 8, -1, 38);
+DEFINE_QNODE(slv_blsp_1, MSM8974_PNOC_SLV_BLSP_1, 8, -1, 39);
+DEFINE_QNODE(slv_usb_hs, MSM8974_PNOC_SLV_USB_HS, 8, -1, 40);
+DEFINE_QNODE(slv_pdm, MSM8974_PNOC_SLV_PDM, 8, -1, 41);
+DEFINE_QNODE(slv_periph_apu_cfg, MSM8974_PNOC_SLV_PERIPH_APU_CFG, 8, -1, 42);
+DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43);
+DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46);
+
+static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
+ [PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg,
+ [PNOC_MAS_SDCC_1] = &mas_sdcc_1,
+ [PNOC_MAS_SDCC_3] = &mas_sdcc_3,
+ [PNOC_MAS_SDCC_4] = &mas_sdcc_4,
+ [PNOC_MAS_SDCC_2] = &mas_sdcc_2,
+ [PNOC_MAS_TSIF] = &mas_tsif,
+ [PNOC_MAS_BAM_DMA] = &mas_bam_dma,
+ [PNOC_MAS_BLSP_2] = &mas_blsp_2,
+ [PNOC_MAS_USB_HSIC] = &mas_usb_hsic,
+ [PNOC_MAS_BLSP_1] = &mas_blsp_1,
+ [PNOC_MAS_USB_HS] = &mas_usb_hs,
+ [PNOC_TO_SNOC] = &pnoc_to_snoc,
+ [PNOC_SLV_SDCC_1] = &slv_sdcc_1,
+ [PNOC_SLV_SDCC_3] = &slv_sdcc_3,
+ [PNOC_SLV_SDCC_2] = &slv_sdcc_2,
+ [PNOC_SLV_SDCC_4] = &slv_sdcc_4,
+ [PNOC_SLV_TSIF] = &slv_tsif,
+ [PNOC_SLV_BAM_DMA] = &slv_bam_dma,
+ [PNOC_SLV_BLSP_2] = &slv_blsp_2,
+ [PNOC_SLV_USB_HSIC] = &slv_usb_hsic,
+ [PNOC_SLV_BLSP_1] = &slv_blsp_1,
+ [PNOC_SLV_USB_HS] = &slv_usb_hs,
+ [PNOC_SLV_PDM] = &slv_pdm,
+ [PNOC_SLV_PERIPH_APU_CFG] = &slv_periph_apu_cfg,
+ [PNOC_SLV_PNOC_MPU_CFG] = &slv_pnoc_mpu_cfg,
+ [PNOC_SLV_PRNG] = &slv_prng,
+ [PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc,
+};
+
+static struct msm8974_icc_desc msm8974_pnoc = {
+ .nodes = msm8974_pnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes),
+};
+
+DEFINE_QNODE(mas_lpass_ahb, MSM8974_SNOC_MAS_LPASS_AHB, 8, 18, -1);
+DEFINE_QNODE(mas_qdss_bam, MSM8974_SNOC_MAS_QDSS_BAM, 8, 19, -1);
+DEFINE_QNODE(mas_snoc_cfg, MSM8974_SNOC_MAS_SNOC_CFG, 8, 20, -1);
+DEFINE_QNODE(snoc_to_bimc, MSM8974_SNOC_TO_BIMC, 8, 21, 24, MSM8974_BIMC_TO_SNOC);
+DEFINE_QNODE(snoc_to_cnoc, MSM8974_SNOC_TO_CNOC, 8, 22, 25);
+DEFINE_QNODE(snoc_to_pnoc, MSM8974_SNOC_TO_PNOC, 8, 29, 28, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(snoc_to_ocmem_vnoc, MSM8974_SNOC_TO_OCMEM_VNOC, 8, 53, 77, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
+DEFINE_QNODE(mas_crypto_core0, MSM8974_SNOC_MAS_CRYPTO_CORE0, 8, 23, -1, MSM8974_SNOC_TO_BIMC);
+DEFINE_QNODE(mas_crypto_core1, MSM8974_SNOC_MAS_CRYPTO_CORE1, 8, 24, -1);
+DEFINE_QNODE(mas_lpass_proc, MSM8974_SNOC_MAS_LPASS_PROC, 8, 25, -1, MSM8974_SNOC_TO_OCMEM_VNOC);
+DEFINE_QNODE(mas_mss, MSM8974_SNOC_MAS_MSS, 8, 26, -1);
+DEFINE_QNODE(mas_mss_nav, MSM8974_SNOC_MAS_MSS_NAV, 8, 27, -1);
+DEFINE_QNODE(mas_ocmem_dma, MSM8974_SNOC_MAS_OCMEM_DMA, 8, 28, -1);
+DEFINE_QNODE(mas_wcss, MSM8974_SNOC_MAS_WCSS, 8, 30, -1);
+DEFINE_QNODE(mas_qdss_etr, MSM8974_SNOC_MAS_QDSS_ETR, 8, 31, -1);
+DEFINE_QNODE(mas_usb3, MSM8974_SNOC_MAS_USB3, 8, 32, -1, MSM8974_SNOC_TO_BIMC);
+DEFINE_QNODE(slv_ampss, MSM8974_SNOC_SLV_AMPSS, 8, -1, 20);
+DEFINE_QNODE(slv_lpass, MSM8974_SNOC_SLV_LPASS, 8, -1, 21);
+DEFINE_QNODE(slv_usb3, MSM8974_SNOC_SLV_USB3, 8, -1, 22);
+DEFINE_QNODE(slv_wcss, MSM8974_SNOC_SLV_WCSS, 8, -1, 23);
+DEFINE_QNODE(slv_ocimem, MSM8974_SNOC_SLV_OCIMEM, 8, -1, 26);
+DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27);
+DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29);
+DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30);
+
+static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
+ [SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb,
+ [SNOC_MAS_QDSS_BAM] = &mas_qdss_bam,
+ [SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg,
+ [SNOC_TO_BIMC] = &snoc_to_bimc,
+ [SNOC_TO_CNOC] = &snoc_to_cnoc,
+ [SNOC_TO_PNOC] = &snoc_to_pnoc,
+ [SNOC_TO_OCMEM_VNOC] = &snoc_to_ocmem_vnoc,
+ [SNOC_MAS_CRYPTO_CORE0] = &mas_crypto_core0,
+ [SNOC_MAS_CRYPTO_CORE1] = &mas_crypto_core1,
+ [SNOC_MAS_LPASS_PROC] = &mas_lpass_proc,
+ [SNOC_MAS_MSS] = &mas_mss,
+ [SNOC_MAS_MSS_NAV] = &mas_mss_nav,
+ [SNOC_MAS_OCMEM_DMA] = &mas_ocmem_dma,
+ [SNOC_MAS_WCSS] = &mas_wcss,
+ [SNOC_MAS_QDSS_ETR] = &mas_qdss_etr,
+ [SNOC_MAS_USB3] = &mas_usb3,
+ [SNOC_SLV_AMPSS] = &slv_ampss,
+ [SNOC_SLV_LPASS] = &slv_lpass,
+ [SNOC_SLV_USB3] = &slv_usb3,
+ [SNOC_SLV_WCSS] = &slv_wcss,
+ [SNOC_SLV_OCIMEM] = &slv_ocimem,
+ [SNOC_SLV_SNOC_OCMEM] = &slv_snoc_ocmem,
+ [SNOC_SLV_SERVICE_SNOC] = &slv_service_snoc,
+ [SNOC_SLV_QDSS_STM] = &slv_qdss_stm,
+};
+
+static struct msm8974_icc_desc msm8974_snoc = {
+ .nodes = msm8974_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
+};
+
+static int msm8974_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static void msm8974_icc_rpm_smd_send(struct device *dev, int rsc_type,
+ char *name, int id, u64 val)
+{
+ int ret;
+
+ if (id == -1)
+ return;
+
+ /*
+ * Setting the bandwidth requests for some nodes fails and this same
+ * behavior occurs on the downstream MSM 3.4 kernel sources based on
+ * errors like this in that kernel:
+ *
+ * msm_rpm_get_error_from_ack(): RPM NACK Unsupported resource
+ * AXI: msm_bus_rpm_req(): RPM: Ack failed
+ * AXI: msm_bus_rpm_commit_arb(): RPM: Req fail: mas:32, bw:240000000
+ *
+ * Since there's no publicly available documentation for this hardware,
+ * and the bandwidth for some nodes in the path can be set properly,
+ * let's not return an error.
+ */
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, rsc_type, id,
+ val);
+ if (ret)
+ dev_dbg(dev, "Cannot set bandwidth for node %s (%d): %d\n",
+ name, id, ret);
+}
+
+static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct msm8974_icc_node *src_qn, *dst_qn;
+ struct msm8974_icc_provider *qp;
+ u64 sum_bw, max_peak_bw, rate;
+ u32 agg_avg = 0, agg_peak = 0;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ int ret, i;
+
+ src_qn = src->data;
+ dst_qn = dst->data;
+ provider = src->provider;
+ qp = to_msm8974_icc_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ msm8974_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* Set bandwidth on source node */
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
+ src_qn->name, src_qn->mas_rpm_id, sum_bw);
+
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
+ src_qn->name, src_qn->slv_rpm_id, sum_bw);
+
+ /* Set bandwidth on destination node */
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
+ dst_qn->name, dst_qn->mas_rpm_id, sum_bw);
+
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
+ dst_qn->name, dst_qn->slv_rpm_id, sum_bw);
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, src_qn->buswidth);
+
+ if (src_qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ dev_err(provider->dev, "%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ ret = 0;
+ }
+ }
+
+ src_qn->rate = rate;
+
+ return 0;
+}
+
+static int msm8974_icc_probe(struct platform_device *pdev)
+{
+ const struct msm8974_icc_desc *desc;
+ struct msm8974_icc_node **qnodes;
+ struct msm8974_icc_provider *qp;
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, msm8974_icc_bus_clocks,
+ sizeof(msm8974_icc_bus_clocks), GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = ARRAY_SIZE(msm8974_icc_bus_clocks);
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = msm8974_icc_set;
+ provider->aggregate = msm8974_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ goto err_disable_clks;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err_del_icc;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(dev, "registered node %s\n", node->name);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+
+err_del_icc:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ icc_provider_del(provider);
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return ret;
+}
+
+static int msm8974_icc_remove(struct platform_device *pdev)
+{
+ struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id msm8974_noc_of_match[] = {
+ { .compatible = "qcom,msm8974-bimc", .data = &msm8974_bimc},
+ { .compatible = "qcom,msm8974-cnoc", .data = &msm8974_cnoc},
+ { .compatible = "qcom,msm8974-mmssnoc", .data = &msm8974_mnoc},
+ { .compatible = "qcom,msm8974-ocmemnoc", .data = &msm8974_onoc},
+ { .compatible = "qcom,msm8974-pnoc", .data = &msm8974_pnoc},
+ { .compatible = "qcom,msm8974-snoc", .data = &msm8974_snoc},
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8974_noc_of_match);
+
+static struct platform_driver msm8974_noc_driver = {
+ .probe = msm8974_icc_probe,
+ .remove = msm8974_icc_remove,
+ .driver = {
+ .name = "qnoc-msm8974",
+ .of_match_table = msm8974_noc_of_match,
+ },
+};
+module_platform_driver(msm8974_noc_driver);
+MODULE_DESCRIPTION("Qualcomm MSM8974 NoC driver");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_LICENSE("GPL v2");
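With the msm8974 provider above registered, consumers reach these NoC nodes through the generic interconnect API rather than calling the driver directly. The following is a short, hedged sketch of a device-tree based consumer vote; the "memory" path name and the bandwidth numbers are assumptions, and the units passed to icc_set_bw() are kBps, matching the icc_units_to_bps() conversion used in the provider above.

#include <linux/device.h>
#include <linux/interconnect.h>

/* Sketch only: the "memory" path name and bandwidth values are assumptions. */
static int foo_vote_for_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* resolves the consumer's "interconnects"/"interconnect-names" DT entry */
	path = of_icc_get(dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* average and peak bandwidth in kBps; the provider aggregates all votes */
	ret = icc_set_bw(path, 100000, 200000);
	if (ret)
		dev_err(dev, "interconnect bandwidth vote failed: %d\n", ret);

	icc_put(path);
	return ret;
}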
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e3842eabcfdd..0b9d78a0f3ac 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -3,6 +3,10 @@
config IOMMU_IOVA
tristate
+# The IOASID library may also be used by non-IOMMU_API users
+config IOASID
+ tristate
+
# IOMMU_API always gets selected by whoever wants it.
config IOMMU_API
bool
@@ -138,6 +142,7 @@ config AMD_IOMMU
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
+ select IOMMU_DMA
depends on X86_64 && PCI && ACPI
---help---
With this option you can enable support for AMD IOMMU hardware in
@@ -207,6 +212,7 @@ config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86
select PCI_PASID
+ select PCI_PRI
select MMU_NOTIFIER
help
Shared Virtual Memory (SVM) provides a facility for devices
@@ -467,7 +473,7 @@ config QCOM_IOMMU
config HYPERV_IOMMU
bool "Hyper-V x2APIC IRQ Handling"
- depends on HYPERV
+ depends on HYPERV && X86
select IOMMU_API
default HYPERV
help
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4f405f926e73..97814cc861ea 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,13 +7,14 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
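The two hunks above wire in the new IOASID allocator (CONFIG_IOASID builds ioasid.o), a small library for handing out PASID-style IDs to SVM-capable IOMMU code and to users outside IOMMU_API. A hedged sketch of how that allocator is used, assuming the ioasid_alloc()/ioasid_free() interface exported by that file; the allocation set, the ID range and the use of an mm pointer as the private cookie are illustrative assumptions.

#include <linux/ioasid.h>
#include <linux/mm_types.h>

static struct ioasid_set foo_pasid_set;	/* illustrative allocation set */

static ioasid_t foo_alloc_pasid(struct mm_struct *mm)
{
	/* hand out an ID in [1, 0xfffff] and remember @mm as its private data */
	return ioasid_alloc(&foo_pasid_set, 1, 0xfffff, mm);
}

static void foo_free_pasid(ioasid_t pasid)
{
	if (pasid != INVALID_IOASID)
		ioasid_free(pasid);
}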
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 12e5039a7a25..bd25674ee4db 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -20,6 +20,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
+#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
@@ -88,8 +89,6 @@ const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
-static const struct dma_map_ops amd_iommu_dma_ops;
-
/*
* general struct to manage commands send to an IOMMU
*/
@@ -102,21 +101,6 @@ struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
-static void iova_domain_flush_tlb(struct iova_domain *iovad);
-
-/*
- * Data container for a dma_ops specific protection domain
- */
-struct dma_ops_domain {
- /* generic protection domain information */
- struct protection_domain domain;
-
- /* IOVA RB-Tree */
- struct iova_domain iovad;
-};
-
-static struct iova_domain reserved_iova_ranges;
-static struct lock_class_key reserved_rbtree_key;
/****************************************************************************
*
@@ -167,12 +151,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
-{
- BUG_ON(domain->flags != PD_DMA_OPS_MASK);
- return container_of(domain, struct dma_ops_domain, domain);
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -206,71 +184,61 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
return NULL;
}
-static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- *(u16 *)data = alias;
- return 0;
-}
-
-static u16 get_alias(struct device *dev)
+static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- u16 devid, ivrs_alias, pci_alias;
-
- /* The callers make sure that get_device_id() does not fail here */
- devid = get_device_id(dev);
+ u16 devid = pci_dev_id(pdev);
- /* For ACPI HID devices, we simply return the devid as such */
- if (!dev_is_pci(dev))
- return devid;
+ if (devid == alias)
+ return 0;
- ivrs_alias = amd_iommu_alias_table[devid];
+ amd_iommu_rlookup_table[alias] =
+ amd_iommu_rlookup_table[devid];
+ memcpy(amd_iommu_dev_table[alias].data,
+ amd_iommu_dev_table[devid].data,
+ sizeof(amd_iommu_dev_table[alias].data));
- pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+ return 0;
+}
- if (ivrs_alias == pci_alias)
- return ivrs_alias;
+static void clone_aliases(struct pci_dev *pdev)
+{
+ if (!pdev)
+ return;
/*
- * DMA alias showdown
- *
- * The IVRS is fairly reliable in telling us about aliases, but it
- * can't know about every screwy device. If we don't have an IVRS
- * reported alias, use the PCI reported alias. In that case we may
- * still need to initialize the rlookup and dev_table entries if the
- * alias is to a non-existent device.
+ * The IVRS alias stored in the alias table may not be
+ * part of the PCI DMA aliases if its bus differs
+ * from the original device.
*/
- if (ivrs_alias == devid) {
- if (!amd_iommu_rlookup_table[pci_alias]) {
- amd_iommu_rlookup_table[pci_alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[pci_alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[pci_alias].data));
- }
+ clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
- return pci_alias;
- }
+ pci_for_each_dma_alias(pdev, clone_alias, NULL);
+}
- pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d "
- "for device [%04x:%04x], kernel reported alias "
- "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
- PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device,
- PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
- PCI_FUNC(pci_alias));
+static struct pci_dev *setup_aliases(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 ivrs_alias;
+
+ /* For ACPI HID devices, there are no aliases */
+ if (!dev_is_pci(dev))
+ return NULL;
/*
- * If we don't have a PCI DMA alias and the IVRS alias is on the same
- * bus, then the IVRS table may know about a quirk that we don't.
+ * Add the IVRS alias to the pci aliases if it is on the same
+ * bus. The IVRS table may know about a quirk that we don't.
*/
- if (pci_alias == devid &&
+ ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
pci_add_dma_alias(pdev, ivrs_alias & 0xff);
pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
}
- return ivrs_alias;
+ clone_aliases(pdev);
+
+ return pdev;
}
static struct iommu_dev_data *find_dev_data(u16 devid)
@@ -408,7 +376,7 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
- dev_data->alias = get_alias(dev);
+ dev_data->pdev = setup_aliases(dev);
/*
* By default we use passthrough mode for IOMMUv2 capable device.
@@ -433,20 +401,16 @@ static int iommu_init_device(struct device *dev)
static void iommu_ignore_device(struct device *dev)
{
- u16 alias;
int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
- alias = get_alias(dev);
-
+ amd_iommu_rlookup_table[devid] = NULL;
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
- memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
- amd_iommu_rlookup_table[devid] = NULL;
- amd_iommu_rlookup_table[alias] = NULL;
+ setup_aliases(dev);
}
static void iommu_uninit_device(struct device *dev)
@@ -620,8 +584,7 @@ retry:
pasid, address, flags);
break;
case EVENT_TYPE_INV_PPR_REQ:
- pasid = ((event[0] >> 16) & 0xFFFF)
- | ((event[1] << 6) & 0xF0000);
+ pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
@@ -856,17 +819,18 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
struct iommu_cmd *cmd)
{
u8 *target;
-
- target = iommu->cmd_buf + iommu->cmd_buf_tail;
-
- iommu->cmd_buf_tail += sizeof(*cmd);
- iommu->cmd_buf_tail %= CMD_BUFFER_SIZE;
+ u32 tail;
/* Copy command to buffer */
+ tail = iommu->cmd_buf_tail;
+ target = iommu->cmd_buf + tail;
memcpy(target, cmd, sizeof(*cmd));
+ tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+ iommu->cmd_buf_tail = tail;
+
/* Tell the IOMMU about it */
- writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+ writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
@@ -1216,6 +1180,13 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
return iommu_queue_command(iommu, &cmd);
}
+static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct amd_iommu *iommu = data;
+
+ return iommu_flush_dte(iommu, alias);
+}
+
/*
* Command send function for invalidating a device table entry
*/
@@ -1226,14 +1197,22 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
- ret = iommu_flush_dte(iommu, dev_data->devid);
- if (!ret && alias != dev_data->devid)
- ret = iommu_flush_dte(iommu, alias);
+ if (dev_data->pdev)
+ ret = pci_for_each_dma_alias(dev_data->pdev,
+ device_flush_dte_alias, iommu);
+ else
+ ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
return ret;
+ alias = amd_iommu_alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ ret = iommu_flush_dte(iommu, alias);
+ if (ret)
+ return ret;
+ }
+
if (dev_data->ats.enabled)
ret = device_flush_iotlb(dev_data, 0, ~0UL);
@@ -1282,12 +1261,6 @@ static void domain_flush_pages(struct protection_domain *domain,
__domain_flush_pages(domain, address, size, 0);
}
-/* Flush the whole IO/TLB for a given protection domain */
-static void domain_flush_tlb(struct protection_domain *domain)
-{
- __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
-}
-
/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
@@ -1735,43 +1708,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
/****************************************************************************
*
- * The next functions belong to the address allocator for the dma_ops
- * interface functions.
- *
- ****************************************************************************/
-
-
-static unsigned long dma_ops_alloc_iova(struct device *dev,
- struct dma_ops_domain *dma_dom,
- unsigned int pages, u64 dma_mask)
-{
- unsigned long pfn = 0;
-
- pages = __roundup_pow_of_two(pages);
-
- if (dma_mask > DMA_BIT_MASK(32))
- pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(DMA_BIT_MASK(32)), false);
-
- if (!pfn)
- pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(dma_mask), true);
-
- return (pfn << PAGE_SHIFT);
-}
-
-static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
- unsigned long address,
- unsigned int pages)
-{
- pages = __roundup_pow_of_two(pages);
- address >>= PAGE_SHIFT;
-
- free_iova_fast(&dma_dom->iovad, address, pages);
-}
-
-/****************************************************************************
- *
* The next functions belong to the domain allocation. A domain is
* allocated for every IOMMU as the default domain. If device isolation
* is enabled, every device get its own domain. The most important thing
@@ -1846,42 +1782,23 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dom->domain.lock, flags);
- domain_flush_tlb(&dom->domain);
- domain_flush_complete(&dom->domain);
- spin_unlock_irqrestore(&dom->domain.lock, flags);
-}
-
-static void iova_domain_flush_tlb(struct iova_domain *iovad)
-{
- struct dma_ops_domain *dom;
-
- dom = container_of(iovad, struct dma_ops_domain, iovad);
-
- dma_ops_domain_flush_tlb(dom);
-}
-
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
*/
-static void dma_ops_domain_free(struct dma_ops_domain *dom)
+static void dma_ops_domain_free(struct protection_domain *domain)
{
- if (!dom)
+ if (!domain)
return;
- put_iova_domain(&dom->iovad);
+ iommu_put_dma_cookie(&domain->domain);
- free_pagetable(&dom->domain);
+ free_pagetable(domain);
- if (dom->domain.id)
- domain_id_free(dom->domain.id);
+ if (domain->id)
+ domain_id_free(domain->id);
- kfree(dom);
+ kfree(domain);
}
/*
@@ -1889,35 +1806,30 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
* It also initializes the page table and the address allocator data
* structures required for the dma_ops interface
*/
-static struct dma_ops_domain *dma_ops_domain_alloc(void)
+static struct protection_domain *dma_ops_domain_alloc(void)
{
- struct dma_ops_domain *dma_dom;
+ struct protection_domain *domain;
- dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
- if (!dma_dom)
+ domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
+ if (!domain)
return NULL;
- if (protection_domain_init(&dma_dom->domain))
- goto free_dma_dom;
-
- dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
- dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- dma_dom->domain.flags = PD_DMA_OPS_MASK;
- if (!dma_dom->domain.pt_root)
- goto free_dma_dom;
-
- init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
+ if (protection_domain_init(domain))
+ goto free_domain;
- if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
- goto free_dma_dom;
+ domain->mode = PAGE_MODE_3_LEVEL;
+ domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ domain->flags = PD_DMA_OPS_MASK;
+ if (!domain->pt_root)
+ goto free_domain;
- /* Initialize reserved ranges */
- copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
+ if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+ goto free_domain;
- return dma_dom;
+ return domain;
-free_dma_dom:
- dma_ops_domain_free(dma_dom);
+free_domain:
+ dma_ops_domain_free(domain);
return NULL;
}
@@ -2015,11 +1927,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu;
- u16 alias;
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -2032,8 +1942,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
/* Update device table */
set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
- if (alias != dev_data->devid)
- set_dte_entry(alias, domain, ats, dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
device_flush_dte(dev_data);
}
@@ -2042,17 +1951,14 @@ static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- u16 alias;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
clear_dte_entry(dev_data->devid);
- if (alias != dev_data->devid)
- clear_dte_entry(alias);
+ clone_aliases(dev_data->pdev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -2285,8 +2191,8 @@ static int amd_iommu_add_device(struct device *dev)
domain = iommu_get_domain_for_dev(dev);
if (domain->type == IOMMU_DOMAIN_IDENTITY)
dev_data->passthrough = true;
- else
- dev->dma_ops = &amd_iommu_dma_ops;
+ else if (domain->type == IOMMU_DOMAIN_DMA)
+ iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
out:
iommu_completion_wait(iommu);
@@ -2320,43 +2226,32 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
return acpihid_device_group(dev);
}
+static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ return -ENODEV;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = !amd_iommu_unmap_flush;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+}
+
/*****************************************************************************
*
* The next functions belong to the dma_ops mapping/unmapping code.
*
*****************************************************************************/
-/*
- * In the dma_ops path we only have the struct device. This function
- * finds the corresponding IOMMU, the protection domain and the
- * requestor id for a given device.
- * If the device is not yet associated with a domain this is also done
- * in this function.
- */
-static struct protection_domain *get_domain(struct device *dev)
-{
- struct protection_domain *domain;
- struct iommu_domain *io_domain;
-
- if (!check_device(dev))
- return ERR_PTR(-EINVAL);
-
- domain = get_dev_data(dev)->domain;
- if (domain == NULL && get_dev_data(dev)->defer_attach) {
- get_dev_data(dev)->defer_attach = false;
- io_domain = iommu_get_domain_for_dev(dev);
- domain = to_pdomain(io_domain);
- attach_device(dev, domain);
- }
- if (domain == NULL)
- return ERR_PTR(-EBUSY);
-
- if (!dma_ops_domain(domain))
- return ERR_PTR(-EBUSY);
-
- return domain;
-}
-
static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
@@ -2364,13 +2259,7 @@ static void update_device_table(struct protection_domain *domain)
list_for_each_entry(dev_data, &domain->dev_list, list) {
set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
dev_data->iommu_v2);
-
- if (dev_data->devid == dev_data->alias)
- continue;
-
- /* There is an alias, update device table entry for it */
- set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled,
- dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
}
}
@@ -2382,458 +2271,6 @@ static void update_domain(struct protection_domain *domain)
domain_flush_tlb_pde(domain);
}
-static int dir2prot(enum dma_data_direction direction)
-{
- if (direction == DMA_TO_DEVICE)
- return IOMMU_PROT_IR;
- else if (direction == DMA_FROM_DEVICE)
- return IOMMU_PROT_IW;
- else if (direction == DMA_BIDIRECTIONAL)
- return IOMMU_PROT_IW | IOMMU_PROT_IR;
- else
- return 0;
-}
-
-/*
- * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is used by all
- * mapping functions provided with this IOMMU driver.
- * Must be called with the domain lock held.
- */
-static dma_addr_t __map_single(struct device *dev,
- struct dma_ops_domain *dma_dom,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction direction,
- u64 dma_mask)
-{
- dma_addr_t offset = paddr & ~PAGE_MASK;
- dma_addr_t address, start, ret;
- unsigned long flags;
- unsigned int pages;
- int prot = 0;
- int i;
-
- pages = iommu_num_pages(paddr, size, PAGE_SIZE);
- paddr &= PAGE_MASK;
-
- address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
- if (!address)
- goto out;
-
- prot = dir2prot(direction);
-
- start = address;
- for (i = 0; i < pages; ++i) {
- ret = iommu_map_page(&dma_dom->domain, start, paddr,
- PAGE_SIZE, prot, GFP_ATOMIC);
- if (ret)
- goto out_unmap;
-
- paddr += PAGE_SIZE;
- start += PAGE_SIZE;
- }
- address += offset;
-
- domain_flush_np_cache(&dma_dom->domain, address, size);
-
-out:
- return address;
-
-out_unmap:
-
- for (--i; i >= 0; --i) {
- start -= PAGE_SIZE;
- iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
- }
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
-
- dma_ops_free_iova(dma_dom, address, pages);
-
- return DMA_MAPPING_ERROR;
-}
-
-/*
- * Does the reverse of the __map_single function. Must be called with
- * the domain lock held too
- */
-static void __unmap_single(struct dma_ops_domain *dma_dom,
- dma_addr_t dma_addr,
- size_t size,
- int dir)
-{
- dma_addr_t i, start;
- unsigned int pages;
-
- pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
- dma_addr &= PAGE_MASK;
- start = dma_addr;
-
- for (i = 0; i < pages; ++i) {
- iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
- start += PAGE_SIZE;
- }
-
- if (amd_iommu_unmap_flush) {
- unsigned long flags;
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
- dma_ops_free_iova(dma_dom, dma_addr, pages);
- } else {
- pages = __roundup_pow_of_two(pages);
- queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
- }
-}
-
-/*
- * The exported map_single function for dma_ops.
- */
-static dma_addr_t map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t paddr = page_to_phys(page) + offset;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- u64 dma_mask;
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return (dma_addr_t)paddr;
- else if (IS_ERR(domain))
- return DMA_MAPPING_ERROR;
-
- dma_mask = *dev->dma_mask;
- dma_dom = to_dma_ops_domain(domain);
-
- return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
-}
-
-/*
- * The exported unmap_single function for dma_ops.
- */
-static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- dma_dom = to_dma_ops_domain(domain);
-
- __unmap_single(dma_dom, dma_addr, size, dir);
-}
-
-static int sg_num_pages(struct device *dev,
- struct scatterlist *sglist,
- int nelems)
-{
- unsigned long mask, boundary_size;
- struct scatterlist *s;
- int i, npages = 0;
-
- mask = dma_get_seg_boundary(dev);
- boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
- 1UL << (BITS_PER_LONG - PAGE_SHIFT);
-
- for_each_sg(sglist, s, nelems, i) {
- int p, n;
-
- s->dma_address = npages << PAGE_SHIFT;
- p = npages % boundary_size;
- n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
- if (p + n > boundary_size)
- npages += boundary_size - p;
- npages += n;
- }
-
- return npages;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static int map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int mapped_pages = 0, npages = 0, prot = 0, i;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct scatterlist *s;
- unsigned long address;
- u64 dma_mask;
- int ret;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return 0;
-
- dma_dom = to_dma_ops_domain(domain);
- dma_mask = *dev->dma_mask;
-
- npages = sg_num_pages(dev, sglist, nelems);
-
- address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (!address)
- goto out_err;
-
- prot = dir2prot(direction);
-
- /* Map all sg entries */
- for_each_sg(sglist, s, nelems, i) {
- int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
-
- for (j = 0; j < pages; ++j) {
- unsigned long bus_addr, phys_addr;
-
- bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
- phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
- ret = iommu_map_page(domain, bus_addr, phys_addr,
- PAGE_SIZE, prot,
- GFP_ATOMIC | __GFP_NOWARN);
- if (ret)
- goto out_unmap;
-
- mapped_pages += 1;
- }
- }
-
- /* Everything is mapped - write the right values into s->dma_address */
- for_each_sg(sglist, s, nelems, i) {
- /*
- * Add in the remaining piece of the scatter-gather offset that
- * was masked out when we were determining the physical address
- * via (sg_phys(s) & PAGE_MASK) earlier.
- */
- s->dma_address += address + (s->offset & ~PAGE_MASK);
- s->dma_length = s->length;
- }
-
- if (s)
- domain_flush_np_cache(domain, s->dma_address, s->dma_length);
-
- return nelems;
-
-out_unmap:
- dev_err(dev, "IOMMU mapping error in map_sg (io-pages: %d reason: %d)\n",
- npages, ret);
-
- for_each_sg(sglist, s, nelems, i) {
- int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
-
- for (j = 0; j < pages; ++j) {
- unsigned long bus_addr;
-
- bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
- iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
-
- if (--mapped_pages == 0)
- goto out_free_iova;
- }
- }
-
-out_free_iova:
- free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
-
-out_err:
- return 0;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static void unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- unsigned long startaddr;
- int npages;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- startaddr = sg_dma_address(sglist) & PAGE_MASK;
- dma_dom = to_dma_ops_domain(domain);
- npages = sg_num_pages(dev, sglist, nelems);
-
- __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
-}
-
-/*
- * The exported alloc_coherent function for dma_ops.
- */
-static void *alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs)
-{
- u64 dma_mask = dev->coherent_dma_mask;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct page *page;
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL) {
- page = alloc_pages(flag, get_order(size));
- *dma_addr = page_to_phys(page);
- return page_address(page);
- } else if (IS_ERR(domain))
- return NULL;
-
- dma_dom = to_dma_ops_domain(domain);
- size = PAGE_ALIGN(size);
- dma_mask = dev->coherent_dma_mask;
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- flag |= __GFP_ZERO;
-
- page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
- if (!page) {
- if (!gfpflags_allow_blocking(flag))
- return NULL;
-
- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flag & __GFP_NOWARN);
- if (!page)
- return NULL;
- }
-
- if (!dma_mask)
- dma_mask = *dev->dma_mask;
-
- *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
- size, DMA_BIDIRECTIONAL, dma_mask);
-
- if (*dma_addr == DMA_MAPPING_ERROR)
- goto out_free;
-
- return page_address(page);
-
-out_free:
-
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, get_order(size));
-
- return NULL;
-}
-
-/*
- * The exported free_coherent function for dma_ops.
- */
-static void free_coherent(struct device *dev, size_t size,
- void *virt_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct page *page;
-
- page = virt_to_page(virt_addr);
- size = PAGE_ALIGN(size);
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- goto free_mem;
-
- dma_dom = to_dma_ops_domain(domain);
-
- __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
-
-free_mem:
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, get_order(size));
-}
-
-/*
- * This function is called by the DMA layer to find out if we can handle a
- * particular device. It is part of the dma_ops.
- */
-static int amd_iommu_dma_supported(struct device *dev, u64 mask)
-{
- if (!dma_direct_supported(dev, mask))
- return 0;
- return check_device(dev);
-}
-
-static const struct dma_map_ops amd_iommu_dma_ops = {
- .alloc = alloc_coherent,
- .free = free_coherent,
- .map_page = map_page,
- .unmap_page = unmap_page,
- .map_sg = map_sg,
- .unmap_sg = unmap_sg,
- .dma_supported = amd_iommu_dma_supported,
- .mmap = dma_common_mmap,
- .get_sgtable = dma_common_get_sgtable,
-};
-
-static int init_reserved_iova_ranges(void)
-{
- struct pci_dev *pdev = NULL;
- struct iova *val;
-
- init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
-
- lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
- &reserved_rbtree_key);
-
- /* MSI memory range */
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
- if (!val) {
- pr_err("Reserving MSI range failed\n");
- return -ENOMEM;
- }
-
- /* HT memory range */
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
- if (!val) {
- pr_err("Reserving HT range failed\n");
- return -ENOMEM;
- }
-
- /*
- * Memory used for PCI resources
- * FIXME: Check whether we can reserve the PCI-hole completly
- */
- for_each_pci_dev(pdev) {
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
- struct resource *r = &pdev->resource[i];
-
- if (!(r->flags & IORESOURCE_MEM))
- continue;
-
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(r->start),
- IOVA_PFN(r->end));
- if (!val) {
- pci_err(pdev, "Reserve pci-resource range %pR failed\n", r);
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
int __init amd_iommu_init_api(void)
{
int ret, err = 0;
@@ -2842,10 +2279,6 @@ int __init amd_iommu_init_api(void)
if (ret)
return ret;
- ret = init_reserved_iova_ranges();
- if (ret)
- return ret;
-
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
return err;
@@ -2916,7 +2349,6 @@ static void protection_domain_free(struct protection_domain *domain)
static int protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
- mutex_init(&domain->api_lock);
domain->id = domain_id_alloc();
if (!domain->id)
return -ENOMEM;
@@ -2947,7 +2379,6 @@ out_err:
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
- struct dma_ops_domain *dma_domain;
switch (type) {
case IOMMU_DOMAIN_UNMANAGED:
@@ -2968,12 +2399,11 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
break;
case IOMMU_DOMAIN_DMA:
- dma_domain = dma_ops_domain_alloc();
- if (!dma_domain) {
+ pdomain = dma_ops_domain_alloc();
+ if (!pdomain) {
pr_err("Failed to allocate\n");
return NULL;
}
- pdomain = &dma_domain->domain;
break;
case IOMMU_DOMAIN_IDENTITY:
pdomain = protection_domain_alloc();
@@ -2992,7 +2422,6 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
domain = to_pdomain(dom);
@@ -3007,8 +2436,7 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
switch (dom->type) {
case IOMMU_DOMAIN_DMA:
/* Now release the domain */
- dma_dom = to_dma_ops_domain(domain);
- dma_ops_domain_free(dma_dom);
+ dma_ops_domain_free(domain);
break;
default:
if (domain->mode != PAGE_MODE_NONE)
@@ -3064,6 +2492,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
return -EINVAL;
dev_data = dev->archdata.iommu;
+ dev_data->defer_attach = false;
iommu = amd_iommu_rlookup_table[dev_data->devid];
if (!iommu)
@@ -3089,7 +2518,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot,
+ gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
@@ -3103,9 +2533,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- mutex_lock(&domain->api_lock);
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
- mutex_unlock(&domain->api_lock);
+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
domain_flush_np_cache(domain, iova, page_size);
@@ -3117,16 +2545,11 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
return 0;
- mutex_lock(&domain->api_lock);
- unmap_size = iommu_unmap_page(domain, iova, page_size);
- mutex_unlock(&domain->api_lock);
-
- return unmap_size;
+ return iommu_unmap_page(domain, iova, page_size);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -3227,19 +2650,6 @@ static void amd_iommu_put_resv_regions(struct device *dev,
kfree(entry);
}
-static void amd_iommu_apply_resv_region(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region)
-{
- struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
- unsigned long start, end;
-
- start = IOVA_PFN(region->start);
- end = IOVA_PFN(region->start + region->length - 1);
-
- WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
-}
-
static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
{
@@ -3276,9 +2686,9 @@ const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
+ .domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
.put_resv_regions = amd_iommu_put_resv_regions,
- .apply_resv_region = amd_iommu_apply_resv_region,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
@@ -3590,9 +3000,23 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
struct protection_domain *pdomain;
+ struct iommu_domain *io_domain;
+ struct device *dev = &pdev->dev;
+
+ if (!check_device(dev))
+ return NULL;
- pdomain = get_domain(&pdev->dev);
- if (IS_ERR(pdomain))
+ pdomain = get_dev_data(dev)->domain;
+ if (pdomain == NULL && get_dev_data(dev)->defer_attach) {
+ get_dev_data(dev)->defer_attach = false;
+ io_domain = iommu_get_domain_for_dev(dev);
+ pdomain = to_pdomain(io_domain);
+ attach_device(dev, pdomain);
+ }
+ if (pdomain == NULL)
+ return NULL;
+
+ if (!dma_ops_domain(pdomain))
return NULL;
/* Only return IOMMUv2 domains */
@@ -3732,7 +3156,20 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
iommu_flush_dte(iommu, devid);
}
-static struct irq_remap_table *alloc_irq_table(u16 devid)
+static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
+ void *data)
+{
+ struct irq_remap_table *table = data;
+
+ irq_lookup_table[alias] = table;
+ set_dte_irq_entry(alias, table);
+
+ iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+
+ return 0;
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
@@ -3778,7 +3215,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
table = new_table;
new_table = NULL;
- set_remap_table_entry(iommu, devid, table);
+ if (pdev)
+ pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
+ table);
+ else
+ set_remap_table_entry(iommu, devid, table);
+
if (devid != alias)
set_remap_table_entry(iommu, alias, table);
@@ -3795,7 +3237,8 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count, bool align)
+static int alloc_irq_index(u16 devid, int count, bool align,
+ struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
@@ -3805,7 +3248,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
if (!iommu)
return -ENODEV;
- table = alloc_irq_table(devid);
+ table = alloc_irq_table(devid, pdev);
if (!table)
return -ENODEV;
@@ -4238,7 +3681,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_remap_table *table;
struct amd_iommu *iommu;
- table = alloc_irq_table(devid);
+ table = alloc_irq_table(devid, NULL);
if (table) {
if (!table->min_index) {
/*
@@ -4255,11 +3698,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
} else {
index = -ENOMEM;
}
- } else {
+ } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
+ info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
- index = alloc_irq_index(devid, nr_irqs, align);
+ index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
+ } else {
+ index = alloc_irq_index(devid, nr_irqs, false, NULL);
}
+
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
ret = index;
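The do_attach()/do_detach()/update_device_table() hunks above drop the single cached alias ID in favour of walking every DMA alias of the PCI device. A minimal sketch of that pattern, using the real pci_for_each_dma_alias() callback signature; the helper names and the ats/iommu_v2 arguments here are illustrative only:

static int sketch_set_alias_dte(struct pci_dev *pdev, u16 alias, void *data)
{
	struct protection_domain *domain = data;

	/* Program the device-table entry for this alias ID as well. */
	set_dte_entry(alias, domain, false, false);
	return 0;	/* a non-zero return would stop the walk early */
}

static void sketch_update_aliases(struct pci_dev *pdev,
				  struct protection_domain *domain)
{
	/* The callback runs for the device itself and for every DMA alias. */
	pci_for_each_dma_alias(pdev, sketch_set_alias_dte, domain);
}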
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 17bd5a349119..f52f59d5c6bd 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -468,7 +468,6 @@ struct protection_domain {
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
- struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
@@ -639,8 +638,8 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
+ struct pci_dev *pdev;
u16 devid; /* PCI Device ID */
- u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
index 5c87a38620c4..b2fe72a8f019 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -109,7 +109,7 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
-static int arm_mmu500_reset(struct arm_smmu_device *smmu)
+int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
u32 reg, major;
int i;
@@ -170,5 +170,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
"calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
+ if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500"))
+ return qcom_smmu_impl_init(smmu);
+
return smmu;
}
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
new file mode 100644
index 000000000000..24c071c1d8b0
--- /dev/null
+++ b/drivers/iommu/arm-smmu-qcom.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/qcom_scm.h>
+
+#include "arm-smmu.h"
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+};
+
+static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
+{
+ int ret;
+
+ arm_mmu500_reset(smmu);
+
+ /*
+	 * To address performance degradation in non-real-time clients,
+	 * such as USB and UFS, turn off wait-for-safe on sdm845-based boards,
+	 * such as MTP and db845, whose firmware implements secure monitor
+	 * call handlers to turn the wait-for-safe logic on and off.
+ */
+ ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
+ if (ret)
+ dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");
+
+ return ret;
+}
+
+static const struct arm_smmu_impl qcom_smmu_impl = {
+ .reset = qcom_sdm845_smmu500_reset,
+};
+
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct qcom_smmu *qsmmu;
+
+ qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+ if (!qsmmu)
+ return ERR_PTR(-ENOMEM);
+
+ qsmmu->smmu = *smmu;
+
+ qsmmu->smmu.impl = &qcom_smmu_impl;
+ devm_kfree(smmu->dev, smmu);
+
+ return &qsmmu->smmu;
+}
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8da93e730d6f..effe72eb89e7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2172,7 +2172,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
cfg->cd.asid = (u16)asid;
cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+ cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
return 0;
out_free_asid:
@@ -2448,7 +2448,7 @@ out_unlock:
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
@@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Interrupt lines */
- irq = platform_get_irq_byname(pdev, "combined");
+ irq = platform_get_irq_byname_optional(pdev, "combined");
if (irq > 0)
smmu->combined_irq = irq;
else {
- irq = platform_get_irq_byname(pdev, "eventq");
+ irq = platform_get_irq_byname_optional(pdev, "eventq");
if (irq > 0)
smmu->evtq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "priq");
+ irq = platform_get_irq_byname_optional(pdev, "priq");
if (irq > 0)
smmu->priq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "gerror");
+ irq = platform_get_irq_byname_optional(pdev, "gerror");
if (irq > 0)
smmu->gerr_irq = irq;
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7c503a6bc585..4f1a350d9529 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/amba/bus.h>
@@ -122,7 +123,7 @@ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
if (pm_runtime_enabled(smmu->dev))
- pm_runtime_put(smmu->dev);
+ pm_runtime_put_autosuspend(smmu->dev);
}
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -244,6 +245,9 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
unsigned int spin_cnt, delay;
u32 reg;
+ if (smmu->impl && unlikely(smmu->impl->tlb_sync))
+ return smmu->impl->tlb_sync(smmu, page, sync, status);
+
arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
@@ -268,9 +272,8 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
-static void arm_smmu_tlb_sync_context(void *cookie)
+static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
unsigned long flags;
@@ -280,13 +283,6 @@ static void arm_smmu_tlb_sync_context(void *cookie)
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
-static void arm_smmu_tlb_sync_vmid(void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
-
- arm_smmu_tlb_sync_global(smmu_domain->smmu);
-}
-
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
@@ -297,7 +293,7 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
wmb();
arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
- arm_smmu_tlb_sync_context(cookie);
+ arm_smmu_tlb_sync_context(smmu_domain);
}
static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -312,18 +308,16 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
}
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+ size_t granule, void *cookie, int reg)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- int reg, idx = cfg->cbndx;
+ int idx = cfg->cbndx;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
-
if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
iova = (iova >> 12) << 12;
iova |= cfg->asid;
@@ -342,16 +336,15 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
}
static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+ size_t granule, void *cookie, int reg)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- int reg, idx = smmu_domain->cfg.cbndx;
+ int idx = smmu_domain->cfg.cbndx;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12;
do {
if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
@@ -362,85 +355,98 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
} while (size -= granule);
}
-/*
- * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
- * almost negligible, but the benefit of getting the first one in as far ahead
- * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
- */
-static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
-
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- wmb();
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVA);
+ arm_smmu_tlb_sync_context(cookie);
+}
- arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+ arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+}
- ops->tlb_inv_range(iova, size, granule, false, cookie);
- ops->tlb_sync(cookie);
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2);
+ arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
+ arm_smmu_tlb_sync_context(cookie);
+}
- ops->tlb_inv_range(iova, size, granule, true, cookie);
- ops->tlb_sync(cookie);
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
}
-static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
+static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_context_s2(cookie);
+}
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
+ */
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
- ops->tlb_inv_range(iova, granule, granule, true, cookie);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}
-static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_range_s1,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s1,
};
-static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_range_s2,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2,
};
-static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync,
- .tlb_sync = arm_smmu_tlb_sync_vmid,
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -472,6 +478,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
struct arm_smmu_device *smmu = dev;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
@@ -481,11 +489,19 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
if (!gfsr)
return IRQ_NONE;
- dev_err_ratelimited(smmu->dev,
- "Unexpected global fault, this could be serious\n");
- dev_err_ratelimited(smmu->dev,
- "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
- gfsr, gfsynr0, gfsynr1, gfsynr2);
+ if (__ratelimit(&rs)) {
+ if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
+ (gfsr & sGFSR_USF))
+ dev_err(smmu->dev,
+ "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
+ (u16)gfsynr1);
+ else
+ dev_err(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+ }
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
return IRQ_HANDLED;
@@ -536,8 +552,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
} else {
- cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
- cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+ cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
}
}
}
@@ -770,7 +786,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.ias = ias,
.oas = oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
- .tlb = &smmu_domain->flush_ops->tlb,
+ .tlb = smmu_domain->flush_ops,
.iommu_dev = smmu->dev,
};
@@ -1039,8 +1055,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (!group)
- group = ERR_PTR(-ENOMEM);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_err;
@@ -1154,13 +1168,27 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+ /*
+	 * Set up an autosuspend delay to avoid bouncing runpm state.
+	 * Otherwise, if a driver for a suspended consumer device
+	 * unmaps buffers, it will runpm resume/suspend for each one.
+	 *
+	 * For example, when used by a GPU device, an application or
+	 * game exiting can trigger unmapping 100s or 1000s of
+	 * buffers. With a runpm cycle for each buffer, that adds up
+	 * to 5-10 seconds of reprogramming the context bank, while
+	 * the system appears locked up to the user.
+ */
+ pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+ pm_runtime_use_autosuspend(smmu->dev);
+
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1200,7 +1228,7 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
+ smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
@@ -1211,11 +1239,16 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->flush_ops) {
- arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb_sync(smmu_domain);
- arm_smmu_rpm_put(smmu);
- }
+ if (!smmu)
+ return;
+
+ arm_smmu_rpm_get(smmu);
+ if (smmu->version == ARM_SMMU_V2 ||
+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+ arm_smmu_tlb_sync_context(smmu_domain);
+ else
+ arm_smmu_tlb_sync_global(smmu);
+ arm_smmu_rpm_put(smmu);
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -2062,10 +2095,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
for (i = 0; i < num_irqs; ++i) {
int irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- dev_err(dev, "failed to get irq index %d\n", i);
+ if (irq < 0)
return -ENODEV;
- }
smmu->irqs[i] = irq;
}
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index b19b6cae9b5e..62b9f0cec49b 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -79,6 +79,8 @@
#define ID7_MINOR GENMASK(3, 0)
#define ARM_SMMU_GR0_sGFSR 0x48
+#define sGFSR_USF BIT(1)
+
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
@@ -304,17 +306,10 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_BYPASS,
};
-struct arm_smmu_flush_ops {
- struct iommu_flush_ops tlb;
- void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
- bool leaf, void *cookie);
- void (*tlb_sync)(void *cookie);
-};
-
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
- const struct arm_smmu_flush_ops *flush_ops;
+ const struct iommu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
bool non_strict;
@@ -335,6 +330,8 @@ struct arm_smmu_impl {
int (*cfg_probe)(struct arm_smmu_device *smmu);
int (*reset)(struct arm_smmu_device *smmu);
int (*init_context)(struct arm_smmu_domain *smmu_domain);
+ void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
+ int status);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
@@ -398,5 +395,8 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
+
+int arm_mmu500_reset(struct arm_smmu_device *smmu);
#endif /* _ARM_SMMU_H */
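The mair handling in the arm-smmu-v3.c and arm-smmu.c hunks above (and in the io-pgtable-arm.c hunk further below) follows arm_lpae_s1_cfg.mair becoming a single 64-bit value: SMMU-v3 programs it whole into the context descriptor, while SMMU-v2 splits it back into the 32-bit MAIR0/MAIR1 context-bank registers. A minimal sketch of that split, with an illustrative helper name:

static inline void sketch_split_mair(u64 mair, u32 *mair0, u32 *mair1)
{
	/* Each 32-bit MAIRn register carries four 8-bit memory-attribute fields. */
	*mair0 = lower_32_bits(mair);	/* attribute indices 0-3 */
	*mair1 = upper_32_bits(mair);	/* attribute indices 4-7 */
}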
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f321279baf9e..0cc702a70a96 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
+static int iommu_dma_deferred_attach(struct device *dev,
+ struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = domain->ops;
+
+ if (!is_kdump_kernel())
+ return 0;
+
+ if (unlikely(ops->is_attach_deferred &&
+ ops->is_attach_deferred(domain, dev)))
+ return iommu_attach_device(domain, dev);
+
+ return 0;
+}
+
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
@@ -405,8 +421,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
iova_len = roundup_pow_of_two(iova_len);
- if (dev->bus_dma_mask)
- dma_limit &= dev->bus_dma_mask;
+ dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
@@ -462,7 +477,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot)
+ size_t size, int prot, dma_addr_t dma_mask)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -470,13 +485,16 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return DMA_MAPPING_ERROR;
+
size = iova_align(iovad, size + iova_off);
- iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;
- if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
return DMA_MAPPING_ERROR;
}
@@ -579,6 +597,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
*dma_handle = DMA_MAPPING_ERROR;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return NULL;
+
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
min_size = PAGE_SIZE;
@@ -611,7 +632,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+ if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size)
goto out_free_sg;
@@ -659,7 +680,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_cpu(dev, phys, size, dir);
+ arch_sync_dma_for_cpu(phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -671,7 +692,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -685,7 +706,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -699,7 +720,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -711,10 +732,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;
- dma_handle =__iommu_dma_map(dev, phys, size, prot);
+ dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return dma_handle;
}
@@ -821,6 +842,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long mask = dma_get_seg_boundary(dev);
int i;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return 0;
+
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -871,7 +895,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
+ if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
@@ -911,7 +935,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+ dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1017,7 +1042,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (!cpu_addr)
return NULL;
- *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+ *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
+ dev->coherent_dma_mask);
if (*handle == DMA_MAPPING_ERROR) {
__iommu_dma_free(dev, size, cpu_addr);
return NULL;
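The __iommu_dma_map() changes above make the upper IOVA limit an explicit argument: streaming mappings pass dma_get_mask(dev), coherent allocations pass dev->coherent_dma_mask, and iommu_dma_alloc_iova() further clamps the result by dev->bus_dma_limit and the domain aperture. A minimal sketch of that clamping, with an illustrative helper name:

static u64 sketch_iova_limit(struct device *dev, struct iommu_domain *domain,
			     u64 dma_limit)
{
	/* A zero bus_dma_limit means no extra bus-level restriction. */
	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	return dma_limit;
}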
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index eecd6a421667..3acfa6a25fa2 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -895,8 +895,11 @@ int __init detect_intel_iommu(void)
}
#ifdef CONFIG_X86
- if (!ret)
+ if (!ret) {
x86_init.iommu.iommu_init = intel_iommu_init;
+ x86_platform.iommu_shutdown = intel_iommu_shutdown;
+ }
+
#endif
if (dmar_tbl) {
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 9c94e16fb127..186ff5cc975c 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size,
- int prot)
+ int prot, gfp_t gfp)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6db6d969e31c..0c8d81f56a30 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2420,14 +2420,24 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-/*
- * find_domain
- * Note: we use struct device->archdata.iommu stores the info
- */
static struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
+ if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO ||
+ dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
+ return NULL;
+
+ /* No lock here, assumes no domain exit in normal case */
+ info = dev->archdata.iommu;
+ if (likely(info))
+ return info->domain;
+
+ return NULL;
+}
+
+static struct dmar_domain *deferred_attach_domain(struct device *dev)
+{
if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
struct iommu_domain *domain;
@@ -2437,12 +2447,7 @@ static struct dmar_domain *find_domain(struct device *dev)
intel_iommu_attach_device(domain, dev);
}
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
-
- if (likely(info))
- return info->domain;
- return NULL;
+ return find_domain(dev);
}
static inline struct device_domain_info *
@@ -3512,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3732,7 +3737,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (!domain)
return 0;
@@ -3827,7 +3832,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
int prot = 0;
int ret;
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (WARN_ON(dir == DMA_NONE || !domain))
return DMA_MAPPING_ERROR;
@@ -4314,13 +4319,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
+ int ret;
+
+ rmrr = (struct acpi_dmar_reserved_memory *)header;
+ ret = arch_rmrr_sanity_check(rmrr);
+ if (ret)
+ return ret;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
goto out;
rmrru->hdr = header;
- rmrr = (struct acpi_dmar_reserved_memory *)header;
+
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
@@ -4759,6 +4770,26 @@ static void intel_disable_iommus(void)
iommu_disable_translation(iommu);
}
+void intel_iommu_shutdown(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+
+ if (no_iommu || dmar_disabled)
+ return;
+
+ down_write(&dmar_global_lock);
+
+ /* Disable PMRs explicitly here. */
+ for_each_iommu(iommu, drhd)
+ iommu_disable_protect_mem_regions(iommu);
+
+ /* Make sure the IOMMUs are switched off */
+ intel_disable_iommus();
+
+ up_write(&dmar_global_lock);
+}
+
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
@@ -5440,7 +5471,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
+ size_t size, int iommu_prot, gfp_t gfp)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 4cb394937700..7c3bd2c3cdca 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -846,27 +846,28 @@ struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
- void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
dummy_tlb_flush(iova, granule, granule, cookie);
}
-static const struct iommu_flush_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush,
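The io-pgtable-arm.c rework below folds the old levels/pg_shift/pgd_size fields into start_level and pgd_bits, deriving everything else from sizeof(arm_lpae_iopte). A worked example of the new arithmetic, assuming 64-bit PTEs, a 4K granule and a 48-bit IAS:

/*
 *   pg_shift              = 12                            (log2 of the granule)
 *   bits_per_level        = 12 - ilog2(8)            = 9  (512 PTEs per table)
 *   ARM_LPAE_GRANULE      = 8 << 9                   = 4096
 *   levels                = DIV_ROUND_UP(48 - 12, 9) = 4, so start_level = 4 - 4 = 0
 *   pgd_bits              = 36 - 9 * (4 - 1)         = 9, ARM_LPAE_PGD_SIZE = 8 << 9 = 4096
 *   ARM_LPAE_LVL_SHIFT(3) = (4 - 3) * 9 + 3          = 12 (4K pages)
 *   ARM_LPAE_LVL_SHIFT(2) = (4 - 2) * 9 + 3          = 21 (2M blocks)
 *   ARM_LPAE_LVL_SHIFT(1) = (4 - 1) * 9 + 3          = 30 (1G blocks)
 */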
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index ca51036aa53c..bdf47f745268 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -32,39 +32,31 @@
io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
- * For consistency with the architecture, we always consider
- * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
- */
-#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
-
-/*
* Calculate the right shift amount to get to the portion describing level l
* in a virtual address mapped by the pagetable in d.
*/
#define ARM_LPAE_LVL_SHIFT(l,d) \
- ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
- * (d)->bits_per_level) + (d)->pg_shift)
+ (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \
+ ilog2(sizeof(arm_lpae_iopte)))
-#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
-
-#define ARM_LPAE_PAGES_PER_PGD(d) \
- DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
+#define ARM_LPAE_GRANULE(d) \
+ (sizeof(arm_lpae_iopte) << (d)->bits_per_level)
+#define ARM_LPAE_PGD_SIZE(d) \
+ (sizeof(arm_lpae_iopte) << (d)->pgd_bits)
/*
* Calculate the index at level l used to map virtual address a using the
* pagetable in d.
*/
#define ARM_LPAE_PGD_IDX(l,d) \
- ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+ ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
#define ARM_LPAE_LVL_IDX(a,l,d) \
(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
-#define ARM_LPAE_BLOCK_SIZE(l,d) \
- (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
- ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
+#define ARM_LPAE_BLOCK_SIZE(l,d) (1ULL << ARM_LPAE_LVL_SHIFT(l,d))
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT 0
@@ -180,10 +172,9 @@
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
- int levels;
- size_t pgd_size;
- unsigned long pg_shift;
- unsigned long bits_per_level;
+ int pgd_bits;
+ int start_level;
+ int bits_per_level;
void *pgd;
};
@@ -213,7 +204,7 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
{
u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
- if (data->pg_shift < 16)
+ if (ARM_LPAE_GRANULE(data) < SZ_64K)
return paddr;
/* Rotate the packed high-order bits back to the top */
@@ -392,7 +383,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
/* If we can install a leaf entry at this level, then do so */
- if (size == block_size && (size & cfg->pgsize_bitmap))
+ if (size == block_size)
return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
/* We can't allocate tables at the final level */
@@ -464,7 +455,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
- else if (prot & IOMMU_QCOM_SYS_CACHE)
+ else if (prot & IOMMU_SYS_CACHE_ONLY)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
@@ -479,16 +470,19 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
- int ret, lvl = ARM_LPAE_START_LVL(data);
+ int ret, lvl = data->start_level;
arm_lpae_iopte prot;
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
- if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
- paddr >= (1ULL << data->iop.cfg.oas)))
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return -EINVAL;
+
+ if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
@@ -508,8 +502,8 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *start, *end;
unsigned long table_size;
- if (lvl == ARM_LPAE_START_LVL(data))
- table_size = data->pgd_size;
+ if (lvl == data->start_level)
+ table_size = ARM_LPAE_PGD_SIZE(data);
else
table_size = ARM_LPAE_GRANULE(data);
@@ -537,7 +531,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
- __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ __arm_lpae_free_pgtable(data, data->start_level, data->pgd);
kfree(data);
}
@@ -652,13 +646,16 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
- if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return 0;
+
+ if (WARN_ON(iova >> data->iop.cfg.ias))
return 0;
- return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -666,7 +663,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte pte, *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
+ int lvl = data->start_level;
do {
/* Valid IOPTE pointer? */
@@ -743,8 +740,8 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
- unsigned long va_bits, pgd_bits;
struct arm_lpae_io_pgtable *data;
+ int levels, va_bits, pg_shift;
arm_lpae_restrict_pgsizes(cfg);
@@ -766,15 +763,15 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
if (!data)
return NULL;
- data->pg_shift = __ffs(cfg->pgsize_bitmap);
- data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
- va_bits = cfg->ias - data->pg_shift;
- data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+ va_bits = cfg->ias - pg_shift;
+ levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+ data->start_level = ARM_LPAE_MAX_LEVELS - levels;
/* Calculate the actual size of our pgd (without concatenation) */
- pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
- data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+ data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
data->iop.ops = (struct io_pgtable_ops) {
.map = arm_lpae_map,
@@ -864,11 +861,11 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_MAIR_ATTR_INC_OWBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
- cfg->arm_lpae_s1_cfg.mair[0] = reg;
- cfg->arm_lpae_s1_cfg.mair[1] = 0;
+ cfg->arm_lpae_s1_cfg.mair = reg;
/* Looking good; allocate a pgd */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+ GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
@@ -903,13 +900,13 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
* Concatenate PGDs at level 1 if possible in order to reduce
* the depth of the stage-2 walk.
*/
- if (data->levels == ARM_LPAE_MAX_LEVELS) {
+ if (data->start_level == 0) {
unsigned long pgd_pages;
- pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+ pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
- data->pgd_size = pgd_pages << data->pg_shift;
- data->levels--;
+ data->pgd_bits += data->bits_per_level;
+ data->start_level++;
}
}
@@ -919,7 +916,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
- sl = ARM_LPAE_START_LVL(data);
+ sl = data->start_level;
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
@@ -965,7 +962,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+ GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
@@ -1034,9 +1032,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
/* Mali seems to need a full 4-level table regardless of IAS */
- if (data->levels < ARM_LPAE_MAX_LEVELS) {
- data->levels = ARM_LPAE_MAX_LEVELS;
- data->pgd_size = sizeof(arm_lpae_iopte);
+ if (data->start_level > 0) {
+ data->start_level = 0;
+ data->pgd_bits = 0;
}
/*
* MEMATTR: Mali has no actual notion of a non-cacheable type, so the
@@ -1053,7 +1051,8 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_MALI_LPAE_MEMATTR_IMP_DEF
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
+ cfg);
if (!data->pgd)
goto out_free_data;
@@ -1097,22 +1096,23 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
- void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
dummy_tlb_flush(iova, granule, granule, cookie);
}
@@ -1131,9 +1131,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
cfg->pgsize_bitmap, cfg->ias);
- pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
- data->levels, data->pgd_size, data->pg_shift,
- data->bits_per_level, data->pgd);
+ pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
+ ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
+ ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i) ({ \
@@ -1145,7 +1145,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
- static const enum io_pgtable_fmt fmts[] = {
+ static const enum io_pgtable_fmt fmts[] __initconst = {
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
};
@@ -1244,13 +1244,13 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
static int __init arm_lpae_do_selftests(void)
{
- static const unsigned long pgsize[] = {
+ static const unsigned long pgsize[] __initconst = {
SZ_4K | SZ_2M | SZ_1G,
SZ_16K | SZ_32M,
SZ_64K | SZ_512M,
};
- static const unsigned int ias[] = {
+ static const unsigned int ias[] __initconst = {
32, 36, 40, 42, 44, 48,
};
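
Worked example of the new level bookkeeping introduced above, for a hypothetical configuration (48-bit IAS, 4K granule). The numbers simply follow the formulas in arm_lpae_alloc_pgtable() and are illustrative only:

/*
 * Hypothetical example: cfg->ias = 48, 4K granule.
 *
 *   pg_shift       = __ffs(pgsize_bitmap)            = 12
 *   bits_per_level = pg_shift - ilog2(sizeof(iopte)) = 12 - 3 = 9
 *   va_bits        = ias - pg_shift                  = 48 - 12 = 36
 *   levels         = DIV_ROUND_UP(va_bits, 9)        = 4
 *   start_level    = ARM_LPAE_MAX_LEVELS - levels    = 0
 *   pgd_bits       = va_bits - 9 * (levels - 1)      = 9
 *
 * ARM_LPAE_PGD_SIZE(data) then presumably evaluates to
 * 1 << (pgd_bits + ilog2(sizeof(iopte))) = 4096 bytes, matching the old
 * pgd_size computation that the hunks above remove.
 */
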
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
new file mode 100644
index 000000000000..0f8dd377aada
--- /dev/null
+++ b/drivers/iommu/ioasid.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I/O Address Space ID allocator. There is one global IOASID space, split into
+ * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
+ * free IOASIDs with ioasid_alloc and ioasid_free.
+ */
+#include <linux/ioasid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+
+struct ioasid_data {
+ ioasid_t id;
+ struct ioasid_set *set;
+ void *private;
+ struct rcu_head rcu;
+};
+
+/*
+ * struct ioasid_allocator_data - Internal data structure to hold information
+ * about an allocator. There are two types of allocators:
+ *
+ * - Default allocator always has its own XArray to track the IOASIDs allocated.
+ * - Custom allocators may share allocation helpers with different private data.
+ * Custom allocators that share the same helper functions also share the same
+ * XArray.
+ * Rules:
+ * 1. Default allocator is always available, not dynamically registered. This is
+ * to prevent race conditions with early boot code that wants to register
+ * custom allocators or allocate IOASIDs.
+ * 2. Custom allocators take precedence over the default allocator.
+ * 3. When all custom allocators sharing the same helper functions are
+ * unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
+ * freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
+ * 4. When switching between custom allocators sharing the same helper
+ * functions, outstanding IOASIDs are preserved.
+ * 5. When switching between custom allocator and default allocator, all IOASIDs
+ * must be freed to ensure unadulterated space for the new allocator.
+ *
+ * @ops: allocator helper functions and its data
+ * @list: registered custom allocators
+ * @slist: allocators that share the same ops but have different data
+ * @flags: attributes of the allocator
+ * @xa: xarray holds the IOASID space
+ * @rcu: used for kfree_rcu when unregistering allocator
+ */
+struct ioasid_allocator_data {
+ struct ioasid_allocator_ops *ops;
+ struct list_head list;
+ struct list_head slist;
+#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
+ unsigned long flags;
+ struct xarray xa;
+ struct rcu_head rcu;
+};
+
+static DEFINE_SPINLOCK(ioasid_allocator_lock);
+static LIST_HEAD(allocators_list);
+
+static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
+static void default_free(ioasid_t ioasid, void *opaque);
+
+static struct ioasid_allocator_ops default_ops = {
+ .alloc = default_alloc,
+ .free = default_free,
+};
+
+static struct ioasid_allocator_data default_allocator = {
+ .ops = &default_ops,
+ .flags = 0,
+ .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
+};
+
+static struct ioasid_allocator_data *active_allocator = &default_allocator;
+
+static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
+{
+ ioasid_t id;
+
+ if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
+ pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
+ return INVALID_IOASID;
+ }
+
+ return id;
+}
+
+static void default_free(ioasid_t ioasid, void *opaque)
+{
+ struct ioasid_data *ioasid_data;
+
+ ioasid_data = xa_erase(&default_allocator.xa, ioasid);
+ kfree_rcu(ioasid_data, rcu);
+}
+
+/* Allocate and initialize a new custom allocator with its helper functions */
+static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *ia_data;
+
+ ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
+ if (!ia_data)
+ return NULL;
+
+ xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
+ INIT_LIST_HEAD(&ia_data->slist);
+ ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
+ ia_data->ops = ops;
+
+ /* For tracking custom allocators that share the same ops */
+ list_add_tail(&ops->list, &ia_data->slist);
+
+ return ia_data;
+}
+
+static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
+{
+ return (a->free == b->free) && (a->alloc == b->alloc);
+}
+
+/**
+ * ioasid_register_allocator - register a custom allocator
+ * @ops: the custom allocator ops to be registered
+ *
+ * Custom allocators take precedence over the default xarray-based allocator.
+ * Private data associated with IOASIDs allocated by custom allocators is
+ * managed by the IOASID framework in the same way as data stored in the
+ * xarray by the default allocator.
+ *
+ * There can be multiple allocators registered but only one is active. In case
+ * of runtime removal of a custom allocator, the next one is activated based
+ * on the registration ordering.
+ *
+ * Multiple allocators can share the same alloc() function; in that case the
+ * IOASID space is shared.
+ */
+int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *ia_data;
+ struct ioasid_allocator_data *pallocator;
+ int ret = 0;
+
+ spin_lock(&ioasid_allocator_lock);
+
+ ia_data = ioasid_alloc_allocator(ops);
+ if (!ia_data) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /*
+ * No particular preference, we activate the first one and keep
+ * the later registered allocators in a list in case the first one gets
+ * removed due to hotplug.
+ */
+ if (list_empty(&allocators_list)) {
+ WARN_ON(active_allocator != &default_allocator);
+ /* Use this new allocator if default is not active */
+ if (xa_empty(&active_allocator->xa)) {
+ rcu_assign_pointer(active_allocator, ia_data);
+ list_add_tail(&ia_data->list, &allocators_list);
+ goto out_unlock;
+ }
+ pr_warn("Default allocator active with outstanding IOASID\n");
+ ret = -EAGAIN;
+ goto out_free;
+ }
+
+ /* Check if the allocator is already registered */
+ list_for_each_entry(pallocator, &allocators_list, list) {
+ if (pallocator->ops == ops) {
+ pr_err("IOASID allocator already registered\n");
+ ret = -EEXIST;
+ goto out_free;
+ } else if (use_same_ops(pallocator->ops, ops)) {
+ /*
+ * If the new allocator shares the same ops,
+ * then they will share the same IOASID space.
+ * We should put them under the same xarray.
+ */
+ list_add_tail(&ops->list, &pallocator->slist);
+ goto out_free;
+ }
+ }
+ list_add_tail(&ia_data->list, &allocators_list);
+
+ spin_unlock(&ioasid_allocator_lock);
+ return 0;
+out_free:
+ kfree(ia_data);
+out_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioasid_register_allocator);
+
+/**
+ * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
+ * @ops: the custom allocator to be removed
+ *
+ * Remove an allocator from the list and activate the next allocator in the
+ * order it was registered, or revert to the default allocator if all custom
+ * allocators have been unregistered and there are no outstanding IOASIDs.
+ */
+void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *pallocator;
+ struct ioasid_allocator_ops *sops;
+
+ spin_lock(&ioasid_allocator_lock);
+ if (list_empty(&allocators_list)) {
+ pr_warn("No custom IOASID allocators active!\n");
+ goto exit_unlock;
+ }
+
+ list_for_each_entry(pallocator, &allocators_list, list) {
+ if (!use_same_ops(pallocator->ops, ops))
+ continue;
+
+ if (list_is_singular(&pallocator->slist)) {
+ /* No shared helper functions */
+ list_del(&pallocator->list);
+ /*
+ * All IOASIDs should have been freed before
+ * the last allocator that shares the same ops
+ * is unregistered.
+ */
+ WARN_ON(!xa_empty(&pallocator->xa));
+ if (list_empty(&allocators_list)) {
+ pr_info("No custom IOASID allocators, switch to default.\n");
+ rcu_assign_pointer(active_allocator, &default_allocator);
+ } else if (pallocator == active_allocator) {
+ rcu_assign_pointer(active_allocator,
+ list_first_entry(&allocators_list,
+ struct ioasid_allocator_data, list));
+ pr_info("IOASID allocator changed");
+ }
+ kfree_rcu(pallocator, rcu);
+ break;
+ }
+ /*
+ * Find the matching shared ops to delete,
+ * but keep outstanding IOASIDs
+ */
+ list_for_each_entry(sops, &pallocator->slist, list) {
+ if (sops == ops) {
+ list_del(&ops->list);
+ break;
+ }
+ }
+ break;
+ }
+
+exit_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
+
+/**
+ * ioasid_set_data - Set private data for an allocated ioasid
+ * @ioasid: the ID to set data
+ * @data: the private data
+ *
+ * For an IOASID that has already been allocated, private data can be set
+ * via this API. It can later be looked up via ioasid_find().
+ */
+int ioasid_set_data(ioasid_t ioasid, void *data)
+{
+ struct ioasid_data *ioasid_data;
+ int ret = 0;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (ioasid_data)
+ rcu_assign_pointer(ioasid_data->private, data);
+ else
+ ret = -ENOENT;
+ spin_unlock(&ioasid_allocator_lock);
+
+ /*
+ * Wait for readers to stop accessing the old private data, so the
+ * caller can free it.
+ */
+ if (!ret)
+ synchronize_rcu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioasid_set_data);
+
+/**
+ * ioasid_alloc - Allocate an IOASID
+ * @set: the IOASID set
+ * @min: the minimum ID (inclusive)
+ * @max: the maximum ID (inclusive)
+ * @private: data private to the caller
+ *
+ * Allocate an ID between @min and @max. The @private pointer is stored
+ * internally and can be retrieved with ioasid_find().
+ *
+ * Return: the allocated ID on success, or %INVALID_IOASID on failure.
+ */
+ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
+ void *private)
+{
+ struct ioasid_data *data;
+ void *adata;
+ ioasid_t id;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return INVALID_IOASID;
+
+ data->set = set;
+ data->private = private;
+
+ /*
+ * Custom allocator needs allocator data to perform platform specific
+ * operations.
+ */
+ spin_lock(&ioasid_allocator_lock);
+ adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
+ id = active_allocator->ops->alloc(min, max, adata);
+ if (id == INVALID_IOASID) {
+ pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
+ goto exit_free;
+ }
+
+ if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
+ xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
+ /* Custom allocator needs framework to store and track allocation results */
+ pr_err("Failed to alloc ioasid from %d\n", id);
+ active_allocator->ops->free(id, active_allocator->ops->pdata);
+ goto exit_free;
+ }
+ data->id = id;
+
+ spin_unlock(&ioasid_allocator_lock);
+ return id;
+exit_free:
+ spin_unlock(&ioasid_allocator_lock);
+ kfree(data);
+ return INVALID_IOASID;
+}
+EXPORT_SYMBOL_GPL(ioasid_alloc);
+
+/**
+ * ioasid_free - Free an IOASID
+ * @ioasid: the ID to remove
+ */
+void ioasid_free(ioasid_t ioasid)
+{
+ struct ioasid_data *ioasid_data;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (!ioasid_data) {
+ pr_err("Trying to free unknown IOASID %u\n", ioasid);
+ goto exit_unlock;
+ }
+
+ active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
+ /* Custom allocator needs additional steps to free the xa element */
+ if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
+ ioasid_data = xa_erase(&active_allocator->xa, ioasid);
+ kfree_rcu(ioasid_data, rcu);
+ }
+
+exit_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_free);
+
+/**
+ * ioasid_find - Find IOASID data
+ * @set: the IOASID set
+ * @ioasid: the IOASID to find
+ * @getter: function to call on the found object
+ *
+ * The optional getter function allows the caller to take a reference to the
+ * found object under the RCU lock. The getter can also check that the object
+ * is still valid: if @getter returns false, the object is invalid and NULL is
+ * returned.
+ *
+ * If the IOASID exists, return the private pointer passed to ioasid_alloc.
+ * Private data can be NULL if not set. Return an error if the IOASID is not
+ * found, or if @set is not NULL and the IOASID does not belong to the set.
+ */
+void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *))
+{
+ void *priv;
+ struct ioasid_data *ioasid_data;
+ struct ioasid_allocator_data *idata;
+
+ rcu_read_lock();
+ idata = rcu_dereference(active_allocator);
+ ioasid_data = xa_load(&idata->xa, ioasid);
+ if (!ioasid_data) {
+ priv = ERR_PTR(-ENOENT);
+ goto unlock;
+ }
+ if (set && ioasid_data->set != set) {
+ /* data found but does not belong to the set */
+ priv = ERR_PTR(-EACCES);
+ goto unlock;
+ }
+ /* Now IOASID and its set is verified, we can return the private data */
+ priv = rcu_dereference(ioasid_data->private);
+ if (getter && !getter(priv))
+ priv = NULL;
+unlock:
+ rcu_read_unlock();
+
+ return priv;
+}
+EXPORT_SYMBOL_GPL(ioasid_find);
+
+MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
+MODULE_LICENSE("GPL");
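
For orientation, here is a minimal, hypothetical usage sketch of the API this file adds. The set name, ID range and private structure are made up, and DECLARE_IOASID_SET is assumed to be the helper from <linux/ioasid.h> referenced in the header comment above:

#include <linux/ioasid.h>
#include <linux/slab.h>

struct my_ioasid_state {		/* hypothetical per-ID private data */
	int token;
};

static DECLARE_IOASID_SET(my_set);	/* one subset of the global space */

static int my_ioasid_example(void)
{
	struct my_ioasid_state *state;
	ioasid_t id;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* Allocate an ID in [1, 0xfffff] and attach @state to it */
	id = ioasid_alloc(&my_set, 1, 0xfffff, state);
	if (id == INVALID_IOASID) {
		kfree(state);
		return -ENOSPC;
	}

	/* Later lookups return the private pointer stored at allocation */
	WARN_ON(ioasid_find(&my_set, id, NULL) != state);

	ioasid_free(id);
	kfree(state);
	return 0;
}
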
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d658c7c6a2ab..db7bfd4f2d20 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1665,6 +1665,36 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
+int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ if (unlikely(!domain->ops->cache_invalidate))
+ return -ENODEV;
+
+ return domain->ops->cache_invalidate(domain, dev, inv_info);
+}
+EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
+
+int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data)
+{
+ if (unlikely(!domain->ops->sva_bind_gpasid))
+ return -ENODEV;
+
+ return domain->ops->sva_bind_gpasid(domain, dev, data);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
+
+int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid)
+{
+ if (unlikely(!domain->ops->sva_unbind_gpasid))
+ return -ENODEV;
+
+ return domain->ops->sva_unbind_gpasid(dev, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
+
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -1854,8 +1884,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -1892,8 +1922,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
- ret = ops->map(domain, iova, paddr, pgsize, prot);
+ ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
if (ret)
break;
@@ -1913,8 +1943,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ might_sleep();
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map);
+int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_atomic);
+
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
@@ -1991,8 +2035,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
+size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
size_t len = 0, mapped = 0;
phys_addr_t start;
@@ -2003,7 +2048,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) {
- ret = iommu_map(domain, iova + mapped, start, len, prot);
+ ret = __iommu_map(domain, iova + mapped, start,
+ len, prot, gfp);
+
if (ret)
goto out_err;
@@ -2031,8 +2078,22 @@ out_err:
return 0;
}
+
+size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ might_sleep();
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map_sg);
+size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
+
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
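
A hedged illustration of how the new GFP-aware entry points above are meant to be used. The domain, addresses and lock below are placeholders, not part of the patch:

#include <linux/iommu.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>

/* Process context: page-table memory may be allocated with GFP_KERNEL. */
static int example_map_may_sleep(struct iommu_domain *dom)
{
	return iommu_map(dom, 0x100000, 0x80000000, SZ_4K,
			 IOMMU_READ | IOMMU_WRITE);
}

/* Atomic context (e.g. under a spinlock): use the GFP_ATOMIC variant. */
static int example_map_atomic(struct iommu_domain *dom, spinlock_t *lock)
{
	int ret;

	spin_lock(lock);
	ret = iommu_map_atomic(dom, 0x200000, 0x80001000, SZ_4K,
			       IOMMU_READ | IOMMU_WRITE);
	spin_unlock(lock);
	return ret;
}
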
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 2639fc718117..d02edd2751f3 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -50,6 +50,9 @@ struct ipmmu_features {
bool twobit_imttbcr_sl0;
bool reserved_context;
bool cache_snoop;
+ unsigned int ctx_offset_base;
+ unsigned int ctx_offset_stride;
+ unsigned int utlb_offset_base;
};
struct ipmmu_vmsa_device {
@@ -99,125 +102,49 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IM_NS_ALIAS_OFFSET 0x800
-#define IM_CTX_SIZE 0x40
-
-#define IMCTR 0x0000
-#define IMCTR_TRE (1 << 17)
-#define IMCTR_AFE (1 << 16)
-#define IMCTR_RTSEL_MASK (3 << 4)
-#define IMCTR_RTSEL_SHIFT 4
-#define IMCTR_TREN (1 << 3)
-#define IMCTR_INTEN (1 << 2)
-#define IMCTR_FLUSH (1 << 1)
-#define IMCTR_MMUEN (1 << 0)
-
-#define IMCAAR 0x0004
-
-#define IMTTBCR 0x0008
-#define IMTTBCR_EAE (1 << 31)
-#define IMTTBCR_PMB (1 << 30)
-#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_TSZ1_MASK (7 << 16)
-#define IMTTBCR_TSZ1_SHIFT 16
-#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */
+/* MMU "context" registers */
+#define IMCTR 0x0000 /* R-Car Gen2/3 */
+#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */
+#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
+
+#define IMTTBCR 0x0008 /* R-Car Gen2/3 */
+#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */
-#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
-#define IMTTBCR_SL0_LVL_2 (0 << 4)
-#define IMTTBCR_SL0_LVL_1 (1 << 4)
-#define IMTTBCR_TSZ0_MASK (7 << 0)
-#define IMTTBCR_TSZ0_SHIFT O
-
-#define IMBUSCR 0x000c
-#define IMBUSCR_DVM (1 << 2)
-#define IMBUSCR_BUSSEL_SYS (0 << 0)
-#define IMBUSCR_BUSSEL_CCI (1 << 0)
-#define IMBUSCR_BUSSEL_IMCAAR (2 << 0)
-#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0)
-#define IMBUSCR_BUSSEL_MASK (3 << 0)
-
-#define IMTTLBR0 0x0010
-#define IMTTUBR0 0x0014
-#define IMTTLBR1 0x0018
-#define IMTTUBR1 0x001c
-
-#define IMSTR 0x0020
-#define IMSTR_ERRLVL_MASK (3 << 12)
-#define IMSTR_ERRLVL_SHIFT 12
-#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8)
-#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8)
-#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8)
-#define IMSTR_ERRCODE_MASK (7 << 8)
-#define IMSTR_MHIT (1 << 4)
-#define IMSTR_ABORT (1 << 2)
-#define IMSTR_PF (1 << 1)
-#define IMSTR_TF (1 << 0)
-
-#define IMMAIR0 0x0028
-#define IMMAIR1 0x002c
-#define IMMAIR_ATTR_MASK 0xff
-#define IMMAIR_ATTR_DEVICE 0x04
-#define IMMAIR_ATTR_NC 0x44
-#define IMMAIR_ATTR_WBRWA 0xff
-#define IMMAIR_ATTR_SHIFT(n) ((n) << 3)
-#define IMMAIR_ATTR_IDX_NC 0
-#define IMMAIR_ATTR_IDX_WBRWA 1
-#define IMMAIR_ATTR_IDX_DEV 2
-
-#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */
-#define IMEUAR 0x0034 /* R-Car Gen3 only */
-
-#define IMPCTR 0x0200
-#define IMPSTR 0x0208
-#define IMPEAR 0x020c
-#define IMPMBA(n) (0x0280 + ((n) * 4))
-#define IMPMBD(n) (0x02c0 + ((n) * 4))
+#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */
+
+#define IMBUSCR 0x000c /* R-Car Gen2 only */
+#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */
+#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */
+
+#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */
+#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */
+
+#define IMSTR 0x0020 /* R-Car Gen2/3 */
+#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */
+#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */
+#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */
+#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */
+#define IMMAIR0 0x0028 /* R-Car Gen2/3 */
+
+#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */
+#define IMEUAR 0x0034 /* R-Car Gen3 only */
+
+/* uTLB registers */
#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
-#define IMUCTR0(n) (0x0300 + ((n) * 16))
-#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16))
-#define IMUCTR_FIXADDEN (1 << 31)
-#define IMUCTR_FIXADD_MASK (0xff << 16)
-#define IMUCTR_FIXADD_SHIFT 16
-#define IMUCTR_TTSEL_MMU(n) ((n) << 4)
-#define IMUCTR_TTSEL_PMB (8 << 4)
-#define IMUCTR_TTSEL_MASK (15 << 4)
-#define IMUCTR_FLUSH (1 << 1)
-#define IMUCTR_MMUEN (1 << 0)
+#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */
+#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */
+#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
-#define IMUASID0(n) (0x0308 + ((n) * 16))
-#define IMUASID32(n) (0x0608 + (((n) - 32) * 16))
-#define IMUASID_ASID8_MASK (0xff << 8)
-#define IMUASID_ASID8_SHIFT 8
-#define IMUASID_ASID0_MASK (0xff << 0)
-#define IMUASID_ASID0_SHIFT 0
+#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */
/* -----------------------------------------------------------------------------
* Root device handling
@@ -264,29 +191,61 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
iowrite32(data, mmu->base + offset);
}
+static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg)
+{
+ return mmu->features->ctx_offset_base +
+ context_id * mmu->features->ctx_offset_stride + reg;
+}
+
+static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg)
+{
+ return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
+}
+
+static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
+}
+
static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
unsigned int reg)
{
- return ipmmu_read(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg);
+ return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}
static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
unsigned int reg, u32 data)
{
- ipmmu_write(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
unsigned int reg, u32 data)
{
if (domain->mmu != domain->mmu->root)
- ipmmu_write(domain->mmu,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
- ipmmu_write(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
+}
+
+static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
+{
+ return mmu->features->utlb_offset_base + reg;
+}
+
+static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int utlb, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
+}
+
+static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int utlb, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}
/* -----------------------------------------------------------------------------
@@ -334,11 +293,10 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
*/
/* TODO: What should we set the ASID to ? */
- ipmmu_write(mmu, IMUASID(utlb), 0);
+ ipmmu_imuasid_write(mmu, utlb, 0);
/* TODO: Do we need to flush the microTLB ? */
- ipmmu_write(mmu, IMUCTR(utlb),
- IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
- IMUCTR_MMUEN);
+ ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
+ IMUCTR_FLUSH | IMUCTR_MMUEN);
mmu->utlb_ctx[utlb] = domain->context_id;
}
@@ -350,7 +308,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
{
struct ipmmu_vmsa_device *mmu = domain->mmu;
- ipmmu_write(mmu, IMUCTR(utlb), 0);
+ ipmmu_imuctr_write(mmu, utlb, 0);
mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}
@@ -438,7 +396,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
/* MAIR0 */
ipmmu_ctx_write_root(domain, IMMAIR0,
- domain->cfg.arm_lpae_s1_cfg.mair[0]);
+ domain->cfg.arm_lpae_s1_cfg.mair);
/* IMBUSCR */
if (domain->mmu->features->setup_imbuscr)
@@ -724,7 +682,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
}
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -783,6 +741,7 @@ static int ipmmu_init_platform_device(struct device *dev,
static const struct soc_device_attribute soc_rcar_gen3[] = {
{ .soc_id = "r8a774a1", },
+ { .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a7795", },
{ .soc_id = "r8a7796", },
@@ -794,6 +753,7 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
};
static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
+ { .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a7795", .revision = "ES3.*" },
{ .soc_id = "r8a77965", },
@@ -985,7 +945,7 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
/* Disable all contexts. */
for (i = 0; i < mmu->num_ctx; ++i)
- ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
+ ipmmu_ctx_write(mmu, i, IMCTR, 0);
}
static const struct ipmmu_features ipmmu_features_default = {
@@ -997,6 +957,9 @@ static const struct ipmmu_features ipmmu_features_default = {
.twobit_imttbcr_sl0 = false,
.reserved_context = false,
.cache_snoop = true,
+ .ctx_offset_base = 0,
+ .ctx_offset_stride = 0x40,
+ .utlb_offset_base = 0,
};
static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
@@ -1008,6 +971,9 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.twobit_imttbcr_sl0 = true,
.reserved_context = true,
.cache_snoop = false,
+ .ctx_offset_base = 0,
+ .ctx_offset_stride = 0x40,
+ .utlb_offset_base = 0,
};
static const struct of_device_id ipmmu_of_ids[] = {
@@ -1018,6 +984,9 @@ static const struct of_device_id ipmmu_of_ids[] = {
.compatible = "renesas,ipmmu-r8a774a1",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a774b1",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a774c0",
.data = &ipmmu_features_rcar_gen3,
}, {
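
For reference, the per-SoC register addressing the hunks above introduce reduces to a single formula. The helper below is an illustrative restatement (not part of the patch), using the values from ipmmu_features_default:

/* ctx register address = ctx_offset_base + context_id * ctx_offset_stride + reg */
static unsigned int example_ctx_offset(unsigned int context_id,
				       unsigned int reg)
{
	const unsigned int base = 0;		/* ipmmu_features_default */
	const unsigned int stride = 0x40;	/* was the fixed IM_CTX_SIZE */

	return base + context_id * stride + reg;
}

/* e.g. example_ctx_offset(2, IMCTR) == 0x80, same as 2 * IM_CTX_SIZE before */
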
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index be99d408cf35..93f14bca26ee 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -504,7 +504,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t len, int prot)
+ phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 67a483c1a935..6fc1f5ecf91e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -101,8 +101,6 @@
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
struct mtk_iommu_domain {
- spinlock_t pgtlock; /* lock for page table */
-
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@@ -173,13 +171,16 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
}
}
-static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf,
- void *cookie)
+static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
struct mtk_iommu_data *data = cookie;
+ unsigned long flags;
+ int ret;
+ u32 tmp;
for_each_m4u(data) {
+ spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL);
@@ -188,23 +189,10 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE,
data->base + REG_MMU_INVALIDATE);
- data->tlb_flush_active = true;
- }
-}
-
-static void mtk_iommu_tlb_sync(void *cookie)
-{
- struct mtk_iommu_data *data = cookie;
- int ret;
- u32 tmp;
-
- for_each_m4u(data) {
- /* Avoid timing out if there's nothing to wait for */
- if (!data->tlb_flush_active)
- return;
+ /* tlb sync */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
- tmp, tmp != 0, 10, 100000);
+ tmp, tmp != 0, 10, 1000);
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
@@ -212,35 +200,24 @@ static void mtk_iommu_tlb_sync(void *cookie)
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
- data->tlb_flush_active = false;
+ spin_unlock_irqrestore(&data->tlb_lock, flags);
}
}
-static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
-static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
{
- mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+ struct mtk_iommu_data *data = cookie;
+ struct iommu_domain *domain = &data->m4u_dom->domain;
+
+ iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}
static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all,
- .tlb_flush_walk = mtk_iommu_tlb_flush_walk,
- .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
+ .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
+ .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};
@@ -316,8 +293,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- spin_lock_init(&dom->pgtlock);
-
dom->cfg = (struct io_pgtable_cfg) {
.quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
@@ -412,22 +387,17 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- unsigned long flags;
- int ret;
/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
if (data->enable_4GB)
paddr |= BIT_ULL(32);
- spin_lock_irqsave(&dom->pgtlock, flags);
- ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
-
- return ret;
+ /* Synchronize with the tlb_lock */
+ return dom->iop->map(dom->iop, iova, paddr, size, prot);
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@@ -435,25 +405,26 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- unsigned long flags;
- size_t unmapsz;
-
- spin_lock_irqsave(&dom->pgtlock, flags);
- unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
- return unmapsz;
+ return dom->iop->unmap(dom->iop, iova, size, gather);
}
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+ mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ size_t length = gather->end - gather->start;
+
+ if (gather->start == ULONG_MAX)
+ return;
+
+ mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
+ data);
}
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -461,13 +432,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- unsigned long flags;
phys_addr_t pa;
- spin_lock_irqsave(&dom->pgtlock, flags);
pa = dom->iop->iova_to_phys(dom->iop, iova);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
-
if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
@@ -733,6 +700,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ spin_lock_init(&data->tlb_lock);
list_add_tail(&data->list, &m4ulist);
if (!iommu_present(&platform_bus_type))
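
The rework above drops the per-domain pgtlock and defers invalidation to the gather/sync pair, flushing one range under tlb_lock. A minimal caller sketch, assuming the gather helpers exposed by linux/iommu.h at this kernel version; the domain and range are placeholders:

static void example_unmap_range(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	/* mtk_iommu_tlb_flush_page_nosync() only queues pages here */
	iommu_unmap_fast(domain, iova, size, &gather);
	/* One range invalidation under data->tlb_lock via iotlb_sync() */
	iommu_tlb_sync(domain, &gather);
}
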
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index fc0f16eabacd..ea949a324e33 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -57,7 +57,7 @@ struct mtk_iommu_data {
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group;
bool enable_4GB;
- bool tlb_flush_active;
+ spinlock_t tlb_lock; /* lock for tlb range flush */
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b5efd6dac953..e93b94ecac45 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 614a93aa5305..026ad2b29dcd 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -8,6 +8,8 @@
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 09c6e1c680db..be551cc34be4 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c31e7bc4ccbe..52f38292df5b 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -284,9 +284,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* MAIRs (stage-1 only) */
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
- pgtbl_cfg.arm_lpae_s1_cfg.mair[0]);
+ pgtbl_cfg.arm_lpae_s1_cfg.mair);
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
- pgtbl_cfg.arm_lpae_s1_cfg.mair[1]);
+ pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
/* SCTLR */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
@@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
unsigned long flags;
@@ -539,8 +539,8 @@ static int qcom_iommu_add_device(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (IS_ERR_OR_NULL(group))
- return PTR_ERR_OR_ZERO(group);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
iommu_group_put(group);
iommu_device_link(&qcom_iommu->iommu, dev);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4dcbf68dfda4..b33cdd5aad81 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -527,7 +527,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
int i, err;
err = pm_runtime_get_if_in_use(iommu->dev);
- if (WARN_ON_ONCE(err <= 0))
+ if (!err || WARN_ON_ONCE(err < 0))
return ret;
if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
@@ -758,7 +758,7 @@ unwind:
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!dma_dev)
return NULL;
- rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
return NULL;
if (type == IOMMU_DOMAIN_DMA &&
iommu_get_dma_cookie(&rk_domain->domain))
- return NULL;
+ goto err_free_domain;
/*
* rk32xx iommus use a 2 level pagetable.
@@ -1021,6 +1021,8 @@ err_free_dt:
err_put_cookie:
if (type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+err_free_domain:
+ kfree(rk_domain);
return NULL;
}
@@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (domain->type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+ kfree(rk_domain);
}
static int rk_iommu_add_device(struct device *dev)
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3b0b18e23187..1137f3ddcb85 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -265,7 +265,7 @@ undo_cpu_trans:
}
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
int flags = ZPCI_PTE_VALID, rc = 0;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 3924f7c05544..3fb7ba72507d 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct gart_device *gart = gart_handle;
int ret;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 7293fc3f796d..63a147b623e6 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}
-static dma_addr_t smmu_pde_to_dma(u32 pde)
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
- return pde << 12;
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -240,7 +240,7 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
static inline void smmu_flush(struct tegra_smmu *smmu)
{
- smmu_readl(smmu, SMMU_CONFIG);
+ smmu_readl(smmu, SMMU_PTB_ASID);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
@@ -351,6 +351,20 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
unsigned int i;
u32 value;
+ group = tegra_smmu_find_swgroup(smmu, swgroup);
+ if (group) {
+ value = smmu_readl(smmu, group->reg);
+ value &= ~SMMU_ASID_MASK;
+ value |= SMMU_ASID_VALUE(asid);
+ value |= SMMU_ASID_ENABLE;
+ smmu_writel(smmu, value, group->reg);
+ } else {
+ pr_warn("%s group from swgroup %u not found\n", __func__,
+ swgroup);
+ /* No point moving ahead if group was not found */
+ return;
+ }
+
for (i = 0; i < smmu->soc->num_clients; i++) {
const struct tegra_mc_client *client = &smmu->soc->clients[i];
@@ -361,15 +375,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
value |= BIT(client->smmu.bit);
smmu_writel(smmu, value, client->smmu.reg);
}
-
- group = tegra_smmu_find_swgroup(smmu, swgroup);
- if (group) {
- value = smmu_readl(smmu, group->reg);
- value &= ~SMMU_ASID_MASK;
- value |= SMMU_ASID_VALUE(asid);
- value |= SMMU_ASID_ENABLE;
- smmu_writel(smmu, value, group->reg);
- }
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
@@ -549,6 +554,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
struct page *pt_page;
u32 *pd;
@@ -557,7 +563,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL;
pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
return tegra_smmu_pte_offset(pt_page, iova);
}
@@ -599,7 +605,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else {
u32 *pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -624,7 +630,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
tegra_smmu_set_pde(as, iova, 0);
@@ -650,7 +656,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 3ea9d7682999..315c7cc4f99d 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -153,7 +153,6 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
*/
static int __viommu_sync_req(struct viommu_dev *viommu)
{
- int ret = 0;
unsigned int len;
size_t write_len;
struct viommu_request *req;
@@ -182,7 +181,7 @@ static int __viommu_sync_req(struct viommu_dev *viommu)
kfree(req);
}
- return ret;
+ return 0;
}
static int viommu_sync_req(struct viommu_dev *viommu)
@@ -713,7 +712,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
u32 flags;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index ccbb8973a324..697e6a8ccaae 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -370,6 +370,10 @@ config MVEBU_PIC
config MVEBU_SEI
bool
+config LS_EXTIRQ
+ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ select MFD_SYSCON
+
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
@@ -483,8 +487,6 @@ config TI_SCI_INTA_IRQCHIP
If you wish to use interrupt aggregator irq resources managed by the
TI System Controller, say Y here. Otherwise, say N.
-endmenu
-
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
@@ -496,3 +498,5 @@ config SIFIVE_PLIC
interrupt sources are subordinate to the PLIC.
If you don't know what to do here, say Y.
+
+endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index cc7c43932f16..e806dda690ea 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
+obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index fc75c61233aa..cbf01afcd2a6 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/syscore_ops.h>
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
@@ -39,6 +40,11 @@ struct bcm7038_l1_chip {
unsigned int n_words;
struct irq_domain *domain;
struct bcm7038_l1_cpu *cpus[NR_CPUS];
+#ifdef CONFIG_PM_SLEEP
+ struct list_head list;
+ u32 wake_mask[MAX_WORDS];
+#endif
+ u32 irq_fwd_mask[MAX_WORDS];
u8 affinity[MAX_WORDS * IRQS_PER_WORD];
};
@@ -249,6 +255,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
resource_size_t sz;
struct bcm7038_l1_cpu *cpu;
unsigned int i, n_words, parent_irq;
+ int ret;
if (of_address_to_resource(dn, idx, &res))
return -EINVAL;
@@ -262,6 +269,14 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
else if (intc->n_words != n_words)
return -EINVAL;
+ ret = of_property_read_u32_array(dn , "brcm,int-fwd-mask",
+ intc->irq_fwd_mask, n_words);
+ if (ret != 0 && ret != -EINVAL) {
+ /* property exists but has the wrong number of words */
+ pr_err("invalid brcm,int-fwd-mask property\n");
+ return -EINVAL;
+ }
+
cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
GFP_KERNEL);
if (!cpu)
@@ -272,8 +287,11 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
return -ENOMEM;
for (i = 0; i < n_words; i++) {
- l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i));
- cpu->mask_cache[i] = 0xffffffff;
+ l1_writel(~intc->irq_fwd_mask[i],
+ cpu->map_base + reg_mask_set(intc, i));
+ l1_writel(intc->irq_fwd_mask[i],
+ cpu->map_base + reg_mask_clr(intc, i));
+ cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
}
parent_irq = irq_of_parse_and_map(dn, idx);
@@ -281,12 +299,89 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
+
+ if (of_property_read_bool(dn, "brcm,irq-can-wake"))
+ enable_irq_wake(parent_irq);
+
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
intc);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+/*
+ * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
+ * used because the struct chip_type suspend/resume hooks are not called
+ * unless chip_type is hooked onto a generic_chip. Since this driver does
+ * not use generic_chip, we need to manually hook our resume/suspend to
+ * syscore_ops.
+ */
+static LIST_HEAD(bcm7038_l1_intcs_list);
+static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
+
+static int bcm7038_l1_suspend(void)
+{
+ struct bcm7038_l1_chip *intc;
+ int boot_cpu, word;
+ u32 val;
+
+ /* Wakeup interrupt should only come from the boot cpu */
+ boot_cpu = cpu_logical_map(0);
+
+ list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
+ for (word = 0; word < intc->n_words; word++) {
+ val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
+ l1_writel(~val,
+ intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
+ l1_writel(val,
+ intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
+ }
+ }
+
+ return 0;
+}
+
+static void bcm7038_l1_resume(void)
+{
+ struct bcm7038_l1_chip *intc;
+ int boot_cpu, word;
+
+ boot_cpu = cpu_logical_map(0);
+
+ list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
+ for (word = 0; word < intc->n_words; word++) {
+ l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
+ intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
+ l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
+ intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
+ }
+ }
+}
+
+static struct syscore_ops bcm7038_l1_syscore_ops = {
+ .suspend = bcm7038_l1_suspend,
+ .resume = bcm7038_l1_resume,
+};
+
+static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ if (on)
+ intc->wake_mask[word] |= mask;
+ else
+ intc->wake_mask[word] &= ~mask;
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ return 0;
+}
+#endif
+
static struct irq_chip bcm7038_l1_irq_chip = {
.name = "bcm7038-l1",
.irq_mask = bcm7038_l1_mask,
@@ -295,11 +390,21 @@ static struct irq_chip bcm7038_l1_irq_chip = {
#ifdef CONFIG_SMP
.irq_cpu_offline = bcm7038_l1_cpu_offline,
#endif
+#ifdef CONFIG_PM_SLEEP
+ .irq_set_wake = bcm7038_l1_set_wake,
+#endif
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq)
{
+ struct bcm7038_l1_chip *intc = d->host_data;
+ u32 mask = BIT(hw_irq % IRQS_PER_WORD);
+ u32 word = hw_irq / IRQS_PER_WORD;
+
+ if (intc->irq_fwd_mask[word] & mask)
+ return -EPERM;
+
irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
irq_set_chip_data(virq, d->host_data);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
@@ -340,6 +445,16 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
goto out_unmap;
}
+#ifdef CONFIG_PM_SLEEP
+ /* Add this bcm7038_l1_chip to the list used for suspend/resume */
+ raw_spin_lock(&bcm7038_l1_intcs_lock);
+ list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
+ raw_spin_unlock(&bcm7038_l1_intcs_lock);
+
+ if (list_is_singular(&bcm7038_l1_intcs_list))
+ register_syscore_ops(&bcm7038_l1_syscore_ops);
+#endif
+
pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
dn, IRQS_PER_WORD * intc->n_words);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index e88e75c22b6a..fbec07d634ad 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 229d586c3d7a..87711e0f8014 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -5,6 +5,7 @@
*/
#include <linux/acpi_iort.h>
+#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 787e8eec9a7f..e05673bcd52b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
@@ -102,20 +103,21 @@ struct its_node {
struct its_collection *collections;
struct fwnode_handle *fwnode_handle;
u64 (*get_msi_base)(struct its_device *its_dev);
+ u64 typer;
u64 cbaser_save;
u32 ctlr_save;
struct list_head its_device_list;
u64 flags;
unsigned long list_nr;
- u32 ite_size;
- u32 device_ids;
int numa_node;
unsigned int msi_domain_flags;
u32 pre_its_base; /* for Socionext Synquacer */
- bool is_v4;
int vlpi_redist_offset;
};
+#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
+#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
+
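/*
 * Editor's illustrative sketch (not part of this patch): both GITS_TYPER
 * fields consumed above are encoded as "value minus one", which is why
 * device_ids() adds 1 and why the quirks further down program them with
 * FIELD_PREP(..., n - 1).
 */
static inline void its_typer_fields_example(struct its_node *its)
{
	pr_debug("ITS: %llu DeviceID bits, %llu-byte ITT entries\n",
		 FIELD_GET(GITS_TYPER_DEVBITS, its->typer) + 1,
		 FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
}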
#define ITS_ITT_ALIGN SZ_256
/* The maximum number of VPEID bits supported by VLPI commands */
@@ -130,7 +132,7 @@ struct event_lpi_map {
u16 *col_map;
irq_hw_number_t lpi_base;
int nr_lpis;
- struct mutex vlpi_lock;
+ raw_spinlock_t vlpi_lock;
struct its_vm *vm;
struct its_vlpi_map *vlpi_maps;
int nr_vlpis;
@@ -181,7 +183,7 @@ static u16 get_its_list(struct its_vm *vm)
unsigned long its_list = 0;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (vm->vlpi_count[its->list_nr])
@@ -191,6 +193,12 @@ static u16 get_its_list(struct its_vm *vm)
return (u16)its_list;
}
+static inline u32 its_get_event_id(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ return d->hwirq - its_dev->event_map.lpi_base;
+}
+
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
u32 event)
{
@@ -199,6 +207,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
return its->collections + its_dev->event_map.col_map[event];
}
+static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
+ u32 event)
+{
+ if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
+ return NULL;
+
+ return &its_dev->event_map.vlpi_maps[event];
+}
+
+static struct its_collection *irq_to_col(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ return dev_event_to_col(its_dev, its_get_event_id(d));
+}
+
static struct its_collection *valid_col(struct its_collection *col)
{
if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
@@ -305,7 +329,10 @@ struct its_cmd_desc {
* The ITS command block, which is what the ITS actually parses.
*/
struct its_cmd_block {
- u64 raw_cmd[4];
+ union {
+ u64 raw_cmd[4];
+ __le64 raw_cmd_le[4];
+ };
};
#define ITS_CMD_QUEUE_SZ SZ_64K
@@ -414,10 +441,10 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
- cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
- cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
- cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
- cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
+ cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
+ cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
+ cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
+ cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
@@ -676,6 +703,60 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}
+static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
+ desc->its_inv_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INV);
+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_vint_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
+ desc->its_int_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INT);
+ its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
+ desc->its_clear_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_CLEAR);
+ its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -953,7 +1034,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
static void its_send_vmapti(struct its_device *dev, u32 id)
{
- struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+ struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
struct its_cmd_desc desc;
desc.its_vmapti_cmd.vpe = map->vpe;
@@ -967,7 +1048,7 @@ static void its_send_vmapti(struct its_device *dev, u32 id)
static void its_send_vmovi(struct its_device *dev, u32 id)
{
- struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+ struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
struct its_cmd_desc desc;
desc.its_vmovi_cmd.vpe = map->vpe;
@@ -1021,7 +1102,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
/* Emit VMOVPs */
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (!vpe->its_vm->vlpi_count[its->list_nr])
@@ -1042,29 +1123,71 @@ static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
+static void its_send_vinv(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VINV command. This is just a normal INV,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_inv_cmd.dev = dev;
+ desc.its_inv_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
+}
+
+static void its_send_vint(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VINT command. This is just a normal INT,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_int_cmd.dev = dev;
+ desc.its_int_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
+}
+
+static void its_send_vclear(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VCLEAR command. This is just a normal CLEAR,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_clear_cmd.dev = dev;
+ desc.its_clear_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
+}
+
/*
* irqchip functions - assumes MSI, mostly.
*/
-
-static inline u32 its_get_event_id(struct irq_data *d)
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- return d->hwirq - its_dev->event_map.lpi_base;
+ u32 event = its_get_event_id(d);
+
+ if (!irqd_is_forwarded_to_vcpu(d))
+ return NULL;
+
+ return dev_event_to_vlpi_map(its_dev, event);
}
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
+ struct its_vlpi_map *map = get_vlpi_map(d);
irq_hw_number_t hwirq;
void *va;
u8 *cfg;
- if (irqd_is_forwarded_to_vcpu(d)) {
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
- struct its_vlpi_map *map;
-
- va = page_address(its_dev->event_map.vm->vprop_page);
- map = &its_dev->event_map.vlpi_maps[event];
+ if (map) {
+ va = page_address(map->vm->vprop_page);
hwirq = map->vintid;
/* Remember the updated property */
@@ -1090,23 +1213,50 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
dsb(ishst);
}
+static void wait_for_syncr(void __iomem *rdbase)
+{
+ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+ cpu_relax();
+}
+
+static void direct_lpi_inv(struct irq_data *d)
+{
+ struct its_collection *col;
+ void __iomem *rdbase;
+
+ /* Target the redistributor this LPI is currently routed to */
+ col = irq_to_col(d);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
+ gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
+
+ wait_for_syncr(rdbase);
+}
+
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
lpi_write_config(d, clr, set);
- its_send_inv(its_dev, its_get_event_id(d));
+ if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
+ direct_lpi_inv(d);
+ else if (!irqd_is_forwarded_to_vcpu(d))
+ its_send_inv(its_dev, its_get_event_id(d));
+ else
+ its_send_vinv(its_dev, its_get_event_id(d));
}
static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
- if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+ map = dev_event_to_vlpi_map(its_dev, event);
+
+ if (map->db_enabled == enable)
return;
- its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+ map->db_enabled = enable;
/*
* More fun with the architecture:
@@ -1208,10 +1358,17 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
- if (state)
- its_send_int(its_dev, event);
- else
- its_send_clear(its_dev, event);
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ if (state)
+ its_send_vint(its_dev, event);
+ else
+ its_send_vclear(its_dev, event);
+ } else {
+ if (state)
+ its_send_int(its_dev, event);
+ else
+ its_send_clear(its_dev, event);
+ }
return 0;
}
@@ -1279,13 +1436,13 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
if (!info->map)
return -EINVAL;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!maps) {
ret = -ENOMEM;
goto out;
@@ -1328,29 +1485,30 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
}
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
int ret = 0;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
+
+ map = get_vlpi_map(d);
- if (!its_dev->event_map.vm ||
- !its_dev->event_map.vlpi_maps[event].vm) {
+ if (!its_dev->event_map.vm || !map) {
ret = -EINVAL;
goto out;
}
/* Copy our mapping information to the incoming request */
- *info->map = its_dev->event_map.vlpi_maps[event];
+ *info->map = *map;
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
@@ -1360,7 +1518,7 @@ static int its_vlpi_unmap(struct irq_data *d)
u32 event = its_get_event_id(d);
int ret = 0;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
ret = -EINVAL;
@@ -1390,7 +1548,7 @@ static int its_vlpi_unmap(struct irq_data *d)
}
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
@@ -1416,7 +1574,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
struct its_cmd_info *info = vcpu_info;
/* Need a v4 ITS */
- if (!its_dev->its->is_v4)
+ if (!is_v4(its_dev->its))
return -EINVAL;
/* Unmap request? */
@@ -1922,9 +2080,9 @@ static bool its_parse_indirect_baser(struct its_node *its,
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
- pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
+ pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
&its->phys_base, its_base_type_string[type],
- its->device_ids, ids);
+ device_ids(its), ids);
}
*order = new_order;
@@ -1970,7 +2128,7 @@ static int its_alloc_tables(struct its_node *its)
case GITS_BASER_TYPE_DEVICE:
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
- its->device_ids);
+ device_ids(its));
break;
case GITS_BASER_TYPE_VCPU:
@@ -2361,7 +2519,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
/* Don't allow device id that exceeds ITS hardware limit */
if (!baser)
- return (ilog2(dev_id) < its->device_ids);
+ return (ilog2(dev_id) < device_ids(its));
return its_alloc_table_entry(its, baser, dev_id);
}
@@ -2380,7 +2538,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
list_for_each_entry(its, &its_nodes, entry) {
struct its_baser *baser;
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
@@ -2419,7 +2577,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
* sized as a power of two (and you need at least one bit...).
*/
nr_ites = max(2, nvecs);
- sz = nr_ites * its->ite_size;
+ sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) {
@@ -2450,7 +2608,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->event_map.col_map = col_map;
dev->event_map.lpi_base = lpi_base;
dev->event_map.nr_lpis = nr_lpis;
- mutex_init(&dev->event_map.vlpi_lock);
+ raw_spin_lock_init(&dev->event_map.vlpi_lock);
dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry);
@@ -2471,6 +2629,7 @@ static void its_free_device(struct its_device *its_dev)
raw_spin_lock_irqsave(&its_dev->its->lock, flags);
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
+ kfree(its_dev->event_map.col_map);
kfree(its_dev->itt);
kfree(its_dev);
}
@@ -2679,7 +2838,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
its_lpi_free(its_dev->event_map.lpi_map,
its_dev->event_map.lpi_base,
its_dev->event_map.nr_lpis);
- kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
its_send_mapd(its_dev, 0);
@@ -2772,8 +2930,7 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ wait_for_syncr(rdbase);
return;
}
@@ -2869,7 +3026,7 @@ static void its_vpe_invall(struct its_vpe *vpe)
struct its_node *its;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
@@ -2927,10 +3084,10 @@ static void its_vpe_send_inv(struct irq_data *d)
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
+ /* Target the redistributor this VPE is currently known on */
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
- gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
+ wait_for_syncr(rdbase);
} else {
its_vpe_send_cmd(vpe, its_send_inv);
}
@@ -2972,8 +3129,7 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
} else {
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ wait_for_syncr(rdbase);
}
} else {
if (state)
@@ -3138,7 +3294,7 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
vpe->col_idx = cpumask_first(cpu_online_mask);
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
its_send_vmapp(its, vpe, true);
@@ -3164,7 +3320,7 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
return;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
its_send_vmapp(its, vpe, false);
@@ -3215,8 +3371,9 @@ static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
struct its_node *its = data;
- /* erratum 22375: only alloc 8MB table size */
- its->device_ids = 0x14; /* 20 bits, 8MB */
+ /* erratum 22375: only alloc 8MB table size (20 bits) */
+ its->typer &= ~GITS_TYPER_DEVBITS;
+ its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
return true;
@@ -3236,7 +3393,8 @@ static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
struct its_node *its = data;
/* On QDF2400, the size of the ITE is 16Bytes */
- its->ite_size = 16;
+ its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
+ its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
return true;
}
@@ -3270,8 +3428,10 @@ static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
its->get_msi_base = its_irq_get_msi_base_pre_its;
ids = ilog2(pre_its_window[1]) - 2;
- if (its->device_ids > ids)
- its->device_ids = ids;
+ if (device_ids(its) > ids) {
+ its->typer &= ~GITS_TYPER_DEVBITS;
+ its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
+ }
/* the pre-ITS breaks isolation, so disable MSI remapping */
its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
@@ -3504,7 +3664,7 @@ static int its_init_vpe_domain(void)
}
/* Use the last possible DevID */
- devid = GENMASK(its->device_ids - 1, 0);
+ devid = GENMASK(device_ids(its) - 1, 0);
vpe_proxy.dev = its_create_device(its, devid, entries, false);
if (!vpe_proxy.dev) {
kfree(vpe_proxy.vpes);
@@ -3602,12 +3762,10 @@ static int __init its_probe_one(struct resource *res,
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
typer = gic_read_typer(its_base + GITS_TYPER);
+ its->typer = typer;
its->base = its_base;
its->phys_base = res->start;
- its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
- its->device_ids = GITS_TYPER_DEVBITS(typer);
- its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
- if (its->is_v4) {
+ if (is_v4(its)) {
if (!(typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(res, its_base);
if (err < 0)
@@ -3674,7 +3832,7 @@ static int __init its_probe_one(struct resource *res,
gits_write_cwriter(0, its->base + GITS_CWRITER);
ctlr = readl_relaxed(its->base + GITS_CTLR);
ctlr |= GITS_CTLR_ENABLE;
- if (its->is_v4)
+ if (is_v4(its))
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
@@ -3999,7 +4157,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return err;
list_for_each_entry(its, &its_nodes, entry)
- has_v4 |= its->is_v4;
+ has_v4 |= is_v4(its);
if (has_v4 & rdists->has_vlpis) {
if (its_init_vpe_domain() ||
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6bb1f682f78b..d6218012097b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -183,7 +183,7 @@ static void gic_do_wait_for_rwp(void __iomem *base)
}
cpu_relax();
udelay(1);
- };
+ }
}
/* Wait for completion of a distributor change */
@@ -240,7 +240,7 @@ static void gic_enable_redist(bool enable)
break;
cpu_relax();
udelay(1);
- };
+ }
if (!count)
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index f126255b3260..01d18b39069e 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 platform IRQ support
+ * Ingenic XBurst platform IRQ support
*/
#include <linux/errno.h>
@@ -10,7 +10,6 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/ingenic.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/timex.h>
@@ -22,6 +21,7 @@
struct ingenic_intc_data {
void __iomem *base;
+ struct irq_domain *domain;
unsigned num_chips;
};
@@ -35,41 +35,30 @@ struct ingenic_intc_data {
static irqreturn_t intc_cascade(int irq, void *data)
{
struct ingenic_intc_data *intc = irq_get_handler_data(irq);
- uint32_t irq_reg;
+ struct irq_domain *domain = intc->domain;
+ struct irq_chip_generic *gc;
+ uint32_t pending;
unsigned i;
for (i = 0; i < intc->num_chips; i++) {
- irq_reg = readl(intc->base + (i * CHIP_SIZE) +
- JZ_REG_INTC_PENDING);
- if (!irq_reg)
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+
+ pending = irq_reg_readl(gc, JZ_REG_INTC_PENDING);
+ if (!pending)
continue;
- generic_handle_irq(__fls(irq_reg) + (i * 32) + JZ4740_IRQ_BASE);
+ while (pending) {
+ int bit = __fls(pending);
+
+ irq = irq_find_mapping(domain, bit + (i * 32));
+ generic_handle_irq(irq);
+ pending &= ~BIT(bit);
+ }
}
return IRQ_HANDLED;
}
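/*
 * Editor's note (illustrative, not part of this patch): unlike the old
 * handler, which serviced only the highest pending bit per cascade, the
 * loop above drains every bit set in JZ_REG_INTC_PENDING before returning,
 * and it looks virqs up through the irq domain instead of assuming the
 * fixed JZ4740_IRQ_BASE offset.
 */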
-static void intc_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask)
-{
- struct irq_chip_regs *regs = &gc->chip_types->regs;
-
- writel(mask, gc->reg_base + regs->enable);
- writel(~mask, gc->reg_base + regs->disable);
-}
-
-void ingenic_intc_irq_suspend(struct irq_data *data)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- intc_irq_set_mask(gc, gc->wake_active);
-}
-
-void ingenic_intc_irq_resume(struct irq_data *data)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- intc_irq_set_mask(gc, gc->mask_cache);
-}
-
static struct irqaction intc_cascade_action = {
.handler = intc_cascade,
.name = "SoC intc cascade interrupt",
@@ -108,17 +97,27 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
- for (i = 0; i < num_chips; i++) {
- /* Mask all irqs */
- writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
- JZ_REG_INTC_SET_MASK);
+ domain = irq_domain_add_legacy(node, num_chips * 32,
+ JZ4740_IRQ_BASE, 0,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ err = -ENOMEM;
+ goto out_unmap_base;
+ }
- gc = irq_alloc_generic_chip("INTC", 1,
- JZ4740_IRQ_BASE + (i * 32),
- intc->base + (i * CHIP_SIZE),
- handle_level_irq);
+ intc->domain = domain;
+
+ err = irq_alloc_domain_generic_chips(domain, 32, 1, "INTC",
+ handle_level_irq, 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+ if (err)
+ goto out_domain_remove;
+
+ for (i = 0; i < num_chips; i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
gc->wake_enabled = IRQ_MSK(32);
+ gc->reg_base = intc->base + (i * CHIP_SIZE);
ct = gc->chip_types;
ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
@@ -127,21 +126,19 @@ static int __init ingenic_intc_of_init(struct device_node *node,
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
ct->chip.irq_set_wake = irq_gc_set_wake;
- ct->chip.irq_suspend = ingenic_intc_irq_suspend;
- ct->chip.irq_resume = ingenic_intc_irq_resume;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
- irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0,
- IRQ_NOPROBE | IRQ_LEVEL);
+ /* Mask all irqs */
+ irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
- domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
- &irq_domain_simple_ops, NULL);
- if (!domain)
- pr_warn("unable to register IRQ domain\n");
-
setup_irq(parent_irq, &intc_cascade_action);
return 0;
+out_domain_remove:
+ irq_domain_remove(domain);
+out_unmap_base:
+ iounmap(intc->base);
out_unmap_irq:
irq_dispose_mapping(parent_irq);
out_free:
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
new file mode 100644
index 000000000000..4d1179fed77c
--- /dev/null
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "irq-ls-extirq: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define MAXIRQ 12
+#define LS1021A_SCFGREVCR 0x200
+
+struct ls_extirq_data {
+ struct regmap *syscon;
+ u32 intpcr;
+ bool bit_reverse;
+ u32 nirq;
+ struct irq_fwspec map[MAXIRQ];
+};
+
+static int
+ls_extirq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct ls_extirq_data *priv = data->chip_data;
+ irq_hw_number_t hwirq = data->hwirq;
+ u32 value, mask;
+
+ if (priv->bit_reverse)
+ mask = 1U << (31 - hwirq);
+ else
+ mask = 1U << hwirq;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ value = mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ value = mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_EDGE_RISING:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ regmap_update_bits(priv->syscon, priv->intpcr, mask, value);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
+static struct irq_chip ls_extirq_chip = {
+ .name = "ls-extirq",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = ls_extirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int
+ls_extirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct ls_extirq_data *priv = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ hwirq = fwspec->param[0];
+ if (hwirq >= priv->nirq)
+ return -EINVAL;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &ls_extirq_chip,
+ priv);
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &priv->map[hwirq]);
+}
+
+static const struct irq_domain_ops extirq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .alloc = ls_extirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int
+ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
+{
+ const __be32 *map;
+ u32 mapsize;
+ int ret;
+
+ map = of_get_property(node, "interrupt-map", &mapsize);
+ if (!map)
+ return -ENOENT;
+ if (mapsize % sizeof(*map))
+ return -EINVAL;
+ mapsize /= sizeof(*map);
+
+ while (mapsize) {
+ struct device_node *ipar;
+ u32 hwirq, intsize, j;
+
+ if (mapsize < 3)
+ return -EINVAL;
+ hwirq = be32_to_cpup(map);
+ if (hwirq >= MAXIRQ)
+ return -EINVAL;
+ priv->nirq = max(priv->nirq, hwirq + 1);
+
+ ipar = of_find_node_by_phandle(be32_to_cpup(map + 2));
+ map += 3;
+ mapsize -= 3;
+ if (!ipar)
+ return -EINVAL;
+ priv->map[hwirq].fwnode = &ipar->fwnode;
+ ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
+ if (ret)
+ return ret;
+
+ if (intsize > mapsize)
+ return -EINVAL;
+
+ priv->map[hwirq].param_count = intsize;
+ for (j = 0; j < intsize; ++j)
+ priv->map[hwirq].param[j] = be32_to_cpup(map++);
+ mapsize -= intsize;
+ }
+ return 0;
+}
+
+static int __init
+ls_extirq_of_init(struct device_node *node, struct device_node *parent)
+{
+
+ struct irq_domain *domain, *parent_domain;
+ struct ls_extirq_data *priv;
+ int ret;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Cannot find parent domain\n");
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->syscon = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(priv->syscon)) {
+ ret = PTR_ERR(priv->syscon);
+ pr_err("Failed to lookup parent regmap\n");
+ goto out;
+ }
+ ret = of_property_read_u32(node, "reg", &priv->intpcr);
+ if (ret) {
+ pr_err("Missing INTPCR offset value\n");
+ goto out;
+ }
+
+ ret = ls_extirq_parse_map(priv, node);
+ if (ret)
+ goto out;
+
+ if (of_device_is_compatible(node, "fsl,ls1021a-extirq")) {
+ u32 revcr;
+
+ ret = regmap_read(priv->syscon, LS1021A_SCFGREVCR, &revcr);
+ if (ret)
+ goto out;
+ priv->bit_reverse = (revcr != 0);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
+ &extirq_domain_ops, priv);
+ if (!domain)
+ ret = -ENOMEM;
+
+out:
+ if (ret)
+ kfree(priv);
+ return ret;
+}
+
+IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 7d0a12fe2714..8df547d2d935 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -181,7 +181,7 @@ static void plic_handle_irq(struct pt_regs *regs)
WARN_ON_ONCE(!handler->present);
- csr_clear(sie, SIE_SEIE);
+ csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(plic_irqdomain, hwirq);
@@ -191,7 +191,7 @@ static void plic_handle_irq(struct pt_regs *regs)
else
generic_handle_irq(irq);
}
- csr_set(sie, SIE_SEIE);
+ csr_set(CSR_IE, IE_EIE);
}
/*
@@ -252,8 +252,11 @@ static int __init plic_init(struct device_node *node,
continue;
}
- /* skip contexts other than supervisor external interrupt */
- if (parent.args[0] != IRQ_S_EXT)
+ /*
+ * Skip contexts other than external interrupts for our
+ * privilege level.
+ */
+ if (parent.args[0] != IRQ_EXT)
continue;
hartid = plic_find_hart_id(parent.np);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index ef4d625d2d80..8f6e6b08eadf 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -246,8 +246,8 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *d
/* No free bits available. Allocate a new vint */
vint_desc = ti_sci_inta_alloc_parent_irq(domain);
if (IS_ERR(vint_desc)) {
- mutex_unlock(&inta->vint_mutex);
- return ERR_PTR(PTR_ERR(vint_desc));
+ event_desc = ERR_CAST(vint_desc);
+ goto unlock;
}
free_bit = find_first_zero_bit(vint_desc->event_map,
@@ -259,6 +259,7 @@ alloc_event:
if (IS_ERR(event_desc))
clear_bit(free_bit, vint_desc->event_map);
+unlock:
mutex_unlock(&inta->vint_mutex);
return event_desc;
}
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 5a7efeb3892d..84163f1ebfcf 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -51,7 +51,7 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
while (readl(zevio_irq_io + IO_STATUS)) {
irqnr = readl(zevio_irq_io + IO_CURRENT);
handle_domain_irq(zevio_irq_domain, irqnr, regs);
- };
+ }
}
static void __init zevio_init_irq_base(void __iomem *base)
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index faa7d61b9d6c..6ae9e1f0819d 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
@@ -13,12 +14,13 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
-#define PDC_MAX_IRQS 126
+#define PDC_MAX_IRQS 168
+#define PDC_MAX_GPIO_IRQS 256
#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
@@ -26,6 +28,8 @@
#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110
+#define PDC_NO_PARENT_IRQ ~0UL
+
struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
@@ -47,6 +51,26 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
+static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ return irq_chip_get_parent_state(d, which, state);
+}
+
+static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool value)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ return irq_chip_set_parent_state(d, which, value);
+}
+
static void pdc_enable_intr(struct irq_data *d, bool on)
{
int pin_out = d->hwirq;
@@ -63,15 +87,37 @@ static void pdc_enable_intr(struct irq_data *d, bool on)
raw_spin_unlock(&pdc_lock);
}
-static void qcom_pdc_gic_mask(struct irq_data *d)
+static void qcom_pdc_gic_disable(struct irq_data *d)
{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
pdc_enable_intr(d, false);
+ irq_chip_disable_parent(d);
+}
+
+static void qcom_pdc_gic_enable(struct irq_data *d)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
+ pdc_enable_intr(d, true);
+ irq_chip_enable_parent(d);
+}
+
+static void qcom_pdc_gic_mask(struct irq_data *d)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
irq_chip_mask_parent(d);
}
static void qcom_pdc_gic_unmask(struct irq_data *d)
{
- pdc_enable_intr(d, true);
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
irq_chip_unmask_parent(d);
}
@@ -114,6 +160,9 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
int pin_out = d->hwirq;
enum pdc_irq_config_bits pdc_type;
+ if (pin_out == GPIO_NO_WAKE_IRQ)
+ return 0;
+
switch (type) {
case IRQ_TYPE_EDGE_RISING:
pdc_type = PDC_EDGE_RISING;
@@ -148,6 +197,10 @@ static struct irq_chip qcom_pdc_gic_chip = {
.irq_eoi = irq_chip_eoi_parent,
.irq_mask = qcom_pdc_gic_mask,
.irq_unmask = qcom_pdc_gic_unmask,
+ .irq_disable = qcom_pdc_gic_disable,
+ .irq_enable = qcom_pdc_gic_enable,
+ .irq_get_irqchip_state = qcom_pdc_gic_get_irqchip_state,
+ .irq_set_irqchip_state = qcom_pdc_gic_set_irqchip_state,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = qcom_pdc_gic_set_type,
.flags = IRQCHIP_MASK_ON_SUSPEND |
@@ -169,8 +222,7 @@ static irq_hw_number_t get_parent_hwirq(int pin)
return (region->parent_base + pin - region->pin_base);
}
- WARN_ON(1);
- return ~0UL;
+ return PDC_NO_PARENT_IRQ;
}
static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
@@ -199,17 +251,17 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
if (ret)
- return -EINVAL;
-
- parent_hwirq = get_parent_hwirq(hwirq);
- if (parent_hwirq == ~0UL)
- return -EINVAL;
+ return ret;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&qcom_pdc_gic_chip, NULL);
if (ret)
return ret;
+ parent_hwirq = get_parent_hwirq(hwirq);
+ if (parent_hwirq == PDC_NO_PARENT_IRQ)
+ return 0;
+
if (type & IRQ_TYPE_EDGE_BOTH)
type = IRQ_TYPE_EDGE_RISING;
@@ -232,6 +284,60 @@ static const struct irq_domain_ops qcom_pdc_ops = {
.free = irq_domain_free_irqs_common,
};
+static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq, parent_hwirq;
+ unsigned int type;
+ int ret;
+
+ ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &qcom_pdc_gic_chip, NULL);
+ if (ret)
+ return ret;
+
+ if (hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ parent_hwirq = get_parent_hwirq(hwirq);
+ if (parent_hwirq == PDC_NO_PARENT_IRQ)
+ return 0;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = parent_hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ return bus_token == DOMAIN_BUS_WAKEUP;
+}
+
+static const struct irq_domain_ops qcom_pdc_gpio_ops = {
+ .select = qcom_pdc_gpio_domain_select,
+ .alloc = qcom_pdc_gpio_alloc,
+ .free = irq_domain_free_irqs_common,
+};
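/*
 * Editor's illustrative sketch (assumption, not part of this patch): a
 * pinctrl/GPIO driver would find this wakeup-capable domain by bus token
 * rather than by a specific compatible, e.g.:
 */
static struct irq_domain *pdc_wakeup_domain_lookup_example(struct irq_fwspec *fwspec)
{
	return irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WAKEUP);
}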
+
static int pdc_setup_pin_mapping(struct device_node *np)
{
int ret, n;
@@ -270,7 +376,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
- struct irq_domain *parent_domain, *pdc_domain;
+ struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
int ret;
pdc_base = of_iomap(node, 0);
@@ -301,12 +407,27 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
goto fail;
}
+ pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
+ IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
+ PDC_MAX_GPIO_IRQS,
+ of_fwnode_handle(node),
+ &qcom_pdc_gpio_ops, NULL);
+ if (!pdc_gpio_domain) {
+ pr_err("%pOF: PDC domain add failed for GPIO domain\n", node);
+ ret = -ENOMEM;
+ goto remove;
+ }
+
+ irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
+
return 0;
+remove:
+ irq_domain_remove(pdc_domain);
fail:
kfree(pdc_region);
iounmap(pdc_base);
return ret;
}
-IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init);
+IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ba8619524231..1675da34239b 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -950,6 +950,34 @@ capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
+#ifdef CONFIG_COMPAT
+static long
+capi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ if (cmd == CAPI_MANUFACTURER_CMD) {
+ struct {
+ compat_ulong_t cmd;
+ compat_uptr_t data;
+ } mcmd32;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&mcmd32, compat_ptr(arg), sizeof(mcmd32)))
+ return -EFAULT;
+
+ mutex_lock(&capi_mutex);
+ ret = capi20_manufacturer(mcmd32.cmd, compat_ptr(mcmd32.data));
+ mutex_unlock(&capi_mutex);
+
+ return ret;
+ }
+
+ return capi_unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static int capi_open(struct inode *inode, struct file *file)
{
struct capidev *cdev;
@@ -996,6 +1024,9 @@ static const struct file_operations capi_fops =
.write = capi_write,
.poll = capi_poll,
.unlocked_ioctl = capi_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = capi_compat_ioctl,
+#endif
.open = capi_open,
.release = capi_release,
};
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 1988de1d64c0..4b68520ac251 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -17,7 +17,7 @@ if NEW_LEDS
config LEDS_CLASS
tristate "LED Class Support"
help
- This option enables the led sysfs class in /sys/class/leds. You'll
+ This option enables the LED sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
config LEDS_CLASS_FLASH
@@ -35,7 +35,7 @@ config LEDS_BRIGHTNESS_HW_CHANGED
depends on LEDS_CLASS
help
This option enables support for the brightness_hw_changed attribute
- for led sysfs class devices under /sys/class/leds.
+ for LED sysfs class devices under /sys/class/leds.
See Documentation/ABI/testing/sysfs-class-led for details.
@@ -132,6 +132,19 @@ config LEDS_CR0014114
To compile this driver as a module, choose M here: the module
will be called leds-cr0014114.
+config LEDS_EL15203000
+ tristate "LED Support for Crane EL15203000"
+ depends on LEDS_CLASS
+ depends on SPI
+ depends on OF
+ help
+ This option enables support for EL15203000 LED Board
+ (aka RED LED board) which is widely used in coffee vending
+ machines produced by Crane Merchandising Systems.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-el15203000.
+
config LEDS_LM3530
tristate "LCD Backlight driver for LM3530"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 41fb073a39c1..2da39e896ce8 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
+obj-$(CONFIG_LEDS_EL15203000) += leds-el15203000.o
# LED Userspace Drivers
obj-$(CONFIG_LEDS_USER) += uleds.o
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index 60c3de5c6b9f..6eeb9effcf65 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -327,6 +327,56 @@ void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev)
}
EXPORT_SYMBOL_GPL(led_classdev_flash_unregister);
+static void devm_led_classdev_flash_release(struct device *dev, void *res)
+{
+ led_classdev_flash_unregister(*(struct led_classdev_flash **)res);
+}
+
+int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
+{
+ struct led_classdev_flash **dr;
+ int ret;
+
+ dr = devres_alloc(devm_led_classdev_flash_release, sizeof(*dr),
+ GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = led_classdev_flash_register_ext(parent, fled_cdev, init_data);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = fled_cdev;
+ devres_add(parent, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_led_classdev_flash_register_ext);
+
+static int devm_led_classdev_flash_match(struct device *dev,
+ void *res, void *data)
+{
+ struct led_classdev_flash **p = res;
+
+ if (WARN_ON(!p || !*p))
+ return 0;
+
+ return *p == data;
+}
+
+void devm_led_classdev_flash_unregister(struct device *dev,
+ struct led_classdev_flash *fled_cdev)
+{
+ WARN_ON(devres_release(dev,
+ devm_led_classdev_flash_release,
+ devm_led_classdev_flash_match, fled_cdev));
+}
+EXPORT_SYMBOL_GPL(devm_led_classdev_flash_unregister);
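/*
 * Editor's illustrative usage (not part of this patch): with the devres
 * variant a flash LED driver registers once in probe and drops its explicit
 * unregister call, as the lm3601x conversion later in this diff does:
 *
 *	ret = devm_led_classdev_flash_register_ext(&client->dev,
 *						    &led->fled_cdev, &init_data);
 */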
+
static void led_clamp_align(struct led_flash_setting *s)
{
u32 v, offset;
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 647b1263c579..438774315e6c 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -74,13 +74,13 @@ static ssize_t max_brightness_show(struct device *dev,
static DEVICE_ATTR_RO(max_brightness);
#ifdef CONFIG_LEDS_TRIGGERS
-static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
-static struct attribute *led_trigger_attrs[] = {
- &dev_attr_trigger.attr,
+static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
+static struct bin_attribute *led_trigger_bin_attrs[] = {
+ &bin_attr_trigger,
NULL,
};
static const struct attribute_group led_trigger_group = {
- .attrs = led_trigger_attrs,
+ .bin_attrs = led_trigger_bin_attrs,
};
#endif
@@ -403,7 +403,7 @@ EXPORT_SYMBOL_GPL(devm_led_classdev_register_ext);
static int devm_led_classdev_match(struct device *dev, void *res, void *data)
{
- struct led_cdev **p = res;
+ struct led_classdev **p = res;
if (WARN_ON(!p || !*p))
return 0;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 23963e5cb5d6..79e30d2cb7a5 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -16,6 +16,7 @@
#include <linux/rwsem.h>
#include <linux/leds.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include "leds.h"
/*
@@ -26,9 +27,11 @@ LIST_HEAD(trigger_list);
/* Used by LED Class */
-ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_trigger *trig;
int ret = count;
@@ -64,39 +67,82 @@ unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
-EXPORT_SYMBOL_GPL(led_trigger_store);
+EXPORT_SYMBOL_GPL(led_trigger_write);
-ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+__printf(3, 4)
+static int led_trigger_snprintf(char *buf, ssize_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ if (size <= 0)
+ i = vsnprintf(NULL, 0, fmt, args);
+ else
+ i = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return i;
+}
+
+static int led_trigger_format(char *buf, size_t size,
+ struct led_classdev *led_cdev)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_trigger *trig;
- int len = 0;
+ int len = led_trigger_snprintf(buf, size, "%s",
+ led_cdev->trigger ? "none" : "[none]");
+
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ bool hit = led_cdev->trigger &&
+ !strcmp(led_cdev->trigger->name, trig->name);
+
+ len += led_trigger_snprintf(buf + len, size - len,
+ " %s%s%s", hit ? "[" : "",
+ trig->name, hit ? "]" : "");
+ }
+
+ len += led_trigger_snprintf(buf + len, size - len, "\n");
+
+ return len;
+}
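/*
 * Editor's illustrative sketch (not part of this patch): a size-0 call only
 * measures, so callers can do a two-pass "measure then format" allocation,
 * exactly as led_trigger_read() does below:
 */
static char *led_trigger_format_alloc_example(struct led_classdev *led_cdev)
{
	int len = led_trigger_format(NULL, 0, led_cdev);	/* sizing pass */
	char *buf = kvmalloc(len + 1, GFP_KERNEL);

	if (buf)
		led_trigger_format(buf, len + 1, led_cdev);	/* real pass */

	return buf;
}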
+
+/*
+ * It was stupid to create 10000 cpu triggers, but we are stuck with it now.
+ * Don't make that mistake again. We work around it here by creating a
+ * binary attribute, which is not limited in length. This is _not_ good
+ * design, do not copy it.
+ */
+ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ void *data;
+ int len;
down_read(&triggers_list_lock);
down_read(&led_cdev->trigger_lock);
- if (!led_cdev->trigger)
- len += scnprintf(buf+len, PAGE_SIZE - len, "[none] ");
- else
- len += scnprintf(buf+len, PAGE_SIZE - len, "none ");
-
- list_for_each_entry(trig, &trigger_list, next_trig) {
- if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
- trig->name))
- len += scnprintf(buf+len, PAGE_SIZE - len, "[%s] ",
- trig->name);
- else
- len += scnprintf(buf+len, PAGE_SIZE - len, "%s ",
- trig->name);
+ len = led_trigger_format(NULL, 0, led_cdev);
+ data = kvmalloc(len + 1, GFP_KERNEL);
+ if (!data) {
+ up_read(&led_cdev->trigger_lock);
+ up_read(&triggers_list_lock);
+ return -ENOMEM;
}
+ len = led_trigger_format(data, len + 1, led_cdev);
+
up_read(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
- len += scnprintf(len+buf, PAGE_SIZE - len, "\n");
+ len = memory_read_from_buffer(buf, count, &pos, data, len);
+
+ kvfree(data);
+
return len;
}
-EXPORT_SYMBOL_GPL(led_trigger_show);
+EXPORT_SYMBOL_GPL(led_trigger_read);
/* Caller must ensure led_cdev->trigger_lock held */
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index 250dc9d6f635..82350a28a564 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -305,6 +305,13 @@ static int an30259a_probe(struct i2c_client *client)
chip->regmap = devm_regmap_init_i2c(client, &an30259a_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ err = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ err);
+ goto exit;
+ }
+
for (i = 0; i < chip->num_leds; i++) {
struct led_init_data init_data = {};
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index c50d34e2b098..42e1b7598c3a 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -346,16 +346,11 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
- struct resource *mem_r;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
unsigned long val, *blink_leds, *blink_delay;
- mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_r)
- return -EINVAL;
-
- mem = devm_ioremap_resource(dev, mem_r);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index aec285fd21c0..94fefd456ba0 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -151,17 +151,12 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
- struct resource *mem_r;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
unsigned long val;
u32 clk_div;
- mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_r)
- return -EINVAL;
-
- mem = devm_ioremap_resource(dev, mem_r);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c
new file mode 100644
index 000000000000..298b13e4807a
--- /dev/null
+++ b/drivers/leds/leds-el15203000.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Crane Merchandising Systems. All rights reserved.
+// Copyright (C) 2019 Oleh Kravchenko <oleg@kaa.org.ua>
+
+#include <linux/delay.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+/*
+ * EL15203000 SPI protocol description:
+ * +-----+---------+
+ * | LED | COMMAND |
+ * +-----+---------+
+ * | 1 | 1 |
+ * +-----+---------+
+ * (*) The LED MCU board expects a 20 ms delay per byte.
+ *
+ * LEDs:
+ * +----------+--------------+-------------------------------------------+
+ * | ID | NAME | DESCRIPTION |
+ * +----------+--------------+-------------------------------------------+
+ * | 'P' 0x50 | Pipe | Consists of 5 LEDs, controlled by board   |
+ * +----------+--------------+-------------------------------------------+
+ * | 'S' 0x53 | Screen frame | Light tube around the screen |
+ * +----------+--------------+-------------------------------------------+
+ * | 'V' 0x56 | Vending area | Highlights a cup of coffee |
+ * +----------+--------------+-------------------------------------------+
+ *
+ * COMMAND:
+ * +----------+-----------------+--------------+--------------+
+ * | VALUES | PIPE | SCREEN FRAME | VENDING AREA |
+ * +----------+-----------------+--------------+--------------+
+ * | '0' 0x30 | Off |
+ * +----------+-----------------------------------------------+
+ * | '1' 0x31 | On |
+ * +----------+-----------------+--------------+--------------+
+ * | '2' 0x32 | Cascade | Breathing |
+ * +----------+-----------------+--------------+
+ * | '3' 0x33 | Inverse cascade |
+ * +----------+-----------------+
+ * | '4' 0x34 | Bounce |
+ * +----------+-----------------+
+ * | '5' 0x35 | Inverse bounce |
+ * +----------+-----------------+
+ */
+
+/* EL15203000 default settings */
+#define EL_FW_DELAY_USEC 20000ul
+#define EL_PATTERN_DELAY_MSEC 800u
+#define EL_PATTERN_LEN 10u
+#define EL_PATTERN_HALF_LEN (EL_PATTERN_LEN / 2)
+
+enum el15203000_command {
+ /* for all LEDs */
+ EL_OFF = '0',
+ EL_ON = '1',
+
+ /* for Screen LED */
+ EL_SCREEN_BREATHING = '2',
+
+ /* for Pipe LED */
+ EL_PIPE_CASCADE = '2',
+ EL_PIPE_INV_CASCADE = '3',
+ EL_PIPE_BOUNCE = '4',
+ EL_PIPE_INV_BOUNCE = '5',
+};
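/*
 * Editor's illustrative sketch (not part of this driver): per the protocol
 * table above, switching the Pipe LED on is just the two ASCII bytes 'P'
 * followed by '1' (EL_ON), each sent as its own transfer with the firmware
 * delay in between; the real driver does this in el15203000_cmd() below.
 */
static int el15203000_pipe_on_example(struct spi_device *spi)
{
	const u8 cmd[] = { 'P', EL_ON };
	size_t i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
		if (i)
			usleep_range(EL_FW_DELAY_USEC, EL_FW_DELAY_USEC + 1);

		ret = spi_write(spi, &cmd[i], sizeof(cmd[i]));
		if (ret)
			return ret;
	}

	return 0;
}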
+
+struct el15203000_led {
+ struct el15203000 *priv;
+ struct led_classdev ldev;
+ u32 reg;
+};
+
+struct el15203000 {
+ struct device *dev;
+ struct mutex lock;
+ struct spi_device *spi;
+ unsigned long delay;
+ size_t count;
+ struct el15203000_led leds[];
+};
+
+static int el15203000_cmd(struct el15203000_led *led, u8 brightness)
+{
+ int ret;
+ u8 cmd[2];
+ size_t i;
+
+ mutex_lock(&led->priv->lock);
+
+ dev_dbg(led->priv->dev, "Set brightness of 0x%02x(%c) to 0x%02x(%c)",
+ led->reg, led->reg, brightness, brightness);
+
+ /* to avoid SPI mistiming with firmware we should wait some time */
+ if (time_after(led->priv->delay, jiffies)) {
+ dev_dbg(led->priv->dev, "Wait %luus to sync",
+ EL_FW_DELAY_USEC);
+
+ usleep_range(EL_FW_DELAY_USEC,
+ EL_FW_DELAY_USEC + 1);
+ }
+
+ cmd[0] = led->reg;
+ cmd[1] = brightness;
+
+ for (i = 0; i < ARRAY_SIZE(cmd); i++) {
+ if (i)
+ usleep_range(EL_FW_DELAY_USEC,
+ EL_FW_DELAY_USEC + 1);
+
+ ret = spi_write(led->priv->spi, &cmd[i], sizeof(cmd[i]));
+ if (ret) {
+ dev_err(led->priv->dev,
+ "spi_write() error %d", ret);
+ break;
+ }
+ }
+
+ led->priv->delay = jiffies + usecs_to_jiffies(EL_FW_DELAY_USEC);
+
+ mutex_unlock(&led->priv->lock);
+
+ return ret;
+}
+
+static int el15203000_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct el15203000_led *led = container_of(ldev,
+ struct el15203000_led,
+ ldev);
+
+ return el15203000_cmd(led, brightness == LED_OFF ? EL_OFF : EL_ON);
+}
+
+static int el15203000_pattern_set_S(struct led_classdev *ldev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ struct el15203000_led *led = container_of(ldev,
+ struct el15203000_led,
+ ldev);
+
+ if (repeat > 0 || len != 2 ||
+ pattern[0].delta_t != 4000 || pattern[0].brightness != 0 ||
+ pattern[1].delta_t != 4000 || pattern[1].brightness != 1)
+ return -EINVAL;
+
+ dev_dbg(led->priv->dev, "Breathing mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ return el15203000_cmd(led, EL_SCREEN_BREATHING);
+}
+
+static bool is_cascade(const struct led_pattern *pattern, u32 len,
+ bool inv, bool right)
+{
+ int val, t;
+ u32 i;
+
+ if (len != EL_PATTERN_HALF_LEN)
+ return false;
+
+ val = right ? BIT(4) : BIT(0);
+
+ for (i = 0; i < len; i++) {
+ t = inv ? ~val & GENMASK(4, 0) : val;
+
+ if (pattern[i].delta_t != EL_PATTERN_DELAY_MSEC ||
+ pattern[i].brightness != t)
+ return false;
+
+ val = right ? val >> 1 : val << 1;
+ }
+
+ return true;
+}
+
+static bool is_bounce(const struct led_pattern *pattern, u32 len, bool inv)
+{
+ if (len != EL_PATTERN_LEN)
+ return false;
+
+ return is_cascade(pattern, EL_PATTERN_HALF_LEN, inv, false) &&
+ is_cascade(pattern + EL_PATTERN_HALF_LEN,
+ EL_PATTERN_HALF_LEN, inv, true);
+}
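/*
 * Editor's illustrative note (not part of this driver): a plain cascade is
 * five led_pattern steps of EL_PATTERN_DELAY_MSEC each that walk a single
 * lit LED along the pipe; a bounce is that cascade immediately followed by
 * its right-to-left mirror:
 */
static const struct led_pattern el15203000_cascade_example[] __maybe_unused = {
	{ 800, 1 }, { 800, 2 }, { 800, 4 }, { 800, 8 }, { 800, 16 },
};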
+
+static int el15203000_pattern_set_P(struct led_classdev *ldev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ u8 cmd;
+ struct el15203000_led *led = container_of(ldev,
+ struct el15203000_led,
+ ldev);
+
+ if (repeat > 0)
+ return -EINVAL;
+
+ if (is_cascade(pattern, len, false, false)) {
+ dev_dbg(led->priv->dev, "Cascade mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ cmd = EL_PIPE_CASCADE;
+ } else if (is_cascade(pattern, len, true, false)) {
+ dev_dbg(led->priv->dev, "Inverse cascade mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ cmd = EL_PIPE_INV_CASCADE;
+ } else if (is_bounce(pattern, len, false)) {
+ dev_dbg(led->priv->dev, "Bounce mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ cmd = EL_PIPE_BOUNCE;
+ } else if (is_bounce(pattern, len, true)) {
+ dev_dbg(led->priv->dev, "Inverse bounce mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ cmd = EL_PIPE_INV_BOUNCE;
+ } else {
+ dev_err(led->priv->dev, "Invalid hw_pattern for 0x%02x(%c)!",
+ led->reg, led->reg);
+
+ return -EINVAL;
+ }
+
+ return el15203000_cmd(led, cmd);
+}
+
+static int el15203000_pattern_clear(struct led_classdev *ldev)
+{
+ struct el15203000_led *led = container_of(ldev,
+ struct el15203000_led,
+ ldev);
+
+ return el15203000_cmd(led, EL_OFF);
+}
+
+static int el15203000_probe_dt(struct el15203000 *priv)
+{
+ struct el15203000_led *led = priv->leds;
+ struct fwnode_handle *child;
+ int ret;
+
+ device_for_each_child_node(priv->dev, child) {
+ struct led_init_data init_data = {};
+
+ ret = fwnode_property_read_u32(child, "reg", &led->reg);
+ if (ret) {
+ dev_err(priv->dev, "LED without ID number");
+ fwnode_handle_put(child);
+
+ break;
+ }
+
+ if (led->reg > U8_MAX) {
+ dev_err(priv->dev, "LED value %d is invalid", led->reg);
+ fwnode_handle_put(child);
+
+ return -EINVAL;
+ }
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led->ldev.default_trigger);
+
+ led->priv = priv;
+ led->ldev.max_brightness = LED_ON;
+ led->ldev.brightness_set_blocking = el15203000_set_blocking;
+
+ if (led->reg == 'S') {
+ led->ldev.pattern_set = el15203000_pattern_set_S;
+ led->ldev.pattern_clear = el15203000_pattern_clear;
+ } else if (led->reg == 'P') {
+ led->ldev.pattern_set = el15203000_pattern_set_P;
+ led->ldev.pattern_clear = el15203000_pattern_clear;
+ }
+
+ init_data.fwnode = child;
+ ret = devm_led_classdev_register_ext(priv->dev, &led->ldev,
+ &init_data);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to register LED device %s, err %d",
+ led->ldev.name, ret);
+ fwnode_handle_put(child);
+
+ break;
+ }
+
+ led++;
+ }
+
+ return ret;
+}
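
probe_dt() follows the usual fwnode child-iteration contract: device_for_each_child_node() takes a reference on the child it hands out, so any early exit from the loop must drop it with fwnode_handle_put(). A reduced sketch of that pattern, with the per-LED setup elided:

#include <linux/device.h>
#include <linux/property.h>

static int parse_children(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg;
	int ret = 0;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &reg);
		if (ret) {
			/* Leaving the loop early: release the child reference. */
			fwnode_handle_put(child);
			break;
		}
		/* ... register one LED for @reg here ... */
	}

	return ret;
}
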
+
+static int el15203000_probe(struct spi_device *spi)
+{
+ struct el15203000 *priv;
+ size_t count;
+
+ count = device_get_child_node_count(&spi->dev);
+ if (!count) {
+ dev_err(&spi->dev, "LEDs are not defined in device tree!");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&spi->dev, struct_size(priv, leds, count),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ priv->count = count;
+ priv->dev = &spi->dev;
+ priv->spi = spi;
+ priv->delay = jiffies -
+ usecs_to_jiffies(EL_FW_DELAY_USEC);
+
+ spi_set_drvdata(spi, priv);
+
+ return el15203000_probe_dt(priv);
+}
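
The probe sizes its private data with struct_size(), which evaluates to sizeof(*priv) plus count trailing array elements and saturates on overflow so the allocation fails cleanly. A minimal sketch with a hypothetical container type:

#include <linux/overflow.h>
#include <linux/slab.h>

struct item { int id; };

struct container {
	size_t count;
	struct item items[];	/* flexible array member */
};

static struct container *container_alloc(size_t count)
{
	/* sizeof(struct container) + count * sizeof(struct item), overflow-checked */
	struct container *c = kzalloc(struct_size(c, items, count), GFP_KERNEL);

	if (c)
		c->count = count;
	return c;
}
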
+
+static int el15203000_remove(struct spi_device *spi)
+{
+ struct el15203000 *priv = spi_get_drvdata(spi);
+
+ mutex_destroy(&priv->lock);
+
+ return 0;
+}
+
+static const struct of_device_id el15203000_dt_ids[] = {
+ { .compatible = "crane,el15203000", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, el15203000_dt_ids);
+
+static struct spi_driver el15203000_driver = {
+ .probe = el15203000_probe,
+ .remove = el15203000_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = el15203000_dt_ids,
+ },
+};
+
+module_spi_driver(el15203000_driver);
+
+MODULE_AUTHOR("Oleh Kravchenko <oleg@kaa.org.ua>");
+MODULE_DESCRIPTION("el15203000 LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:el15203000");
diff --git a/drivers/leds/leds-lm3601x.c b/drivers/leds/leds-lm3601x.c
index b02972f1a341..fce89f2a2d92 100644
--- a/drivers/leds/leds-lm3601x.c
+++ b/drivers/leds/leds-lm3601x.c
@@ -350,8 +350,7 @@ static int lm3601x_register_leds(struct lm3601x_led *led,
init_data.devicename = led->client->name;
init_data.default_label = (led->led_mode == LM3601X_LED_TORCH) ?
"torch" : "infrared";
-
- return led_classdev_flash_register_ext(&led->client->dev,
+ return devm_led_classdev_flash_register_ext(&led->client->dev,
&led->fled_cdev, &init_data);
}
@@ -445,7 +444,6 @@ static int lm3601x_remove(struct i2c_client *client)
{
struct lm3601x_led *led = i2c_get_clientdata(client);
- led_classdev_flash_unregister(&led->fled_cdev);
mutex_destroy(&led->lock);
return regmap_update_bits(led->regmap, LM3601X_ENABLE_REG,
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 3d381f2f73d0..8b408102e138 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -174,19 +174,20 @@ static int lm3692x_brightness_set(struct led_classdev *led_cdev,
ret = lm3692x_fault_check(led);
if (ret) {
- dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ dev_err(&led->client->dev, "Cannot read/clear faults: %d\n",
+ ret);
goto out;
}
ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
if (ret) {
- dev_err(&led->client->dev, "Cannot write MSB\n");
+ dev_err(&led->client->dev, "Cannot write MSB: %d\n", ret);
goto out;
}
ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
if (ret) {
- dev_err(&led->client->dev, "Cannot write LSB\n");
+ dev_err(&led->client->dev, "Cannot write LSB: %d\n", ret);
goto out;
}
out:
@@ -197,13 +198,13 @@ out:
static int lm3692x_init(struct lm3692x_led *led)
{
int enable_state;
- int ret;
+ int ret, reg_ret;
if (led->regulator) {
ret = regulator_enable(led->regulator);
if (ret) {
dev_err(&led->client->dev,
- "Failed to enable regulator\n");
+ "Failed to enable regulator: %d\n", ret);
return ret;
}
}
@@ -213,7 +214,8 @@ static int lm3692x_init(struct lm3692x_led *led)
ret = lm3692x_fault_check(led);
if (ret) {
- dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ dev_err(&led->client->dev, "Cannot read/clear faults: %d\n",
+ ret);
goto out;
}
@@ -248,9 +250,9 @@ static int lm3692x_init(struct lm3692x_led *led)
goto out;
ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL,
- LM3692X_BRHT_MODE_RAMP_MULTI |
- LM3692X_BL_ADJ_POL |
- LM3692X_RAMP_RATE_250us);
+ LM3692X_BOOST_SW_1MHZ |
+ LM3692X_BOOST_SW_NO_SHIFT |
+ LM3692X_OCP_PROT_1_5A);
if (ret)
goto out;
@@ -267,7 +269,7 @@ static int lm3692x_init(struct lm3692x_led *led)
goto out;
ret = regmap_write(led->regmap, LM3692X_BRT_CTRL,
- LM3692X_BL_ADJ_POL | LM3692X_PWM_HYSTER_4LSB);
+ LM3692X_BL_ADJ_POL | LM3692X_RAMP_EN);
if (ret)
goto out;
@@ -311,14 +313,15 @@ out:
gpiod_direction_output(led->enable_gpio, 0);
if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
+ reg_ret = regulator_disable(led->regulator);
+ if (reg_ret)
dev_err(&led->client->dev,
- "Failed to disable regulator\n");
+ "Failed to disable regulator: %d\n", reg_ret);
}
return ret;
}
+
static int lm3692x_probe_dt(struct lm3692x_led *led)
{
struct fwnode_handle *child = NULL;
@@ -334,9 +337,18 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
return ret;
}
- led->regulator = devm_regulator_get(&led->client->dev, "vled");
- if (IS_ERR(led->regulator))
+ led->regulator = devm_regulator_get_optional(&led->client->dev, "vled");
+ if (IS_ERR(led->regulator)) {
+ ret = PTR_ERR(led->regulator);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&led->client->dev,
+ "Failed to get vled regulator: %d\n",
+ ret);
+ return ret;
+ }
led->regulator = NULL;
+ }
child = device_get_next_child_node(&led->client->dev, child);
if (!child) {
@@ -409,7 +421,8 @@ static int lm3692x_remove(struct i2c_client *client)
ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
if (ret) {
- dev_err(&led->client->dev, "Failed to disable regulator\n");
+ dev_err(&led->client->dev, "Failed to disable regulator: %d\n",
+ ret);
return ret;
}
@@ -420,7 +433,7 @@ static int lm3692x_remove(struct i2c_client *client)
ret = regulator_disable(led->regulator);
if (ret)
dev_err(&led->client->dev,
- "Failed to disable regulator\n");
+ "Failed to disable regulator: %d\n", ret);
}
mutex_destroy(&led->lock);
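
The lm3692x change switches to devm_regulator_get_optional(), where -ENODEV means the supply simply is not described and every other error (including -EPROBE_DEFER) must be propagated. A condensed sketch of that error-handling shape, reusing the "vled" supply name purely for illustration:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int get_optional_supply(struct device *dev, struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, "vled");

	if (IS_ERR(reg)) {
		int err = PTR_ERR(reg);

		if (err != -ENODEV)	/* real failure, or probe deferral */
			return err;
		reg = NULL;		/* supply not described: run without it */
	}

	*out = reg;
	return 0;
}
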
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index cabe379071a7..82aea1cd0c12 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -228,8 +228,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv)
brightness = LED_OFF;
led_data->base_color = MLXREG_LED_GREEN_SOLID;
}
- sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg",
- data->label);
+ snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name),
+ "mlxreg:%s", data->label);
led_cdev->name = led_data->led_cdev_name;
led_cdev->brightness = brightness;
led_cdev->max_brightness = LED_ON;
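
The mlxreg change bounds the name formatting with snprintf(), which never writes past the destination and truncates instead. A tiny sketch with a hypothetical helper:

#include <linux/kernel.h>

static void format_led_name(char *buf, size_t len, const char *label)
{
	/* Writes at most len - 1 characters plus the NUL terminator. */
	snprintf(buf, len, "mlxreg:%s", label);
}
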
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index c7c7199e8ebd..7d515d5e57bd 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -467,16 +467,11 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
{
struct pca9532_platform_data *pdata;
struct device_node *child;
- const struct of_device_id *match;
int devid, maxleds;
int i = 0;
const char *state;
- match = of_match_device(of_pca9532_leds_match, dev);
- if (!match)
- return ERR_PTR(-ENODEV);
-
- devid = (int)(uintptr_t)match->data;
+ devid = (int)(uintptr_t)of_device_get_match_data(dev);
maxleds = pca9532_chip_info_tbl[devid].num_leds;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -509,7 +504,6 @@ static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int devid;
- const struct of_device_id *of_id;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
@@ -525,11 +519,7 @@ static int pca9532_probe(struct i2c_client *client,
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
- of_id = of_match_device(of_pca9532_leds_match,
- &client->dev);
- if (unlikely(!of_id))
- return -EINVAL;
- devid = (int)(uintptr_t) of_id->data;
+ devid = (int)(uintptr_t)of_device_get_match_data(&client->dev);
} else {
devid = id->driver_data;
}
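
Both pca9532 hunks replace the of_match_device() boilerplate with of_device_get_match_data(), which returns the .data pointer of the matched of_device_id (or NULL when there is no OF match). A minimal sketch of the usual lookup, with hypothetical compatibles and chip IDs:

#include <linux/of_device.h>
#include <linux/types.h>

enum chip_id { CHIP_A, CHIP_B };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)CHIP_A },
	{ .compatible = "vendor,chip-b", .data = (void *)CHIP_B },
	{ }
};

static int identify_chip(struct device *dev)
{
	/* NULL .data and "no OF match" both read back as 0 here, so drivers
	 * that must tell them apart should check dev->of_node first. */
	return (int)(uintptr_t)of_device_get_match_data(dev);
}
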
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index 59ff088c7d75..a8911ebd30e5 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#define TLC591XX_MAX_LEDS 16
+#define TLC591XX_MAX_BRIGHTNESS 256
#define TLC591XX_REG_MODE1 0x00
#define MODE1_RESPON_ADDR_MASK 0xF0
@@ -112,11 +113,11 @@ tlc591xx_brightness_set(struct led_classdev *led_cdev,
struct tlc591xx_priv *priv = led->priv;
int err;
- switch (brightness) {
+ switch ((int)brightness) {
case 0:
err = tlc591xx_set_ledout(priv, led, LEDOUT_OFF);
break;
- case LED_FULL:
+ case TLC591XX_MAX_BRIGHTNESS:
err = tlc591xx_set_ledout(priv, led, LEDOUT_ON);
break;
default:
@@ -128,51 +129,6 @@ tlc591xx_brightness_set(struct led_classdev *led_cdev,
return err;
}
-static void
-tlc591xx_destroy_devices(struct tlc591xx_priv *priv, unsigned int j)
-{
- int i = j;
-
- while (--i >= 0) {
- if (priv->leds[i].active)
- led_classdev_unregister(&priv->leds[i].ldev);
- }
-}
-
-static int
-tlc591xx_configure(struct device *dev,
- struct tlc591xx_priv *priv,
- const struct tlc591xx *tlc591xx)
-{
- unsigned int i;
- int err = 0;
-
- tlc591xx_set_mode(priv->regmap, MODE2_DIM);
- for (i = 0; i < TLC591XX_MAX_LEDS; i++) {
- struct tlc591xx_led *led = &priv->leds[i];
-
- if (!led->active)
- continue;
-
- led->priv = priv;
- led->led_no = i;
- led->ldev.brightness_set_blocking = tlc591xx_brightness_set;
- led->ldev.max_brightness = LED_FULL;
- err = led_classdev_register(dev, &led->ldev);
- if (err < 0) {
- dev_err(dev, "couldn't register LED %s\n",
- led->ldev.name);
- goto exit;
- }
- }
-
- return 0;
-
-exit:
- tlc591xx_destroy_devices(priv, i);
- return err;
-}
-
static const struct regmap_config tlc591xx_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -225,7 +181,16 @@ tlc591xx_probe(struct i2c_client *client,
i2c_set_clientdata(client, priv);
+ err = tlc591xx_set_mode(priv->regmap, MODE2_DIM);
+ if (err < 0)
+ return err;
+
for_each_child_of_node(np, child) {
+ struct tlc591xx_led *led;
+ struct led_init_data init_data = {};
+
+ init_data.fwnode = of_fwnode_handle(child);
+
err = of_property_read_u32(child, "reg", &reg);
if (err) {
of_node_put(child);
@@ -236,22 +201,24 @@ tlc591xx_probe(struct i2c_client *client,
of_node_put(child);
return -EINVAL;
}
- priv->leds[reg].active = true;
- priv->leds[reg].ldev.name =
- of_get_property(child, "label", NULL) ? : child->name;
- priv->leds[reg].ldev.default_trigger =
- of_get_property(child, "linux,default-trigger", NULL);
- }
- return tlc591xx_configure(dev, priv, tlc591xx);
-}
+ led = &priv->leds[reg];
-static int
-tlc591xx_remove(struct i2c_client *client)
-{
- struct tlc591xx_priv *priv = i2c_get_clientdata(client);
-
- tlc591xx_destroy_devices(priv, TLC591XX_MAX_LEDS);
+ led->active = true;
+ led->ldev.default_trigger =
+ of_get_property(child, "linux,default-trigger", NULL);
+ led->priv = priv;
+ led->led_no = reg;
+ led->ldev.brightness_set_blocking = tlc591xx_brightness_set;
+ led->ldev.max_brightness = TLC591XX_MAX_BRIGHTNESS;
+ err = devm_led_classdev_register_ext(dev, &led->ldev,
+ &init_data);
+ if (err < 0) {
+ dev_err(dev, "couldn't register LED %s\n",
+ led->ldev.name);
+ return err;
+ }
+ }
return 0;
}
@@ -268,7 +235,6 @@ static struct i2c_driver tlc591xx_driver = {
.of_match_table = of_match_ptr(of_tlc591xx_leds_match),
},
.probe = tlc591xx_probe,
- .remove = tlc591xx_remove,
.id_table = tlc591xx_id,
};
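
The lm3601x and tlc591xx conversions rely on devm-managed registration: the class device is torn down automatically when the driver unbinds, which is why the explicit unregister paths and the tlc591xx remove() callback disappear. A minimal sketch of the devm-registered form, with a hypothetical brightness callback:

#include <linux/leds.h>
#include <linux/property.h>

static int my_brightness_set(struct led_classdev *cdev, enum led_brightness b)
{
	/* program the hardware here */
	return 0;
}

static int register_one_led(struct device *dev, struct led_classdev *cdev,
			    struct fwnode_handle *fwnode)
{
	struct led_init_data init_data = { .fwnode = fwnode };

	cdev->brightness_set_blocking = my_brightness_set;
	cdev->max_brightness = 1;

	/* Unregistered automatically on driver detach: no remove() needed. */
	return devm_led_classdev_register_ext(dev, cdev, &init_data);
}
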
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 0b577cece8f7..2d9eb48bbed9 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -23,6 +23,12 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev,
enum led_brightness value);
void led_set_brightness_nosleep(struct led_classdev *led_cdev,
enum led_brightness value);
+ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count);
+ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count);
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 136f86a1627d..d5e774d83021 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -302,10 +302,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
container_of(nb, struct led_netdev_data, notifier);
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
- && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
+ && evt != NETDEV_CHANGENAME)
return NOTIFY_DONE;
if (!(dev == trigger_data->net_dev ||
+ (evt == NETDEV_CHANGENAME && !strcmp(dev->name, trigger_data->device_name)) ||
(evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
return NOTIFY_DONE;
@@ -315,6 +317,7 @@ static int netdev_trig_notify(struct notifier_block *nb,
clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
switch (evt) {
+ case NETDEV_CHANGENAME:
case NETDEV_REGISTER:
if (trigger_data->net_dev)
dev_put(trigger_data->net_dev);
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 400960cf04d5..b1314d104b06 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -147,7 +147,8 @@ static struct miscdevice anslcd_dev = {
&anslcd_fops
};
-const char anslcd_logo[] = "********************" /* Line #1 */
+static const char anslcd_logo[] __initconst =
+ "********************" /* Line #1 */
"* LINUX! *" /* Line #3 */
"* Welcome to *" /* Line #2 */
"********************"; /* Line #4 */
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index 8b9eb56e4311..cc236ac7a0b5 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -354,7 +354,6 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
static struct platform_driver hi6220_mbox_driver = {
.driver = {
.name = "hi6220-mbox",
- .owner = THIS_MODULE,
.of_match_table = hi6220_mbox_of_match,
},
.probe = hi6220_mbox_probe,
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 9f74dee1a58c..2cdcdc5f1119 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -12,19 +12,11 @@
#include <linux/of_device.h>
#include <linux/slab.h>
-/* Transmit Register */
-#define IMX_MU_xTRn(x) (0x00 + 4 * (x))
-/* Receive Register */
-#define IMX_MU_xRRn(x) (0x10 + 4 * (x))
-/* Status Register */
-#define IMX_MU_xSR 0x20
#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x)))
#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x)))
#define IMX_MU_xSR_BRDIP BIT(9)
-/* Control Register */
-#define IMX_MU_xCR 0x24
/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x)))
/* Receive Interrupt Enable */
@@ -44,6 +36,13 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RXDB, /* Rx doorbell */
};
+struct imx_mu_dcfg {
+ u32 xTR[4]; /* Transmit Registers */
+ u32 xRR[4]; /* Receive Registers */
+ u32 xSR; /* Status Register */
+ u32 xCR; /* Control Register */
+};
+
struct imx_mu_con_priv {
unsigned int idx;
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
@@ -61,12 +60,27 @@ struct imx_mu_priv {
struct mbox_chan mbox_chans[IMX_MU_CHANS];
struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
+ const struct imx_mu_dcfg *dcfg;
struct clk *clk;
int irq;
bool side_b;
};
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .xTR = {0x20, 0x24, 0x28, 0x2c},
+ .xRR = {0x40, 0x44, 0x48, 0x4c},
+ .xSR = 0x60,
+ .xCR = 0x64,
+};
+
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
{
return container_of(mbox, struct imx_mu_priv, mbox);
@@ -88,10 +102,10 @@ static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
u32 val;
spin_lock_irqsave(&priv->xcr_lock, flags);
- val = imx_mu_read(priv, IMX_MU_xCR);
+ val = imx_mu_read(priv, priv->dcfg->xCR);
val &= ~clr;
val |= set;
- imx_mu_write(priv, val, IMX_MU_xCR);
+ imx_mu_write(priv, val, priv->dcfg->xCR);
spin_unlock_irqrestore(&priv->xcr_lock, flags);
return val;
@@ -111,8 +125,8 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
struct imx_mu_con_priv *cp = chan->con_priv;
u32 val, ctrl, dat;
- ctrl = imx_mu_read(priv, IMX_MU_xCR);
- val = imx_mu_read(priv, IMX_MU_xSR);
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR);
+ val = imx_mu_read(priv, priv->dcfg->xSR);
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -138,10 +152,10 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
mbox_chan_txdone(chan, 0);
} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
- dat = imx_mu_read(priv, IMX_MU_xRRn(cp->idx));
+ dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
mbox_chan_received_data(chan, (void *)&dat);
} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
- imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), IMX_MU_xSR);
+ imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
mbox_chan_received_data(chan, NULL);
} else {
dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
@@ -159,7 +173,7 @@ static int imx_mu_send_data(struct mbox_chan *chan, void *data)
switch (cp->type) {
case IMX_MU_TYPE_TX:
- imx_mu_write(priv, *arg, IMX_MU_xTRn(cp->idx));
+ imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
break;
case IMX_MU_TYPE_TXDB:
@@ -214,11 +228,24 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
- if (cp->type == IMX_MU_TYPE_TXDB)
+ if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
+ return;
+ }
- imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx) |
- IMX_MU_xCR_RIEn(cp->idx) | IMX_MU_xCR_GIEn(cp->idx));
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
+ break;
+ default:
+ break;
+ }
free_irq(priv->irq, chan);
}
@@ -257,7 +284,7 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
return;
/* Set default MU configuration */
- imx_mu_write(priv, 0, IMX_MU_xCR);
+ imx_mu_write(priv, 0, priv->dcfg->xCR);
}
static int imx_mu_probe(struct platform_device *pdev)
@@ -265,6 +292,7 @@ static int imx_mu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx_mu_priv *priv;
+ const struct imx_mu_dcfg *dcfg;
unsigned int i;
int ret;
@@ -282,6 +310,11 @@ static int imx_mu_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+ priv->dcfg = dcfg;
+
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
if (PTR_ERR(priv->clk) != -ENOENT)
@@ -335,7 +368,8 @@ static int imx_mu_remove(struct platform_device *pdev)
}
static const struct of_device_id imx_mu_dt_ids[] = {
- { .compatible = "fsl,imx6sx-mu" },
+ { .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
+ { .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
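
The i.MX mailbox rework moves the register offsets into a per-compatible struct selected at probe time via of_device_get_match_data(), so one driver can serve MUs with different register maps. A reduced sketch of the pattern, using hypothetical offsets and names rather than the driver's own:

#include <linux/of_device.h>
#include <linux/types.h>

struct mu_layout {
	u32 tr0;	/* first transmit register */
	u32 sr;		/* status register */
};

static const struct mu_layout layout_soc_a = { .tr0 = 0x00, .sr = 0x20 };
static const struct mu_layout layout_soc_b = { .tr0 = 0x20, .sr = 0x60 };

static const struct of_device_id mu_of_match[] = {
	{ .compatible = "vendor,mu-soc-a", .data = &layout_soc_a },
	{ .compatible = "vendor,mu-soc-b", .data = &layout_soc_b },
	{ }
};

/* At probe time: priv->layout = of_device_get_match_data(dev);
 * every register access then goes through priv->layout->... offsets. */
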
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index a3cd63583cf7..5978a35aac6d 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -868,7 +868,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
ret = pm_runtime_put_sync(mdev->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -ENOSYS)
goto unregister;
devm_kfree(&pdev->dev, finfoblk);
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 5c2d1e1f988b..ef966887aa15 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -52,7 +52,6 @@ struct stm32_ipcc {
struct clk *clk;
spinlock_t lock; /* protect access to IPCC registers */
int irqs[IPCC_IRQ_NUM];
- int wkp;
u32 proc_id;
u32 n_chans;
u32 xcr;
@@ -282,16 +281,9 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
/* wakeup */
if (of_property_read_bool(np, "wakeup-source")) {
- ipcc->wkp = platform_get_irq_byname(pdev, "wakeup");
- if (ipcc->wkp < 0) {
- if (ipcc->wkp != -EPROBE_DEFER)
- dev_err(dev, "could not get wakeup IRQ\n");
- ret = ipcc->wkp;
- goto err_clk;
- }
-
device_set_wakeup_capable(dev, true);
- ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp);
+
+ ret = dev_pm_set_wake_irq(dev, ipcc->irqs[IPCC_IRQ_RX]);
if (ret) {
dev_err(dev, "Failed to set wake up irq\n");
goto err_init_wkp;
@@ -334,10 +326,10 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
return 0;
err_irq_wkp:
- if (ipcc->wkp)
+ if (of_property_read_bool(np, "wakeup-source"))
dev_pm_clear_wake_irq(dev);
err_init_wkp:
- device_init_wakeup(dev, false);
+ device_set_wakeup_capable(dev, false);
err_clk:
clk_disable_unprepare(ipcc->clk);
return ret;
@@ -345,27 +337,17 @@ err_clk:
static int stm32_ipcc_remove(struct platform_device *pdev)
{
- struct stm32_ipcc *ipcc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
- if (ipcc->wkp)
+ if (of_property_read_bool(dev->of_node, "wakeup-source"))
dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
+ device_set_wakeup_capable(dev, false);
return 0;
}
#ifdef CONFIG_PM_SLEEP
-static void stm32_ipcc_set_irq_wake(struct device *dev, bool enable)
-{
- struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
- unsigned int i;
-
- if (device_may_wakeup(dev))
- for (i = 0; i < IPCC_IRQ_NUM; i++)
- irq_set_irq_wake(ipcc->irqs[i], enable);
-}
-
static int stm32_ipcc_suspend(struct device *dev)
{
struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
@@ -373,8 +355,6 @@ static int stm32_ipcc_suspend(struct device *dev)
ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);
- stm32_ipcc_set_irq_wake(dev, true);
-
return 0;
}
@@ -382,8 +362,6 @@ static int stm32_ipcc_resume(struct device *dev)
{
struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
- stm32_ipcc_set_irq_wake(dev, false);
-
writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 4c5ba35d48d4..834b35dc3b13 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -657,7 +657,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
- err = platform_get_irq_byname(pdev, "doorbell");
+ err = platform_get_irq_byname_optional(pdev, "doorbell");
if (err >= 0)
hsp->doorbell_irq = err;
@@ -677,7 +677,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- err = platform_get_irq_byname(pdev, name);
+ err = platform_get_irq_byname_optional(pdev, name);
if (err >= 0) {
hsp->shared_irqs[i] = err;
count++;
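
tegra-hsp switches to platform_get_irq_byname_optional(), which behaves like platform_get_irq_byname() but stays silent when the interrupt is simply absent, so boards without it probe without spurious error messages. A short sketch of the intended use:

#include <linux/platform_device.h>

static int request_optional_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_byname_optional(pdev, "doorbell");

	if (irq < 0)
		return 0;	/* not wired up on this board: carry on without it */

	/* ... devm_request_irq(&pdev->dev, irq, ...) here ... */
	return 0;
}
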
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index b72e82efaee5..38fbb3b59873 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -191,7 +191,7 @@ int __mcb_register_driver(struct mcb_driver *drv, struct module *owner,
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL_GPL(__mcb_register_driver);
+EXPORT_SYMBOL_NS_GPL(__mcb_register_driver, MCB);
/**
* mcb_unregister_driver() - Unregister a @mcb_driver from the system
@@ -203,7 +203,7 @@ void mcb_unregister_driver(struct mcb_driver *drv)
{
driver_unregister(&drv->driver);
}
-EXPORT_SYMBOL_GPL(mcb_unregister_driver);
+EXPORT_SYMBOL_NS_GPL(mcb_unregister_driver, MCB);
static void mcb_release_dev(struct device *dev)
{
@@ -249,7 +249,7 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(mcb_device_register);
+EXPORT_SYMBOL_NS_GPL(mcb_device_register, MCB);
static void mcb_free_bus(struct device *dev)
{
@@ -301,7 +301,7 @@ err_free:
kfree(bus);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(mcb_alloc_bus);
+EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
static int __mcb_devices_unregister(struct device *dev, void *data)
{
@@ -323,7 +323,7 @@ void mcb_release_bus(struct mcb_bus *bus)
{
mcb_devices_unregister(bus);
}
-EXPORT_SYMBOL_GPL(mcb_release_bus);
+EXPORT_SYMBOL_NS_GPL(mcb_release_bus, MCB);
/**
* mcb_bus_put() - Increment refcnt
@@ -338,7 +338,7 @@ struct mcb_bus *mcb_bus_get(struct mcb_bus *bus)
return bus;
}
-EXPORT_SYMBOL_GPL(mcb_bus_get);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_get, MCB);
/**
* mcb_bus_put() - Decrement refcnt
@@ -351,7 +351,7 @@ void mcb_bus_put(struct mcb_bus *bus)
if (bus)
put_device(&bus->dev);
}
-EXPORT_SYMBOL_GPL(mcb_bus_put);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_put, MCB);
/**
* mcb_alloc_dev() - Allocate a device
@@ -371,7 +371,7 @@ struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
return dev;
}
-EXPORT_SYMBOL_GPL(mcb_alloc_dev);
+EXPORT_SYMBOL_NS_GPL(mcb_alloc_dev, MCB);
/**
* mcb_free_dev() - Free @mcb_device
@@ -383,7 +383,7 @@ void mcb_free_dev(struct mcb_device *dev)
{
kfree(dev);
}
-EXPORT_SYMBOL_GPL(mcb_free_dev);
+EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
static int __mcb_bus_add_devices(struct device *dev, void *data)
{
@@ -412,7 +412,7 @@ void mcb_bus_add_devices(const struct mcb_bus *bus)
{
bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
}
-EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_add_devices, MCB);
/**
* mcb_get_resource() - get a resource for a mcb device
@@ -428,7 +428,7 @@ struct resource *mcb_get_resource(struct mcb_device *dev, unsigned int type)
else
return NULL;
}
-EXPORT_SYMBOL_GPL(mcb_get_resource);
+EXPORT_SYMBOL_NS_GPL(mcb_get_resource, MCB);
/**
* mcb_request_mem() - Request memory
@@ -454,7 +454,7 @@ struct resource *mcb_request_mem(struct mcb_device *dev, const char *name)
return mem;
}
-EXPORT_SYMBOL_GPL(mcb_request_mem);
+EXPORT_SYMBOL_NS_GPL(mcb_request_mem, MCB);
/**
* mcb_release_mem() - Release memory requested by device
@@ -469,7 +469,7 @@ void mcb_release_mem(struct resource *mem)
size = resource_size(mem);
release_mem_region(mem->start, size);
}
-EXPORT_SYMBOL_GPL(mcb_release_mem);
+EXPORT_SYMBOL_NS_GPL(mcb_release_mem, MCB);
static int __mcb_get_irq(struct mcb_device *dev)
{
@@ -495,7 +495,7 @@ int mcb_get_irq(struct mcb_device *dev)
return __mcb_get_irq(dev);
}
-EXPORT_SYMBOL_GPL(mcb_get_irq);
+EXPORT_SYMBOL_NS_GPL(mcb_get_irq, MCB);
static int mcb_init(void)
{
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index 8f1bde437a7e..506676754538 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -168,3 +168,4 @@ module_exit(mcb_lpc_exit);
MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over LPC support");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 3b69e6aa3d88..0266bfddfbe2 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -253,4 +253,4 @@ free_header:
return ret;
}
-EXPORT_SYMBOL_GPL(chameleon_parse_cells);
+EXPORT_SYMBOL_NS_GPL(chameleon_parse_cells, MCB);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 14866aa22f75..dc88232d9af8 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -131,3 +131,4 @@ module_pci_driver(mcb_pci_driver);
MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over PCI support");
+MODULE_IMPORT_NS(MCB);
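
The MCB hunks export the bus API into the MCB symbol namespace; any module calling those symbols must declare the import, otherwise modpost warns and loading can fail on kernels that enforce namespaces. A minimal sketch of both sides, shown in one snippet for brevity with hypothetical names:

#include <linux/module.h>

/* Provider: export into a named symbol namespace. */
int mybus_do_thing(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(mybus_do_thing, MYBUS);

/* Consumer: one import statement covers every symbol in that namespace. */
MODULE_IMPORT_NS(MYBUS);
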
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 5ef7daeb8cbd..9340435a94a0 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -319,6 +319,8 @@ static void cec_post_state_event(struct cec_adapter *adap)
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ ev.state_change.have_conn_info =
+ adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event(adap, &ev);
}
@@ -1976,7 +1978,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* Play function, this message can have variable length
* depending on the specific play function that is used.
*/
- case 0x60:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
if (msg->len == 2)
rc_keydown(adap->rc, RC_PROTO_CEC,
msg->msg[2], 0);
@@ -1993,8 +1995,12 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* For the time being these messages are not processed by the
* framework and are simply forwarded to the user space.
*/
- case 0x56: case 0x57:
- case 0x67: case 0x68: case 0x69: case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
break;
default:
rc_keydown(adap->rc, RC_PROTO_CEC, msg->msg[2], 0);
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 12d676484472..17d1cb2e5f97 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -187,6 +187,21 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return 0;
}
+static long cec_adap_g_connector_info(struct cec_adapter *adap,
+ struct cec_log_addrs __user *parg)
+{
+ int ret = 0;
+
+ if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
+ return -ENOTTY;
+
+ mutex_lock(&adap->lock);
+ if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info)))
+ ret = -EFAULT;
+ mutex_unlock(&adap->lock);
+ return ret;
+}
+
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_msg __user *parg)
{
@@ -506,6 +521,9 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case CEC_ADAP_S_LOG_ADDRS:
return cec_adap_s_log_addrs(adap, fh, block, parg);
+ case CEC_ADAP_G_CONNECTOR_INFO:
+ return cec_adap_g_connector_info(adap, parg);
+
case CEC_TRANSMIT:
return cec_transmit(adap, fh, block, parg);
@@ -578,6 +596,8 @@ static int cec_open(struct inode *inode, struct file *filp)
/* Queue up initial state events */
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ ev.state_change.have_conn_info =
+ adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
if (adap->pin && adap->pin->ops->read_hpd) {
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 9c610e1e99b8..db7adffcdc76 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -257,11 +257,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
struct cec_adapter *adap;
int res;
- /*
- * Disable this capability until the connector info public API
- * is ready.
- */
- caps &= ~CEC_CAP_CONNECTOR_INFO;
#ifndef CONFIG_MEDIA_CEC_RC
caps &= ~CEC_CAP_RC;
#endif
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 4d82a5522072..7cf42b133dbc 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -153,13 +153,14 @@ cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
}
EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_register);
-void cec_notifier_cec_adap_unregister(struct cec_notifier *n)
+void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
+ struct cec_adapter *adap)
{
if (!n)
return;
mutex_lock(&n->lock);
- n->cec_adap->notifier = NULL;
+ adap->notifier = NULL;
n->cec_adap = NULL;
n->callback = NULL;
mutex_unlock(&n->lock);
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index 8f987bc0dd88..660fe111f540 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -1279,6 +1279,15 @@ static void cec_pin_adap_free(struct cec_adapter *adap)
kfree(pin);
}
+static int cec_pin_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct cec_pin *pin = adap->pin;
+
+ if (pin->ops->received)
+ return pin->ops->received(adap, msg);
+ return -ENOMSG;
+}
+
void cec_pin_changed(struct cec_adapter *adap, bool value)
{
struct cec_pin *pin = adap->pin;
@@ -1301,6 +1310,7 @@ static const struct cec_adap_ops cec_pin_adap_ops = {
.error_inj_parse_line = cec_pin_error_inj_parse_line,
.error_inj_show = cec_pin_error_inj_show,
#endif
+ .received = cec_pin_received,
};
struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 0ba51dacc580..c1511094fdc7 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -230,8 +230,8 @@ static char *siano_msgs[] = {
[MSG_SMS_FLASH_DL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_FLASH_DL_REQ",
[MSG_SMS_EXEC_TEST_1_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_REQ",
[MSG_SMS_EXEC_TEST_1_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_RES",
- [MSG_SMS_ENBALE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENBALE_TS_INTERFACE_REQ",
- [MSG_SMS_ENBALE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENBALE_TS_INTERFACE_RES",
+ [MSG_SMS_ENABLE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_REQ",
+ [MSG_SMS_ENABLE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_RES",
[MSG_SMS_SPI_SET_BUS_WIDTH_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_REQ",
[MSG_SMS_SPI_SET_BUS_WIDTH_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_RES",
[MSG_SMS_SEND_EMM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_EMM_REQ",
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index a2f95f4899c2..b3b793b5caf3 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -434,8 +434,8 @@ enum msg_types {
MSG_SMS_FLASH_DL_REQ = 732,
MSG_SMS_EXEC_TEST_1_REQ = 734,
MSG_SMS_EXEC_TEST_1_RES = 735,
- MSG_SMS_ENBALE_TS_INTERFACE_REQ = 736,
- MSG_SMS_ENBALE_TS_INTERFACE_RES = 737,
+ MSG_SMS_ENABLE_TS_INTERFACE_REQ = 736,
+ MSG_SMS_ENABLE_TS_INTERFACE_RES = 737,
MSG_SMS_SPI_SET_BUS_WIDTH_REQ = 738,
MSG_SMS_SPI_SET_BUS_WIDTH_RES = 739,
MSG_SMS_SEND_EMM_REQ = 740,
diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h
index b2c54c256e86..ada41d5c4e83 100644
--- a/drivers/media/common/siano/smsir.h
+++ b/drivers/media/common/siano/smsir.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * SPDX-License-Identifier: GPL-2.0+
*
* Siano Mobile Silicon, Inc.
* MDTV receiver kernel modules.
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 5a9ba3846f0a..e652f4318284 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -49,8 +49,11 @@ module_param(debug, int, 0644);
V4L2_BUF_FLAG_REQUEST_FD | \
V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
-#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
- V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | \
+ V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | \
+ V4L2_BUF_FLAG_TIMECODE | \
+ V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)
/*
* __verify_planes_array() - verify that the planes array passed in struct
@@ -194,6 +197,7 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
}
vbuf->sequence = 0;
vbuf->request_fd = -1;
+ vbuf->is_held = false;
if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
switch (b->memory) {
@@ -321,6 +325,8 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
*/
vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
vbuf->field = b->field;
+ if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
} else {
/* Zero any output buffer flags as this is a capture buffer */
vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
@@ -654,6 +660,8 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
if (q->io_modes & VB2_DMABUF)
*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+ if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 6f7eedb4c00e..0ba382948c51 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -298,7 +298,7 @@ int cxd2820r_sleep_c(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index d56c6f788196..fbdfa6bf38dc 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -392,7 +392,7 @@ int cxd2820r_sleep_t(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index f924a80b968a..34ef2bb2de34 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -386,7 +386,7 @@ int cxd2820r_sleep_t2(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 1b30cf570803..758c95bc3b11 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -60,6 +60,7 @@ struct cxd2841er_priv {
enum cxd2841er_xtal xtal;
enum fe_caps caps;
u32 flags;
+ unsigned long stats_time;
};
static const struct cxd2841er_cnr_data s_cn_data[] = {
@@ -3279,9 +3280,15 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
if (status & FE_HAS_LOCK) {
+ if (priv->stats_time &&
+ (!time_after(jiffies, priv->stats_time)))
+ return 0;
+
+ /* Prevent retrieving stats faster than once per second */
+ priv->stats_time = jiffies + msecs_to_jiffies(1000);
+
cxd2841er_read_snr(fe);
cxd2841er_read_ucblocks(fe);
-
cxd2841er_read_ber(fe);
} else {
p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
@@ -3360,6 +3367,9 @@ done:
p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ /* Reset the wait for jiffies logic */
+ priv->stats_time = 0;
+
return ret;
}
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 2f5af4813a74..ac7be872f460 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -4201,7 +4201,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
u16 *data, u32 flags)
{
u8 buf[2] = { 0 };
- int rc = -EIO;
+ int rc;
u16 word = 0;
if (!data)
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 4e50441c247a..a7faf0cf8788 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -517,7 +517,7 @@ static void mb86a20s_reset_frontend_cache(struct dvb_frontend *fe)
* Estimates the bit rate using the per-segment bit rate given by
* ABNT/NBR 15601 spec (table 4).
*/
-static u32 isdbt_rate[3][5][4] = {
+static const u32 isdbt_rate[3][5][4] = {
{ /* DQPSK/QPSK */
{ 280850, 312060, 330420, 340430 }, /* 1/2 */
{ 374470, 416080, 440560, 453910 }, /* 2/3 */
@@ -539,13 +539,9 @@ static u32 isdbt_rate[3][5][4] = {
}
};
-static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
- u32 modulation, u32 forward_error_correction,
- u32 guard_interval,
- u32 segment)
+static u32 isdbt_layer_min_bitrate(struct dtv_frontend_properties *c,
+ u32 layer)
{
- struct mb86a20s_state *state = fe->demodulator_priv;
- u32 rate;
int mod, fec, guard;
/*
@@ -553,7 +549,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
* to consider the lowest bit rate, to avoid taking too long time
* to get BER.
*/
- switch (modulation) {
+ switch (c->layer[layer].modulation) {
case DQPSK:
case QPSK:
default:
@@ -567,7 +563,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- switch (forward_error_correction) {
+ switch (c->layer[layer].fec) {
default:
case FEC_1_2:
case FEC_AUTO:
@@ -587,7 +583,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- switch (guard_interval) {
+ switch (c->guard_interval) {
default:
case GUARD_INTERVAL_1_4:
guard = 0;
@@ -603,29 +599,14 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- /* Samples BER at BER_SAMPLING_RATE seconds */
- rate = isdbt_rate[mod][fec][guard] * segment * BER_SAMPLING_RATE;
-
- /* Avoids sampling too quickly or to overflow the register */
- if (rate < 256)
- rate = 256;
- else if (rate > (1 << 24) - 1)
- rate = (1 << 24) - 1;
-
- dev_dbg(&state->i2c->dev,
- "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
- __func__, 'A' + layer,
- segment * isdbt_rate[mod][fec][guard]/1000,
- rate, rate);
-
- state->estimated_rate[layer] = rate;
+ return isdbt_rate[mod][fec][guard] * c->layer[layer].segment_count;
}
static int mb86a20s_get_frontend(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int layer, rc;
+ int layer, rc, rate, counter;
dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
@@ -676,10 +657,21 @@ static int mb86a20s_get_frontend(struct dvb_frontend *fe)
dev_dbg(&state->i2c->dev, "%s: interleaving %d.\n",
__func__, rc);
c->layer[layer].interleaving = rc;
- mb86a20s_layer_bitrate(fe, layer, c->layer[layer].modulation,
- c->layer[layer].fec,
- c->guard_interval,
- c->layer[layer].segment_count);
+
+ rate = isdbt_layer_min_bitrate(c, layer);
+ counter = rate * BER_SAMPLING_RATE;
+
+ /* Avoids sampling too quickly or to overflow the register */
+ if (counter < 256)
+ counter = 256;
+ else if (counter > (1 << 24) - 1)
+ counter = (1 << 24) - 1;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
+ __func__, 'A' + layer, rate / 1000, counter, counter);
+
+ state->estimated_rate[layer] = counter;
}
rc = mb86a20s_writereg(state, 0x6d, 0x84);
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index 7cae7d632030..d43a67045dbe 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -135,11 +135,6 @@ static inline int mt312_writereg(struct mt312_state *state,
return mt312_write(state, reg, &tmp, 1);
}
-static inline u32 mt312_div(u32 a, u32 b)
-{
- return (a + (b / 2)) / b;
-}
-
static int mt312_reset(struct mt312_state *state, const u8 full)
{
return mt312_writereg(state, RESET, full ? 0x80 : 0x40);
@@ -187,7 +182,7 @@ static int mt312_get_symbol_rate(struct mt312_state *state, u32 *sr)
monitor = (buf[0] << 8) | buf[1];
dprintk("sr(auto) = %u\n",
- mt312_div(monitor * 15625, 4));
+ DIV_ROUND_CLOSEST(monitor * 15625, 4));
} else {
ret = mt312_writereg(state, MON_CTRL, 0x05);
if (ret < 0)
@@ -291,10 +286,10 @@ static int mt312_initfe(struct dvb_frontend *fe)
}
/* SYS_CLK */
- buf[0] = mt312_div(state->xtal * state->freq_mult * 2, 1000000);
+ buf[0] = DIV_ROUND_CLOSEST(state->xtal * state->freq_mult * 2, 1000000);
/* DISEQC_RATIO */
- buf[1] = mt312_div(state->xtal, 22000 * 4);
+ buf[1] = DIV_ROUND_CLOSEST(state->xtal, 22000 * 4);
ret = mt312_write(state, SYS_CLK, buf, sizeof(buf));
if (ret < 0)
@@ -610,7 +605,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
}
/* sr = (u16)(sr * 256.0 / 1000000.0) */
- sr = mt312_div(p->symbol_rate * 4, 15625);
+ sr = DIV_ROUND_CLOSEST(p->symbol_rate * 4, 15625);
/* SYM_RATE */
buf[0] = (sr >> 8) & 0x3f;
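
mt312 drops its open-coded rounding helper in favour of DIV_ROUND_CLOSEST(), the kernel's divide-to-nearest macro from linux/kernel.h. A one-function sketch mirroring the symbol-rate conversion (hypothetical helper name):

#include <linux/kernel.h>

static u32 symbol_rate_reg(u32 symbol_rate)
{
	/* Rounds to the nearest integer instead of truncating: (a + b / 2) / b. */
	return DIV_ROUND_CLOSEST(symbol_rate * 4, 15625);
}
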
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index 50dccb394efa..ecd21adf8950 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -9,38 +9,43 @@
#define SI2168_H
#include <linux/dvb/frontend.h>
-/*
- * I2C address
- * 0x64
+/**
+ * struct si2168_config - configuration parameters for si2168
+ *
+ * @fe:
+ * frontend returned by driver
+ * @i2c_adapter:
+ * tuner I2C adapter returned by driver
+ * @ts_mode:
+ * Transport Stream mode. Can be:
+ * - %SI2168_TS_PARALLEL
+ * - %SI2168_TS_SERIAL
+ * - %SI2168_TS_TRISTATE
+ * - %SI2168_TS_CLK_MANUAL
+ * @ts_clock_inv:
+ * TS clock inverted
+ * @ts_clock_gapped:
+ * TS clock gapped
+ * @spectral_inversion:
+ * Inverted spectrum
+ *
+ * Note:
+ * The I2C address of this demod is 0x64.
*/
struct si2168_config {
- /*
- * frontend
- * returned by driver
- */
struct dvb_frontend **fe;
-
- /*
- * tuner I2C adapter
- * returned by driver
- */
struct i2c_adapter **i2c_adapter;
- /* TS mode */
#define SI2168_TS_PARALLEL 0x06
#define SI2168_TS_SERIAL 0x03
#define SI2168_TS_TRISTATE 0x00
#define SI2168_TS_CLK_MANUAL 0x20
u8 ts_mode;
- /* TS clock inverted */
- bool ts_clock_inv;
-
- /* TS clock gapped */
- bool ts_clock_gapped;
-
- /* Inverted spectrum */
- bool spectral_inversion;
+ /* Flags */
+ unsigned int ts_clock_inv:1;
+ unsigned int ts_clock_gapped:1;
+ unsigned int spectral_inversion:1;
};
#endif
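
si2168 packs its boolean configuration into single-bit bitfields, keeping several flags in one word instead of one bool each. A small sketch of the same layout with hypothetical flag names:

struct cfg_flags {
	unsigned int clock_inverted:1;
	unsigned int clock_gapped:1;
	unsigned int spectrum_inverted:1;	/* three flags share one word */
};

static struct cfg_flags example = { .clock_inverted = 1 };
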
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 804d5b30c697..18bea5222082 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -34,12 +34,12 @@ struct si2168_dev {
unsigned int chip_id;
unsigned int version;
const char *firmware_name;
- bool active;
- bool warm;
u8 ts_mode;
- bool ts_clock_inv;
- bool ts_clock_gapped;
- bool spectral_inversion;
+ unsigned int active:1;
+ unsigned int warm:1;
+ unsigned int ts_clock_inv:1;
+ unsigned int ts_clock_gapped:1;
+ unsigned int spectral_inversion:1;
};
/* firmware command struct */
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 849d63dbc279..e83836b29715 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -685,10 +685,33 @@ tc90522_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
p += new_msgs[j].len;
}
- if (i < num)
+ if (i < num) {
ret = -ENOMEM;
- else
+ } else if (!state->cfg.split_tuner_read_i2c || rd_num == 0) {
ret = i2c_transfer(state->i2c_client->adapter, new_msgs, j);
+ } else {
+ /*
+ * Split transactions at each I2C_M_RD message.
+ * Some parent devices require this,
+ * e.g. Friio (see dvb-usb-gl861).
+ */
+ int from, to;
+
+ ret = 0;
+ from = 0;
+ do {
+ int r;
+
+ to = from + 1;
+ while (to < j && !(new_msgs[to].flags & I2C_M_RD))
+ to++;
+ r = i2c_transfer(state->i2c_client->adapter,
+ &new_msgs[from], to - from);
+ ret = (r <= 0) ? r : ret + r;
+ from = to;
+ } while (from < j && ret > 0);
+ }
+
if (ret >= 0 && ret < j)
ret = -EIO;
kfree(new_msgs);
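
The tc90522 hunk splits the rebuilt message array into one i2c_transfer() call per chunk, cutting before every I2C_M_RD message, for parent adapters that cannot handle mixed transactions. A simplified, self-contained sketch of that split loop (hypothetical function name):

#include <linux/i2c.h>

/* Transfer msgs[0..n) in chunks, starting a new chunk at every read message. */
static int transfer_split_at_reads(struct i2c_adapter *adap,
				   struct i2c_msg *msgs, int n)
{
	int from = 0, done = 0;

	while (from < n) {
		int to = from + 1;
		int r;

		while (to < n && !(msgs[to].flags & I2C_M_RD))
			to++;

		r = i2c_transfer(adap, &msgs[from], to - from);
		if (r < 0)
			return r;

		done += r;
		from = to;
	}

	return done;	/* number of messages transferred */
}
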
diff --git a/drivers/media/dvb-frontends/tc90522.h b/drivers/media/dvb-frontends/tc90522.h
index ac0e2ab51924..07e3813bf590 100644
--- a/drivers/media/dvb-frontends/tc90522.h
+++ b/drivers/media/dvb-frontends/tc90522.h
@@ -28,6 +28,9 @@ struct tc90522_config {
/* [OUT] tuner I2C adapter returned by driver */
struct i2c_adapter *tuner_i2c;
+
+ /* [IN] use two separate I2C transactions for one tuner read */
+ bool split_tuner_read_i2c;
};
#endif /* TC90522_H */
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 7eee1812bba3..c68e002d26ea 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -566,10 +566,23 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+if MEDIA_CAMERA_SUPPORT
+
+config VIDEO_HI556
+ tristate "Hynix Hi-556 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Hynix
+ Hi-556 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hi556.
+
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB && I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
@@ -581,7 +594,6 @@ config VIDEO_IMX214
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
@@ -592,16 +604,25 @@ config VIDEO_IMX258
config VIDEO_IMX274
tristate "Sony IMX274 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
+config VIDEO_IMX290
+ tristate "Sony IMX290 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX290 camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx290.
+
config VIDEO_IMX319
tristate "Sony IMX319 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX319 camera.
@@ -612,7 +633,6 @@ config VIDEO_IMX319
config VIDEO_IMX355
tristate "Sony IMX355 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX355 camera.
@@ -623,7 +643,6 @@ config VIDEO_IMX355
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2640 camera.
@@ -633,8 +652,7 @@ config VIDEO_OV2640
config VIDEO_OV2659
tristate "OmniVision OV2659 sensor support"
- depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
+ depends on VIDEO_V4L2 && I2C && GPIOLIB
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -646,7 +664,6 @@ config VIDEO_OV2659
config VIDEO_OV2680
tristate "OmniVision OV2680 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -658,7 +675,6 @@ config VIDEO_OV2680
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -671,7 +687,6 @@ config VIDEO_OV5640
tristate "OmniVision OV5640 sensor support"
depends on OF
depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Omnivision
@@ -681,7 +696,6 @@ config VIDEO_OV5645
tristate "OmniVision OV5645 sensor support"
depends on OF
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -693,7 +707,6 @@ config VIDEO_OV5645
config VIDEO_OV5647
tristate "OmniVision OV5647 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -705,7 +718,6 @@ config VIDEO_OV5647
config VIDEO_OV6650
tristate "OmniVision OV6650 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV6650 camera.
@@ -716,7 +728,6 @@ config VIDEO_OV6650
config VIDEO_OV5670
tristate "OmniVision OV5670 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -729,7 +740,6 @@ config VIDEO_OV5670
config VIDEO_OV5675
tristate "OmniVision OV5675 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -742,7 +752,7 @@ config VIDEO_OV5675
config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5695 camera.
@@ -753,7 +763,6 @@ config VIDEO_OV5695
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -765,7 +774,6 @@ config VIDEO_OV7251
config VIDEO_OV772X
tristate "OmniVision OV772x sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_SCCB
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -777,7 +785,6 @@ config VIDEO_OV772X
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
@@ -788,7 +795,6 @@ config VIDEO_OV7640
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -798,7 +804,6 @@ config VIDEO_OV7670
config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
@@ -806,7 +811,6 @@ config VIDEO_OV7740
config VIDEO_OV8856
tristate "OmniVision OV8856 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -833,7 +837,6 @@ config VIDEO_OV9650
config VIDEO_OV13858
tristate "OmniVision OV13858 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -842,7 +845,6 @@ config VIDEO_OV13858
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the ST VS6624
camera.
@@ -853,7 +855,6 @@ config VIDEO_VS6624
config VIDEO_MT9M001
tristate "mt9m001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports MT9M001 cameras from Micron, monochrome
and colour models.
@@ -861,7 +862,6 @@ config VIDEO_MT9M001
config VIDEO_MT9M032
tristate "MT9M032 camera sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
help
This driver supports MT9M032 camera sensors from Aptina, monochrome
@@ -878,7 +878,6 @@ config VIDEO_MT9M111
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
help
This is a Video4Linux2 sensor driver for the Aptina
@@ -887,7 +886,6 @@ config VIDEO_MT9P031
config VIDEO_MT9T001
tristate "Aptina MT9T001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T001 3 Mpixel camera.
@@ -895,7 +893,6 @@ config VIDEO_MT9T001
config VIDEO_MT9T112
tristate "Aptina MT9T111/MT9T112 support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T111 and MT9T112 3 Mpixel cameras.
@@ -906,7 +903,6 @@ config VIDEO_MT9T112
config VIDEO_MT9V011
tristate "Micron mt9v011 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Micron
MT9V011 1.3 Mpixel camera. It currently only works with the
@@ -915,7 +911,6 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
select V4L2_FWNODE
help
@@ -925,7 +920,6 @@ config VIDEO_MT9V032
config VIDEO_MT9V111
tristate "Aptina MT9V111 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina/Micron
MT9V111 sensor.
@@ -936,14 +930,12 @@ config VIDEO_MT9V111
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports SR030PC30 VGA camera from Siliconfile
config VIDEO_NOON010PC30
tristate "Siliconfile NOON010PC30 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports NOON010PC30 CIF camera from Siliconfile
@@ -952,7 +944,6 @@ source "drivers/media/i2c/m5mols/Kconfig"
config VIDEO_RJ54N1
tristate "Sharp RJ54N1CB0C sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a V4L2 sensor driver for Sharp RJ54N1CB0C CMOS image
sensor.
@@ -962,7 +953,6 @@ config VIDEO_RJ54N1
config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
- depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
@@ -970,7 +960,6 @@ config VIDEO_S5K6AA
config VIDEO_S5K6A3
tristate "Samsung S5K6A3 sensor support"
- depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6A3 raw
@@ -1002,12 +991,15 @@ config VIDEO_S5C73M3
help
This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
+endif
comment "Lens drivers"
+if MEDIA_CAMERA_SUPPORT
+
config VIDEO_AD5820
tristate "AD5820 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on GPIOLIB && I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
help
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
@@ -1042,12 +1034,15 @@ config VIDEO_DW9807_VCM
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
+endif
+
comment "Flash devices"
+if MEDIA_CAMERA_SUPPORT
+
config VIDEO_ADP1653
tristate "ADP1653 flash support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
help
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
@@ -1055,7 +1050,6 @@ config VIDEO_ADP1653
config VIDEO_LM3560
tristate "LM3560 dual flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a driver for the lm3560 dual flash controllers. It controls
@@ -1064,12 +1058,13 @@ config VIDEO_LM3560
config VIDEO_LM3646
tristate "LM3646 dual flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a driver for the lm3646 dual flash controllers. It controls
flash, torch LEDs.
+endif
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -1113,6 +1108,7 @@ comment "SDR tuner chips"
config SDR_MAX2175
tristate "Maxim 2175 RF to Bits tuner"
depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
+ select REGMAP_I2C
help
Support for Maxim 2175 tuner. It is an advanced analog/digital
radio receiver with RF-to-Bits front-end designed for SDR solutions.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index beb170b002dc..c147bb9d28db 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -109,9 +109,11 @@ obj-$(CONFIG_VIDEO_I2C) += video-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_HI556) += hi556.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX258) += imx258.o
obj-$(CONFIG_VIDEO_IMX274) += imx274.o
+obj-$(CONFIG_VIDEO_IMX290) += imx290.o
obj-$(CONFIG_VIDEO_IMX319) += imx319.o
obj-$(CONFIG_VIDEO_IMX355) += imx355.o
obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 925c171e7797..19c74db0649f 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -19,13 +19,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
-#define AD5820_NAME "ad5820"
-
/* Register definitions */
#define AD5820_POWER_DOWN (1 << 15)
#define AD5820_DAC_SHIFT 4
@@ -47,6 +46,8 @@ struct ad5820_device {
u32 focus_ramp_time;
u32 focus_ramp_mode;
+ struct gpio_desc *enable_gpio;
+
struct mutex power_lock;
int power_count;
@@ -114,6 +115,8 @@ static int ad5820_power_off(struct ad5820_device *coil, bool standby)
ret = ad5820_update_hw(coil);
}
+ gpiod_set_value_cansleep(coil->enable_gpio, 0);
+
ret2 = regulator_disable(coil->vana);
if (ret)
return ret;
@@ -128,6 +131,8 @@ static int ad5820_power_on(struct ad5820_device *coil, bool restore)
if (ret < 0)
return ret;
+ gpiod_set_value_cansleep(coil->enable_gpio, 1);
+
if (restore) {
/* Restore the hardware settings. */
coil->standby = false;
@@ -138,6 +143,7 @@ static int ad5820_power_on(struct ad5820_device *coil, bool restore)
return 0;
fail:
+ gpiod_set_value_cansleep(coil->enable_gpio, 0);
coil->standby = true;
regulator_disable(coil->vana);
@@ -304,11 +310,21 @@ static int ad5820_probe(struct i2c_client *client,
return ret;
}
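+ /* The enable GPIO is optional; gpiod calls on a NULL descriptor are no-ops */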
+ coil->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(coil->enable_gpio)) {
+ ret = PTR_ERR(coil->enable_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev, "could not get enable gpio\n");
+ return ret;
+ }
+
mutex_init(&coil->power_lock);
v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops);
coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
coil->subdev.internal_ops = &ad5820_internal_ops;
+ coil->subdev.entity.function = MEDIA_ENT_F_LENS;
strscpy(coil->subdev.name, "ad5820 focus", sizeof(coil->subdev.name));
ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
@@ -341,17 +357,28 @@ static int ad5820_remove(struct i2c_client *client)
}
static const struct i2c_device_id ad5820_id_table[] = {
- { AD5820_NAME, 0 },
+ { "ad5820", 0 },
+ { "ad5821", 0 },
+ { "ad5823", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+static const struct of_device_id ad5820_of_table[] = {
+ { .compatible = "adi,ad5820" },
+ { .compatible = "adi,ad5821" },
+ { .compatible = "adi,ad5823" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5820_of_table);
+
static SIMPLE_DEV_PM_OPS(ad5820_pm, ad5820_suspend, ad5820_resume);
static struct i2c_driver ad5820_i2c_driver = {
.driver = {
- .name = AD5820_NAME,
+ .name = "ad5820",
.pm = &ad5820_pm,
+ .of_match_table = ad5820_of_table,
},
.probe = ad5820_probe,
.remove = ad5820_remove,
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index e780969cc2f2..6528e2343fc8 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1309,9 +1309,6 @@ static int adv7180_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr, client->adapter->name);
-
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -1382,6 +1379,9 @@ static int adv7180_probe(struct i2c_client *client,
if (ret)
goto err_free_irq;
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr, client->adapter->name);
+
return 0;
err_free_irq:
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 885619841719..0855f648416d 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2547,7 +2547,7 @@ struct adv7842_cfg_read_infoframe {
u8 payload_addr;
};
-static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infoframe *cri)
+static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_infoframe *cri)
{
int i;
u8 buffer[32];
@@ -2585,7 +2585,7 @@ static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infofr
static void adv7842_log_infoframes(struct v4l2_subdev *sd)
{
int i;
- struct adv7842_cfg_read_infoframe cri[] = {
+ static const struct adv7842_cfg_read_infoframe cri[] = {
{ "AVI", 0x01, 0xe0, 0x00 },
{ "Audio", 0x02, 0xe3, 0x1c },
{ "SDP", 0x04, 0xe6, 0x2a },
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 43336175c7d9..73bc50c919d7 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -157,7 +157,7 @@ static int bt819_init(struct v4l2_subdev *sd)
0x12, 0x04, /* 0x12 Output Format */
0x13, 0x20, /* 0x13 Vertical Scaling msb 0x00
chroma comb OFF, line drop scaling, interlace scaling
- BUG? Why does turning the chroma comb on fuck up color?
+ BUG? Why does turning the chroma comb on screw up color?
Bug in the bt819 stepping on my board?
*/
0x14, 0x00, /* 0x14 Vertical Scaling lsb */
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
new file mode 100644
index 000000000000..c66cd1446c0f
--- /dev/null
+++ b/drivers/media/i2c/hi556.c
@@ -0,0 +1,1200 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
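+/* Width of a register access, in bytes */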
+#define HI556_REG_VALUE_08BIT 1
+#define HI556_REG_VALUE_16BIT 2
+#define HI556_REG_VALUE_24BIT 3
+
+#define HI556_LINK_FREQ_437MHZ 437000000ULL
+#define HI556_MCLK 19200000
+#define HI556_DATA_LANES 2
+#define HI556_RGB_DEPTH 10
+
+#define HI556_REG_CHIP_ID 0x0f16
+#define HI556_CHIP_ID 0x0556
+
+#define HI556_REG_MODE_SELECT 0x0a00
+#define HI556_MODE_STANDBY 0x0000
+#define HI556_MODE_STREAMING 0x0100
+
+/* vertical-timings from sensor */
+#define HI556_REG_FLL 0x0006
+#define HI556_FLL_30FPS 0x0814
+#define HI556_FLL_30FPS_MIN 0x0814
+#define HI556_FLL_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define HI556_REG_LLP 0x0008
+
+/* Exposure controls from sensor */
+#define HI556_REG_EXPOSURE 0x0074
+#define HI556_EXPOSURE_MIN 6
+#define HI556_EXPOSURE_MAX_MARGIN 2
+#define HI556_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HI556_REG_ANALOG_GAIN 0x0077
+#define HI556_ANAL_GAIN_MIN 0
+#define HI556_ANAL_GAIN_MAX 240
+#define HI556_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define HI556_REG_MWB_GR_GAIN 0x0078
+#define HI556_REG_MWB_GB_GAIN 0x007a
+#define HI556_REG_MWB_R_GAIN 0x007c
+#define HI556_REG_MWB_B_GAIN 0x007e
+#define HI556_DGTL_GAIN_MIN 0
+#define HI556_DGTL_GAIN_MAX 2048
+#define HI556_DGTL_GAIN_STEP 1
+#define HI556_DGTL_GAIN_DEFAULT 256
+
+/* Test Pattern Control */
+#define HI556_REG_ISP 0X0a05
+#define HI556_REG_ISP_TPG_EN 0x01
+#define HI556_REG_TEST_PATTERN 0x0201
+
+enum {
+ HI556_LINK_FREQ_437MHZ_INDEX,
+};
+
+struct hi556_reg {
+ u16 address;
+ u16 val;
+};
+
+struct hi556_reg_list {
+ u32 num_of_regs;
+ const struct hi556_reg *regs;
+};
+
+struct hi556_link_freq_config {
+ const struct hi556_reg_list reg_list;
+};
+
+struct hi556_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 llp;
+
+ /* Default vertical timing size */
+ u32 fll_def;
+
+ /* Min vertical timing size */
+ u32 fll_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct hi556_reg_list reg_list;
+};
+
+#define to_hi556(_sd) container_of(_sd, struct hi556, sd)
+
+/* Sensor initialization */
+static const struct hi556_reg mipi_data_rate_874mbps[] = {
+ {0x0e00, 0x0102},
+ {0x0e02, 0x0102},
+ {0x0e0c, 0x0100},
+ {0x2000, 0x7400},
+ {0x2002, 0x001c},
+ {0x2004, 0x0242},
+ {0x2006, 0x0942},
+ {0x2008, 0x7007},
+ {0x200a, 0x0fd9},
+ {0x200c, 0x0259},
+ {0x200e, 0x7008},
+ {0x2010, 0x160e},
+ {0x2012, 0x0047},
+ {0x2014, 0x2118},
+ {0x2016, 0x0041},
+ {0x2018, 0x00d8},
+ {0x201a, 0x0145},
+ {0x201c, 0x0006},
+ {0x201e, 0x0181},
+ {0x2020, 0x13cc},
+ {0x2022, 0x2057},
+ {0x2024, 0x7001},
+ {0x2026, 0x0fca},
+ {0x2028, 0x00cb},
+ {0x202a, 0x009f},
+ {0x202c, 0x7002},
+ {0x202e, 0x13cc},
+ {0x2030, 0x019b},
+ {0x2032, 0x014d},
+ {0x2034, 0x2987},
+ {0x2036, 0x2766},
+ {0x2038, 0x0020},
+ {0x203a, 0x2060},
+ {0x203c, 0x0e5d},
+ {0x203e, 0x181d},
+ {0x2040, 0x2066},
+ {0x2042, 0x20c4},
+ {0x2044, 0x5000},
+ {0x2046, 0x0005},
+ {0x2048, 0x0000},
+ {0x204a, 0x01db},
+ {0x204c, 0x025a},
+ {0x204e, 0x00c0},
+ {0x2050, 0x0005},
+ {0x2052, 0x0006},
+ {0x2054, 0x0ad9},
+ {0x2056, 0x0259},
+ {0x2058, 0x0618},
+ {0x205a, 0x0258},
+ {0x205c, 0x2266},
+ {0x205e, 0x20c8},
+ {0x2060, 0x2060},
+ {0x2062, 0x707b},
+ {0x2064, 0x0fdd},
+ {0x2066, 0x81b8},
+ {0x2068, 0x5040},
+ {0x206a, 0x0020},
+ {0x206c, 0x5060},
+ {0x206e, 0x3143},
+ {0x2070, 0x5081},
+ {0x2072, 0x025c},
+ {0x2074, 0x7800},
+ {0x2076, 0x7400},
+ {0x2078, 0x001c},
+ {0x207a, 0x0242},
+ {0x207c, 0x0942},
+ {0x207e, 0x0bd9},
+ {0x2080, 0x0259},
+ {0x2082, 0x7008},
+ {0x2084, 0x160e},
+ {0x2086, 0x0047},
+ {0x2088, 0x2118},
+ {0x208a, 0x0041},
+ {0x208c, 0x00d8},
+ {0x208e, 0x0145},
+ {0x2090, 0x0006},
+ {0x2092, 0x0181},
+ {0x2094, 0x13cc},
+ {0x2096, 0x2057},
+ {0x2098, 0x7001},
+ {0x209a, 0x0fca},
+ {0x209c, 0x00cb},
+ {0x209e, 0x009f},
+ {0x20a0, 0x7002},
+ {0x20a2, 0x13cc},
+ {0x20a4, 0x019b},
+ {0x20a6, 0x014d},
+ {0x20a8, 0x2987},
+ {0x20aa, 0x2766},
+ {0x20ac, 0x0020},
+ {0x20ae, 0x2060},
+ {0x20b0, 0x0e5d},
+ {0x20b2, 0x181d},
+ {0x20b4, 0x2066},
+ {0x20b6, 0x20c4},
+ {0x20b8, 0x50a0},
+ {0x20ba, 0x0005},
+ {0x20bc, 0x0000},
+ {0x20be, 0x01db},
+ {0x20c0, 0x025a},
+ {0x20c2, 0x00c0},
+ {0x20c4, 0x0005},
+ {0x20c6, 0x0006},
+ {0x20c8, 0x0ad9},
+ {0x20ca, 0x0259},
+ {0x20cc, 0x0618},
+ {0x20ce, 0x0258},
+ {0x20d0, 0x2266},
+ {0x20d2, 0x20c8},
+ {0x20d4, 0x2060},
+ {0x20d6, 0x707b},
+ {0x20d8, 0x0fdd},
+ {0x20da, 0x86b8},
+ {0x20dc, 0x50e0},
+ {0x20de, 0x0020},
+ {0x20e0, 0x5100},
+ {0x20e2, 0x3143},
+ {0x20e4, 0x5121},
+ {0x20e6, 0x7800},
+ {0x20e8, 0x3140},
+ {0x20ea, 0x01c4},
+ {0x20ec, 0x01c1},
+ {0x20ee, 0x01c0},
+ {0x20f0, 0x01c4},
+ {0x20f2, 0x2700},
+ {0x20f4, 0x3d40},
+ {0x20f6, 0x7800},
+ {0x20f8, 0xffff},
+ {0x27fe, 0xe000},
+ {0x3000, 0x60f8},
+ {0x3002, 0x187f},
+ {0x3004, 0x7060},
+ {0x3006, 0x0114},
+ {0x3008, 0x60b0},
+ {0x300a, 0x1473},
+ {0x300c, 0x0013},
+ {0x300e, 0x140f},
+ {0x3010, 0x0040},
+ {0x3012, 0x100f},
+ {0x3014, 0x60f8},
+ {0x3016, 0x187f},
+ {0x3018, 0x7060},
+ {0x301a, 0x0114},
+ {0x301c, 0x60b0},
+ {0x301e, 0x1473},
+ {0x3020, 0x0013},
+ {0x3022, 0x140f},
+ {0x3024, 0x0040},
+ {0x3026, 0x000f},
+
+ {0x0b00, 0x0000},
+ {0x0b02, 0x0045},
+ {0x0b04, 0xb405},
+ {0x0b06, 0xc403},
+ {0x0b08, 0x0081},
+ {0x0b0a, 0x8252},
+ {0x0b0c, 0xf814},
+ {0x0b0e, 0xc618},
+ {0x0b10, 0xa828},
+ {0x0b12, 0x004c},
+ {0x0b14, 0x4068},
+ {0x0b16, 0x0000},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7067},
+ {0x0954, 0x0009},
+ {0x0956, 0x0000},
+ {0x0958, 0xbb80},
+ {0x095a, 0x5140},
+ {0x0c00, 0x1110},
+ {0x0c02, 0x0011},
+ {0x0c04, 0x0000},
+ {0x0c06, 0x0200},
+ {0x0c10, 0x0040},
+ {0x0c12, 0x0040},
+ {0x0c14, 0x0040},
+ {0x0c16, 0x0040},
+ {0x0a10, 0x4000},
+ {0x3068, 0xf800},
+ {0x306a, 0xf876},
+ {0x006c, 0x0000},
+ {0x005e, 0x0200},
+ {0x000e, 0x0100},
+ {0x0e0a, 0x0001},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0202},
+ {0x0012, 0x000e},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0028},
+ {0x002a, 0x002d},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x1111},
+ {0x0030, 0x1111},
+ {0x0032, 0x1111},
+ {0x0006, 0x07bc},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0a20},
+ {0x0a14, 0x0798},
+ {0x003e, 0x0000},
+ {0x0074, 0x080e},
+ {0x0070, 0x0407},
+ {0x0002, 0x0000},
+ {0x0a02, 0x0100},
+ {0x0a24, 0x0100},
+ {0x0046, 0x0000},
+ {0x0076, 0x0000},
+ {0x0060, 0x0000},
+ {0x0062, 0x0530},
+ {0x0064, 0x0500},
+ {0x0066, 0x0530},
+ {0x0068, 0x0500},
+ {0x0122, 0x0300},
+ {0x015a, 0xff08},
+ {0x0804, 0x0300},
+ {0x0806, 0x0100},
+ {0x005c, 0x0102},
+ {0x0a1a, 0x0800},
+};
+
+static const struct hi556_reg mode_2592x1944_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8252},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7067},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0202},
+ {0x0012, 0x000e},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0028},
+ {0x002a, 0x002d},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x1111},
+ {0x0030, 0x1111},
+ {0x0032, 0x1111},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0a20},
+ {0x0a14, 0x0798},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0300},
+ {0x0806, 0x0100},
+ {0x0a04, 0x014a},
+ {0x090c, 0x0fdc},
+ {0x090e, 0x002d},
+
+ {0x0902, 0x4319},
+ {0x0914, 0xc10a},
+ {0x0916, 0x071f},
+ {0x0918, 0x0408},
+ {0x091a, 0x0c0d},
+ {0x091c, 0x0f09},
+ {0x091e, 0x0a00},
+ {0x0958, 0xbb80},
+};
+
+static const struct hi556_reg mode_1296x972_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8259},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7167},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0122},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0404},
+ {0x0012, 0x000c},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0022},
+ {0x002a, 0x002b},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x3311},
+ {0x0030, 0x3311},
+ {0x0032, 0x3311},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0510},
+ {0x0a14, 0x03cc},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0308},
+ {0x0806, 0x0100},
+ {0x0a04, 0x016a},
+ {0x090e, 0x0010},
+ {0x090c, 0x09c0},
+
+ {0x0902, 0x4319},
+ {0x0914, 0xc106},
+ {0x0916, 0x040e},
+ {0x0918, 0x0304},
+ {0x091a, 0x0708},
+ {0x091c, 0x0e06},
+ {0x091e, 0x0300},
+ {0x0958, 0xbb80},
+};
+
+static const char * const hi556_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "100% Colour Bars",
+ "Fade To Grey Colour Bars",
+ "PN9",
+ "Gradient Horizontal",
+ "Gradient Vertical",
+ "Check Board",
+ "Slant Pattern",
+};
+
+static const s64 link_freq_menu_items[] = {
+ HI556_LINK_FREQ_437MHZ,
+};
+
+static const struct hi556_link_freq_config link_freq_configs[] = {
+ [HI556_LINK_FREQ_437MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_874mbps),
+ .regs = mipi_data_rate_874mbps,
+ }
+ }
+};
+
+static const struct hi556_mode supported_modes[] = {
+ {
+ .width = 2592,
+ .height = 1944,
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs),
+ .regs = mode_2592x1944_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ },
+ {
+ .width = 1296,
+ .height = 972,
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x972_regs),
+ .regs = mode_1296x972_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ }
+};
+
+struct hi556 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct hi556_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
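+/* Pixel rate = link frequency * 2 (double data rate) * lanes / bits per pixel */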
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HI556_DATA_LANES;
+
+ do_div(pixel_rate, HI556_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static int hi556_read_reg(struct hi556 *hi556, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
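+ /* Read into the tail of data_buf so get_unaligned_be32() right-aligns the value */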
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int hi556_write_reg(struct hi556 *hi556, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
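+ /* Shift the value so its 'len' significant bytes sit at the start of the field following the address */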
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int hi556_write_reg_list(struct hi556 *hi556,
+ const struct hi556_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = hi556_write_reg(hi556, r_list->regs[i].address,
+ HI556_REG_VALUE_16BIT,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hi556_update_digital_gain(struct hi556 *hi556, u32 d_gain)
+{
+ int ret;
+
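+ /* Program the same digital gain into all four Bayer white-balance channels */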
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_GR_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_GB_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_R_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ return hi556_write_reg(hi556, HI556_REG_MWB_B_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+}
+
+static int hi556_test_pattern(struct hi556 *hi556, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ if (pattern) {
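+ /* Enable the ISP test pattern generator before selecting a pattern */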
+ ret = hi556_read_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT,
+ val | HI556_REG_ISP_TPG_EN);
+ if (ret)
+ return ret;
+ }
+
+ return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN,
+ HI556_REG_VALUE_08BIT, pattern);
+}
+
+static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hi556 *hi556 = container_of(ctrl->handler,
+ struct hi556, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = hi556->cur_mode->height + ctrl->val -
+ HI556_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hi556->exposure,
+ hi556->exposure->minimum,
+ exposure_max, hi556->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hi556_write_reg(hi556, HI556_REG_ANALOG_GAIN,
+ HI556_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hi556_update_digital_gain(hi556, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = hi556_write_reg(hi556, HI556_REG_EXPOSURE,
+ HI556_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ ret = hi556_write_reg(hi556, HI556_REG_FLL,
+ HI556_REG_VALUE_16BIT,
+ hi556->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = hi556_test_pattern(hi556, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hi556_ctrl_ops = {
+ .s_ctrl = hi556_set_ctrl,
+};
+
+static int hi556_init_controls(struct hi556 *hi556)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+
+ ctrl_hdlr = &hi556->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hi556->mutex;
+ hi556->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+ if (hi556->link_freq)
+ hi556->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hi556->pixel_rate = v4l2_ctrl_new_std
+ (ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX),
+ 1,
+ to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX));
+ hi556->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_VBLANK,
+ hi556->cur_mode->fll_min -
+ hi556->cur_mode->height,
+ HI556_FLL_MAX -
+ hi556->cur_mode->height, 1,
+ hi556->cur_mode->fll_def -
+ hi556->cur_mode->height);
+
+ h_blank = hi556->cur_mode->llp - hi556->cur_mode->width;
+
+ hi556->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hi556->hblank)
+ hi556->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HI556_ANAL_GAIN_MIN, HI556_ANAL_GAIN_MAX,
+ HI556_ANAL_GAIN_STEP, HI556_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HI556_DGTL_GAIN_MIN, HI556_DGTL_GAIN_MAX,
+ HI556_DGTL_GAIN_STEP, HI556_DGTL_GAIN_DEFAULT);
+ exposure_max = hi556->cur_mode->fll_def - HI556_EXPOSURE_MAX_MARGIN;
+ hi556->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HI556_EXPOSURE_MIN, exposure_max,
+ HI556_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hi556_test_pattern_menu) - 1,
+ 0, 0, hi556_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ hi556->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void hi556_assign_pad_format(const struct hi556_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int hi556_start_streaming(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ const struct hi556_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = hi556->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = hi556_write_reg_list(hi556, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &hi556->cur_mode->reg_list;
+ ret = hi556_write_reg_list(hi556, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(hi556->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
+ HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
+
+ if (ret) {
+ dev_err(&client->dev, "failed to set stream");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hi556_stop_streaming(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+
+ if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
+ HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
+ dev_err(&client->dev, "failed to set stream");
+}
+
+static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (hi556->streaming == enable)
+ return 0;
+
+ mutex_lock(&hi556->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&hi556->mutex);
+ return ret;
+ }
+
+ ret = hi556_start_streaming(hi556);
+ if (ret) {
+ enable = 0;
+ hi556_stop_streaming(hi556);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ hi556_stop_streaming(hi556);
+ pm_runtime_put(&client->dev);
+ }
+
+ hi556->streaming = enable;
+ mutex_unlock(&hi556->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused hi556_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ if (hi556->streaming)
+ hi556_stop_streaming(hi556);
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused hi556_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+ int ret;
+
+ mutex_lock(&hi556->mutex);
+ if (hi556->streaming) {
+ ret = hi556_start_streaming(hi556);
+ if (ret)
+ goto error;
+ }
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+
+error:
+ hi556_stop_streaming(hi556);
+ hi556->streaming = 0;
+ mutex_unlock(&hi556->mutex);
+ return ret;
+}
+
+static int hi556_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+ const struct hi556_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&hi556->mutex);
+ hi556_assign_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ hi556->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(hi556->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(hi556->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->fll_def - mode->height;
+ __v4l2_ctrl_modify_range(hi556->vblank,
+ mode->fll_min - mode->height,
+ HI556_FLL_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hi556->vblank, vblank_def);
+
+ h_blank = hi556->cur_mode->llp - hi556->cur_mode->width;
+
+ __v4l2_ctrl_modify_range(hi556->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&hi556->sd, cfg,
+ fmt->pad);
+ else
+ hi556_assign_pad_format(hi556->cur_mode, &fmt->format);
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int hi556_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int hi556_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ hi556_assign_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hi556_video_ops = {
+ .s_stream = hi556_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hi556_pad_ops = {
+ .set_fmt = hi556_set_format,
+ .get_fmt = hi556_get_format,
+ .enum_mbus_code = hi556_enum_mbus_code,
+ .enum_frame_size = hi556_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops hi556_subdev_ops = {
+ .video = &hi556_video_ops,
+ .pad = &hi556_pad_ops,
+};
+
+static const struct media_entity_operations hi556_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops hi556_internal_ops = {
+ .open = hi556_open,
+};
+
+static int hi556_identify_module(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ int ret;
+ u32 val;
+
+ ret = hi556_read_reg(hi556, HI556_REG_CHIP_ID,
+ HI556_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != HI556_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ HI556_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int hi556_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret = 0;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (mclk != HI556_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
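+ /* Every link frequency the driver supports must also appear in the firmware endpoint */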
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int hi556_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_probe(struct i2c_client *client)
+{
+ struct hi556 *hi556;
+ int ret;
+
+ ret = hi556_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ hi556 = devm_kzalloc(&client->dev, sizeof(*hi556), GFP_KERNEL);
+ if (!hi556)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
+ ret = hi556_identify_module(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&hi556->mutex);
+ hi556->cur_mode = &supported_modes[0];
+ ret = hi556_init_controls(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ hi556->sd.internal_ops = &hi556_internal_ops;
+ hi556->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hi556->sd.entity.ops = &hi556_subdev_entity_ops;
+ hi556->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&hi556->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&hi556->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hi556->sd.ctrl_handler);
+ mutex_destroy(&hi556->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops hi556_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hi556_suspend, hi556_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hi556_acpi_ids[] = {
+ {"INT3537"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, hi556_acpi_ids);
+#endif
+
+static struct i2c_driver hi556_i2c_driver = {
+ .driver = {
+ .name = "hi556",
+ .pm = &hi556_pm_ops,
+ .acpi_match_table = ACPI_PTR(hi556_acpi_ids),
+ },
+ .probe_new = hi556_probe,
+ .remove = hi556_remove,
+};
+
+module_i2c_driver(hi556_i2c_driver);
+
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_DESCRIPTION("Hynix HI556 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 159a3a604f0e..adcaaa8c86d1 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -47,6 +47,7 @@ struct imx214 {
struct v4l2_ctrl *pixel_rate;
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *unit_size;
struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
@@ -948,6 +949,10 @@ static int imx214_probe(struct i2c_client *client)
static const s64 link_freq[] = {
IMX214_DEFAULT_LINK_FREQ,
};
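+ /* Physical size of one pixel (unit cell) in nanometres, exposed via V4L2_CID_UNIT_CELL_SIZE */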
+ static const struct v4l2_area unit_size = {
+ .width = 1120,
+ .height = 1120,
+ };
int ret;
ret = imx214_parse_fwnode(dev);
@@ -1029,6 +1034,10 @@ static int imx214_probe(struct i2c_client *client)
V4L2_CID_EXPOSURE,
0, 3184, 1, 0x0c70);
+ imx214->unit_size = v4l2_ctrl_new_std_compound(&imx214->ctrls,
+ NULL,
+ V4L2_CID_UNIT_CELL_SIZE,
+ v4l2_ctrl_ptr_create((void *)&unit_size));
ret = imx214->ctrls.error;
if (ret) {
dev_err(&client->dev, "%s control init failed (%d)\n",
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
new file mode 100644
index 000000000000..f7678e5a5d87
--- /dev/null
+++ b/drivers/media/i2c/imx290.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sony IMX290 CMOS Image Sensor Driver
+ *
+ * Copyright (C) 2019 FRAMOS GmbH.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define IMX290_STANDBY 0x3000
+#define IMX290_REGHOLD 0x3001
+#define IMX290_XMSTA 0x3002
+#define IMX290_GAIN 0x3014
+
+#define IMX290_DEFAULT_LINK_FREQ 445500000
+
+static const char * const imx290_supply_name[] = {
+ "vdda",
+ "vddd",
+ "vdddo",
+};
+
+#define IMX290_NUM_SUPPLIES ARRAY_SIZE(imx290_supply_name)
+
+struct imx290_regval {
+ u16 reg;
+ u8 val;
+};
+
+struct imx290_mode {
+ u32 width;
+ u32 height;
+ u32 pixel_rate;
+ u32 link_freq_index;
+
+ const struct imx290_regval *data;
+ u32 data_size;
+};
+
+struct imx290 {
+ struct device *dev;
+ struct clk *xclk;
+ struct regmap *regmap;
+
+ struct v4l2_subdev sd;
+ struct v4l2_fwnode_endpoint ep;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt current_format;
+ const struct imx290_mode *current_mode;
+
+ struct regulator_bulk_data supplies[IMX290_NUM_SUPPLIES];
+ struct gpio_desc *rst_gpio;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+
+ struct mutex lock;
+};
+
+struct imx290_pixfmt {
+ u32 code;
+};
+
+static const struct imx290_pixfmt imx290_formats[] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10 },
+};
+
+static const struct regmap_config imx290_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct imx290_regval imx290_global_init_settings[] = {
+ { 0x3007, 0x00 },
+ { 0x3009, 0x00 },
+ { 0x3018, 0x65 },
+ { 0x3019, 0x04 },
+ { 0x301a, 0x00 },
+ { 0x3443, 0x03 },
+ { 0x3444, 0x20 },
+ { 0x3445, 0x25 },
+ { 0x3407, 0x03 },
+ { 0x303a, 0x0c },
+ { 0x3040, 0x00 },
+ { 0x3041, 0x00 },
+ { 0x303c, 0x00 },
+ { 0x303d, 0x00 },
+ { 0x3042, 0x9c },
+ { 0x3043, 0x07 },
+ { 0x303e, 0x49 },
+ { 0x303f, 0x04 },
+ { 0x304b, 0x0a },
+ { 0x300f, 0x00 },
+ { 0x3010, 0x21 },
+ { 0x3012, 0x64 },
+ { 0x3016, 0x09 },
+ { 0x3070, 0x02 },
+ { 0x3071, 0x11 },
+ { 0x309b, 0x10 },
+ { 0x309c, 0x22 },
+ { 0x30a2, 0x02 },
+ { 0x30a6, 0x20 },
+ { 0x30a8, 0x20 },
+ { 0x30aa, 0x20 },
+ { 0x30ac, 0x20 },
+ { 0x30b0, 0x43 },
+ { 0x3119, 0x9e },
+ { 0x311c, 0x1e },
+ { 0x311e, 0x08 },
+ { 0x3128, 0x05 },
+ { 0x313d, 0x83 },
+ { 0x3150, 0x03 },
+ { 0x317e, 0x00 },
+ { 0x32b8, 0x50 },
+ { 0x32b9, 0x10 },
+ { 0x32ba, 0x00 },
+ { 0x32bb, 0x04 },
+ { 0x32c8, 0x50 },
+ { 0x32c9, 0x10 },
+ { 0x32ca, 0x00 },
+ { 0x32cb, 0x04 },
+ { 0x332c, 0xd3 },
+ { 0x332d, 0x10 },
+ { 0x332e, 0x0d },
+ { 0x3358, 0x06 },
+ { 0x3359, 0xe1 },
+ { 0x335a, 0x11 },
+ { 0x3360, 0x1e },
+ { 0x3361, 0x61 },
+ { 0x3362, 0x10 },
+ { 0x33b0, 0x50 },
+ { 0x33b2, 0x1a },
+ { 0x33b3, 0x04 },
+};
+
+static const struct imx290_regval imx290_1080p_settings[] = {
+ /* mode settings */
+ { 0x3007, 0x00 },
+ { 0x303a, 0x0c },
+ { 0x3414, 0x0a },
+ { 0x3472, 0x80 },
+ { 0x3473, 0x07 },
+ { 0x3418, 0x38 },
+ { 0x3419, 0x04 },
+ { 0x3012, 0x64 },
+ { 0x3013, 0x00 },
+ { 0x305c, 0x18 },
+ { 0x305d, 0x03 },
+ { 0x305e, 0x20 },
+ { 0x305f, 0x01 },
+ { 0x315e, 0x1a },
+ { 0x3164, 0x1a },
+ { 0x3480, 0x49 },
+ /* data rate settings */
+ { 0x3009, 0x01 },
+ { 0x3405, 0x10 },
+ { 0x3446, 0x57 },
+ { 0x3447, 0x00 },
+ { 0x3448, 0x37 },
+ { 0x3449, 0x00 },
+ { 0x344a, 0x1f },
+ { 0x344b, 0x00 },
+ { 0x344c, 0x1f },
+ { 0x344d, 0x00 },
+ { 0x344e, 0x1f },
+ { 0x344f, 0x00 },
+ { 0x3450, 0x77 },
+ { 0x3451, 0x00 },
+ { 0x3452, 0x1f },
+ { 0x3453, 0x00 },
+ { 0x3454, 0x17 },
+ { 0x3455, 0x00 },
+ { 0x301c, 0x98 },
+ { 0x301d, 0x08 },
+};
+
+static const struct imx290_regval imx290_720p_settings[] = {
+ /* mode settings */
+ { 0x3007, 0x10 },
+ { 0x303a, 0x06 },
+ { 0x3414, 0x04 },
+ { 0x3472, 0x00 },
+ { 0x3473, 0x05 },
+ { 0x3418, 0xd0 },
+ { 0x3419, 0x02 },
+ { 0x3012, 0x64 },
+ { 0x3013, 0x00 },
+ { 0x305c, 0x20 },
+ { 0x305d, 0x00 },
+ { 0x305e, 0x20 },
+ { 0x305f, 0x01 },
+ { 0x315e, 0x1a },
+ { 0x3164, 0x1a },
+ { 0x3480, 0x49 },
+ /* data rate settings */
+ { 0x3009, 0x01 },
+ { 0x3405, 0x10 },
+ { 0x3446, 0x4f },
+ { 0x3447, 0x00 },
+ { 0x3448, 0x2f },
+ { 0x3449, 0x00 },
+ { 0x344a, 0x17 },
+ { 0x344b, 0x00 },
+ { 0x344c, 0x17 },
+ { 0x344d, 0x00 },
+ { 0x344e, 0x17 },
+ { 0x344f, 0x00 },
+ { 0x3450, 0x57 },
+ { 0x3451, 0x00 },
+ { 0x3452, 0x17 },
+ { 0x3453, 0x00 },
+ { 0x3454, 0x17 },
+ { 0x3455, 0x00 },
+ { 0x301c, 0xe4 },
+ { 0x301d, 0x0c },
+};
+
+static const struct imx290_regval imx290_10bit_settings[] = {
+ { 0x3005, 0x00},
+ { 0x3046, 0x00},
+ { 0x3129, 0x1d},
+ { 0x317c, 0x12},
+ { 0x31ec, 0x37},
+ { 0x3441, 0x0a},
+ { 0x3442, 0x0a},
+ { 0x300a, 0x3c},
+ { 0x300b, 0x00},
+};
+
+/* supported link frequencies */
+static const s64 imx290_link_freq[] = {
+ IMX290_DEFAULT_LINK_FREQ,
+};
+
+/* Mode configs */
+static const struct imx290_mode imx290_modes[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .data = imx290_1080p_settings,
+ .data_size = ARRAY_SIZE(imx290_1080p_settings),
+ .pixel_rate = 178200000,
+ .link_freq_index = 0,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .data = imx290_720p_settings,
+ .data_size = ARRAY_SIZE(imx290_720p_settings),
+ .pixel_rate = 178200000,
+ .link_freq_index = 0,
+ },
+};
+
+static inline struct imx290 *to_imx290(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx290, sd);
+}
+
+static inline int imx290_read_reg(struct imx290 *imx290, u16 addr, u8 *value)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(imx290->regmap, addr, &regval);
+ if (ret) {
+ dev_err(imx290->dev, "I2C read failed for addr: %x\n", addr);
+ return ret;
+ }
+
+ *value = regval & 0xff;
+
+ return 0;
+}
+
+static int imx290_write_reg(struct imx290 *imx290, u16 addr, u8 value)
+{
+ int ret;
+
+ ret = regmap_write(imx290->regmap, addr, value);
+ if (ret) {
+ dev_err(imx290->dev, "I2C write failed for addr: %x\n", addr);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int imx290_set_register_array(struct imx290 *imx290,
+ const struct imx290_regval *settings,
+ unsigned int num_settings)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_settings; ++i, ++settings) {
+ ret = imx290_write_reg(imx290, settings->reg, settings->val);
+ if (ret < 0)
+ return ret;
+
+ /* Settle time is 10ms for all registers */
+ msleep(10);
+ }
+
+ return 0;
+}
+
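+/* Write a multi-byte register value with REGHOLD set so the sensor applies all bytes together */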
+static int imx290_write_buffered_reg(struct imx290 *imx290, u16 address_low,
+ u8 nr_regs, u32 value)
+{
+ unsigned int i;
+ int ret;
+
+ ret = imx290_write_reg(imx290, IMX290_REGHOLD, 0x01);
+ if (ret) {
+ dev_err(imx290->dev, "Error setting hold register\n");
+ return ret;
+ }
+
+ for (i = 0; i < nr_regs; i++) {
+ ret = imx290_write_reg(imx290, address_low + i,
+ (u8)(value >> (i * 8)));
+ if (ret) {
+ dev_err(imx290->dev, "Error writing buffered registers\n");
+ return ret;
+ }
+ }
+
+ ret = imx290_write_reg(imx290, IMX290_REGHOLD, 0x00);
+ if (ret) {
+ dev_err(imx290->dev, "Error setting hold register\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int imx290_set_gain(struct imx290 *imx290, u32 value)
+{
+ int ret;
+
+ ret = imx290_write_buffered_reg(imx290, IMX290_GAIN, 1, value);
+ if (ret)
+ dev_err(imx290->dev, "Unable to write gain\n");
+
+ return ret;
+}
+
+/* Stop streaming */
+static int imx290_stop_streaming(struct imx290 *imx290)
+{
+ int ret;
+
+ ret = imx290_write_reg(imx290, IMX290_STANDBY, 0x01);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ return imx290_write_reg(imx290, IMX290_XMSTA, 0x01);
+}
+
+static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx290 *imx290 = container_of(ctrl->handler,
+ struct imx290, ctrls);
+ int ret = 0;
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(imx290->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ret = imx290_set_gain(imx290, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(imx290->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx290_ctrl_ops = {
+ .s_ctrl = imx290_set_ctrl,
+};
+
+static int imx290_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(imx290_formats))
+ return -EINVAL;
+
+ code->code = imx290_formats[code->index].code;
+
+ return 0;
+}
+
+static int imx290_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&imx290->lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ framefmt = v4l2_subdev_get_try_format(&imx290->sd, cfg,
+ fmt->pad);
+ else
+ framefmt = &imx290->current_format;
+
+ fmt->format = *framefmt;
+
+ mutex_unlock(&imx290->lock);
+
+ return 0;
+}
+
+static int imx290_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ const struct imx290_mode *mode;
+ struct v4l2_mbus_framefmt *format;
+ unsigned int i;
+
+ mutex_lock(&imx290->lock);
+
+ mode = v4l2_find_nearest_size(imx290_modes,
+ ARRAY_SIZE(imx290_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+
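+ /* Fall back to the first supported media bus code if the requested one is unknown */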
+ for (i = 0; i < ARRAY_SIZE(imx290_formats); i++)
+ if (imx290_formats[i].code == fmt->format.code)
+ break;
+
+ if (i >= ARRAY_SIZE(imx290_formats))
+ i = 0;
+
+ fmt->format.code = imx290_formats[i].code;
+ fmt->format.field = V4L2_FIELD_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ } else {
+ format = &imx290->current_format;
+ __v4l2_ctrl_s_ctrl(imx290->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(imx290->pixel_rate, mode->pixel_rate);
+
+ imx290->current_mode = mode;
+ }
+
+ *format = fmt->format;
+
+ mutex_unlock(&imx290->lock);
+
+ return 0;
+}
+
+static int imx290_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = { 0 };
+
+ fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.format.width = 1920;
+ fmt.format.height = 1080;
+
+ imx290_set_fmt(subdev, cfg, &fmt);
+
+ return 0;
+}
+
+static int imx290_write_current_format(struct imx290 *imx290,
+ struct v4l2_mbus_framefmt *format)
+{
+ int ret;
+
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ ret = imx290_set_register_array(imx290, imx290_10bit_settings,
+ ARRAY_SIZE(
+ imx290_10bit_settings));
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set format registers\n");
+ return ret;
+ }
+ break;
+ default:
+ dev_err(imx290->dev, "Unknown pixel format\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Start streaming */
+static int imx290_start_streaming(struct imx290 *imx290)
+{
+ int ret;
+
+ /* Set init register settings */
+ ret = imx290_set_register_array(imx290, imx290_global_init_settings,
+ ARRAY_SIZE(
+ imx290_global_init_settings));
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set init registers\n");
+ return ret;
+ }
+
+ /* Set current frame format */
+ ret = imx290_write_current_format(imx290, &imx290->current_format);
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set frame format\n");
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ ret = imx290_set_register_array(imx290, imx290->current_mode->data,
+ imx290->current_mode->data_size);
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set current mode\n");
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = v4l2_ctrl_handler_setup(imx290->sd.ctrl_handler);
+ if (ret) {
+ dev_err(imx290->dev, "Could not sync v4l2 controls\n");
+ return ret;
+ }
+
+ ret = imx290_write_reg(imx290, IMX290_STANDBY, 0x00);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ /* Start streaming */
+ return imx290_write_reg(imx290, IMX290_XMSTA, 0x00);
+}
+
+static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ int ret = 0;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(imx290->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(imx290->dev);
+ goto unlock_and_return;
+ }
+
+ ret = imx290_start_streaming(imx290);
+ if (ret) {
+ dev_err(imx290->dev, "Start stream failed\n");
+ pm_runtime_put(imx290->dev);
+ goto unlock_and_return;
+ }
+ } else {
+ imx290_stop_streaming(imx290);
+ pm_runtime_put(imx290->dev);
+ }
+
+unlock_and_return:
+
+ return ret;
+}
+
+static int imx290_get_regulators(struct device *dev, struct imx290 *imx290)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX290_NUM_SUPPLIES; i++)
+ imx290->supplies[i].supply = imx290_supply_name[i];
+
+ return devm_regulator_bulk_get(dev, IMX290_NUM_SUPPLIES,
+ imx290->supplies);
+}
+
+static int imx290_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+ int ret;
+
+ ret = clk_prepare_enable(imx290->xclk);
+ if (ret) {
+ dev_err(imx290->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(IMX290_NUM_SUPPLIES, imx290->supplies);
+ if (ret) {
+ dev_err(imx290->dev, "Failed to enable regulators\n");
+ clk_disable_unprepare(imx290->xclk);
+ return ret;
+ }
+
+ usleep_range(1, 2);
+ gpiod_set_value_cansleep(imx290->rst_gpio, 1);
+ usleep_range(30000, 31000);
+
+ return 0;
+}
+
+static int imx290_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+
+ clk_disable_unprepare(imx290->xclk);
+ gpiod_set_value_cansleep(imx290->rst_gpio, 0);
+ regulator_bulk_disable(IMX290_NUM_SUPPLIES, imx290->supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx290_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx290_power_on, imx290_power_off, NULL)
+};
+
+static const struct v4l2_subdev_video_ops imx290_video_ops = {
+ .s_stream = imx290_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx290_pad_ops = {
+ .init_cfg = imx290_entity_init_cfg,
+ .enum_mbus_code = imx290_enum_mbus_code,
+ .get_fmt = imx290_get_fmt,
+ .set_fmt = imx290_set_fmt,
+};
+
+static const struct v4l2_subdev_ops imx290_subdev_ops = {
+ .video = &imx290_video_ops,
+ .pad = &imx290_pad_ops,
+};
+
+static const struct media_entity_operations imx290_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int imx290_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *endpoint;
+ struct imx290 *imx290;
+ u32 xclk_freq;
+ int ret;
+
+ imx290 = devm_kzalloc(dev, sizeof(*imx290), GFP_KERNEL);
+ if (!imx290)
+ return -ENOMEM;
+
+ imx290->dev = dev;
+ imx290->regmap = devm_regmap_init_i2c(client, &imx290_regmap_config);
+ if (IS_ERR(imx290->regmap)) {
+ dev_err(dev, "Unable to initialize I2C\n");
+ return -ENODEV;
+ }
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint) {
+ dev_err(dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &imx290->ep);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(dev, "Parsing endpoint node failed\n");
+ goto free_err;
+ }
+
+ if (!imx290->ep.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ if (imx290->ep.link_frequencies[0] != IMX290_DEFAULT_LINK_FREQ) {
+ dev_err(dev, "Unsupported link frequency\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ /* Only CSI2 is supported for now */
+ if (imx290->ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "Unsupported bus type, should be CSI2\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ /* Set default mode to max resolution */
+ imx290->current_mode = &imx290_modes[0];
+
+ /* get system clock (xclk) */
+ imx290->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(imx290->xclk)) {
+ dev_err(dev, "Could not get xclk");
+ ret = PTR_ERR(imx290->xclk);
+ goto free_err;
+ }
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &xclk_freq);
+ if (ret) {
+ dev_err(dev, "Could not get xclk frequency\n");
+ goto free_err;
+ }
+
+ /* external clock must be 37.125 MHz */
+ if (xclk_freq != 37125000) {
+ dev_err(dev, "External clock frequency %u is not supported\n",
+ xclk_freq);
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ ret = clk_set_rate(imx290->xclk, xclk_freq);
+ if (ret) {
+ dev_err(dev, "Could not set xclk frequency\n");
+ goto free_err;
+ }
+
+ ret = imx290_get_regulators(dev, imx290);
+ if (ret < 0) {
+ dev_err(dev, "Cannot get regulators\n");
+ goto free_err;
+ }
+
+ imx290->rst_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(imx290->rst_gpio)) {
+ dev_err(dev, "Cannot get reset gpio\n");
+ ret = PTR_ERR(imx290->rst_gpio);
+ goto free_err;
+ }
+
+ mutex_init(&imx290->lock);
+
+ v4l2_ctrl_handler_init(&imx290->ctrls, 3);
+
+ v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
+ V4L2_CID_GAIN, 0, 72, 1, 0);
+ imx290->link_freq =
+ v4l2_ctrl_new_int_menu(&imx290->ctrls,
+ &imx290_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(imx290_link_freq) - 1,
+ 0, imx290_link_freq);
+ if (imx290->link_freq)
+ imx290->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ imx290->pixel_rate = v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1,
+ imx290_modes[0].pixel_rate);
+
+ imx290->sd.ctrl_handler = &imx290->ctrls;
+
+ if (imx290->ctrls.error) {
+ dev_err(dev, "Control initialization error %d\n",
+ imx290->ctrls.error);
+ ret = imx290->ctrls.error;
+ goto free_ctrl;
+ }
+
+ v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
+ imx290->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx290->sd.dev = &client->dev;
+ imx290->sd.entity.ops = &imx290_subdev_entity_ops;
+ imx290->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ imx290->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&imx290->sd.entity, 1, &imx290->pad);
+ if (ret < 0) {
+ dev_err(dev, "Could not register media entity\n");
+ goto free_ctrl;
+ }
+
+ ret = v4l2_async_register_subdev(&imx290->sd);
+ if (ret < 0) {
+ dev_err(dev, "Could not register v4l2 device\n");
+ goto free_entity;
+ }
+
+ /* Power on the device to match runtime PM state below */
+ ret = imx290_power_on(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not power on the device\n");
+ goto free_entity;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ v4l2_fwnode_endpoint_free(&imx290->ep);
+
+ return 0;
+
+free_entity:
+ media_entity_cleanup(&imx290->sd.entity);
+free_ctrl:
+ v4l2_ctrl_handler_free(&imx290->ctrls);
+ mutex_destroy(&imx290->lock);
+free_err:
+ v4l2_fwnode_endpoint_free(&imx290->ep);
+
+ return ret;
+}
+
+static int imx290_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ mutex_destroy(&imx290->lock);
+
+ pm_runtime_disable(imx290->dev);
+ if (!pm_runtime_status_suspended(imx290->dev))
+ imx290_power_off(imx290->dev);
+ pm_runtime_set_suspended(imx290->dev);
+
+ return 0;
+}
+
+static const struct of_device_id imx290_of_match[] = {
+ { .compatible = "sony,imx290" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx290_of_match);
+
+static struct i2c_driver imx290_i2c_driver = {
+ .probe_new = imx290_probe,
+ .remove = imx290_remove,
+ .driver = {
+ .name = "imx290",
+ .pm = &imx290_pm_ops,
+ .of_match_table = of_match_ptr(imx290_of_match),
+ },
+};
+
+module_i2c_driver(imx290_i2c_driver);
+
+MODULE_DESCRIPTION("Sony IMX290 CMOS Image Sensor Driver");
+MODULE_AUTHOR("FRAMOS GmbH");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index d8a8853f9a2b..c76ccf67a909 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -134,7 +134,7 @@ static int lm3646_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct lm3646_flash *flash = to_lm3646_flash(ctrl);
unsigned int reg_val;
- int rval = -EINVAL;
+ int rval;
switch (ctrl->id) {
case V4L2_CID_FLASH_LED_MODE:
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 19a3ceea3bc2..506a30e69ced 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -591,8 +591,8 @@ static int max2175_set_lo_freq(struct max2175 *ctx, u32 lo_freq)
lo_freq *= lo_mult;
int_desired = lo_freq / ctx->xtal_freq;
- frac_desired = div_u64((u64)(lo_freq % ctx->xtal_freq) << 20,
- ctx->xtal_freq);
+ frac_desired = div64_ul((u64)(lo_freq % ctx->xtal_freq) << 20,
+ ctx->xtal_freq);
/* Check CSM is not busy */
ret = max2175_poll_csm_ready(ctx);
diff --git a/drivers/media/i2c/max2175.h b/drivers/media/i2c/max2175.h
index 1ece587c153d..4c722ea3e5f1 100644
--- a/drivers/media/i2c/max2175.h
+++ b/drivers/media/i2c/max2175.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Maxim Integrated MAX2175 RF to Bits tuner driver
*
* This driver & most of the hard coded values are based on the reference
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 5613072908ac..210ea76adb53 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -167,7 +167,7 @@ static int multi_reg_write(struct i2c_client *client,
static int mt9m001_init(struct i2c_client *client)
{
- const struct mt9m001_reg init_regs[] = {
+ static const struct mt9m001_reg init_regs[] = {
/*
* Issue a soft reset. This returns all registers to their
* default values.
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index f4ded0669ff9..42f64175a6df 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Omnivision OV2659 CMOS Image Sensor driver
*
@@ -5,46 +6,21 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/media.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/videodev2.h>
+#include <linux/pm_runtime.h>
-#include <media/media-entity.h>
#include <media/i2c/ov2659.h>
-#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
-#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
#define DRIVER_NAME "ov2659"
@@ -232,6 +208,10 @@ struct ov2659 {
struct sensor_register *format_ctrl_regs;
struct ov2659_pll_ctrl pll;
int streaming;
+ /* used to control the sensor PWDN pin */
+ struct gpio_desc *pwdn_gpio;
+ /* used to control the sensor RESETB pin */
+ struct gpio_desc *resetb_gpio;
};
static const struct sensor_register ov2659_init_regs[] = {
@@ -419,10 +399,14 @@ static struct sensor_register ov2659_720p[] = {
{ REG_TIMING_YINC, 0x11 },
{ REG_TIMING_VERT_FORMAT, 0x80 },
{ REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x12 },
{ 0x3a03, 0xe8 },
{ 0x3a09, 0x6f },
{ 0x3a0b, 0x5d },
{ 0x3a15, 0x9a },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
{ REG_NULL, 0x00 },
};
@@ -661,7 +645,7 @@ static struct sensor_register ov2659_vga[] = {
{ REG_TIMING_HORIZ_FORMAT, 0x01 },
{ 0x370a, 0x52 },
{ REG_VFIFO_READ_START_H, 0x00 },
- { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_VFIFO_READ_START_L, 0xa0 },
{ REG_ISP_CTRL02, 0x10 },
{ REG_NULL, 0x00 },
};
@@ -709,7 +693,7 @@ static struct sensor_register ov2659_qvga[] = {
{ REG_TIMING_HORIZ_FORMAT, 0x01 },
{ 0x370a, 0x52 },
{ REG_VFIFO_READ_START_H, 0x00 },
- { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_VFIFO_READ_START_L, 0xa0 },
{ REG_ISP_CTRL02, 0x10 },
{ REG_NULL, 0x00 },
};
@@ -1198,14 +1182,27 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
/* Stop Streaming Sequence */
ov2659_set_streaming(ov2659, 0);
ov2659->streaming = on;
+ pm_runtime_put(&client->dev);
goto unlock;
}
- ov2659_set_pixel_clock(ov2659);
- ov2659_set_frame_size(ov2659);
- ov2659_set_format(ov2659);
- ov2659_set_streaming(ov2659, 1);
- ov2659->streaming = on;
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto unlock;
+ }
+
+ ret = ov2659_init(sd, 0);
+ if (!ret)
+ ret = ov2659_set_pixel_clock(ov2659);
+ if (!ret)
+ ret = ov2659_set_frame_size(ov2659);
+ if (!ret)
+ ret = ov2659_set_format(ov2659);
+ if (!ret) {
+ ov2659_set_streaming(ov2659, 1);
+ ov2659->streaming = on;
+ }
unlock:
mutex_unlock(&ov2659->lock);
@@ -1239,12 +1236,18 @@ static int ov2659_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov2659 *ov2659 =
container_of(ctrl->handler, struct ov2659, ctrls);
+ struct i2c_client *client = ov2659->client;
+
+ /* V4L2 control values are applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
return ov2659_set_test_pattern(ov2659, ctrl->val);
}
+ pm_runtime_put(&client->dev);
return 0;
}
@@ -1257,6 +1260,39 @@ static const char * const ov2659_test_pattern_menu[] = {
"Vertical Color Bars",
};
+static int ov2659_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ gpiod_set_value(ov2659->pwdn_gpio, 1);
+
+ return 0;
+}
+
+static int ov2659_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ gpiod_set_value(ov2659->pwdn_gpio, 0);
+
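+ /* briefly assert the reset line, then release it and let the sensor settle */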
+ if (ov2659->resetb_gpio) {
+ gpiod_set_value(ov2659->resetb_gpio, 1);
+ usleep_range(500, 1000);
+ gpiod_set_value(ov2659->resetb_gpio, 0);
+ usleep_range(3000, 5000);
+ }
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* V4L2 subdev internal operations
*/
@@ -1330,13 +1366,13 @@ static int ov2659_detect(struct v4l2_subdev *sd)
unsigned short id;
id = OV265X_ID(pid, ver);
- if (id != OV2659_ID)
+ if (id != OV2659_ID) {
dev_err(&client->dev,
"Sensor detection failed (%04X, %d)\n",
id, ret);
- else {
+ ret = -ENODEV;
+ } else {
dev_info(&client->dev, "Found OV%04X sensor\n", id);
- ret = ov2659_init(sd, 0);
}
}
@@ -1413,6 +1449,18 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->xvclk_frequency > 27000000)
return -EINVAL;
+ /* Optional GPIO, don't fail if not present */
+ ov2659->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ov2659->pwdn_gpio))
+ return PTR_ERR(ov2659->pwdn_gpio);
+
+ /* Optional GPIO, don't fail if not present */
+ ov2659->resetb_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov2659->resetb_gpio))
+ return PTR_ERR(ov2659->resetb_gpio);
+
v4l2_ctrl_handler_init(&ov2659->ctrls, 2);
ov2659->link_frequency =
v4l2_ctrl_new_std(&ov2659->ctrls, &ov2659_ctrl_ops,
@@ -1458,6 +1506,8 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->frame_size = &ov2659_framesizes[2];
ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
+ ov2659_power_on(&client->dev);
+
ret = ov2659_detect(sd);
if (ret < 0)
goto error;
@@ -1471,10 +1521,15 @@ static int ov2659_probe(struct i2c_client *client)
dev_info(&client->dev, "%s sensor driver registered !!\n", sd->name);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
return 0;
error:
v4l2_ctrl_handler_free(&ov2659->ctrls);
+ ov2659_power_off(&client->dev);
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov2659->lock);
return ret;
@@ -1490,9 +1545,18 @@ static int ov2659_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov2659->lock);
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov2659_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
return 0;
}
+static const struct dev_pm_ops ov2659_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov2659_power_off, ov2659_power_on, NULL)
+};
+
static const struct i2c_device_id ov2659_id[] = {
{ "ov2659", 0 },
{ /* sentinel */ },
@@ -1510,6 +1574,7 @@ MODULE_DEVICE_TABLE(of, ov2659_of_match);
static struct i2c_driver ov2659_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
+ .pm = &ov2659_pm_ops,
.of_match_table = of_match_ptr(ov2659_of_match),
},
.probe_new = ov2659_probe,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 500d9bbff10b..5e495c833d32 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -193,6 +193,7 @@ struct ov5640_mode_info {
struct ov5640_ctrls {
struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *pixel_rate;
struct {
struct v4l2_ctrl *auto_exp;
struct v4l2_ctrl *exposure;
@@ -489,7 +490,6 @@ static const struct reg_value ov5640_setting_720P_1280_720[] = {
};
static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
- {0x3008, 0x42, 0, 0},
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
@@ -517,7 +517,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
{0x3a15, 0x60, 0, 0}, {0x4407, 0x04, 0, 0},
{0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
- {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
+ {0x4005, 0x1a, 0, 0},
};
static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
@@ -1611,9 +1611,24 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
!(mode->hact == 640 && mode->vact == 480))
return NULL;
+ /* 2592x1944 only works at 15fps max */
+ if ((mode->hact == 2592 && mode->vact == 1944) &&
+ fr > OV5640_15_FPS)
+ return NULL;
+
return mode;
}
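+/*
+ * Pixel rate is derived from the current mode timings: total pixels per
+ * line times total lines per frame times the nominal frame rate.
+ */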
+static u64 ov5640_calc_pixel_rate(struct ov5640_dev *sensor)
+{
+ u64 rate;
+
+ rate = sensor->current_mode->vtot * sensor->current_mode->htot;
+ rate *= ov5640_framerates[sensor->current_fr];
+
+ return rate;
+}
+
/*
* sensor changes between scaling and subsampling, go through
* exposure calculation
@@ -1818,8 +1833,7 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
* All the formats we support have 16 bits per pixel, seems to require
* the same rate as YUV, so we can just use 16 bpp all the time.
*/
- rate = mode->vtot * mode->htot * 16;
- rate *= ov5640_framerates[sensor->current_fr];
+ rate = ov5640_calc_pixel_rate(sensor) * 16;
if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
ret = ov5640_set_mipi_pclk(sensor, rate);
@@ -2233,6 +2247,8 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (mbus_fmt->code != sensor->fmt.code)
sensor->pending_fmt_change = true;
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2657,6 +2673,11 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
/* we can use our own mutex for the ctrl lock */
hdl->lock = &sensor->lock;
+ /* Clock related controls */
+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
+ 0, INT_MAX, 1,
+ ov5640_calc_pixel_rate(sensor));
+
/* Auto/manual white balance */
ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
V4L2_CID_AUTO_WHITE_BALANCE,
@@ -2704,6 +2725,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
goto free_ctrls;
}
+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
@@ -2816,6 +2838,9 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
sensor->frame_interval = fi->interval;
sensor->current_mode = mode;
sensor->pending_mode_change = true;
+
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
}
out:
mutex_unlock(&sensor->lock);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 34b7046d9702..d6cd15bb699a 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1325,7 +1325,7 @@ static int ov5695_probe(struct i2c_client *client,
goto err_power_off;
#endif
- ret = v4l2_async_register_subdev(sd);
+ ret = v4l2_async_register_subdev_sensor_common(sd);
if (ret) {
dev_err(dev, "v4l2 async register subdev failed\n");
goto err_clean_entity;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 5b9af5e5b7f1..91906b94f978 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -124,12 +124,13 @@
#define DEF_AECH 0x4D
-#define CLKRC_6MHz 0x00
+#define CLKRC_8MHz 0x00
#define CLKRC_12MHz 0x40
#define CLKRC_16MHz 0x80
#define CLKRC_24MHz 0xc0
#define CLKRC_DIV_MASK 0x3f
#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
+#define DEF_CLKRC 0x00
#define COMA_RESET BIT(7)
#define COMA_QCIF BIT(5)
@@ -196,13 +197,33 @@ struct ov6650 {
struct v4l2_clk *clk;
bool half_scale; /* scale down output by 2 */
struct v4l2_rect rect; /* sensor cropping window */
- unsigned long pclk_limit; /* from host */
- unsigned long pclk_max; /* from resolution and format */
struct v4l2_fract tpf; /* as requested with s_frame_interval */
u32 code;
- enum v4l2_colorspace colorspace;
};
+struct ov6650_xclk {
+ unsigned long rate;
+ u8 clkrc;
+};
+
+static const struct ov6650_xclk ov6650_xclk[] = {
+{
+ .rate = 8000000,
+ .clkrc = CLKRC_8MHz,
+},
+{
+ .rate = 12000000,
+ .clkrc = CLKRC_12MHz,
+},
+{
+ .rate = 16000000,
+ .clkrc = CLKRC_16MHz,
+},
+{
+ .rate = 24000000,
+ .clkrc = CLKRC_24MHz,
+},
+};
static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_YUYV8_2X8,
@@ -213,6 +234,17 @@ static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_Y8_1X8,
};
+static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
+ .width = W_CIF,
+ .height = H_CIF,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+};
+
/* read a register */
static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
{
@@ -465,38 +497,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect rect = sel->r;
int ret;
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
- &rect.height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
- &rect.top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
- 0);
+ v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
+ &sel->r.height, 2, H_CIF, 1, 0);
+ v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
+ (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
+ &sel->r.top, DEF_VSTRT << 1,
+ (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
+ 1, 0);
- ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
+ ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
if (!ret) {
- priv->rect.left = rect.left;
+ priv->rect.width += priv->rect.left - sel->r.left;
+ priv->rect.left = sel->r.left;
ret = ov6650_reg_write(client, REG_HSTOP,
- (rect.left + rect.width) >> 1);
+ (sel->r.left + sel->r.width) >> 1);
}
if (!ret) {
- priv->rect.width = rect.width;
- ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1);
+ priv->rect.width = sel->r.width;
+ ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
}
if (!ret) {
- priv->rect.top = rect.top;
+ priv->rect.height += priv->rect.top - sel->r.top;
+ priv->rect.top = sel->r.top;
ret = ov6650_reg_write(client, REG_VSTOP,
- (rect.top + rect.height) >> 1);
+ (sel->r.top + sel->r.height) >> 1);
}
if (!ret)
- priv->rect.height = rect.height;
+ priv->rect.height = sel->r.height;
return ret;
}
@@ -512,12 +545,20 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
if (format->pad)
return -EINVAL;
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- mf->colorspace = priv->colorspace;
- mf->field = V4L2_FIELD_NONE;
+ /* initialize response with default media bus frame format */
+ *mf = ov6650_def_fmt;
+
+ /* update media bus format code and frame size */
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+ } else {
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -526,22 +567,7 @@ static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
return width > rect->width >> 1 || height > rect->height >> 1;
}
-static u8 to_clkrc(struct v4l2_fract *timeperframe,
- unsigned long pclk_limit, unsigned long pclk_max)
-{
- unsigned long pclk;
-
- if (timeperframe->numerator && timeperframe->denominator)
- pclk = pclk_max * timeperframe->denominator /
- (FRAME_RATE_MAX * timeperframe->numerator);
- else
- pclk = pclk_max;
-
- if (pclk_limit && pclk_limit < pclk)
- pclk = pclk_limit;
-
- return (pclk_max - 1) / pclk;
-}
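+/* the CLKRC divider field encodes the divisor minus one (cf. GET_CLKRC_DIV) */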
+#define to_clkrc(div) ((div) - 1)
/* set the format we will capture in */
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
@@ -560,8 +586,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
.r.height = mf->height << half_scale,
};
u32 code = mf->code;
- unsigned long mclk, pclk;
- u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc;
+ u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
int ret;
/* select color matrix configuration for given color encoding */
@@ -610,58 +635,35 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
return -EINVAL;
}
- priv->code = code;
if (code == MEDIA_BUS_FMT_Y8_1X8 ||
code == MEDIA_BUS_FMT_SBGGR8_1X8) {
coml_mask = COML_ONE_CHANNEL;
coml_set = 0;
- priv->pclk_max = 4000000;
} else {
coml_mask = 0;
coml_set = COML_ONE_CHANNEL;
- priv->pclk_max = 8000000;
}
- if (code == MEDIA_BUS_FMT_SBGGR8_1X8)
- priv->colorspace = V4L2_COLORSPACE_SRGB;
- else if (code != 0)
- priv->colorspace = V4L2_COLORSPACE_JPEG;
-
if (half_scale) {
dev_dbg(&client->dev, "max resolution: QCIF\n");
coma_set |= COMA_QCIF;
- priv->pclk_max /= 2;
} else {
dev_dbg(&client->dev, "max resolution: CIF\n");
coma_mask |= COMA_QCIF;
}
- priv->half_scale = half_scale;
-
- clkrc = CLKRC_12MHz;
- mclk = 12000000;
- priv->pclk_limit = 1334000;
- dev_dbg(&client->dev, "using 12MHz input clock\n");
-
- clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
-
- pclk = priv->pclk_max / GET_CLKRC_DIV(clkrc);
- dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n",
- mclk / pclk, 10 * mclk % pclk / pclk);
ret = ov6650_set_selection(sd, NULL, &sel);
if (!ret)
ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
- if (!ret)
- ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
-
if (!ret) {
- mf->colorspace = priv->colorspace;
- mf->width = priv->rect.width >> half_scale;
- mf->height = priv->rect.height >> half_scale;
+ priv->half_scale = half_scale;
+
+ ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
}
+ if (!ret)
+ priv->code = code;
+
return ret;
}
@@ -680,8 +682,6 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
&mf->height, 2, H_CIF, 1, 0);
- mf->field = V4L2_FIELD_NONE;
-
switch (mf->code) {
case MEDIA_BUS_FMT_Y10_1X10:
mf->code = MEDIA_BUS_FMT_Y8_1X8;
@@ -691,20 +691,39 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_VYUY8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
- mf->colorspace = V4L2_COLORSPACE_JPEG;
break;
default:
mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
/* fall through */
case MEDIA_BUS_FMT_SBGGR8_1X8:
- mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
}
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return ov6650_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ /* store media bus format code and frame size in pad config */
+ cfg->try_fmt.width = mf->width;
+ cfg->try_fmt.height = mf->height;
+ cfg->try_fmt.code = mf->code;
+ /* return default mbus frame format updated with pad config */
+ *mf = ov6650_def_fmt;
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+
+ } else {
+ /* apply new media bus format code and frame size */
+ int ret = ov6650_s_fmt(sd, mf);
+
+ if (ret)
+ return ret;
+
+ /* return default format updated with active size and code */
+ *mf = ov6650_def_fmt;
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -725,9 +744,7 @@ static int ov6650_g_frame_interval(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- ival->interval.numerator = GET_CLKRC_DIV(to_clkrc(&priv->tpf,
- priv->pclk_limit, priv->pclk_max));
- ival->interval.denominator = FRAME_RATE_MAX;
+ ival->interval = priv->tpf;
dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
ival->interval.numerator, ival->interval.denominator);
@@ -742,7 +759,6 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
struct ov6650 *priv = to_ov6650(client);
struct v4l2_fract *tpf = &ival->interval;
int div, ret;
- u8 clkrc;
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
@@ -754,19 +770,12 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
- /*
- * Keep result to be used as tpf limit
- * for subsequent clock divider calculations
- */
- priv->tpf.numerator = div;
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- clkrc = to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
-
- ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK);
+ ret = ov6650_reg_rmw(client, REG_CLKRC, to_clkrc(div), CLKRC_DIV_MASK);
if (!ret) {
- tpf->numerator = GET_CLKRC_DIV(clkrc);
- tpf->denominator = FRAME_RATE_MAX;
+ priv->tpf.numerator = div;
+ priv->tpf.denominator = FRAME_RATE_MAX;
+
+ *tpf = priv->tpf;
}
return ret;
@@ -788,7 +797,7 @@ static int ov6650_reset(struct i2c_client *client)
}
/* program default register values */
-static int ov6650_prog_dflt(struct i2c_client *client)
+static int ov6650_prog_dflt(struct i2c_client *client, u8 clkrc)
{
int ret;
@@ -796,6 +805,8 @@ static int ov6650_prog_dflt(struct i2c_client *client)
ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
if (!ret)
+ ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
+ if (!ret)
ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
return ret;
@@ -805,8 +816,10 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- u8 pidh, pidl, midh, midl;
- int ret;
+ const struct ov6650_xclk *xclk = NULL;
+ unsigned long rate;
+ u8 pidh, pidl, midh, midl;
+ int i, ret = 0;
priv->clk = v4l2_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -815,6 +828,33 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
return ret;
}
+ rate = v4l2_clk_get_rate(priv->clk);
+ for (i = 0; rate && i < ARRAY_SIZE(ov6650_xclk); i++) {
+ if (rate != ov6650_xclk[i].rate)
+ continue;
+
+ xclk = &ov6650_xclk[i];
+ dev_info(&client->dev, "using host default clock rate %lukHz\n",
+ rate / 1000);
+ break;
+ }
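+ /* otherwise, fall back to negotiating one of the supported rates */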
+ for (i = 0; !xclk && i < ARRAY_SIZE(ov6650_xclk); i++) {
+ ret = v4l2_clk_set_rate(priv->clk, ov6650_xclk[i].rate);
+ if (ret || v4l2_clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
+ continue;
+
+ xclk = &ov6650_xclk[i];
+ dev_info(&client->dev, "using negotiated clock rate %lukHz\n",
+ xclk->rate / 1000);
+ break;
+ }
+ if (!xclk) {
+ dev_err(&client->dev, "unable to get supported clock rate\n");
+ if (!ret)
+ ret = -EINVAL;
+ goto eclkput;
+ }
+
ret = ov6650_s_power(sd, 1);
if (ret < 0)
goto eclkput;
@@ -848,7 +888,12 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
ret = ov6650_reset(client);
if (!ret)
- ret = ov6650_prog_dflt(client);
+ ret = ov6650_prog_dflt(client, xclk->clkrc);
+ if (!ret) {
+ struct v4l2_mbus_framefmt mf = ov6650_def_fmt;
+
+ ret = ov6650_s_fmt(sd, &mf);
+ }
if (!ret)
ret = v4l2_ctrl_handler_setup(&priv->hdl);
@@ -989,8 +1034,10 @@ static int ov6650_probe(struct i2c_client *client,
V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto ectlhdlfree;
+ }
v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
@@ -1001,15 +1048,18 @@ static int ov6650_probe(struct i2c_client *client,
priv->rect.top = DEF_VSTRT << 1;
priv->rect.width = W_CIF;
priv->rect.height = H_CIF;
- priv->half_scale = false;
- priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
- priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ /* Hardware default frame interval */
+ priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
+ priv->tpf.denominator = FRAME_RATE_MAX;
priv->subdev.internal_ops = &ov6650_internal_ops;
ret = v4l2_async_register_subdev(&priv->subdev);
- if (ret)
- v4l2_ctrl_handler_free(&priv->hdl);
+ if (!ret)
+ return 0;
+ectlhdlfree:
+ v4l2_ctrl_handler_free(&priv->hdl);
return ret;
}
@@ -1041,6 +1091,6 @@ static struct i2c_driver ov6650_i2c_driver = {
module_i2c_driver(ov6650_i2c_driver);
-MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV6650");
-MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_DESCRIPTION("V4L2 subdevice driver for OmniVision OV6650 camera sensor");
+MODULE_AUTHOR("Janusz Krzysztofik <jmkrzyszt@gmail.com");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/saa711x_regs.h b/drivers/media/i2c/saa711x_regs.h
index 44fabe08234d..4b5f6985710b 100644
--- a/drivers/media/i2c/saa711x_regs.h
+++ b/drivers/media/i2c/saa711x_regs.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * SPDX-License-Identifier: GPL-2.0+
* saa711x - Philips SAA711x video decoder register specifications
*
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 9adf8e034e7d..84f9771b5fed 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -682,66 +682,6 @@ static int smiapp_get_all_limits(struct smiapp_sensor *sensor)
return 0;
}
-static int smiapp_get_limits_binning(struct smiapp_sensor *sensor)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- static u32 const limits[] = {
- SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN,
- SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN,
- SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN,
- SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN,
- SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN_BIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN,
- };
- static u32 const limits_replace[] = {
- SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES,
- SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES,
- SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK,
- SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK,
- SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN,
- };
- unsigned int i;
- int rval;
-
- if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY] ==
- SMIAPP_BINNING_CAPABILITY_NO) {
- for (i = 0; i < ARRAY_SIZE(limits); i++)
- sensor->limits[limits[i]] =
- sensor->limits[limits_replace[i]];
-
- return 0;
- }
-
- rval = smiapp_get_limits(sensor, limits, ARRAY_SIZE(limits));
- if (rval < 0)
- return rval;
-
- /*
- * Sanity check whether the binning limits are valid. If not,
- * use the non-binning ones.
- */
- if (sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN]
- && sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN]
- && sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN])
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(limits); i++) {
- dev_dbg(&client->dev,
- "replace limit 0x%8.8x \"%s\" = %d, 0x%x\n",
- smiapp_reg_limits[limits[i]].addr,
- smiapp_reg_limits[limits[i]].what,
- sensor->limits[limits_replace[i]],
- sensor->limits[limits_replace[i]]);
- sensor->limits[limits[i]] =
- sensor->limits[limits_replace[i]];
- }
-
- return 0;
-}
-
static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
@@ -891,60 +831,47 @@ static void smiapp_update_blanking(struct smiapp_sensor *sensor)
{
struct v4l2_ctrl *vblank = sensor->vblank;
struct v4l2_ctrl *hblank = sensor->hblank;
+ uint16_t min_fll, max_fll, min_llp, max_llp, min_lbp;
int min, max;
+ if (sensor->binning_vertical > 1 || sensor->binning_horizontal > 1) {
+ min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN];
+ max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN];
+ min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN];
+ max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN];
+ min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN];
+ } else {
+ min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES];
+ max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES];
+ min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK];
+ max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK];
+ min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK];
+ }
+
min = max_t(int,
sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
- sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN] -
+ min_fll -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
- max = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
+ max = max_fll - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
__v4l2_ctrl_modify_range(vblank, min, max, vblank->step, min);
min = max_t(int,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN] -
+ min_llp -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]);
- max = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
+ min_lbp);
+ max = max_llp - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
__v4l2_ctrl_modify_range(hblank, min, max, hblank->step, min);
__smiapp_update_exposure_limits(sensor);
}
-static int smiapp_update_mode(struct smiapp_sensor *sensor)
+static int smiapp_pll_blanking_update(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- unsigned int binning_mode;
int rval;
- /* Binning has to be set up here; it affects limits */
- if (sensor->binning_horizontal == 1 &&
- sensor->binning_vertical == 1) {
- binning_mode = 0;
- } else {
- u8 binning_type =
- (sensor->binning_horizontal << 4)
- | sensor->binning_vertical;
-
- rval = smiapp_write(
- sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
- if (rval < 0)
- return rval;
-
- binning_mode = 1;
- }
- rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
- if (rval < 0)
- return rval;
-
- /* Get updated limits due to binning */
- rval = smiapp_get_limits_binning(sensor);
- if (rval < 0)
- return rval;
-
rval = smiapp_pll_update(sensor);
if (rval < 0)
return rval;
@@ -970,62 +897,91 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
* SMIA++ NVM handling
*
*/
-static int smiapp_read_nvm(struct smiapp_sensor *sensor,
- unsigned char *nvm)
+
+static int smiapp_read_nvm_page(struct smiapp_sensor *sensor, u32 p, u8 *nvm,
+ u8 *status)
{
- u32 i, s, p, np, v;
- int rval = 0, rval2;
+ unsigned int i;
+ int rval;
+ u32 s;
- np = sensor->nvm_size / SMIAPP_NVM_PAGE_SIZE;
- for (p = 0; p < np; p++) {
- rval = smiapp_write(
- sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
- if (rval)
- goto out;
+ *status = 0;
- rval = smiapp_write(sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
- SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN |
- SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN);
- if (rval)
- goto out;
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
+ if (rval)
+ return rval;
- for (i = 1000; i > 0; i--) {
- rval = smiapp_read(
- sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
+ SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN);
+ if (rval)
+ return rval;
- if (rval)
- goto out;
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS,
+ &s);
+ if (rval)
+ return rval;
+
+ if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE) {
+ *status = s;
+ return -ENODATA;
+ }
+ if (sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL) {
+ for (i = 1000; i > 0; i--) {
if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
break;
- }
- if (!i) {
- rval = -ETIMEDOUT;
- goto out;
- }
-
- for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
rval = smiapp_read(
sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
- &v);
- if (rval)
- goto out;
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS,
+ &s);
- *nvm++ = v;
+ if (rval)
+ return rval;
}
+
+ if (!i)
+ return -ETIMEDOUT;
}
-out:
+ for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
+ u32 v;
+
+ rval = smiapp_read(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
+ &v);
+ if (rval)
+ return rval;
+
+ *nvm++ = v;
+ }
+
+ return 0;
+}
+
+static int smiapp_read_nvm(struct smiapp_sensor *sensor, unsigned char *nvm,
+ size_t nvm_size)
+{
+ u8 status = 0;
+ u32 p;
+ int rval = 0, rval2;
+
+ for (p = 0; p < nvm_size / SMIAPP_NVM_PAGE_SIZE && !rval; p++) {
+ rval = smiapp_read_nvm_page(sensor, p, nvm, &status);
+ nvm += SMIAPP_NVM_PAGE_SIZE;
+ }
+
+ if (rval == -ENODATA &&
+ status & SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE)
+ rval = 0;
+
rval2 = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL, 0);
if (rval < 0)
return rval;
else
- return rval2;
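+ /* on success, report how many NVM bytes were read */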
+ return rval2 ?: p * SMIAPP_NVM_PAGE_SIZE;
}
/*
@@ -1324,10 +1280,6 @@ static int smiapp_power_on(struct device *dev)
rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
if (rval)
goto out_cci_addr_fail;
-
- rval = smiapp_update_mode(sensor);
- if (rval < 0)
- goto out_cci_addr_fail;
}
mutex_unlock(&sensor->mutex);
@@ -1387,6 +1339,7 @@ static int smiapp_power_off(struct device *dev)
static int smiapp_start_streaming(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int binning_mode;
int rval;
mutex_lock(&sensor->mutex);
@@ -1397,6 +1350,27 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
if (rval)
goto out;
+ /* Binning configuration */
+ if (sensor->binning_horizontal == 1 &&
+ sensor->binning_vertical == 1) {
+ binning_mode = 0;
+ } else {
+ u8 binning_type =
+ (sensor->binning_horizontal << 4)
+ | sensor->binning_vertical;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
+ if (rval < 0)
+ goto out;
+
+ binning_mode = 1;
+ }
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
+ if (rval < 0)
+ goto out;
+
+ /* Set up PLL */
rval = smiapp_pll_configure(sensor);
if (rval)
goto out;
@@ -2073,7 +2047,7 @@ static int smiapp_set_compose(struct v4l2_subdev *subdev,
smiapp_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return smiapp_update_mode(sensor);
+ return smiapp_pll_blanking_update(sensor);
return 0;
}
@@ -2312,41 +2286,34 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev));
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- unsigned int nbytes;
+ int rval;
if (!sensor->dev_init_done)
return -EBUSY;
- if (!sensor->nvm_size) {
- int rval;
-
- /* NVM not read yet - read it now */
- sensor->nvm_size = sensor->hwcfg->nvm_size;
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ return -ENODEV;
+ }
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(&client->dev);
- pm_runtime_put(&client->dev);
- return -ENODEV;
- }
+ rval = smiapp_read_nvm(sensor, buf, PAGE_SIZE);
+ if (rval < 0) {
+ pm_runtime_put(&client->dev);
+ dev_err(&client->dev, "nvm read failed\n");
+ return -ENODEV;
+ }
- if (smiapp_read_nvm(sensor, sensor->nvm)) {
- dev_err(&client->dev, "nvm read failed\n");
- return -ENODEV;
- }
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
- pm_runtime_mark_last_busy(&client->dev);
- pm_runtime_put_autosuspend(&client->dev);
- }
/*
* NVM is still way below a PAGE_SIZE, so we can safely
* assume this for now.
*/
- nbytes = min_t(unsigned int, sensor->nvm_size, PAGE_SIZE);
- memcpy(buf, sensor->nvm, nbytes);
-
- return nbytes;
+ return rval;
}
static DEVICE_ATTR(nvm, S_IRUGO, smiapp_sysfs_nvm_read, NULL);
@@ -2810,16 +2777,13 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
}
}
- /* NVM size is not mandatory */
- fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
-
rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
if (rval)
dev_info(dev, "can't get clock-frequency\n");
- dev_dbg(dev, "nvm %d, clk %d, mode %d\n",
- hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode);
+ dev_dbg(dev, "clk %d, mode %d\n", hwcfg->ext_clk,
+ hwcfg->csi_signalling_mode);
if (!bus_cfg.nr_of_link_frequencies) {
dev_warn(dev, "no link frequencies defined\n");
@@ -2862,7 +2826,6 @@ static int smiapp_probe(struct i2c_client *client)
return -ENOMEM;
sensor->hwcfg = hwcfg;
- mutex_init(&sensor->mutex);
sensor->src = &sensor->ssds[sensor->ssds_used];
v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
@@ -2926,6 +2889,8 @@ static int smiapp_probe(struct i2c_client *client)
if (rval < 0)
return rval;
+ mutex_init(&sensor->mutex);
+
rval = smiapp_identify_module(sensor);
if (rval) {
rval = -ENODEV;
@@ -3003,17 +2968,10 @@ static int smiapp_probe(struct i2c_client *client)
rval = -ENOENT;
goto out_power_off;
}
- /* SMIA++ NVM initialization - it will be read from the sensor
- * when it is first requested by userspace.
- */
- if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
- sensor->nvm = devm_kzalloc(&client->dev,
- sensor->hwcfg->nvm_size, GFP_KERNEL);
- if (sensor->nvm == NULL) {
- rval = -ENOMEM;
- goto out_cleanup;
- }
+ if (sensor->minfo.smiapp_version &&
+ sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED) {
if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
dev_err(&client->dev, "sysfs nvm entry failed\n");
rval = -EBUSY;
@@ -3086,7 +3044,7 @@ static int smiapp_probe(struct i2c_client *client)
}
mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
+ rval = smiapp_pll_blanking_update(sensor);
mutex_unlock(&sensor->mutex);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
@@ -3101,19 +3059,23 @@ static int smiapp_probe(struct i2c_client *client)
if (rval < 0)
goto out_media_entity_cleanup;
- rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
- if (rval < 0)
- goto out_media_entity_cleanup;
-
pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_enable(&client->dev);
+
+ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
+ if (rval < 0)
+ goto out_disable_runtime_pm;
+
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
+out_disable_runtime_pm:
+ pm_runtime_disable(&client->dev);
+
out_media_entity_cleanup:
media_entity_cleanup(&sensor->src->sd.entity);
@@ -3122,6 +3084,7 @@ out_cleanup:
out_power_off:
smiapp_power_off(&client->dev);
+ mutex_destroy(&sensor->mutex);
return rval;
}
@@ -3144,6 +3107,7 @@ static int smiapp_remove(struct i2c_client *client)
media_entity_cleanup(&sensor->ssds[i].sd.entity);
}
smiapp_cleanup(sensor);
+ mutex_destroy(&sensor->mutex);
return 0;
}
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index 2804a4d9a4e1..43505cd0616e 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -11,25 +11,29 @@
#ifndef __SMIAPP_REG_H_
#define __SMIAPP_REG_H_
+#include <linux/bits.h>
+
#include "smiapp-reg-defs.h"
/* Bits for above register */
-#define SMIAPP_IMAGE_ORIENTATION_HFLIP (1 << 0)
-#define SMIAPP_IMAGE_ORIENTATION_VFLIP (1 << 1)
-
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN (1 << 0)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN (0 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN (1 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR (1 << 2)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY (1 << 0)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY (1 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA (1 << 2)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE (1 << 3)
-
-#define SMIAPP_SOFTWARE_RESET (1 << 0)
-
-#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE (1 << 0)
-#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE (1 << 1)
+#define SMIAPP_IMAGE_ORIENTATION_HFLIP BIT(0)
+#define SMIAPP_IMAGE_ORIENTATION_VFLIP BIT(1)
+
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN BIT(1)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR BIT(2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY BIT(1)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA BIT(2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE BIT(3)
+
+#define SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL BIT(2)
+
+#define SMIAPP_SOFTWARE_RESET BIT(0)
+
+#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE BIT(0)
+#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE BIT(1)
#define SMIAPP_DPHY_CTRL_AUTOMATIC 0
/* DPHY control based on REQUESTED_LINK_BIT_RATE_MBPS */
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index ecf8a17dbe37..3ab874a5deba 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -208,9 +208,6 @@ struct smiapp_sensor {
bool dev_init_done;
u8 compressed_min_bpp;
- u8 *nvm; /* nvm memory buffer */
- unsigned int nvm_size; /* bytes */
-
struct smiapp_module_info minfo;
struct smiapp_pll pll;
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 81285b8d5cfb..003ba22334cd 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -971,6 +971,11 @@ static int mipid02_probe(struct i2c_client *client)
bridge->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
+ if (IS_ERR(bridge->reset_gpio)) {
+ dev_err(dev, "failed to get reset GPIO\n");
+ return PTR_ERR(bridge->reset_gpio);
+ }
+
ret = mipid02_get_regulators(bridge);
if (ret) {
dev_err(dev, "failed to get regulators %d", ret);
diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h
index ecf87534613b..d9b3daada07d 100644
--- a/drivers/media/i2c/tda1997x_regs.h
+++ b/drivers/media/i2c/tda1997x_regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 Gateworks Corporation
*/
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 9088186c24d1..f716129adf09 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
*
* tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers
*
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 39f66e7a0e42..8be03fe5928c 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -375,7 +375,7 @@ static int vpx3220_s_routing(struct v4l2_subdev *sd,
input = 1: COMPOSITE input
input = 2: SVHS input */
- const int input_vals[3][2] = {
+ static const int input_vals[3][2] = {
{0x0c, 0},
{0x0d, 0},
{0x0e, 1}
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c
index e19df5165e78..da8088351135 100644
--- a/drivers/media/mc/mc-device.c
+++ b/drivers/media/mc/mc-device.c
@@ -575,6 +575,38 @@ static void media_device_release(struct media_devnode *devnode)
dev_dbg(devnode->parent, "Media device released\n");
}
+static void __media_device_unregister_entity(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_link *link, *tmp;
+ struct media_interface *intf;
+ unsigned int i;
+
+ ida_free(&mdev->entity_internal_idx, entity->internal_idx);
+
+ /* Remove all interface links pointing to this entity */
+ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
+ list_for_each_entry_safe(link, tmp, &intf->links, list) {
+ if (link->entity == entity)
+ __media_remove_intf_link(link);
+ }
+ }
+
+ /* Remove all data links that belong to this entity */
+ __media_entity_remove_links(entity);
+
+ /* Remove all pads that belong to this entity */
+ for (i = 0; i < entity->num_pads; i++)
+ media_gobj_destroy(&entity->pads[i].graph_obj);
+
+ /* Remove the entity */
+ media_gobj_destroy(&entity->graph_obj);
+
+ /* invoke entity_notify callbacks to handle entity removal?? */
+
+ entity->graph_obj.mdev = NULL;
+}
+
/**
* media_device_register_entity - Register an entity with a media device
* @mdev: The media device
@@ -632,6 +664,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
*/
ret = media_graph_walk_init(&new, mdev);
if (ret) {
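+ /* roll back the registration performed above before bailing out */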
+ __media_device_unregister_entity(entity);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
@@ -644,38 +677,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
-static void __media_device_unregister_entity(struct media_entity *entity)
-{
- struct media_device *mdev = entity->graph_obj.mdev;
- struct media_link *link, *tmp;
- struct media_interface *intf;
- unsigned int i;
-
- ida_free(&mdev->entity_internal_idx, entity->internal_idx);
-
- /* Remove all interface links pointing to this entity */
- list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
- list_for_each_entry_safe(link, tmp, &intf->links, list) {
- if (link->entity == entity)
- __media_remove_intf_link(link);
- }
- }
-
- /* Remove all data links that belong to this entity */
- __media_entity_remove_links(entity);
-
- /* Remove all pads that belong to this entity */
- for (i = 0; i < entity->num_pads; i++)
- media_gobj_destroy(&entity->pads[i].graph_obj);
-
- /* Remove the entity */
- media_gobj_destroy(&entity->graph_obj);
-
- /* invoke entity_notify callbacks to handle entity removal?? */
-
- entity->graph_obj.mdev = NULL;
-}
-
void media_device_unregister_entity(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 85f3e7307538..fa57e12f2ac8 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -664,7 +664,7 @@ static int _cx18_process_idx_data(struct cx18_buffer *buf,
struct cx18_enc_idx_entry *e_buf;
/* Frame type lookup: 1=I, 2=P, 4=B */
- const int mapping[8] = {
+ static const int mapping[8] = {
-1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P,
-1, V4L2_ENC_IDX_FRAME_B, -1, -1, -1
};
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index e880afe37f15..d59ca3601785 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -1167,8 +1167,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
return -ENOMEM;
spin_lock_init(&state->rx_kfifo_lock);
- if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
+ if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
+ GFP_KERNEL)) {
+ kfree(state);
return -ENOMEM;
+ }
state->dev = dev;
sd = &state->sd;
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 3cd87626cd79..9fa388626bae 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -1781,6 +1781,41 @@ static const struct cx88_board cx88_boards[] = {
},
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_NOTONLYTV_LV3H] = {
+ .name = "NotOnlyTV LV3H",
+ .tuner_type = TUNER_XC2028,
+ .radio_type = UNSET,
+ .tuner_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
+ /* if gpio1:bit9 is enabled, DVB-T won't work */
+
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa141,
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa161,
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa161,
+ .gpio2 = 0x0000,
+ } },
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa141,
+ .gpio2 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO] = {
.name = "DViCO FusionHDTV DVB-T PRO",
.tuner_type = TUNER_XC2028,
@@ -2654,6 +2689,7 @@ static const struct cx88_subid cx88_subids[] = {
.subdevice = 0x6f18,
.card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
}, {
+ /* Also NotOnlyTV LV3H (version 1.11 is silkscreened on the board) */
.subvendor = 0x14f1,
.subdevice = 0x8852,
.card = CX88_BOARD_GENIATECH_X8000_MT,
@@ -3121,6 +3157,7 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
return cx88_dvico_xc2028_callback(core, command, arg);
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
return cx88_xc3028_winfast1800h_callback(core, command, arg);
@@ -3322,6 +3359,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
udelay(1000);
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
cx88_xc3028_winfast1800h_callback(core, XC2028_TUNER_RESET, 0);
@@ -3378,6 +3416,11 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
*/
ctl->disable_power_mgmt = 1;
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
+ ctl->demod = XC3028_FE_ZARLINK456;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ ctl->read_not_reliable = 1;
+ break;
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 0292d0947cc7..202ff9e8c257 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1378,6 +1378,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_PINNACLE_HYBRID_PCTV:
case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index dcc0f02aeb70..b8abcd550604 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1277,7 +1277,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
core = cx88_core_get(dev->pci);
if (!core) {
err = -EINVAL;
- goto fail_free;
+ goto fail_disable;
}
dev->core = core;
@@ -1323,7 +1323,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->audio_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
}
@@ -1337,7 +1337,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->video_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
if (vc->id == V4L2_CID_CHROMA_AGC)
@@ -1509,11 +1509,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
fail_unreg:
cx8800_unregister_video(dev);
- free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
+fail_irq:
+ free_irq(pci_dev->irq, dev);
fail_core:
core->v4ldev = NULL;
cx88_core_put(core, dev->pci);
+fail_disable:
+ pci_disable_device(pci_dev);
fail_free:
kfree(dev);
return err;
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 744a22328ebc..ce4acf6de6aa 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -228,6 +228,7 @@ extern const struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36 89
#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43 90
+#define CX88_BOARD_NOTONLYTV_LV3H 91
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index bb3a8cc9de0c..9dce31d2b525 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 6d22c0107d33..80478b026d75 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -325,7 +325,7 @@ static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
{
u32 line_size = itv->vbi.sliced_decoder_line_size;
- struct v4l2_decode_vbi_line vbi;
+ struct v4l2_decode_vbi_line vbi = {};
int i;
unsigned lines = 0;
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index 67aebe759232..c0bd5d7e148b 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -60,10 +60,8 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(!mantis)) {
- dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ if (unlikely(!mantis))
return IRQ_NONE;
- }
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index deadd0b92233..906e4500d87d 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -69,10 +69,8 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(mantis == NULL)) {
- dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ if (unlikely(!mantis))
return IRQ_NONE;
- }
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 9ae04e18e6c6..126d085be9a7 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -13,12 +13,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <asm/div64.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
#include "saa7164.h"
MODULE_DESCRIPTION("Driver for NXP SAA7164 based TV cards");
@@ -1045,92 +1043,138 @@ static void saa7164_dev_unregister(struct saa7164_dev *dev)
return;
}
-#ifdef CONFIG_PROC_FS
-static int saa7164_proc_show(struct seq_file *m, void *v)
+#ifdef CONFIG_DEBUG_FS
+static void *saa7164_seq_start(struct seq_file *s, loff_t *pos)
{
struct saa7164_dev *dev;
- struct tmComResBusInfo *b;
- struct list_head *list;
- int i, c;
+ loff_t index = *pos;
- if (saa7164_devcount == 0)
- return 0;
+ mutex_lock(&devlist);
+ list_for_each_entry(dev, &saa7164_devlist, devlist) {
+ if (index-- == 0) {
+ mutex_unlock(&devlist);
+ return dev;
+ }
+ }
+ mutex_unlock(&devlist);
- list_for_each(list, &saa7164_devlist) {
- dev = list_entry(list, struct saa7164_dev, devlist);
- seq_printf(m, "%s = %p\n", dev->name, dev);
+ return NULL;
+}
- /* Lock the bus from any other access */
- b = &dev->bus;
- mutex_lock(&b->lock);
+static void *saa7164_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct saa7164_dev *dev = v;
+ void *ret;
- seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
- b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
+ mutex_lock(&devlist);
+ if (list_is_last(&dev->devlist, &saa7164_devlist))
+ ret = NULL;
+ else
+ ret = list_next_entry(dev, devlist);
+ mutex_unlock(&devlist);
- seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
- b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+ ++*pos;
- seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
- b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+ return ret;
+}
- seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
- b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
- c = 0;
- seq_printf(m, "\n Set Ring:\n");
- seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
- for (i = 0; i < b->m_dwSizeSetRing; i++) {
- if (c == 0)
- seq_printf(m, " %04x:", i);
+static void saa7164_seq_stop(struct seq_file *s, void *v)
+{
+}
- seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
+static int saa7164_seq_show(struct seq_file *m, void *v)
+{
+ struct saa7164_dev *dev = v;
+ struct tmComResBusInfo *b;
+ int i, c;
- if (++c == 16) {
- seq_printf(m, "\n");
- c = 0;
- }
- }
+ seq_printf(m, "%s = %p\n", dev->name, dev);
- c = 0;
- seq_printf(m, "\n Get Ring:\n");
- seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
- for (i = 0; i < b->m_dwSizeGetRing; i++) {
- if (c == 0)
- seq_printf(m, " %04x:", i);
+ /* Lock the bus from any other access */
+ b = &dev->bus;
+ mutex_lock(&b->lock);
- seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
+ seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
- if (++c == 16) {
- seq_printf(m, "\n");
- c = 0;
- }
+ seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+
+ seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+
+ seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
+ c = 0;
+ seq_puts(m, "\n Set Ring:\n");
+ seq_puts(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeSetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+
+ seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
+
+ if (++c == 16) {
+ seq_puts(m, "\n");
+ c = 0;
}
+ }
- mutex_unlock(&b->lock);
+ c = 0;
+ seq_puts(m, "\n Get Ring:\n");
+ seq_puts(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeGetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+ seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
+
+ if (++c == 16) {
+ seq_puts(m, "\n");
+ c = 0;
+ }
}
+ mutex_unlock(&b->lock);
+
return 0;
}
-static struct proc_dir_entry *saa7164_pe;
+static const struct seq_operations saa7164_seq_ops = {
+ .start = saa7164_seq_start,
+ .next = saa7164_seq_next,
+ .stop = saa7164_seq_stop,
+ .show = saa7164_seq_show,
+};
-static int saa7164_proc_create(void)
+static int saa7164_open(struct inode *inode, struct file *file)
{
- saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
- if (!saa7164_pe)
- return -ENOMEM;
+ return seq_open(file, &saa7164_seq_ops);
+}
- return 0;
+static const struct file_operations saa7164_operations = {
+ .owner = THIS_MODULE,
+ .open = saa7164_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static struct dentry *saa7164_dentry;
+
+static void __init saa7164_debugfs_create(void)
+{
+ saa7164_dentry = debugfs_create_file("saa7164", 0444, NULL, NULL,
+ &saa7164_operations);
}
-static void saa7164_proc_destroy(void)
+static void __exit saa7164_debugfs_remove(void)
{
- if (saa7164_pe)
- remove_proc_entry("saa7164", NULL);
+ debugfs_remove(saa7164_dentry);
}
#else
-static int saa7164_proc_create(void) { return 0; }
-static void saa7164_proc_destroy(void) {}
+static void saa7164_debugfs_create(void) { }
+static void saa7164_debugfs_remove(void) { }
#endif
static int saa7164_thread_function(void *data)
@@ -1507,7 +1551,7 @@ static int __init saa7164_init(void)
if (ret)
return ret;
- saa7164_proc_create();
+ saa7164_debugfs_create();
pr_info("saa7164 driver loaded\n");
@@ -1516,7 +1560,7 @@ static int __init saa7164_init(void)
static void __exit saa7164_fini(void)
{
- saa7164_proc_destroy();
+ saa7164_debugfs_remove();
pci_unregister_driver(&saa7164_pci_driver);
}
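
The conversion above swaps the single-shot proc_create_single() dump for a debugfs file driven by the four-callback seq_file iterator, which walks the device list one entry per show() call. A stripped-down sketch of the same wiring, assuming a trivial one-record iterator and placeholder demo_* names:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static void *demo_seq_start(struct seq_file *s, loff_t *pos)
{
	return (*pos == 0) ? SEQ_START_TOKEN : NULL;	/* single record */
}

static void *demo_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return NULL;					/* nothing after it */
}

static void demo_seq_stop(struct seq_file *s, void *v) { }

static int demo_seq_show(struct seq_file *s, void *v)
{
	seq_puts(s, "hello from debugfs\n");
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_seq_start,
	.next  = demo_seq_next,
	.stop  = demo_seq_stop,
	.show  = demo_seq_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static struct dentry *demo_dentry;

static void demo_debugfs_create(void)
{
	demo_dentry = debugfs_create_file("demo", 0444, NULL, NULL, &demo_fops);
}
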
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 65bc7e29450b..2b5e0154814c 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 30c8f2ec9c3c..eaa57d835ea8 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -353,7 +353,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
G723_PERIOD_BYTES * PERIODS,
G723_PERIOD_BYTES * PERIODS);
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 40373bd23381..7786e51d19ae 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -300,7 +300,7 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(dev->pci_dev),
+ &dev->pci_dev->dev,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX);
return 0;
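
Both audio hunks track the same ALSA API change: snd_pcm_lib_preallocate_pages_for_all() now takes the DMA struct device * directly (or NULL for SNDRV_DMA_TYPE_CONTINUOUS) instead of the old snd_dma_pci_data()/snd_dma_continuous_data() cookies. A hedged usage sketch, with an arbitrary 64 KiB buffer size and pcm/pci_dev assumed to exist in the caller's context:

#include <sound/pcm.h>

snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
				      &pci_dev->dev,	/* was snd_dma_pci_data(pci_dev) */
				      64 * 1024,	/* default buffer size */
				      64 * 1024);	/* maximum buffer size */
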
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f1f61419fd29..e84f35d3a68e 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -483,6 +483,7 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on VIDEO_DEV && VIDEO_V4L2
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM if ARCH_QCOM
select VIDEOBUF2_DMA_SG
@@ -493,6 +494,19 @@ config VIDEO_QCOM_VENUS
on various Qualcomm SoCs.
To compile this driver as a module choose m here.
+config VIDEO_SUN8I_DEINTERLACE
+ tristate "Allwinner Deinterlace driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ depends on PM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Allwinner deinterlace unit with scaling
+ capability found on some SoCs, like H3.
+ To compile this driver as a module choose m here.
+
endif # V4L_MEM2MEM_DRIVERS
# TI VIDEO PORT Helper Modules
@@ -585,9 +599,10 @@ config VIDEO_MESON_G12A_AO_CEC
config CEC_GPIO
tristate "Generic GPIO-based CEC driver"
- depends on PREEMPT || COMPILE_TEST
+ depends on PREEMPTION || COMPILE_TEST
select CEC_CORE
select CEC_PIN
+ select CEC_NOTIFIER
select GPIOLIB
help
This is a generic GPIO-based CEC driver.
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 6ee7eb0d36f4..d13db96e3015 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
-obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
-
-obj-$(CONFIG_VIDEO_TI_CAL) += ti-vpe/
+obj-y += ti-vpe/
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 2b42ba1f5949..09104304bd06 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI VPFE capture Driver
*
@@ -5,19 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/delay.h>
@@ -69,125 +57,64 @@ static const struct vpfe_standard vpfe_standards[] = {
{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
};
-struct bus_format {
- unsigned int width;
- unsigned int bpp;
-};
-
-/*
- * struct vpfe_fmt - VPFE media bus format information
- * @code: V4L2 media bus format code
- * @shifted: V4L2 media bus format code for the same pixel layout but
- * shifted to be 8 bits per pixel. =0 if format is not shiftable.
- * @pixelformat: V4L2 pixel format FCC identifier
- * @width: Bits per pixel (when transferred over a bus)
- * @bpp: Bytes per pixel (when stored in memory)
- * @supported: Indicates format supported by subdev
- */
-struct vpfe_fmt {
- u32 fourcc;
- u32 code;
- struct bus_format l;
- struct bus_format s;
- bool supported;
- u32 index;
-};
-
-static struct vpfe_fmt formats[] = {
+static struct vpfe_fmt formats[VPFE_NUM_FORMATS] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.code = MEDIA_BUS_FMT_YUYV8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
.code = MEDIA_BUS_FMT_UYVY8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_YVYU,
.code = MEDIA_BUS_FMT_YVYU8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
.code = MEDIA_BUS_FMT_VYUY8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_RGB565,
.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB565X,
.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
},
};
-static int
-__vpfe_get_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp);
+static int __subdev_get_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt);
+static int vpfe_calc_format_size(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt,
+ struct v4l2_format *f);
-static struct vpfe_fmt *find_format_by_code(unsigned int code)
+static struct vpfe_fmt *find_format_by_code(struct vpfe_device *vpfe,
+ unsigned int code)
{
struct vpfe_fmt *fmt;
unsigned int k;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
+ for (k = 0; k < vpfe->num_active_fmt; k++) {
+ fmt = vpfe->active_fmt[k];
if (fmt->code == code)
return fmt;
}
@@ -195,13 +122,14 @@ static struct vpfe_fmt *find_format_by_code(unsigned int code)
return NULL;
}
-static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
+static struct vpfe_fmt *find_format_by_pix(struct vpfe_device *vpfe,
+ unsigned int pixelformat)
{
struct vpfe_fmt *fmt;
unsigned int k;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
+ for (k = 0; k < vpfe->num_active_fmt; k++) {
+ fmt = vpfe->active_fmt[k];
if (fmt->fourcc == pixelformat)
return fmt;
}
@@ -209,48 +137,18 @@ static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
return NULL;
}
-static void
-mbus_to_pix(struct vpfe_device *vpfe,
- const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix, unsigned int *bpp)
+static unsigned int __get_bytesperpixel(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt)
{
struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
unsigned int bus_width = sdinfo->vpfe_param.bus_width;
- struct vpfe_fmt *fmt;
-
- fmt = find_format_by_code(mbus->code);
- if (WARN_ON(fmt == NULL)) {
- pr_err("Invalid mbus code set\n");
- *bpp = 1;
- return;
- }
-
- memset(pix, 0, sizeof(*pix));
- v4l2_fill_pix_format(pix, mbus);
- pix->pixelformat = fmt->fourcc;
- *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
+ u32 bpp, bus_width_bytes, clocksperpixel;
- /* pitch should be 32 bytes aligned */
- pix->bytesperline = ALIGN(pix->width * *bpp, 32);
- pix->sizeimage = pix->bytesperline * pix->height;
-}
-
-static void pix_to_mbus(struct vpfe_device *vpfe,
- struct v4l2_pix_format *pix_fmt,
- struct v4l2_mbus_framefmt *mbus_fmt)
-{
- struct vpfe_fmt *fmt;
-
- fmt = find_format_by_pix(pix_fmt->pixelformat);
- if (!fmt) {
- /* default to first entry */
- vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
- pix_fmt->pixelformat);
- fmt = &formats[0];
- }
+ bus_width_bytes = ALIGN(bus_width, 8) >> 3;
+ clocksperpixel = DIV_ROUND_UP(fmt->bitsperpixel, bus_width);
+ bpp = clocksperpixel * bus_width_bytes;
- memset(mbus_fmt, 0, sizeof(*mbus_fmt));
- v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
+ return bpp;
}
/* Print Four-character-code (FOURCC) */
@@ -267,20 +165,6 @@ static char *print_fourcc(u32 fmt)
return code;
}
-static int
-cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
-{
- return lhs->type == rhs->type &&
- lhs->fmt.pix.width == rhs->fmt.pix.width &&
- lhs->fmt.pix.height == rhs->fmt.pix.height &&
- lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
- lhs->fmt.pix.field == rhs->fmt.pix.field &&
- lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
- lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
- lhs->fmt.pix.quantization == rhs->fmt.pix.quantization &&
- lhs->fmt.pix.xfer_func == rhs->fmt.pix.xfer_func;
-}
-
static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
{
return ioread32(ccdc->ccdc_cfg.base_addr + offset);
@@ -345,13 +229,9 @@ static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
vert_nr_lines = (image_win->height >> 1) - 1;
vert_start >>= 1;
- /* Since first line doesn't have any data */
- vert_start += 1;
/* configure VDINT0 */
val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
} else {
- /* Since first line doesn't have any data */
- vert_start += 1;
vert_nr_lines = image_win->height - 1;
/*
* configure VDINT0 and VDINT1. VDINT1 will be at half
@@ -405,7 +285,6 @@ vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
- ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
max_gamma > max_data) {
vpfe_dbg(1, vpfe, "Invalid data line select\n");
return -EINVAL;
@@ -445,40 +324,25 @@ static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
{
- int dma_cntl, i, pcr;
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ u32 dma_cntl, pcr;
- /* If the CCDC module is still busy wait for it to be done */
- for (i = 0; i < 10; i++) {
- usleep_range(5000, 6000);
- pcr = vpfe_reg_read(ccdc, VPFE_PCR);
- if (!pcr)
- break;
+ pcr = vpfe_reg_read(ccdc, VPFE_PCR);
+ if (pcr)
+ vpfe_dbg(1, vpfe, "VPFE_PCR is still set (%x)", pcr);
- /* make sure it it is disabled */
- vpfe_pcr_enable(ccdc, 0);
- }
+ dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
+ if ((dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
+ vpfe_dbg(1, vpfe, "VPFE_DMA_CNTL_OVERFLOW is still set (%x)",
+ dma_cntl);
/* Disable CCDC by resetting all register to default POR values */
vpfe_ccdc_restore_defaults(ccdc);
- /* if DMA_CNTL overflow bit is set. Clear it
- * It appears to take a while for this to become quiescent ~20ms
- */
- for (i = 0; i < 10; i++) {
- dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
- if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
- break;
-
- /* Clear the overflow bit */
- vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
- usleep_range(5000, 6000);
- }
-
/* Disabled the module at the CONFIG level */
vpfe_config_enable(ccdc, 0);
pm_runtime_put_sync(dev);
-
return 0;
}
@@ -494,8 +358,8 @@ static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
x = copy_from_user(&raw_params, params, sizeof(raw_params));
if (x) {
vpfe_dbg(1, vpfe,
- "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
- x);
+ "%s: error in copying ccdc params, %d\n",
+ __func__, x);
return -EFAULT;
}
@@ -513,11 +377,9 @@ static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
*/
static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
u32 syn_mode;
- vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
/*
* first restore the CCDC registers to default values
* This is important since we assume default values to be set in
@@ -649,8 +511,6 @@ static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
unsigned int syn_mode;
unsigned int val;
- vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
-
/* Reset CCDC */
vpfe_ccdc_restore_defaults(ccdc);
@@ -751,8 +611,8 @@ static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
{
struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
- vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
- ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
+ vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n",
+ __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
@@ -1036,10 +896,9 @@ static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
{
enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ u32 bpp;
int ret = 0;
- vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
-
vpfe_dbg(1, vpfe, "pixelformat: %s\n",
print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
@@ -1050,7 +909,8 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
}
/* configure the image window */
- vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
+ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, bpp);
switch (vpfe->fmt.fmt.pix.field) {
case V4L2_FIELD_INTERLACED:
@@ -1094,7 +954,8 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
static int vpfe_config_image_format(struct vpfe_device *vpfe,
v4l2_std_id std_id)
{
- struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
+ struct vpfe_fmt *fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
int i, ret;
for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
@@ -1116,26 +977,29 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe,
return -EINVAL;
}
- vpfe->crop.top = vpfe->crop.left = 0;
- vpfe->crop.width = vpfe->std_info.active_pixels;
- vpfe->crop.height = vpfe->std_info.active_lines;
- pix->width = vpfe->crop.width;
- pix->height = vpfe->crop.height;
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
-
- /* first field and frame format based on standard frame format */
- if (vpfe->std_info.frame_format)
- pix->field = V4L2_FIELD_INTERLACED;
- else
- pix->field = V4L2_FIELD_NONE;
-
- ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
+ ret = __subdev_get_format(vpfe, &mbus_fmt);
if (ret)
return ret;
+ fmt = find_format_by_code(vpfe, mbus_fmt.code);
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "mbus code format (0x%08x) not found.\n",
+ mbus_fmt.code);
+ return -EINVAL;
+ }
+
+ /* Save current subdev format */
+ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe->fmt.fmt.pix.pixelformat = fmt->fourcc;
+ vpfe_calc_format_size(vpfe, fmt, &vpfe->fmt);
+ vpfe->current_vpfe_fmt = fmt;
+
/* Update the crop window based on found values */
- vpfe->crop.width = pix->width;
- vpfe->crop.height = pix->height;
+ vpfe->crop.top = 0;
+ vpfe->crop.left = 0;
+ vpfe->crop.width = mbus_fmt.width;
+ vpfe->crop.height = mbus_fmt.height;
return vpfe_config_ccdc_image_format(vpfe);
}
@@ -1237,22 +1101,29 @@ unlock:
* This function will get next buffer from the dma queue and
* set the buffer address in the vpfe register for capture.
* the buffer is marked active
- *
- * Assumes caller is holding vpfe->dma_queue_lock already
*/
-static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
{
+ dma_addr_t addr;
+
+ spin_lock(&vpfe->dma_queue_lock);
+ if (list_empty(&vpfe->dma_queue)) {
+ spin_unlock(&vpfe->dma_queue_lock);
+ return;
+ }
+
vpfe->next_frm = list_entry(vpfe->dma_queue.next,
struct vpfe_cap_buffer, list);
list_del(&vpfe->next_frm->list);
+ spin_unlock(&vpfe->dma_queue_lock);
- vpfe_set_sdr_addr(&vpfe->ccdc,
- vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
+ vpfe_set_sdr_addr(&vpfe->ccdc, addr);
}
static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
{
- unsigned long addr;
+ dma_addr_t addr;
addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
vpfe->field_off;
@@ -1277,6 +1148,58 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
vpfe->cur_frm = vpfe->next_frm;
}
+static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe,
+ enum v4l2_field field)
+{
+ int fid;
+
+ /* interlaced or TB capture: check which field
+ * the hardware is in
+ */
+ fid = vpfe_ccdc_getfid(&vpfe->ccdc);
+
+ /* switch the software maintained field id */
+ vpfe->field ^= 1;
+ if (fid == vpfe->field) {
+ /* we are in-sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the
+ * current frame and move on
+ */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+
+ if (vpfe->stopping)
+ return;
+
+ /*
+ * based on whether the two fields are stored
+ * interleave or separately in memory,
+ * reconfigure the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_schedule_bottom_field(vpfe);
+ } else {
+ /*
+ * if one field is just being captured, configure
+ * the next frame: get the next frame from the empty
+ * queue; if no frame is available, hold on to the
+ * current buffer
+ */
+ if (vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ }
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+ * May lose one frame
+ */
+ vpfe->field = fid;
+ }
+}
+
/*
* vpfe_isr : ISR handler for vpfe capture (VINT0)
* @irq: irq number
@@ -1288,76 +1211,28 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
static irqreturn_t vpfe_isr(int irq, void *dev)
{
struct vpfe_device *vpfe = (struct vpfe_device *)dev;
- enum v4l2_field field;
- int intr_status;
- int fid;
+ enum v4l2_field field = vpfe->fmt.fmt.pix.field;
+ int intr_status, stopping = vpfe->stopping;
intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
if (intr_status & VPFE_VDINT0) {
- field = vpfe->fmt.fmt.pix.field;
-
if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
if (vpfe->cur_frm != vpfe->next_frm)
vpfe_process_buffer_complete(vpfe);
- goto next_intr;
+ } else {
+ vpfe_handle_interlaced_irq(vpfe, field);
}
-
- /* interlaced or TB capture check which field
- we are in hardware */
- fid = vpfe_ccdc_getfid(&vpfe->ccdc);
-
- /* switch the software maintained field id */
- vpfe->field ^= 1;
- if (fid == vpfe->field) {
- /* we are in-sync here,continue */
- if (fid == 0) {
- /*
- * One frame is just being captured. If the
- * next frame is available, release the
- * current frame and move on
- */
- if (vpfe->cur_frm != vpfe->next_frm)
- vpfe_process_buffer_complete(vpfe);
- /*
- * based on whether the two fields are stored
- * interleave or separately in memory,
- * reconfigure the CCDC memory address
- */
- if (field == V4L2_FIELD_SEQ_TB)
- vpfe_schedule_bottom_field(vpfe);
-
- goto next_intr;
- }
- /*
- * if one field is just being captured configure
- * the next frame get the next frame from the empty
- * queue if no frame is available hold on to the
- * current buffer
- */
- spin_lock(&vpfe->dma_queue_lock);
- if (!list_empty(&vpfe->dma_queue) &&
- vpfe->cur_frm == vpfe->next_frm)
- vpfe_schedule_next_buffer(vpfe);
- spin_unlock(&vpfe->dma_queue_lock);
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
- * May loose one frame
- */
- vpfe->field = fid;
+ if (stopping) {
+ vpfe->stopping = false;
+ complete(&vpfe->capture_stop);
}
}
-next_intr:
- if (intr_status & VPFE_VDINT1) {
- spin_lock(&vpfe->dma_queue_lock);
- if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
- !list_empty(&vpfe->dma_queue) &&
+ if (intr_status & VPFE_VDINT1 && !stopping) {
+ if (field == V4L2_FIELD_NONE &&
vpfe->cur_frm == vpfe->next_frm)
vpfe_schedule_next_buffer(vpfe);
- spin_unlock(&vpfe->dma_queue_lock);
}
vpfe_clear_intr(&vpfe->ccdc, intr_status);
@@ -1394,8 +1269,6 @@ static int vpfe_querycap(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_querycap\n");
-
strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
@@ -1404,83 +1277,74 @@ static int vpfe_querycap(struct file *file, void *priv,
}
/* get the format set at output pad of the adjacent subdev */
-static int __vpfe_get_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp)
+static int __subdev_get_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt)
{
- struct v4l2_mbus_framefmt mbus_fmt;
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
-
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt.pad = 0;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
- ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
- if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
return ret;
- if (!ret) {
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
- } else {
- ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
- sdinfo->grp_id,
- pad, get_fmt,
- NULL, &fmt);
- if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
- v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
- mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
- }
-
- format->type = vpfe->fmt.type;
+ *fmt = *mbus_fmt;
- vpfe_dbg(1, vpfe,
- "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
- __func__, format->fmt.pix.width, format->fmt.pix.height,
- print_fourcc(format->fmt.pix.pixelformat),
- format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+ vpfe_dbg(1, vpfe, "%s: %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
return 0;
}
/* set the format at output pad of the adjacent subdev */
-static int __vpfe_set_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp)
+static int __subdev_set_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt)
{
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- vpfe_dbg(2, vpfe, "__vpfe_set_format\n");
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ *mbus_fmt = *fmt;
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt.pad = 0;
+ vpfe_dbg(1, vpfe, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
- pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);
+ return 0;
+}
- ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
- if (ret)
- return ret;
+static int vpfe_calc_format_size(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt,
+ struct v4l2_format *f)
+{
+ u32 bpp;
+
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "No vpfe_fmt provided!\n");
+ return -EINVAL;
+ }
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ bpp = __get_bytesperpixel(vpfe, fmt);
- format->type = vpfe->fmt.type;
+ /* pitch should be 32 bytes aligned */
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.width * bpp, 32);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
- vpfe_dbg(1, vpfe,
- "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
- __func__, format->fmt.pix.width, format->fmt.pix.height,
- print_fourcc(format->fmt.pix.pixelformat),
- format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+ vpfe_dbg(3, vpfe, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+ __func__, print_fourcc(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
return 0;
}
@@ -1490,8 +1354,6 @@ static int vpfe_g_fmt(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");
-
*fmt = vpfe->fmt;
return 0;
@@ -1502,82 +1364,124 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- struct vpfe_fmt *fmt = NULL;
- unsigned int k;
-
- vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
- f->index);
+ struct vpfe_fmt *fmt;
sdinfo = vpfe->current_subdev;
if (!sdinfo->sd)
return -EINVAL;
- if (f->index > ARRAY_SIZE(formats))
+ if (f->index >= vpfe->num_active_fmt)
return -EINVAL;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- if (formats[k].index == f->index) {
- fmt = &formats[k];
- break;
- }
- }
- if (!fmt)
- return -EINVAL;
+ fmt = vpfe->active_fmt[f->index];
f->pixelformat = fmt->fourcc;
- vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s\n",
- f->index, fmt->code, print_fourcc(fmt->fourcc));
+ vpfe_dbg(1, vpfe, "%s: mbus index: %d code: %x pixelformat: %s\n",
+ __func__, f->index, fmt->code, print_fourcc(fmt->fourcc));
return 0;
}
static int vpfe_try_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
+ struct v4l2_format *f)
{
struct vpfe_device *vpfe = video_drvdata(file);
- unsigned int bpp;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ const struct vpfe_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret, found;
+
+ fmt = find_format_by_pix(vpfe, f->fmt.pix.pixelformat);
+ if (!fmt) {
+ /* default to first entry */
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ f->fmt.pix.pixelformat);
+ fmt = vpfe->active_fmt[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = vpfe->fmt.fmt.pix.field;
+
+ /* check for/find a valid width/height */
+ ret = 0;
+ found = false;
+ fse.pad = 0;
+ fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ for (fse.index = 0; ; fse.index++) {
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ break;
+
+ if (f->fmt.pix.width == fse.max_width &&
+ f->fmt.pix.height == fse.max_height) {
+ found = true;
+ break;
+ } else if (f->fmt.pix.width >= fse.min_width &&
+ f->fmt.pix.width <= fse.max_width &&
+ f->fmt.pix.height >= fse.min_height &&
+ f->fmt.pix.height <= fse.max_height) {
+ found = true;
+ break;
+ }
+ }
- vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
+ if (!found) {
+ /* use existing values as default */
+ f->fmt.pix.width = vpfe->fmt.fmt.pix.width;
+ f->fmt.pix.height = vpfe->fmt.fmt.pix.height;
+ }
- return __vpfe_get_format(vpfe, fmt, &bpp);
+ /*
+ * Use current colorspace for now, it will get
+ * updated properly during s_fmt
+ */
+ f->fmt.pix.colorspace = vpfe->fmt.fmt.pix.colorspace;
+ return vpfe_calc_format_size(vpfe, fmt, f);
}
static int vpfe_s_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct vpfe_device *vpfe = video_drvdata(file);
- struct v4l2_format format;
- unsigned int bpp;
+ struct vpfe_fmt *f;
+ struct v4l2_mbus_framefmt mbus_fmt;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
-
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
return -EBUSY;
}
- ret = __vpfe_get_format(vpfe, &format, &bpp);
- if (ret)
+ ret = vpfe_try_fmt(file, priv, fmt);
+ if (ret < 0)
return ret;
+ f = find_format_by_pix(vpfe, fmt->fmt.pix.pixelformat);
- if (!cmp_v4l2_format(fmt, &format)) {
- /* Sensor format is different from the requested format
- * so we need to change it
- */
- ret = __vpfe_set_format(vpfe, fmt, &bpp);
- if (ret)
- return ret;
- } else /* Just make sure all of the fields are consistent */
- *fmt = format;
+ v4l2_fill_mbus_format(&mbus_fmt, &fmt->fmt.pix, f->code);
- /* First detach any IRQ if currently attached */
- vpfe_detach_irq(vpfe);
- vpfe->fmt = *fmt;
- vpfe->bpp = bpp;
+ ret = __subdev_set_format(vpfe, &mbus_fmt);
+ if (ret)
+ return ret;
+
+ /* Just double check nothing has gone wrong */
+ if (mbus_fmt.code != f->code) {
+ vpfe_dbg(3, vpfe,
+ "%s subdev changed format on us, this should not happen\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe->fmt.fmt.pix.pixelformat = f->fourcc;
+ vpfe_calc_format_size(vpfe, f, &vpfe->fmt);
+ *fmt = vpfe->fmt;
+ vpfe->current_vpfe_fmt = f;
/* Update the crop window based on found values */
vpfe->crop.width = fmt->fmt.pix.width;
@@ -1592,57 +1496,40 @@ static int vpfe_enum_size(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
struct v4l2_subdev_frame_size_enum fse;
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_mbus_framefmt mbus;
- struct v4l2_pix_format pix;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
struct vpfe_fmt *fmt;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
-
/* check for valid format */
- fmt = find_format_by_pix(fsize->pixel_format);
+ fmt = find_format_by_pix(vpfe, fsize->pixel_format);
if (!fmt) {
- vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
- fsize->pixel_format);
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x\n",
+ fsize->pixel_format);
return -EINVAL;
}
memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
-
- memset(&pix, 0x0, sizeof(pix));
- /* Construct pix from parameter and use default for the rest */
- pix.pixelformat = fsize->pixel_format;
- pix.width = 640;
- pix.height = 480;
- pix.colorspace = V4L2_COLORSPACE_SRGB;
- pix.field = V4L2_FIELD_NONE;
- pix_to_mbus(vpfe, &pix, &mbus);
-
memset(&fse, 0x0, sizeof(fse));
fse.index = fsize->index;
fse.pad = 0;
- fse.code = mbus.code;
+ fse.code = fmt->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
if (ret)
- return -EINVAL;
+ return ret;
- vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
- fse.index, fse.code, fse.min_width, fse.max_width,
- fse.min_height, fse.max_height);
+ vpfe_dbg(1, vpfe, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = fse.max_width;
fsize->discrete.height = fse.max_height;
- vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
- fsize->index, print_fourcc(fsize->pixel_format),
- fsize->discrete.width, fsize->discrete.height);
+ vpfe_dbg(1, vpfe, "%s: index: %d pixformat: %s size: %dx%d\n",
+ __func__, fsize->index, print_fourcc(fsize->pixel_format),
+ fsize->discrete.width, fsize->discrete.height);
return 0;
}
@@ -1707,8 +1594,6 @@ static int vpfe_enum_input(struct file *file, void *priv,
struct vpfe_subdev_info *sdinfo;
int subdev, index;
- vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
-
if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
inp->index) < 0) {
vpfe_dbg(1, vpfe,
@@ -1725,8 +1610,6 @@ static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_input\n");
-
return vpfe_get_app_input_index(vpfe, index);
}
@@ -1739,8 +1622,6 @@ static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
u32 input, output;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
-
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
@@ -1796,9 +1677,6 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe,
- "vpfe_s_input: index: %d\n", index);
-
return vpfe_set_input(vpfe, index);
}
@@ -1807,8 +1685,6 @@ static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- vpfe_dbg(2, vpfe, "vpfe_querystd\n");
-
sdinfo = vpfe->current_subdev;
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
@@ -1824,12 +1700,14 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
struct vpfe_subdev_info *sdinfo;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_s_std\n");
-
sdinfo = vpfe->current_subdev;
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
+ /* if trying to set the same std then nothing to do */
+ if (vpfe_standards[vpfe->std_index].std_id == std_id)
+ return 0;
+
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
@@ -1853,8 +1731,6 @@ static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- vpfe_dbg(2, vpfe, "vpfe_g_std\n");
-
sdinfo = vpfe->current_subdev;
if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
return -ENODATA;
@@ -1872,8 +1748,6 @@ static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
{
struct v4l2_rect image_win;
- vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
-
vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
vpfe->field_off = image_win.height * image_win.width;
}
@@ -1957,6 +1831,29 @@ static void vpfe_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}
+static void vpfe_return_all_buffers(struct vpfe_device *vpfe,
+ enum vb2_buffer_state state)
+{
+ struct vpfe_cap_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+
+ if (vpfe->cur_frm)
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, state);
+
+ if (vpfe->next_frm && vpfe->next_frm != vpfe->cur_frm)
+ vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf, state);
+
+ vpfe->cur_frm = NULL;
+ vpfe->next_frm = NULL;
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
/*
* vpfe_start_streaming : Starts the DMA engine for streaming
* @vb: ptr to vb2_buffer
@@ -1965,7 +1862,6 @@ static void vpfe_buffer_queue(struct vb2_buffer *vb)
static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
- struct vpfe_cap_buffer *buf, *tmp;
struct vpfe_subdev_info *sdinfo;
unsigned long flags;
unsigned long addr;
@@ -1980,6 +1876,9 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
vpfe_attach_irq(vpfe);
+ vpfe->stopping = false;
+ init_completion(&vpfe->capture_stop);
+
if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
vpfe_ccdc_config_raw(&vpfe->ccdc);
else
@@ -2008,11 +1907,8 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
err:
- list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- }
-
+ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_QUEUED);
+ vpfe_pcr_enable(&vpfe->ccdc, 0);
return ret;
}
@@ -2027,11 +1923,15 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
struct vpfe_subdev_info *sdinfo;
- unsigned long flags;
int ret;
vpfe_pcr_enable(&vpfe->ccdc, 0);
+ /* Wait for the last frame to be captured */
+ vpfe->stopping = true;
+ wait_for_completion_timeout(&vpfe->capture_stop,
+ msecs_to_jiffies(250));
+
vpfe_detach_irq(vpfe);
sdinfo = vpfe->current_subdev;
@@ -2040,27 +1940,7 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
/* release all active buffers */
- spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
- if (vpfe->cur_frm == vpfe->next_frm) {
- vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- } else {
- if (vpfe->cur_frm != NULL)
- vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- if (vpfe->next_frm != NULL)
- vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
-
- while (!list_empty(&vpfe->dma_queue)) {
- vpfe->next_frm = list_entry(vpfe->dma_queue.next,
- struct vpfe_cap_buffer, list);
- list_del(&vpfe->next_frm->list);
- vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_ERROR);
}
static int vpfe_g_pixelaspect(struct file *file, void *priv,
@@ -2068,8 +1948,6 @@ static int vpfe_g_pixelaspect(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_pixelaspect\n");
-
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
return -EINVAL;
@@ -2128,6 +2006,7 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
struct vpfe_device *vpfe = video_drvdata(file);
struct v4l2_rect cr = vpfe->crop;
struct v4l2_rect r = s->r;
+ u32 bpp;
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
@@ -2153,10 +2032,12 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
s->r = vpfe->crop = r;
- vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
+ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, bpp);
vpfe->fmt.fmt.pix.width = r.width;
vpfe->fmt.fmt.pix.height = r.height;
- vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ vpfe->fmt.fmt.pix.bytesperline =
+ vpfe_ccdc_get_line_length(&vpfe->ccdc);
vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
vpfe->fmt.fmt.pix.height;
@@ -2172,8 +2053,6 @@ static long vpfe_ioctl_default(struct file *file, void *priv,
struct vpfe_device *vpfe = video_drvdata(file);
int ret;
- vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
-
if (!valid_prio) {
vpfe_err(vpfe, "%s device busy\n", __func__);
return -EBUSY;
@@ -2279,10 +2158,10 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
struct vpfe_device, v4l2_dev);
struct v4l2_subdev_mbus_code_enum mbus_code;
struct vpfe_subdev_info *sdinfo;
+ struct vpfe_fmt *fmt;
+ int ret = 0;
bool found = false;
- int i, j;
-
- vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
+ int i, j, k;
for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
if (vpfe->cfg->asd[i]->match.fwnode ==
@@ -2302,27 +2181,37 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
- /* setup the supported formats & indexes */
- for (j = 0, i = 0; ; ++j) {
- struct vpfe_fmt *fmt;
- int ret;
-
+ vpfe->num_active_fmt = 0;
+ for (j = 0, i = 0; (ret != -EINVAL); ++j) {
memset(&mbus_code, 0, sizeof(mbus_code));
mbus_code.index = j;
mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
- NULL, &mbus_code);
+ NULL, &mbus_code);
if (ret)
- break;
-
- fmt = find_format_by_code(mbus_code.code);
- if (!fmt)
continue;
- fmt->supported = true;
- fmt->index = i++;
+ vpfe_dbg(3, vpfe,
+ "subdev %s: code: %04x idx: %d\n",
+ subdev->name, mbus_code.code, j);
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (mbus_code.code != fmt->code)
+ continue;
+ vpfe->active_fmt[i] = fmt;
+ vpfe_dbg(3, vpfe,
+ "matched fourcc: %s code: %04x idx: %d\n",
+ print_fourcc(fmt->fourcc), mbus_code.code, i);
+ vpfe->num_active_fmt = ++i;
+ }
}
+ if (!i) {
+ vpfe_err(vpfe, "No suitable format reported by subdev %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
return 0;
}
@@ -2605,8 +2494,6 @@ static int vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe = platform_get_drvdata(pdev);
- vpfe_dbg(2, vpfe, "vpfe_remove\n");
-
pm_runtime_disable(&pdev->dev);
v4l2_async_notifier_unregister(&vpfe->notifier);
@@ -2653,22 +2540,21 @@ static int vpfe_suspend(struct device *dev)
struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
- /* if streaming has not started we don't care */
- if (!vb2_start_streaming_called(&vpfe->buffer_queue))
- return 0;
-
- pm_runtime_get_sync(dev);
- vpfe_config_enable(ccdc, 1);
+ /* only do full suspend if streaming has started */
+ if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
- /* Save VPFE context */
- vpfe_save_context(ccdc);
+ /* Save VPFE context */
+ vpfe_save_context(ccdc);
- /* Disable CCDC */
- vpfe_pcr_enable(ccdc, 0);
- vpfe_config_enable(ccdc, 0);
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+ vpfe_config_enable(ccdc, 0);
- /* Disable both master and slave clock */
- pm_runtime_put_sync(dev);
+ /* Disable both master and slave clock */
+ pm_runtime_put_sync(dev);
+ }
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@ -2710,19 +2596,18 @@ static int vpfe_resume(struct device *dev)
struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
- /* if streaming has not started we don't care */
- if (!vb2_start_streaming_called(&vpfe->buffer_queue))
- return 0;
-
- /* Enable both master and slave clock */
- pm_runtime_get_sync(dev);
- vpfe_config_enable(ccdc, 1);
+ /* only do full resume if streaming has started */
+ if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ /* Enable both master and slave clock */
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
- /* Restore VPFE context */
- vpfe_restore_context(ccdc);
+ /* Restore VPFE context */
+ vpfe_restore_context(ccdc);
- vpfe_config_enable(ccdc, 0);
- pm_runtime_put_sync(dev);
+ vpfe_config_enable(ccdc, 0);
+ pm_runtime_put_sync(dev);
+ }
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
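
Besides the format rework, the VPFE driver gains a stop handshake: stop_streaming() sets vpfe->stopping and then waits on vpfe->capture_stop, which the ISR completes once the frame in flight has finished, bounded by a 250 ms timeout. A reduced sketch of the same pattern, using illustrative demo_* names rather than the driver's own structures:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct demo_cap {
	bool stopping;
	struct completion capture_stop;
};

static irqreturn_t demo_isr(int irq, void *arg)
{
	struct demo_cap *cap = arg;

	/* ... per-frame buffer handling would go here ... */

	if (cap->stopping) {
		cap->stopping = false;
		complete(&cap->capture_stop);	/* last frame is done */
	}
	return IRQ_HANDLED;
}

static void demo_start_streaming(struct demo_cap *cap)
{
	cap->stopping = false;
	init_completion(&cap->capture_stop);
	/* ... enable capture hardware ... */
}

static void demo_stop_streaming(struct demo_cap *cap)
{
	/* ... disable capture at the hardware level ... */
	cap->stopping = true;
	wait_for_completion_timeout(&cap->capture_stop,
				    msecs_to_jiffies(250));
	/* buffers can now be returned safely */
}
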
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 4678285f34c6..05ee37db0273 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 - 2014 Texas Instruments, Inc.
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef AM437X_VPFE_H
@@ -23,6 +11,7 @@
#include <linux/am437x-vpfe.h>
#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/i2c.h>
@@ -214,6 +203,25 @@ struct vpfe_ccdc {
u32 ccdc_ctx[VPFE_REG_END / sizeof(u32)];
};
+/*
+ * struct vpfe_fmt - VPFE media bus format information
+ * @fourcc: V4L2 pixel format code
+ * @code: V4L2 media bus format code
+ * @bitsperpixel: Bits per pixel over the bus
+ */
+struct vpfe_fmt {
+ u32 fourcc;
+ u32 code;
+ u32 bitsperpixel;
+};
+
+/*
+ * When formats[] is modified make sure to adjust this value also.
+ * Expect compile-time warnings if VPFE_NUM_FORMATS is smaller than
+ * the number of elements in formats[].
+ */
+#define VPFE_NUM_FORMATS 10
+
struct vpfe_device {
/* V4l2 specific parameters */
/* Identifies video device for this channel */
@@ -249,8 +257,11 @@ struct vpfe_device {
struct vpfe_cap_buffer *next_frm;
/* Used to store pixel format */
struct v4l2_format fmt;
- /* Used to store current bytes per pixel based on current format */
- unsigned int bpp;
+ /* Used to keep a reference to the current vpfe_fmt */
+ struct vpfe_fmt *current_vpfe_fmt;
+ struct vpfe_fmt *active_fmt[VPFE_NUM_FORMATS];
+ unsigned int num_active_fmt;
+
/*
* used when IMP is chained to store the crop window which
* is different from the image window
@@ -270,6 +281,8 @@ struct vpfe_device {
*/
u32 field_off;
struct vpfe_ccdc ccdc;
+ int stopping;
+ struct completion capture_stop;
};
#endif /* AM437X_VPFE_H */
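
The new bitsperpixel field in struct vpfe_fmt feeds a small derivation in the .c file (__get_bytesperpixel()): bits per pixel divided by the bus width gives the clocks per pixel, and each clock moves ALIGN(bus_width, 8)/8 bytes. For example, a 16-bit YUYV pixel on an 8-bit bus is 2 clocks × 1 byte = 2 bytes, and on a 16-bit bus 1 clock × 2 bytes = 2 bytes. A standalone sketch of the same arithmetic (demo_* name is illustrative):

#include <linux/kernel.h>	/* ALIGN, DIV_ROUND_UP */

static unsigned int demo_bytes_per_pixel(unsigned int bitsperpixel,
					 unsigned int bus_width)
{
	unsigned int bus_width_bytes = ALIGN(bus_width, 8) / 8;
	unsigned int clocksperpixel = DIV_ROUND_UP(bitsperpixel, bus_width);

	return clocksperpixel * bus_width_bytes;
}
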
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/am437x/am437x-vpfe_regs.h
index 0746c48ec23f..63ecdca3b908 100644
--- a/drivers/media/platform/am437x/am437x-vpfe_regs.h
+++ b/drivers/media/platform/am437x/am437x-vpfe_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI AM437x Image Sensor Interface Registers
*
@@ -5,15 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef AM437X_VPFE_REGS_H
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index eb12f3793062..d8593cb2ae84 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -606,6 +606,16 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
aspeed_video_start_frame(video);
}
+ /*
+ * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts come even when these
+ * are disabled in the VE_INTERRUPT_CTRL register, so clear them to
+ * prevent unnecessary interrupt calls.
+ */
+ if (sts & VE_INTERRUPT_CAPTURE_COMPLETE)
+ sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE;
+ if (sts & VE_INTERRUPT_FRAME_COMPLETE)
+ sts &= ~VE_INTERRUPT_FRAME_COMPLETE;
+
return sts ? IRQ_NONE : IRQ_HANDLED;
}
@@ -614,7 +624,7 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
int i;
int hsync_counter = 0;
int vsync_counter = 0;
- u32 sts;
+ u32 sts, ctrl;
for (i = 0; i < NUM_POLARITY_CHECKS; ++i) {
sts = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
@@ -629,30 +639,29 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
hsync_counter++;
}
- if (hsync_counter < 0 || vsync_counter < 0) {
- u32 ctrl = 0;
+ ctrl = aspeed_video_read(video, VE_CTRL);
- if (hsync_counter < 0) {
- ctrl = VE_CTRL_HSYNC_POL;
- video->detected_timings.polarities &=
- ~V4L2_DV_HSYNC_POS_POL;
- } else {
- video->detected_timings.polarities |=
- V4L2_DV_HSYNC_POS_POL;
- }
-
- if (vsync_counter < 0) {
- ctrl = VE_CTRL_VSYNC_POL;
- video->detected_timings.polarities &=
- ~V4L2_DV_VSYNC_POS_POL;
- } else {
- video->detected_timings.polarities |=
- V4L2_DV_VSYNC_POS_POL;
- }
+ if (hsync_counter < 0) {
+ ctrl |= VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_HSYNC_POS_POL;
+ } else {
+ ctrl &= ~VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities |=
+ V4L2_DV_HSYNC_POS_POL;
+ }
- if (ctrl)
- aspeed_video_update(video, VE_CTRL, 0, ctrl);
+ if (vsync_counter < 0) {
+ ctrl |= VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_VSYNC_POS_POL;
+ } else {
+ ctrl &= ~VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities |=
+ V4L2_DV_VSYNC_POS_POL;
}
+
+ aspeed_video_write(video, VE_CTRL, ctrl);
}
static bool aspeed_video_alloc_buf(struct aspeed_video *video,
@@ -741,6 +750,8 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
}
set_bit(VIDEO_RES_DETECT, &video->flags);
+ aspeed_video_update(video, VE_CTRL,
+ VE_CTRL_VSYNC_POL | VE_CTRL_HSYNC_POL, 0);
aspeed_video_enable_mode_detect(video);
rc = wait_event_interruptible_timeout(video->wait,
@@ -1646,7 +1657,8 @@ static int aspeed_video_probe(struct platform_device *pdev)
{
int rc;
struct resource *res;
- struct aspeed_video *video = kzalloc(sizeof(*video), GFP_KERNEL);
+ struct aspeed_video *video =
+ devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL);
if (!video)
return -ENOMEM;
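The polarity rework above replaces a set-only aspeed_video_update() call with a full read-modify-write of VE_CTRL, because a set-only update can never clear a polarity bit latched by an earlier detection pass. A small standalone illustration of the difference, using made-up bit values rather than the real register layout:

#include <stdio.h>

#define HSYNC_POL (1u << 0)	/* illustrative bit positions only */
#define VSYNC_POL (1u << 1)

int main(void)
{
	unsigned int reg = HSYNC_POL;	/* stale: hsync inversion still latched */

	/* A set-only update with no bits to set leaves the stale bit alone. */
	unsigned int set_only = reg | 0;

	/* A read-modify-write can clear and set bits as detection dictates. */
	unsigned int rmw = reg;

	rmw &= ~HSYNC_POL;	/* this pass detected a positive hsync */
	rmw |= VSYNC_POL;	/* and a negative vsync */

	printf("set-only: %#x  rmw: %#x\n", set_only, rmw);
	return 0;
}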
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 31ace114eda1..be9ec59774d6 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -129,7 +129,7 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
*/
for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
unsigned int idx = find_first_zero_bit(&lanes_used,
- sizeof(lanes_used));
+ csi2rx->max_lanes);
set_bit(idx, &lanes_used);
reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
}
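The one-line change above matters because find_first_zero_bit() takes its bound in bits, whereas sizeof(lanes_used) is a byte count (typically 8 for an unsigned long), so the old call only ever scanned the first eight bits. A minimal sketch of the corrected usage; the helper name and the lane limit are hypothetical:

#include <linux/bitmap.h>
#include <linux/bitops.h>

/* Hypothetical helper: claim the first free lane out of max_lanes. */
static unsigned int claim_free_lane(unsigned long *lanes_used,
				    unsigned int max_lanes)
{
	/* The bound is a bit count (max_lanes), not sizeof() in bytes. */
	unsigned int idx = find_first_zero_bit(lanes_used, max_lanes);

	if (idx < max_lanes)
		set_bit(idx, lanes_used);

	return idx;
}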
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
index 5b17d3a31896..42d2c2cd9a78 100644
--- a/drivers/media/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -8,10 +8,12 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <media/cec-notifier.h>
#include <media/cec-pin.h>
struct cec_gpio {
struct cec_adapter *adap;
+ struct cec_notifier *notifier;
struct device *dev;
struct gpio_desc *cec_gpio;
@@ -173,9 +175,17 @@ static const struct cec_pin_ops cec_gpio_pin_ops = {
static int cec_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device *hdmi_dev;
struct cec_gpio *cec;
+ u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
int ret;
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
+ if (PTR_ERR(hdmi_dev) == -EPROBE_DEFER)
+ return PTR_ERR(hdmi_dev);
+ if (IS_ERR(hdmi_dev))
+ caps |= CEC_CAP_PHYS_ADDR;
+
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
@@ -196,8 +206,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
return PTR_ERR(cec->v5_gpio);
cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
- cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
- CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
+ cec, pdev->name, caps);
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
@@ -205,7 +214,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
cec->adap->name, cec);
if (ret)
- return ret;
+ goto del_adap;
cec_gpio_disable_irq(cec->adap);
@@ -218,7 +227,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"hpd-gpio", cec);
if (ret)
- return ret;
+ goto del_adap;
}
if (cec->v5_gpio) {
@@ -230,23 +239,37 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"v5-gpio", cec);
if (ret)
- return ret;
+ goto del_adap;
}
- ret = cec_register_adapter(cec->adap, &pdev->dev);
- if (ret) {
- cec_delete_adapter(cec->adap);
- return ret;
+ if (!IS_ERR(hdmi_dev)) {
+ cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ cec->adap);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto del_adap;
+ }
}
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret)
+ goto unreg_notifier;
+
platform_set_drvdata(pdev, cec);
return 0;
+
+unreg_notifier:
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
+del_adap:
+ cec_delete_adapter(cec->adap);
+ return ret;
}
static int cec_gpio_remove(struct platform_device *pdev)
{
struct cec_gpio *cec = platform_get_drvdata(pdev);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
}
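The probe rework above converts early returns into a goto unwind ladder so that the adapter, and later the notifier, are released when a subsequent step fails. A standalone sketch of that pattern with placeholder resource names (none of these functions belong to the driver):

#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", what);
		return -1;
	}
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

static int probe(int fail_at)
{
	int ret = -1;

	if (acquire("adapter", fail_at == 1))
		return ret;
	if (acquire("notifier", fail_at == 2))
		goto del_adap;
	if (acquire("registration", fail_at == 3))
		goto unreg_notifier;
	return 0;

unreg_notifier:
	release("notifier");	/* undo the most recent step first */
del_adap:
	release("adapter");
	return ret;
}

int main(void)
{
	probe(3);	/* exercise the deepest failure path */
	return 0;
}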
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 73222c0615c0..94fb4d2ecc43 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -933,7 +933,8 @@ static int coda_g_selection(struct file *file, void *fh,
rsel = &r;
/* fallthrough */
case V4L2_SEL_TGT_CROP:
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ ctx->inst_type == CODA_INST_DECODER)
return -EINVAL;
break;
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -942,7 +943,8 @@ static int coda_g_selection(struct file *file, void *fh,
/* fallthrough */
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ ctx->inst_type == CODA_INST_ENCODER)
return -EINVAL;
break;
default:
@@ -1084,16 +1086,16 @@ static int coda_decoder_cmd(struct file *file, void *fh,
switch (dc->cmd) {
case V4L2_DEC_CMD_START:
- mutex_lock(&ctx->bitstream_mutex);
mutex_lock(&dev->coda_mutex);
+ mutex_lock(&ctx->bitstream_mutex);
coda_bitstream_flush(ctx);
- mutex_unlock(&dev->coda_mutex);
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_clear_last_buffer_dequeued(dst_vq);
ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
+ mutex_unlock(&dev->coda_mutex);
break;
case V4L2_DEC_CMD_STOP:
stream_end = false;
@@ -2387,6 +2389,7 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
dst_vq->mem_ops = &vb2_dma_contig_memops;
return coda_queue_init(priv, dst_vq);
@@ -2959,8 +2962,6 @@ static int coda_probe(struct platform_device *pdev)
else
return -EINVAL;
- spin_lock_init(&dev->irqlock);
-
dev->dev = &pdev->dev;
dev->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(dev->clk_per)) {
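The V4L2_DEC_CMD_START hunk above changes the start path to take dev->coda_mutex before ctx->bitstream_mutex and release them in the reverse order. Taking the same pair of locks in opposite orders on two paths is the classic AB-BA deadlock, so every path that needs both should follow one ordering. A minimal sketch of the resulting pattern, with the work done under the locks elided:

	mutex_lock(&dev->coda_mutex);		/* outer lock first */
	mutex_lock(&ctx->bitstream_mutex);	/* then the inner lock */

	/* flush and refill the bitstream while both locks are held */

	mutex_unlock(&ctx->bitstream_mutex);	/* drop in reverse order */
	mutex_unlock(&dev->coda_mutex);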
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 848bf1da401e..9f226140b486 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -86,7 +86,6 @@ struct coda_dev {
struct gen_pool *iram_pool;
struct coda_aux_buf iram;
- spinlock_t irqlock;
struct mutex dev_mutex;
struct mutex coda_mutex;
struct workqueue_struct *workqueue;
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
index 4a3b3810fd89..f048e8994785 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -314,7 +314,8 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
return 0;
out_probe_notify:
- cec_notifier_cec_adap_unregister(cros_ec_cec->notify);
+ cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
+ cros_ec_cec->adap);
out_probe_adapter:
cec_delete_adapter(cros_ec_cec->adap);
return ret;
@@ -335,7 +336,8 @@ static int cros_ec_cec_remove(struct platform_device *pdev)
return ret;
}
- cec_notifier_cec_adap_unregister(cros_ec_cec->notify);
+ cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
+ cros_ec_cec->adap);
cec_unregister_adapter(cros_ec_cec->adap);
return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 378cc302e1f8..d2cbcdca0463 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -313,7 +313,7 @@ static int isp_video_release(struct file *file)
ivc->streaming = 0;
}
- vb2_fop_release(file);
+ _vb2_fop_release(file, NULL);
if (v4l2_fh_is_singular_file(file)) {
fimc_pipeline_call(&ivc->ve, close);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index a838189d4490..9aaf3b8060d5 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1457,12 +1457,12 @@ static int fimc_md_probe(struct platform_device *pdev)
ret = v4l2_device_register(dev, &fmd->v4l2_dev);
if (ret < 0) {
v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
- return ret;
+ goto err_md;
}
ret = fimc_md_get_clocks(fmd);
if (ret)
- goto err_md;
+ goto err_v4l2dev;
ret = fimc_md_get_pinctrl(fmd);
if (ret < 0) {
@@ -1519,9 +1519,10 @@ err_m_ent:
fimc_md_unregister_entities(fmd);
err_clk:
fimc_md_put_clocks(fmd);
+err_v4l2dev:
+ v4l2_device_unregister(&fmd->v4l2_dev);
err_md:
media_device_cleanup(&fmd->media_dev);
- v4l2_device_unregister(&fmd->v4l2_dev);
return ret;
}
diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
index 3b39e875292e..891533060d49 100644
--- a/drivers/media/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/platform/meson/ao-cec-g12a.c
@@ -662,34 +662,27 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
- ao_cec->adap);
- if (!ao_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
- }
-
ao_cec->adap->owner = THIS_MODULE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_ao_cec_g12a_regmap_conf);
if (IS_ERR(ao_cec->regmap)) {
ret = PTR_ERR(ao_cec->regmap);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec,
&meson_ao_cec_g12a_cec_regmap_conf);
if (IS_ERR(ao_cec->regmap_cec)) {
ret = PTR_ERR(ao_cec->regmap_cec);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
@@ -699,45 +692,52 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin");
if (IS_ERR(ao_cec->oscin)) {
dev_err(&pdev->dev, "oscin clock request failed\n");
ret = PTR_ERR(ao_cec->oscin);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = meson_ao_cec_g12a_setup_clk(ao_cec);
if (ret)
- goto out_probe_notify;
+ goto out_probe_adapter;
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
device_reset_optional(&pdev->dev);
platform_set_drvdata(pdev, ao_cec);
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_core_clk;
+ }
+
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
- goto out_probe_core_clk;
+ goto out_probe_notify;
/* Setup Hardware */
regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET);
return 0;
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
+
out_probe_core_clk:
clk_disable_unprepare(ao_cec->core);
-out_probe_notify:
- cec_notifier_cec_adap_unregister(ao_cec->notify);
-
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
@@ -752,7 +752,7 @@ static int meson_ao_cec_g12a_remove(struct platform_device *pdev)
clk_disable_unprepare(ao_cec->core);
- cec_notifier_cec_adap_unregister(ao_cec->notify);
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
cec_unregister_adapter(ao_cec->adap);
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
index 64ed549bf012..09aff82c3773 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -624,20 +624,13 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
- ao_cec->adap);
- if (!ao_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
- }
-
ao_cec->adap->owner = THIS_MODULE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
@@ -647,20 +640,20 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->core = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(ao_cec->core)) {
dev_err(&pdev->dev, "core clock request failed\n");
ret = PTR_ERR(ao_cec->core);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
@@ -674,9 +667,16 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
ao_cec->pdev = pdev;
platform_set_drvdata(pdev, ao_cec);
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_clk;
+ }
+
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
- goto out_probe_clk;
+ goto out_probe_notify;
/* Setup Hardware */
writel_relaxed(CEC_GEN_CNTL_RESET,
@@ -684,12 +684,12 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
return 0;
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
+
out_probe_clk:
clk_disable_unprepare(ao_cec->core);
-out_probe_notify:
- cec_notifier_cec_adap_unregister(ao_cec->notify);
-
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
@@ -704,7 +704,7 @@ static int meson_ao_cec_remove(struct platform_device *pdev)
clk_disable_unprepare(ao_cec->core);
- cec_notifier_cec_adap_unregister(ao_cec->notify);
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
cec_unregister_adapter(ao_cec->adap);
return 0;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 26a55c3e807e..858727824889 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -284,7 +284,7 @@ static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
fmt = &mtk_video_formats[k];
if (fmt->fourcc == pixelformat) {
mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
- dst_q_data->fmt.fourcc, pixelformat);
+ dst_q_data->fmt->fourcc, pixelformat);
dst_q_data->fmt = fmt;
return;
}
@@ -841,12 +841,20 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
return -EINVAL;
pix_mp = &f->fmt.pix_mp;
+ /*
+ * Setting OUTPUT format after OUTPUT buffers are allocated is invalid
+ * if using the stateful API.
+ */
if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
mtk_v4l2_err("out_q_ctx buffers already requested");
ret = -EBUSY;
}
+ /*
+ * Setting CAPTURE format after CAPTURE buffers are allocated is
+ * invalid.
+ */
if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
mtk_v4l2_err("cap_q_ctx buffers already requested");
@@ -865,6 +873,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
fmt = mtk_vdec_find_format(f);
}
}
+ if (fmt == NULL)
+ return -EINVAL;
q_data->fmt = fmt;
vidioc_try_fmt(f, q_data->fmt);
@@ -873,10 +883,10 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
q_data->coded_width = pix_mp->width;
q_data->coded_height = pix_mp->height;
- ctx->colorspace = f->fmt.pix_mp.colorspace;
- ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
- ctx->quantization = f->fmt.pix_mp.quantization;
- ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quantization = pix_mp->quantization;
+ ctx->xfer_func = pix_mp->xfer_func;
if (ctx->state == MTK_STATE_FREE) {
ret = vdec_if_init(ctx, q_data->fmt->fourcc);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 00d090df11bb..944771ee5f5c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -253,13 +253,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (res == NULL) {
- dev_err(&pdev->dev, "get memory resource failed.");
- ret = -ENXIO;
- goto err_res;
- }
- dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ dev->reg_base[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR((__force void *)dev->reg_base[i])) {
ret = PTR_ERR((__force void *)dev->reg_base[i]);
goto err_res;
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 49aa85a9bb5a..50048c170b99 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -283,7 +283,6 @@ static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_H264;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
err = vpu_dec_init(&inst->vpu);
if (err) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 63a8708ce682..6011fdd60a22 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -402,7 +402,6 @@ static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_VP8;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
err = vpu_dec_init(&inst->vpu);
if (err) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 5066c283d86d..24c1f0bf2147 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -793,7 +793,6 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_VP9;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
if (vpu_dec_init(&inst->vpu)) {
mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 3f38cc4509ef..70abfd4cd4b9 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -25,10 +25,16 @@ static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
}
/*
+ * vpu_dec_ipi_handler - Handler for VPU ipi message.
+ *
+ * @data: ipi message
+ * @len: length of ipi message
+ * @priv: callback private data passed by the decoder at registration time.
+ *
* This function runs in interrupt context and it means there's an IPI MSG
* from VPU.
*/
-void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
{
struct vdec_vpu_ipi_ack *msg = data;
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
@@ -102,6 +108,7 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
mtk_vcodec_debug_enter(vpu);
init_waitqueue_head(&vpu->wq);
+ vpu->handler = vpu_dec_ipi_handler;
err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
if (err != 0) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index b76f717e4fd7..f779b0676fbd 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -76,13 +76,4 @@ int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
*/
int vpu_dec_reset(struct vdec_vpu_inst *vpu);
-/**
- * vpu_dec_ipi_handler - Handler for VPU ipi message.
- *
- * @data: ipi message
- * @len : length of ipi message
- * @priv: callback private data which is passed by decoder when register.
- */
-void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv);
-
#endif
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index cc2ff40d060d..a768707abb94 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -273,7 +273,7 @@ int vpu_ipi_register(struct platform_device *pdev,
return -EPROBE_DEFER;
}
- if (id >= 0 && id < IPI_MAX && handler) {
+ if (id < IPI_MAX && handler) {
ipi_desc = vpu->ipi_desc;
ipi_desc[id].name = name;
ipi_desc[id].handler = handler;
@@ -398,7 +398,7 @@ int vpu_wdt_reg_handler(struct platform_device *pdev,
handler = vpu->wdt.handler;
- if (id >= 0 && id < VPU_RST_MAX && wdt_reset) {
+ if (id < VPU_RST_MAX && wdt_reset) {
dev_dbg(vpu->dev, "wdt register id %d\n", id);
mutex_lock(&vpu->vpu_mutex);
handler[id].reset_func = wdt_reset;
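The two range checks trimmed above compared an unsigned id against zero, which is always true and typically draws a -Wtype-limits warning, so only the upper bound carries any information. A small standalone illustration with a made-up IPI_MAX:

#include <stdio.h>

#define IPI_MAX 16u	/* made-up bound, stands in for the enum limit */

static int ipi_id_valid(unsigned int id)
{
	/* "id >= 0" would be tautological for an unsigned type. */
	return id < IPI_MAX;
}

int main(void)
{
	printf("%d %d\n", ipi_id_valid(3), ipi_id_valid(42));
	return 0;
}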
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index e6eff512a8a1..07312a2fab24 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -239,6 +240,14 @@ static int venus_probe(struct platform_device *pdev)
if (IS_ERR(core->base))
return PTR_ERR(core->base);
+ core->video_path = of_icc_get(dev, "video-mem");
+ if (IS_ERR(core->video_path))
+ return PTR_ERR(core->video_path);
+
+ core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
+ if (IS_ERR(core->cpucfg_path))
+ return PTR_ERR(core->cpucfg_path);
+
core->irq = platform_get_irq(pdev, 0);
if (core->irq < 0)
return core->irq;
@@ -273,6 +282,10 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000));
+ if (ret)
+ return ret;
+
ret = hfi_create(core, &venus_core_ops);
if (ret)
return ret;
@@ -355,6 +368,9 @@ static int venus_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ icc_put(core->video_path);
+ icc_put(core->cpucfg_path);
+
v4l2_device_unregister(&core->v4l2_dev);
return ret;
@@ -427,10 +443,11 @@ static const struct venus_resources msm8916_res = {
};
static const struct freq_tbl msm8996_freq_table[] = {
- { 1944000, 490000000 }, /* 4k UHD @ 60 */
- { 972000, 320000000 }, /* 4k UHD @ 30 */
- { 489600, 150000000 }, /* 1080p @ 60 */
- { 244800, 75000000 }, /* 1080p @ 30 */
+ { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
+ { 972000, 520000000 }, /* 4k UHD @ 30 */
+ { 489600, 346666667 }, /* 1080p @ 60 */
+ { 244800, 150000000 }, /* 1080p @ 30 */
+ { 108000, 75000000 }, /* 720p @ 30 */
};
static const struct reg_val msm8996_reg_preset[] = {
@@ -464,9 +481,40 @@ static const struct freq_tbl sdm845_freq_table[] = {
{ 244800, 100000000 }, /* 1920x1080@30 */
};
+static struct codec_freq_data sdm845_codec_freq_data[] = {
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10 },
+};
+
+static const struct bw_tbl sdm845_bw_table_enc[] = {
+ { 1944000, 1612000, 0, 2416000, 0 }, /* 3840x2160@60 */
+ { 972000, 951000, 0, 1434000, 0 }, /* 3840x2160@30 */
+ { 489600, 723000, 0, 973000, 0 }, /* 1920x1080@60 */
+ { 244800, 370000, 0, 495000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct bw_tbl sdm845_bw_table_dec[] = {
+ { 2073600, 3929000, 0, 5551000, 0 }, /* 4096x2160@60 */
+ { 1036800, 1987000, 0, 2797000, 0 }, /* 4096x2160@30 */
+ { 489600, 1040000, 0, 1298000, 0 }, /* 1920x1080@60 */
+ { 244800, 530000, 0, 659000, 0 }, /* 1920x1080@30 */
+};
+
static const struct venus_resources sdm845_res = {
.freq_tbl = sdm845_freq_table,
.freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
+ .bw_tbl_enc = sdm845_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
+ .bw_tbl_dec = sdm845_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
+ .codec_freq_data = sdm845_codec_freq_data,
+ .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.max_load = 3110400, /* 4096x2160@90 */
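The venus_probe() changes above acquire two interconnect paths named in the device tree and place a small constant vote on the CPU-configuration path before firmware boot. A hedged sketch of that sequence using the fields added to struct venus_core; the helper name is hypothetical and error unwinding is trimmed:

#include <linux/err.h>
#include <linux/interconnect.h>

static int venus_icc_setup(struct device *dev, struct venus_core *core)
{
	core->video_path = of_icc_get(dev, "video-mem");
	if (IS_ERR(core->video_path))
		return PTR_ERR(core->video_path);

	core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
	if (IS_ERR(core->cpucfg_path))
		return PTR_ERR(core->cpucfg_path);

	/* Small constant vote on the CPU-config path, as in venus_probe(). */
	return icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000));
}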
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 922cb7e64bfa..11585fb3cae3 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -26,12 +26,33 @@ struct reg_val {
u32 value;
};
+struct codec_freq_data {
+ u32 pixfmt;
+ u32 session_type;
+ unsigned long vpp_freq;
+ unsigned long vsp_freq;
+};
+
+struct bw_tbl {
+ u32 mbs_per_sec;
+ u32 avg;
+ u32 peak;
+ u32 avg_10bit;
+ u32 peak_10bit;
+};
+
struct venus_resources {
u64 dma_mask;
const struct freq_tbl *freq_tbl;
unsigned int freq_tbl_size;
+ const struct bw_tbl *bw_tbl_enc;
+ unsigned int bw_tbl_enc_size;
+ const struct bw_tbl *bw_tbl_dec;
+ unsigned int bw_tbl_dec_size;
const struct reg_val *reg_tbl;
unsigned int reg_tbl_size;
+ const struct codec_freq_data *codec_freq_data;
+ unsigned int codec_freq_data_size;
const char * const clks[VIDC_CLKS_NUM_MAX];
unsigned int clks_num;
enum hfi_version hfi_version;
@@ -115,6 +136,8 @@ struct venus_core {
struct clk *core1_clk;
struct clk *core0_bus_clk;
struct clk *core1_bus_clk;
+ struct icc_path *video_path;
+ struct icc_path *cpucfg_path;
struct video_device *vdev_dec;
struct video_device *vdev_enc;
struct v4l2_device v4l2_dev;
@@ -208,6 +231,12 @@ struct venus_buffer {
struct list_head ref_list;
};
+struct clock_data {
+ u32 core_id;
+ unsigned long freq;
+ const struct codec_freq_data *codec_freq_data;
+};
+
#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
enum venus_dec_state {
@@ -288,6 +317,7 @@ struct venus_inst {
struct list_head list;
struct mutex lock;
struct venus_core *core;
+ struct clock_data clk_data;
struct list_head dpbbufs;
struct list_head internalbufs;
struct list_head registeredbufs;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 1ad96c25ab09..a172f1ac0b35 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk.h>
#include <linux/iopoll.h>
+#include <linux/interconnect.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
@@ -388,12 +389,91 @@ static u32 load_per_type(struct venus_core *core, u32 session_type)
return mbs_per_sec;
}
-int venus_helper_load_scale_clocks(struct venus_core *core)
+static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
{
+ const struct venus_resources *res = inst->core->res;
+ const struct bw_tbl *bw_tbl;
+ unsigned int num_rows, i;
+
+ *avg = 0;
+ *peak = 0;
+
+ if (mbs == 0)
+ return;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
+ num_rows = res->bw_tbl_enc_size;
+ bw_tbl = res->bw_tbl_enc;
+ } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ num_rows = res->bw_tbl_dec_size;
+ bw_tbl = res->bw_tbl_dec;
+ } else {
+ return;
+ }
+
+ if (!bw_tbl || num_rows == 0)
+ return;
+
+ for (i = 0; i < num_rows; i++) {
+ if (mbs > bw_tbl[i].mbs_per_sec)
+ break;
+
+ if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
+ *avg = bw_tbl[i].avg_10bit;
+ *peak = bw_tbl[i].peak_10bit;
+ } else {
+ *avg = bw_tbl[i].avg;
+ *peak = bw_tbl[i].peak;
+ }
+ }
+}
+
+static int load_scale_bw(struct venus_core *core)
+{
+ struct venus_inst *inst = NULL;
+ u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ mbs_per_sec = load_per_instance(inst);
+ mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
+ total_avg += avg;
+ total_peak += peak;
+ }
+ mutex_unlock(&core->lock);
+
+ dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n",
+ total_avg, total_peak);
+
+ return icc_set_bw(core->video_path, total_avg, total_peak);
+}
+
+static int set_clk_freq(struct venus_core *core, unsigned long freq)
+{
+ struct clk *clk = core->clks[0];
+ int ret;
+
+ ret = clk_set_rate(clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->core0_clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->core1_clk, freq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int scale_clocks(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
const struct freq_tbl *table = core->res->freq_tbl;
unsigned int num_rows = core->res->freq_tbl_size;
unsigned long freq = table[0].freq;
- struct clk *clk = core->clks[0];
struct device *dev = core->dev;
u32 mbs_per_sec;
unsigned int i;
@@ -419,23 +499,124 @@ int venus_helper_load_scale_clocks(struct venus_core *core)
set_freq:
- ret = clk_set_rate(clk, freq);
- if (ret)
- goto err;
+ ret = set_clk_freq(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
- ret = clk_set_rate(core->core0_clk, freq);
- if (ret)
- goto err;
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
- ret = clk_set_rate(core->core1_clk, freq);
- if (ret)
- goto err;
+ return 0;
+}
+
+static unsigned long calculate_inst_freq(struct venus_inst *inst,
+ unsigned long filled_len)
+{
+ unsigned long vpp_freq = 0, vsp_freq = 0;
+ u32 fps = (u32)inst->fps;
+ u32 mbs_per_sec;
+
+ mbs_per_sec = load_per_instance(inst) / fps;
+
+ vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
+ /* 21 / 20 is overhead factor */
+ vpp_freq += vpp_freq / 20;
+ vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq;
+
+ /* 10 / 7 is overhead factor */
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
+ else
+ vsp_freq += ((fps * filled_len * 8) * 10) / 7;
+
+ return max(vpp_freq, vsp_freq);
+}
+
+static int scale_clocks_v4(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ const struct freq_tbl *table = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct device *dev = core->dev;
+ unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
+ unsigned long filled_len = 0;
+ struct venus_buffer *buf, *n;
+ struct vb2_buffer *vb;
+ int i, ret;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ vb = &buf->vb.vb2_buf;
+ filled_len = max(filled_len, vb2_get_plane_payload(vb, 0));
+ }
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
+ return 0;
+
+ freq = calculate_inst_freq(inst, filled_len);
+ inst->clk_data.freq = freq;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
+ freq_core1 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
+ freq_core2 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+ freq_core1 += inst->clk_data.freq;
+ freq_core2 += inst->clk_data.freq;
+ }
+ }
+ mutex_unlock(&core->lock);
+
+ freq = max(freq_core1, freq_core2);
+
+ if (freq >= table[0].freq) {
+ freq = table[0].freq;
+ dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
+ freq, table[0].freq);
+ goto set_freq;
+ }
+
+ for (i = num_rows - 1 ; i >= 0; i--) {
+ if (freq <= table[i].freq) {
+ freq = table[i].freq;
+ break;
+ }
+ }
+
+set_freq:
+
+ ret = set_clk_freq(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
+
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
return 0;
+}
-err:
- dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
- return ret;
+int venus_helper_load_scale_clocks(struct venus_inst *inst)
+{
+ if (IS_V4(inst->core))
+ return scale_clocks_v4(inst);
+
+ return scale_clocks(inst);
}
EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks);
@@ -541,6 +722,8 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
put_ts_metadata(inst, vbuf);
+
+ venus_helper_load_scale_clocks(inst);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
fdata.buffer_type = HFI_BUFFER_OUTPUT;
@@ -809,6 +992,7 @@ int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
struct hfi_videocores_usage_type cu;
+ inst->clk_data.core_id = usage;
if (!IS_V4(inst->core))
return 0;
@@ -818,6 +1002,36 @@ int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
}
EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
+int venus_helper_init_codec_freq_data(struct venus_inst *inst)
+{
+ const struct codec_freq_data *data;
+ unsigned int i, data_size;
+ u32 pixfmt;
+ int ret = 0;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ data = inst->core->res->codec_freq_data;
+ data_size = inst->core->res->codec_freq_data_size;
+ pixfmt = inst->session_type == VIDC_SESSION_TYPE_DEC ?
+ inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt;
+
+ for (i = 0; i < data_size; i++) {
+ if (data[i].pixfmt == pixfmt &&
+ data[i].session_type == inst->session_type) {
+ inst->clk_data.codec_freq_data = &data[i];
+ break;
+ }
+ }
+
+ if (!inst->clk_data.codec_freq_data)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_init_codec_freq_data);
+
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
unsigned int output2_bufs)
@@ -1140,7 +1354,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
}
@@ -1193,7 +1407,6 @@ EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs);
int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
- struct venus_core *core = inst->core;
int ret;
ret = venus_helper_intbufs_alloc(inst);
@@ -1204,7 +1417,7 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_bufs_free;
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
ret = hfi_session_load_res(inst);
if (ret)
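calculate_inst_freq() above pads the vpp term by 21/20 (a 5% overhead) and scales the bitstream contribution to the vsp term by 10/7. A standalone illustration of that arithmetic with made-up macroblock and bitrate numbers:

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers: roughly one 1080p frame of macroblocks and a
	 * 10 Mbit/s stream; 200 cycles per macroblock follows the decoder
	 * entries in sdm845_codec_freq_data above.
	 */
	unsigned long mbs = 8160;		/* macroblocks per frame */
	unsigned long vpp = mbs * 200;		/* base vpp cycles */
	unsigned long bitrate = 10000000;	/* bits per second */

	unsigned long vpp_padded = vpp + vpp / 20;	/* x 21/20, i.e. +5% */
	unsigned long vsp_extra = (bitrate * 10) / 7;	/* x 10/7 overhead  */

	printf("vpp: %lu -> %lu, vsp extra: %lu\n", vpp, vpp_padded, vsp_extra);
	return 0;
}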
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 01f411b12f81..34dcd0c13f06 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -33,6 +33,7 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height,
u32 buftype);
int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
+int venus_helper_init_codec_freq_data(struct venus_inst *inst);
int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
@@ -59,7 +60,7 @@ int venus_helper_intbufs_free(struct venus_inst *inst);
int venus_helper_intbufs_realloc(struct venus_inst *inst);
int venus_helper_queue_dpb_bufs(struct venus_inst *inst);
int venus_helper_unregister_bufs(struct venus_inst *inst);
-int venus_helper_load_scale_clocks(struct venus_core *core);
+int venus_helper_load_scale_clocks(struct venus_inst *inst);
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst);
int venus_helper_process_initial_out_bufs(struct venus_inst *inst);
void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 7129a2aea09a..0d8855014ab3 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -1472,6 +1472,7 @@ static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
+ u32 ctrl_status;
bool val;
int ret;
@@ -1487,6 +1488,10 @@ static int venus_suspend_3xx(struct venus_core *core)
return -EINVAL;
}
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
+ goto power_off;
+
/*
* Power collapse sequence for Venus 3xx and 4xx versions:
* 1. Check for ARM9 and video core to be idle by checking WFI bit
@@ -1511,6 +1516,7 @@ static int venus_suspend_3xx(struct venus_core *core)
if (ret)
return ret;
+power_off:
mutex_lock(&hdev->lock);
ret = venus_power_off(hdev);
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 7f4660555ddb..8feaf5daece9 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -685,6 +685,10 @@ static int vdec_session_init(struct venus_inst *inst)
if (ret)
goto deinit;
+ ret = venus_helper_init_codec_freq_data(inst);
+ if (ret)
+ goto deinit;
+
return 0;
deinit:
hfi_session_deinit(inst);
@@ -864,7 +868,7 @@ reconfigure:
if (ret)
goto free_dpb_bufs;
- venus_helper_load_scale_clocks(inst->core);
+ venus_helper_load_scale_clocks(inst);
ret = hfi_session_continue(inst);
if (ret)
@@ -1072,7 +1076,7 @@ static void vdec_session_release(struct venus_inst *inst)
hfi_session_abort(inst);
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
mutex_unlock(&inst->lock);
@@ -1412,9 +1416,6 @@ static const struct v4l2_file_operations vdec_fops = {
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
-#ifdef CONFIG_COMPAT
- .compat_ioctl32 = v4l2_compat_ioctl32,
-#endif
};
static int vdec_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 1b7fb2d5887c..453edf966d4f 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -842,6 +842,10 @@ static int venc_init_session(struct venus_inst *inst)
if (ret)
goto deinit;
+ ret = venus_helper_init_codec_freq_data(inst);
+ if (ret)
+ goto deinit;
+
ret = venc_set_properties(inst);
if (ret)
goto deinit;
@@ -1235,9 +1239,6 @@ static const struct v4l2_file_operations venc_fops = {
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
-#ifdef CONFIG_COMPAT
- .compat_ioctl32 = v4l2_compat_ioctl32,
-#endif
};
static int venc_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 6993484ff0f3..7440c8965d27 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -983,6 +983,7 @@ static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
static const struct rvin_info rcar_info_r8a7795 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a7795_routes,
@@ -1077,6 +1078,7 @@ static const struct rvin_group_route rcar_info_r8a7796_routes[] = {
static const struct rvin_info rcar_info_r8a7796 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a7796_routes,
@@ -1121,6 +1123,7 @@ static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
static const struct rvin_info rcar_info_r8a77965 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77965_routes,
@@ -1168,6 +1171,7 @@ static const struct rvin_group_route rcar_info_r8a77980_routes[] = {
static const struct rvin_info rcar_info_r8a77980 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77980_routes,
@@ -1184,6 +1188,7 @@ static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
static const struct rvin_info rcar_info_r8a77990 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77990_routes,
@@ -1196,6 +1201,7 @@ static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
static const struct rvin_info rcar_info_r8a77995 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77995_routes,
@@ -1207,6 +1213,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a7796,
},
{
+ .compatible = "renesas,vin-r8a774b1",
+ .data = &rcar_info_r8a77965,
+ },
+ {
.compatible = "renesas,vin-r8a774c0",
.data = &rcar_info_r8a77990,
},
@@ -1282,7 +1292,6 @@ static int rcar_vin_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rvin_dev *vin;
- struct resource *mem;
int irq, ret;
vin = devm_kzalloc(&pdev->dev, sizeof(*vin), GFP_KERNEL);
@@ -1301,11 +1310,7 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (attr)
vin->info = attr->data;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL)
- return -EINVAL;
-
- vin->base = devm_ioremap_resource(vin->dev, mem);
+ vin->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vin->base))
return PTR_ERR(vin->base);
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index c14af1b929df..faa9fb23a2e9 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -1082,6 +1082,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.data = &rcar_csi2_info_r8a7796,
},
{
+ .compatible = "renesas,r8a774b1-csi2",
+ .data = &rcar_csi2_info_r8a77965,
+ },
+ {
.compatible = "renesas,r8a774c0-csi2",
.data = &rcar_csi2_info_r8a77990,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 3cb29b2e0b2b..cf9029efeb04 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -118,6 +118,7 @@
#define VNDMR_ABIT (1 << 2)
#define VNDMR_DTMD_YCSEP (1 << 1)
#define VNDMR_DTMD_ARGB (1 << 0)
+#define VNDMR_DTMD_YCSEP_420 (3 << 0)
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
@@ -529,12 +530,17 @@ static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
{
+ unsigned int crop_height;
u32 xs, ys;
/* Set scaling coefficient */
+ crop_height = vin->crop.height;
+ if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
+ crop_height *= 2;
+
ys = 0;
- if (vin->crop.height != vin->compose.height)
- ys = (4096 * vin->crop.height) / vin->compose.height;
+ if (crop_height != vin->compose.height)
+ ys = (4096 * crop_height) / vin->compose.height;
rvin_write(vin, ys, VNYS_REG);
xs = 0;
@@ -557,16 +563,11 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
rvin_write(vin, 0, VNSPPOC_REG);
rvin_write(vin, 0, VNSLPOC_REG);
rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);
- switch (vin->format.field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
+
+ if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
- break;
- default:
+ else
rvin_write(vin, vin->format.height - 1, VNELPOC_REG);
- break;
- }
vin_dbg(vin,
"Pre-Clip: %ux%u@%u:%u YS: %d XS: %d Post-Clip: %ux%u@%u:%u\n",
@@ -583,21 +584,9 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
/* Set Start/End Pixel/Line Pre-Clip */
rvin_write(vin, vin->crop.left, VNSPPRC_REG);
rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
+ rvin_write(vin, vin->crop.top, VNSLPRC_REG);
+ rvin_write(vin, vin->crop.top + vin->crop.height - 1, VNELPRC_REG);
- switch (vin->format.field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- rvin_write(vin, vin->crop.top / 2, VNSLPRC_REG);
- rvin_write(vin, (vin->crop.top + vin->crop.height) / 2 - 1,
- VNELPRC_REG);
- break;
- default:
- rvin_write(vin, vin->crop.top, VNSLPRC_REG);
- rvin_write(vin, vin->crop.top + vin->crop.height - 1,
- VNELPRC_REG);
- break;
- }
/* TODO: Add support for the UDS scaler. */
if (vin->info->model != RCAR_GEN3)
@@ -641,6 +630,9 @@ static int rvin_setup(struct rvin_dev *vin)
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
+ case V4L2_FIELD_ALTERNATE:
+ vnmc = VNMC_IM_ODD_EVEN;
+ break;
default:
vnmc = VNMC_IM_ODD;
break;
@@ -710,11 +702,13 @@ static int rvin_setup(struct rvin_dev *vin)
* Output format
*/
switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
rvin_write(vin,
- ALIGN(vin->format.width * vin->format.height, 0x80),
- VNUVAOF_REG);
- dmr = VNDMR_DTMD_YCSEP;
+ ALIGN(vin->format.bytesperline * vin->format.height,
+ 0x80), VNUVAOF_REG);
+ dmr = vin->format.pixelformat == V4L2_PIX_FMT_NV12 ?
+ VNDMR_DTMD_YCSEP_420 : VNDMR_DTMD_YCSEP;
output_is_yuv = true;
break;
case V4L2_PIX_FMT_YUYV:
@@ -799,6 +793,18 @@ static bool rvin_capture_active(struct rvin_dev *vin)
return rvin_read(vin, VNMS_REG) & VNMS_CA;
}
+static enum v4l2_field rvin_get_active_field(struct rvin_dev *vin, u32 vnms)
+{
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ /* If FS is set it is an Even field. */
+ if (vnms & VNMS_FS)
+ return V4L2_FIELD_BOTTOM;
+ return V4L2_FIELD_TOP;
+ }
+
+ return vin->format.field;
+}
+
static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
{
const struct rvin_video_format *fmt;
@@ -948,7 +954,7 @@ static irqreturn_t rvin_irq(int irq, void *data)
/* Capture frame */
if (vin->queue_buf[slot]) {
- vin->queue_buf[slot]->field = vin->format.field;
+ vin->queue_buf[slot]->field = rvin_get_active_field(vin, vnms);
vin->queue_buf[slot]->sequence = vin->sequence;
vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf,
@@ -1075,6 +1081,7 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case V4L2_FIELD_TOP:
case V4L2_FIELD_BOTTOM:
case V4L2_FIELD_NONE:
+ case V4L2_FIELD_ALTERNATE:
break;
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index cbc1c07f0a96..9e2e63ffcc47 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -31,6 +31,10 @@
static const struct rvin_video_format rvin_formats[] = {
{
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .bpp = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_NV16,
.bpp = 1,
},
@@ -72,6 +76,9 @@ const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
if (vin->info->model == RCAR_M1 && pixelformat == V4L2_PIX_FMT_XBGR32)
return NULL;
+ if (pixelformat == V4L2_PIX_FMT_NV12 && !vin->info->nv12)
+ return NULL;
+
for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
if (rvin_formats[i].fourcc == pixelformat)
return rvin_formats + i;
@@ -90,17 +97,29 @@ static u32 rvin_format_bytesperline(struct rvin_dev *vin,
if (WARN_ON(!fmt))
return -EINVAL;
- align = pix->pixelformat == V4L2_PIX_FMT_NV16 ? 0x20 : 0x10;
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ align = 0x20;
+ break;
+ default:
+ align = 0x10;
+ break;
+ }
return ALIGN(pix->width, align) * fmt->bpp;
}
static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
{
- if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ return pix->bytesperline * pix->height * 3 / 2;
+ case V4L2_PIX_FMT_NV16:
return pix->bytesperline * pix->height * 2;
-
- return pix->bytesperline * pix->height;
+ default:
+ return pix->bytesperline * pix->height;
+ }
}
static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
@@ -117,23 +136,23 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- break;
case V4L2_FIELD_ALTERNATE:
- /*
- * Driver does not (yet) support outputting ALTERNATE to a
- * userspace. It does support outputting INTERLACED so use
- * the VIN hardware to combine the two fields.
- */
- pix->field = V4L2_FIELD_INTERLACED;
- pix->height *= 2;
break;
default:
pix->field = RVIN_DEFAULT_FIELD;
break;
}
- /* HW limit width to a multiple of 32 (2^5) for NV16 else 2 (2^1) */
- walign = vin->format.pixelformat == V4L2_PIX_FMT_NV16 ? 5 : 1;
+ /* HW limits width to a multiple of 32 (2^5) for NV12/16, else 2 (2^1) */
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ walign = 5;
+ break;
+ default:
+ walign = 1;
+ break;
+ }
/* Limit to VIN capabilities */
v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign,
@@ -164,22 +183,32 @@ static int rvin_reset_format(struct rvin_dev *vin)
v4l2_fill_pix_format(&vin->format, &fmt.format);
+ vin->src_rect.top = 0;
+ vin->src_rect.left = 0;
+ vin->src_rect.width = vin->format.width;
+ vin->src_rect.height = vin->format.height;
+
+ /* Make use of the hardware interlacer by default. */
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ vin->format.field = V4L2_FIELD_INTERLACED;
+ vin->format.height *= 2;
+ }
+
rvin_format_align(vin, &vin->format);
- vin->source.top = 0;
- vin->source.left = 0;
- vin->source.width = vin->format.width;
- vin->source.height = vin->format.height;
+ vin->crop = vin->src_rect;
- vin->crop = vin->source;
- vin->compose = vin->source;
+ vin->compose.top = 0;
+ vin->compose.left = 0;
+ vin->compose.width = vin->format.width;
+ vin->compose.height = vin->format.height;
return 0;
}
static int rvin_try_format(struct rvin_dev *vin, u32 which,
struct v4l2_pix_format *pix,
- struct v4l2_rect *crop, struct v4l2_rect *compose)
+ struct v4l2_rect *src_rect)
{
struct v4l2_subdev *sd = vin_to_source(vin);
struct v4l2_subdev_pad_config *pad_cfg;
@@ -208,21 +237,15 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto done;
+ ret = 0;
v4l2_fill_pix_format(pix, &format.format);
- if (crop) {
- crop->top = 0;
- crop->left = 0;
- crop->width = pix->width;
- crop->height = pix->height;
-
- /*
- * If source is ALTERNATE the driver will use the VIN hardware
- * to INTERLACE it. The crop height then needs to be doubled.
- */
- if (pix->field == V4L2_FIELD_ALTERNATE)
- crop->height *= 2;
+ if (src_rect) {
+ src_rect->top = 0;
+ src_rect->left = 0;
+ src_rect->width = pix->width;
+ src_rect->height = pix->height;
}
if (field != V4L2_FIELD_ANY)
@@ -232,17 +255,10 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
pix->height = height;
rvin_format_align(vin, pix);
-
- if (compose) {
- compose->top = 0;
- compose->left = 0;
- compose->width = pix->width;
- compose->height = pix->height;
- }
done:
v4l2_subdev_free_pad_config(pad_cfg);
- return 0;
+ return ret;
}
static int rvin_querycap(struct file *file, void *priv,
@@ -262,29 +278,34 @@ static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
{
struct rvin_dev *vin = video_drvdata(file);
- return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL,
- NULL);
+ return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL);
}
static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_rect crop, compose;
+ struct v4l2_rect fmt_rect, src_rect;
int ret;
if (vb2_is_busy(&vin->queue))
return -EBUSY;
ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
- &crop, &compose);
+ &src_rect);
if (ret)
return ret;
vin->format = f->fmt.pix;
- vin->crop = crop;
- vin->compose = compose;
- vin->source = crop;
+
+ fmt_rect.top = 0;
+ fmt_rect.left = 0;
+ fmt_rect.width = vin->format.width;
+ fmt_rect.height = vin->format.height;
+
+ v4l2_rect_map_inside(&vin->crop, &src_rect);
+ v4l2_rect_map_inside(&vin->compose, &fmt_rect);
+ vin->src_rect = src_rect;
return 0;
}
@@ -302,12 +323,22 @@ static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index >= ARRAY_SIZE(rvin_formats))
- return -EINVAL;
-
- f->pixelformat = rvin_formats[f->index].fourcc;
+ struct rvin_dev *vin = video_drvdata(file);
+ unsigned int i;
+ int matched;
+
+ matched = -1;
+ for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) {
+ if (rvin_format_from_pixel(vin, rvin_formats[i].fourcc))
+ matched++;
+
+ if (matched == f->index) {
+ f->pixelformat = rvin_formats[i].fourcc;
+ return 0;
+ }
+ }
- return 0;
+ return -EINVAL;
}
static int rvin_g_selection(struct file *file, void *fh,
@@ -322,8 +353,8 @@ static int rvin_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
s->r.left = s->r.top = 0;
- s->r.width = vin->source.width;
- s->r.height = vin->source.height;
+ s->r.width = vin->src_rect.width;
+ s->r.height = vin->src_rect.height;
break;
case V4L2_SEL_TGT_CROP:
s->r = vin->crop;
@@ -365,21 +396,22 @@ static int rvin_s_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP:
/* Can't crop outside of source input */
max_rect.top = max_rect.left = 0;
- max_rect.width = vin->source.width;
- max_rect.height = vin->source.height;
+ max_rect.width = vin->src_rect.width;
+ max_rect.height = vin->src_rect.height;
v4l2_rect_map_inside(&r, &max_rect);
- v4l_bound_align_image(&r.width, 6, vin->source.width, 0,
- &r.height, 2, vin->source.height, 0, 0);
+ v4l_bound_align_image(&r.width, 6, vin->src_rect.width, 0,
+ &r.height, 2, vin->src_rect.height, 0, 0);
- r.top = clamp_t(s32, r.top, 0, vin->source.height - r.height);
- r.left = clamp_t(s32, r.left, 0, vin->source.width - r.width);
+ r.top = clamp_t(s32, r.top, 0,
+ vin->src_rect.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, vin->src_rect.width - r.width);
vin->crop = s->r = r;
vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
r.width, r.height, r.left, r.top,
- vin->source.width, vin->source.height);
+ vin->src_rect.width, vin->src_rect.height);
break;
case V4L2_SEL_TGT_COMPOSE:
/* Make sure compose rect fits inside output format */
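The NV12 support above aligns the luma line to 32 pixels and sizes the buffer as bytesperline * height * 3 / 2, since the interleaved CbCr plane is half the size of the luma plane. A standalone illustration with a 1920x1080 example; ALIGN_UP is a local stand-in for the kernel's ALIGN():

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int width = 1920, height = 1080, bpp = 1;	/* NV12 luma */
	unsigned int bytesperline = ALIGN_UP(width, 32) * bpp;
	unsigned int sizeimage = bytesperline * height * 3 / 2;

	printf("bytesperline=%u sizeimage=%u\n", bytesperline, sizeimage);
	return 0;
}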
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index e562c2ff21ec..a36b0824f81d 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -126,6 +126,7 @@ struct rvin_group_route {
* struct rvin_info - Information about the particular VIN implementation
* @model: VIN model
* @use_mc: use media controller instead of controlling subdevice
+ * @nv12: support outputting the NV12 pixel format
* @max_width: max input width the VIN supports
* @max_height: max input height the VIN supports
 * @routes: list of possible routes from the CSI-2 receivers to
@@ -134,6 +135,7 @@ struct rvin_group_route {
struct rvin_info {
enum model_id model;
bool use_mc;
+ bool nv12;
unsigned int max_width;
unsigned int max_height;
@@ -176,7 +178,7 @@ struct rvin_info {
*
* @crop: active cropping
* @compose: active composing
- * @source: active size of the video source
+ * @src_rect: active size of the video source
* @std: active video standard of the video source
*
* @alpha: Alpha component to fill in for supported pixel formats
@@ -215,7 +217,7 @@ struct rvin_dev {
struct v4l2_rect crop;
struct v4l2_rect compose;
- struct v4l2_rect source;
+ struct v4l2_rect src_rect;
v4l2_std_id std;
unsigned int alpha;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 608e5217ccd5..0f267a237b42 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -912,6 +912,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
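The added memset() clears the reserved words of the SDR format before it is copied back to user space, which the V4L2 API expects from drivers that do not otherwise fill them. A hedged sketch of a complete g_fmt handler doing the same (struct my_sdr and its fields are illustrative):

static int my_g_fmt_sdr_cap(struct my_sdr *sdr, struct v4l2_format *f)
{
	/* zero reserved fields so no stale kernel data reaches user space */
	memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
	f->fmt.sdr.pixelformat = sdr->pixelformat;
	f->fmt.sdr.buffersize = sdr->buffersize;

	return 0;
}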
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index cb93a13e1777..97bed45360f0 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2369,7 +2369,7 @@ static int fdp1_probe(struct platform_device *pdev)
dprintk(fdp1, "FDP1 Version R-Car H3\n");
break;
case FD1_IP_M3N:
- dprintk(fdp1, "FDP1 Version R-Car M3N\n");
+ dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
break;
case FD1_IP_E3:
dprintk(fdp1, "FDP1 Version R-Car E3\n");
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
index 1a65532dc36d..e80204f5720c 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -553,7 +553,7 @@ void camif_hw_disable_capture(struct camif_vp *vp)
void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
{
- struct {
+ static const struct {
u32 offset;
const char * const name;
} registers[] = {
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 6ddcc35b0bbd..2a3e7ffefe0a 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -239,7 +239,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_delete_adapter:
cec_delete_adapter(cec->adap);
@@ -250,7 +250,7 @@ static int s5p_cec_remove(struct platform_device *pdev)
{
struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
pm_runtime_disable(&pdev->dev);
return 0;
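cec_notifier_cec_adap_unregister() now takes the adapter as well as the notifier, mirroring cec_notifier_cec_adap_register(); the same pairing applies in probe error paths and in remove. A sketch of the register/unregister pair under that API (drv, hdmi_dev and ret are illustrative names, not taken from this driver):

	/* probe: bind the notifier to the HDMI device and the adapter */
	drv->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
						       drv->adap);
	if (!drv->notifier)
		return -ENOMEM;

	/* remove (or probe unwind): pass both back again */
	cec_notifier_cec_adap_unregister(drv->notifier, drv->adap);
	cec_unregister_adapter(drv->adap);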
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 8dbbd5f2a40a..ac2162235cef 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1236,7 +1236,6 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
}
result->sof = sof;
result->sof_len = sof_len;
- result->components = components;
return true;
}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 3bc52f83f5bc..4407fe775afa 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -190,7 +190,6 @@ struct s5p_jpeg_marker {
* @dqt: DQT markers' positions relative to the buffer beginning
* @sof: SOF0 marker's position relative to the buffer beginning
* @sof_len: SOF0 marker's payload length (without length field itself)
- * @components: number of image components
* @size: image buffer size in bytes
*/
struct s5p_jpeg_q_data {
@@ -202,7 +201,6 @@ struct s5p_jpeg_q_data {
struct s5p_jpeg_marker dqt;
u32 sof;
u32 sof_len;
- u32 components;
u32 size;
};
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
index 9cd60fe1867c..2ff62a488b27 100644
--- a/drivers/media/platform/seco-cec/seco-cec.c
+++ b/drivers/media/platform/seco-cec/seco-cec.c
@@ -671,10 +671,11 @@ static int secocec_probe(struct platform_device *pdev)
return ret;
err_notifier:
- cec_notifier_cec_adap_unregister(secocec->notifier);
+ cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
err_delete_adapter:
cec_delete_adapter(secocec->cec_adap);
err:
+ release_region(BRA_SMB_BASE_ADDR, 7);
dev_err(dev, "%s device probe failed\n", dev_name(dev));
return ret;
@@ -692,7 +693,7 @@ static int secocec_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "IR disabled");
}
- cec_notifier_cec_adap_unregister(secocec->notifier);
+ cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
cec_unregister_adapter(secocec->cec_adap);
release_region(BRA_SMB_BASE_ADDR, 7);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index e90f1ba30574..675b5f2b4c2e 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -651,8 +651,7 @@ static int bdisp_release(struct file *file)
dev_dbg(bdisp->dev, "%s\n", __func__);
- if (mutex_lock_interruptible(&bdisp->lock))
- return -ERESTARTSYS;
+ mutex_lock(&bdisp->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
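The bdisp change above drops mutex_lock_interruptible() from the release handler: the return value of .release is not propagated to user space, so returning -ERESTARTSYS would only skip the context teardown and leak it. A condensed sketch of the pattern (struct my_ctx and its members are illustrative):

static int my_release(struct file *file)
{
	struct my_ctx *ctx = file->private_data;

	/* never bail out of release(); the cleanup below must run */
	mutex_lock(&ctx->dev->lock);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	mutex_unlock(&ctx->dev->lock);
	kfree(ctx);

	return 0;
}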
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
index 8f0ddcbeed9d..301fa10f419b 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
@@ -225,36 +225,16 @@ static const struct debugfs_reg32 fei_sys_regs[] = {
void c8sectpfe_debugfs_init(struct c8sectpfei *fei)
{
- struct dentry *root;
- struct dentry *file;
-
- root = debugfs_create_dir("c8sectpfe", NULL);
- if (!root)
- goto err;
-
- fei->root = root;
-
fei->regset = devm_kzalloc(fei->dev, sizeof(*fei->regset), GFP_KERNEL);
if (!fei->regset)
- goto err;
+ return;
fei->regset->regs = fei_sys_regs;
fei->regset->nregs = ARRAY_SIZE(fei_sys_regs);
fei->regset->base = fei->io;
- file = debugfs_create_regset32("registers", S_IRUGO, root,
- fei->regset);
- if (!file) {
- dev_err(fei->dev,
- "%s not able to create 'registers' debugfs\n"
- , __func__);
- goto err;
- }
-
- return;
-
-err:
- debugfs_remove_recursive(root);
+ fei->root = debugfs_create_dir("c8sectpfe", NULL);
+ debugfs_create_regset32("registers", S_IRUGO, fei->root, fei->regset);
}
void c8sectpfe_debugfs_exit(struct c8sectpfei *fei)
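The simplification is possible because the debugfs_create_*() helpers are meant to be fire-and-forget: callers are not expected to check their results, and a failure simply means the debug files are absent. The whole init collapses to two calls, roughly as below (struct my_dev and the "my-driver" name are illustrative):

static void my_debugfs_init(struct my_dev *d)
{
	/* no error checking by design; the debugfs core copes with a
	 * failed parent and the files just do not appear */
	d->root = debugfs_create_dir("my-driver", NULL);
	debugfs_create_regset32("registers", 0444, d->root, d->regset);
}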
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c
index 8118c7365d3f..f0c73e64b586 100644
--- a/drivers/media/platform/sti/cec/stih-cec.c
+++ b/drivers/media/platform/sti/cec/stih-cec.c
@@ -359,7 +359,7 @@ static int stih_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_delete_adapter:
cec_delete_adapter(cec->adap);
@@ -370,7 +370,7 @@ static int stih_cec_remove(struct platform_device *pdev)
{
struct stih_cec *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile
index a05127529006..3878cb4efdc2 100644
--- a/drivers/media/platform/sunxi/Makefile
+++ b/drivers/media/platform/sunxi/Makefile
@@ -1,2 +1,3 @@
obj-y += sun4i-csi/
obj-y += sun6i-csi/
+obj-y += sun8i-di/
diff --git a/drivers/media/platform/sunxi/sun8i-di/Makefile b/drivers/media/platform/sunxi/sun8i-di/Makefile
new file mode 100644
index 000000000000..109f7e5442b7
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_SUN8I_DEINTERLACE) += sun8i-di.o
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
new file mode 100644
index 000000000000..aaa1dc159ac2
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner sun8i deinterlacer with scaler driver
+ *
+ * Copyright (C) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * Based on vim2m driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "sun8i-di.h"
+
+#define FLAG_SIZE (DEINTERLACE_MAX_WIDTH * DEINTERLACE_MAX_HEIGHT / 4)
+
+static u32 deinterlace_formats[] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+};
+
+static inline u32 deinterlace_read(struct deinterlace_dev *dev, u32 reg)
+{
+ return readl(dev->base + reg);
+}
+
+static inline void deinterlace_write(struct deinterlace_dev *dev,
+ u32 reg, u32 value)
+{
+ writel(value, dev->base + reg);
+}
+
+static inline void deinterlace_set_bits(struct deinterlace_dev *dev,
+ u32 reg, u32 bits)
+{
+ writel(readl(dev->base + reg) | bits, dev->base + reg);
+}
+
+static inline void deinterlace_clr_set_bits(struct deinterlace_dev *dev,
+ u32 reg, u32 clr, u32 set)
+{
+ u32 val = readl(dev->base + reg);
+
+ val &= ~clr;
+ val |= set;
+
+ writel(val, dev->base + reg);
+}
+
+static void deinterlace_device_run(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *dev = ctx->dev;
+ u32 size, stride, width, height, val;
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned int hstep, vstep;
+ dma_addr_t addr;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ deinterlace_write(dev, DEINTERLACE_MOD_ENABLE,
+ DEINTERLACE_MOD_ENABLE_EN);
+
+ if (ctx->field) {
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG0,
+ ctx->flag1_buf_dma);
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG1,
+ ctx->flag2_buf_dma);
+ } else {
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG0,
+ ctx->flag2_buf_dma);
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG1,
+ ctx->flag1_buf_dma);
+ }
+ deinterlace_write(dev, DEINTERLACE_FLAG_LINE_STRIDE, 0x200);
+
+ width = ctx->src_fmt.width;
+ height = ctx->src_fmt.height;
+ stride = ctx->src_fmt.bytesperline;
+ size = stride * height;
+
+ addr = vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR0, addr);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR1, addr + size);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR2, 0);
+
+ deinterlace_write(dev, DEINTERLACE_LINE_STRIDE0, stride);
+ deinterlace_write(dev, DEINTERLACE_LINE_STRIDE1, stride);
+
+ deinterlace_write(dev, DEINTERLACE_CH0_IN_SIZE,
+ DEINTERLACE_SIZE(width, height));
+ deinterlace_write(dev, DEINTERLACE_CH1_IN_SIZE,
+ DEINTERLACE_SIZE(width / 2, height / 2));
+
+ val = DEINTERLACE_IN_FMT_FMT(DEINTERLACE_IN_FMT_YUV420) |
+ DEINTERLACE_IN_FMT_MOD(DEINTERLACE_MODE_UV_COMBINED);
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_UVUV);
+ break;
+ case V4L2_PIX_FMT_NV21:
+ val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_VUVU);
+ break;
+ }
+ deinterlace_write(dev, DEINTERLACE_IN_FMT, val);
+
+ if (ctx->prev)
+ addr = vb2_dma_contig_plane_dma_addr(&ctx->prev->vb2_buf, 0);
+
+ deinterlace_write(dev, DEINTERLACE_PRELUMA, addr);
+ deinterlace_write(dev, DEINTERLACE_PRECHROMA, addr + size);
+
+ val = DEINTERLACE_OUT_FMT_FMT(DEINTERLACE_OUT_FMT_YUV420SP);
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_UVUV);
+ break;
+ case V4L2_PIX_FMT_NV21:
+ val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_VUVU);
+ break;
+ }
+ deinterlace_write(dev, DEINTERLACE_OUT_FMT, val);
+
+ width = ctx->dst_fmt.width;
+ height = ctx->dst_fmt.height;
+ stride = ctx->dst_fmt.bytesperline;
+ size = stride * height;
+
+ deinterlace_write(dev, DEINTERLACE_CH0_OUT_SIZE,
+ DEINTERLACE_SIZE(width, height));
+ deinterlace_write(dev, DEINTERLACE_CH1_OUT_SIZE,
+ DEINTERLACE_SIZE(width / 2, height / 2));
+
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE0, stride);
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE1, stride);
+
+ addr = vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR0, addr);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR1, addr + size);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR2, 0);
+
+ hstep = (ctx->src_fmt.width << 16) / ctx->dst_fmt.width;
+ vstep = (ctx->src_fmt.height << 16) / ctx->dst_fmt.height;
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_FACT, vstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_START);
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_REG_READY);
+
+ deinterlace_set_bits(dev, DEINTERLACE_INT_ENABLE,
+ DEINTERLACE_INT_ENABLE_WB_EN);
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_WB_EN);
+}
+
+static int deinterlace_job_ready(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 1 &&
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 2;
+}
+
+static void deinterlace_job_abort(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+static irqreturn_t deinterlace_irq(int irq, void *data)
+{
+ struct deinterlace_dev *dev = data;
+ struct vb2_v4l2_buffer *src, *dst;
+ enum vb2_buffer_state state;
+ struct deinterlace_ctx *ctx;
+ unsigned int val;
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_NONE;
+ }
+
+ val = deinterlace_read(dev, DEINTERLACE_INT_STATUS);
+ if (!(val & DEINTERLACE_INT_STATUS_WRITEBACK))
+ return IRQ_NONE;
+
+ deinterlace_write(dev, DEINTERLACE_INT_ENABLE, 0);
+ deinterlace_set_bits(dev, DEINTERLACE_INT_STATUS,
+ DEINTERLACE_INT_STATUS_WRITEBACK);
+ deinterlace_write(dev, DEINTERLACE_MOD_ENABLE, 0);
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_START, 0);
+
+ val = deinterlace_read(dev, DEINTERLACE_STATUS);
+ if (val & DEINTERLACE_STATUS_WB_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(dst, state);
+
+ if (ctx->field != ctx->first_field || ctx->aborting) {
+ ctx->field = ctx->first_field;
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (ctx->prev)
+ v4l2_m2m_buf_done(ctx->prev, state);
+ ctx->prev = src;
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ } else {
+ ctx->field = !ctx->first_field;
+ deinterlace_device_run(ctx);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void deinterlace_init(struct deinterlace_dev *dev)
+{
+ u32 val;
+ int i;
+
+ deinterlace_write(dev, DEINTERLACE_BYPASS,
+ DEINTERLACE_BYPASS_CSC);
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE_CTRL,
+ DEINTERLACE_WB_LINE_STRIDE_CTRL_EN);
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_OUT_CTRL);
+ deinterlace_write(dev, DEINTERLACE_AGTH_SEL,
+ DEINTERLACE_AGTH_SEL_LINEBUF);
+
+ val = DEINTERLACE_CTRL_EN |
+ DEINTERLACE_CTRL_MODE_MIXED |
+ DEINTERLACE_CTRL_DIAG_INTP_EN |
+ DEINTERLACE_CTRL_TEMP_DIFF_EN;
+ deinterlace_write(dev, DEINTERLACE_CTRL, val);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_LUMA_TH,
+ DEINTERLACE_LUMA_TH_MIN_LUMA_MSK,
+ DEINTERLACE_LUMA_TH_MIN_LUMA(4));
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_SPAT_COMP,
+ DEINTERLACE_SPAT_COMP_TH2_MSK,
+ DEINTERLACE_SPAT_COMP_TH2(5));
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_TEMP_DIFF,
+ DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK,
+ DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(5));
+
+ val = DEINTERLACE_DIAG_INTP_TH0(60) |
+ DEINTERLACE_DIAG_INTP_TH1(0) |
+ DEINTERLACE_DIAG_INTP_TH3(30);
+ deinterlace_write(dev, DEINTERLACE_DIAG_INTP, val);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
+ DEINTERLACE_CHROMA_DIFF_TH_MSK,
+ DEINTERLACE_CHROMA_DIFF_TH(5));
+
+ /* neutral filter coefficients */
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS);
+ readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
+ val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
+
+ for (i = 0; i < 32; i++) {
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ }
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
+}
+
+static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct deinterlace_ctx, fh);
+}
+
+static bool deinterlace_check_format(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(deinterlace_formats); i++)
+ if (deinterlace_formats[i] == pixelformat)
+ return true;
+
+ return false;
+}
+
+static void deinterlace_prepare_format(struct v4l2_pix_format *pix_fmt)
+{
+ unsigned int height = pix_fmt->height;
+ unsigned int width = pix_fmt->width;
+ unsigned int bytesperline;
+ unsigned int sizeimage;
+
+ width = clamp(width, DEINTERLACE_MIN_WIDTH,
+ DEINTERLACE_MAX_WIDTH);
+ height = clamp(height, DEINTERLACE_MIN_HEIGHT,
+ DEINTERLACE_MAX_HEIGHT);
+
+ bytesperline = ALIGN(width, 2);
+ /* luma */
+ sizeimage = bytesperline * height;
+ /* chroma */
+ sizeimage += bytesperline * height / 2;
+
+ pix_fmt->width = width;
+ pix_fmt->height = height;
+ pix_fmt->bytesperline = bytesperline;
+ pix_fmt->sizeimage = sizeimage;
+}
+
+static int deinterlace_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DEINTERLACE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, DEINTERLACE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", DEINTERLACE_NAME);
+
+ return 0;
+}
+
+static int deinterlace_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index < ARRAY_SIZE(deinterlace_formats)) {
+ f->pixelformat = deinterlace_formats[f->index];
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int deinterlace_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ if (!deinterlace_check_format(fsize->pixel_format))
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = DEINTERLACE_MIN_WIDTH;
+ fsize->stepwise.min_height = DEINTERLACE_MIN_HEIGHT;
+ fsize->stepwise.max_width = DEINTERLACE_MAX_WIDTH;
+ fsize->stepwise.max_height = DEINTERLACE_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int deinterlace_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+
+ f->fmt.pix = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int deinterlace_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+
+ f->fmt.pix = ctx->src_fmt;
+
+ return 0;
+}
+
+static int deinterlace_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!deinterlace_check_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = deinterlace_formats[0];
+
+ if (f->fmt.pix.field != V4L2_FIELD_NONE)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ deinterlace_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int deinterlace_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!deinterlace_check_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = deinterlace_formats[0];
+
+ if (f->fmt.pix.field != V4L2_FIELD_INTERLACED_TB &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED_BT &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ deinterlace_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int deinterlace_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = deinterlace_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->dst_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int deinterlace_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = deinterlace_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->src_fmt = f->fmt.pix;
+
+ /* Propagate colorspace information to capture. */
+ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
+ ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
+ ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->dst_fmt.quantization = f->fmt.pix.quantization;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
+ .vidioc_querycap = deinterlace_querycap,
+
+ .vidioc_enum_framesizes = deinterlace_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = deinterlace_enum_fmt,
+ .vidioc_g_fmt_vid_cap = deinterlace_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = deinterlace_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = deinterlace_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = deinterlace_enum_fmt,
+ .vidioc_g_fmt_vid_out = deinterlace_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = deinterlace_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = deinterlace_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int deinterlace_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (*nplanes) {
+ if (sizes[0] < pix_fmt->sizeimage)
+ return -EINVAL;
+ } else {
+ sizes[0] = pix_fmt->sizeimage;
+ *nplanes = 1;
+ }
+
+ return 0;
+}
+
+static int deinterlace_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+
+ return 0;
+}
+
+static void deinterlace_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void deinterlace_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf;
+
+ do {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (vbuf)
+ v4l2_m2m_buf_done(vbuf, state);
+ } while (vbuf);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) && ctx->prev)
+ v4l2_m2m_buf_done(ctx->prev, state);
+}
+
+static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx->dev->dev;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "Failed to enable module\n");
+			pm_runtime_put_noidle(dev);
+
+			goto err_runtime_get;
+		}
+
+ ctx->first_field =
+ ctx->src_fmt.field == V4L2_FIELD_INTERLACED_BT;
+ ctx->field = ctx->first_field;
+
+ ctx->prev = NULL;
+ ctx->aborting = 0;
+
+ ctx->flag1_buf = dma_alloc_coherent(dev, FLAG_SIZE,
+ &ctx->flag1_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->flag1_buf) {
+ ret = -ENOMEM;
+
+ goto err_no_mem1;
+ }
+
+ ctx->flag2_buf = dma_alloc_coherent(dev, FLAG_SIZE,
+ &ctx->flag2_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->flag2_buf) {
+ ret = -ENOMEM;
+
+ goto err_no_mem2;
+ }
+ }
+
+ return 0;
+
+err_no_mem2:
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf,
+ ctx->flag1_buf_dma);
+err_no_mem1:
+ pm_runtime_put(dev);
+err_runtime_get:
+ deinterlace_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void deinterlace_stop_streaming(struct vb2_queue *vq)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ struct device *dev = ctx->dev->dev;
+
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf,
+ ctx->flag1_buf_dma);
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag2_buf,
+ ctx->flag2_buf_dma);
+
+ pm_runtime_put(dev);
+ }
+
+ deinterlace_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops deinterlace_qops = {
+ .queue_setup = deinterlace_queue_setup,
+ .buf_prepare = deinterlace_buf_prepare,
+ .buf_queue = deinterlace_buf_queue,
+ .start_streaming = deinterlace_start_streaming,
+ .stop_streaming = deinterlace_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int deinterlace_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct deinterlace_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->min_buffers_needed = 1;
+ src_vq->ops = &deinterlace_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->min_buffers_needed = 2;
+ dst_vq->ops = &deinterlace_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int deinterlace_open(struct file *file)
+{
+ struct deinterlace_dev *dev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(&dev->dev_mutex);
+ return -ENOMEM;
+ }
+
+ /* default output format */
+ ctx->src_fmt.pixelformat = deinterlace_formats[0];
+ ctx->src_fmt.field = V4L2_FIELD_INTERLACED;
+ ctx->src_fmt.width = 640;
+ ctx->src_fmt.height = 480;
+ deinterlace_prepare_format(&ctx->src_fmt);
+
+ /* default capture format */
+ ctx->dst_fmt.pixelformat = deinterlace_formats[0];
+ ctx->dst_fmt.field = V4L2_FIELD_NONE;
+ ctx->dst_fmt.width = 640;
+ ctx->dst_fmt.height = 480;
+ deinterlace_prepare_format(&ctx->dst_fmt);
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &deinterlace_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_free;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+
+err_free:
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int deinterlace_release(struct file *file)
+{
+ struct deinterlace_dev *dev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = container_of(file->private_data,
+ struct deinterlace_ctx, fh);
+
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations deinterlace_fops = {
+ .owner = THIS_MODULE,
+ .open = deinterlace_open,
+ .release = deinterlace_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device deinterlace_video_device = {
+ .name = DEINTERLACE_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &deinterlace_fops,
+ .ioctl_ops = &deinterlace_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops deinterlace_m2m_ops = {
+ .device_run = deinterlace_device_run,
+ .job_ready = deinterlace_job_ready,
+ .job_abort = deinterlace_job_abort,
+};
+
+static int deinterlace_probe(struct platform_device *pdev)
+{
+ struct deinterlace_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int irq, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vfd = deinterlace_video_device;
+ dev->dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev->dev, "Failed to get IRQ\n");
+
+ return irq;
+ }
+
+ ret = devm_request_irq(dev->dev, irq, deinterlace_irq,
+ 0, dev_name(dev->dev), dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to request IRQ\n");
+
+ return ret;
+ }
+
+ ret = of_dma_configure(dev->dev, dev->dev->of_node, true);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->base)) {
+ dev_err(dev->dev, "Failed to map registers\n");
+
+ return PTR_ERR(dev->base);
+ }
+
+ dev->bus_clk = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->bus_clk)) {
+ dev_err(dev->dev, "Failed to get bus clock\n");
+
+ return PTR_ERR(dev->bus_clk);
+ }
+
+ dev->mod_clk = devm_clk_get(dev->dev, "mod");
+ if (IS_ERR(dev->mod_clk)) {
+ dev_err(dev->dev, "Failed to get mod clock\n");
+
+ return PTR_ERR(dev->mod_clk);
+ }
+
+ dev->ram_clk = devm_clk_get(dev->dev, "ram");
+ if (IS_ERR(dev->ram_clk)) {
+ dev_err(dev->dev, "Failed to get ram clock\n");
+
+ return PTR_ERR(dev->ram_clk);
+ }
+
+ dev->rstc = devm_reset_control_get(dev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ dev_err(dev->dev, "Failed to get reset control\n");
+
+ return PTR_ERR(dev->rstc);
+ }
+
+ mutex_init(&dev->dev_mutex);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register V4L2 device\n");
+
+ return ret;
+ }
+
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s",
+ deinterlace_video_device.name);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+
+ goto err_v4l2;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ dev->m2m_dev = v4l2_m2m_init(&deinterlace_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+
+ goto err_video;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ pm_runtime_enable(dev->dev);
+
+ return 0;
+
+err_video:
+ video_unregister_device(&dev->vfd);
+err_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int deinterlace_remove(struct platform_device *pdev)
+{
+ struct deinterlace_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int deinterlace_runtime_resume(struct device *device)
+{
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+ int ret;
+
+ ret = clk_set_rate_exclusive(dev->mod_clk, 300000000);
+ if (ret) {
+ dev_err(dev->dev, "Failed to set exclusive mod clock rate\n");
+
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev->bus_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable bus clock\n");
+
+		goto err_exclusive_rate;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable mod clock\n");
+
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(dev->ram_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable ram clock\n");
+
+ goto err_mod_clk;
+ }
+
+ ret = reset_control_deassert(dev->rstc);
+ if (ret) {
+		dev_err(dev->dev, "Failed to deassert reset\n");
+
+ goto err_ram_clk;
+ }
+
+ deinterlace_init(dev);
+
+ return 0;
+
+err_ram_clk:
+	clk_disable_unprepare(dev->ram_clk);
+err_mod_clk:
+	clk_disable_unprepare(dev->mod_clk);
+err_bus_clk:
+	clk_disable_unprepare(dev->bus_clk);
+err_exclusive_rate:
+	clk_rate_exclusive_put(dev->mod_clk);
+
+ return ret;
+}
+
+static int deinterlace_runtime_suspend(struct device *device)
+{
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+
+ reset_control_assert(dev->rstc);
+
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->bus_clk);
+ clk_rate_exclusive_put(dev->mod_clk);
+
+ return 0;
+}
+
+static const struct of_device_id deinterlace_dt_match[] = {
+ { .compatible = "allwinner,sun8i-h3-deinterlace" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, deinterlace_dt_match);
+
+static const struct dev_pm_ops deinterlace_pm_ops = {
+ .runtime_resume = deinterlace_runtime_resume,
+ .runtime_suspend = deinterlace_runtime_suspend,
+};
+
+static struct platform_driver deinterlace_driver = {
+ .probe = deinterlace_probe,
+ .remove = deinterlace_remove,
+ .driver = {
+ .name = DEINTERLACE_NAME,
+ .of_match_table = deinterlace_dt_match,
+ .pm = &deinterlace_pm_ops,
+ },
+};
+module_platform_driver(deinterlace_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner Deinterlace driver");
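From user space the new device is an ordinary V4L2 mem2mem node: the OUTPUT queue takes interlaced NV12/NV21 frames, the CAPTURE queue produces progressive ones, and each source frame yields two destination frames (the driver's job_ready() wants one source and two destination buffers). A hedged user-space sketch of the format setup; /dev/video0 and 720x576 are only examples and all error handling and buffer mapping are omitted:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int deinterlacer_setup(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
	fmt.fmt.pix.width = 720;
	fmt.fmt.pix.height = 576;
	fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
	ioctl(fd, VIDIOC_S_FMT, &fmt);		/* interlaced source side */

	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	ioctl(fd, VIDIOC_S_FMT, &fmt);		/* progressive result side */

	/* then REQBUFS at least 1 OUTPUT and 2 CAPTURE buffers,
	 * QBUF/STREAMON as with any m2m device */
	return fd;
}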
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h
new file mode 100644
index 000000000000..0254251d8687
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Allwinner Deinterlace driver
+ *
+ * Copyright (C) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#ifndef _SUN8I_DEINTERLACE_H_
+#define _SUN8I_DEINTERLACE_H_
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <linux/platform_device.h>
+
+#define DEINTERLACE_NAME "sun8i-di"
+
+#define DEINTERLACE_MOD_ENABLE 0x00
+#define DEINTERLACE_MOD_ENABLE_EN BIT(0)
+
+#define DEINTERLACE_FRM_CTRL 0x04
+#define DEINTERLACE_FRM_CTRL_REG_READY BIT(0)
+#define DEINTERLACE_FRM_CTRL_WB_EN BIT(2)
+#define DEINTERLACE_FRM_CTRL_OUT_CTRL BIT(11)
+#define DEINTERLACE_FRM_CTRL_START BIT(16)
+#define DEINTERLACE_FRM_CTRL_COEF_ACCESS BIT(23)
+
+#define DEINTERLACE_BYPASS 0x08
+#define DEINTERLACE_BYPASS_CSC BIT(1)
+
+#define DEINTERLACE_AGTH_SEL 0x0c
+#define DEINTERLACE_AGTH_SEL_LINEBUF BIT(8)
+
+#define DEINTERLACE_LINT_CTRL 0x10
+#define DEINTERLACE_TRD_PRELUMA 0x1c
+#define DEINTERLACE_BUF_ADDR0 0x20
+#define DEINTERLACE_BUF_ADDR1 0x24
+#define DEINTERLACE_BUF_ADDR2 0x28
+
+#define DEINTERLACE_FIELD_CTRL 0x2c
+#define DEINTERLACE_FIELD_CTRL_FIELD_CNT(v) ((v) & 0xff)
+#define DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK (0xff)
+
+#define DEINTERLACE_TB_OFFSET0 0x30
+#define DEINTERLACE_TB_OFFSET1 0x34
+#define DEINTERLACE_TB_OFFSET2 0x38
+#define DEINTERLACE_TRD_PRECHROMA 0x3c
+#define DEINTERLACE_LINE_STRIDE0 0x40
+#define DEINTERLACE_LINE_STRIDE1 0x44
+#define DEINTERLACE_LINE_STRIDE2 0x48
+
+#define DEINTERLACE_IN_FMT 0x4c
+#define DEINTERLACE_IN_FMT_PS(v) ((v) & 3)
+#define DEINTERLACE_IN_FMT_FMT(v) (((v) & 7) << 4)
+#define DEINTERLACE_IN_FMT_MOD(v) (((v) & 7) << 8)
+
+#define DEINTERLACE_WB_ADDR0 0x50
+#define DEINTERLACE_WB_ADDR1 0x54
+#define DEINTERLACE_WB_ADDR2 0x58
+
+#define DEINTERLACE_OUT_FMT 0x5c
+#define DEINTERLACE_OUT_FMT_FMT(v) ((v) & 0xf)
+#define DEINTERLACE_OUT_FMT_PS(v) (((v) & 3) << 5)
+
+#define DEINTERLACE_INT_ENABLE 0x60
+#define DEINTERLACE_INT_ENABLE_WB_EN BIT(7)
+
+#define DEINTERLACE_INT_STATUS 0x64
+#define DEINTERLACE_INT_STATUS_WRITEBACK BIT(7)
+
+#define DEINTERLACE_STATUS 0x68
+#define DEINTERLACE_STATUS_COEF_STATUS BIT(11)
+#define DEINTERLACE_STATUS_WB_ERROR BIT(12)
+
+#define DEINTERLACE_CSC_COEF 0x70 /* 12 registers */
+
+#define DEINTERLACE_CTRL 0xa0
+#define DEINTERLACE_CTRL_EN BIT(0)
+#define DEINTERLACE_CTRL_FLAG_OUT_EN BIT(8)
+#define DEINTERLACE_CTRL_MODE_PASSTHROUGH	(0 << 16)
+#define DEINTERLACE_CTRL_MODE_WEAVE (1 << 16)
+#define DEINTERLACE_CTRL_MODE_BOB (2 << 16)
+#define DEINTERLACE_CTRL_MODE_MIXED (3 << 16)
+#define DEINTERLACE_CTRL_DIAG_INTP_EN BIT(24)
+#define DEINTERLACE_CTRL_TEMP_DIFF_EN BIT(25)
+
+#define DEINTERLACE_DIAG_INTP 0xa4
+#define DEINTERLACE_DIAG_INTP_TH0(v) ((v) & 0x7f)
+#define DEINTERLACE_DIAG_INTP_TH0_MSK (0x7f)
+#define DEINTERLACE_DIAG_INTP_TH1(v) (((v) & 0x7f) << 8)
+#define DEINTERLACE_DIAG_INTP_TH1_MSK (0x7f << 8)
+#define DEINTERLACE_DIAG_INTP_TH3(v) (((v) & 0xff) << 24)
+#define DEINTERLACE_DIAG_INTP_TH3_MSK (0xff << 24)
+
+#define DEINTERLACE_TEMP_DIFF 0xa8
+#define DEINTERLACE_TEMP_DIFF_SAD_CENTRAL_TH(v) ((v) & 0x7f)
+#define DEINTERLACE_TEMP_DIFF_SAD_CENTRAL_TH_MSK (0x7f)
+#define DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(v) (((v) & 0x7f) << 8)
+#define DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK (0x7f << 8)
+#define DEINTERLACE_TEMP_DIFF_DIRECT_DITHER_TH(v) (((v) & 0x7ff) << 16)
+#define DEINTERLACE_TEMP_DIFF_DIRECT_DITHER_TH_MSK (0x7ff << 16)
+
+#define DEINTERLACE_LUMA_TH 0xac
+#define DEINTERLACE_LUMA_TH_MIN_LUMA(v) ((v) & 0xff)
+#define DEINTERLACE_LUMA_TH_MIN_LUMA_MSK (0xff)
+#define DEINTERLACE_LUMA_TH_MAX_LUMA(v) (((v) & 0xff) << 8)
+#define DEINTERLACE_LUMA_TH_MAX_LUMA_MSK (0xff << 8)
+#define DEINTERLACE_LUMA_TH_AVG_LUMA_SHIFT(v) (((v) & 0xff) << 16)
+#define DEINTERLACE_LUMA_TH_AVG_LUMA_SHIFT_MSK (0xff << 16)
+#define DEINTERLACE_LUMA_TH_PIXEL_STATIC(v) (((v) & 3) << 24)
+#define DEINTERLACE_LUMA_TH_PIXEL_STATIC_MSK (3 << 24)
+
+#define DEINTERLACE_SPAT_COMP 0xb0
+#define DEINTERLACE_SPAT_COMP_TH2(v) ((v) & 0xff)
+#define DEINTERLACE_SPAT_COMP_TH2_MSK (0xff)
+#define DEINTERLACE_SPAT_COMP_TH3(v) (((v) & 0xff) << 16)
+#define DEINTERLACE_SPAT_COMP_TH3_MSK (0xff << 16)
+
+#define DEINTERLACE_CHROMA_DIFF 0xb4
+#define DEINTERLACE_CHROMA_DIFF_TH(v) ((v) & 0xff)
+#define DEINTERLACE_CHROMA_DIFF_TH_MSK (0xff)
+#define DEINTERLACE_CHROMA_DIFF_LUMA(v) (((v) & 0x3f) << 16)
+#define DEINTERLACE_CHROMA_DIFF_LUMA_MSK (0x3f << 16)
+#define DEINTERLACE_CHROMA_DIFF_CHROMA(v) (((v) & 0x3f) << 24)
+#define DEINTERLACE_CHROMA_DIFF_CHROMA_MSK (0x3f << 24)
+
+#define DEINTERLACE_PRELUMA 0xb8
+#define DEINTERLACE_PRECHROMA 0xbc
+#define DEINTERLACE_TILE_FLAG0 0xc0
+#define DEINTERLACE_TILE_FLAG1 0xc4
+#define DEINTERLACE_FLAG_LINE_STRIDE 0xc8
+#define DEINTERLACE_FLAG_SEQ 0xcc
+
+#define DEINTERLACE_WB_LINE_STRIDE_CTRL 0xd0
+#define DEINTERLACE_WB_LINE_STRIDE_CTRL_EN BIT(0)
+
+#define DEINTERLACE_WB_LINE_STRIDE0 0xd4
+#define DEINTERLACE_WB_LINE_STRIDE1 0xd8
+#define DEINTERLACE_WB_LINE_STRIDE2 0xdc
+#define DEINTERLACE_TRD_CTRL 0xe0
+#define DEINTERLACE_TRD_BUF_ADDR0 0xe4
+#define DEINTERLACE_TRD_BUF_ADDR1 0xe8
+#define DEINTERLACE_TRD_BUF_ADDR2 0xec
+#define DEINTERLACE_TRD_TB_OFF0 0xf0
+#define DEINTERLACE_TRD_TB_OFF1 0xf4
+#define DEINTERLACE_TRD_TB_OFF2 0xf8
+#define DEINTERLACE_TRD_WB_STRIDE 0xfc
+#define DEINTERLACE_CH0_IN_SIZE 0x100
+#define DEINTERLACE_CH0_OUT_SIZE 0x104
+#define DEINTERLACE_CH0_HORZ_FACT 0x108
+#define DEINTERLACE_CH0_VERT_FACT 0x10c
+#define DEINTERLACE_CH0_HORZ_PHASE 0x110
+#define DEINTERLACE_CH0_VERT_PHASE0 0x114
+#define DEINTERLACE_CH0_VERT_PHASE1 0x118
+#define DEINTERLACE_CH0_HORZ_TAP0 0x120
+#define DEINTERLACE_CH0_HORZ_TAP1 0x124
+#define DEINTERLACE_CH0_VERT_TAP 0x128
+#define DEINTERLACE_CH1_IN_SIZE 0x200
+#define DEINTERLACE_CH1_OUT_SIZE 0x204
+#define DEINTERLACE_CH1_HORZ_FACT 0x208
+#define DEINTERLACE_CH1_VERT_FACT 0x20c
+#define DEINTERLACE_CH1_HORZ_PHASE 0x210
+#define DEINTERLACE_CH1_VERT_PHASE0 0x214
+#define DEINTERLACE_CH1_VERT_PHASE1 0x218
+#define DEINTERLACE_CH1_HORZ_TAP0 0x220
+#define DEINTERLACE_CH1_HORZ_TAP1 0x224
+#define DEINTERLACE_CH1_VERT_TAP 0x228
+#define DEINTERLACE_CH0_HORZ_COEF0 0x400 /* 32 registers */
+#define DEINTERLACE_CH0_HORZ_COEF1 0x480 /* 32 registers */
+#define DEINTERLACE_CH0_VERT_COEF 0x500 /* 32 registers */
+#define DEINTERLACE_CH1_HORZ_COEF0 0x600 /* 32 registers */
+#define DEINTERLACE_CH1_HORZ_COEF1 0x680 /* 32 registers */
+#define DEINTERLACE_CH1_VERT_COEF 0x700 /* 32 registers */
+#define DEINTERLACE_CH3_HORZ_COEF0 0x800 /* 32 registers */
+#define DEINTERLACE_CH3_HORZ_COEF1 0x880 /* 32 registers */
+#define DEINTERLACE_CH3_VERT_COEF 0x900 /* 32 registers */
+
+#define DEINTERLACE_MIN_WIDTH 2U
+#define DEINTERLACE_MIN_HEIGHT 2U
+#define DEINTERLACE_MAX_WIDTH 2048U
+#define DEINTERLACE_MAX_HEIGHT 1100U
+
+#define DEINTERLACE_MODE_UV_COMBINED 2
+
+#define DEINTERLACE_IN_FMT_YUV420 2
+
+#define DEINTERLACE_OUT_FMT_YUV420SP 13
+
+#define DEINTERLACE_PS_UVUV 0
+#define DEINTERLACE_PS_VUVU 1
+
+#define DEINTERLACE_IDENTITY_COEF 0x4000
+
+#define DEINTERLACE_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
+
+struct deinterlace_ctx {
+ struct v4l2_fh fh;
+ struct deinterlace_dev *dev;
+
+ struct v4l2_pix_format src_fmt;
+ struct v4l2_pix_format dst_fmt;
+
+ void *flag1_buf;
+ dma_addr_t flag1_buf_dma;
+
+ void *flag2_buf;
+ dma_addr_t flag2_buf_dma;
+
+ struct vb2_v4l2_buffer *prev;
+
+ unsigned int first_field;
+ unsigned int field;
+
+ int aborting;
+};
+
+struct deinterlace_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ /* Device file mutex */
+ struct mutex dev_mutex;
+
+ void __iomem *base;
+
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct clk *ram_clk;
+
+ struct reset_control *rstc;
+};
+
+#endif
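The *_SIZE registers pack the geometry as (height - 1) << 16 | (width - 1), and the chroma channel is programmed at half size in both directions for the 4:2:0 formats. A stand-alone check of the macro's packing, as a user-space sketch that only prints the values:

#include <stdio.h>

#define DEINTERLACE_SIZE(w, h)	(((h) - 1) << 16 | ((w) - 1))

int main(void)
{
	/* luma plane and its half-size chroma plane for 1920x1080 */
	printf("%08x\n", DEINTERLACE_SIZE(1920, 1080));		/* 0437077f */
	printf("%08x\n", DEINTERLACE_SIZE(1920 / 2, 1080 / 2));	/* 021b03bf */
	return 0;
}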
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index a632602131f2..a99caac59f44 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -409,7 +409,7 @@ static int tegra_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_adapter:
cec_delete_adapter(cec->adap);
err_clk:
@@ -423,7 +423,7 @@ static int tegra_cec_remove(struct platform_device *pdev)
clk_disable_unprepare(cec->clk);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index eda2a5985da7..834114a4eebe 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -15,76 +15,96 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
#include "csc.h"
/*
- * 16 coefficients in the order:
+ * 12 coefficients in the order:
* a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
- * (we may need to pass non-default values from user space later on, we might
- * need to make the coefficient struct more easy to populate)
*/
-struct colorspace_coeffs {
- u16 sd[12];
- u16 hd[12];
+struct quantization {
+ u16 coeff[12];
};
-/* VIDEO_RANGE: limited range, GRAPHICS_RANGE: full range */
-#define CSC_COEFFS_VIDEO_RANGE_Y2R 0
-#define CSC_COEFFS_GRAPHICS_RANGE_Y2R 1
-#define CSC_COEFFS_VIDEO_RANGE_R2Y 2
-#define CSC_COEFFS_GRAPHICS_RANGE_R2Y 3
+struct colorspace {
+ struct quantization limited;
+ struct quantization full;
+};
+
+struct encoding_direction {
+ struct colorspace r601;
+ struct colorspace r709;
+};
+
+struct csc_coeffs {
+ struct encoding_direction y2r;
+ struct encoding_direction r2y;
+};
/* default colorspace coefficients */
-static struct colorspace_coeffs colorspace_coeffs[4] = {
- [CSC_COEFFS_VIDEO_RANGE_Y2R] = {
- {
- /* SDTV */
- 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
- 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+static struct csc_coeffs csc_coeffs = {
+ .y2r = {
+ .r601 = {
+ .limited = {
+ { /* SDTV */
+ 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
+ 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+ }
+ },
+ .full = {
+ { /* SDTV */
+ 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
+ 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ }
+ },
},
- {
- /* HDTV */
- 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
- 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ .r709 = {
+ .limited = {
+ { /* HDTV */
+ 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
+ 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ }
+ },
+ .full = {
+ { /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ }
+ },
},
},
- [CSC_COEFFS_GRAPHICS_RANGE_Y2R] = {
- {
- /* SDTV */
- 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
- 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ .r2y = {
+ .r601 = {
+ .limited = {
+ { /* SDTV */
+ 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
+ 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
+ }
+ },
+ .full = {
+ { /* SDTV */
+ 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
+ 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
+ }
+ },
},
- {
- /* HDTV */
- 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
- 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
- },
- },
- [CSC_COEFFS_VIDEO_RANGE_R2Y] = {
- {
- /* SDTV */
- 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
- 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
- },
- {
- /* HDTV */
- 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
- 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
- },
- },
- [CSC_COEFFS_GRAPHICS_RANGE_R2Y] = {
- {
- /* SDTV */
- 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
- 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
- },
- {
- /* HDTV */
- 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
- 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ .r709 = {
+ .limited = {
+ { /* HDTV */
+ 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
+ 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
+ }
+ },
+ .full = {
+ { /* HDTV */
+ 0x00bb, 0x0275, 0x003f, 0x1f99, 0x1ea5, 0x01c2,
+ 0x01c2, 0x1e67, 0x1fd7, 0x0040, 0x0200, 0x0200,
+ }
+ },
},
},
+
};
void csc_dump_regs(struct csc_data *csc)
@@ -117,46 +137,114 @@ EXPORT_SYMBOL(csc_set_coeff_bypass);
* set the color space converter coefficient shadow register values
*/
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
- enum v4l2_colorspace src_colorspace,
- enum v4l2_colorspace dst_colorspace)
+ struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt)
{
u32 *csc_reg5 = csc_reg0 + 5;
u32 *shadow_csc = csc_reg0;
- struct colorspace_coeffs *sd_hd_coeffs;
u16 *coeff, *end_coeff;
- enum v4l2_colorspace yuv_colorspace;
- int sel = 0;
-
- /*
- * support only graphics data range(full range) for now, a control ioctl
- * would be nice here
- */
- /* Y2R */
- if (dst_colorspace == V4L2_COLORSPACE_SRGB &&
- (src_colorspace == V4L2_COLORSPACE_SMPTE170M ||
- src_colorspace == V4L2_COLORSPACE_REC709)) {
+ const struct v4l2_pix_format *pix;
+ const struct v4l2_pix_format_mplane *mp;
+ const struct v4l2_format_info *src_finfo, *dst_finfo;
+ enum v4l2_ycbcr_encoding src_ycbcr_enc, dst_ycbcr_enc;
+ enum v4l2_quantization src_quantization, dst_quantization;
+ u32 src_pixelformat, dst_pixelformat;
+
+ switch (src_fmt->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &src_fmt->fmt.pix;
+ src_pixelformat = pix->pixelformat;
+ src_ycbcr_enc = pix->ycbcr_enc;
+ src_quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ default:
+ mp = &src_fmt->fmt.pix_mp;
+ src_pixelformat = mp->pixelformat;
+ src_ycbcr_enc = mp->ycbcr_enc;
+ src_quantization = mp->quantization;
+ break;
+ }
+
+ switch (dst_fmt->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &dst_fmt->fmt.pix;
+ dst_pixelformat = pix->pixelformat;
+ dst_ycbcr_enc = pix->ycbcr_enc;
+ dst_quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ default:
+ mp = &dst_fmt->fmt.pix_mp;
+ dst_pixelformat = mp->pixelformat;
+ dst_ycbcr_enc = mp->ycbcr_enc;
+ dst_quantization = mp->quantization;
+ break;
+ }
+
+ src_finfo = v4l2_format_info(src_pixelformat);
+ dst_finfo = v4l2_format_info(dst_pixelformat);
+
+ if (v4l2_is_format_yuv(src_finfo) &&
+ v4l2_is_format_rgb(dst_finfo)) {
/* Y2R */
- sel = 1;
- yuv_colorspace = src_colorspace;
- } else if ((dst_colorspace == V4L2_COLORSPACE_SMPTE170M ||
- dst_colorspace == V4L2_COLORSPACE_REC709) &&
- src_colorspace == V4L2_COLORSPACE_SRGB) {
+
+ /*
+ * These are not the standard default values but are
+ * set this way for historical compatibility
+ */
+ if (src_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ src_ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (src_quantization == V4L2_QUANTIZATION_DEFAULT)
+ src_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (src_ycbcr_enc == V4L2_YCBCR_ENC_601) {
+ if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.y2r.r601.full.coeff;
+ else
+ coeff = csc_coeffs.y2r.r601.limited.coeff;
+ } else if (src_ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.y2r.r709.full.coeff;
+ else
+ coeff = csc_coeffs.y2r.r709.limited.coeff;
+ } else {
+ /* Should never reach this, but it keeps gcc happy */
+ coeff = csc_coeffs.y2r.r601.full.coeff;
+ }
+ } else if (v4l2_is_format_rgb(src_finfo) &&
+ v4l2_is_format_yuv(dst_finfo)) {
/* R2Y */
- sel = 3;
- yuv_colorspace = dst_colorspace;
+
+ /*
+ * These are not the standard default values but are
+ * set this way for historical compatibility
+ */
+ if (dst_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ dst_ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (dst_quantization == V4L2_QUANTIZATION_DEFAULT)
+ dst_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (dst_ycbcr_enc == V4L2_YCBCR_ENC_601) {
+ if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.r2y.r601.full.coeff;
+ else
+ coeff = csc_coeffs.r2y.r601.limited.coeff;
+ } else if (dst_ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.r2y.r709.full.coeff;
+ else
+ coeff = csc_coeffs.r2y.r709.limited.coeff;
+ } else {
+ /* Should never reach this, but it keeps gcc happy */
+ coeff = csc_coeffs.r2y.r601.full.coeff;
+ }
} else {
*csc_reg5 |= CSC_BYPASS;
return;
}
- sd_hd_coeffs = &colorspace_coeffs[sel];
-
- /* select between SD or HD coefficients */
- if (yuv_colorspace == V4L2_COLORSPACE_SMPTE170M)
- coeff = sd_hd_coeffs->sd;
- else
- coeff = sd_hd_coeffs->hd;
-
end_coeff = coeff + 12;
for (; coeff < end_coeff; coeff += 2)
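The open-coded encoding/quantization checks above pick one of the four Y2R tables; factored into a helper the selection reads as below. This is only a condensed sketch reusing the csc_coeffs table introduced by this patch, not code from the driver:

static const u16 *csc_pick_y2r(enum v4l2_ycbcr_encoding enc,
			       enum v4l2_quantization quant)
{
	const struct colorspace *cs;

	/* historical defaults kept by the driver: BT.601, full range */
	if (enc == V4L2_YCBCR_ENC_DEFAULT)
		enc = V4L2_YCBCR_ENC_601;
	if (quant == V4L2_QUANTIZATION_DEFAULT)
		quant = V4L2_QUANTIZATION_FULL_RANGE;

	cs = (enc == V4L2_YCBCR_ENC_709) ? &csc_coeffs.y2r.r709
					 : &csc_coeffs.y2r.r601;

	return (quant == V4L2_QUANTIZATION_FULL_RANGE) ? cs->full.coeff
						       : cs->limited.coeff;
}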
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
index de9a58af2ca8..af2e86bccf57 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -58,8 +58,8 @@ struct csc_data {
void csc_dump_regs(struct csc_data *csc);
void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
- enum v4l2_colorspace src_colorspace,
- enum v4l2_colorspace dst_colorspace);
+ struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt);
+
struct csc_data *csc_create(struct platform_device *pdev, const char *res_name);
#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 53d27cd6e10a..2e5148ae7a0f 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -56,6 +56,11 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
.data_type = DATA_TYPE_C420,
.depth = 4,
},
+ [VPDMA_DATA_FMT_CB420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CB420,
+ .depth = 4,
+ },
[VPDMA_DATA_FMT_YCR422] = {
.type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_YCR422,
@@ -759,7 +764,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("word1: line_length = %d, xfer_height = %d\n",
dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
- pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
+ pr_debug("word2: start_addr = %x\n", dtd->start_addr);
pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
dtd_get_pkt_type(dtd),
@@ -825,7 +830,8 @@ void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
channel = next_chan = raw_vpdma_chan;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
- fmt->data_type == DATA_TYPE_C420) {
+ (fmt->data_type == DATA_TYPE_C420 ||
+ fmt->data_type == DATA_TYPE_CB420)) {
rect.height >>= 1;
rect.top >>= 1;
depth = 8;
@@ -893,7 +899,8 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
channel = next_chan = chan_info[chan].num;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
- fmt->data_type == DATA_TYPE_C420) {
+ (fmt->data_type == DATA_TYPE_C420 ||
+ fmt->data_type == DATA_TYPE_CB420)) {
rect.height >>= 1;
rect.top >>= 1;
depth = 8;
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 28bc94129348..393fcbb3cb40 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -57,6 +57,7 @@ struct vpdma_data_format {
* line stride of source and dest
* buffers should be 16 byte aligned
*/
+#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
@@ -71,6 +72,7 @@ enum vpdma_yuv_formats {
VPDMA_DATA_FMT_C444,
VPDMA_DATA_FMT_C422,
VPDMA_DATA_FMT_C420,
+ VPDMA_DATA_FMT_CB420,
VPDMA_DATA_FMT_YCR422,
VPDMA_DATA_FMT_YC444,
VPDMA_DATA_FMT_CRY422,
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index c488609bc162..0bbee45338bd 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -92,6 +92,7 @@
#define DATA_TYPE_C444 0x4
#define DATA_TYPE_C422 0x5
#define DATA_TYPE_C420 0x6
+#define DATA_TYPE_CB420 0x16
#define DATA_TYPE_YC444 0x8
#define DATA_TYPE_YCB422 0x7
#define DATA_TYPE_YCR422 0x17
@@ -165,11 +166,11 @@ struct vpdma_dtd {
u32 xfer_length_height;
u32 w1;
};
- dma_addr_t start_addr;
+ u32 start_addr;
u32 pkt_ctl;
union {
u32 frame_width_height; /* inbound */
- dma_addr_t desc_write_addr; /* outbound */
+ u32 desc_write_addr; /* outbound */
};
union {
u32 start_h_v; /* inbound */
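start_addr and desc_write_addr become plain u32 because the descriptor layout is fixed by the VPDMA hardware; dma_addr_t grows to 64 bits on LPAE/arm64 configurations and would shift every following word. When the address is written it has to be truncated explicitly — a sketch of a guarded helper (the WARN_ON policy is an assumption for illustration, not the driver's code):

static inline void dtd_set_start_addr(struct vpdma_dtd *dtd, dma_addr_t addr)
{
	/* descriptor word is 32 bits wide; rely on the DMA mask to keep
	 * buffers below 4 GiB and complain loudly if that ever breaks */
	WARN_ON(upper_32_bits(addr));
	dtd->start_addr = lower_32_bits(addr);
}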
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 60b575bb44c4..65c2c048b018 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -52,7 +52,7 @@
#define MIN_W 32
#define MIN_H 32
#define MAX_W 2048
-#define MAX_H 1184
+#define MAX_H 2048
/* required alignments */
#define S_ALIGN 0 /* multiple of 1 */
@@ -249,6 +249,14 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_CB420],
+ },
+ },
+ {
.fourcc = V4L2_PIX_FMT_YUYV,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
@@ -311,14 +319,9 @@ static struct vpe_fmt vpe_formats[] = {
* there is one source queue and one destination queue for each m2m context.
*/
struct vpe_q_data {
- unsigned int width; /* frame width */
- unsigned int height; /* frame height */
- unsigned int nplanes; /* Current number of planes */
- unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
- enum v4l2_colorspace colorspace;
- enum v4l2_field field; /* supported field value */
+ /* current v4l2 format info */
+ struct v4l2_format format;
unsigned int flags;
- unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
struct v4l2_rect c_rect; /* crop/compose rectangle */
struct vpe_fmt *fmt; /* format info */
};
@@ -328,9 +331,14 @@ struct vpe_q_data {
#define Q_DATA_MODE_TILED BIT(1)
#define Q_DATA_INTERLACED_ALTERNATE BIT(2)
#define Q_DATA_INTERLACED_SEQ_TB BIT(3)
+#define Q_DATA_INTERLACED_SEQ_BT BIT(4)
+
+#define Q_IS_SEQ_XX (Q_DATA_INTERLACED_SEQ_TB | \
+ Q_DATA_INTERLACED_SEQ_BT)
#define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \
- Q_DATA_INTERLACED_SEQ_TB)
+ Q_DATA_INTERLACED_SEQ_TB | \
+ Q_DATA_INTERLACED_SEQ_BT)
enum {
Q_DATA_SRC = 0,
@@ -338,20 +346,25 @@ enum {
};
/* find our format description corresponding to the passed v4l2_format */
-static struct vpe_fmt *find_format(struct v4l2_format *f)
+static struct vpe_fmt *__find_format(u32 fourcc)
{
struct vpe_fmt *fmt;
unsigned int k;
for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
fmt = &vpe_formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == fourcc)
return fmt;
}
return NULL;
}
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ return __find_format(f->fmt.pix.pixelformat);
+}
+
/*
* there is one vpe_dev structure in the driver, it is shared by
* all instances.
@@ -681,7 +694,8 @@ static void set_cfg_modes(struct vpe_ctx *ctx)
* Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
*/
- if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
cfg_mode = 0;
write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
@@ -696,7 +710,8 @@ static void set_line_modes(struct vpe_ctx *ctx)
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
int line_mode = 1;
- if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
line_mode = 0; /* double lines to line buffer */
/* regs for now */
@@ -741,11 +756,12 @@ static void set_src_registers(struct vpe_ctx *ctx)
static void set_dst_registers(struct vpe_ctx *ctx)
{
struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
- enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
+ const struct v4l2_format_info *finfo;
u32 val = 0;
- if (clrspc == V4L2_COLORSPACE_SRGB) {
+ finfo = v4l2_format_info(fmt->fourcc);
+ if (v4l2_is_format_rgb(finfo)) {
val |= VPE_RGB_OUT_SELECT;
vpdma_set_bg_color(ctx->dev->vpdma,
(struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
@@ -758,7 +774,8 @@ static void set_dst_registers(struct vpe_ctx *ctx)
*/
val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
- if (fmt->fourcc != V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc != V4L2_PIX_FMT_NV12 &&
+ fmt->fourcc != V4L2_PIX_FMT_NV21)
val |= VPE_DS_BYPASS;
mmr_adb->out_fmt_reg[0] = val;
@@ -847,11 +864,13 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
unsigned int src_h = s_q_data->c_rect.height;
unsigned int dst_w = d_q_data->c_rect.width;
unsigned int dst_h = d_q_data->c_rect.height;
+ struct v4l2_pix_format_mplane *spix;
size_t mv_buf_size;
int ret;
ctx->sequence = 0;
ctx->field = V4L2_FIELD_TOP;
+ spix = &s_q_data->format.fmt.pix_mp;
if ((s_q_data->flags & Q_IS_INTERLACED) &&
!(d_q_data->flags & Q_IS_INTERLACED)) {
@@ -866,9 +885,9 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
* extra space will not be used by the de-interlacer, but will
* ensure that vpdma operates correctly
*/
- bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
- VPDMA_STRIDE_ALIGN);
- mv_buf_size = bytes_per_line * s_q_data->height;
+ bytes_per_line = ALIGN((spix->width * mv->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ mv_buf_size = bytes_per_line * spix->height;
ctx->deinterlacing = true;
src_h <<= 1;
@@ -888,7 +907,7 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
set_dei_regs(ctx);
csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
- s_q_data->colorspace, d_q_data->colorspace);
+ &s_q_data->format, &d_q_data->format);
sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
@@ -901,14 +920,6 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
}
/*
- * Return the vpe_ctx structure for a given struct file
- */
-static struct vpe_ctx *file2ctx(struct file *file)
-{
- return container_of(file->private_data, struct vpe_ctx, fh);
-}
-
-/*
* mem2mem callbacks
*/
@@ -1010,27 +1021,33 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
struct vpe_fmt *fmt = q_data->fmt;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = !ctx->src_mv_buf_selector;
+ struct v4l2_pix_format_mplane *pix;
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
if (port == VPE_PORT_MV_OUT) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
q_data = &ctx->q_data[Q_DATA_SRC];
+ pix = &q_data->format.fmt.pix_mp;
+ stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
+ pix = &q_data->format.fmt.pix_mp;
vpdma_fmt = fmt->vpdma_fmt[plane];
/*
* If we are using a single plane buffer and
* we need to set a separate vpdma chroma channel.
*/
- if (q_data->nplanes == 1 && plane) {
+ if (pix->num_planes == 1 && plane) {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Compute required offset */
- offset = q_data->bytesperline[0] * q_data->height;
+ offset = pix->plane_fmt[0].bytesperline * pix->height;
} else {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
/* Use address as is, no offset */
@@ -1044,6 +1061,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = pix->plane_fmt[VPE_LUMA].bytesperline;
}
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1054,8 +1072,8 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
MAX_W, MAX_H);
- vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
+ vpdma_add_out_dtd(&ctx->desc_list, pix->width,
+ stride, &q_data->c_rect,
vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
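For context, the offset computed above places the chroma data directly behind the luma plane when an NV12/NV21 frame lives in a single buffer. A minimal stand-alone sketch of that address math (the 1920x1080 frame and 1920-byte stride are hypothetical values, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Chroma plane offset inside a single NV12/NV21 buffer:
 * the chroma samples start right after the luma plane,
 * i.e. bytesperline * height bytes from the buffer start. */
static uint32_t chroma_offset(uint32_t bytesperline, uint32_t height)
{
	return bytesperline * height;
}

int main(void)
{
	printf("chroma offset: %u bytes\n", chroma_offset(1920, 1080));
	return 0;
}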
@@ -1067,6 +1085,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_fmt *fmt = q_data->fmt;
+ struct v4l2_pix_format_mplane *pix;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = ctx->src_mv_buf_selector;
int field = vbuf->field == V4L2_FIELD_BOTTOM;
@@ -1074,10 +1093,14 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
+ pix = &q_data->format.fmt.pix_mp;
if (port == VPE_PORT_MV_IN) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
@@ -1087,10 +1110,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
* If we are using a single plane buffer and
* we need to set a separate vpdma chroma channel.
*/
- if (q_data->nplanes == 1 && plane) {
+ if (pix->num_planes == 1 && plane) {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Compute required offset */
- offset = q_data->bytesperline[0] * q_data->height;
+ offset = pix->plane_fmt[0].bytesperline * pix->height;
} else {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
/* Use address as is, no offset */
@@ -1104,26 +1127,39 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = pix->plane_fmt[VPE_LUMA].bytesperline;
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
- /*
- * Use top or bottom field from same vb alternately
- * f,f-1,f-2 = TBT when seq is even
- * f,f-1,f-2 = BTB when seq is odd
- */
- field = (p_data->vb_index + (ctx->sequence % 2)) % 2;
+ /*
+ * field used in VPDMA desc = 0 (top) / 1 (bottom)
+ * Use top or bottom field from same vb alternately
+ * For each de-interlacing operation, f,f-1,f-2 should be one
+ * of TBT or BTB
+ */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB ||
+ q_data->flags & Q_DATA_INTERLACED_SEQ_BT) {
+ /* Select initial value based on format */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_BT)
+ field = 1;
+ else
+ field = 0;
+
+ /* Toggle for each vb_index and each operation */
+ field = (field + p_data->vb_index + ctx->sequence) % 2;
if (field) {
- /*
- * bottom field of a SEQ_TB buffer
- * Skip the top field data by
- */
- int height = q_data->height / 2;
- int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
- 1 : (vpdma_fmt->depth >> 3);
+ int height = pix->height / 2;
+ int bpp;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
+ bpp = 1;
+ else
+ bpp = vpdma_fmt->depth >> 3;
+
if (plane)
height /= 2;
- dma_addr += q_data->width * height * bpp;
+
+ dma_addr += pix->width * height * bpp;
}
}
}
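The field-selection parity above is easier to see outside the driver. A small stand-alone sketch of the same choice, assuming (as the comment states) that 0 means top field and 1 means bottom field:

#include <stdio.h>

/* SEQ_TB starts from the top field, SEQ_BT from the bottom field;
 * the choice then toggles with the buffer index and the sequence
 * counter so consecutive operations see TBT or BTB triplets. */
static int pick_field(int seq_bt, unsigned int vb_index, unsigned int sequence)
{
	int field = seq_bt ? 1 : 0;

	return (field + vb_index + sequence) % 2;
}

int main(void)
{
	unsigned int seq, vb;

	for (seq = 0; seq < 2; seq++)
		for (vb = 0; vb < 3; vb++)
			printf("SEQ_TB seq=%u vb=%u -> field %d\n",
			       seq, vb, pick_field(0, vb, seq));
	return 0;
}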
@@ -1136,13 +1172,14 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
frame_width = q_data->c_rect.width;
frame_height = q_data->c_rect.height;
- if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (p_data->vb_part && (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21))
frame_height /= 2;
- vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
- vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
- frame_height, 0, 0);
+ vpdma_add_in_dtd(&ctx->desc_list, pix->width, stride,
+ &q_data->c_rect, vpdma_fmt, dma_addr,
+ p_data->channel, field, flags, frame_width,
+ frame_height, 0, 0);
}
/*
@@ -1176,13 +1213,18 @@ static void device_run(void *priv)
struct sc_data *sc = ctx->dev->sc;
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
-
- if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
- ctx->sequence % 2 == 0) {
- /* When using SEQ_TB buffers, When using it first time,
- * No need to remove the buffer as the next field is present
- * in the same buffer. (so that job_ready won't fail)
- * It will be removed when using bottom field
+ const struct v4l2_format_info *d_finfo;
+
+ d_finfo = v4l2_format_info(d_q_data->fmt->fourcc);
+
+ if (ctx->deinterlacing && s_q_data->flags & Q_IS_SEQ_XX &&
+ ctx->sequence % 2 == 0) {
+ /* When using SEQ_XX type buffers, each buffer has two fields
+ * (top & bottom).
+ * Removing one buffer is actually getting two fields
+ * Alternate between two operations:-
+ * Even : consume one field but DO NOT REMOVE from queue
+ * Odd : consume other field and REMOVE from queue
*/
ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[0] == NULL);
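As a rough illustration of the even/odd handling described in the comment above, here is a stand-alone sketch (not driver code) of when a SEQ_XX buffer would actually be dequeued:

#include <stdbool.h>
#include <stdio.h>

/* Each queued SEQ_XX buffer carries both fields, so only every
 * second de-interlacing run removes a buffer from the queue:
 * even run - consume one field, keep the buffer queued;
 * odd run  - consume the other field, then dequeue it. */
static bool should_dequeue(unsigned int sequence)
{
	return sequence % 2 != 0;
}

int main(void)
{
	unsigned int seq;

	for (seq = 0; seq < 4; seq++)
		printf("run %u: %s\n", seq,
		       should_dequeue(seq) ? "dequeue" : "keep");
	return 0;
}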
@@ -1246,7 +1288,7 @@ static void device_run(void *priv)
if (ctx->deinterlacing)
add_out_dtd(ctx, VPE_PORT_MV_OUT);
- if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ if (v4l2_is_format_rgb(d_finfo)) {
add_out_dtd(ctx, VPE_PORT_RGB_OUT);
} else {
add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
@@ -1288,7 +1330,7 @@ static void device_run(void *priv)
}
/* sync on channel control descriptors for output ports */
- if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ if (v4l2_is_format_rgb(d_finfo)) {
vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
VPE_CHAN_RGB_OUT);
} else {
@@ -1391,9 +1433,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
/* the previous dst mv buffer becomes the next src mv buffer */
ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
- if (ctx->aborting)
- goto finished;
-
s_vb = ctx->src_vbs[0];
d_vb = ctx->dst_vb;
@@ -1404,6 +1443,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->timecode = s_vb->timecode;
d_vb->sequence = ctx->sequence;
+ s_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_IS_INTERLACED) {
@@ -1457,6 +1497,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->src_vbs[0] = NULL;
ctx->dst_vb = NULL;
+ if (ctx->aborting)
+ goto finished;
+
ctx->bufs_completed++;
if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
device_run(ctx);
@@ -1519,38 +1562,32 @@ static int vpe_enum_fmt(struct file *file, void *priv,
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vb2_queue *vq;
struct vpe_q_data *q_data;
- int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
- pix->width = q_data->width;
- pix->height = q_data->height;
- pix->pixelformat = q_data->fmt->fourcc;
- pix->field = q_data->field;
+ *f = q_data->format;
- if (V4L2_TYPE_IS_OUTPUT(f->type)) {
- pix->colorspace = q_data->colorspace;
- } else {
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
struct vpe_q_data *s_q_data;
+ struct v4l2_pix_format_mplane *spix;
- /* get colorspace from the source queue */
+ /* get colorimetry from the source queue */
s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ spix = &s_q_data->format.fmt.pix_mp;
- pix->colorspace = s_q_data->colorspace;
- }
-
- pix->num_planes = q_data->nplanes;
-
- for (i = 0; i < pix->num_planes; i++) {
- pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
- pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ pix->colorspace = spix->colorspace;
+ pix->xfer_func = spix->xfer_func;
+ pix->ycbcr_enc = spix->ycbcr_enc;
+ pix->quantization = spix->quantization;
}
return 0;
@@ -1564,15 +1601,18 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
unsigned int w_align;
int i, depth, depth_bytes, height;
unsigned int stride = 0;
+ const struct v4l2_format_info *finfo;
if (!fmt || !(fmt->types & type)) {
- vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
pix->pixelformat);
- return -EINVAL;
+ fmt = __find_format(V4L2_PIX_FMT_YUYV);
}
- if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
- && pix->field != V4L2_FIELD_SEQ_TB)
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE &&
+ pix->field != V4L2_FIELD_SEQ_TB &&
+ pix->field != V4L2_FIELD_SEQ_BT)
pix->field = V4L2_FIELD_NONE;
depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
@@ -1615,27 +1655,25 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
- if (!pix->num_planes)
+ if (!pix->num_planes || pix->num_planes > 2)
pix->num_planes = fmt->coplanar ? 2 : 1;
else if (pix->num_planes > 1 && !fmt->coplanar)
pix->num_planes = 1;
pix->pixelformat = fmt->fourcc;
+ finfo = v4l2_format_info(fmt->fourcc);
/*
* For the actual image parameters, we need to consider the field
- * height of the image for SEQ_TB buffers.
+ * height of the image for SEQ_XX buffers.
*/
- if (pix->field == V4L2_FIELD_SEQ_TB)
+ if (pix->field == V4L2_FIELD_SEQ_TB || pix->field == V4L2_FIELD_SEQ_BT)
height = pix->height / 2;
else
height = pix->height;
if (!pix->colorspace) {
- if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
- fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
- fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
- fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+ if (v4l2_is_format_rgb(finfo)) {
pix->colorspace = V4L2_COLORSPACE_SRGB;
} else {
if (height > 1280) /* HD */
@@ -1654,6 +1692,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
if (stride > plane_fmt->bytesperline)
plane_fmt->bytesperline = stride;
+ plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
+ stride,
+ VPDMA_MAX_STRIDE);
+
plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
VPDMA_STRIDE_ALIGN);
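The clamp-then-align step above is easier to follow in isolation. A stand-alone sketch of the same bytesperline adjustment, reusing the VPDMA constants (ALIGN_UP is a local helper here, not the kernel macro):

#include <stdint.h>
#include <stdio.h>

#define VPDMA_STRIDE_ALIGN	16
#define VPDMA_MAX_STRIDE	65520

#define CLAMP(v, lo, hi)	((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* The requested bytesperline may never be smaller than the minimum
 * stride for the width, never larger than what VPDMA can address,
 * and the result must be 16-byte aligned. */
static uint32_t fix_bytesperline(uint32_t requested, uint32_t min_stride)
{
	uint32_t bpl = CLAMP(requested, min_stride, VPDMA_MAX_STRIDE);

	return ALIGN_UP(bpl, VPDMA_STRIDE_ALIGN);
}

int main(void)
{
	printf("%u\n", fix_bytesperline(100000, 3840));	/* clamped to 65520 */
	printf("%u\n", fix_bytesperline(0, 3840));	/* raised to 3840 */
	return 0;
}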
@@ -1679,7 +1721,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_fmt *fmt = find_format(f);
if (V4L2_TYPE_IS_OUTPUT(f->type))
@@ -1691,10 +1733,9 @@ static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct v4l2_plane_pix_format *plane_fmt;
+ struct v4l2_pix_format_mplane *qpix;
struct vpe_q_data *q_data;
struct vb2_queue *vq;
- int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
@@ -1709,42 +1750,34 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
if (!q_data)
return -EINVAL;
+ qpix = &q_data->format.fmt.pix_mp;
q_data->fmt = find_format(f);
- q_data->width = pix->width;
- q_data->height = pix->height;
- q_data->colorspace = pix->colorspace;
- q_data->field = pix->field;
- q_data->nplanes = pix->num_planes;
-
- for (i = 0; i < pix->num_planes; i++) {
- plane_fmt = &pix->plane_fmt[i];
-
- q_data->bytesperline[i] = plane_fmt->bytesperline;
- q_data->sizeimage[i] = plane_fmt->sizeimage;
- }
+ q_data->format = *f;
q_data->c_rect.left = 0;
q_data->c_rect.top = 0;
- q_data->c_rect.width = q_data->width;
- q_data->c_rect.height = q_data->height;
+ q_data->c_rect.width = pix->width;
+ q_data->c_rect.height = pix->height;
- if (q_data->field == V4L2_FIELD_ALTERNATE)
+ if (qpix->field == V4L2_FIELD_ALTERNATE)
q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
- else if (q_data->field == V4L2_FIELD_SEQ_TB)
+ else if (qpix->field == V4L2_FIELD_SEQ_TB)
q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
+ else if (qpix->field == V4L2_FIELD_SEQ_BT)
+ q_data->flags |= Q_DATA_INTERLACED_SEQ_BT;
else
q_data->flags &= ~Q_IS_INTERLACED;
- /* the crop height is halved for the case of SEQ_TB buffers */
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ /* the crop height is halved for the case of SEQ_XX buffers */
+ if (q_data->flags & Q_IS_SEQ_XX)
q_data->c_rect.height /= 2;
vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
- f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
- q_data->bytesperline[VPE_LUMA]);
- if (q_data->nplanes == 2)
+ f->type, pix->width, pix->height, pix->pixelformat,
+ pix->plane_fmt[0].bytesperline);
+ if (pix->num_planes == 2)
vpe_dbg(ctx->dev, " bpl_uv %d\n",
- q_data->bytesperline[VPE_CHROMA]);
+ pix->plane_fmt[1].bytesperline);
return 0;
}
@@ -1752,7 +1785,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
int ret;
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
ret = vpe_try_fmt(file, priv, f);
if (ret)
@@ -1773,6 +1806,7 @@ static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
{
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
int height;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
@@ -1783,6 +1817,8 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
if (!q_data)
return -EINVAL;
+ pix = &q_data->format.fmt.pix_mp;
+
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE:
/*
@@ -1809,27 +1845,27 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
}
/*
- * For SEQ_TB buffers, crop height should be less than the height of
+ * For SEQ_XX buffers, crop height should be less than the height of
* the field height, not the buffer height
*/
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
- height = q_data->height / 2;
+ if (q_data->flags & Q_IS_SEQ_XX)
+ height = pix->height / 2;
else
- height = q_data->height;
+ height = pix->height;
if (s->r.top < 0 || s->r.left < 0) {
vpe_err(ctx->dev, "negative values for top and left\n");
s->r.top = s->r.left = 0;
}
- v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
+ v4l_bound_align_image(&s->r.width, MIN_W, pix->width, 1,
&s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
/* adjust left/top if cropping rectangle is out of bounds */
- if (s->r.left + s->r.width > q_data->width)
- s->r.left = q_data->width - s->r.width;
- if (s->r.top + s->r.height > q_data->height)
- s->r.top = q_data->height - s->r.height;
+ if (s->r.left + s->r.width > pix->width)
+ s->r.left = pix->width - s->r.width;
+ if (s->r.top + s->r.height > pix->height)
+ s->r.top = pix->height - s->r.height;
return 0;
}
@@ -1837,8 +1873,9 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
static int vpe_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
bool use_c_rect = false;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
@@ -1849,6 +1886,8 @@ static int vpe_g_selection(struct file *file, void *fh,
if (!q_data)
return -EINVAL;
+ pix = &q_data->format.fmt.pix_mp;
+
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -1887,8 +1926,8 @@ static int vpe_g_selection(struct file *file, void *fh,
*/
s->r.left = 0;
s->r.top = 0;
- s->r.width = q_data->width;
- s->r.height = q_data->height;
+ s->r.width = pix->width;
+ s->r.height = pix->height;
}
return 0;
@@ -1898,7 +1937,7 @@ static int vpe_g_selection(struct file *file, void *fh,
static int vpe_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_q_data *q_data;
struct v4l2_selection sel = *s;
int ret;
@@ -1991,17 +2030,21 @@ static int vpe_queue_setup(struct vb2_queue *vq,
int i;
struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
q_data = get_q_data(ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
- *nplanes = q_data->nplanes;
+ pix = &q_data->format.fmt.pix_mp;
+ *nplanes = pix->num_planes;
for (i = 0; i < *nplanes; i++)
- sizes[i] = q_data->sizeimage[i];
+ sizes[i] = pix->plane_fmt[i].sizeimage;
vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
sizes[VPE_LUMA]);
- if (q_data->nplanes == 2)
+ if (*nplanes == 2)
vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
return 0;
@@ -2012,12 +2055,16 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vpe_q_data *q_data;
- int i, num_planes;
+ struct v4l2_pix_format_mplane *pix;
+ int i;
vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(ctx, vb->vb2_queue->type);
- num_planes = q_data->nplanes;
+ if (!q_data)
+ return -EINVAL;
+
+ pix = &q_data->format.fmt.pix_mp;
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (!(q_data->flags & Q_IS_INTERLACED)) {
@@ -2025,23 +2072,24 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
} else {
if (vbuf->field != V4L2_FIELD_TOP &&
vbuf->field != V4L2_FIELD_BOTTOM &&
- vbuf->field != V4L2_FIELD_SEQ_TB)
+ vbuf->field != V4L2_FIELD_SEQ_TB &&
+ vbuf->field != V4L2_FIELD_SEQ_BT)
return -EINVAL;
}
}
- for (i = 0; i < num_planes; i++) {
- if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ for (i = 0; i < pix->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
vpe_err(ctx->dev,
"data will not fit into plane (%lu < %lu)\n",
vb2_plane_size(vb, i),
- (long) q_data->sizeimage[i]);
+ (long)pix->plane_fmt[i].sizeimage);
return -EINVAL;
}
}
- for (i = 0; i < num_planes; i++)
- vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+ for (i = 0; i < pix->num_planes; i++)
+ vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);
return 0;
}
@@ -2226,6 +2274,7 @@ static int vpe_open(struct file *file)
struct vpe_q_data *s_q_data;
struct v4l2_ctrl_handler *hdl;
struct vpe_ctx *ctx;
+ struct v4l2_pix_format_mplane *pix;
int ret;
vpe_dbg(dev, "vpe_open\n");
@@ -2261,7 +2310,7 @@ static int vpe_open(struct file *file)
init_adb_hdrs(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
+ file->private_data = ctx;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 1);
@@ -2274,23 +2323,32 @@ static int vpe_open(struct file *file)
v4l2_ctrl_handler_setup(hdl);
s_q_data = &ctx->q_data[Q_DATA_SRC];
- s_q_data->fmt = &vpe_formats[2];
- s_q_data->width = 1920;
- s_q_data->height = 1080;
- s_q_data->nplanes = 1;
- s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
+ pix = &s_q_data->format.fmt.pix_mp;
+ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
+ pix->pixelformat = s_q_data->fmt->fourcc;
+ s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ pix->width = 1920;
+ pix->height = 1080;
+ pix->num_planes = 1;
+ pix->plane_fmt[VPE_LUMA].bytesperline = (pix->width *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
- s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
- s_q_data->height);
- s_q_data->colorspace = V4L2_COLORSPACE_REC709;
- s_q_data->field = V4L2_FIELD_NONE;
+ pix->plane_fmt[VPE_LUMA].sizeimage =
+ pix->plane_fmt[VPE_LUMA].bytesperline *
+ pix->height;
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pix->field = V4L2_FIELD_NONE;
s_q_data->c_rect.left = 0;
s_q_data->c_rect.top = 0;
- s_q_data->c_rect.width = s_q_data->width;
- s_q_data->c_rect.height = s_q_data->height;
+ s_q_data->c_rect.width = pix->width;
+ s_q_data->c_rect.height = pix->height;
s_q_data->flags = 0;
ctx->q_data[Q_DATA_DST] = *s_q_data;
+ ctx->q_data[Q_DATA_DST].format.type =
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
set_dei_shadow_registers(ctx);
set_src_registers(ctx);
@@ -2346,12 +2404,18 @@ free_ctx:
static int vpe_release(struct file *file)
{
struct vpe_dev *dev = video_drvdata(file);
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
vpe_dbg(dev, "releasing instance %p\n", ctx);
mutex_lock(&dev->dev_mutex);
free_mv_buffers(ctx);
+
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
+
vpdma_free_desc_list(&ctx->desc_list);
vpdma_free_desc_buf(&ctx->mmr_adb);
@@ -2459,6 +2523,13 @@ static int vpe_probe(struct platform_device *pdev)
struct vpe_dev *dev;
int ret, irq, func;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return ret;
+ }
+
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -2473,7 +2544,12 @@ static int vpe_probe(struct platform_device *pdev)
mutex_init(&dev->dev_mutex);
dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "vpe_top");
+ "vpe_top");
+ if (!dev->res) {
+ dev_err(&pdev->dev, "missing 'vpe_top' resources data\n");
+ return -ENODEV;
+ }
+
/*
* HACK: we get resource info from device tree in the form of a list of
* VPE sub blocks, the driver currently uses only the base of vpe_top
@@ -2568,7 +2644,7 @@ static int vpe_remove(struct platform_device *pdev)
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
{
- .compatible = "ti,vpe",
+ .compatible = "ti,dra7-vpe",
},
{},
};
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 0ee143ae0f6b..82350097503e 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -2139,6 +2139,9 @@ static void vicodec_v4l2_dev_release(struct v4l2_device *v4l2_dev)
v4l2_m2m_release(dev->stateful_enc.m2m_dev);
v4l2_m2m_release(dev->stateful_dec.m2m_dev);
v4l2_m2m_release(dev->stateless_dec.m2m_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
kfree(dev);
}
@@ -2250,7 +2253,6 @@ static int vicodec_remove(struct platform_device *pdev)
v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev);
v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev);
v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev);
- media_device_cleanup(&dev->mdev);
#endif
video_unregister_device(&dev->stateful_enc.vfd);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index acd3bd48c7e2..8d6b09623d88 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1073,6 +1073,9 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
if (!q_data)
return -EINVAL;
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->aborting = 0;
+
q_data->sequence = 0;
return 0;
}
@@ -1272,6 +1275,9 @@ static void vim2m_device_release(struct video_device *vdev)
v4l2_device_unregister(&dev->v4l2_dev);
v4l2_m2m_release(dev->m2m_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
kfree(dev);
}
@@ -1343,6 +1349,7 @@ static int vim2m_probe(struct platform_device *pdev)
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
+ dev->m2m_dev = NULL;
goto error_dev;
}
@@ -1395,7 +1402,6 @@ static int vim2m_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
- media_device_cleanup(&dev->mdev);
#endif
video_unregister_device(&dev->vfd);
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index 96d06f030c31..a53b2b532e9f 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-vimc-y := vimc-core.o vimc-common.o vimc-streamer.o
+vimc-y := vimc-core.o vimc-common.o vimc-streamer.o vimc-capture.o \
+ vimc-debayer.o vimc-scaler.o vimc-sensor.o
+
+obj-$(CONFIG_VIDEO_VIMC) += vimc.o
-obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc-capture.o vimc-debayer.o \
- vimc-scaler.o vimc-sensor.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 1d56b91830ba..76c015898cfd 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
@@ -16,12 +12,9 @@
#include "vimc-common.h"
#include "vimc-streamer.h"
-#define VIMC_CAP_DRV_NAME "vimc-capture"
-
struct vimc_cap_device {
struct vimc_ent_device ved;
struct video_device vdev;
- struct device *dev;
struct v4l2_pix_format format;
struct vb2_queue queue;
struct list_head buf_list;
@@ -36,6 +29,7 @@ struct vimc_cap_device {
struct mutex lock;
u32 sequence;
struct vimc_stream stream;
+ struct media_pad pad;
};
static const struct v4l2_pix_format fmt_default = {
@@ -130,7 +124,7 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- dev_dbg(vcap->dev, "%s: format update: "
+ dev_dbg(vcap->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcap->vdev.name,
/* old */
@@ -306,7 +300,7 @@ static int vimc_cap_buffer_prepare(struct vb2_buffer *vb)
unsigned long size = vcap->format.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
- dev_err(vcap->dev, "%s: buffer too small (%lu < %lu)\n",
+ dev_err(vcap->ved.dev, "%s: buffer too small (%lu < %lu)\n",
vcap->vdev.name, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
@@ -328,7 +322,7 @@ static const struct vb2_ops vimc_cap_qops = {
};
static const struct media_entity_operations vimc_cap_mops = {
- .link_validate = vimc_link_validate,
+ .link_validate = vimc_vdev_link_validate,
};
static void vimc_cap_release(struct video_device *vdev)
@@ -336,19 +330,16 @@ static void vimc_cap_release(struct video_device *vdev)
struct vimc_cap_device *vcap =
container_of(vdev, struct vimc_cap_device, vdev);
- vimc_pads_cleanup(vcap->ved.pads);
+ media_entity_cleanup(vcap->ved.ent);
kfree(vcap);
}
-static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
- ved);
+ struct vimc_cap_device *vcap;
+ vcap = container_of(ved, struct vimc_cap_device, ved);
vb2_queue_release(&vcap->queue);
- media_entity_cleanup(ved->ent);
video_unregister_device(&vcap->vdev);
}
@@ -391,11 +382,10 @@ static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
return NULL;
}
-static int vimc_cap_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
const struct vimc_pix_map *vpix;
struct vimc_cap_device *vcap;
struct video_device *vdev;
@@ -405,23 +395,16 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
/* Allocate the vimc_cap_device struct */
vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
if (!vcap)
- return -ENOMEM;
-
- /* Allocate the pads */
- vcap->ved.pads =
- vimc_pads_init(1, (const unsigned long[1]) {MEDIA_PAD_FL_SINK});
- if (IS_ERR(vcap->ved.pads)) {
- ret = PTR_ERR(vcap->ved.pads);
- goto err_free_vcap;
- }
+ return NULL;
/* Initialize the media entity */
- vcap->vdev.entity.name = pdata->entity_name;
+ vcap->vdev.entity.name = vcfg_name;
vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
+ vcap->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vcap->vdev.entity,
- 1, vcap->ved.pads);
+ 1, &vcap->pad);
if (ret)
- goto err_clean_pads;
+ goto err_free_vcap;
/* Initialize the lock */
mutex_init(&vcap->lock);
@@ -440,8 +423,8 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
ret = vb2_queue_init(q);
if (ret) {
- dev_err(comp, "%s: vb2 queue init failed (err=%d)\n",
- pdata->entity_name, ret);
+ dev_err(&vimc->pdev.dev, "%s: vb2 queue init failed (err=%d)\n",
+ vcfg_name, ret);
goto err_clean_m_ent;
}
@@ -460,8 +443,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vcap->ved.ent = &vcap->vdev.entity;
vcap->ved.process_frame = vimc_cap_process_frame;
vcap->ved.vdev_get_format = vimc_cap_get_format;
- dev_set_drvdata(comp, &vcap->ved);
- vcap->dev = comp;
+ vcap->ved.dev = &vimc->pdev.dev;
/* Initialize the video_device struct */
vdev = &vcap->vdev;
@@ -474,68 +456,25 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vdev->queue = q;
vdev->v4l2_dev = v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
- strscpy(vdev->name, pdata->entity_name, sizeof(vdev->name));
+ strscpy(vdev->name, vcfg_name, sizeof(vdev->name));
video_set_drvdata(vdev, &vcap->ved);
/* Register the video_device with the v4l2 and the media framework */
ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
if (ret) {
- dev_err(comp, "%s: video register failed (err=%d)\n",
+ dev_err(&vimc->pdev.dev, "%s: video register failed (err=%d)\n",
vcap->vdev.name, ret);
goto err_release_queue;
}
- return 0;
+ return &vcap->ved;
err_release_queue:
vb2_queue_release(q);
err_clean_m_ent:
media_entity_cleanup(&vcap->vdev.entity);
-err_clean_pads:
- vimc_pads_cleanup(vcap->ved.pads);
err_free_vcap:
kfree(vcap);
- return ret;
-}
-
-static const struct component_ops vimc_cap_comp_ops = {
- .bind = vimc_cap_comp_bind,
- .unbind = vimc_cap_comp_unbind,
-};
-
-static int vimc_cap_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_cap_comp_ops);
-}
-
-static int vimc_cap_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_cap_comp_ops);
-
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_cap_driver_ids[] = {
- {
- .name = VIMC_CAP_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_cap_pdrv = {
- .probe = vimc_cap_probe,
- .remove = vimc_cap_remove,
- .id_table = vimc_cap_driver_ids,
- .driver = {
- .name = VIMC_CAP_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_cap_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Capture");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index 7e1ae0b12f1e..16ce9f3b7c75 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -164,6 +164,16 @@ static const struct vimc_pix_map vimc_pix_map_list[] = {
},
};
+bool vimc_is_source(struct media_entity *ent)
+{
+ unsigned int i;
+
+ for (i = 0; i < ent->num_pads; i++)
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SINK)
+ return false;
+ return true;
+}
+
const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
{
if (i >= ARRAY_SIZE(vimc_pix_map_list))
@@ -171,7 +181,6 @@ const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
return &vimc_pix_map_list[i];
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_index);
const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
{
@@ -183,7 +192,6 @@ const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_code);
const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
{
@@ -195,87 +203,37 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
-
-/* Helper function to allocate and initialize pads */
-struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
-{
- struct media_pad *pads;
- unsigned int i;
-
- /* Allocate memory for the pads */
- pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
- if (!pads)
- return ERR_PTR(-ENOMEM);
-
- /* Initialize the pads */
- for (i = 0; i < num_pads; i++) {
- pads[i].index = i;
- pads[i].flags = pads_flag[i];
- }
-
- return pads;
-}
-EXPORT_SYMBOL_GPL(vimc_pads_init);
-int vimc_pipeline_s_stream(struct media_entity *ent, int enable)
-{
- struct v4l2_subdev *sd;
- struct media_pad *pad;
- unsigned int i;
- int ret;
-
- for (i = 0; i < ent->num_pads; i++) {
- if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
- continue;
-
- /* Start the stream in the subdevice direct connected */
- pad = media_entity_remote_pad(&ent->pads[i]);
- if (!pad)
- continue;
-
- if (!is_media_entity_v4l2_subdev(pad->entity))
- return -EINVAL;
-
- sd = media_entity_to_v4l2_subdev(pad->entity);
- ret = v4l2_subdev_call(sd, video, s_stream, enable);
- if (ret && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vimc_pipeline_s_stream);
-
-static int vimc_get_mbus_format(struct media_pad *pad,
- struct v4l2_subdev_format *fmt)
+static int vimc_get_pix_format(struct media_pad *pad,
+ struct v4l2_pix_format *fmt)
{
if (is_media_entity_v4l2_subdev(pad->entity)) {
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(pad->entity);
+ struct v4l2_subdev_format sd_fmt;
+ const struct vimc_pix_map *pix_map;
int ret;
- fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt->pad = pad->index;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = pad->index;
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
if (ret)
return ret;
+ v4l2_fill_pix_format(fmt, &sd_fmt.format);
+ pix_map = vimc_pix_map_by_code(sd_fmt.format.code);
+ fmt->pixelformat = pix_map->pixelformat;
} else if (is_media_entity_v4l2_video_device(pad->entity)) {
struct video_device *vdev = container_of(pad->entity,
struct video_device,
entity);
struct vimc_ent_device *ved = video_get_drvdata(vdev);
- const struct vimc_pix_map *vpix;
- struct v4l2_pix_format vdev_fmt;
if (!ved->vdev_get_format)
return -ENOIOCTLCMD;
- ved->vdev_get_format(ved, &vdev_fmt);
- vpix = vimc_pix_map_by_pixelformat(vdev_fmt.pixelformat);
- v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, vpix->code);
+ ved->vdev_get_format(ved, fmt);
} else {
return -EINVAL;
}
@@ -283,16 +241,16 @@ static int vimc_get_mbus_format(struct media_pad *pad,
return 0;
}
-int vimc_link_validate(struct media_link *link)
+int vimc_vdev_link_validate(struct media_link *link)
{
- struct v4l2_subdev_format source_fmt, sink_fmt;
+ struct v4l2_pix_format source_fmt, sink_fmt;
int ret;
- ret = vimc_get_mbus_format(link->source, &source_fmt);
+ ret = vimc_get_pix_format(link->source, &source_fmt);
if (ret)
return ret;
- ret = vimc_get_mbus_format(link->sink, &sink_fmt);
+ ret = vimc_get_pix_format(link->sink, &sink_fmt);
if (ret)
return ret;
@@ -301,21 +259,21 @@ int vimc_link_validate(struct media_link *link)
"%s:snk:%dx%d (0x%x, %d, %d, %d, %d)\n",
/* src */
link->source->entity->name,
- source_fmt.format.width, source_fmt.format.height,
- source_fmt.format.code, source_fmt.format.colorspace,
- source_fmt.format.quantization, source_fmt.format.xfer_func,
- source_fmt.format.ycbcr_enc,
+ source_fmt.width, source_fmt.height,
+ source_fmt.pixelformat, source_fmt.colorspace,
+ source_fmt.quantization, source_fmt.xfer_func,
+ source_fmt.ycbcr_enc,
/* sink */
link->sink->entity->name,
- sink_fmt.format.width, sink_fmt.format.height,
- sink_fmt.format.code, sink_fmt.format.colorspace,
- sink_fmt.format.quantization, sink_fmt.format.xfer_func,
- sink_fmt.format.ycbcr_enc);
-
- /* The width, height and code must match. */
- if (source_fmt.format.width != sink_fmt.format.width
- || source_fmt.format.height != sink_fmt.format.height
- || source_fmt.format.code != sink_fmt.format.code)
+ sink_fmt.width, sink_fmt.height,
+ sink_fmt.pixelformat, sink_fmt.colorspace,
+ sink_fmt.quantization, sink_fmt.xfer_func,
+ sink_fmt.ycbcr_enc);
+
+ /* The width, height and pixelformat must match. */
+ if (source_fmt.width != sink_fmt.width ||
+ source_fmt.height != sink_fmt.height ||
+ source_fmt.pixelformat != sink_fmt.pixelformat)
return -EPIPE;
/*
@@ -323,44 +281,43 @@ int vimc_link_validate(struct media_link *link)
* to support interlaced hardware connected to bridges that support
* progressive formats only.
*/
- if (source_fmt.format.field != sink_fmt.format.field &&
- sink_fmt.format.field != V4L2_FIELD_NONE)
+ if (source_fmt.field != sink_fmt.field &&
+ sink_fmt.field != V4L2_FIELD_NONE)
return -EPIPE;
/*
* If colorspace is DEFAULT, then assume all the colorimetry is also
* DEFAULT, return 0 to skip comparing the other colorimetry parameters
*/
- if (source_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT
- || sink_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ if (source_fmt.colorspace == V4L2_COLORSPACE_DEFAULT ||
+ sink_fmt.colorspace == V4L2_COLORSPACE_DEFAULT)
return 0;
/* Colorspace must match. */
- if (source_fmt.format.colorspace != sink_fmt.format.colorspace)
+ if (source_fmt.colorspace != sink_fmt.colorspace)
return -EPIPE;
/* Colorimetry must match if they are not set to DEFAULT */
- if (source_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
- && sink_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
- && source_fmt.format.ycbcr_enc != sink_fmt.format.ycbcr_enc)
+ if (source_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT &&
+ sink_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT &&
+ source_fmt.ycbcr_enc != sink_fmt.ycbcr_enc)
return -EPIPE;
- if (source_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
- && sink_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
- && source_fmt.format.quantization != sink_fmt.format.quantization)
+ if (source_fmt.quantization != V4L2_QUANTIZATION_DEFAULT &&
+ sink_fmt.quantization != V4L2_QUANTIZATION_DEFAULT &&
+ source_fmt.quantization != sink_fmt.quantization)
return -EPIPE;
- if (source_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
- && sink_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
- && source_fmt.format.xfer_func != sink_fmt.format.xfer_func)
+ if (source_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT &&
+ sink_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT &&
+ source_fmt.xfer_func != sink_fmt.xfer_func)
return -EPIPE;
return 0;
}
-EXPORT_SYMBOL_GPL(vimc_link_validate);
static const struct media_entity_operations vimc_ent_sd_mops = {
- .link_validate = vimc_link_validate,
+ .link_validate = v4l2_subdev_link_validate,
};
int vimc_ent_sd_register(struct vimc_ent_device *ved,
@@ -369,17 +326,12 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
const char *const name,
u32 function,
u16 num_pads,
- const unsigned long *pads_flag,
+ struct media_pad *pads,
const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops)
{
int ret;
- /* Allocate the pads */
- ved->pads = vimc_pads_init(num_pads, pads_flag);
- if (IS_ERR(ved->pads))
- return PTR_ERR(ved->pads);
-
/* Fill the vimc_ent_device struct */
ved->ent = &sd->entity;
@@ -398,9 +350,9 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
/* Initialize the media entity */
- ret = media_entity_pads_init(&sd->entity, num_pads, ved->pads);
+ ret = media_entity_pads_init(&sd->entity, num_pads, pads);
if (ret)
- goto err_clean_pads;
+ return ret;
/* Register the subdev with the v4l2 and the media framework */
ret = v4l2_device_register_subdev(v4l2_dev, sd);
@@ -415,16 +367,5 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
err_clean_m_ent:
media_entity_cleanup(&sd->entity);
-err_clean_pads:
- vimc_pads_cleanup(ved->pads);
return ret;
}
-EXPORT_SYMBOL_GPL(vimc_ent_sd_register);
-
-void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
-{
- media_entity_cleanup(ved->ent);
- vimc_pads_cleanup(ved->pads);
- v4l2_device_unregister_subdev(sd);
-}
-EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 9c2e0e216c6b..87eb8259c2a8 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -8,6 +8,7 @@
#ifndef _VIMC_COMMON_H_
#define _VIMC_COMMON_H_
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
@@ -18,6 +19,7 @@
#define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000)
#define VIMC_CID_VIMC_CLASS (0x00f00000 | 1)
#define VIMC_CID_TEST_PATTERN (VIMC_CID_VIMC_BASE + 0)
+#define VIMC_CID_MEAN_WIN_SIZE (VIMC_CID_VIMC_BASE + 1)
#define VIMC_FRAME_MAX_WIDTH 4096
#define VIMC_FRAME_MAX_HEIGHT 2160
@@ -26,6 +28,10 @@
#define VIMC_FRAME_INDEX(lin, col, width, bpp) ((lin * width + col) * bpp)
+/* Source and sink pad checks */
+#define VIMC_IS_SRC(pad) (pad)
+#define VIMC_IS_SINK(pad) (!(pad))
+
/**
* struct vimc_colorimetry_clamp - Adjust colorimetry parameters
*
@@ -53,21 +59,6 @@ do { \
} while (0)
/**
- * struct vimc_platform_data - platform data to components
- *
- * @entity_name: The name of the entity to be created
- *
- * Board setup code will often provide additional information using the device's
- * platform_data field to hold additional information.
- * When injecting a new platform_device in the component system the core needs
- * to provide to the corresponding submodules the name of the entity that should
- * be used when registering the subdevice in the Media Controller system.
- */
-struct vimc_platform_data {
- char entity_name[32];
-};
-
-/**
* struct vimc_pix_map - maps media bus code with v4l2 pixel format
*
* @code: media bus format code defined by MEDIA_BUS_FMT_* macros
@@ -85,10 +76,11 @@ struct vimc_pix_map {
};
/**
- * struct vimc_ent_device - core struct that represents a node in the topology
+ * struct vimc_ent_device - core struct that represents an entity in the
+ * topology
*
+ * @dev: a pointer to the device struct of the driver
* @ent: the pointer to struct media_entity for the node
- * @pads: the list of pads of the node
* @process_frame: callback send a frame to that node
* @vdev_get_format: callback that returns the current format a pad, used
* only when is_media_entity_v4l2_video_device(ent) returns
@@ -103,8 +95,8 @@ struct vimc_pix_map {
* media_entity
*/
struct vimc_ent_device {
+ struct device *dev;
struct media_entity *ent;
- struct media_pad *pads;
void * (*process_frame)(struct vimc_ent_device *ved,
const void *frame);
void (*vdev_get_format)(struct vimc_ent_device *ved,
@@ -112,38 +104,65 @@ struct vimc_ent_device {
};
/**
- * vimc_pads_init - initialize pads
- *
- * @num_pads: number of pads to initialize
- * @pads_flags: flags to use in each pad
+ * struct vimc_device - main device for vimc driver
*
- * Helper functions to allocate/initialize pads
+ * @pdev: pointer to the platform device
+ * @pipe_cfg: pointer to the vimc pipeline configuration structure
+ * @ent_devs: array of vimc_ent_device pointers
+ * @mdev: the associated media_device parent
+ * @v4l2_dev: Internal v4l2 parent device
*/
-struct media_pad *vimc_pads_init(u16 num_pads,
- const unsigned long *pads_flag);
+struct vimc_device {
+ struct platform_device pdev;
+ const struct vimc_pipeline_config *pipe_cfg;
+ struct vimc_ent_device **ent_devs;
+ struct media_device mdev;
+ struct v4l2_device v4l2_dev;
+};
/**
- * vimc_pads_cleanup - free pads
- *
- * @pads: pointer to the pads
- *
- * Helper function to free the pads initialized with vimc_pads_init
+ * struct vimc_ent_config - Structure which describes individual
+ * configuration for each entity
+ *
+ * @name: entity name
+ * @ved: pointer to vimc_ent_device (a node in the
+ * topology)
+ * @add: subdev add hook - initializes and registers
+ * subdev called from vimc-core
+ * @rm: subdev rm hook - unregisters and frees
+ * subdev called from vimc-core
*/
-static inline void vimc_pads_cleanup(struct media_pad *pads)
-{
- kfree(pads);
-}
+struct vimc_ent_config {
+ const char *name;
+ struct vimc_ent_device *(*add)(struct vimc_device *vimc,
+ const char *vcfg_name);
+ void (*rm)(struct vimc_device *vimc, struct vimc_ent_device *ved);
+};
/**
- * vimc_pipeline_s_stream - start stream through the pipeline
+ * vimc_is_source - returns true if the entity has only source pads
*
- * @ent: the pointer to struct media_entity for the node
- * @enable: 1 to start the stream and 0 to stop
+ * @ent: pointer to &struct media_entity
*
- * Helper function to call the s_stream of the subdevices connected
- * in all the sink pads of the entity
*/
-int vimc_pipeline_s_stream(struct media_entity *ent, int enable);
+bool vimc_is_source(struct media_entity *ent);
+
+/* prototypes for vimc_ent_config add and rm hooks */
+struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
/**
* vimc_pix_map_by_index - get vimc_pix_map struct by its index
@@ -176,7 +195,8 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
* unique.
* @function: media entity function defined by MEDIA_ENT_F_* macros
* @num_pads: number of pads to initialize
- * @pads_flag: flags to use in each pad
+ * @pads: the array of pads of the entity, the caller should set the
+ * flags of the pads
* @sd_int_ops: pointer to &struct v4l2_subdev_internal_ops
* @sd_ops: pointer to &struct v4l2_subdev_ops.
*
@@ -189,29 +209,17 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
const char *const name,
u32 function,
u16 num_pads,
- const unsigned long *pads_flag,
+ struct media_pad *pads,
const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops);
/**
- * vimc_ent_sd_unregister - cleanup and unregister a subdev node
- *
- * @ved: the vimc_ent_device struct to be cleaned up
- * @sd: the v4l2_subdev struct to be unregistered
- *
- * Helper function cleanup and unregister the struct vimc_ent_device and struct
- * v4l2_subdev which represents a subdev node in the topology
- */
-void vimc_ent_sd_unregister(struct vimc_ent_device *ved,
- struct v4l2_subdev *sd);
-
-/**
- * vimc_link_validate - validates a media link
+ * vimc_vdev_link_validate - validates a media link
*
* @link: pointer to &struct media_link
*
* This function calls validates if a media link is valid for streaming.
*/
-int vimc_link_validate(struct media_link *link);
+int vimc_vdev_link_validate(struct media_link *link);
#endif
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 571c55aa0e16..97a272f3350a 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -5,7 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -24,29 +23,6 @@
.flags = link_flags, \
}
-struct vimc_device {
- /* The platform device */
- struct platform_device pdev;
-
- /* The pipeline configuration */
- const struct vimc_pipeline_config *pipe_cfg;
-
- /* The Associated media_device parent */
- struct media_device mdev;
-
- /* Internal v4l2 parent device*/
- struct v4l2_device v4l2_dev;
-
- /* Subdevices */
- struct platform_device **subdevs;
-};
-
-/* Structure which describes individual configuration for each entity */
-struct vimc_ent_config {
- const char *name;
- const char *drv;
-};
-
/* Structure which describes links between entities */
struct vimc_ent_link {
unsigned int src_ent;
@@ -68,43 +44,52 @@ struct vimc_pipeline_config {
* Topology Configuration
*/
-static const struct vimc_ent_config ent_config[] = {
+static struct vimc_ent_config ent_config[] = {
{
.name = "Sensor A",
- .drv = "vimc-sensor",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Sensor B",
- .drv = "vimc-sensor",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Debayer A",
- .drv = "vimc-debayer",
+ .add = vimc_deb_add,
+ .rm = vimc_deb_rm,
},
{
.name = "Debayer B",
- .drv = "vimc-debayer",
+ .add = vimc_deb_add,
+ .rm = vimc_deb_rm,
},
{
.name = "Raw Capture 0",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
{
.name = "Raw Capture 1",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
{
- .name = "RGB/YUV Input",
/* TODO: change this to vimc-input when it is implemented */
- .drv = "vimc-sensor",
+ .name = "RGB/YUV Input",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Scaler",
- .drv = "vimc-scaler",
+ .add = vimc_sca_add,
+ .rm = vimc_sca_rm,
},
{
.name = "RGB/YUV Capture",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
};
@@ -127,7 +112,7 @@ static const struct vimc_ent_link ent_links[] = {
VIMC_ENT_LINK(7, 1, 8, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
};
-static const struct vimc_pipeline_config pipe_cfg = {
+static struct vimc_pipeline_config pipe_cfg = {
.ents = ent_config,
.num_ents = ARRAY_SIZE(ent_config),
.links = ent_links,
@@ -136,6 +121,14 @@ static const struct vimc_pipeline_config pipe_cfg = {
/* -------------------------------------------------------------------------- */
+static void vimc_rm_links(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ media_entity_remove_links(vimc->ent_devs[i]->ent);
+}
+
static int vimc_create_links(struct vimc_device *vimc)
{
unsigned int i;
@@ -144,32 +137,56 @@ static int vimc_create_links(struct vimc_device *vimc)
/* Initialize the links between entities */
for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
- /*
- * TODO: Check another way of retrieving ved struct without
- * relying on platform_get_drvdata
- */
+
struct vimc_ent_device *ved_src =
- platform_get_drvdata(vimc->subdevs[link->src_ent]);
+ vimc->ent_devs[link->src_ent];
struct vimc_ent_device *ved_sink =
- platform_get_drvdata(vimc->subdevs[link->sink_ent]);
+ vimc->ent_devs[link->sink_ent];
ret = media_create_pad_link(ved_src->ent, link->src_pad,
ved_sink->ent, link->sink_pad,
link->flags);
if (ret)
- return ret;
+ goto err_rm_links;
}
return 0;
+
+err_rm_links:
+ vimc_rm_links(vimc);
+ return ret;
}
-static int vimc_comp_bind(struct device *master)
+static int vimc_add_subdevs(struct vimc_device *vimc)
{
- struct vimc_device *vimc = container_of(to_platform_device(master),
- struct vimc_device, pdev);
- int ret;
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ dev_dbg(&vimc->pdev.dev, "new entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ vimc->ent_devs[i] = vimc->pipe_cfg->ents[i].add(vimc,
+ vimc->pipe_cfg->ents[i].name);
+ if (!vimc->ent_devs[i]) {
+ dev_err(&vimc->pdev.dev, "add new entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void vimc_rm_subdevs(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ if (vimc->ent_devs[i])
+ vimc->pipe_cfg->ents[i].rm(vimc, vimc->ent_devs[i]);
+}
- dev_dbg(master, "bind");
+static int vimc_register_devices(struct vimc_device *vimc)
+{
+ int ret;
/* Register the v4l2 struct */
ret = v4l2_device_register(vimc->mdev.dev, &vimc->v4l2_dev);
@@ -179,22 +196,31 @@ static int vimc_comp_bind(struct device *master)
return ret;
}
- /* Bind subdevices */
- ret = component_bind_all(master, &vimc->v4l2_dev);
- if (ret)
+ /* allocate ent_devs */
+ vimc->ent_devs = kcalloc(vimc->pipe_cfg->num_ents,
+ sizeof(*vimc->ent_devs), GFP_KERNEL);
+ if (!vimc->ent_devs) {
+ ret = -ENOMEM;
goto err_v4l2_unregister;
+ }
+
+ /* Invoke entity config hooks to initialize and register subdevs */
+ ret = vimc_add_subdevs(vimc);
+ if (ret)
+ /* remove subdevs that got added */
+ goto err_rm_subdevs;
/* Initialize links */
ret = vimc_create_links(vimc);
if (ret)
- goto err_comp_unbind_all;
+ goto err_rm_subdevs;
/* Register the media device */
ret = media_device_register(&vimc->mdev);
if (ret) {
dev_err(vimc->mdev.dev,
"media device register failed (err=%d)\n", ret);
- goto err_comp_unbind_all;
+ goto err_rm_subdevs;
}
/* Expose all subdev's nodes */
@@ -211,98 +237,32 @@ static int vimc_comp_bind(struct device *master)
err_mdev_unregister:
media_device_unregister(&vimc->mdev);
media_device_cleanup(&vimc->mdev);
-err_comp_unbind_all:
- component_unbind_all(master, NULL);
+err_rm_subdevs:
+ vimc_rm_subdevs(vimc);
+ kfree(vimc->ent_devs);
err_v4l2_unregister:
v4l2_device_unregister(&vimc->v4l2_dev);
return ret;
}
-static void vimc_comp_unbind(struct device *master)
+static void vimc_unregister(struct vimc_device *vimc)
{
- struct vimc_device *vimc = container_of(to_platform_device(master),
- struct vimc_device, pdev);
-
- dev_dbg(master, "unbind");
-
media_device_unregister(&vimc->mdev);
media_device_cleanup(&vimc->mdev);
- component_unbind_all(master, NULL);
v4l2_device_unregister(&vimc->v4l2_dev);
+ kfree(vimc->ent_devs);
}
-static int vimc_comp_compare(struct device *comp, void *data)
-{
- return comp == data;
-}
-
-static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
-{
- struct component_match *match = NULL;
- struct vimc_platform_data pdata;
- int i;
-
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
- dev_dbg(&vimc->pdev.dev, "new pdev for %s\n",
- vimc->pipe_cfg->ents[i].drv);
-
- strscpy(pdata.entity_name, vimc->pipe_cfg->ents[i].name,
- sizeof(pdata.entity_name));
-
- vimc->subdevs[i] = platform_device_register_data(&vimc->pdev.dev,
- vimc->pipe_cfg->ents[i].drv,
- PLATFORM_DEVID_AUTO,
- &pdata,
- sizeof(pdata));
- if (IS_ERR(vimc->subdevs[i])) {
- match = ERR_CAST(vimc->subdevs[i]);
- while (--i >= 0)
- platform_device_unregister(vimc->subdevs[i]);
-
- return match;
- }
-
- component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
- &vimc->subdevs[i]->dev);
- }
-
- return match;
-}
-
-static void vimc_rm_subdevs(struct vimc_device *vimc)
-{
- unsigned int i;
-
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
- platform_device_unregister(vimc->subdevs[i]);
-}
-
-static const struct component_master_ops vimc_comp_ops = {
- .bind = vimc_comp_bind,
- .unbind = vimc_comp_unbind,
-};
-
static int vimc_probe(struct platform_device *pdev)
{
struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
- struct component_match *match = NULL;
int ret;
dev_dbg(&pdev->dev, "probe");
memset(&vimc->mdev, 0, sizeof(vimc->mdev));
- /* Create platform_device for each entity in the topology*/
- vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
- sizeof(*vimc->subdevs), GFP_KERNEL);
- if (!vimc->subdevs)
- return -ENOMEM;
-
- match = vimc_add_subdevs(vimc);
- if (IS_ERR(match))
- return PTR_ERR(match);
-
/* Link the media device within the v4l2_device */
vimc->v4l2_dev.mdev = &vimc->mdev;
@@ -314,12 +274,9 @@ static int vimc_probe(struct platform_device *pdev)
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
- /* Add self to the component system */
- ret = component_master_add_with_match(&pdev->dev, &vimc_comp_ops,
- match);
+ ret = vimc_register_devices(vimc);
if (ret) {
media_device_cleanup(&vimc->mdev);
- vimc_rm_subdevs(vimc);
return ret;
}
@@ -332,8 +289,8 @@ static int vimc_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "remove");
- component_master_del(&pdev->dev, &vimc_comp_ops);
vimc_rm_subdevs(vimc);
+ vimc_unregister(vimc);
return 0;
}
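For reference, a minimal user-space sketch of the add/rm hook table that the vimc core switches to above, with roll-back of already-added entries on failure; the names (ent_hook, demo_add, demo_rm) are illustrative only and do not exist in the driver.

#include <stdio.h>
#include <stdlib.h>

struct ent;                                     /* opaque "entity" handle */

struct ent_hook {
	const char *name;
	struct ent *(*add)(const char *name);
	void (*rm)(struct ent *ent);
};

static struct ent *demo_add(const char *name)
{
	printf("add %s\n", name);
	return malloc(1);                       /* stands in for a registered subdev */
}

static void demo_rm(struct ent *ent)
{
	free(ent);
}

static const struct ent_hook hooks[] = {
	{ "Sensor A", demo_add, demo_rm },
	{ "Scaler",   demo_add, demo_rm },
};

int main(void)
{
	struct ent *ents[2] = { NULL };
	size_t i;

	for (i = 0; i < 2; i++) {
		ents[i] = hooks[i].add(hooks[i].name);
		if (!ents[i])
			goto err_rm;            /* roll back whatever was added */
	}
	return 0;

err_rm:
	for (i = 0; i < 2; i++)
		if (ents[i])
			hooks[i].rm(ents[i]);
	return 1;
}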
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index b72b8385067b..5d1b67d684bb 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -5,28 +5,16 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
-#define VIMC_DEB_DRV_NAME "vimc-debayer"
-
-static unsigned int deb_mean_win_size = 3;
-module_param(deb_mean_win_size, uint, 0000);
-MODULE_PARM_DESC(deb_mean_win_size, " the window size to calculate the mean.\n"
- "NOTE: the window size needs to be an odd number, as the main pixel "
- "stays in the center of the window, otherwise the next odd number "
- "is considered");
-
-#define IS_SINK(pad) (!pad)
-#define IS_SRC(pad) (pad)
-
enum vimc_deb_rgb_colors {
VIMC_DEB_RED = 0,
VIMC_DEB_GREEN = 1,
@@ -41,7 +29,6 @@ struct vimc_deb_pix_map {
struct vimc_deb_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
/* The active format */
struct v4l2_mbus_framefmt sink_fmt;
u32 src_code;
@@ -51,6 +38,9 @@ struct vimc_deb_device {
u8 *src_frame;
const struct vimc_deb_pix_map *sink_pix_map;
unsigned int sink_bpp;
+ unsigned int mean_win_size;
+ struct v4l2_ctrl_handler hdl;
+ struct media_pad pads[2];
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
@@ -159,7 +149,7 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
/* We only support one format for source pads */
- if (IS_SRC(code->pad)) {
+ if (VIMC_IS_SRC(code->pad)) {
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
if (code->index)
@@ -185,7 +175,7 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index)
return -EINVAL;
- if (IS_SINK(fse->pad)) {
+ if (VIMC_IS_SINK(fse->pad)) {
const struct vimc_deb_pix_map *vpix =
vimc_deb_pix_map_by_code(fse->code);
@@ -215,7 +205,7 @@ static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
vdeb->sink_fmt;
/* Set the right code for the source pad */
- if (IS_SRC(fmt->pad))
+ if (VIMC_IS_SRC(fmt->pad))
fmt->format.code = vdeb->src_code;
return 0;
@@ -262,7 +252,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
* Do not change the format of the source pad,
* it is propagated from the sink
*/
- if (IS_SRC(fmt->pad)) {
+ if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
/* TODO: Add support for other formats */
fmt->format.code = vdeb->src_code;
@@ -270,7 +260,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
/* Set the new format in the sink pad */
vimc_deb_adjust_sink_fmt(&fmt->format);
- dev_dbg(vdeb->dev, "%s: sink format update: "
+ dev_dbg(vdeb->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdeb->sd.name,
/* old */
@@ -351,11 +341,18 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
+static const struct v4l2_subdev_core_ops vimc_deb_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops vimc_deb_video_ops = {
.s_stream = vimc_deb_s_stream,
};
static const struct v4l2_subdev_ops vimc_deb_ops = {
+ .core = &vimc_deb_core_ops,
.pad = &vimc_deb_pad_ops,
.video = &vimc_deb_video_ops,
};
@@ -389,11 +386,11 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
* the top left corner of the mean window (considering the current
* pixel as the center)
*/
- seek = deb_mean_win_size / 2;
+ seek = vdeb->mean_win_size / 2;
/* Sum the values of the colors in the mean window */
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: --- Calc pixel %dx%d, window mean %d, seek %d ---\n",
vdeb->sd.name, lin, col, vdeb->sink_fmt.height, seek);
@@ -426,7 +423,7 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
vdeb->sink_fmt.width,
vdeb->sink_bpp);
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: RGB CALC: frame index %d, win pos %dx%d, color %d\n",
vdeb->sd.name, index, wlin, wcol, color);
@@ -437,21 +434,21 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
/* Save how many values we already added */
n_rgb[color]++;
- dev_dbg(vdeb->dev, "deb: %s: RGB CALC: val %d, n %d\n",
+ dev_dbg(vdeb->ved.dev, "deb: %s: RGB CALC: val %d, n %d\n",
vdeb->sd.name, rgb[color], n_rgb[color]);
}
}
/* Calculate the mean */
for (i = 0; i < 3; i++) {
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: PRE CALC: %dx%d Color %d, val %d, n %d\n",
vdeb->sd.name, lin, col, i, rgb[i], n_rgb[i]);
if (n_rgb[i])
rgb[i] = rgb[i] / n_rgb[i];
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: FINAL CALC: %dx%d Color %d, val %d\n",
vdeb->sd.name, lin, col, i, rgb[i]);
}
@@ -476,14 +473,34 @@ static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
}
return vdeb->src_frame;
+}
+static int vimc_deb_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vimc_deb_device *vdeb =
+ container_of(ctrl->handler, struct vimc_deb_device, hdl);
+
+ switch (ctrl->id) {
+ case VIMC_CID_MEAN_WIN_SIZE:
+ vdeb->mean_win_size = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
+static const struct v4l2_ctrl_ops vimc_deb_ctrl_ops = {
+ .s_ctrl = vimc_deb_s_ctrl,
+};
+
static void vimc_deb_release(struct v4l2_subdev *sd)
{
struct vimc_deb_device *vdeb =
container_of(sd, struct vimc_deb_device, sd);
+ v4l2_ctrl_handler_free(&vdeb->hdl);
+ media_entity_cleanup(vdeb->ved.ent);
kfree(vdeb);
}
@@ -491,44 +508,69 @@ static const struct v4l2_subdev_internal_ops vimc_deb_int_ops = {
.release = vimc_deb_release,
};
-static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
- ved);
+ struct vimc_deb_device *vdeb;
- vimc_ent_sd_unregister(ved, &vdeb->sd);
+ vdeb = container_of(ved, struct vimc_deb_device, ved);
+ v4l2_device_unregister_subdev(&vdeb->sd);
}
-static int vimc_deb_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+static const struct v4l2_ctrl_config vimc_deb_ctrl_class = {
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
+ .id = VIMC_CID_VIMC_CLASS,
+ .name = "VIMC Controls",
+ .type = V4L2_CTRL_TYPE_CTRL_CLASS,
+};
+
+static const struct v4l2_ctrl_config vimc_deb_ctrl_mean_win_size = {
+ .ops = &vimc_deb_ctrl_ops,
+ .id = VIMC_CID_MEAN_WIN_SIZE,
+ .name = "Debayer Mean Window Size",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 25,
+ .step = 2,
+ .def = 3,
+};
+
+struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_deb_device *vdeb;
int ret;
/* Allocate the vdeb struct */
vdeb = kzalloc(sizeof(*vdeb), GFP_KERNEL);
if (!vdeb)
- return -ENOMEM;
+ return NULL;
+
+ /* Create controls: */
+ v4l2_ctrl_handler_init(&vdeb->hdl, 2);
+ v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_class, NULL);
+ v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_mean_win_size, NULL);
+ vdeb->sd.ctrl_handler = &vdeb->hdl;
+ if (vdeb->hdl.error) {
+ ret = vdeb->hdl.error;
+ goto err_free_vdeb;
+ }
/* Initialize ved and sd */
+ vdeb->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vdeb->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
- pdata->entity_name,
+ vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
- (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
+ vdeb->pads,
&vimc_deb_int_ops, &vimc_deb_ops);
- if (ret) {
- kfree(vdeb);
- return ret;
- }
+ if (ret)
+ goto err_free_hdl;
vdeb->ved.process_frame = vimc_deb_process_frame;
- dev_set_drvdata(comp, &vdeb->ved);
- vdeb->dev = comp;
+ vdeb->ved.dev = &vimc->pdev.dev;
+ vdeb->mean_win_size = vimc_deb_ctrl_mean_win_size.def;
/* Initialize the frame format */
vdeb->sink_fmt = sink_fmt_default;
@@ -541,46 +583,12 @@ static int vimc_deb_comp_bind(struct device *comp, struct device *master,
vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
- return 0;
-}
-
-static const struct component_ops vimc_deb_comp_ops = {
- .bind = vimc_deb_comp_bind,
- .unbind = vimc_deb_comp_unbind,
-};
+ return &vdeb->ved;
-static int vimc_deb_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_deb_comp_ops);
-}
-
-static int vimc_deb_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_deb_comp_ops);
+err_free_hdl:
+ v4l2_ctrl_handler_free(&vdeb->hdl);
+err_free_vdeb:
+ kfree(vdeb);
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_deb_driver_ids[] = {
- {
- .name = VIMC_DEB_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_deb_pdrv = {
- .probe = vimc_deb_probe,
- .remove = vimc_deb_remove,
- .id_table = vimc_deb_driver_ids,
- .driver = {
- .name = VIMC_DEB_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_deb_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Debayer");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
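A small sketch of the staged goto-unwind shape that vimc_deb_add() follows above: each acquired resource gets a matching label, and a failure jumps to the label that releases everything acquired so far, in reverse order. acquire_ctrls() and register_sd() are stand-ins for illustration, not kernel APIs.

#include <stdlib.h>

struct deb { void *ctrls; void *sd; };

static int acquire_ctrls(struct deb *d) { d->ctrls = malloc(1); return d->ctrls ? 0 : -1; }
static int register_sd(struct deb *d)   { d->sd = malloc(1);    return d->sd ? 0 : -1; }

struct deb *deb_add(void)
{
	struct deb *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	if (acquire_ctrls(d))
		goto err_free_deb;
	if (register_sd(d))
		goto err_free_ctrls;
	return d;

err_free_ctrls:
	free(d->ctrls);
err_free_deb:
	free(d);
	return NULL;
}

int main(void)
{
	struct deb *d = deb_add();

	if (d) {
		free(d->ctrls);
		free(d->sd);
		free(d);
	}
	return 0;
}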
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index 49ab8d9dd9c9..2f88a7d9d67b 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -5,30 +5,22 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
-#define VIMC_SCA_DRV_NAME "vimc-scaler"
-
static unsigned int sca_mult = 3;
module_param(sca_mult, uint, 0000);
MODULE_PARM_DESC(sca_mult, " the image size multiplier");
-#define IS_SINK(pad) (!pad)
-#define IS_SRC(pad) (pad)
#define MAX_ZOOM 8
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
/* NOTE: the source fmt is the same as the sink
* with the width and height multiplied by mult
*/
@@ -37,6 +29,7 @@ struct vimc_sca_device {
u8 *src_frame;
unsigned int src_line_size;
unsigned int bpp;
+ struct media_pad pads[2];
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
@@ -98,7 +91,7 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
- if (IS_SINK(fse->pad)) {
+ if (VIMC_IS_SINK(fse->pad)) {
fse->max_width = VIMC_FRAME_MAX_WIDTH;
fse->max_height = VIMC_FRAME_MAX_HEIGHT;
} else {
@@ -121,7 +114,7 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
vsca->sink_fmt;
/* Scale the frame size for the source pad */
- if (IS_SRC(format->pad)) {
+ if (VIMC_IS_SRC(format->pad)) {
format->format.width = vsca->sink_fmt.width * sca_mult;
format->format.height = vsca->sink_fmt.height * sca_mult;
}
@@ -170,7 +163,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
* Do not change the format of the source pad,
* it is propagated from the sink
*/
- if (IS_SRC(fmt->pad)) {
+ if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
fmt->format.width = sink_fmt->width * sca_mult;
fmt->format.height = sink_fmt->height * sca_mult;
@@ -178,7 +171,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
/* Set the new format in the sink pad */
vimc_sca_adjust_sink_fmt(&fmt->format);
- dev_dbg(vsca->dev, "%s: sink format update: "
+ dev_dbg(vsca->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
/* old */
@@ -278,7 +271,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
vsca->bpp);
pixel = &sink_frame[index];
- dev_dbg(vsca->dev,
+ dev_dbg(vsca->ved.dev,
"sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
vsca->sd.name, lin, col, index);
@@ -288,7 +281,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
vsca->sink_fmt.width * sca_mult, vsca->bpp);
- dev_dbg(vsca->dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
+ dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
vsca->sd.name, lin * sca_mult, col * sca_mult, index);
/* Repeat this pixel mult times */
@@ -297,7 +290,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
* pixel repetition in a line
*/
for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
- dev_dbg(vsca->dev,
+ dev_dbg(vsca->ved.dev,
"sca: %s: sca: scale_pix src pos %d\n",
vsca->sd.name, index + j);
@@ -343,6 +336,7 @@ static void vimc_sca_release(struct v4l2_subdev *sd)
struct vimc_sca_device *vsca =
container_of(sd, struct vimc_sca_device, sd);
+ media_entity_cleanup(vsca->ved.ent);
kfree(vsca);
}
@@ -350,89 +344,45 @@ static const struct v4l2_subdev_internal_ops vimc_sca_int_ops = {
.release = vimc_sca_release,
};
-static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
- ved);
+ struct vimc_sca_device *vsca;
- vimc_ent_sd_unregister(ved, &vsca->sd);
+ vsca = container_of(ved, struct vimc_sca_device, ved);
+ v4l2_device_unregister_subdev(&vsca->sd);
}
-
-static int vimc_sca_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sca_device *vsca;
int ret;
/* Allocate the vsca struct */
vsca = kzalloc(sizeof(*vsca), GFP_KERNEL);
if (!vsca)
- return -ENOMEM;
+ return NULL;
/* Initialize ved and sd */
+ vsca->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vsca->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
- pdata->entity_name,
+ vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_SCALER, 2,
- (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
+ vsca->pads,
&vimc_sca_int_ops, &vimc_sca_ops);
if (ret) {
kfree(vsca);
- return ret;
+ return NULL;
}
vsca->ved.process_frame = vimc_sca_process_frame;
- dev_set_drvdata(comp, &vsca->ved);
- vsca->dev = comp;
+ vsca->ved.dev = &vimc->pdev.dev;
/* Initialize the frame format */
vsca->sink_fmt = sink_fmt_default;
- return 0;
-}
-
-static const struct component_ops vimc_sca_comp_ops = {
- .bind = vimc_sca_comp_bind,
- .unbind = vimc_sca_comp_unbind,
-};
-
-static int vimc_sca_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_sca_comp_ops);
-}
-
-static int vimc_sca_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_sca_comp_ops);
-
- return 0;
+ return &vsca->ved;
}
-
-static const struct platform_device_id vimc_sca_driver_ids[] = {
- {
- .name = VIMC_SCA_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_sca_pdrv = {
- .probe = vimc_sca_probe,
- .remove = vimc_sca_remove,
- .id_table = vimc_sca_driver_ids,
- .driver = {
- .name = VIMC_SCA_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_sca_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Scaler");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 6c53b9fc1617..32380f504591 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
#include <linux/v4l2-mediabus.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
@@ -18,18 +14,15 @@
#include "vimc-common.h"
-#define VIMC_SEN_DRV_NAME "vimc-sensor"
-
struct vimc_sen_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
struct tpg_data tpg;
- struct task_struct *kthread_sen;
u8 *frame;
/* The active format */
struct v4l2_mbus_framefmt mbus_format;
struct v4l2_ctrl_handler hdl;
+ struct media_pad pad;
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -164,7 +157,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
/* Set the new format */
vimc_sen_adjust_fmt(&fmt->format);
- dev_dbg(vsen->dev, "%s: format update: "
+ dev_dbg(vsen->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsen->sd.name,
/* old */
@@ -208,10 +201,6 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
const struct vimc_pix_map *vpix;
unsigned int frame_size;
- if (vsen->kthread_sen)
- /* tpg is already executing */
- return 0;
-
/* Calculate the frame size */
vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
frame_size = vsen->mbus_format.width * vpix->bpp *
@@ -297,6 +286,7 @@ static void vimc_sen_release(struct v4l2_subdev *sd)
v4l2_ctrl_handler_free(&vsen->hdl);
tpg_free(&vsen->tpg);
+ media_entity_cleanup(vsen->ved.ent);
kfree(vsen);
}
@@ -304,14 +294,12 @@ static const struct v4l2_subdev_internal_ops vimc_sen_int_ops = {
.release = vimc_sen_release,
};
-static void vimc_sen_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_sen_device *vsen =
- container_of(ved, struct vimc_sen_device, ved);
+ struct vimc_sen_device *vsen;
- vimc_ent_sd_unregister(ved, &vsen->sd);
+ vsen = container_of(ved, struct vimc_sen_device, ved);
+ v4l2_device_unregister_subdev(&vsen->sd);
}
/* Image Processing Controls */
@@ -331,18 +319,17 @@ static const struct v4l2_ctrl_config vimc_sen_ctrl_test_pattern = {
.qmenu = tpg_pattern_strings,
};
-static int vimc_sen_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sen_device *vsen;
int ret;
/* Allocate the vsen struct */
vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
if (!vsen)
- return -ENOMEM;
+ return NULL;
v4l2_ctrl_handler_init(&vsen->hdl, 4);
@@ -366,78 +353,36 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
goto err_free_vsen;
}
+ /* Initialize the test pattern generator */
+ tpg_init(&vsen->tpg, vsen->mbus_format.width,
+ vsen->mbus_format.height);
+ ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
+ if (ret)
+ goto err_free_hdl;
+
/* Initialize ved and sd */
+ vsen->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev,
- pdata->entity_name,
- MEDIA_ENT_F_CAM_SENSOR, 1,
- (const unsigned long[1]) {MEDIA_PAD_FL_SOURCE},
+ vcfg_name,
+ MEDIA_ENT_F_CAM_SENSOR, 1, &vsen->pad,
&vimc_sen_int_ops, &vimc_sen_ops);
if (ret)
- goto err_free_hdl;
+ goto err_free_tpg;
vsen->ved.process_frame = vimc_sen_process_frame;
- dev_set_drvdata(comp, &vsen->ved);
- vsen->dev = comp;
+ vsen->ved.dev = &vimc->pdev.dev;
/* Initialize the frame format */
vsen->mbus_format = fmt_default;
- /* Initialize the test pattern generator */
- tpg_init(&vsen->tpg, vsen->mbus_format.width,
- vsen->mbus_format.height);
- ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
- if (ret)
- goto err_unregister_ent_sd;
-
- return 0;
+ return &vsen->ved;
-err_unregister_ent_sd:
- vimc_ent_sd_unregister(&vsen->ved, &vsen->sd);
+err_free_tpg:
+ tpg_free(&vsen->tpg);
err_free_hdl:
v4l2_ctrl_handler_free(&vsen->hdl);
err_free_vsen:
kfree(vsen);
- return ret;
-}
-
-static const struct component_ops vimc_sen_comp_ops = {
- .bind = vimc_sen_comp_bind,
- .unbind = vimc_sen_comp_unbind,
-};
-
-static int vimc_sen_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_sen_comp_ops);
-}
-
-static int vimc_sen_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_sen_comp_ops);
-
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_sen_driver_ids[] = {
- {
- .name = VIMC_SEN_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_sen_pdrv = {
- .probe = vimc_sen_probe,
- .remove = vimc_sen_remove,
- .id_table = vimc_sen_driver_ids,
- .driver = {
- .name = VIMC_SEN_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_sen_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Sensor");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index 048d770e498b..cd6b55433c9e 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -7,7 +7,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -97,17 +96,26 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
sd = media_entity_to_v4l2_subdev(ved->ent);
ret = v4l2_subdev_call(sd, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD) {
- pr_err("subdev_call error %s\n",
- ved->ent->name);
+ dev_err(ved->dev, "subdev_call error %s\n",
+ ved->ent->name);
vimc_streamer_pipeline_terminate(stream);
return ret;
}
}
entity = vimc_get_source_entity(ved->ent);
- /* Check if the end of the pipeline was reached*/
- if (!entity)
+ /* Check if the end of the pipeline was reached */
+ if (!entity) {
+ /* the first entity of the pipe should be source only */
+ if (!vimc_is_source(ved->ent)) {
+ dev_err(ved->dev,
+ "first entity in the pipe '%s' is not a source\n",
+ ved->ent->name);
+ vimc_streamer_pipeline_terminate(stream);
+ return -EPIPE;
+ }
return 0;
+ }
/* Get the next device in the pipeline */
if (is_media_entity_v4l2_subdev(entity)) {
@@ -217,4 +225,3 @@ int vimc_streamer_s_stream(struct vimc_stream *stream,
return 0;
}
-EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
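A sketch of the termination check added to the streamer above: walking upstream from the capture entity must end at a node flagged as a source, otherwise the pipeline is rejected (the driver returns -EPIPE at that point). The node type and helper here are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	bool is_source;
	struct node *upstream;          /* NULL once the head of the pipe is reached */
};

static int pipeline_init(struct node *capture)
{
	struct node *n = capture;

	while (n->upstream)
		n = n->upstream;

	if (!n->is_source) {
		fprintf(stderr, "first entity '%s' is not a source\n", n->name);
		return -1;              /* the driver returns -EPIPE here */
	}
	return 0;
}

int main(void)
{
	struct node sen = { "Sensor A", true, NULL };
	struct node cap = { "Raw Capture 0", false, &sen };

	return pipeline_init(&cap) ? 1 : 0;
}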
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index 2f5762e3309a..e8a50c506dc9 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -3,7 +3,7 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
- vivid-osd.o
+ vivid-osd.o vivid-meta-cap.o vivid-meta-out.o
ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
vivid-objs += vivid-cec.o
endif
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index 4d822dbed972..4d2413e87730 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -276,12 +276,11 @@ struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
bool is_source)
{
- char name[sizeof(dev->vid_out_dev.name) + 2];
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
+ char name[32];
- snprintf(name, sizeof(name), "%s%d",
- is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
- idx);
+ snprintf(name, sizeof(name), "vivid-%03d-vid-%s%d",
+ dev->inst, is_source ? "out" : "cap", idx);
return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
name, caps, 1);
}
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 53315c8dd2bb..c184f9b0be69 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -37,6 +37,8 @@
#include "vivid-osd.h"
#include "vivid-cec.h"
#include "vivid-ctrls.h"
+#include "vivid-meta-cap.h"
+#include "vivid-meta-out.h"
#define VIVID_MODULE_NAME "vivid"
@@ -79,6 +81,14 @@ static int radio_tx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(radio_tx_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_tx_nr, " radioX start number, -1 is autodetect");
+static int meta_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(meta_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(meta_cap_nr, " videoX start number, -1 is autodetect");
+
+static int meta_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(meta_out_nr, int, NULL, 0444);
+MODULE_PARM_DESC(meta_out_nr, " videoX start number, -1 is autodetect");
+
static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(ccs_cap_mode, int, NULL, 0444);
MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
@@ -95,10 +105,15 @@ static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1
module_param_array(multiplanar, uint, NULL, 0444);
MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device.");
-/* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */
-static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d };
+/*
+ * Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr +
+ * vbi-out + vid-out + meta-cap + meta-out
+ */
+static unsigned int node_types[VIVID_MAX_DEVS] = {
+ [0 ... (VIVID_MAX_DEVS - 1)] = 0x61d3d
+};
module_param_array(node_types, uint, NULL, 0444);
-MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the following meaning:\n"
+MODULE_PARM_DESC(node_types, " node types, default is 0x61d3d. Bitmask with the following meaning:\n"
"\t\t bit 0: Video Capture node\n"
"\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 4: Radio Receiver node\n"
@@ -106,7 +121,9 @@ MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the f
"\t\t bit 8: Video Output node\n"
"\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 12: Radio Transmitter node\n"
- "\t\t bit 16: Framebuffer for testing overlays");
+ "\t\t bit 16: Framebuffer for testing overlays\n"
+ "\t\t bit 17: Metadata Capture node\n"
+ "\t\t bit 18: Metadata Output node\n");
/* Default: 4 inputs */
static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
@@ -205,7 +222,8 @@ static int vidioc_querycap(struct file *file, void *priv,
cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
dev->vbi_cap_caps | dev->vbi_out_caps |
dev->radio_rx_caps | dev->radio_tx_caps |
- dev->sdr_cap_caps | V4L2_CAP_DEVICE_CAPS;
+ dev->sdr_cap_caps | dev->meta_cap_caps |
+ dev->meta_out_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -433,7 +451,9 @@ static bool vivid_is_last_user(struct vivid_dev *dev)
vivid_is_in_use(&dev->vbi_out_dev) +
vivid_is_in_use(&dev->sdr_cap_dev) +
vivid_is_in_use(&dev->radio_rx_dev) +
- vivid_is_in_use(&dev->radio_tx_dev);
+ vivid_is_in_use(&dev->radio_tx_dev) +
+ vivid_is_in_use(&dev->meta_cap_dev) +
+ vivid_is_in_use(&dev->meta_out_dev);
return uses == 1;
}
@@ -459,6 +479,8 @@ static int vivid_fop_release(struct file *file)
set_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags);
}
mutex_unlock(&dev->mutex);
if (file->private_data == dev->overlay_cap_owner)
@@ -604,6 +626,16 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_log_status = vidioc_log_status,
.vidioc_subscribe_event = vidioc_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_enum_fmt_meta_cap = vidioc_enum_fmt_meta_cap,
+ .vidioc_g_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+ .vidioc_s_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+ .vidioc_try_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+
+ .vidioc_enum_fmt_meta_out = vidioc_enum_fmt_meta_out,
+ .vidioc_g_fmt_meta_out = vidioc_g_fmt_meta_out,
+ .vidioc_s_fmt_meta_out = vidioc_g_fmt_meta_out,
+ .vidioc_try_fmt_meta_out = vidioc_g_fmt_meta_out,
};
/* -----------------------------------------------------------------
@@ -616,6 +648,9 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev)
vivid_free_controls(dev);
v4l2_device_unregister(&dev->v4l2_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
vfree(dev->scaled_line);
vfree(dev->blended_line);
vfree(dev->edid);
@@ -645,14 +680,44 @@ static const struct media_device_ops vivid_media_ops = {
};
#endif
+static int vivid_create_queue(struct vivid_dev *dev,
+ struct vb2_queue *q,
+ u32 buf_type,
+ unsigned int min_buffers_needed,
+ const struct vb2_ops *ops)
+{
+ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->multiplanar)
+ buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ else if (buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT && dev->multiplanar)
+ buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE && !dev->has_raw_vbi_cap)
+ buf_type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ else if (buf_type == V4L2_BUF_TYPE_VBI_OUTPUT && !dev->has_raw_vbi_out)
+ buf_type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
+
+ q->type = buf_type;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->io_modes |= V4L2_TYPE_IS_OUTPUT(buf_type) ? VB2_WRITE : VB2_READ;
+ if (allocators[dev->inst] != 1)
+ q->io_modes |= VB2_USERPTR;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = ops;
+ q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
+ &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = min_buffers_needed;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
+
+ return vb2_queue_init(q);
+}
+
static int vivid_create_instance(struct platform_device *pdev, int inst)
{
static const struct v4l2_dv_timings def_dv_timings =
V4L2_DV_BT_CEA_1280X720P60;
- static const struct vb2_mem_ops * const vivid_mem_ops[2] = {
- &vb2_vmalloc_memops,
- &vb2_dma_contig_memops,
- };
unsigned in_type_counter[4] = { 0, 0, 0, 0 };
unsigned out_type_counter[4] = { 0, 0, 0, 0 };
int ccs_cap = ccs_cap_mode[inst];
@@ -661,9 +726,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
bool has_modulator;
struct vivid_dev *dev;
struct video_device *vfd;
- struct vb2_queue *q;
unsigned node_type = node_types[inst];
- unsigned int allocator = allocators[inst];
v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
int ret;
int i;
@@ -758,6 +821,25 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_vbi_cap = dev->has_raw_vbi_cap | dev->has_sliced_vbi_cap;
}
+ /* do we create a meta capture device? */
+ dev->has_meta_cap = node_type & 0x20000;
+
+ /* sanity checks */
+ if ((in_type_counter[WEBCAM] || in_type_counter[HDMI]) &&
+ !dev->has_vid_cap && !dev->has_meta_cap) {
+ v4l2_warn(&dev->v4l2_dev,
+ "Webcam or HDMI input without video or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+ if ((in_type_counter[TV] || in_type_counter[SVID]) &&
+ !dev->has_vid_cap && !dev->has_vbi_cap && !dev->has_meta_cap) {
+ v4l2_warn(&dev->v4l2_dev,
+ "TV or S-Video input without video, VBI or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+
/* do we create a video output device? */
dev->has_vid_out = node_type & 0x0100;
@@ -768,6 +850,24 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_vbi_out = dev->has_raw_vbi_out | dev->has_sliced_vbi_out;
}
+ /* do we create a metadata output device? */
+ dev->has_meta_out = node_type & 0x40000;
+
+ /* sanity checks */
+ if (out_type_counter[SVID] &&
+ !dev->has_vid_out && !dev->has_vbi_out && !dev->has_meta_out) {
+ v4l2_warn(&dev->v4l2_dev,
+ "S-Video output without video, VBI or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+ if (out_type_counter[HDMI] && !dev->has_vid_out && !dev->has_meta_out) {
+ v4l2_warn(&dev->v4l2_dev,
+ "HDMI output without video or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+
/* do we create a radio receiver device? */
dev->has_radio_rx = node_type & 0x0010;
@@ -777,6 +877,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* do we create a software defined radio capture device? */
dev->has_sdr_cap = node_type & 0x0020;
+ /* do we have a TV tuner? */
+ dev->has_tv_tuner = in_type_counter[TV];
+
/* do we have a tuner? */
has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
dev->has_radio_rx || dev->has_sdr_cap;
@@ -828,7 +931,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vid_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
if (dev->has_audio_inputs)
dev->vid_cap_caps |= V4L2_CAP_AUDIO;
- if (in_type_counter[TV])
+ if (dev->has_tv_tuner)
dev->vid_cap_caps |= V4L2_CAP_TUNER;
}
if (dev->has_vid_out) {
@@ -849,7 +952,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vbi_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
if (dev->has_audio_inputs)
dev->vbi_cap_caps |= V4L2_CAP_AUDIO;
- if (in_type_counter[TV])
+ if (dev->has_tv_tuner)
dev->vbi_cap_caps |= V4L2_CAP_TUNER;
}
if (dev->has_vbi_out) {
@@ -875,6 +978,23 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
V4L2_CAP_READWRITE;
+ /* set up the capabilities of meta capture device */
+ if (dev->has_meta_cap) {
+ dev->meta_cap_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_inputs)
+ dev->meta_cap_caps |= V4L2_CAP_AUDIO;
+ if (dev->has_tv_tuner)
+ dev->meta_cap_caps |= V4L2_CAP_TUNER;
+ }
+ /* set up the capabilities of meta output device */
+ if (dev->has_meta_out) {
+ dev->meta_out_caps = V4L2_CAP_META_OUTPUT |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_outputs)
+ dev->meta_out_caps |= V4L2_CAP_AUDIO;
+ }
+
ret = -ENOMEM;
/* initialize the test pattern generator */
tpg_init(&dev->tpg, 640, 360);
@@ -934,6 +1054,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_AUDIO);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_AUDIO);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_ENUMAUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_ENUMAUDIO);
}
if (!dev->has_audio_outputs) {
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_AUDOUT);
@@ -942,6 +1065,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_AUDOUT);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_AUDOUT);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_ENUMAUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_AUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_AUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_ENUMAUDOUT);
}
if (!in_type_counter[TV] && !in_type_counter[SVID]) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_STD);
@@ -959,12 +1085,16 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_FREQUENCY);
}
if (!has_tuner) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_TUNER);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_TUNER);
}
if (in_type_counter[HDMI] == 0) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_EDID);
@@ -990,12 +1120,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->sdr_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMESIZES);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMEINTERVALS);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_FREQUENCY);
/* configure internal data */
dev->fmt_cap = &vivid_formats[0];
@@ -1078,6 +1211,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->vbi_cap_active);
INIT_LIST_HEAD(&dev->vbi_out_active);
INIT_LIST_HEAD(&dev->sdr_cap_active);
+ INIT_LIST_HEAD(&dev->meta_cap_active);
+ INIT_LIST_HEAD(&dev->meta_out_active);
INIT_LIST_HEAD(&dev->cec_work_list);
spin_lock_init(&dev->cec_slock);
@@ -1092,126 +1227,69 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
goto unreg_dev;
}
- if (allocator == 1)
+ if (allocators[inst] == 1)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- else if (allocator >= ARRAY_SIZE(vivid_mem_ops))
- allocator = 0;
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
- snprintf(dev->vid_cap_dev.name, sizeof(dev->vid_cap_dev.name),
- "vivid-%03d-vid-cap", inst);
/* initialize vid_cap queue */
- q = &dev->vb_vid_cap_q;
- q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
- V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vid_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vid_cap_q,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, 2,
+ &vivid_vid_cap_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vid_out) {
- snprintf(dev->vid_out_dev.name, sizeof(dev->vid_out_dev.name),
- "vivid-%03d-vid-out", inst);
/* initialize vid_out queue */
- q = &dev->vb_vid_out_q;
- q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
- V4L2_BUF_TYPE_VIDEO_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vid_out_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vid_out_q,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT, 2,
+ &vivid_vid_out_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vbi_cap) {
/* initialize vbi_cap queue */
- q = &dev->vb_vbi_cap_q;
- q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
- V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vbi_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vbi_cap_q,
+ V4L2_BUF_TYPE_VBI_CAPTURE, 2,
+ &vivid_vbi_cap_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vbi_out) {
/* initialize vbi_out queue */
- q = &dev->vb_vbi_out_q;
- q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
- V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vbi_out_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vbi_out_q,
+ V4L2_BUF_TYPE_VBI_OUTPUT, 2,
+ &vivid_vbi_out_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_sdr_cap) {
/* initialize sdr_cap queue */
- q = &dev->vb_sdr_cap_q;
- q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_sdr_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 8;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_sdr_cap_q,
+ V4L2_BUF_TYPE_SDR_CAPTURE, 8,
+ &vivid_sdr_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_meta_cap) {
+ /* initialize meta_cap queue */
+ ret = vivid_create_queue(dev, &dev->vb_meta_cap_q,
+ V4L2_BUF_TYPE_META_CAPTURE, 2,
+ &vivid_meta_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_meta_out) {
+ /* initialize meta_out queue */
+ ret = vivid_create_queue(dev, &dev->vb_meta_out_q,
+ V4L2_BUF_TYPE_META_OUTPUT, 1,
+ &vivid_meta_out_qops);
if (ret)
goto unreg_dev;
}
@@ -1222,7 +1300,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret)
goto unreg_dev;
v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
- dev->fb_info.node);
+ dev->fb_info.node);
}
#ifdef CONFIG_VIDEO_VIVID_CEC
@@ -1265,10 +1343,14 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out);
/* finally start creating the device nodes */
if (dev->has_vid_cap) {
vfd = &dev->vid_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-cap", inst);
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->vid_cap_caps;
@@ -1314,6 +1396,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_vid_out) {
vfd = &dev->vid_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-out", inst);
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
@@ -1492,6 +1576,65 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
video_device_node_name(vfd));
}
+ if (dev->has_meta_cap) {
+ vfd = &dev->meta_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-meta-cap", inst);
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->meta_cap_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_meta_cap_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_cap;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->meta_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->meta_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ meta_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 metadata capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_meta_out) {
+ vfd = &dev->meta_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-meta-out", inst);
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->meta_out_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_meta_out_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_out;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->meta_out_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->meta_out_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ meta_out_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 metadata output device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
#ifdef CONFIG_MEDIA_CONTROLLER
/* Register the media device */
ret = media_device_register(&dev->mdev);
@@ -1508,6 +1651,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
return 0;
unreg_dev:
+ video_unregister_device(&dev->meta_out_dev);
+ video_unregister_device(&dev->meta_cap_dev);
video_unregister_device(&dev->radio_tx_dev);
video_unregister_device(&dev->radio_rx_dev);
video_unregister_device(&dev->sdr_cap_dev);
@@ -1580,7 +1725,6 @@ static int vivid_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
- media_device_cleanup(&dev->mdev);
#endif
if (dev->has_vid_cap) {
@@ -1624,6 +1768,16 @@ static int vivid_remove(struct platform_device *pdev)
unregister_framebuffer(&dev->fb_info);
vivid_fb_release_buffers(dev);
}
+ if (dev->has_meta_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->meta_cap_dev));
+ video_unregister_device(&dev->meta_cap_dev);
+ }
+ if (dev->has_meta_out) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->meta_out_dev));
+ video_unregister_device(&dev->meta_out_dev);
+ }
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
cec_unregister_adapter(dev->cec_tx_adap[j]);
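A sketch decoding the node_types bitmask using only the bit positions documented in the parameter help above; the SDR bit is part of the default but its position is not shown in this hunk and is omitted here. decode_node_types() is illustrative, not part of vivid.

#include <stdio.h>

static void decode_node_types(unsigned int v)
{
	printf("0x%x enables:\n", v);
	if (v & (1u << 0))
		puts("  video capture");
	if ((v >> 2) & 3)
		puts("  VBI capture (raw and/or sliced)");
	if (v & (1u << 4))
		puts("  radio receiver");
	if (v & (1u << 8))
		puts("  video output");
	if ((v >> 10) & 3)
		puts("  VBI output (raw and/or sliced)");
	if (v & (1u << 12))
		puts("  radio transmitter");
	if (v & (1u << 16))
		puts("  framebuffer overlay");
	if (v & (1u << 17))
		puts("  metadata capture");
	if (v & (1u << 18))
		puts("  metadata output");
}

int main(void)
{
	decode_node_types(0x61d3d);     /* the new default */
	return 0;
}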
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 7ebb14673c75..59192b67231c 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -131,6 +131,8 @@ struct vivid_dev {
struct media_pad vbi_cap_pad;
struct media_pad vbi_out_pad;
struct media_pad sdr_cap_pad;
+ struct media_pad meta_cap_pad;
+ struct media_pad meta_out_pad;
#endif
struct v4l2_ctrl_handler ctrl_hdl_user_gen;
struct v4l2_ctrl_handler ctrl_hdl_user_vid;
@@ -153,6 +155,11 @@ struct vivid_dev {
struct v4l2_ctrl_handler ctrl_hdl_radio_tx;
struct video_device sdr_cap_dev;
struct v4l2_ctrl_handler ctrl_hdl_sdr_cap;
+ struct video_device meta_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_meta_cap;
+ struct video_device meta_out_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_meta_out;
+
spinlock_t slock;
struct mutex mutex;
@@ -164,6 +171,8 @@ struct vivid_dev {
u32 sdr_cap_caps;
u32 radio_rx_caps;
u32 radio_tx_caps;
+ u32 meta_cap_caps;
+ u32 meta_out_caps;
/* supported features */
bool multiplanar;
@@ -189,6 +198,9 @@ struct vivid_dev {
bool has_radio_tx;
bool has_sdr_cap;
bool has_fb;
+ bool has_meta_cap;
+ bool has_meta_out;
+ bool has_tv_tuner;
bool can_loop_video;
@@ -390,6 +402,8 @@ struct vivid_dev {
struct list_head vid_cap_active;
struct vb2_queue vb_vbi_cap_q;
struct list_head vbi_cap_active;
+ struct vb2_queue vb_meta_cap_q;
+ struct list_head meta_cap_active;
/* thread for generating video capture stream */
struct task_struct *kthread_vid_cap;
@@ -407,6 +421,9 @@ struct vivid_dev {
u32 vbi_cap_seq_count;
bool vbi_cap_streaming;
bool stream_sliced_vbi_cap;
+ u32 meta_cap_seq_start;
+ u32 meta_cap_seq_count;
+ bool meta_cap_streaming;
/* video output */
const struct vivid_fmt *fmt_out;
@@ -421,6 +438,8 @@ struct vivid_dev {
struct list_head vid_out_active;
struct vb2_queue vb_vbi_out_q;
struct list_head vbi_out_active;
+ struct vb2_queue vb_meta_out_q;
+ struct list_head meta_out_active;
/* video loop precalculated rectangles */
@@ -461,6 +480,9 @@ struct vivid_dev {
u32 vbi_out_seq_count;
bool vbi_out_streaming;
bool stream_sliced_vbi_out;
+ u32 meta_out_seq_start;
+ u32 meta_out_seq_count;
+ bool meta_out_streaming;
/* SDR capture */
struct vb2_queue vb_sdr_cap_q;
@@ -527,6 +549,9 @@ struct vivid_dev {
/* CEC OSD String */
char osd[14];
unsigned long osd_jiffies;
+
+ bool meta_pts;
+ bool meta_scr;
};
static inline bool vivid_is_webcam(const struct vivid_dev *dev)
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index cb19a9a73092..68e8124c7973 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -32,6 +32,7 @@
#define VIVID_CID_U32_ARRAY (VIVID_CID_CUSTOM_BASE + 8)
#define VIVID_CID_U16_MATRIX (VIVID_CID_CUSTOM_BASE + 9)
#define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10)
+#define VIVID_CID_AREA (VIVID_CID_CUSTOM_BASE + 11)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -94,6 +95,9 @@
#define VIVID_CID_SDR_CAP_FM_DEVIATION (VIVID_CID_VIVID_BASE + 110)
+#define VIVID_CID_META_CAP_GENERATE_PTS (VIVID_CID_VIVID_BASE + 111)
+#define VIVID_CID_META_CAP_GENERATE_SCR (VIVID_CID_VIVID_BASE + 112)
+
/* General User Controls */
static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -110,6 +114,7 @@ static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
clear_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
clear_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
clear_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
break;
case VIVID_CID_BUTTON:
dev->button_pressed = 30;
@@ -262,6 +267,18 @@ static const struct v4l2_ctrl_config vivid_ctrl_disconnect = {
.type = V4L2_CTRL_TYPE_BUTTON,
};
+static const struct v4l2_area area = {
+ .width = 1000,
+ .height = 2000,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_area = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_AREA,
+ .name = "Area",
+ .type = V4L2_CTRL_TYPE_AREA,
+ .p_def.p_const = &area,
+};
/* Framebuffer Controls */
@@ -1421,6 +1438,47 @@ static const struct v4l2_ctrl_config vivid_ctrl_sdr_cap_fm_deviation = {
.step = 1,
};
+/* Metadata Capture Control */
+
+static int vivid_meta_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev,
+ ctrl_hdl_meta_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_META_CAP_GENERATE_PTS:
+ dev->meta_pts = ctrl->val;
+ break;
+ case VIVID_CID_META_CAP_GENERATE_SCR:
+ dev->meta_scr = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_meta_cap_ctrl_ops = {
+ .s_ctrl = vivid_meta_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_meta_has_pts = {
+ .ops = &vivid_meta_cap_ctrl_ops,
+ .id = VIVID_CID_META_CAP_GENERATE_PTS,
+ .name = "Generate PTS",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_meta_has_src_clk = {
+ .ops = &vivid_meta_cap_ctrl_ops,
+ .id = VIVID_CID_META_CAP_GENERATE_SCR,
+ .name = "Generate SCR",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
static const struct v4l2_ctrl_config vivid_ctrl_class = {
.ops = &vivid_user_gen_ctrl_ops,
@@ -1448,6 +1506,9 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
struct v4l2_ctrl_handler *hdl_radio_rx = &dev->ctrl_hdl_radio_rx;
struct v4l2_ctrl_handler *hdl_radio_tx = &dev->ctrl_hdl_radio_tx;
struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap;
+ struct v4l2_ctrl_handler *hdl_meta_cap = &dev->ctrl_hdl_meta_cap;
+ struct v4l2_ctrl_handler *hdl_meta_out = &dev->ctrl_hdl_meta_out;
+
struct v4l2_ctrl_config vivid_ctrl_dv_timings = {
.ops = &vivid_vid_cap_ctrl_ops,
.id = VIVID_CID_DV_TIMINGS,
@@ -1486,6 +1547,10 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_class, NULL);
v4l2_ctrl_handler_init(hdl_sdr_cap, 19);
v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_meta_cap, 2);
+ v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_meta_out, 2);
+ v4l2_ctrl_new_custom(hdl_meta_out, &vivid_ctrl_class, NULL);
/* User Controls */
dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL,
@@ -1522,6 +1587,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL);
dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL);
dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_area, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
@@ -1743,6 +1809,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_sdr_cap,
&vivid_ctrl_sdr_cap_fm_deviation, NULL);
}
+ if (dev->has_meta_cap) {
+ v4l2_ctrl_new_custom(hdl_meta_cap,
+ &vivid_ctrl_meta_has_pts, NULL);
+ v4l2_ctrl_new_custom(hdl_meta_cap,
+ &vivid_ctrl_meta_has_src_clk, NULL);
+ }
+
if (hdl_user_gen->error)
return hdl_user_gen->error;
if (hdl_user_vid->error)
@@ -1817,6 +1890,20 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
return hdl_sdr_cap->error;
dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
}
+ if (dev->has_meta_cap) {
+ v4l2_ctrl_add_handler(hdl_meta_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_meta_cap, hdl_streaming, NULL, false);
+ if (hdl_meta_cap->error)
+ return hdl_meta_cap->error;
+ dev->meta_cap_dev.ctrl_handler = hdl_meta_cap;
+ }
+ if (dev->has_meta_out) {
+ v4l2_ctrl_add_handler(hdl_meta_out, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_meta_out, hdl_streaming, NULL, false);
+ if (hdl_meta_out->error)
+ return hdl_meta_out->error;
+ dev->meta_out_dev.ctrl_handler = hdl_meta_out;
+ }
return 0;
}
@@ -1836,4 +1923,6 @@ void vivid_free_controls(struct vivid_dev *dev)
v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdtv_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_loop_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_out);
}
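The new VIVID_CID_AREA control above is a compound control with a constant default (p_def.p_const = &area). As a minimal userspace sketch, not part of the patch, such a control could be read back with VIDIOC_G_EXT_CTRLS roughly as below; the control id is assumed to have been discovered via VIDIOC_QUERY_EXT_CTRL, since VIVID_CID_AREA itself is driver-internal.

/* Hypothetical userspace read of an AREA-type control (control id assumed known). */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int read_area_ctrl(int fd, __u32 ctrl_id)
{
	struct v4l2_area area;
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&area, 0, sizeof(area));
	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));

	ctrl.id = ctrl_id;		/* e.g. discovered via VIDIOC_QUERY_EXT_CTRL */
	ctrl.size = sizeof(area);	/* compound controls carry a payload */
	ctrl.p_area = &area;

	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) < 0)
		return -1;

	printf("area: %ux%u\n", area.width, area.height);	/* 1000x2000 by default */
	return 0;
}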
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 003319d7816d..01a9d671b947 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -39,6 +39,7 @@
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-cap.h"
+#include "vivid-meta-cap.h"
static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
{
@@ -677,6 +678,7 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
{
struct vivid_buffer *vid_cap_buf = NULL;
struct vivid_buffer *vbi_cap_buf = NULL;
+ struct vivid_buffer *meta_cap_buf = NULL;
u64 f_time = 0;
dprintk(dev, 1, "Video Capture Thread Tick\n");
@@ -704,15 +706,19 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
list_del(&vbi_cap_buf->list);
}
}
+ if (!list_empty(&dev->meta_cap_active)) {
+ meta_cap_buf = list_entry(dev->meta_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&meta_cap_buf->list);
+ }
+
spin_unlock(&dev->slock);
- if (!vid_cap_buf && !vbi_cap_buf)
+ if (!vid_cap_buf && !vbi_cap_buf && !meta_cap_buf)
goto update_mv;
f_time = dev->cap_frame_period * dev->vid_cap_seq_count +
dev->cap_stream_start + dev->time_wrap_offset;
- if (!dev->tstamp_src_is_soe)
- f_time += dev->cap_frame_eof_offset;
if (vid_cap_buf) {
v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
@@ -735,6 +741,8 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
vid_cap_buf->vb.vb2_buf.index);
vid_cap_buf->vb.vb2_buf.timestamp = f_time;
+ if (!dev->tstamp_src_is_soe)
+ vid_cap_buf->vb.vb2_buf.timestamp += dev->cap_frame_eof_offset;
}
if (vbi_cap_buf) {
@@ -756,8 +764,22 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
/* If capturing a VBI, offset by 0.05 of a frame period */
vbi_period = dev->cap_frame_period * 5;
do_div(vbi_period, 100);
- vbi_cap_buf->vb.vb2_buf.timestamp = f_time + vbi_period;
+ vbi_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset + vbi_period;
+ }
+
+ if (meta_cap_buf) {
+ v4l2_ctrl_request_setup(meta_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vivid_meta_cap_fillbuff(dev, meta_cap_buf, f_time);
+ v4l2_ctrl_request_complete(meta_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vb2_buffer_done(&meta_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "meta_cap %d done\n",
+ meta_cap_buf->vb.vb2_buf.index);
+ meta_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset;
}
+
dev->dqbuf_error = false;
update_mv:
@@ -796,7 +818,11 @@ static int vivid_thread_vid_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->cap_seq_resync) {
dev->jiffies_vid_cap = cur_jiffies;
@@ -835,6 +861,7 @@ static int vivid_thread_vid_cap(void *data)
dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;
+ dev->meta_cap_seq_count = dev->cap_seq_count - dev->meta_cap_seq_start;
vivid_thread_vid_cap_tick(dev, dropped_bufs);
@@ -883,8 +910,10 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
if (pstreaming == &dev->vid_cap_streaming)
dev->vid_cap_seq_start = seq_count;
- else
+ else if (pstreaming == &dev->vbi_cap_streaming)
dev->vbi_cap_seq_start = seq_count;
+ else
+ dev->meta_cap_seq_start = seq_count;
*pstreaming = true;
return 0;
}
@@ -894,6 +923,7 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
dev->vid_cap_seq_start = dev->seq_wrap * 128;
dev->vbi_cap_seq_start = dev->seq_wrap * 128;
+ dev->meta_cap_seq_start = dev->seq_wrap * 128;
dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
"%s-vid-cap", dev->v4l2_dev.name);
@@ -951,13 +981,27 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
}
}
- if (dev->vid_cap_streaming || dev->vbi_cap_streaming)
+ if (pstreaming == &dev->meta_cap_streaming) {
+ while (!list_empty(&dev->meta_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->meta_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "meta_cap buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_cap_streaming || dev->vbi_cap_streaming ||
+ dev->meta_cap_streaming)
return;
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_cap);
dev->kthread_vid_cap = NULL;
- mutex_lock(&dev->mutex);
}
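The mutex_lock() to mutex_trylock() change above avoids a deadlock: stop_streaming now keeps dev->mutex held while calling kthread_stop(), so the worker thread must never block on that mutex. A minimal sketch of the pattern, using a hypothetical driver rather than the vivid structures:

/*
 * Minimal sketch of the trylock pattern used above, assuming a hypothetical
 * driver with one worker kthread and one device mutex.
 */
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/sched.h>

struct demo_dev {
	struct mutex mutex;
	/* ... state touched by both the worker and stop_streaming() ... */
};

static int demo_worker(void *data)
{
	struct demo_dev *dev = data;

	for (;;) {
		if (kthread_should_stop())
			break;

		/*
		 * Never block on the mutex: stop_streaming() may hold it
		 * while waiting in kthread_stop() for this thread to exit.
		 * Back off for one jiffy and re-check kthread_should_stop().
		 */
		if (!mutex_trylock(&dev->mutex)) {
			schedule_timeout_uninterruptible(1);
			continue;
		}

		/* ... produce one tick worth of buffers ... */

		mutex_unlock(&dev->mutex);
	}
	return 0;
}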
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index ce5bcda2348c..6780687978f9 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -38,11 +38,13 @@
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-out.h"
+#include "vivid-meta-out.h"
static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
{
struct vivid_buffer *vid_out_buf = NULL;
struct vivid_buffer *vbi_out_buf = NULL;
+ struct vivid_buffer *meta_out_buf = NULL;
dprintk(dev, 1, "Video Output Thread Tick\n");
@@ -69,9 +71,14 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
struct vivid_buffer, list);
list_del(&vbi_out_buf->list);
}
+ if (!list_empty(&dev->meta_out_active)) {
+ meta_out_buf = list_entry(dev->meta_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&meta_out_buf->list);
+ }
spin_unlock(&dev->slock);
- if (!vid_out_buf && !vbi_out_buf)
+ if (!vid_out_buf && !vbi_out_buf && !meta_out_buf)
return;
if (vid_out_buf) {
@@ -111,6 +118,21 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
dprintk(dev, 2, "vbi_out buffer %d done\n",
vbi_out_buf->vb.vb2_buf.index);
}
+ if (meta_out_buf) {
+ v4l2_ctrl_request_setup(meta_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ v4l2_ctrl_request_complete(meta_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ vivid_meta_out_process(dev, meta_out_buf);
+ meta_out_buf->vb.sequence = dev->meta_out_seq_count;
+ meta_out_buf->vb.vb2_buf.timestamp =
+ ktime_get_ns() + dev->time_wrap_offset;
+ vb2_buffer_done(&meta_out_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "meta_out buffer %d done\n",
+ meta_out_buf->vb.vb2_buf.index);
+ }
+
dev->dqbuf_error = false;
}
@@ -136,6 +158,7 @@ static int vivid_thread_vid_out(void *data)
dev->out_seq_count = 0xffffff80U;
dev->jiffies_vid_out = jiffies;
dev->vid_out_seq_start = dev->vbi_out_seq_start = 0;
+ dev->meta_out_seq_start = 0;
dev->out_seq_resync = false;
for (;;) {
@@ -143,7 +166,11 @@ static int vivid_thread_vid_out(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->out_seq_resync) {
dev->jiffies_vid_out = cur_jiffies;
@@ -178,6 +205,7 @@ static int vivid_thread_vid_out(void *data)
dev->out_seq_count = buffers_since_start + dev->out_seq_offset;
dev->vid_out_seq_count = dev->out_seq_count - dev->vid_out_seq_start;
dev->vbi_out_seq_count = dev->out_seq_count - dev->vbi_out_seq_start;
+ dev->meta_out_seq_count = dev->out_seq_count - dev->meta_out_seq_start;
vivid_thread_vid_out_tick(dev);
mutex_unlock(&dev->mutex);
@@ -229,8 +257,10 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
if (pstreaming == &dev->vid_out_streaming)
dev->vid_out_seq_start = seq_count;
- else
+ else if (pstreaming == &dev->vbi_out_streaming)
dev->vbi_out_seq_start = seq_count;
+ else
+ dev->meta_out_seq_start = seq_count;
*pstreaming = true;
return 0;
}
@@ -239,6 +269,7 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
dev->jiffies_vid_out = jiffies;
dev->vid_out_seq_start = dev->seq_wrap * 128;
dev->vbi_out_seq_start = dev->seq_wrap * 128;
+ dev->meta_out_seq_start = dev->seq_wrap * 128;
dev->kthread_vid_out = kthread_run(vivid_thread_vid_out, dev,
"%s-vid-out", dev->v4l2_dev.name);
@@ -296,13 +327,27 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
}
}
- if (dev->vid_out_streaming || dev->vbi_out_streaming)
+ if (pstreaming == &dev->meta_out_streaming) {
+ while (!list_empty(&dev->meta_out_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->meta_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "meta_out buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_out_streaming || dev->vbi_out_streaming ||
+ dev->meta_out_streaming)
return;
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_out);
dev->kthread_vid_out = NULL;
- mutex_lock(&dev->mutex);
}
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.c b/drivers/media/platform/vivid/vivid-meta-cap.c
new file mode 100644
index 000000000000..780f96860a6d
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-cap.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-meta-cap.c - meta capture support functions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/usb/video.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-cap.h"
+#include "vivid-meta-cap.h"
+
+static int meta_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int size = sizeof(struct vivid_uvc_meta_buf);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int meta_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = sizeof(struct vivid_uvc_meta_buf);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void meta_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->meta_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int meta_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->meta_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_cap(dev,
+ &dev->meta_cap_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->meta_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void meta_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_cap(dev, &dev->meta_cap_streaming);
+}
+
+static void meta_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_meta_cap);
+}
+
+const struct vb2_ops vivid_meta_cap_qops = {
+ .queue_setup = meta_cap_queue_setup,
+ .buf_prepare = meta_cap_buf_prepare,
+ .buf_queue = meta_cap_buf_queue,
+ .start_streaming = meta_cap_start_streaming,
+ .stop_streaming = meta_cap_stop_streaming,
+ .buf_request_complete = meta_cap_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_META_CAPTURE;
+ f->pixelformat = V4L2_META_FMT_UVC;
+ return 0;
+}
+
+int vidioc_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (!vivid_is_webcam(dev) || !dev->has_meta_cap)
+ return -EINVAL;
+
+ meta->dataformat = V4L2_META_FMT_UVC;
+ meta->buffersize = sizeof(struct vivid_uvc_meta_buf);
+ return 0;
+}
+
+void vivid_meta_cap_fillbuff(struct vivid_dev *dev,
+ struct vivid_buffer *buf, u64 soe)
+{
+ struct vivid_uvc_meta_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ int buf_off = 0;
+
+ buf->vb.sequence = dev->meta_cap_seq_count;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ buf->vb.sequence /= 2;
+ memset(meta, 1, vb2_plane_size(&buf->vb.vb2_buf, 0));
+
+ meta->ns = ktime_get_ns();
+ meta->sof = buf->vb.sequence * 30;
+ meta->length = sizeof(*meta) - offsetof(struct vivid_uvc_meta_buf, length);
+ meta->flags = UVC_STREAM_EOH | UVC_STREAM_EOF;
+
+ if ((buf->vb.sequence % 2) == 0)
+ meta->flags |= UVC_STREAM_FID;
+
+ dprintk(dev, 2, "%s ns:%llu sof:%4d len:%u flags: 0x%02x",
+ __func__, meta->ns, meta->sof, meta->length, meta->flags);
+ if (dev->meta_pts) {
+ meta->flags |= UVC_STREAM_PTS;
+ *(__u32 *)(meta->buf) = div_u64(soe, VIVID_META_CLOCK_UNIT);
+ buf_off = 4;
+ dprintk(dev, 2, " pts: %u\n", *(__u32 *)(meta->buf));
+ }
+
+ if (dev->meta_scr) {
+ meta->flags |= UVC_STREAM_SCR;
+ *(__u32 *)(meta->buf + buf_off) = div_u64((soe + dev->cap_frame_eof_offset),
+ VIVID_META_CLOCK_UNIT);
+
+ *(__u16 *)(meta->buf + buf_off + 4) = (buf->vb.sequence * 30) % 1000;
+ dprintk(dev, 2, " stc: %u, sof counter: %u\n",
+ *(__u32 *)(meta->buf + buf_off),
+ *(__u16 *)(meta->buf + buf_off + 4));
+ }
+ dprintk(dev, 2, "\n");
+}
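A quick userspace sanity check of vidioc_g_fmt_meta_cap() above might look like the sketch below; the device path is an assumption, since vivid numbers its video nodes dynamically.

/* Hypothetical userspace query of the new metadata capture node. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video4", O_RDWR);	/* node number is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) < 0) {
		perror("VIDIOC_G_FMT");
		close(fd);
		return 1;
	}

	/* vivid reports V4L2_META_FMT_UVC and a fixed buffer size */
	printf("dataformat: %.4s, buffersize: %u\n",
	       (char *)&fmt.fmt.meta.dataformat, fmt.fmt.meta.buffersize);
	close(fd);
	return 0;
}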
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.h b/drivers/media/platform/vivid/vivid-meta-cap.h
new file mode 100644
index 000000000000..4670d00d1576
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-cap.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-meta-cap.h - meta capture support functions.
+ */
+#ifndef _VIVID_META_CAP_H_
+#define _VIVID_META_CAP_H_
+
+#define VIVID_META_CLOCK_UNIT 10 /* 100 MHz */
+
+struct vivid_uvc_meta_buf {
+ __u64 ns;
+ __u16 sof;
+ __u8 length;
+ __u8 flags;
+ __u8 buf[10]; /* PTS(4)+STC(4)+SOF(2) */
+} __packed;
+
+void vivid_meta_cap_fillbuff(struct vivid_dev *dev,
+ struct vivid_buffer *buf, u64 soe);
+
+int vidioc_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+
+int vidioc_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f);
+
+extern const struct vb2_ops vivid_meta_cap_qops;
+
+#endif
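For reference, a hypothetical userspace decode of one dequeued plane, mirroring the packed layout declared above; it assumes both the PTS and SCR controls are left at their default of 1, so the PTS occupies buf[0..3], the STC buf[4..7] and the SOF counter buf[8..9].

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace mirror of struct vivid_uvc_meta_buf (field names from the header). */
struct uvc_meta {
	uint64_t ns;
	uint16_t sof;
	uint8_t  length;
	uint8_t  flags;
	uint8_t  buf[10];	/* PTS(4) + STC(4) + SOF(2) */
} __attribute__((packed));

static void dump_meta(const void *plane, size_t len)
{
	struct uvc_meta m;
	uint32_t pts, stc;
	uint16_t sof_counter;

	if (len < sizeof(m))
		return;
	memcpy(&m, plane, sizeof(m));
	memcpy(&pts, m.buf, sizeof(pts));		/* valid if UVC_STREAM_PTS is set */
	memcpy(&stc, m.buf + 4, sizeof(stc));		/* valid if UVC_STREAM_SCR is set */
	memcpy(&sof_counter, m.buf + 8, sizeof(sof_counter));

	printf("ns=%llu sof=%u len=%u flags=0x%02x pts=%u stc=%u sof_counter=%u\n",
	       (unsigned long long)m.ns, m.sof, m.length, m.flags,
	       pts, stc, sof_counter);
}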
diff --git a/drivers/media/platform/vivid/vivid-meta-out.c b/drivers/media/platform/vivid/vivid-meta-out.c
new file mode 100644
index 000000000000..ff8a039aba72
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-out.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-meta-out.c - meta output support functions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/usb/video.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-out.h"
+#include "vivid-meta-out.h"
+
+static int meta_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int size = sizeof(struct vivid_meta_out_buf);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int meta_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = sizeof(struct vivid_meta_out_buf);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void meta_out_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->meta_out_active);
+ spin_unlock(&dev->slock);
+}
+
+static int meta_out_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->meta_out_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_out(dev,
+ &dev->meta_out_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->meta_out_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void meta_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_out(dev, &dev->meta_out_streaming);
+}
+
+static void meta_out_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_meta_out);
+}
+
+const struct vb2_ops vivid_meta_out_qops = {
+ .queue_setup = meta_out_queue_setup,
+ .buf_prepare = meta_out_buf_prepare,
+ .buf_queue = meta_out_buf_queue,
+ .start_streaming = meta_out_start_streaming,
+ .stop_streaming = meta_out_stop_streaming,
+ .buf_request_complete = meta_out_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_META_OUTPUT;
+ f->pixelformat = V4L2_META_FMT_VIVID;
+ return 0;
+}
+
+int vidioc_g_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (!vivid_is_webcam(dev) || !dev->has_meta_out)
+ return -EINVAL;
+
+ meta->dataformat = V4L2_META_FMT_VIVID;
+ meta->buffersize = sizeof(struct vivid_meta_out_buf);
+ return 0;
+}
+
+void vivid_meta_out_process(struct vivid_dev *dev,
+ struct vivid_buffer *buf)
+{
+ struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+
+ tpg_s_brightness(&dev->tpg, meta->brightness);
+ tpg_s_contrast(&dev->tpg, meta->contrast);
+ tpg_s_saturation(&dev->tpg, meta->saturation);
+ tpg_s_hue(&dev->tpg, meta->hue);
+ dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
+ __func__, meta->brightness, meta->contrast,
+ meta->saturation, meta->hue);
+}
diff --git a/drivers/media/platform/vivid/vivid-meta-out.h b/drivers/media/platform/vivid/vivid-meta-out.h
new file mode 100644
index 000000000000..0c639b7c2842
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-out.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-meta-out.h - meta output support functions.
+ */
+#ifndef _VIVID_META_OUT_H_
+#define _VIVID_META_OUT_H_
+
+struct vivid_meta_out_buf {
+ u16 brightness;
+ u16 contrast;
+ u16 saturation;
+ s16 hue;
+};
+
+void vivid_meta_out_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vidioc_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_s_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+
+extern const struct vb2_ops vivid_meta_out_qops;
+
+#endif
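On the output side, a buffer queued to the metadata output node carries the four TPG parameters that vivid_meta_out_process() applies. A minimal sketch of the payload a test application might write (the values are arbitrary examples):

#include <stdint.h>
#include <string.h>

/* Mirrors struct vivid_meta_out_buf above (u16/u16/u16/s16, no padding). */
struct meta_out_payload {
	uint16_t brightness;
	uint16_t contrast;
	uint16_t saturation;
	int16_t  hue;
};

/* Fill a mapped V4L2_BUF_TYPE_META_OUTPUT plane before VIDIOC_QBUF. */
static void fill_meta_out(void *plane)
{
	struct meta_out_payload p = {
		.brightness = 128,	/* example mid-range values */
		.contrast   = 128,
		.saturation = 128,
		.hue        = 0,
	};

	memcpy(plane, &p, sizeof(p));
}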
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 9acc709b0740..2b7522e16efc 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->sdr_cap_seq_resync) {
dev->jiffies_sdr_cap = cur_jiffies;
@@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
}
/* shutdown control thread */
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_sdr_cap);
dev->kthread_sdr_cap = NULL;
- mutex_lock(&dev->mutex);
}
static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 8cbaa0c998ed..e94beef008c8 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_out_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_cap)
- return 0;
-
dev->vid_cap_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
for (i = 0; i < VIDEO_MAX_FRAME; i++)
@@ -1359,7 +1356,9 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
if (i == dev->input)
return 0;
- if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
+ if (vb2_is_busy(&dev->vb_vid_cap_q) ||
+ vb2_is_busy(&dev->vb_vbi_cap_q) ||
+ vb2_is_busy(&dev->vb_meta_cap_q))
return -EBUSY;
dev->input = i;
@@ -1369,6 +1368,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
}
dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
+ dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
vivid_update_format_cap(dev, false);
if (dev->colorspace) {
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 148b663a6075..ee3446e3217c 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_cap_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_out)
- return 0;
-
dev->vid_out_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
if (dev->start_streaming_error) {
@@ -1082,7 +1079,9 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
if (o == dev->output)
return 0;
- if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q))
+ if (vb2_is_busy(&dev->vb_vid_out_q) ||
+ vb2_is_busy(&dev->vb_vbi_out_q) ||
+ vb2_is_busy(&dev->vb_meta_out_q))
return -EBUSY;
dev->output = o;
@@ -1093,6 +1092,7 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
dev->vid_out_dev.tvnorms = 0;
dev->vbi_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
+ dev->meta_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
vivid_update_format_out(dev);
v4l2_ctrl_activate(dev->ctrl_display_present, vivid_is_hdmi_out(dev));
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 5aec4d17eb21..2378bdae57ae 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video DMA
*
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index f71e2b650453..a528a32ea1dc 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Core
*
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
index e65fce9538f9..cc52c1854dbd 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.h
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Composite Device
*
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
index 90cf44245283..855845911ffc 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.h
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video Timing Controller
*
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 104ac41c6f96..112376873167 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1148,8 +1148,7 @@ static int wl1273_fm_fops_release(struct file *file)
if (radio->rds_users > 0) {
radio->rds_users--;
if (radio->rds_users == 0) {
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
+ mutex_lock(&core->lock);
radio->irq_flags &= ~WL1273_RDS_EVENT;
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 7541698a0be1..f491420d7b53 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -482,6 +482,8 @@ static int si470x_i2c_remove(struct i2c_client *client)
if (radio->gpio_reset)
gpiod_set_value(radio->gpio_reset, 0);
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
return 0;
}
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 37a850421fbb..ed95244da894 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -83,6 +83,7 @@ struct imon_usb_dev_descr {
__u16 flags;
#define IMON_NO_FLAGS 0
#define IMON_NEED_20MS_PKT_DELAY 1
+#define IMON_SUPPRESS_REPEATED_KEYS 2
struct imon_panel_key_table key_table[];
};
@@ -149,8 +150,9 @@ struct imon_context {
struct timer_list ttimer; /* touch screen timer */
int touch_x; /* x coordinate on touchscreen */
int touch_y; /* y coordinate on touchscreen */
- struct imon_usb_dev_descr *dev_descr; /* device description with key
- table for front panels */
+ const struct imon_usb_dev_descr *dev_descr;
+ /* device description with key */
+ /* table for front panels */
};
#define TOUCH_TIMEOUT (HZ/30)
@@ -315,6 +317,32 @@ static const struct imon_usb_dev_descr imon_DH102 = {
}
};
+/* imon ultrabay front panel key table */
+static const struct imon_usb_dev_descr ultrabay_table = {
+ .flags = IMON_SUPPRESS_REPEATED_KEYS,
+ .key_table = {
+ { 0x0000000f0000ffeell, KEY_MEDIA }, /* Go */
+ { 0x000000000100ffeell, KEY_UP },
+ { 0x000000000001ffeell, KEY_DOWN },
+ { 0x000000160000ffeell, KEY_ENTER },
+ { 0x0000001f0000ffeell, KEY_AUDIO }, /* Music */
+ { 0x000000200000ffeell, KEY_VIDEO }, /* Movie */
+ { 0x000000210000ffeell, KEY_CAMERA }, /* Photo */
+ { 0x000000270000ffeell, KEY_DVD }, /* DVD */
+ { 0x000000230000ffeell, KEY_TV }, /* TV */
+ { 0x000000050000ffeell, KEY_PREVIOUS }, /* Previous */
+ { 0x000000070000ffeell, KEY_REWIND },
+ { 0x000000040000ffeell, KEY_STOP },
+ { 0x000000020000ffeell, KEY_PLAYPAUSE },
+ { 0x000000080000ffeell, KEY_FASTFORWARD },
+ { 0x000000060000ffeell, KEY_NEXT }, /* Next */
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000010000ffeell, KEY_MUTE },
+ { 0, KEY_RESERVED },
+ }
+};
+
/*
* USB Device ID for iMON USB Control Boards
*
@@ -1264,9 +1292,11 @@ static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode)
static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code)
{
- int i;
+ const struct imon_panel_key_table *key_table;
u32 keycode = KEY_RESERVED;
- struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
+ int i;
+
+ key_table = ictx->dev_descr->key_table;
for (i = 0; key_table[i].hw_code != 0; i++) {
if (key_table[i].hw_code == (code | 0xffee)) {
@@ -1550,7 +1580,6 @@ static void imon_incoming_packet(struct imon_context *ictx,
u32 kc;
u64 scancode;
int press_type = 0;
- long msec;
ktime_t t;
static ktime_t prev_time;
u8 ktype;
@@ -1598,8 +1627,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */
- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
- buf[7] == 0x86) {
+ if (ictx->touch && len == 8 && buf[7] == 0x86) {
imon_touch_event(ictx, buf);
return;
@@ -1653,14 +1681,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_lock_irqsave(&ictx->kc_lock, flags);
t = ktime_get();
- /* KEY_MUTE repeats from knob need to be suppressed */
- if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
- msec = ktime_ms_delta(t, prev_time);
- if (msec < ictx->idev->rep[REP_DELAY]) {
+ /* KEY repeats from knob and panel that need to be suppressed */
+ if (ictx->kc == KEY_MUTE ||
+ ictx->dev_descr->flags & IMON_SUPPRESS_REPEATED_KEYS) {
+ if (ictx->kc == ictx->last_keycode &&
+ ktime_ms_delta(t, prev_time) < ictx->idev->rep[REP_DELAY]) {
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
}
+
prev_time = t;
kc = ictx->kc;
@@ -1848,6 +1878,14 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
dev_info(ictx->dev, "0xffdc iMON Inside, iMON IR");
ictx->display_supported = false;
break;
+ /* Soundgraph iMON UltraBay */
+ case 0x98:
+ dev_info(ictx->dev, "0xffdc iMON UltraBay, LCD + IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE;
+ ictx->dev_descr = &ultrabay_table;
+ break;
+
default:
dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1979,10 +2017,12 @@ out:
static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
- struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
+ const struct imon_panel_key_table *key_table;
struct input_dev *idev;
int ret, i;
+ key_table = ictx->dev_descr->key_table;
+
idev = input_allocate_device();
if (!idev)
goto out;
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index d4aedcf76418..aae0a3cc9479 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -57,32 +57,18 @@ static void imon_ir_data(struct imon *imon)
* fls will tell us the highest bit set plus 1 (or 0 if no
* bits are set).
*/
+ rawir.pulse = !rawir.pulse;
bit = fls64(data & (BIT_ULL(offset) - 1));
if (bit < offset) {
- dev_dbg(imon->dev, "pulse: %d bits", offset - bit);
- rawir.pulse = true;
+ dev_dbg(imon->dev, "%s: %d bits",
+ rawir.pulse ? "pulse" : "space", offset - bit);
rawir.duration = (offset - bit) * BIT_DURATION;
ir_raw_event_store_with_filter(imon->rcdev, &rawir);
- if (bit == 0)
- break;
-
offset = bit;
}
- /*
- * Find highest clear bit which is less than offset.
- *
- * Just invert the data and use same trick as above.
- */
- bit = fls64(~data & (BIT_ULL(offset) - 1));
- dev_dbg(imon->dev, "space: %d bits", offset - bit);
-
- rawir.pulse = false;
- rawir.duration = (offset - bit) * BIT_DURATION;
- ir_raw_event_store_with_filter(imon->rcdev, &rawir);
-
- offset = bit;
+ data = ~data;
} while (offset > 0);
if (packet_no == 0x0a && !imon->rcdev->idle) {
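The reworked imon_ir_data() loop above derives pulse and space lengths from one code path: fls64() finds the highest set bit below offset, the difference is the run length, and inverting data flips which level the next pass measures. A stand-alone sketch of that walk (userspace, with __builtin_clzll standing in for fls64; the pulse/space labelling is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* 1-based index of the highest set bit, 0 if none (same contract as fls64()). */
static int fls64_local(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	uint64_t data = 0x0000000ff0000000ULL;	/* example sample word */
	int offset = 40;			/* number of valid low bits */
	int pulse = 0;

	do {
		pulse = !pulse;
		/* length of the run of clear bits at the top of the window */
		int bit = fls64_local(data & ((1ULL << offset) - 1));

		if (bit < offset)
			printf("%s: %d bits\n", pulse ? "pulse" : "space",
			       offset - bit);
		offset = bit;
		data = ~data;			/* measure the opposite level next */
	} while (offset > 0);

	return 0;
}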
diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c
index 64fb65a9a19f..028df5cb1828 100644
--- a/drivers/media/rc/ir-rcmm-decoder.c
+++ b/drivers/media/rc/ir-rcmm-decoder.c
@@ -79,7 +79,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (!ev.pulse)
break;
- if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT / 2))
+ if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT))
break;
data->state = STATE_LOW;
@@ -91,7 +91,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (ev.pulse)
break;
- if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2))
+ if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT))
break;
data->state = STATE_BUMP;
@@ -164,6 +164,8 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
break;
}
+ dev_dbg(&dev->dev, "RC-MM decode failed at count %d state %d (%uus %s)\n",
+ data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 3ab6cec0dc3b..07667c04c1d2 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -382,7 +382,7 @@ static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
ite_dbg("%s called", __func__);
/* clear the array just in case */
- memset(last_sent, 0, ARRAY_SIZE(last_sent));
+ memset(last_sent, 0, sizeof(last_sent));
spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index a56fc634d2d6..63261ef6380a 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-avermedia-rm-ks.o \
rc-avertv-303.o \
rc-azurewave-ad-tu700.o \
+ rc-beelink-gs1.o \
rc-behold.o \
rc-behold-columbus.o \
rc-budget-ci-old.o \
@@ -114,6 +115,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-tt-1500.o \
rc-twinhan-dtv-cab-ci.o \
rc-twinhan1027.o \
+ rc-vega-s9x.o \
rc-videomate-m1f.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
diff --git a/drivers/media/rc/keymaps/rc-beelink-gs1.c b/drivers/media/rc/keymaps/rc-beelink-gs1.c
new file mode 100644
index 000000000000..cedbd5d20bc7
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-beelink-gs1.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2019 Clément Péron
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * Keymap for the Beelink GS1 remote control
+ */
+
+static struct rc_map_table beelink_gs1_table[] = {
+ /*
+ * TV Keys (Power, Learn and Volume)
+ * { 0x40400d, KEY_TV },
+ * { 0x80f1, KEY_TV },
+ * { 0x80f3, KEY_TV },
+ * { 0x80f4, KEY_TV },
+ */
+
+ { 0x8051, KEY_POWER },
+ { 0x804d, KEY_MUTE },
+ { 0x8040, KEY_CONFIG },
+
+ { 0x8026, KEY_UP },
+ { 0x8028, KEY_DOWN },
+ { 0x8025, KEY_LEFT },
+ { 0x8027, KEY_RIGHT },
+ { 0x800d, KEY_OK },
+
+ { 0x8053, KEY_HOME },
+ { 0x80bc, KEY_MEDIA },
+ { 0x801b, KEY_BACK },
+ { 0x8049, KEY_MENU },
+
+ { 0x804e, KEY_VOLUMEUP },
+ { 0x8056, KEY_VOLUMEDOWN },
+
+ { 0x8054, KEY_SUBTITLE }, /* Web */
+ { 0x8052, KEY_EPG }, /* Media */
+
+ { 0x8041, KEY_CHANNELUP },
+ { 0x8042, KEY_CHANNELDOWN },
+
+ { 0x8031, KEY_1 },
+ { 0x8032, KEY_2 },
+ { 0x8033, KEY_3 },
+
+ { 0x8034, KEY_4 },
+ { 0x8035, KEY_5 },
+ { 0x8036, KEY_6 },
+
+ { 0x8037, KEY_7 },
+ { 0x8038, KEY_8 },
+ { 0x8039, KEY_9 },
+
+ { 0x8044, KEY_DELETE },
+ { 0x8030, KEY_0 },
+ { 0x8058, KEY_MODE }, /* # Input Method */
+};
+
+static struct rc_map_list beelink_gs1_map = {
+ .map = {
+ .scan = beelink_gs1_table,
+ .size = ARRAY_SIZE(beelink_gs1_table),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_BEELINK_GS1,
+ }
+};
+
+static int __init init_rc_map_beelink_gs1(void)
+{
+ return rc_map_register(&beelink_gs1_map);
+}
+
+static void __exit exit_rc_map_beelink_gs1(void)
+{
+ rc_map_unregister(&beelink_gs1_map);
+}
+
+module_init(init_rc_map_beelink_gs1)
+module_exit(exit_rc_map_beelink_gs1)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Clément Péron <peron.clem@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-vega-s9x.c b/drivers/media/rc/keymaps/rc-vega-s9x.c
new file mode 100644
index 000000000000..bf210c4dc535
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-vega-s9x.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Christian Hewitt <christianshewitt@gmail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+//
+// Keytable for the Tronsmart Vega S9x remote control
+//
+
+static struct rc_map_table vega_s9x[] = {
+ { 0x18, KEY_POWER },
+ { 0x17, KEY_MUTE }, // mouse
+
+ { 0x46, KEY_UP },
+ { 0x47, KEY_LEFT },
+ { 0x55, KEY_OK },
+ { 0x15, KEY_RIGHT },
+ { 0x16, KEY_DOWN },
+
+ { 0x06, KEY_HOME },
+ { 0x42, KEY_PLAYPAUSE },
+ { 0x40, KEY_BACK },
+
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x04, KEY_MENU },
+ { 0x10, KEY_VOLUMEUP },
+};
+
+static struct rc_map_list vega_s9x_map = {
+ .map = {
+ .scan = vega_s9x,
+ .size = ARRAY_SIZE(vega_s9x),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_VEGA_S9X,
+ }
+};
+
+static int __init init_rc_map_vega_s9x(void)
+{
+ return rc_map_register(&vega_s9x_map);
+}
+
+static void __exit exit_rc_map_vega_s9x(void)
+{
+ rc_map_unregister(&vega_s9x_map);
+}
+
+module_init(init_rc_map_vega_s9x)
+module_exit(exit_rc_map_vega_s9x)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index f078f8a3aec8..9a8c1cf54ac4 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -720,9 +720,7 @@ static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = ir_lirc_transmit_ir,
.unlocked_ioctl = ir_lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ir_lirc_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.read = ir_lirc_read,
.poll = ir_lirc_poll,
.open = ir_lirc_open,
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 3fc9829a9233..f9616158bcf4 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -564,7 +564,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
datasize = 4;
break;
case MCE_CMD_G_REVISION:
- datasize = 2;
+ datasize = 4;
break;
case MCE_RSP_EQWAKESUPPORT:
case MCE_RSP_GETWAKESOURCE:
@@ -600,14 +600,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
char *inout;
u8 cmd, subcmd, *data;
struct device *dev = ir->dev;
- int start, skip = 0;
u32 carrier, period;
- /* skip meaningless 0xb1 0x60 header bytes on orig receiver */
- if (ir->flags.microsoft_gen1 && !out && !offset)
- skip = 2;
-
- if (len <= skip)
+ if (offset < 0 || offset >= buf_len)
return;
dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
@@ -616,11 +611,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
inout = out ? "Request" : "Got";
- start = offset + skip;
- cmd = buf[start] & 0xff;
- subcmd = buf[start + 1] & 0xff;
- data = buf + start + 2;
+ cmd = buf[offset];
+ subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
+ data = &buf[offset] + 2;
+
+ /* Trace meaningless 0xb1 0x60 header bytes on original receiver */
+ if (ir->flags.microsoft_gen1 && !out && !offset) {
+ dev_dbg(dev, "MCE gen 1 header");
+ return;
+ }
+
+ /* Trace IR data header or trailer */
+ if (cmd != MCE_CMD_PORT_IR &&
+ (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) {
+ if (cmd == MCE_IRDATA_TRAILER)
+ dev_dbg(dev, "End of raw IR data");
+ else
+ dev_dbg(dev, "Raw IR data, %d pulse/space samples",
+ cmd & MCE_PACKET_LENGTH_MASK);
+ return;
+ }
+
+ /* Unexpected end of buffer? */
+ if (offset + len > buf_len)
+ return;
+ /* Decode MCE command/response */
switch (cmd) {
case MCE_CMD_NULL:
if (subcmd == MCE_CMD_NULL)
@@ -644,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
dev_dbg(dev, "Get hw/sw rev?");
else
dev_dbg(dev, "hw/sw rev %*ph",
- 4, &buf[start + 2]);
+ 4, &buf[offset + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
@@ -746,13 +762,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
default:
break;
}
-
- if (cmd == MCE_IRDATA_TRAILER)
- dev_dbg(dev, "End of raw IR data");
- else if ((cmd != MCE_CMD_PORT_IR) &&
- ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
- dev_dbg(dev, "Raw IR data, %d pulse/space samples",
- cmd & MCE_PACKET_LENGTH_MASK);
#endif
}
@@ -1136,32 +1145,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
}
/*
+ * Handle PORT_SYS/IR command response received from the MCE device.
+ *
+ * Assumes single response with all its data (not truncated)
+ * in buf_in[]. The response itself determines its total length
+ * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
+ *
* We don't do anything but print debug spew for many of the command bits
* we receive from the hardware, but some of them are useful information
* we want to store so that we can use them.
*/
-static void mceusb_handle_command(struct mceusb_dev *ir, int index)
+static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
{
+ u8 cmd = buf_in[0];
+ u8 subcmd = buf_in[1];
+ u8 *hi = &buf_in[2]; /* read only when required */
+ u8 *lo = &buf_in[3]; /* read only when required */
struct ir_raw_event rawir = {};
- u8 hi = ir->buf_in[index + 1] & 0xff;
- u8 lo = ir->buf_in[index + 2] & 0xff;
u32 carrier_cycles;
u32 cycles_fix;
- switch (ir->buf_in[index]) {
- /* the one and only 5-byte return value command */
- case MCE_RSP_GETPORTSTATUS:
- if ((ir->buf_in[index + 4] & 0xff) == 0x00)
- ir->txports_cabled |= 1 << hi;
- break;
+ if (cmd == MCE_CMD_PORT_SYS) {
+ switch (subcmd) {
+ /* the one and only 5-byte return value command */
+ case MCE_RSP_GETPORTSTATUS:
+ if (buf_in[5] == 0)
+ ir->txports_cabled |= 1 << *hi;
+ break;
+
+ /* 1-byte return value commands */
+ case MCE_RSP_EQEMVER:
+ ir->emver = *hi;
+ break;
+
+ /* No return value commands */
+ case MCE_RSP_CMD_ILLEGAL:
+ ir->need_reset = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return;
+ }
+ if (cmd != MCE_CMD_PORT_IR)
+ return;
+
+ switch (subcmd) {
/* 2-byte return value commands */
case MCE_RSP_EQIRTIMEOUT:
- ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
+ ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT);
break;
case MCE_RSP_EQIRNUMPORTS:
- ir->num_txports = hi;
- ir->num_rxports = lo;
+ ir->num_txports = *hi;
+ ir->num_rxports = *lo;
break;
case MCE_RSP_EQIRRXCFCNT:
/*
@@ -1174,7 +1213,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
*/
if (ir->carrier_report_enabled && ir->learning_active &&
ir->pulse_tunit > 0) {
- carrier_cycles = (hi << 8 | lo);
+ carrier_cycles = (*hi << 8 | *lo);
/*
* Adjust carrier cycle count by adding
* 1 missed count per pulse "on"
@@ -1192,24 +1231,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
break;
/* 1-byte return value commands */
- case MCE_RSP_EQEMVER:
- ir->emver = hi;
- break;
case MCE_RSP_EQIRTXPORTS:
- ir->tx_mask = hi;
+ ir->tx_mask = *hi;
break;
case MCE_RSP_EQIRRXPORTEN:
- ir->learning_active = ((hi & 0x02) == 0x02);
- if (ir->rxports_active != hi) {
+ ir->learning_active = ((*hi & 0x02) == 0x02);
+ if (ir->rxports_active != *hi) {
dev_info(ir->dev, "%s-range (0x%x) receiver active",
- ir->learning_active ? "short" : "long", hi);
- ir->rxports_active = hi;
+ ir->learning_active ? "short" : "long", *hi);
+ ir->rxports_active = *hi;
}
break;
+
+ /* No return value commands */
case MCE_RSP_CMD_ILLEGAL:
case MCE_RSP_TX_TIMEOUT:
ir->need_reset = true;
break;
+
default:
break;
}
@@ -1235,7 +1274,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
ir->rem + 2, false);
- mceusb_handle_command(ir, i);
+ if (i + ir->rem < buf_len)
+ mceusb_handle_command(ir, &ir->buf_in[i - 1]);
ir->parser_state = CMD_DATA;
break;
case PARSE_IRDATA:
@@ -1264,15 +1304,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem--;
break;
case CMD_HEADER:
- /* decode mce packets of the form (84),AA,BB,CC,DD */
- /* IR data packets can span USB messages - rem */
ir->cmd = ir->buf_in[i];
if ((ir->cmd == MCE_CMD_PORT_IR) ||
((ir->cmd & MCE_PORT_MASK) !=
MCE_COMMAND_IRDATA)) {
+ /*
+ * got PORT_SYS, PORT_IR, or unknown
+ * command response prefix
+ */
ir->parser_state = SUBCMD;
continue;
}
+ /*
+ * got IR data prefix (0x80 + num_bytes)
+ * decode MCE packets of the form {0x83, AA, BB, CC}
+ * IR data packets can span USB messages
+ */
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
mceusb_dev_printdata(ir, ir->buf_in, buf_len,
i, ir->rem + 1, false);
@@ -1296,6 +1343,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER;
}
+
+ /*
+ * Accept IR data spanning multiple rx buffers.
+ * Reject MCE command response spanning multiple rx buffers.
+ */
+ if (ir->parser_state != PARSE_IRDATA || !ir->rem)
+ ir->parser_state = CMD_HEADER;
+
if (event) {
dev_dbg(ir->dev, "processed IR data");
ir_raw_event_handle(ir->rc);
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 9f21b3e8b377..5f36244cc34f 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* Remote Controller core raw events header
*
* Copyright (C) 2010 by Mauro Carvalho Chehab
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 13da4c5c7d17..7741151606ef 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1773,6 +1773,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
set_bit(MSC_SCAN, dev->input_dev->mscbit);
/* Pointer/mouse events */
+ set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit);
set_bit(EV_REL, dev->input_dev->evbit);
set_bit(REL_X, dev->input_dev->relbit);
set_bit(REL_Y, dev->input_dev->relbit);
diff --git a/drivers/media/rc/tango-ir.c b/drivers/media/rc/tango-ir.c
index 451ec4e9dcfa..b8eb5bc4d9be 100644
--- a/drivers/media/rc/tango-ir.c
+++ b/drivers/media/rc/tango-ir.c
@@ -157,20 +157,10 @@ static int tango_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rc_dev *rc;
struct tango_ir *ir;
- struct resource *rc5_res;
- struct resource *rc6_res;
u64 clkrate, clkdiv;
int irq, err;
u32 val;
- rc5_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!rc5_res)
- return -EINVAL;
-
- rc6_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!rc6_res)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -EINVAL;
@@ -179,11 +169,11 @@ static int tango_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
- ir->rc5_base = devm_ioremap_resource(dev, rc5_res);
+ ir->rc5_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->rc5_base))
return PTR_ERR(ir->rc5_base);
- ir->rc6_base = devm_ioremap_resource(dev, rc6_res);
+ ir->rc6_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ir->rc6_base))
return PTR_ERR(ir->rc6_base);
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 83ca5dc047ea..0e26d22f0b26 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -206,7 +206,7 @@ static int qm1d1c0042_set_params(struct dvb_frontend *fe)
if (ret < 0)
return ret;
- a = (freq + state->cfg.xtal_freq / 2) / state->cfg.xtal_freq;
+ a = DIV_ROUND_CLOSEST(freq, state->cfg.xtal_freq);
state->regs[0x06] &= 0x40;
state->regs[0x06] |= (a - 12) / 4;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index e87040d6eca7..898e0f9f8b70 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -118,6 +118,11 @@ static int si2157_init(struct dvb_frontend *fe)
goto err;
}
+ if (dev->dont_load_firmware) {
+ dev_info(&client->dev, "device is buggy, skipping firmware download\n");
+ goto skip_fw_download;
+ }
+
/* query chip revision */
memcpy(cmd.args, "\x02", 1);
cmd.wlen = 1;
@@ -440,6 +445,7 @@ static int si2157_probe(struct i2c_client *client,
i2c_set_clientdata(client, dev);
dev->fe = cfg->fe;
dev->inversion = cfg->inversion;
+ dev->dont_load_firmware = cfg->dont_load_firmware;
dev->if_port = cfg->if_port;
dev->chiptype = (u8)id->driver_data;
dev->if_frequency = 5000000; /* default value of property 0x0706 */
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index c22ca784f43f..ffdece3c2eaa 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -11,29 +11,34 @@
#include <media/media-device.h>
#include <media/dvb_frontend.h>
-/*
- * I2C address
- * 0x60
+/**
+ * struct si2157_config - configuration parameters for si2157
+ *
+ * @fe:
+ * frontend returned by driver
+ * @mdev:
+ * media device returned by driver
+ * @inversion:
+ * spectral inversion
+ * @dont_load_firmware:
+ * Instead of uploading a new firmware, use the existing one
+ * @if_port:
+ * Port selection
+ * Select the RF interface to use (pins 9+11 or 12+13)
+ *
+ * Note:
+ * The I2C address of this demod is 0x60.
*/
struct si2157_config {
- /*
- * frontend
- */
struct dvb_frontend *fe;
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_device *mdev;
#endif
- /*
- * Spectral Inversion
- */
- bool inversion;
+ unsigned int inversion:1;
+ unsigned int dont_load_firmware:1;
- /*
- * Port selection
- * Select the RF interface to use (pins 9+11 or 12+13)
- */
u8 if_port;
};
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 2bda903358da..778f81b39996 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -23,8 +23,9 @@ enum si2157_pads {
struct si2157_dev {
struct mutex i2c_mutex;
struct dvb_frontend *fe;
- bool active;
- bool inversion;
+ unsigned int active:1;
+ unsigned int inversion:1;
+ unsigned int dont_load_firmware:1;
u8 chiptype;
u8 if_port;
u32 if_frequency;
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index 50d017a4822a..fcca39d3e006 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tuner-xc2028_types
*
* This file includes internal types to be used inside tuner-xc2028.
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h
index 7b58bc06e35c..2dd45d0765d7 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/tuner-xc2028.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tuner-xc2028
*
* Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 1826ff825c2e..039963a7765b 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
mutex_unlock(&fc_usb->data_mutex);
- return 0;
+ return ret;
}
/* actual bus specific access functions,
@@ -504,7 +504,13 @@ urb_error:
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
/* use the alternate setting with the largest buffer */
- usb_set_interface(fc_usb->udev,0,1);
+ int ret = usb_set_interface(fc_usb->udev, 0, 1);
+
+ if (ret) {
+ err("set interface failed.");
+ return ret;
+ }
+
switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
err("cannot handle USB speed because it is too slow.");
@@ -538,6 +544,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
struct flexcop_device *fc = NULL;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
err("out of memory\n");
return -ENOMEM;
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 74f3b29d9c60..2fe2b2d335ba 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_CX231XX
depends on VIDEO_DEV && I2C && I2C_MUX
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
select VIDEO_CX25840
select VIDEO_CX2341X
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 6d218a036966..1aec4459f50a 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -60,10 +60,6 @@
#define MCI_MODE_MEMORY_READ 0x000
#define MCI_MODE_MEMORY_WRITE 0x4000
-static unsigned int mpegbufs = 8;
-module_param(mpegbufs, int, 0644);
-MODULE_PARM_DESC(mpegbufs, "number of mpeg buffers, range 2-32");
-
static unsigned int mpeglines = 128;
module_param(mpeglines, int, 0644);
MODULE_PARM_DESC(mpeglines, "number of lines in an MPEG buffer, range 2-32");
@@ -1080,16 +1076,6 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
return 0;
}
-static void cx231xx_417_check_encoder(struct cx231xx *dev)
-{
- u32 status, seq;
-
- status = 0;
- seq = 0;
- cx231xx_api_cmd(dev, CX2341X_ENC_GET_SEQ_END, 0, 2, &status, &seq);
- dprintk(1, "%s() status = %d, seq = %d\n", __func__, status, seq);
-}
-
static void cx231xx_codec_settings(struct cx231xx *dev)
{
dprintk(1, "%s()\n", __func__);
@@ -1227,40 +1213,25 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
/* ------------------------------------------------------------------ */
-static int bb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned int size = mpeglinesize * mpeglines;
- fh->dev->ts1.ts_packet_size = mpeglinesize;
- fh->dev->ts1.ts_packet_count = mpeglines;
+ dev->ts1.ts_packet_size = mpeglinesize;
+ dev->ts1.ts_packet_count = mpeglines;
- *size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
- *count = mpegbufs;
+ if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
- return 0;
-}
-
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
-{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = mpeglinesize * mpeglines;
- BUG_ON(in_interrupt());
-
- spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->USE_ISO) {
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
- } else {
- if (dev->video_mode.bulk_ctl.buf == buf)
- dev->video_mode.bulk_ctl.buf = NULL;
- }
- spin_unlock_irqrestore(&dev->video_mode.slock, flags);
- videobuf_waiton(vq, &buf->vb, 0, 0);
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ return 0;
}
static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *urb,
@@ -1276,13 +1247,13 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
return;
buf = list_entry(dma_q->active.next,
- struct cx231xx_buffer, vb.queue);
+ struct cx231xx_buffer, list);
dev->video_mode.isoc_ctl.buf = buf;
dma_q->mpeg_buffer_done = 1;
}
/* Fill buffer */
buf = dev->video_mode.isoc_ctl.buf;
- vbuf = videobuf_to_vmalloc(&buf->vb);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
if ((dma_q->mpeg_buffer_completed+len) <
mpeglines*mpeglinesize) {
@@ -1306,11 +1277,10 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
memcpy(vbuf+dma_q->mpeg_buffer_completed,
data, tail_data);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = dma_q->sequence++;
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
dma_q->mpeg_buffer_completed = 0;
if (len - tail_data > 0) {
@@ -1331,17 +1301,15 @@ static void buffer_filled(char *data, int len, struct urb *urb,
if (list_empty(&dma_q->active))
return;
- buf = list_entry(dma_q->active.next,
- struct cx231xx_buffer, vb.queue);
+ buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Fill buffer */
- vbuf = videobuf_to_vmalloc(&buf->vb);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
memcpy(vbuf, data, len);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
static int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
@@ -1394,100 +1362,104 @@ static int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
-static int bb_buf_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb, enum v4l2_field field)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = q->priv_data;
struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
- int size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ unsigned long flags;
- if (0 != buf->vb.baddr && buf->vb.bsize < size)
- return -EINVAL;
- buf->vb.width = fh->dev->ts1.ts_packet_size;
- buf->vb.height = fh->dev->ts1.ts_packet_count;
- buf->vb.size = size;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+}
- if (dev->USE_ISO) {
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
- } else {
- if (!dev->video_mode.bulk_ctl.num_bufs)
- urb_init = 1;
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
+{
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
}
- dev_dbg(dev->dev,
- "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ int ret = 0;
+
+ vidq->sequence = 0;
dev->mode_tv = 1;
- if (urb_init) {
- rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- rc = cx231xx_unmute_audio(dev);
- if (dev->USE_ISO) {
- cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
- rc = cx231xx_init_isoc(dev, mpeglines,
- mpegbufs,
- dev->ts1_mode.max_pkt_size,
- cx231xx_isoc_copy);
- } else {
- cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- rc = cx231xx_init_bulk(dev, mpeglines,
- mpegbufs,
- dev->ts1_mode.max_pkt_size,
- cx231xx_bulk_copy);
- }
- if (rc < 0)
- goto fail;
- }
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
+ cx231xx_set_gpio_value(dev, 2, 0);
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
+ cx231xx_initialize_codec(dev);
+
+ cx231xx_start_TS1(dev);
+
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (dev->USE_ISO)
+ ret = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else
+ ret = cx231xx_init_bulk(dev, 320, 5,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
-fail:
- free_buffer(q, buf);
- return rc;
+ call_all(dev, video, s_stream, 1);
+ return ret;
}
-static void bb_buf_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *vq)
{
- struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned long flags;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ call_all(dev, video, s_stream, 0);
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ cx231xx_stop_TS1(dev);
-}
+ /* do this before setting alternate! */
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
+ cx231xx_set_mode(dev, CX231XX_SUSPEND);
-static void bb_buf_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
-{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- /*struct cx231xx_fh *fh = q->priv_data;*/
- /*struct cx231xx *dev = (struct cx231xx *)fh->dev;*/
+ cx231xx_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
+ CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
+ CX231xx_RAW_BITS_NONE);
- free_buffer(q, buf);
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-static const struct videobuf_queue_ops cx231xx_qops = {
- .buf_setup = bb_buf_setup,
- .buf_prepare = bb_buf_prepare,
- .buf_queue = bb_buf_queue,
- .buf_release = bb_buf_release,
+static struct vb2_ops cx231xx_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------ */
@@ -1495,8 +1467,7 @@ static const struct videobuf_queue_ops cx231xx_qops = {
static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
bool is_50hz = dev->encodernorm.id & V4L2_STD_625_50;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1511,8 +1482,7 @@ static int vidioc_g_pixelaspect(struct file *file, void *priv,
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1533,8 +1503,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*norm = dev->encodernorm.id;
return 0;
@@ -1542,8 +1511,7 @@ static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cx231xx_tvnorms); i++)
@@ -1575,8 +1543,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev *sd;
dprintk(3, "enter vidioc_s_ctrl()\n");
@@ -1601,8 +1568,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
dprintk(3, "enter vidioc_g_fmt_vid_cap()\n");
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -1621,8 +1587,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
dprintk(3, "enter vidioc_try_fmt_vid_cap()\n");
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -1636,230 +1601,21 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_reqbufs(&fh->vidq, p);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_querybuf(&fh->vidq, p);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_qbuf(&fh->vidq, p);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
-
- return videobuf_dqbuf(&fh->vidq, b, file->f_flags & O_NONBLOCK);
-}
-
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type i)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- dprintk(3, "enter vidioc_streamon()\n");
- cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- if (dev->USE_ISO)
- cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_isoc_copy);
- else {
- cx231xx_init_bulk(dev, 320,
- 5,
- dev->ts1_mode.max_pkt_size,
- cx231xx_bulk_copy);
- }
- dprintk(3, "exit vidioc_streamon()\n");
- return videobuf_streamon(&fh->vidq);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_streamoff(&fh->vidq);
-}
-
static int vidioc_log_status(struct file *file, void *priv)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
call_all(dev, core, log_status);
return v4l2_ctrl_log_status(file, priv);
}
-static int mpeg_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct cx231xx *dev = video_drvdata(file);
- struct cx231xx_fh *fh;
-
- dprintk(2, "%s()\n", __func__);
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- mutex_unlock(&dev->lock);
- return -ENOMEM;
- }
-
- file->private_data = fh;
- v4l2_fh_init(&fh->fh, vdev);
- fh->dev = dev;
-
-
- videobuf_queue_vmalloc_init(&fh->vidq, &cx231xx_qops,
- NULL, &dev->video_mode.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer), fh, &dev->lock);
-/*
- videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops,
- dev->dev, &dev->ts1.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
-*/
-
- cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
- cx231xx_set_gpio_value(dev, 2, 0);
-
- cx231xx_initialize_codec(dev);
-
- mutex_unlock(&dev->lock);
- v4l2_fh_add(&fh->fh);
- cx231xx_start_TS1(dev);
-
- return 0;
-}
-
-static int mpeg_release(struct file *file)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- dprintk(3, "mpeg_release()! dev=0x%p\n", dev);
-
- mutex_lock(&dev->lock);
-
- cx231xx_stop_TS1(dev);
-
- /* do this before setting alternate! */
- if (dev->USE_ISO)
- cx231xx_uninit_isoc(dev);
- else
- cx231xx_uninit_bulk(dev);
- cx231xx_set_mode(dev, CX231XX_SUSPEND);
-
- cx231xx_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
- CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
- CX231xx_RAW_BITS_NONE);
-
- /* FIXME: Review this crap */
- /* Shut device down on last close */
- if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
- if (atomic_dec_return(&dev->v4l_reader_count) == 0) {
- /* stop mpeg capture */
-
- msleep(500);
- cx231xx_417_check_encoder(dev);
-
- }
- }
-
- if (fh->vidq.streaming)
- videobuf_streamoff(&fh->vidq);
- if (fh->vidq.reading)
- videobuf_read_stop(&fh->vidq);
-
- videobuf_mmap_free(&fh->vidq);
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static ssize_t mpeg_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- /* Deal w/ A/V decoder * and mpeg encoder sync issues. */
- /* Start mpeg encoder on first read. */
- if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
- if (atomic_inc_return(&dev->v4l_reader_count) == 1) {
- if (cx231xx_initialize_codec(dev) < 0)
- return -EINVAL;
- }
- }
-
- return videobuf_read_stream(&fh->vidq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
-}
-
-static __poll_t mpeg_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
- __poll_t res = 0;
-
- if (v4l2_event_pending(&fh->fh))
- res |= EPOLLPRI;
- else
- poll_wait(file, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(file, &fh->vidq, wait);
- mutex_unlock(&dev->lock);
- return res;
-}
-
-static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- dprintk(2, "%s()\n", __func__);
-
- return videobuf_mmap_mapper(&fh->vidq, vma);
-}
-
static const struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
- .open = mpeg_open,
- .release = mpeg_release,
- .read = mpeg_read,
- .poll = mpeg_poll,
- .mmap = mpeg_mmap,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -1881,12 +1637,12 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = vidioc_log_status,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = cx231xx_g_register,
@@ -1980,6 +1736,7 @@ int cx231xx_417_register(struct cx231xx *dev)
/* FIXME: Port1 hardcoded here */
int err = -ENODEV;
struct cx231xx_tsport *tsport = &dev->ts1;
+ struct vb2_queue *q;
dprintk(1, "%s()\n", __func__);
@@ -2017,6 +1774,21 @@ int cx231xx_417_register(struct cx231xx *dev)
/* Allocate and initialize V4L video device */
cx231xx_video_dev_init(dev, dev->udev,
&dev->v4l_device, &cx231xx_mpeg_template, "mpeg");
+ q = &dev->mpegq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ err = vb2_queue_init(q);
+ if (err)
+ return err;
+ dev->v4l_device.queue = q;
+
err = video_register_device(&dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
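
For context (not part of this patch): after this conversion the reqbufs/querybuf/qbuf/streamon ioctls are serviced by the vb2 helpers wired up above, so the driver only implements the queue_setup(), buffer_queue(), start_streaming() and stop_streaming() callbacks. A minimal userspace sketch (device node and buffer count are illustrative) of the MMAP streaming sequence that exercises those callbacks:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR);	/* illustrative node */
		struct v4l2_requestbuffers req = {
			.count = 4,	/* queue_setup() may raise this to the driver minimum */
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
		};
		enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		unsigned int i;

		ioctl(fd, VIDIOC_REQBUFS, &req);	/* allocates vb2 buffers */
		for (i = 0; i < req.count; i++) {
			struct v4l2_buffer buf = {
				.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
				.memory = V4L2_MEMORY_MMAP,
				.index = i,
			};
			ioctl(fd, VIDIOC_QUERYBUF, &buf);
			mmap(NULL, buf.length, PROT_READ, MAP_SHARED, fd, buf.m.offset);
			ioctl(fd, VIDIOC_QBUF, &buf);	/* lands in buffer_queue() */
		}
		ioctl(fd, VIDIOC_STREAMON, &type);	/* triggers start_streaming() */
		return 0;
	}
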
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 9ef362e221df..fd6e2df3d1b7 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -14,7 +14,6 @@
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index d417b5fe4093..0974965e848f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1240,7 +1240,7 @@ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev)
int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
u8 analog_or_digital)
{
- int status = 0;
+ int status;
/* first set the direction to output */
status = cx231xx_set_gpio_direction(dev,
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e123e74c549e..92efe6c1f47b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1479,13 +1479,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
goto err_dev_init;
}
- /* init video dma queues */
+ /* init video dma queue */
INIT_LIST_HEAD(&dev->video_mode.vidq.active);
- INIT_LIST_HEAD(&dev->video_mode.vidq.queued);
- /* init vbi dma queues */
+ /* init vbi dma queue */
INIT_LIST_HEAD(&dev->vbi_mode.vidq.active);
- INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued);
/* Reset other chips required if they are tied up with GPIO pins */
cx231xx_add_into_devlist(dev);
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index fba7ccdf5a25..d2f143a096d1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -153,131 +153,98 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
Vbi buf operations
------------------------------------------------------------------*/
-static int
-vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int vbi_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
u32 height = 0;
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- *size = (dev->width * height * 2 * 2);
- if (0 == *count)
- *count = CX231XX_DEF_VBI_BUF;
-
- if (*count < CX231XX_MIN_BUF)
- *count = CX231XX_MIN_BUF;
-
+ *nplanes = 1;
+ sizes[0] = (dev->width * height * 2 * 2);
return 0;
}
/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
-{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
- BUG_ON(in_interrupt());
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
-
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->vbi_mode.slock, flags);
- if (dev->vbi_mode.bulk_ctl.buf == buf)
- dev->vbi_mode.bulk_ctl.buf = NULL;
- spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
-
-static int
-vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vbi_buf_prepare(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
u32 height = 0;
+ u32 size;
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- buf->vb.size = ((dev->width << 1) * height * 2);
+ size = ((dev->width << 1) * height * 2);
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < size)
return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = height;
- buf->vb.field = field;
- buf->vb.field = V4L2_FIELD_SEQ_TB;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
-
- if (!dev->vbi_mode.bulk_ctl.num_bufs)
- urb_init = 1;
-
- if (urb_init) {
- rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
- CX231XX_NUM_VBI_BUFS,
- dev->vbi_mode.alt_max_pkt_size[0],
- cx231xx_isoc_vbi_copy);
- if (rc < 0)
- goto fail;
- }
-
- buf->vb.state = VIDEOBUF_PREPARED;
+ vb2_set_plane_payload(vb, 0, size);
return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
}
-static void
-vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void vbi_buf_queue(struct vb2_buffer *vb)
{
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ unsigned long flags;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
+}
+
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
+{
+ struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
+ dev->vbi_mode.bulk_ctl.buf = NULL;
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}
-static void vbi_buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ int ret;
+
+ vidq->sequence = 0;
+ ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
+ CX231XX_NUM_VBI_BUFS,
+ dev->vbi_mode.alt_max_pkt_size[0],
+ cx231xx_isoc_vbi_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+static void vbi_stop_streaming(struct vb2_queue *vq)
+{
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- free_buffer(vq, buf);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-const struct videobuf_queue_ops cx231xx_vbi_qops = {
- .buf_setup = vbi_buffer_setup,
- .buf_prepare = vbi_buffer_prepare,
- .buf_queue = vbi_buffer_queue,
- .buf_release = vbi_buffer_release,
+struct vb2_ops cx231xx_vbi_qops = {
+ .queue_setup = vbi_queue_setup,
+ .buf_prepare = vbi_buf_prepare,
+ .buf_queue = vbi_buf_queue,
+ .start_streaming = vbi_start_streaming,
+ .stop_streaming = vbi_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
@@ -512,16 +479,15 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
/* Advise that buffer was filled */
- /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */
+ /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
dev->vbi_mode.bulk_ctl.buf = NULL;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
@@ -611,11 +577,11 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));
dev->vbi_mode.bulk_ctl.buf = *buf;
@@ -656,7 +622,7 @@ int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
if (buf == NULL)
return -EINVAL;
- p_out_buffer = videobuf_to_vmalloc(&buf->vb);
+ p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
if (dma_q->bytes_left_in_line != _line_size) {
current_line_bytes_copied =
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.h b/drivers/media/usb/cx231xx/cx231xx-vbi.h
index 7cddd629fbfc..0b21bee5fa30 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.h
@@ -10,7 +10,7 @@
#ifndef _CX231XX_VBI_H
#define _CX231XX_VBI_H
-extern const struct videobuf_queue_ops cx231xx_vbi_qops;
+extern struct vb2_ops cx231xx_vbi_qops;
#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
#define NTSC_VBI_END_LINE 21
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 9b51f07a729e..69abafaebbf3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -58,10 +58,10 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(CX231XX_VERSION);
-static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
+static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
module_param_array(card, int, NULL, 0444);
module_param_array(video_nr, int, NULL, 0444);
@@ -166,18 +166,19 @@ static inline void buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
/* Advise that buffer was filled */
- cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
+ cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.vb2_buf.index);
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.field = V4L2_FIELD_INTERLACED;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, dev->size);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = NULL;
else
dev->video_mode.bulk_ctl.buf = NULL;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
@@ -241,11 +242,11 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ memset(outp, 0, dev->size);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = *buf;
@@ -653,7 +654,7 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
if (buf == NULL)
return -1;
- p_out_buffer = videobuf_to_vmalloc(&buf->vb);
+ p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
current_line_bytes_copied = _line_size - dma_q->bytes_left_in_line;
@@ -672,7 +673,7 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
bytes_to_copy : dma_q->bytes_left_in_line;
- if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + buf->vb.size))
+ if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + dev->size))
return 0;
/* The below copies the UYVY data straight into video buffer */
@@ -708,149 +709,98 @@ u8 cx231xx_is_buffer_done(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q)
Videobuf operations
------------------------------------------------------------------*/
-static int
-buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)>>3;
- if (0 == *count)
- *count = CX231XX_DEF_BUF;
+ dev->size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- if (*count < CX231XX_MIN_BUF)
- *count = CX231XX_MIN_BUF;
+ if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
-
- cx231xx_enable_analog_tuner(dev);
+ if (*nplanes)
+ return sizes[0] < dev->size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = dev->size;
return 0;
}
-/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
-
- BUG_ON(in_interrupt());
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ unsigned long flags;
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->USE_ISO) {
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
- } else {
- if (dev->video_mode.bulk_ctl.buf == buf)
- dev->video_mode.bulk_ctl.buf = NULL;
- }
+ list_add_tail(&buf->list, &vidq->active);
spin_unlock_irqrestore(&dev->video_mode.slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
-static int
-buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
-
- /* The only currently supported format is 16 bits/pixel */
- buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth
- + 7) >> 3;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
- if (dev->USE_ISO) {
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
- } else {
- if (!dev->video_mode.bulk_ctl.num_bufs)
- urb_init = 1;
- }
- dev_dbg(dev->dev,
- "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);
- if (urb_init) {
- dev->mode_tv = 0;
- if (dev->USE_ISO)
- rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_isoc_copy);
- else
- rc = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_bulk_copy);
- if (rc < 0)
- goto fail;
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
}
-
- buf->vb.state = VIDEOBUF_PREPARED;
-
- return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
}
-static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ int ret = 0;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ vidq->sequence = 0;
+ dev->mode_tv = 0;
+ cx231xx_enable_analog_tuner(dev);
+ if (dev->USE_ISO)
+ ret = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else
+ ret = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
+ call_all(dev, video, s_stream, 1);
+ return ret;
}
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *vq)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = (struct cx231xx *)fh->dev;
-
- cx231xx_isocdbg("cx231xx: called buffer_release\n");
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- free_buffer(vq, buf);
+ call_all(dev, video, s_stream, 0);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-static const struct videobuf_queue_ops cx231xx_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
+static struct vb2_ops cx231xx_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/********************* v4l2 interface **************************************/
@@ -872,58 +822,6 @@ void video_mux(struct cx231xx *dev, int index)
cx231xx_do_mode_ctrl_overrides(dev);
}
-/* Usage lock check functions */
-static int res_get(struct cx231xx_fh *fh)
-{
- struct cx231xx *dev = fh->dev;
- int rc = 0;
-
- /* This instance already has stream_on */
- if (fh->stream_on)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (dev->stream_on)
- return -EBUSY;
- dev->stream_on = 1;
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (dev->vbi_stream_on)
- return -EBUSY;
- dev->vbi_stream_on = 1;
- } else
- return -EINVAL;
-
- fh->stream_on = 1;
-
- return rc;
-}
-
-static int res_check(struct cx231xx_fh *fh)
-{
- return fh->stream_on;
-}
-
-static void res_free(struct cx231xx_fh *fh)
-{
- struct cx231xx *dev = fh->dev;
-
- fh->stream_on = 0;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- dev->stream_on = 0;
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- dev->vbi_stream_on = 0;
-}
-
-static int check_dev(struct cx231xx *dev)
-{
- if (dev->state & DEV_DISCONNECTED) {
- dev_err(dev->dev, "v4l2 ioctl: device not present\n");
- return -ENODEV;
- }
- return 0;
-}
-
/* ------------------------------------------------------------------
IOCTL vidioc handling
------------------------------------------------------------------*/
@@ -931,8 +829,7 @@ static int check_dev(struct cx231xx *dev)
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
@@ -960,8 +857,7 @@ static struct cx231xx_fmt *format_by_fourcc(unsigned int fourcc)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
unsigned int width = f->fmt.pix.width;
unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
@@ -993,39 +889,25 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
- struct cx231xx_fmt *fmt;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ int rc;
- rc = check_dev(dev);
- if (rc < 0)
+ rc = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (rc)
return rc;
- vidioc_try_fmt_vid_cap(file, priv, f);
-
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt)
- return -EINVAL;
-
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
+ if (vb2_is_busy(&dev->vidq)) {
dev_err(dev->dev, "%s: queue busy\n", __func__);
return -EBUSY;
}
- if (dev->stream_on && !fh->stream_on) {
- dev_err(dev->dev,
- "%s: device in use by another fh\n", __func__);
- return -EBUSY;
- }
-
/* set new image size */
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
- dev->format = fmt;
+ dev->format = format_by_fourcc(f->fmt.pix.pixelformat);
v4l2_fill_mbus_format(&format.format, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
call_all(dev, pad, set_fmt, NULL, &format);
@@ -1036,8 +918,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*id = dev->norm;
return 0;
@@ -1045,21 +926,15 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
if (dev->norm == norm)
return 0;
- if (videobuf_queue_is_busy(&fh->vb_vidq))
+ if (vb2_is_busy(&dev->vidq))
return -EBUSY;
dev->norm = norm;
@@ -1141,8 +1016,7 @@ void cx231xx_v4l2_create_entities(struct cx231xx *dev)
int cx231xx_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
u32 gen_stat;
unsigned int n;
int ret;
@@ -1181,8 +1055,7 @@ int cx231xx_enum_input(struct file *file, void *priv,
int cx231xx_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*i = dev->video_input;
@@ -1191,14 +1064,9 @@ int cx231xx_g_input(struct file *file, void *priv, unsigned int *i)
int cx231xx_s_input(struct file *file, void *priv, unsigned int i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
+ struct cx231xx *dev = video_drvdata(file);
dev->mode_tv = 0;
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
if (i >= MAX_CX231XX_INPUT)
return -EINVAL;
@@ -1220,13 +1088,7 @@ int cx231xx_s_input(struct file *file, void *priv, unsigned int i)
int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
+ struct cx231xx *dev = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -1244,27 +1106,15 @@ int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
int cx231xx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
if (0 != t->index)
return -EINVAL;
-#if 0
- call_all(dev, tuner, s_tuner, t);
-#endif
return 0;
}
int cx231xx_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (f->tuner)
return -EINVAL;
@@ -1277,8 +1127,7 @@ int cx231xx_g_frequency(struct file *file, void *priv,
int cx231xx_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_frequency new_freq = *f;
int rc;
u32 if_frequency = 5400000;
@@ -1287,10 +1136,6 @@ int cx231xx_s_frequency(struct file *file, void *priv,
"Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
f->frequency, f->type);
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
if (0 != f->tuner)
return -EINVAL;
@@ -1365,8 +1210,7 @@ int cx231xx_g_chip_info(struct file *file, void *fh,
int cx231xx_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
int ret;
u8 value[4] = { 0, 0, 0, 0 };
u32 data = 0;
@@ -1424,8 +1268,7 @@ int cx231xx_g_register(struct file *file, void *priv,
int cx231xx_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
int ret;
u8 data[4] = { 0, 0, 0, 0 };
@@ -1472,8 +1315,7 @@ int cx231xx_s_register(struct file *file, void *priv,
static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
bool is_50hz = dev->norm & V4L2_STD_625_50;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1488,8 +1330,7 @@ static int vidioc_g_pixelaspect(struct file *file, void *priv,
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1508,54 +1349,10 @@ static int vidioc_g_selection(struct file *file, void *priv,
return 0;
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- rc = res_get(fh);
-
- if (likely(rc >= 0))
- rc = videobuf_streamon(&fh->vb_vidq);
-
- call_all(dev, video, s_stream, 1);
-
- return rc;
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (type != fh->type)
- return -EINVAL;
-
- cx25840_call(dev, video, s_stream, 0);
-
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh);
-
- return 0;
-}
-
int cx231xx_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
strscpy(cap->driver, "cx231xx", sizeof(cap->driver));
strscpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
@@ -1587,8 +1384,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
@@ -1610,8 +1406,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
@@ -1634,77 +1429,16 @@ static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
-
- if (dev->vbi_stream_on && !fh->stream_on) {
- dev_err(dev->dev,
- "%s device in use by another fh\n", __func__);
- return -EBUSY;
- }
return vidioc_try_fmt_vbi_cap(file, priv, f);
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_reqbufs(&fh->vb_vidq, rb);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_querybuf(&fh->vb_vidq, b);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_qbuf(&fh->vb_vidq, b);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
-}
-
/* ----------------------------------------------------------- */
/* RADIO SPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1717,7 +1451,7 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
}
static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
{
- struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1733,52 +1467,20 @@ static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner
*/
static int cx231xx_v4l2_open(struct file *filp)
{
- int radio = 0;
struct video_device *vdev = video_devdata(filp);
struct cx231xx *dev = video_drvdata(filp);
- struct cx231xx_fh *fh;
- enum v4l2_buf_type fh_type = 0;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
- case VFL_TYPE_VBI:
- fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
- break;
- case VFL_TYPE_RADIO:
- radio = 1;
- break;
- default:
- return -EINVAL;
- }
-
- cx231xx_videodbg("open dev=%s type=%s users=%d\n",
- video_device_node_name(vdev), v4l2_type_names[fh_type],
- dev->users);
-
-#if 0
- errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
- if (errCode < 0) {
- dev_err(dev->dev,
- "Device locked on digital mode. Can't open analog\n");
- return -EBUSY;
- }
-#endif
+ int ret;
- fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
- if (mutex_lock_interruptible(&dev->lock)) {
- kfree(fh);
+ if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(filp);
+ if (ret) {
+ mutex_unlock(&dev->lock);
+ return ret;
}
- fh->dev = dev;
- fh->type = fh_type;
- filp->private_data = fh;
- v4l2_fh_init(&fh->fh, vdev);
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
+ if (dev->users++ == 0) {
/* Power up in Analog TV mode */
if (dev->board.external_av)
cx231xx_set_power_mode(dev,
@@ -1786,10 +1488,6 @@ static int cx231xx_v4l2_open(struct file *filp)
else
cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
-#if 0
- cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
-#endif
-
/* set video alternate setting */
cx231xx_set_video_alternate(dev);
@@ -1799,38 +1497,21 @@ static int cx231xx_v4l2_open(struct file *filp)
/* device needs to be initialized before isoc transfer */
dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
-
}
- if (radio) {
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO) {
cx231xx_videodbg("video_open: setting radio device\n");
/* cx231xx_start_radio(dev); */
call_all(dev, tuner, s_radio);
}
-
- dev->users++;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_video_qops,
- NULL, &dev->video_mode.slock,
- fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
/* Set the required alternate setting; the VBI interface works in
bulk mode only */
cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_vbi_qops,
- NULL, &dev->vbi_mode.slock,
- fh->type, V4L2_FIELD_SEQ_TB,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
}
mutex_unlock(&dev->lock);
- v4l2_fh_add(&fh->fh);
-
return 0;
}
@@ -1871,68 +1552,12 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
*/
static int cx231xx_close(struct file *filp)
{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
-
- cx231xx_videodbg("users=%d\n", dev->users);
-
- cx231xx_videodbg("users=%d\n", dev->users);
- if (res_check(fh))
- res_free(fh);
-
- /*
- * To workaround error number=-71 on EP0 for VideoGrabber,
- * need exclude following.
- * FIXME: It is probably safe to remove most of these, as we're
- * now avoiding the alternate setting for INDEX_VANC
- */
- if (!dev->board.no_alt_vanc)
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- if (atomic_read(&dev->devlist_count) > 0) {
- cx231xx_release_resources(dev);
- fh->dev = NULL;
- return 0;
- }
- return 0;
- }
-
- /* do this before setting alternate! */
- cx231xx_uninit_vbi_isoc(dev);
-
- /* set alternate 0 */
- if (!dev->vbi_or_sliced_cc_mode)
- cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
- else
- cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
-
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- dev->users--;
- wake_up_interruptible(&dev->open);
- return 0;
- }
+ struct cx231xx *dev = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
- v4l2_fh_del(&fh->fh);
- dev->users--;
- if (!dev->users) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- cx231xx_release_resources(dev);
- fh->dev = NULL;
- return 0;
- }
+ _vb2_fop_release(filp, NULL);
+ if (--dev->users == 0) {
/* Save some power by putting tuner to sleep */
call_all(dev, tuner, standby);
@@ -1942,20 +1567,40 @@ static int cx231xx_close(struct file *filp)
else
cx231xx_uninit_bulk(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
+ }
+
+ /*
+	 * To work around error number=-71 on EP0 for VideoGrabber,
+	 * we need to exclude the following.
+ * FIXME: It is probably safe to remove most of these, as we're
+ * now avoiding the alternate setting for INDEX_VANC
+ */
+ if (!dev->board.no_alt_vanc && vdev->vfl_type == VFL_TYPE_VBI) {
+ /* do this before setting alternate! */
+ cx231xx_uninit_vbi_isoc(dev);
/* set alternate 0 */
+ if (!dev->vbi_or_sliced_cc_mode)
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
+ else
+ cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
+
+ wake_up_interruptible_nr(&dev->open, 1);
+ return 0;
+ }
+
+ if (dev->users == 0) {
+ /* set alternate 0 */
cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0);
}
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
+
wake_up_interruptible(&dev->open);
return 0;
}
static int cx231xx_v4l2_close(struct file *filp)
{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(filp);
int rc;
mutex_lock(&dev->lock);
@@ -1964,116 +1609,13 @@ static int cx231xx_v4l2_close(struct file *filp)
return rc;
}
-/*
- * cx231xx_v4l2_read()
- * will allocate buffers when called for the first time
- */
-static ssize_t
-cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
- loff_t *pos)
-{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if ((fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
- (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)) {
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- rc = videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- mutex_unlock(&dev->lock);
- return rc;
- }
- return 0;
-}
-
-/*
- * cx231xx_v4l2_poll()
- * will allocate buffers when called for the first time
- */
-static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- __poll_t res = 0;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return EPOLLERR;
-
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return EPOLLERR;
-
- if (v4l2_event_pending(&fh->fh))
- res |= EPOLLPRI;
- else
- poll_wait(filp, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- if ((V4L2_BUF_TYPE_VIDEO_CAPTURE == fh->type) ||
- (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type)) {
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(filp, &fh->vb_vidq, wait);
- mutex_unlock(&dev->lock);
- return res;
- }
- return res | EPOLLERR;
-}
-
-/*
- * cx231xx_v4l2_mmap()
- */
-static int cx231xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- mutex_unlock(&dev->lock);
-
- cx231xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end -
- (unsigned long)vma->vm_start, rc);
-
- return rc;
-}
-
static const struct v4l2_file_operations cx231xx_v4l_fops = {
.owner = THIS_MODULE,
.open = cx231xx_v4l2_open,
.release = cx231xx_v4l2_close,
- .read = cx231xx_v4l2_read,
- .poll = cx231xx_v4l2_poll,
- .mmap = cx231xx_v4l2_mmap,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -2088,17 +1630,17 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
.vidioc_g_pixelaspect = vidioc_g_pixelaspect,
.vidioc_g_selection = vidioc_g_selection,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = cx231xx_enum_input,
.vidioc_g_input = cx231xx_g_input,
.vidioc_s_input = cx231xx_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = cx231xx_g_tuner,
.vidioc_s_tuner = cx231xx_s_tuner,
.vidioc_g_frequency = cx231xx_g_frequency,
@@ -2175,6 +1717,7 @@ static void cx231xx_vdev_init(struct cx231xx *dev,
int cx231xx_register_analog_devices(struct cx231xx *dev)
{
+ struct vb2_queue *q;
int ret;
dev_info(dev->dev, "v4l2 driver version %s\n", CX231XX_VERSION);
@@ -2221,6 +1764,21 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize video media entity!\n");
#endif
dev->vdev.ctrl_handler = &dev->ctrl_handler;
+
+ q = &dev->vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+ dev->vdev.queue = q;
dev->vdev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
@@ -2254,6 +1812,21 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize vbi media entity!\n");
#endif
dev->vbi_dev.ctrl_handler = &dev->ctrl_handler;
+
+ q = &dev->vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_vbi_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+ dev->vbi_dev.queue = q;
dev->vbi_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VBI_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
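
For context on the queue wiring above and on the vb2_fop_*/vb2_ioctl_* helpers used earlier in this patch: the helpers operate on the vb2_queue attached to the video_device, drv_priv is handed back to the queue ops, lock serializes buffer operations against the driver's own ioctls, and min_buffers_needed makes vb2 defer start_streaming until that many buffers are queued. A minimal sketch of the same setup, assuming only the standard videobuf2 APIs; the "mydrv" names are illustrative and not part of this driver:

/* Sketch only: a hypothetical driver wiring a vmalloc-backed vb2 queue.
 * struct mydrv, struct mydrv_buffer and mydrv_qops are placeholders.
 */
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

static int mydrv_init_vid_queue(struct mydrv *dev)
{
	struct vb2_queue *q = &dev->vidq;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = dev;			/* passed back to the queue ops */
	q->buf_struct_size = sizeof(struct mydrv_buffer);
	q->ops = &mydrv_qops;			/* queue_setup/buf_queue/... */
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 1;		/* start streaming after one queued buffer */
	q->lock = &dev->lock;			/* also taken by the vb2_fop_* helpers */
	return vb2_queue_init(q);		/* then point vdev->queue at q */
}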
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index b007611abc37..b32eab641793 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -20,7 +20,7 @@
#include <media/drv-intf/cx2341x.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
@@ -223,8 +223,8 @@ struct cx231xx_fmt {
/* buffer for one video frame */
struct cx231xx_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
-
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
struct list_head frame;
int top_field;
int receiving;
@@ -237,7 +237,6 @@ enum ps_package_head {
struct cx231xx_dmaqueue {
struct list_head active;
- struct list_head queued;
wait_queue_head_t wq;
@@ -251,6 +250,7 @@ struct cx231xx_dmaqueue {
u32 lines_completed;
u8 field1_done;
u32 lines_per_field;
+ u32 sequence;
/*Mpeg2 control buffer*/
u8 *p_left_data;
@@ -427,23 +427,6 @@ struct cx231xx_audio {
struct cx231xx;
-struct cx231xx_fh {
- struct v4l2_fh fh;
- struct cx231xx *dev;
- unsigned int stream_on:1; /* Locks streams */
- enum v4l2_buf_type type;
-
- struct videobuf_queue vb_vidq;
-
- /* vbi capture */
- struct videobuf_queue vidq;
- struct videobuf_queue vbiq;
-
- /* MPEG Encoder specifics ONLY */
-
- atomic_t v4l_reading;
-};
-
/*****************************************************************/
/* set/get i2c */
/* 00--1Mb/s, 01-400kb/s, 10--100kb/s, 11--5Mb/s */
@@ -634,6 +617,7 @@ struct cx231xx {
int width; /* current frame width */
int height; /* current frame height */
int interlaced; /* 1=interlace fields, 0=just top fields */
+ unsigned int size;
struct cx231xx_audio adev;
@@ -657,6 +641,9 @@ struct cx231xx {
struct media_pad input_pad[MAX_CX231XX_INPUT];
#endif
+ struct vb2_queue vidq;
+ struct vb2_queue vbiq;
+
unsigned char eedata[256];
struct cx231xx_video_mode video_mode;
@@ -717,6 +704,7 @@ struct cx231xx {
u8 USE_ISO;
struct cx231xx_tvnorm encodernorm;
struct cx231xx_tsport ts1, ts2;
+ struct vb2_queue mpegq;
struct video_device v4l_device;
atomic_t v4l_reader_count;
u32 freq;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 3afd18733614..792667ee5ebc 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1197,6 +1197,15 @@ err:
return ret;
}
+/*
+ * The I2C speed register is calculated with:
+ * I2C speed register = (1000000000 / (24.4 * 16 * I2C_speed))
+ *
+ * The default speed register for it930x is 7, which means a
+ * speed of ~366 kbps
+ */
+#define I2C_SPEED_366K 7
+
static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
@@ -1208,13 +1217,13 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
- /* I2C master bus 2 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
+ /* I2C master bus 2 clock speed 366k */
+ ret = af9035_wr_reg(d, 0x00f6a7, I2C_SPEED_366K);
if (ret < 0)
goto err;
- /* I2C master bus 1,3 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f103, 0x07);
+ /* I2C master bus 1,3 clock speed 366k */
+ ret = af9035_wr_reg(d, 0x00f103, I2C_SPEED_366K);
if (ret < 0)
goto err;
@@ -1610,6 +1619,24 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = adap->fe[0];
+
+ /*
+ * HACK: The Logilink VG0022A has a bug: when the si2157
+ * firmware that came with the device is replaced by a new
+ * one, the I2C transfers to the tuner will return just 0xff.
+ *
+ * Probably, the vendor firmware has some patch specifically
+	 * designed for this device. So, we can't replace it with the
+ * generic firmware. The right solution would be to extract
+ * the si2157 firmware from the original driver and ask the
+ * driver to load the specifically designed firmware, but,
+ * while we don't have that, the next best solution is to just
+ * keep the original firmware at the device.
+ */
+ if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK &&
+ le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100)
+ si2157_config.dont_load_firmware = true;
+
si2157_config.if_port = it930x_addresses_table[state->it930x_addresses].tuner_if_port;
ret = af9035_add_i2c_dev(d, "si2157",
it930x_addresses_table[state->it930x_addresses].tuner_i2c_addr,
@@ -2121,6 +2148,8 @@ static const struct usb_device_id af9035_id_table[] = {
&it930x_props, "ITE 9303 Generic", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD310,
&it930x_props, "AVerMedia TD310 DVB-T2", NULL) },
+ { DVB_USB_DEVICE(USB_VID_DEXATEK, 0x0100,
+ &it930x_props, "Logilink VG0022A", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
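
To sanity-check the I2C_SPEED_366K definition above: plugging register value 7 into the formula quoted in the comment gives roughly 366 kHz on the bus. A trivial stand-alone check, using nothing but the formula from the patch:

#include <stdio.h>

/* Speed register <-> bus speed, per the comment in the patch:
 *   reg = 1000000000 / (24.4 * 16 * speed)
 * rearranged:
 *   speed = 1000000000 / (24.4 * 16 * reg)
 */
int main(void)
{
	double reg = 7;
	double speed_hz = 1000000000.0 / (24.4 * 16 * reg);

	printf("register %g -> %.0f Hz (~%.0f kbps)\n",
	       reg, speed_hz, speed_hz / 1000.0);	/* ~366 kbps */
	return 0;
}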
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index b874a49ececf..52bcc2d2efe5 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -121,6 +121,7 @@ struct dvb_usb_driver_info {
* @interval: time in ms between two queries
* @driver_type: used to point if a device supports raw mode
* @bulk_mode: device supports bulk mode for rc (disable polling mode)
+ * @timeout: set to length of last space before raw IR goes idle
*/
struct dvb_usb_rc {
const char *map_name;
@@ -130,6 +131,7 @@ struct dvb_usb_rc {
unsigned int interval;
enum rc_driver_type driver_type;
bool bulk_mode;
+ int timeout;
};
/**
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index e5e056bf9dfa..f1c79f351ec8 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -150,6 +150,7 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
dev->map_name = d->rc.map_name;
dev->allowed_protocols = d->rc.allowed_protos;
dev->change_protocol = d->rc.change_protocol;
+ dev->timeout = d->rc.timeout;
dev->priv = d;
ret = rc_register_device(dev);
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 617a306f6815..356fd8e66834 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -22,7 +22,6 @@ MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver.");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct dvbsky_state {
- struct mutex stream_mutex;
u8 ibuf[DVBSKY_BUF_LEN];
u8 obuf[DVBSKY_BUF_LEN];
u8 last_lock;
@@ -60,17 +59,19 @@ static int dvbsky_usb_generic_rw(struct dvb_usb_device *d,
static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff)
{
struct dvbsky_state *state = d_to_priv(d);
+ static const u8 obuf_pre[3] = { 0x37, 0, 0 };
+ static const u8 obuf_post[3] = { 0x36, 3, 0 };
int ret;
- u8 obuf_pre[3] = { 0x37, 0, 0 };
- u8 obuf_post[3] = { 0x36, 3, 0 };
- mutex_lock(&state->stream_mutex);
- ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0);
+ mutex_lock(&d->usb_mutex);
+ memcpy(state->obuf, obuf_pre, 3);
+ ret = dvb_usbv2_generic_write_locked(d, state->obuf, 3);
if (!ret && onoff) {
msleep(20);
- ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0);
+ memcpy(state->obuf, obuf_post, 3);
+ ret = dvb_usbv2_generic_write_locked(d, state->obuf, 3);
}
- mutex_unlock(&state->stream_mutex);
+ mutex_unlock(&d->usb_mutex);
return ret;
}
@@ -591,17 +592,7 @@ static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name)
static int dvbsky_init(struct dvb_usb_device *d)
{
struct dvbsky_state *state = d_to_priv(d);
-
- /* use default interface */
- /*
- ret = usb_set_interface(d->udev, 0, 0);
- if (ret)
- return ret;
- */
- mutex_init(&state->stream_mutex);
-
state->last_lock = 0;
-
return 0;
}
@@ -792,6 +783,9 @@ static const struct usb_device_id dvbsky_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
+ { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C_LITE,
+ &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C Lite",
+ NULL) },
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C2,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C v2",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index c7197e534c02..19217dcf20f1 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -5,7 +5,7 @@
*/
#include <linux/string.h>
-#include "gl861.h"
+#include "dvb_usb.h"
#include "zl10353.h"
#include "qt1010.h"
@@ -14,93 +14,157 @@
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
- u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
-{
- u16 index;
- u16 value = addr << (8 + 1);
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 req, type;
- u8 *buf;
- int ret;
+struct gl861 {
+ /* USB control message buffer */
+ u8 buf[16];
- if (wo) {
- req = GL861_REQ_I2C_WRITE;
- type = GL861_WRITE;
- buf = kmemdup(wbuf, wlen, GFP_KERNEL);
- } else { /* rw */
- req = GL861_REQ_I2C_READ;
- type = GL861_READ;
- buf = kmalloc(rlen, GFP_KERNEL);
- }
- if (!buf)
- return -ENOMEM;
+ struct i2c_adapter *demod_sub_i2c;
+ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+};
- switch (wlen) {
- case 1:
- index = wbuf[0];
+#define CMD_WRITE_SHORT 0x01
+#define CMD_READ 0x02
+#define CMD_WRITE 0x03
+
+static int gl861_ctrl_msg(struct dvb_usb_device *d, u8 request, u16 value,
+ u16 index, void *data, u16 size)
+{
+ struct gl861 *ctx = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
+ int ret;
+ unsigned int pipe;
+ u8 requesttype;
+
+ mutex_lock(&d->usb_mutex);
+
+ switch (request) {
+ case CMD_WRITE:
+ memcpy(ctx->buf, data, size);
+ /* Fall through */
+ case CMD_WRITE_SHORT:
+ pipe = usb_sndctrlpipe(d->udev, 0);
+ requesttype = USB_TYPE_VENDOR | USB_DIR_OUT;
break;
- case 2:
- index = wbuf[0];
- value = value + wbuf[1];
+ case CMD_READ:
+ pipe = usb_rcvctrlpipe(d->udev, 0);
+ requesttype = USB_TYPE_VENDOR | USB_DIR_IN;
break;
default:
- dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n",
- KBUILD_MODNAME, wlen);
- kfree(buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_mutex_unlock;
}
- usleep_range(1000, 2000); /* avoid I2C errors */
+ ret = usb_control_msg(d->udev, pipe, request, requesttype, value,
+ index, ctx->buf, size, 200);
+ dev_dbg(&intf->dev, "%d | %02x %02x %*ph %*ph %*ph %s %*ph\n",
+ ret, requesttype, request, 2, &value, 2, &index, 2, &size,
+ (requesttype & USB_DIR_IN) ? "<<<" : ">>>", size, ctx->buf);
+ if (ret < 0)
+ goto err_mutex_unlock;
- ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, type,
- value, index, buf, rlen, 2000);
+ if (request == CMD_READ)
+ memcpy(data, ctx->buf, size);
- if (!wo && ret > 0)
- memcpy(rbuf, buf, rlen);
+ usleep_range(1000, 2000); /* Avoid I2C errors */
- kfree(buf);
+ mutex_unlock(&d->usb_mutex);
+
+ return 0;
+
+err_mutex_unlock:
+ mutex_unlock(&d->usb_mutex);
+ dev_dbg(&intf->dev, "failed %d\n", ret);
return ret;
}
-/* I2C */
-static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
+static int gl861_short_write(struct dvb_usb_device *d, u8 addr, u8 reg, u8 val)
+{
+ return gl861_ctrl_msg(d, CMD_WRITE_SHORT,
+ (addr << 9) | val, reg, NULL, 0);
+}
+
+static int gl861_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
+ struct usb_interface *intf = d->intf;
+ struct gl861 *ctx = d_to_priv(d);
+ int ret;
+ u8 request, *data;
+ u16 value, index, size;
+
+ /* XXX: I2C adapter maximum data lengths are not tested */
+ if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ /* I2C write */
+ if (msg[0].len < 2 || msg[0].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ value = (msg[0].addr << 1) << 8;
+ index = msg[0].buf[0];
+
+ if (msg[0].len == 2) {
+ request = CMD_WRITE_SHORT;
+ value |= msg[0].buf[1];
+ size = 0;
+ data = NULL;
+ } else {
+ request = CMD_WRITE;
+ size = msg[0].len - 1;
+ data = &msg[0].buf[1];
+ }
+
+ ret = gl861_ctrl_msg(d, request, value, index, data, size);
+ } else if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ (msg[1].flags & I2C_M_RD)) {
+ /* I2C write + read */
+ if (msg[0].len > 1 || msg[1].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- /* write/read request */
- if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, msg[i+1].buf, msg[i+1].len) < 0)
- break;
- i++;
- } else
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, NULL, 0) < 0)
- break;
+ value = (msg[0].addr << 1) << 8;
+ index = msg[0].buf[0];
+ request = CMD_READ;
+
+ ret = gl861_ctrl_msg(d, request, value, index,
+ msg[1].buf, msg[1].len);
+ } else if (num == 1 && (msg[0].flags & I2C_M_RD)) {
+ /* I2C read */
+ if (msg[0].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ value = (msg[0].addr << 1) << 8;
+ index = 0x0100;
+ request = CMD_READ;
+
+ ret = gl861_ctrl_msg(d, request, value, index,
+ msg[0].buf, msg[0].len);
+ } else {
+ /* Unsupported I2C message */
+ dev_dbg(&intf->dev, "unknown i2c msg, num %u\n", num);
+ ret = -EOPNOTSUPP;
}
+ if (ret)
+ goto err;
- mutex_unlock(&d->i2c_mutex);
- return i;
+ return num;
+err:
+ dev_dbg(&intf->dev, "failed %d\n", ret);
+ return ret;
}
-static u32 gl861_i2c_func(struct i2c_adapter *adapter)
+static u32 gl861_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static struct i2c_algorithm gl861_i2c_algo = {
- .master_xfer = gl861_i2c_xfer,
- .functionality = gl861_i2c_func,
+ .master_xfer = gl861_i2c_master_xfer,
+ .functionality = gl861_i2c_functionality,
};
/* Callbacks for DVB USB */
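
As a quick illustration of the register packing used by gl861_short_write() and the write path above: the 7-bit I2C address ends up in the upper bits of wValue ((addr << 1) << 8, i.e. addr << 9), the data byte in its low byte, and the register offset goes in wIndex. A small stand-alone example; the second call uses a made-up non-zero address purely to make the shift visible:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the packing in gl861_short_write():
 *   wValue = (addr << 9) | val, wIndex = reg
 */
static void show(uint8_t addr, uint8_t reg, uint8_t val)
{
	uint16_t wValue = (uint16_t)(addr << 9) | val;

	printf("addr 0x%02x reg 0x%02x val 0x%02x -> wValue 0x%04x wIndex 0x%04x\n",
	       addr, reg, val, wValue, (uint16_t)reg);
}

int main(void)
{
	show(0x00, 0x11, 0x02);	/* the friio_reset() call: wValue 0x0002, wIndex 0x0011 */
	show(0x18, 0x11, 0x02);	/* hypothetical non-zero address: wValue 0x3002 */
	return 0;
}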
@@ -149,6 +213,8 @@ static struct dvb_usb_device_properties gl861_props = {
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct gl861),
+
.i2c_algo = &gl861_i2c_algo,
.frontend_attach = gl861_frontend_attach,
.tuner_attach = gl861_tuner_attach,
@@ -166,14 +232,6 @@ static struct dvb_usb_device_properties gl861_props = {
/*
* For Friio
*/
-
-struct friio_priv {
- struct i2c_adapter *demod_sub_i2c;
- struct i2c_client *i2c_client_demod;
- struct i2c_client *i2c_client_tuner;
- struct i2c_adapter tuner_adap;
-};
-
struct friio_config {
struct i2c_board_info demod_info;
struct tc90522_config demod_cfg;
@@ -184,132 +242,10 @@ struct friio_config {
static const struct friio_config friio_config = {
.demod_info = { I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x18), },
+ .demod_cfg = { .split_tuner_read_i2c = true, },
.tuner_info = { I2C_BOARD_INFO("tua6034_friio", 0x60), },
};
-/* For another type of I2C:
- * message sent by a USB control-read/write transaction with data stage.
- * Used in init/config of Friio.
- */
-static int
-gl861_i2c_write_ex(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen)
-{
- u8 *buf;
- int ret;
-
- buf = kmemdup(wbuf, wlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_RAW, GL861_WRITE,
- addr << (8 + 1), 0x0100, buf, wlen, 2000);
- kfree(buf);
- return ret;
-}
-
-static int
-gl861_i2c_read_ex(struct dvb_usb_device *d, u8 addr, u8 *rbuf, u16 rlen)
-{
- u8 *buf;
- int ret;
-
- buf = kmalloc(rlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
- GL861_REQ_I2C_READ, GL861_READ,
- addr << (8 + 1), 0x0100, buf, rlen, 2000);
- if (ret > 0 && rlen > 0)
- memcpy(buf, rbuf, rlen);
- kfree(buf);
- return ret;
-}
-
-/* For I2C transactions to the tuner of Friio (dvb_pll).
- *
- * Friio uses irregular USB encapsulation for tuner i2c transactions:
- * write transacions are encapsulated with a different USB 'request' value.
- *
- * Although all transactions are sent via the demod(tc90522)
- * and the demod provides an i2c adapter for them, it cannot be used in Friio
- * since it assumes using the same parent adapter with the demod,
- * which does not use the request value and uses same one for both read/write.
- * So we define a dedicated i2c adapter here.
- */
-
-static int
-friio_i2c_tuner_read(struct dvb_usb_device *d, struct i2c_msg *msg)
-{
- struct friio_priv *priv;
- u8 addr;
-
- priv = d_to_priv(d);
- addr = priv->i2c_client_demod->addr;
- return gl861_i2c_read_ex(d, addr, msg->buf, msg->len);
-}
-
-static int
-friio_i2c_tuner_write(struct dvb_usb_device *d, struct i2c_msg *msg)
-{
- u8 *buf;
- int ret;
- struct friio_priv *priv;
-
- priv = d_to_priv(d);
-
- if (msg->len < 1)
- return -EINVAL;
-
- buf = kmalloc(msg->len + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- buf[0] = msg->addr << 1;
- memcpy(buf + 1, msg->buf, msg->len);
-
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_RAW, GL861_WRITE,
- priv->i2c_client_demod->addr << (8 + 1),
- 0xFE, buf, msg->len + 1, 2000);
- kfree(buf);
- return ret;
-}
-
-static int friio_tuner_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
-{
- struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
-
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- int ret;
-
- if (msg[i].flags & I2C_M_RD)
- ret = friio_i2c_tuner_read(d, &msg[i]);
- else
- ret = friio_i2c_tuner_write(d, &msg[i]);
-
- if (ret < 0)
- break;
-
- usleep_range(1000, 2000); /* avoid I2C errors */
- }
-
- mutex_unlock(&d->i2c_mutex);
- return i;
-}
-
-static struct i2c_algorithm friio_tuner_i2c_algo = {
- .master_xfer = friio_tuner_i2c_xfer,
- .functionality = gl861_i2c_func,
-};
/* GPIO control in Friio */
@@ -377,9 +313,11 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
/* init/config of gl861 for Friio */
/* NOTE:
* This function cannot be moved to friio_init()/dvb_usbv2_init(),
- * because the init defined here must be done before any activities like I2C,
+ * because the init defined here includes a whole device reset,
+ * so it must be run early, before any activities like I2C,
* but friio_init() is called by dvb-usbv2 after {_frontend, _tuner}_attach(),
* where I2C communication is used.
+ * In addition, this reset is required in reset_resume() as well.
* Thus this function is set to be called from _power_ctl().
*
* Since it will be called on the early init stage
@@ -389,7 +327,7 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
static int friio_reset(struct dvb_usb_device *d)
{
int i, ret;
- u8 wbuf[2], rbuf[2];
+ u8 wbuf[1], rbuf[2];
static const u8 friio_init_cmds[][2] = {
{0x33, 0x08}, {0x37, 0x40}, {0x3a, 0x1f}, {0x3b, 0xff},
@@ -401,16 +339,12 @@ static int friio_reset(struct dvb_usb_device *d)
if (ret < 0)
return ret;
- wbuf[0] = 0x11;
- wbuf[1] = 0x02;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x11, 0x02);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- wbuf[0] = 0x11;
- wbuf[1] = 0x00;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x11, 0x00);
if (ret < 0)
return ret;
@@ -420,14 +354,13 @@ static int friio_reset(struct dvb_usb_device *d)
*/
usleep_range(1000, 2000);
- wbuf[0] = 0x03;
- wbuf[1] = 0x80;
- ret = gl861_i2c_write_ex(d, 0x09, wbuf, 2);
+ wbuf[0] = 0x80;
+ ret = gl861_ctrl_msg(d, CMD_WRITE, 0x09 << 9, 0x03, wbuf, 1);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- ret = gl861_i2c_read_ex(d, 0x09, rbuf, 2);
+ ret = gl861_ctrl_msg(d, CMD_READ, 0x09 << 9, 0x0100, rbuf, 2);
if (ret < 0)
return ret;
if (rbuf[0] != 0xff || rbuf[1] != 0xff)
@@ -435,38 +368,33 @@ static int friio_reset(struct dvb_usb_device *d)
usleep_range(1000, 2000);
- ret = gl861_i2c_write_ex(d, 0x48, wbuf, 2);
+ wbuf[0] = 0x80;
+ ret = gl861_ctrl_msg(d, CMD_WRITE, 0x48 << 9, 0x03, wbuf, 1);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- ret = gl861_i2c_read_ex(d, 0x48, rbuf, 2);
+ ret = gl861_ctrl_msg(d, CMD_READ, 0x48 << 9, 0x0100, rbuf, 2);
if (ret < 0)
return ret;
if (rbuf[0] != 0xff || rbuf[1] != 0xff)
return -ENODEV;
- wbuf[0] = 0x30;
- wbuf[1] = 0x04;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x30, 0x04);
if (ret < 0)
return ret;
- wbuf[0] = 0x00;
- wbuf[1] = 0x01;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x00, 0x01);
if (ret < 0)
return ret;
- wbuf[0] = 0x06;
- wbuf[1] = 0x0f;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x06, 0x0f);
if (ret < 0)
return ret;
for (i = 0; i < ARRAY_SIZE(friio_init_cmds); i++) {
- ret = gl861_i2c_msg(d, 0x00, (u8 *)friio_init_cmds[i], 2,
- NULL, 0);
+ ret = gl861_short_write(d, 0x00, friio_init_cmds[i][0],
+ friio_init_cmds[i][1]);
if (ret < 0)
return ret;
}
@@ -488,9 +416,10 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d;
struct tc90522_config cfg;
struct i2c_client *cl;
- struct friio_priv *priv;
+ struct gl861 *priv;
info = &friio_config.demod_info;
+ cfg = friio_config.demod_cfg;
d = adap_to_d(adap);
cl = dvb_module_probe("tc90522", info->type,
&d->i2c_adap, info->addr, &cfg);
@@ -498,25 +427,17 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
adap->fe[0] = cfg.fe;
- /* ignore cfg.tuner_i2c and create new one */
priv = adap_to_priv(adap);
priv->i2c_client_demod = cl;
- priv->tuner_adap.algo = &friio_tuner_i2c_algo;
- priv->tuner_adap.dev.parent = &d->udev->dev;
- strscpy(priv->tuner_adap.name, d->name, sizeof(priv->tuner_adap.name));
- strlcat(priv->tuner_adap.name, "-tuner", sizeof(priv->tuner_adap.name));
- priv->demod_sub_i2c = &priv->tuner_adap;
- i2c_set_adapdata(&priv->tuner_adap, d);
-
- return i2c_add_adapter(&priv->tuner_adap);
+ priv->demod_sub_i2c = cfg.tuner_i2c;
+ return 0;
}
static int friio_frontend_detach(struct dvb_usb_adapter *adap)
{
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
- i2c_del_adapter(&priv->tuner_adap);
dvb_module_release(priv->i2c_client_demod);
return 0;
}
@@ -526,7 +447,7 @@ static int friio_tuner_attach(struct dvb_usb_adapter *adap)
const struct i2c_board_info *info;
struct dvb_pll_config cfg;
struct i2c_client *cl;
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
info = &friio_config.tuner_info;
@@ -543,7 +464,7 @@ static int friio_tuner_attach(struct dvb_usb_adapter *adap)
static int friio_tuner_detach(struct dvb_usb_adapter *adap)
{
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
dvb_module_release(priv->i2c_client_tuner);
@@ -554,7 +475,7 @@ static int friio_init(struct dvb_usb_device *d)
{
int i;
int ret;
- struct friio_priv *priv;
+ struct gl861 *priv;
static const u8 demod_init[][2] = {
{0x01, 0x40}, {0x04, 0x38}, {0x05, 0x40}, {0x07, 0x40},
@@ -606,7 +527,7 @@ static struct dvb_usb_device_properties friio_props = {
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
- .size_of_priv = sizeof(struct friio_priv),
+ .size_of_priv = sizeof(struct gl861),
.i2c_algo = &gl861_i2c_algo,
.power_ctrl = friio_power_ctrl,
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.h b/drivers/media/usb/dvb-usb-v2/gl861.h
deleted file mode 100644
index 02c00e10748a..000000000000
--- a/drivers/media/usb/dvb-usb-v2/gl861.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DVB_USB_GL861_H_
-#define _DVB_USB_GL861_H_
-
-#include "dvb_usb.h"
-
-#define GL861_WRITE 0x40
-#define GL861_READ 0xc0
-
-#define GL861_REQ_I2C_WRITE 0x01
-#define GL861_REQ_I2C_READ 0x02
-#define GL861_REQ_I2C_RAW 0x03
-
-#endif
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 1a36bda28542..5016ede7b35f 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1781,7 +1781,6 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
}
/* 'flush' ir_raw_event_store_with_filter() */
- ir_raw_event_set_idle(d->rc_dev, true);
ir_raw_event_handle(d->rc_dev);
exit:
return ret;
@@ -1804,6 +1803,8 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
rc->driver_type = RC_DRIVER_IR_RAW;
rc->query = rtl2832u_rc_query;
rc->interval = 200;
+ /* we program idle len to 0xc0, set timeout to one less */
+ rc->timeout = 0xbf * 50800;
return 0;
}
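
For the rc->timeout value set above: the 50800 factor is the driver's raw IR sample period in nanoseconds, so 0xbf samples come to roughly 9.7 ms, one sample short of the 0xc0 idle length mentioned in the comment. A trivial stand-alone check of the arithmetic:

#include <stdio.h>

/* 0xbf samples of 50800 ns each, i.e. one sample less than the
 * 0xc0 idle length programmed into the hardware.
 */
int main(void)
{
	unsigned long timeout_ns = 0xbf * 50800UL;

	printf("timeout = %lu ns (~%.1f ms)\n",
	       timeout_ns, timeout_ns / 1e6);	/* ~9.7 ms */
	return 0;
}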
@@ -1957,7 +1958,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
/* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
- &rtl28xxu_props, "Astrometa DVB-T2", NULL) },
+ &rtl28xxu_props, "Astrometa DVB-T2",
+ RC_MAP_ASTROMETA_T2HYBRID) },
{ DVB_USB_DEVICE(0x5654, 0xca42,
&rtl28xxu_props, "GoTView MasterHD 3", NULL) },
{ }
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 02697d86e8c1..ac93e88d7038 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -976,8 +976,9 @@ static int af9005_identify_state(struct usb_device *udev,
else if (reply == 0x02)
*cold = 0;
else
- return -EIO;
- deb_info("Identify state cold = %d\n", *cold);
+ ret = -EIO;
+ if (!ret)
+ deb_info("Identify state cold = %d\n", *cold);
err:
kfree(buf);
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index f02fa0a67aa4..fac19ec46089 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -521,7 +521,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
{
u8 ircode[4];
- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
+ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
+ return 0;
if (ircode[2] || ircode[3])
rc_keydown(d->rc_dev, RC_PROTO_NEC,
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 49c9b70b632b..79dfbb25714b 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -31,7 +31,6 @@
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 5983e72a0622..def9cdd931a9 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2487,6 +2487,24 @@ const struct em28xx_board em28xx_boards[] = {
.ir_codes = RC_MAP_HAUPPAUGE,
.leds = hauppauge_dualhd_leds,
},
+ /*
+ * 1b80:e349 Magix USB Videowandler-2
+ * (same chips as Honestech VIDBOX NW03)
+ * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner
+ */
+ [EM2861_BOARD_MAGIX_VIDEOWANDLER2] = {
+ .name = "Magix USB Videowandler-2",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = EM28XX_SAA711X,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2696,6 +2714,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28178_BOARD_PLEX_PX_BCUD },
{ USB_DEVICE(0xeb1a, 0x5051), /* Ion Video 2 PC MKII / Startech svid2usb23 / Raygo R12-41373 */
.driver_info = EM2860_BOARD_TVP5150_REFERENCE_DESIGN },
+ { USB_DEVICE(0x1b80, 0xe349), /* Magix USB Videowandler-2 */
+ .driver_info = EM2861_BOARD_MAGIX_VIDEOWANDLER2 },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a73faf12f7e4..0ab6c493bc74 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -471,13 +471,13 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
+ static const struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32},
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
+ static const struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01},
{EM2874_R80_GPIO_P0_CTRL, 0xaf, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x76},
@@ -493,7 +493,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{ -1, -1, -1, -1},
};
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -537,20 +537,20 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
static void terratec_h5_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq terratec_h5_init[] = {
+ static const struct em28xx_reg_seq terratec_h5_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_h5_end[] = {
+ static const struct em28xx_reg_seq terratec_h5_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -594,14 +594,14 @@ static void terratec_htc_stick_init(struct em28xx *dev)
* 0xe6: unknown (does not affect DVB-T).
* 0xb6: unknown (does not affect DVB-T).
*/
- struct em28xx_reg_seq terratec_htc_stick_init[] = {
+ static const struct em28xx_reg_seq terratec_htc_stick_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_htc_stick_end[] = {
+ static const struct em28xx_reg_seq terratec_htc_stick_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
{ -1, -1, -1, -1},
@@ -611,7 +611,7 @@ static void terratec_htc_stick_init(struct em28xx *dev)
* Init the analog decoder (not yet supported), but
* it's probably still a good idea.
*/
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -642,14 +642,14 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
+ static const struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
+ static const struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
@@ -660,7 +660,7 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
* Init the analog decoder (not yet supported), but
* it's probably still a good idea.
*/
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -704,7 +704,7 @@ static void pctv_520e_init(struct em28xx *dev)
* digital demodulator and tuner are routed via AVF4910B.
*/
int i;
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -800,7 +800,7 @@ static int em28xx_mt352_terratec_xs_init(struct dvb_frontend *fe)
static void px_bcud_init(struct em28xx *dev)
{
int i;
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs1[] = {
@@ -818,7 +818,7 @@ static void px_bcud_init(struct em28xx *dev)
{{ 0x85, 0x7a }, 2},
{{ 0x87, 0x04 }, 2},
};
- static struct em28xx_reg_seq gpio[] = {
+ static const struct em28xx_reg_seq gpio[] = {
{EM28XX_R06_I2C_CLK, 0x40, 0xff, 300},
{EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 60},
{EM28XX_R15_RGAIN, 0x20, 0xff, 0},
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index a3155ec196cc..592b98b3643a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -949,7 +949,7 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned int bus)
unsigned char buf;
int i, rc;
- memset(i2c_devicelist, 0, ARRAY_SIZE(i2c_devicelist));
+ memset(i2c_devicelist, 0, sizeof(i2c_devicelist));
for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
dev->i2c_client[bus].addr = i;
@@ -964,7 +964,7 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned int bus)
if (bus == dev->def_i2c_bus)
dev->i2c_hash = em28xx_hash_mem(i2c_devicelist,
- ARRAY_SIZE(i2c_devicelist), 32);
+ sizeof(i2c_devicelist), 32);
}
/*
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index c8bc59059a19..4ecadd57dac7 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -149,6 +149,7 @@
#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100
#define EM2884_BOARD_TERRATEC_H6 101
#define EM2882_BOARD_ZOLID_HYBRID_TV_STICK 102
+#define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index 863c485f4275..97799cfb832e 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -378,6 +378,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ if (!dev->work_thread)
+ return -ENOMEM;
+
queue_work(dev->work_thread, &dev->work_struct);
return 0;
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index 3d7f6dcdd7a8..6ca947aef298 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -276,6 +276,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ if (!dev->work_thread)
+ return -ENOMEM;
+
queue_work(dev->work_thread, &dev->work_struct);
return 0;
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index f869eb6065ce..b23988d8c7bc 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -35,7 +35,7 @@ struct sd {
static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
int size)
{
- int ret = -1;
+ int ret;
u8 req_type = 0;
unsigned int pipe = 0;
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
index 7104a88b1e43..aac19d449be2 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
@@ -117,7 +117,7 @@ static int st6422_init(struct sd *sd)
{
int err = 0, i;
- const u16 st6422_bridge_init[][2] = {
+ static const u16 st6422_bridge_init[][2] = {
{ STV_ISO_ENABLE, 0x00 }, /* disable capture */
{ 0x1436, 0x00 },
{ 0x1432, 0x03 }, /* 0x00-0x1F brightness */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index a34717eba409..eaa08c7999d4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -898,8 +898,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
if (!list_empty(&vp->dev_video->devbase.fh_list) ||
- !list_empty(&vp->dev_radio->devbase.fh_list))
+ (vp->dev_radio &&
+ !list_empty(&vp->dev_radio->devbase.fh_list))) {
+ pvr2_trace(PVR2_TRACE_STRUCT,
+ "pvr2_v4l2 internal_check exit-empty id=%p", vp);
return;
+ }
pvr2_v4l2_destroy_no_lock(vp);
}
@@ -935,7 +939,8 @@ static int pvr2_v4l2_release(struct file *file)
kfree(fhp);
if (vp->channel.mc_head->disconnect_flag &&
list_empty(&vp->dev_video->devbase.fh_list) &&
- list_empty(&vp->dev_radio->devbase.fh_list)) {
+ (!vp->dev_radio ||
+ list_empty(&vp->dev_radio->devbase.fh_list))) {
pvr2_v4l2_destroy_no_lock(vp);
}
return 0;
diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/media/usb/tm6000/tm6000-regs.h
index d10424673db9..6a181f2e7ef2 100644
--- a/drivers/media/usb/tm6000/tm6000-regs.h
+++ b/drivers/media/usb/tm6000/tm6000-regs.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
index b275dbce3a1b..e3c6933f854d 100644
--- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h
+++ b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index bf396544da9a..c08c95312739 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 6f108996142d..e746c8ddfc49 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -378,8 +378,7 @@ int usbtv_audio_init(struct usbtv *usbtv)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usbtv_pcm_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), USBTV_AUDIO_BUFFER,
- USBTV_AUDIO_BUFFER);
+ NULL, USBTV_AUDIO_BUFFER, USBTV_AUDIO_BUFFER);
rv = snd_card_register(card);
if (rv)
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index cdc66adda755..93d36aab824f 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto unlock;
+ }
if (usbvision->user) {
err_code = -EBUSY;
} else {
@@ -377,6 +381,7 @@ unlock:
static int usbvision_v4l2_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "close");
@@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file)
usbvision_scratch_free(usbvision);
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
usbvision_release(usbvision);
return 0;
@@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv,
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ if (!usbvision->dev)
+ return -ENODEV;
+
strscpy(vc->driver, "USBVision", sizeof(vc->driver));
strscpy(vc->card,
usbvision_device_data[usbvision->dev_model].model_string,
@@ -1061,6 +1070,11 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto out;
+ }
err_code = v4l2_fh_open(file);
if (err_code)
goto out;
@@ -1093,21 +1107,24 @@ out:
static int usbvision_radio_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "");
mutex_lock(&usbvision->v4l2_lock);
/* Set packet size to 0 */
usbvision->iface_alt = 0;
- usb_set_interface(usbvision->dev, usbvision->iface,
- usbvision->iface_alt);
+ if (usbvision->dev)
+ usb_set_interface(usbvision->dev, usbvision->iface,
+ usbvision->iface_alt);
usbvision_audio_off(usbvision);
usbvision->radio = 0;
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
v4l2_fh_release(file);
usbvision_release(usbvision);
@@ -1539,6 +1556,7 @@ err_usb:
static void usbvision_disconnect(struct usb_interface *intf)
{
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
+ int u;
PDEBUG(DBG_PROBE, "");
@@ -1555,13 +1573,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
v4l2_device_disconnect(&usbvision->v4l2_dev);
usbvision_i2c_unregister(usbvision);
usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
+ u = usbvision->user;
usb_put_dev(usbvision->dev);
usbvision->dev = NULL; /* USB device is no more */
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->user) {
+ if (u) {
printk(KERN_INFO "%s: In use, disconnect pending\n",
__func__);
wake_up_interruptible(&usbvision->wait_frame);
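
The usbvision changes above all follow the same pattern: flags such as remove_pending and user are sampled into a local variable while v4l2_lock is still held, and only the local copy is tested after the unlock, so the check cannot race with a disconnect that flips the flag once the lock is dropped. A condensed sketch of that idea, assuming the driver's existing struct and lock; do_final_release() stands in for usbvision_release() and is not literal code:

/* Sketch of the snapshot-under-lock pattern used in the usbvision fixes. */
static int example_close(struct usb_usbvision *usbvision)
{
	int remove_pending;

	mutex_lock(&usbvision->v4l2_lock);
	usbvision->user--;
	/* sample the flag while still holding the lock ... */
	remove_pending = usbvision->remove_pending;
	mutex_unlock(&usbvision->v4l2_lock);

	/* ... and only test the local copy afterwards */
	if (remove_pending)
		do_final_release(usbvision);
	return 0;
}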
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index d2b109959d82..2b8af4b54117 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -108,15 +108,7 @@ void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream)
void uvc_debugfs_init(void)
{
- struct dentry *dir;
-
- dir = debugfs_create_dir("uvcvideo", usb_debug_root);
- if (IS_ERR_OR_NULL(dir)) {
- uvc_printk(KERN_INFO, "Unable to create debugfs directory\n");
- return;
- }
-
- uvc_debugfs_root_dir = dir;
+ uvc_debugfs_root_dir = debugfs_create_dir("uvcvideo", usb_debug_root);
}
void uvc_debugfs_cleanup(void)
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 66ee168ddc7e..428235ca2635 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2151,6 +2151,20 @@ static int uvc_probe(struct usb_interface *intf,
sizeof(dev->name) - len);
}
+ /* Initialize the media device. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &intf->dev;
+ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+ if (udev->serial)
+ strscpy(dev->mdev.serial, udev->serial,
+ sizeof(dev->mdev.serial));
+ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
+ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ media_device_init(&dev->mdev);
+
+ dev->vdev.mdev = &dev->mdev;
+#endif
+
/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
@@ -2171,19 +2185,7 @@ static int uvc_probe(struct usb_interface *intf,
"linux-uvc-devel mailing list.\n");
}
- /* Initialize the media device and register the V4L2 device. */
-#ifdef CONFIG_MEDIA_CONTROLLER
- dev->mdev.dev = &intf->dev;
- strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
- if (udev->serial)
- strscpy(dev->mdev.serial, udev->serial,
- sizeof(dev->mdev.serial));
- usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- media_device_init(&dev->mdev);
-
- dev->vdev.mdev = &dev->mdev;
-#endif
+ /* Register the V4L2 device. */
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index 99bb71b47117..b6279ad7ac84 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -51,7 +51,7 @@ static int uvc_meta_v4l2_get_format(struct file *file, void *fh,
memset(fmt, 0, sizeof(*fmt));
fmt->dataformat = stream->meta.format;
- fmt->buffersize = UVC_METATADA_BUF_SIZE;
+ fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
@@ -72,7 +72,7 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
fmt->dataformat = fmeta == dev->info->meta_format
? fmeta : V4L2_META_FMT_UVC;
- fmt->buffersize = UVC_METATADA_BUF_SIZE;
+ fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index da72577c2998..cd60c6c1749e 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -79,7 +79,7 @@ static int uvc_queue_setup(struct vb2_queue *vq,
switch (vq->type) {
case V4L2_BUF_TYPE_META_CAPTURE:
- size = UVC_METATADA_BUF_SIZE;
+ size = UVC_METADATA_BUF_SIZE;
break;
default:
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index c7c1baa90dea..f773dc5d802c 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -491,7 +491,7 @@ struct uvc_stats_stream {
unsigned int max_sof; /* Maximum STC.SOF value */
};
-#define UVC_METATADA_BUF_SIZE 1024
+#define UVC_METADATA_BUF_SIZE 1024
/**
* struct uvc_copy_op: Context structure to schedule asynchronous memcpy
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 637962825d7a..57dbcc8083bf 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -20,7 +20,6 @@
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
#include <linux/highmem.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -556,14 +555,12 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
{
unsigned char *pdest;
unsigned char *psrc;
- s32 idx = -1;
- struct zr364xx_framei *frm;
+ s32 idx = cam->cur_frame;
+ struct zr364xx_framei *frm = &cam->buffer.frame[idx];
int i = 0;
unsigned char *ptr = NULL;
_DBG("buffer to user\n");
- idx = cam->cur_frame;
- frm = &cam->buffer.frame[idx];
/* swap bytes if camera needs it */
if (cam->method == METHOD0) {
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 62f7aa92ac29..d0e5ebc736f9 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -236,77 +236,79 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{
static const struct v4l2_format_info formats[] = {
/* RGB formats */
- { .format = V4L2_PIX_FMT_BGR24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XBGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XRGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ARGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ABGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_GREY, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
- { .format = V4L2_PIX_FMT_YUYV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVYU, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_UYVY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_VYUY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
/* YUV planar formats */
- { .format = V4L2_PIX_FMT_NV12, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV24, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV42, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_YUV410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YVU410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YUV411P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
/* YUV planar formats, non contiguous variant */
- { .format = V4L2_PIX_FMT_YUV420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_NV12M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
/* Bayer RGB formats */
- { .format = V4L2_PIX_FMT_SBGGR8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
};
unsigned int i;
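
Note: the .pixel_enc field added to every entry above lets callers of v4l2_format_info() classify a format as RGB, YUV or Bayer without open-coded switch statements; the remaining fields already describe the memory layout. A minimal sketch, using only the fields visible in this table (comp_planes, bpp, hdiv, vdiv), of how per-component-plane sizes fall out for a single-memory-plane format such as NV12 (the 1920x1080 numbers are illustrative):

	/* Sketch only: size a 1920x1080 NV12 buffer from its table entry
	 * (bpp = {1, 2}, hdiv = 2, vdiv = 2). */
	const struct v4l2_format_info *info = v4l2_format_info(V4L2_PIX_FMT_NV12);
	unsigned int width = 1920, height = 1080, p, total = 0;

	for (p = 0; p < info->comp_planes; p++) {
		unsigned int hsub = p ? info->hdiv : 1;	/* chroma planes are subsampled */
		unsigned int vsub = p ? info->vdiv : 1;

		total += DIV_ROUND_UP(width, hsub) * info->bpp[p] *
			 DIV_ROUND_UP(height, vsub);
	}
	/* total == 1920*1080 + (1920/2)*2*(1080/2) == 3110400 bytes */
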
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 1d8f38824631..2928c5e0a73d 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -29,6 +29,8 @@
#define call_op(master, op) \
(has_op(master, op) ? master->ops->op(master) : 0)
+static const union v4l2_ctrl_ptr ptr_null;
+
/* Internal temporary helper struct, one for each v4l2_ext_control */
struct v4l2_ctrl_helper {
/* Pointer to the control reference of the master control */
@@ -566,6 +568,16 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Disabled at slice boundary",
"NULL",
};
+ static const char * const hevc_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const hevc_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -687,7 +699,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return hevc_tier;
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
return hevc_loop_filter_mode;
-
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ return hevc_decode_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ return hevc_start_code;
default:
return NULL;
}
@@ -957,6 +972,11 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -994,6 +1014,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
case V4L2_CID_PAN_SPEED: return "Pan, Speed";
case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
+ case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
/* FM Radio Modulator controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1265,6 +1286,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_LINK_FREQ:
@@ -1375,6 +1398,19 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER:
*type = V4L2_CTRL_TYPE_VP8_FRAME_HEADER;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS:
+ *type = V4L2_CTRL_TYPE_HEVC_SPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS:
+ *type = V4L2_CTRL_TYPE_HEVC_PPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
+ break;
+ case V4L2_CID_UNIT_CELL_SIZE:
+ *type = V4L2_CTRL_TYPE_AREA;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1520,7 +1556,8 @@ static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
if (ctrl->is_int)
return ptr1.p_s32[idx] == ptr2.p_s32[idx];
idx *= ctrl->elem_size;
- return !memcmp(ptr1.p + idx, ptr2.p + idx, ctrl->elem_size);
+ return !memcmp(ptr1.p_const + idx, ptr2.p_const + idx,
+ ctrl->elem_size);
}
}
@@ -1530,7 +1567,10 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
void *p = ptr.p + idx * ctrl->elem_size;
- memset(p, 0, ctrl->elem_size);
+ if (ctrl->p_def.p_const)
+ memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
/*
* The cast is needed to get rid of a gcc warning complaining that
@@ -1672,7 +1712,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
{
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_area *area;
void *p = ptr.p + idx * ctrl->elem_size;
+ unsigned int i;
switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
@@ -1748,6 +1793,76 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
zero_padding(p_vp8_frame_header->entropy_header);
zero_padding(p_vp8_frame_header->coder_state);
break;
+
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ p_hevc_sps = p;
+
+ if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
+ p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
+ p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
+ p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+ p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
+ }
+
+ if (!(p_hevc_sps->flags &
+ V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
+ p_hevc_sps->num_long_term_ref_pics_sps = 0;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ p_hevc_pps = p;
+
+ if (!(p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
+ p_hevc_pps->diff_cu_qp_delta_depth = 0;
+
+ if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
+ p_hevc_pps->num_tile_columns_minus1 = 0;
+ p_hevc_pps->num_tile_rows_minus1 = 0;
+ memset(&p_hevc_pps->column_width_minus1, 0,
+ sizeof(p_hevc_pps->column_width_minus1));
+ memset(&p_hevc_pps->row_height_minus1, 0,
+ sizeof(p_hevc_pps->row_height_minus1));
+
+ p_hevc_pps->flags &=
+ ~V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED;
+ }
+
+ if (p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
+ p_hevc_pps->pps_beta_offset_div2 = 0;
+ p_hevc_pps->pps_tc_offset_div2 = 0;
+ }
+
+ zero_padding(*p_hevc_pps);
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ p_hevc_slice_params = p;
+
+ if (p_hevc_slice_params->num_active_dpb_entries >
+ V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
+ return -EINVAL;
+
+ zero_padding(p_hevc_slice_params->pred_weight_table);
+
+ for (i = 0; i < p_hevc_slice_params->num_active_dpb_entries;
+ i++) {
+ struct v4l2_hevc_dpb_entry *dpb_entry =
+ &p_hevc_slice_params->dpb[i];
+
+ zero_padding(*dpb_entry);
+ }
+
+ zero_padding(*p_hevc_slice_params);
+ break;
+
+ case V4L2_CTRL_TYPE_AREA:
+ area = p;
+ if (!area->width || !area->height)
+ return -EINVAL;
+ break;
+
default:
return -EINVAL;
}
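
Note: the new HEVC cases follow the same pattern as the existing MPEG-2 and VP8 ones: reject or clamp out-of-range fields (e.g. num_active_dpb_entries), then scrub the padding so no stale bytes travel between kernel and userspace. zero_padding() is a local helper defined earlier in this file; its assumed shape (not shown in this hunk) is simply:

	/* Assumed definition: zero the trailing padding[] member that each of
	 * these compound control structs carries. */
	#define zero_padding(s) \
		memset(&(s).padding, 0, sizeof((s).padding))
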
@@ -1840,7 +1955,7 @@ static int ptr_to_user(struct v4l2_ext_control *c,
u32 len;
if (ctrl->is_ptr && !ctrl->is_string)
- return copy_to_user(c->ptr, ptr.p, c->size) ?
+ return copy_to_user(c->ptr, ptr.p_const, c->size) ?
-EFAULT : 0;
switch (ctrl->type) {
@@ -1955,7 +2070,7 @@ static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
{
if (ctrl == NULL)
return;
- memcpy(to.p, from.p, ctrl->elems * ctrl->elem_size);
+ memcpy(to.p, from.p_const, ctrl->elems * ctrl->elem_size);
}
/* Copy the new value to the current value. */
@@ -2354,7 +2469,8 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
s64 min, s64 max, u64 step, s64 def,
const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
u32 flags, const char * const *qmenu,
- const s64 *qmenu_int, void *priv)
+ const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
+ void *priv)
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra;
@@ -2421,6 +2537,18 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_VP8_FRAME_HEADER:
elem_size = sizeof(struct v4l2_ctrl_vp8_frame_header);
break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_AREA:
+ elem_size = sizeof(struct v4l2_area);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2460,6 +2588,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
is_array)
sz_extra += 2 * tot_ctrl_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
+ sz_extra += elem_size;
+
ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
if (ctrl == NULL) {
handler_set_err(hdl, -ENOMEM);
@@ -2503,6 +2634,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->p_new.p = &ctrl->val;
ctrl->p_cur.p = &ctrl->cur.val;
}
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
+ ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
+ memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
+ }
+
for (idx = 0; idx < elems; idx++) {
ctrl->type_ops->init(ctrl, idx, ctrl->p_cur);
ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
@@ -2554,7 +2691,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
type, min, max,
is_menu ? cfg->menu_skip_mask : step, def,
cfg->dims, cfg->elem_size,
- flags, qmenu, qmenu_int, priv);
+ flags, qmenu, qmenu_int, cfg->p_def, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
@@ -2579,7 +2716,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
- flags, NULL, NULL, NULL);
+ flags, NULL, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
@@ -2612,7 +2749,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, qmenu_int, NULL);
+ flags, qmenu, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
@@ -2644,11 +2781,32 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, NULL, NULL);
+ flags, qmenu, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
+/* Helper function for standard compound controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+ s64 min, max, step, def;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type < V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, p_def, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
+
/* Helper function for standard integer menu controls */
struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
@@ -2669,7 +2827,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, 0, def, NULL, 0,
- flags, NULL, qmenu_int, NULL);
+ flags, NULL, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
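
Note: v4l2_ctrl_new_std_compound() is the compound-control counterpart of v4l2_ctrl_new_std(): name, type and flags still come from v4l2_ctrl_fill(), and p_def supplies the default value that std_init_compound() copies into each element (see the p_def handling added earlier in this patch). A hypothetical sensor driver exposing the new read-only V4L2_CID_UNIT_CELL_SIZE control might call it roughly as below; the handler name, ops and the 1120x1120 value are made up for illustration, and the compound literal is just one way to build the union argument:

	/* Illustrative only: register a constant unit-cell-size control. */
	static const struct v4l2_area unit_cell = {
		.width	= 1120,
		.height	= 1120,
	};

	v4l2_ctrl_new_std_compound(&sensor->ctrl_handler, NULL,
				   V4L2_CID_UNIT_CELL_SIZE,
				   (union v4l2_ctrl_ptr){ .p_const = &unit_cell });
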
@@ -3144,6 +3302,7 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
struct v4l2_ctrl_handler *prev_hdl = NULL;
struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+ mutex_lock(main_hdl->lock);
if (list_empty(&main_hdl->requests_queued))
goto queue;
@@ -3175,18 +3334,22 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
queue:
list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
hdl->request_is_queued = true;
+ mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
{
struct v4l2_ctrl_handler *hdl =
container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
list_del_init(&hdl->requests);
+ mutex_lock(main_hdl->lock);
if (hdl->request_is_queued) {
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
}
+ mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_release(struct media_request_object *obj)
@@ -4080,6 +4243,18 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(ctrl->type != V4L2_CTRL_TYPE_AREA);
+ *ctrl->p_new.p_area = *area;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_area);
+
void v4l2_ctrl_request_complete(struct media_request *req,
struct v4l2_ctrl_handler *main_hdl)
{
@@ -4128,9 +4303,11 @@ void v4l2_ctrl_request_complete(struct media_request *req,
v4l2_ctrl_unlock(ctrl);
}
+ mutex_lock(main_hdl->lock);
WARN_ON(!hdl->request_is_queued);
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
+ mutex_unlock(main_hdl->lock);
media_request_object_complete(obj);
media_request_object_put(obj);
}
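
Note: two independent fixes land in the tail of this file. The requests_queued list is now manipulated only while holding the main handler's lock, closing a race between queueing, unbinding and completing a request. Separately, __v4l2_ctrl_s_ctrl_area() gives drivers a typed setter for AREA controls; like the other double-underscore setters it expects the handler lock to be held, with the locked wrapper presumably provided in the header next to its siblings, along these lines:

	/* Assumed header-side wrapper, mirroring v4l2_ctrl_s_ctrl_int64() et al. */
	static inline int v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
						const struct v4l2_area *area)
	{
		int ret;

		v4l2_ctrl_lock(ctrl);
		ret = __v4l2_ctrl_s_ctrl_area(ctrl, area);
		v4l2_ctrl_unlock(ctrl);
		return ret;
	}
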
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 4037689a945a..da42d172714a 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -533,13 +533,23 @@ static int get_index(struct video_device *vdev)
*/
static void determine_valid_ioctls(struct video_device *vdev)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (vdev->device_caps & vid_caps);
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
@@ -571,8 +581,10 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_querymenu)
set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
- SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ if (!is_tch) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ }
SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
@@ -586,40 +598,32 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
- if (is_vid || is_tch) {
- /* video and metadata specific ioctls */
+ if (is_vid) {
+ /* video specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_overlay ||
- ops->vidioc_enum_fmt_meta_cap)) ||
- (is_tx && (ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_meta_out)))
+ ops->vidioc_enum_fmt_vid_overlay)) ||
+ (is_tx && ops->vidioc_enum_fmt_vid_out))
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
- ops->vidioc_g_fmt_vid_overlay ||
- ops->vidioc_g_fmt_meta_cap)) ||
+ ops->vidioc_g_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
- ops->vidioc_g_fmt_vid_out_overlay ||
- ops->vidioc_g_fmt_meta_out)))
+ ops->vidioc_g_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
- ops->vidioc_s_fmt_vid_overlay ||
- ops->vidioc_s_fmt_meta_cap)) ||
+ ops->vidioc_s_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
- ops->vidioc_s_fmt_vid_out_overlay ||
- ops->vidioc_s_fmt_meta_out)))
+ ops->vidioc_s_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
- ops->vidioc_try_fmt_vid_overlay ||
- ops->vidioc_try_fmt_meta_cap)) ||
+ ops->vidioc_try_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
- ops->vidioc_try_fmt_vid_out_overlay ||
- ops->vidioc_try_fmt_meta_out)))
+ ops->vidioc_try_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
@@ -641,7 +645,21 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
- } else if (is_vbi) {
+ }
+ if (is_meta && is_rx) {
+ /* metadata capture specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap);
+ } else if (is_meta && is_tx) {
+ /* metadata output specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out);
+ }
+ if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
ops->vidioc_g_fmt_sliced_vbi_cap)) ||
@@ -659,30 +677,35 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_try_fmt_sliced_vbi_out)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_tch) {
+ /* touch specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
} else if (is_sdr && is_rx) {
/* SDR receiver specific ioctls */
- if (ops->vidioc_enum_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
- if (ops->vidioc_g_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
- if (ops->vidioc_s_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
- if (ops->vidioc_try_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap);
} else if (is_sdr && is_tx) {
/* SDR transmitter specific ioctls */
- if (ops->vidioc_enum_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
- if (ops->vidioc_g_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
- if (ops->vidioc_s_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
- if (ops->vidioc_try_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out);
}
- if (is_vid || is_vbi || is_sdr || is_tch) {
- /* ioctls valid for video, metadata, vbi or sdr */
+ if (is_vid || is_vbi || is_sdr || is_tch || is_meta) {
+ /* ioctls valid for video, vbi, sdr, touch and metadata */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
@@ -694,8 +717,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
}
- if (is_vid || is_vbi || is_tch) {
- /* ioctls valid for video or vbi */
+ if (is_vid || is_vbi || is_meta) {
+ /* ioctls valid for video, vbi and metadata */
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
@@ -719,8 +742,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
}
- if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
- ops->vidioc_g_std))
+ if (ops->vidioc_g_parm || ops->vidioc_g_std)
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
@@ -734,7 +756,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
}
- if (is_rx) {
+ if (is_rx && !is_tch) {
/* receiver only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
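
Note: with this split, a video node is classified by device_caps rather than by vfl_type alone, so a pure metadata device stops advertising the video format ioctls and vice versa. For the is_meta branch to apply, a driver only has to declare the right capabilities before registration; a hypothetical capture-only metadata node would do something like:

	/* Hypothetical metadata-capture node: with these caps,
	 * determine_valid_ioctls() enables VIDIOC_{ENUM,G,S,TRY}_FMT through the
	 * *_meta_cap handlers only. */
	vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
	vdev->vfl_dir = VFL_DIR_RX;
	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
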
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4f23e939ead0..230d65a64217 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -293,7 +293,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
if (prefix == NULL)
prefix = "";
- pr_info("%s: %s%ux%u%s%u.%u (%ux%u)\n", dev_prefix, prefix,
+ pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix,
bt->width, bt->height, bt->interlaced ? "i" : "p",
fps / 100, fps % 100, htot, vtot);
@@ -757,7 +757,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
hsync = (frame_width * 8 + 50) / 100;
- hsync = ((hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN) * GTF_CELL_GRAN;
+ hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
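
Note: both hunks are small correctness/readability cleanups. DIV_ROUND_CLOSEST() is an exact replacement for the open-coded "(x + d/2) / d" rounding, and the format-string change matters whenever the fractional part of the refresh rate is below 10 centihertz (fps holds the rate in units of 0.01 Hz):

	/* fps == 6005 represents 60.05 Hz:
	 *   "%u.%u"   -> "60.5"   (misreads as 60.5 Hz)
	 *   "%u.%02u" -> "60.05"  (intended)
	 */
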
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 3bd1888787eb..192cac076761 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -512,6 +512,7 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
return;
kfree(vep->link_frequencies);
+ vep->link_frequencies = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 51b912743f0f..4e700583659b 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -932,12 +932,22 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
static int check_fmt(struct file *file, enum v4l2_buf_type type)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ (vfd->device_caps & vid_caps);
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ (vfd->device_caps & meta_caps);
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -996,11 +1006,11 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
return 0;
break;
case V4L2_BUF_TYPE_META_CAPTURE:
- if (is_vid && is_rx && ops->vidioc_g_fmt_meta_cap)
+ if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
return 0;
break;
case V4L2_BUF_TYPE_META_OUTPUT:
- if (is_vid && is_tx && ops->vidioc_g_fmt_meta_out)
+ if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
return 0;
break;
default:
@@ -1330,6 +1340,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
+ case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
default:
/* Compressed formats */
@@ -1356,6 +1367,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
+ case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break;
case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
@@ -1466,10 +1478,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
return ret;
}
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+ /*
+ * The v4l2_pix_format structure contains fields that make no sense for
+ * touch. Set them to default values in this case.
+ */
+
+ p->field = V4L2_FIELD_NONE;
+ p->colorspace = V4L2_COLORSPACE_RAW;
+ p->flags = 0;
+ p->ycbcr_enc = 0;
+ p->quantization = 0;
+ p->xfer_func = 0;
+}
+
static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
if (ret)
@@ -1507,6 +1535,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
@@ -1544,21 +1574,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
-static void v4l_pix_format_touch(struct v4l2_pix_format *p)
-{
- /*
- * The v4l2_pix_format structure contains fields that make no sense for
- * touch. Set them to default values in this case.
- */
-
- p->field = V4L2_FIELD_NONE;
- p->colorspace = V4L2_COLORSPACE_RAW;
- p->flags = 0;
- p->ycbcr_enc = 0;
- p->quantization = 0;
- p->xfer_func = 0;
-}
-
static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -1602,12 +1617,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vid_out))
@@ -1633,22 +1648,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
@@ -1704,12 +1719,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vid_out))
@@ -1735,22 +1750,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
@@ -2637,7 +2652,7 @@ struct v4l2_ioctl_info {
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field) \
((offsetof(struct v4l2_struct, field) + \
- sizeof(((struct v4l2_struct *)0)->field)) << 16)
+ FIELD_SIZEOF(struct v4l2_struct, field)) << 16)
#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
#define DEFINE_V4L_STUB_FUNC(_vidioc) \
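
Note: the CLEAR_AFTER_FIELD() changes are not churn. The macro zeroes everything in the v4l2_format struct located after the named field, so clearing after the whole fmt.vbi / fmt.sliced / fmt.sdr member left those structs' own trailing reserved[] words untouched; pointing it at the last real field (flags, io_size, buffersize) zeroes the reserved words as well before the driver callback runs. The INFO_FL_CLEAR switch to FIELD_SIZEOF() is purely cosmetic. For reference, the macro is defined near the top of this file, approximately:

	/* Approximate definition (the real one lives earlier in v4l2-ioctl.c):
	 * zero everything in *p located after the given field. */
	#define CLEAR_AFTER_FIELD(p, field) \
		memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
		       0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
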
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 19937dd3c6f6..1afd9c6ad908 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -284,7 +284,8 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags_job, flags_out, flags_cap;
+ unsigned long flags_job;
+ struct vb2_v4l2_buffer *dst, *src;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -307,20 +308,37 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
- && !m2m_ctx->out_q_ctx.buffered) {
+ src = v4l2_m2m_next_src_buf(m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+ if (!src && !m2m_ctx->out_q_ctx.buffered) {
dprintk("No input buffers available\n");
- goto out_unlock;
+ goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
- && !m2m_ctx->cap_q_ctx.buffered) {
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available\n");
- goto cap_unlock;
+ goto job_unlock;
}
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+
+ m2m_ctx->new_frame = true;
+
+ if (src && dst && dst->is_held &&
+ dst->vb2_buf.copied_timestamp &&
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+ dst->is_held = false;
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+ dprintk("No output buffers available after returning held buffer\n");
+ goto job_unlock;
+ }
+ }
+
+ if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -331,13 +349,6 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
- return;
-
-cap_unlock:
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-out_unlock:
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
@@ -412,37 +423,97 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
}
}
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
- struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags;
+ /*
+ * This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes.
+ */
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ /*
+ * We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Called by an instance not currently running\n");
- return;
+ return false;
}
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
+ return true;
+}
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-
- /* This instance might have more buffers ready, but since we do not
- * allow more than one job on the job_queue per instance, each has
- * to be scheduled separately after the previous one finishes. */
- __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+ bool schedule_next;
- /* We might be running in atomic context,
- * but the job must be run in non-atomic context.
+ /*
+ * This function should not be used for drivers that support
+ * holding capture buffers. Those should use
+ * v4l2_m2m_buf_done_and_job_finish() instead.
*/
- schedule_work(&m2m_dev->job_work);
+ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ bool schedule_next = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (WARN_ON(!src_buf || !dst_buf))
+ goto unlock;
+ v4l2_m2m_buf_done(src_buf, state);
+ dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ if (!dst_buf->is_held) {
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst_buf, state);
+ }
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
@@ -1154,6 +1225,59 @@ int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_FLUSH)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
+
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct vb2_v4l2_buffer *out_vb, *cap_vb;
+ struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
+ unsigned long flags;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
+ cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
+
+ /*
+ * If there is an out buffer pending, then clear any HOLD flag.
+ *
+ * By clearing this flag we ensure that when this output
+ * buffer is processed any held capture buffer will be released.
+ */
+ if (out_vb) {
+ out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ } else if (cap_vb && cap_vb->is_held) {
+ /*
+ * If there were no output buffers, but there is a
+ * capture buffer that is held, then release that
+ * buffer.
+ */
+ cap_vb->is_held = false;
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
+
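
Note: taken together, the is_held/new_frame handling in __v4l2_m2m_try_queue(), v4l2_m2m_buf_done_and_job_finish() and the two stateless decoder-cmd helpers form the "hold capture buffer" path for slice-based stateless decoders: the capture buffer is returned to userspace only once an output buffer with a different timestamp arrives, or V4L2_DEC_CMD_FLUSH is issued. A hypothetical driver opts in with glue roughly like the following sketch (driver-side names are made up):

	/* Queue setup: advertise hold-capture support on the OUTPUT queue. */
	src_vq->subsystem_flags |= VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;

	/* Per-slice completion: finish the source buffer; the destination is kept
	 * or released based on V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF. */
	v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx,
					 VB2_BUF_STATE_DONE);

	/* ioctl ops: wire up the FLUSH command. */
	.vidioc_try_decoder_cmd	= v4l2_m2m_ioctl_stateless_try_decoder_cmd,
	.vidioc_decoder_cmd	= v4l2_m2m_ioctl_stateless_decoder_cmd,
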
/*
* v4l2_file_operations helpers. It is assumed here same lock is used
* for the output and the capture buffer queue.
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index f725cd9b66b9..9e987c0f840e 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -112,7 +112,7 @@ static int subdev_close(struct file *file)
return 0;
}
-static inline int check_which(__u32 which)
+static inline int check_which(u32 which)
{
if (which != V4L2_SUBDEV_FORMAT_TRY &&
which != V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -121,7 +121,7 @@ static inline int check_which(__u32 which)
return 0;
}
-static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
+static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
if (sd->entity.num_pads) {
@@ -136,7 +136,7 @@ static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
return 0;
}
-static int check_cfg(__u32 which, struct v4l2_subdev_pad_config *cfg)
+static int check_cfg(u32 which, struct v4l2_subdev_pad_config *cfg)
{
if (which == V4L2_SUBDEV_FORMAT_TRY && !cfg)
return -EINVAL;
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 439d7d886873..a113e811faab 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
static const struct dev_pm_ops smi_larb_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver mtk_smi_larb_driver = {
@@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
static const struct dev_pm_ops smi_common_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver mtk_smi_common_driver = {
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
index 516f454fde14..08192fd70eb4 100644
--- a/drivers/memstick/core/Kconfig
+++ b/drivers/memstick/core/Kconfig
@@ -6,16 +6,16 @@
comment "MemoryStick drivers"
config MEMSTICK_UNSAFE_RESUME
- bool "Allow unsafe resume (DANGEROUS)"
- help
- If you say Y here, the MemoryStick layer will assume that all
- cards stayed in their respective slots during the suspend. The
- normal behaviour is to remove them at suspend and
- redetecting them at resume. Breaking this assumption will
- in most cases result in data corruption.
+ bool "Allow unsafe resume (DANGEROUS)"
+ help
+ If you say Y here, the MemoryStick layer will assume that all
+ cards stayed in their respective slots during the suspend. The
+ normal behaviour is to remove them at suspend and
+	  redetect them at resume. Breaking this assumption will

+ in most cases result in data corruption.
- This option is usually just for embedded systems which use
- a MemoryStick card for rootfs. Most people should say N here.
+ This option is usually just for embedded systems which use
+ a MemoryStick card for rootfs. Most people should say N here.
config MSPRO_BLOCK
tristate "MemoryStick Pro block device driver"
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 446c93ecef8f..4113343da056 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -18,7 +18,7 @@ config MEMSTICK_TIFM_MS
'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
(TIFM_7XX1)'.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called tifm_ms.
config MEMSTICK_JMICRON_38X
@@ -29,7 +29,7 @@ config MEMSTICK_JMICRON_38X
Say Y here if you want to be able to access MemoryStick cards with
the JMicron(R) JMB38X MemoryStick card reader.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called jmb38x_ms.
config MEMSTICK_R592
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 64fff6abe60e..0a9c5ddf2f59 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -433,13 +433,13 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
writel(((1 << 16) & BLOCK_COUNT_MASK)
| (data_len & BLOCK_SIZE_MASK),
host->addr + BLOCK);
- t_val = readl(host->addr + INT_STATUS_ENABLE);
- t_val |= host->req->data_dir == READ
- ? INT_STATUS_FIFO_RRDY
- : INT_STATUS_FIFO_WRDY;
+ t_val = readl(host->addr + INT_STATUS_ENABLE);
+ t_val |= host->req->data_dir == READ
+ ? INT_STATUS_FIFO_RRDY
+ : INT_STATUS_FIFO_WRDY;
- writel(t_val, host->addr + INT_STATUS_ENABLE);
- writel(t_val, host->addr + INT_SIGNAL_ENABLE);
+ writel(t_val, host->addr + INT_STATUS_ENABLE);
+ writel(t_val, host->addr + INT_SIGNAL_ENABLE);
} else {
cmd &= ~(TPC_DATA_SEL | 0xf);
host->cmd_flags |= REG_DATA;
@@ -848,7 +848,7 @@ static int jmb38x_ms_count_slots(struct pci_dev *pdev)
{
int cnt, rc = 0;
- for (cnt = 0; cnt < PCI_ROM_RESOURCE; ++cnt) {
+ for (cnt = 0; cnt < PCI_STD_NUM_BARS; ++cnt) {
if (!(IORESOURCE_MEM & pci_resource_flags(pdev, cnt)))
break;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ae24d3ea68ea..420900852166 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1210,13 +1210,6 @@ config AB8500_DEBUG
Select this option if you want debug information using the debug
filesystem, debugfs.
-config AB8500_GPADC
- bool "ST-Ericsson AB8500 GPADC driver"
- depends on AB8500_CORE && REGULATOR_AB8500
- default y
- help
- AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage
-
config MFD_DB8500_PRCMU
bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
depends on UX500_SOC_DB8500
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c1067ea46204..aed99f08739f 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -177,7 +177,6 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
-obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
# ab8500-core need to come after db8500-prcmu (which provides the channel)
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 3e9dc92cb467..bafc729fc434 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -610,107 +610,53 @@ int ab8500_suspend(struct ab8500 *ab8500)
}
static const struct mfd_cell ab8500_bm_devs[] = {
- {
- .name = "ab8500-charger",
- .of_compatible = "stericsson,ab8500-charger",
- .platform_data = &ab8500_bm_data,
- .pdata_size = sizeof(ab8500_bm_data),
- },
- {
- .name = "ab8500-btemp",
- .of_compatible = "stericsson,ab8500-btemp",
- .platform_data = &ab8500_bm_data,
- .pdata_size = sizeof(ab8500_bm_data),
- },
- {
- .name = "ab8500-fg",
- .of_compatible = "stericsson,ab8500-fg",
- .platform_data = &ab8500_bm_data,
- .pdata_size = sizeof(ab8500_bm_data),
- },
- {
- .name = "ab8500-chargalg",
- .of_compatible = "stericsson,ab8500-chargalg",
- .platform_data = &ab8500_bm_data,
- .pdata_size = sizeof(ab8500_bm_data),
- },
+ OF_MFD_CELL("ab8500-charger", NULL, &ab8500_bm_data,
+ sizeof(ab8500_bm_data), 0, "stericsson,ab8500-charger"),
+ OF_MFD_CELL("ab8500-btemp", NULL, &ab8500_bm_data,
+ sizeof(ab8500_bm_data), 0, "stericsson,ab8500-btemp"),
+ OF_MFD_CELL("ab8500-fg", NULL, &ab8500_bm_data,
+ sizeof(ab8500_bm_data), 0, "stericsson,ab8500-fg"),
+ OF_MFD_CELL("ab8500-chargalg", NULL, &ab8500_bm_data,
+ sizeof(ab8500_bm_data), 0, "stericsson,ab8500-chargalg"),
};
static const struct mfd_cell ab8500_devs[] = {
#ifdef CONFIG_DEBUG_FS
- {
- .name = "ab8500-debug",
- .of_compatible = "stericsson,ab8500-debug",
- },
+ OF_MFD_CELL("ab8500-debug",
+ NULL, NULL, 0, 0, "stericsson,ab8500-debug"),
#endif
- {
- .name = "ab8500-sysctrl",
- .of_compatible = "stericsson,ab8500-sysctrl",
- },
- {
- .name = "ab8500-ext-regulator",
- .of_compatible = "stericsson,ab8500-ext-regulator",
- },
- {
- .name = "ab8500-regulator",
- .of_compatible = "stericsson,ab8500-regulator",
- },
- {
- .name = "ab8500-clk",
- .of_compatible = "stericsson,ab8500-clk",
- },
- {
- .name = "ab8500-gpadc",
- .of_compatible = "stericsson,ab8500-gpadc",
- },
- {
- .name = "ab8500-rtc",
- .of_compatible = "stericsson,ab8500-rtc",
- },
- {
- .name = "ab8500-acc-det",
- .of_compatible = "stericsson,ab8500-acc-det",
- },
- {
-
- .name = "ab8500-poweron-key",
- .of_compatible = "stericsson,ab8500-poweron-key",
- },
- {
- .name = "ab8500-pwm",
- .of_compatible = "stericsson,ab8500-pwm",
- .id = 1,
- },
- {
- .name = "ab8500-pwm",
- .of_compatible = "stericsson,ab8500-pwm",
- .id = 2,
- },
- {
- .name = "ab8500-pwm",
- .of_compatible = "stericsson,ab8500-pwm",
- .id = 3,
- },
- {
- .name = "ab8500-denc",
- .of_compatible = "stericsson,ab8500-denc",
- },
- {
- .name = "pinctrl-ab8500",
- .of_compatible = "stericsson,ab8500-gpio",
- },
- {
- .name = "abx500-temp",
- .of_compatible = "stericsson,abx500-temp",
- },
- {
- .name = "ab8500-usb",
- .of_compatible = "stericsson,ab8500-usb",
- },
- {
- .name = "ab8500-codec",
- .of_compatible = "stericsson,ab8500-codec",
- },
+ OF_MFD_CELL("ab8500-sysctrl",
+ NULL, NULL, 0, 0, "stericsson,ab8500-sysctrl"),
+ OF_MFD_CELL("ab8500-ext-regulator",
+ NULL, NULL, 0, 0, "stericsson,ab8500-ext-regulator"),
+ OF_MFD_CELL("ab8500-regulator",
+ NULL, NULL, 0, 0, "stericsson,ab8500-regulator"),
+ OF_MFD_CELL("abx500-clk",
+ NULL, NULL, 0, 0, "stericsson,abx500-clk"),
+ OF_MFD_CELL("ab8500-gpadc",
+ NULL, NULL, 0, 0, "stericsson,ab8500-gpadc"),
+ OF_MFD_CELL("ab8500-rtc",
+ NULL, NULL, 0, 0, "stericsson,ab8500-rtc"),
+ OF_MFD_CELL("ab8500-acc-det",
+ NULL, NULL, 0, 0, "stericsson,ab8500-acc-det"),
+ OF_MFD_CELL("ab8500-poweron-key",
+ NULL, NULL, 0, 0, "stericsson,ab8500-poweron-key"),
+ OF_MFD_CELL("ab8500-pwm",
+ NULL, NULL, 0, 1, "stericsson,ab8500-pwm"),
+ OF_MFD_CELL("ab8500-pwm",
+ NULL, NULL, 0, 2, "stericsson,ab8500-pwm"),
+ OF_MFD_CELL("ab8500-pwm",
+ NULL, NULL, 0, 3, "stericsson,ab8500-pwm"),
+ OF_MFD_CELL("ab8500-denc",
+ NULL, NULL, 0, 0, "stericsson,ab8500-denc"),
+ OF_MFD_CELL("pinctrl-ab8500",
+ NULL, NULL, 0, 0, "stericsson,ab8500-gpio"),
+ OF_MFD_CELL("abx500-temp",
+ NULL, NULL, 0, 0, "stericsson,abx500-temp"),
+ OF_MFD_CELL("ab8500-usb",
+ NULL, NULL, 0, 0, "stericsson,ab8500-usb"),
+ OF_MFD_CELL("ab8500-codec",
+ NULL, NULL, 0, 0, "stericsson,ab8500-codec"),
};
static const struct mfd_cell ab9540_devs[] = {
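
The conversion above replaces each open-coded struct mfd_cell initializer with the OF_MFD_CELL() helper; judging from the removed and added lines, the argument order is (name, resources, platform_data, pdata_size, id, of_compatible). One functional-looking change is buried in the otherwise mechanical conversion: the clock cell goes from "ab8500-clk"/"stericsson,ab8500-clk" to "abx500-clk"/"stericsson,abx500-clk". A sketch of the equivalence, using the first PWM entry as the example and inferred from this hunk rather than quoted from mfd/core.h:

/* Sketch only: what one OF_MFD_CELL() entry above is shorthand for. */
static const struct mfd_cell pwm_cell_macro =
	OF_MFD_CELL("ab8500-pwm", NULL, NULL, 0, 1, "stericsson,ab8500-pwm");

/* ...roughly the open-coded form this patch removes (no resources or
 * platform data in this case): */
static const struct mfd_cell pwm_cell_open_coded = {
	.name		= "ab8500-pwm",
	.id		= 1,
	.of_compatible	= "stericsson,ab8500-pwm",
};
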
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index f4e26b6e5362..1a9a3414d4fa 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -84,7 +84,6 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/string.h>
@@ -103,11 +102,6 @@ static int num_irqs;
static struct device_attribute **dev_attr;
static char **event_name;
-static u8 avg_sample = SAMPLE_16;
-static u8 trig_edge = RISING_EDGE;
-static u8 conv_type = ADC_SW;
-static u8 trig_timer;
-
/**
* struct ab8500_reg_range
* @first: the first address of the range
@@ -152,7 +146,6 @@ static struct hwreg_cfg hwreg_cfg = {
};
#define AB8500_NAME_STRING "ab8500"
-#define AB8500_ADC_NAME_STRING "gpadc"
#define AB8500_NUM_BANKS AB8500_DEBUG_FIELD_LAST
#define AB8500_REV_REG 0x80
@@ -1646,633 +1639,6 @@ report_write_failure:
DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
-static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
-{
- int bat_ctrl_raw;
- int bat_ctrl_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL,
- avg_sample, trig_edge, trig_timer, conv_type);
- bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- BAT_CTRL, bat_ctrl_raw);
-
- seq_printf(s, "%d,0x%X\n", bat_ctrl_convert, bat_ctrl_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
-
-static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
-{
- int btemp_ball_raw;
- int btemp_ball_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL,
- avg_sample, trig_edge, trig_timer, conv_type);
- btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
- btemp_ball_raw);
-
- seq_printf(s, "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
-
-static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
-{
- int main_charger_v_raw;
- int main_charger_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- MAIN_CHARGER_V, main_charger_v_raw);
-
- seq_printf(s, "%d,0x%X\n", main_charger_v_convert, main_charger_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
-
-static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
-{
- int acc_detect1_raw;
- int acc_detect1_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1,
- avg_sample, trig_edge, trig_timer, conv_type);
- acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
- acc_detect1_raw);
-
- seq_printf(s, "%d,0x%X\n", acc_detect1_convert, acc_detect1_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
-
-static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
-{
- int acc_detect2_raw;
- int acc_detect2_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2,
- avg_sample, trig_edge, trig_timer, conv_type);
- acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- ACC_DETECT2, acc_detect2_raw);
-
- seq_printf(s, "%d,0x%X\n", acc_detect2_convert, acc_detect2_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
-
-static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
-{
- int aux1_raw;
- int aux1_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1,
- avg_sample, trig_edge, trig_timer, conv_type);
- aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
- aux1_raw);
-
- seq_printf(s, "%d,0x%X\n", aux1_convert, aux1_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
-
-static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
-{
- int aux2_raw;
- int aux2_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2,
- avg_sample, trig_edge, trig_timer, conv_type);
- aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
- aux2_raw);
-
- seq_printf(s, "%d,0x%X\n", aux2_convert, aux2_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
-
-static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
-{
- int main_bat_v_raw;
- int main_bat_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
- main_bat_v_raw);
-
- seq_printf(s, "%d,0x%X\n", main_bat_v_convert, main_bat_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
-
-static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
-{
- int vbus_v_raw;
- int vbus_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
- vbus_v_raw);
-
- seq_printf(s, "%d,0x%X\n", vbus_v_convert, vbus_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
-
-static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
-{
- int main_charger_c_raw;
- int main_charger_c_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- MAIN_CHARGER_C, main_charger_c_raw);
-
- seq_printf(s, "%d,0x%X\n", main_charger_c_convert, main_charger_c_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
-
-static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
-{
- int usb_charger_c_raw;
- int usb_charger_c_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C,
- avg_sample, trig_edge, trig_timer, conv_type);
- usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- USB_CHARGER_C, usb_charger_c_raw);
-
- seq_printf(s, "%d,0x%X\n", usb_charger_c_convert, usb_charger_c_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
-
-static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
-{
- int bk_bat_v_raw;
- int bk_bat_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- BK_BAT_V, bk_bat_v_raw);
-
- seq_printf(s, "%d,0x%X\n", bk_bat_v_convert, bk_bat_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
-
-static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
-{
- int die_temp_raw;
- int die_temp_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP,
- avg_sample, trig_edge, trig_timer, conv_type);
- die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
- die_temp_raw);
-
- seq_printf(s, "%d,0x%X\n", die_temp_convert, die_temp_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
-
-static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
-{
- int usb_id_raw;
- int usb_id_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- usb_id_raw = ab8500_gpadc_read_raw(gpadc, USB_ID,
- avg_sample, trig_edge, trig_timer, conv_type);
- usb_id_convert = ab8500_gpadc_ad_to_voltage(gpadc, USB_ID,
- usb_id_raw);
-
- seq_printf(s, "%d,0x%X\n", usb_id_convert, usb_id_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
-
-static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
-{
- int xtal_temp_raw;
- int xtal_temp_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- xtal_temp_raw = ab8500_gpadc_read_raw(gpadc, XTAL_TEMP,
- avg_sample, trig_edge, trig_timer, conv_type);
- xtal_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, XTAL_TEMP,
- xtal_temp_raw);
-
- seq_printf(s, "%d,0x%X\n", xtal_temp_convert, xtal_temp_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
-
-static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
-{
- int vbat_true_meas_raw;
- int vbat_true_meas_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_true_meas_raw = ab8500_gpadc_read_raw(gpadc, VBAT_TRUE_MEAS,
- avg_sample, trig_edge, trig_timer, conv_type);
- vbat_true_meas_convert =
- ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS,
- vbat_true_meas_raw);
-
- seq_printf(s, "%d,0x%X\n", vbat_true_meas_convert, vbat_true_meas_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
-{
- int bat_ctrl_raw;
- int bat_ctrl_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_ctrl_raw = ab8500_gpadc_double_read_raw(gpadc, BAT_CTRL_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
-
- bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc, BAT_CTRL,
- bat_ctrl_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- bat_ctrl_convert, bat_ctrl_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
-
-static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
-{
- int vbat_meas_raw;
- int vbat_meas_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_meas_raw = ab8500_gpadc_double_read_raw(gpadc, VBAT_MEAS_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
- vbat_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
- vbat_meas_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- vbat_meas_convert, vbat_meas_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
-{
- int vbat_true_meas_raw;
- int vbat_true_meas_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_true_meas_raw = ab8500_gpadc_double_read_raw(gpadc,
- VBAT_TRUE_MEAS_AND_IBAT, avg_sample, trig_edge,
- trig_timer, conv_type, &ibat_raw);
- vbat_true_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- VBAT_TRUE_MEAS, vbat_true_meas_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- vbat_true_meas_convert, vbat_true_meas_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
-
-static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
-{
- int bat_temp_raw;
- int bat_temp_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_temp_raw = ab8500_gpadc_double_read_raw(gpadc, BAT_TEMP_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
- bat_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
- bat_temp_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- bat_temp_convert, bat_temp_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
-
-static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
-{
- struct ab8500_gpadc *gpadc;
- u16 vmain_l, vmain_h, btemp_l, btemp_h;
- u16 vbat_l, vbat_h, ibat_l, ibat_h;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- ab8540_gpadc_get_otp(gpadc, &vmain_l, &vmain_h, &btemp_l, &btemp_h,
- &vbat_l, &vbat_h, &ibat_l, &ibat_h);
- seq_printf(s,
- "VMAIN_L:0x%X\n"
- "VMAIN_H:0x%X\n"
- "BTEMP_L:0x%X\n"
- "BTEMP_H:0x%X\n"
- "VBAT_L:0x%X\n"
- "VBAT_H:0x%X\n"
- "IBAT_L:0x%X\n"
- "IBAT_H:0x%X\n",
- vmain_l, vmain_h, btemp_l, btemp_h,
- vbat_l, vbat_h, ibat_l, ibat_h);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
-
-static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", avg_sample);
-
- return 0;
-}
-
-static int ab8500_gpadc_avg_sample_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_avg_sample_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_avg_sample_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_avg_sample;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_avg_sample);
- if (err)
- return err;
-
- if ((user_avg_sample == SAMPLE_1) || (user_avg_sample == SAMPLE_4)
- || (user_avg_sample == SAMPLE_8)
- || (user_avg_sample == SAMPLE_16)) {
- avg_sample = (u8) user_avg_sample;
- } else {
- dev_err(dev,
- "debugfs err input: should be egal to 1, 4, 8 or 16\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_avg_sample_fops = {
- .open = ab8500_gpadc_avg_sample_open,
- .read = seq_read,
- .write = ab8500_gpadc_avg_sample_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_trig_edge_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", trig_edge);
-
- return 0;
-}
-
-static int ab8500_gpadc_trig_edge_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_trig_edge_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_trig_edge_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_trig_edge;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_trig_edge);
- if (err)
- return err;
-
- if ((user_trig_edge == RISING_EDGE)
- || (user_trig_edge == FALLING_EDGE)) {
- trig_edge = (u8) user_trig_edge;
- } else {
- dev_err(dev, "Wrong input:\n"
- "Enter 0. Rising edge\n"
- "Enter 1. Falling edge\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_trig_edge_fops = {
- .open = ab8500_gpadc_trig_edge_open,
- .read = seq_read,
- .write = ab8500_gpadc_trig_edge_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_trig_timer_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", trig_timer);
-
- return 0;
-}
-
-static int ab8500_gpadc_trig_timer_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_trig_timer_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_trig_timer_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_trig_timer;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_trig_timer);
- if (err)
- return err;
-
- if (user_trig_timer & ~0xFF) {
- dev_err(dev,
- "debugfs error input: should be between 0 to 255\n");
- return -EINVAL;
- }
-
- trig_timer = (u8) user_trig_timer;
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_trig_timer_fops = {
- .open = ab8500_gpadc_trig_timer_open,
- .read = seq_read,
- .write = ab8500_gpadc_trig_timer_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_conv_type_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", conv_type);
-
- return 0;
-}
-
-static int ab8500_gpadc_conv_type_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_conv_type_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_conv_type_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_conv_type;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_conv_type);
- if (err)
- return err;
-
- if ((user_conv_type == ADC_SW)
- || (user_conv_type == ADC_HW)) {
- conv_type = (u8) user_conv_type;
- } else {
- dev_err(dev, "Wrong input:\n"
- "Enter 0. ADC SW conversion\n"
- "Enter 1. ADC HW conversion\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_conv_type_fops = {
- .open = ab8500_gpadc_conv_type_open,
- .read = seq_read,
- .write = ab8500_gpadc_conv_type_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
/*
* return length of an ASCII numerical value, 0 is string is not a
* numerical value.
@@ -2647,7 +2013,6 @@ static const struct file_operations ab8500_hwreg_fops = {
static int ab8500_debug_probe(struct platform_device *plf)
{
struct dentry *ab8500_dir;
- struct dentry *ab8500_gpadc_dir;
struct ab8500 *ab8500;
struct resource *res;
@@ -2689,9 +2054,6 @@ static int ab8500_debug_probe(struct platform_device *plf)
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
- ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
- ab8500_dir);
-
debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
&plf->dev, &ab8500_bank_registers_fops);
debugfs_create_file("all-banks", S_IRUGO, ab8500_dir,
@@ -2727,83 +2089,6 @@ static int ab8500_debug_probe(struct platform_device *plf)
&plf->dev, &ab8500_hwreg_fops);
debugfs_create_file("all-modem-registers", (S_IRUGO | S_IWUSR | S_IWGRP),
ab8500_dir, &plf->dev, &ab8500_modem_fops);
- debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_bat_ctrl_fops);
- debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_btemp_ball_fops);
- debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_charger_v_fops);
- debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_acc_detect1_fops);
- debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_acc_detect2_fops);
- debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_aux1_fops);
- debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_aux2_fops);
- debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_bat_v_fops);
- debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_vbus_v_fops);
- debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_charger_c_fops);
- debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_usb_charger_c_fops);
- debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_bk_bat_v_fops);
- debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_die_temp_fops);
- debugfs_create_file("usb_id", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_usb_id_fops);
- if (is_ab8540(ab8500)) {
- debugfs_create_file("xtal_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_xtal_temp_fops);
- debugfs_create_file("vbattruemeas", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_true_meas_fops);
- debugfs_create_file("batctrl_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_bat_ctrl_and_ibat_fops);
- debugfs_create_file("vbatmeas_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_meas_and_ibat_fops);
- debugfs_create_file("vbattruemeas_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_true_meas_and_ibat_fops);
- debugfs_create_file("battemp_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_bat_temp_and_ibat_fops);
- debugfs_create_file("otp_calib", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_otp_calib_fops);
- }
- debugfs_create_file("avg_sample", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_avg_sample_fops);
- debugfs_create_file("trig_edge", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_trig_edge_fops);
- debugfs_create_file("trig_timer", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_trig_timer_fops);
- debugfs_create_file("conv_type", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_conv_type_fops);
return 0;
}
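
All of the GPADC debugfs plumbing above disappears along with the drivers/mfd copy of the GPADC driver; the files that remain in ab8500_debug_probe() keep using the DEFINE_SHOW_ATTRIBUTE() pattern. A minimal sketch of that pattern follows, with all names invented for illustration: the macro builds example_open() and example_fops around an example_show() callback.

/* Sketch only: the DEFINE_SHOW_ATTRIBUTE() idiom used by the remaining files. */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "example value\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);

/* Registration, typically from a probe() routine: */
static void example_register(struct dentry *parent, struct device *dev)
{
	debugfs_create_file("example", 0444, parent, dev, &example_fops);
}
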
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
deleted file mode 100644
index 005f9ee34cd1..000000000000
--- a/drivers/mfd/ab8500-gpadc.c
+++ /dev/null
@@ -1,1075 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Arun R Murthy <arun.murthy@stericsson.com>
- * Author: Daniel Willerud <daniel.willerud@stericsson.com>
- * Author: Johan Palsson <johan.palsson@stericsson.com>
- * Author: M'boumba Cedric Madianga
- */
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
-
-/*
- * GPADC register offsets
- * Bank : 0x0A
- */
-#define AB8500_GPADC_CTRL1_REG 0x00
-#define AB8500_GPADC_CTRL2_REG 0x01
-#define AB8500_GPADC_CTRL3_REG 0x02
-#define AB8500_GPADC_AUTO_TIMER_REG 0x03
-#define AB8500_GPADC_STAT_REG 0x04
-#define AB8500_GPADC_MANDATAL_REG 0x05
-#define AB8500_GPADC_MANDATAH_REG 0x06
-#define AB8500_GPADC_AUTODATAL_REG 0x07
-#define AB8500_GPADC_AUTODATAH_REG 0x08
-#define AB8500_GPADC_MUX_CTRL_REG 0x09
-#define AB8540_GPADC_MANDATA2L_REG 0x09
-#define AB8540_GPADC_MANDATA2H_REG 0x0A
-#define AB8540_GPADC_APEAAX_REG 0x10
-#define AB8540_GPADC_APEAAT_REG 0x11
-#define AB8540_GPADC_APEAAM_REG 0x12
-#define AB8540_GPADC_APEAAH_REG 0x13
-#define AB8540_GPADC_APEAAL_REG 0x14
-
-/*
- * OTP register offsets
- * Bank : 0x15
- */
-#define AB8500_GPADC_CAL_1 0x0F
-#define AB8500_GPADC_CAL_2 0x10
-#define AB8500_GPADC_CAL_3 0x11
-#define AB8500_GPADC_CAL_4 0x12
-#define AB8500_GPADC_CAL_5 0x13
-#define AB8500_GPADC_CAL_6 0x14
-#define AB8500_GPADC_CAL_7 0x15
-/* New calibration for 8540 */
-#define AB8540_GPADC_OTP4_REG_7 0x38
-#define AB8540_GPADC_OTP4_REG_6 0x39
-#define AB8540_GPADC_OTP4_REG_5 0x3A
-
-/* gpadc constants */
-#define EN_VINTCORE12 0x04
-#define EN_VTVOUT 0x02
-#define EN_GPADC 0x01
-#define DIS_GPADC 0x00
-#define AVG_1 0x00
-#define AVG_4 0x20
-#define AVG_8 0x40
-#define AVG_16 0x60
-#define ADC_SW_CONV 0x04
-#define EN_ICHAR 0x80
-#define BTEMP_PULL_UP 0x08
-#define EN_BUF 0x40
-#define DIS_ZERO 0x00
-#define GPADC_BUSY 0x01
-#define EN_FALLING 0x10
-#define EN_TRIG_EDGE 0x02
-#define EN_VBIAS_XTAL_TEMP 0x02
-
-/* GPADC constants from AB8500 spec, UM0836 */
-#define ADC_RESOLUTION 1024
-#define ADC_CH_BTEMP_MIN 0
-#define ADC_CH_BTEMP_MAX 1350
-#define ADC_CH_DIETEMP_MIN 0
-#define ADC_CH_DIETEMP_MAX 1350
-#define ADC_CH_CHG_V_MIN 0
-#define ADC_CH_CHG_V_MAX 20030
-#define ADC_CH_ACCDET2_MIN 0
-#define ADC_CH_ACCDET2_MAX 2500
-#define ADC_CH_VBAT_MIN 2300
-#define ADC_CH_VBAT_MAX 4800
-#define ADC_CH_CHG_I_MIN 0
-#define ADC_CH_CHG_I_MAX 1500
-#define ADC_CH_BKBAT_MIN 0
-#define ADC_CH_BKBAT_MAX 3200
-
-/* GPADC constants from AB8540 spec */
-#define ADC_CH_IBAT_MIN (-6000) /* mA range measured by ADC for ibat */
-#define ADC_CH_IBAT_MAX 6000
-#define ADC_CH_IBAT_MIN_V (-60) /* mV range measured by ADC for ibat */
-#define ADC_CH_IBAT_MAX_V 60
-#define IBAT_VDROP_L (-56) /* mV */
-#define IBAT_VDROP_H 56
-
-/* This is used to not lose precision when dividing to get gain and offset */
-#define CALIB_SCALE 1000
-/*
- * Number of bits shift used to not lose precision
- * when dividing to get ibat gain.
- */
-#define CALIB_SHIFT_IBAT 20
-
-/* Time in ms before disabling regulator */
-#define GPADC_AUDOSUSPEND_DELAY 1
-
-#define CONVERSION_TIME 500 /* ms */
-
-enum cal_channels {
- ADC_INPUT_VMAIN = 0,
- ADC_INPUT_BTEMP,
- ADC_INPUT_VBAT,
- ADC_INPUT_IBAT,
- NBR_CAL_INPUTS,
-};
-
-/**
- * struct adc_cal_data - Table for storing gain and offset for the calibrated
- * ADC channels
- * @gain: Gain of the ADC channel
- * @offset: Offset of the ADC channel
- */
-struct adc_cal_data {
- s64 gain;
- s64 offset;
- u16 otp_calib_hi;
- u16 otp_calib_lo;
-};
-
-/**
- * struct ab8500_gpadc - AB8500 GPADC device information
- * @dev: pointer to the struct device
- * @node: a list of AB8500 GPADCs, hence prepared for
- reentrance
- * @parent: pointer to the struct ab8500
- * @ab8500_gpadc_complete: pointer to the struct completion, to indicate
- * the completion of gpadc conversion
- * @ab8500_gpadc_lock: structure of type mutex
- * @regu: pointer to the struct regulator
- * @irq_sw: interrupt number that is used by gpadc for Sw
- * conversion
- * @irq_hw: interrupt number that is used by gpadc for Hw
- * conversion
- * @cal_data array of ADC calibration data structs
- */
-struct ab8500_gpadc {
- struct device *dev;
- struct list_head node;
- struct ab8500 *parent;
- struct completion ab8500_gpadc_complete;
- struct mutex ab8500_gpadc_lock;
- struct regulator *regu;
- int irq_sw;
- int irq_hw;
- struct adc_cal_data cal_data[NBR_CAL_INPUTS];
-};
-
-static LIST_HEAD(ab8500_gpadc_list);
-
-/**
- * ab8500_gpadc_get() - returns a reference to the primary AB8500 GPADC
- * (i.e. the first GPADC in the instance list)
- */
-struct ab8500_gpadc *ab8500_gpadc_get(char *name)
-{
- struct ab8500_gpadc *gpadc;
-
- list_for_each_entry(gpadc, &ab8500_gpadc_list, node) {
- if (!strcmp(name, dev_name(gpadc->dev)))
- return gpadc;
- }
-
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(ab8500_gpadc_get);
-
-/**
- * ab8500_gpadc_ad_to_voltage() - Convert a raw ADC value to a voltage
- */
-int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, u8 channel,
- int ad_value)
-{
- int res;
-
- switch (channel) {
- case MAIN_CHARGER_V:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_VMAIN].gain) {
- res = ADC_CH_CHG_V_MIN + (ADC_CH_CHG_V_MAX -
- ADC_CH_CHG_V_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VMAIN].gain +
- gpadc->cal_data[ADC_INPUT_VMAIN].offset) / CALIB_SCALE;
- break;
-
- case XTAL_TEMP:
- case BAT_CTRL:
- case BTEMP_BALL:
- case ACC_DETECT1:
- case ADC_AUX1:
- case ADC_AUX2:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_BTEMP].gain) {
- res = ADC_CH_BTEMP_MIN + (ADC_CH_BTEMP_MAX -
- ADC_CH_BTEMP_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_BTEMP].gain +
- gpadc->cal_data[ADC_INPUT_BTEMP].offset) / CALIB_SCALE;
- break;
-
- case MAIN_BAT_V:
- case VBAT_TRUE_MEAS:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_VBAT].gain) {
- res = ADC_CH_VBAT_MIN + (ADC_CH_VBAT_MAX -
- ADC_CH_VBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VBAT].gain +
- gpadc->cal_data[ADC_INPUT_VBAT].offset) / CALIB_SCALE;
- break;
-
- case DIE_TEMP:
- res = ADC_CH_DIETEMP_MIN +
- (ADC_CH_DIETEMP_MAX - ADC_CH_DIETEMP_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case ACC_DETECT2:
- res = ADC_CH_ACCDET2_MIN +
- (ADC_CH_ACCDET2_MAX - ADC_CH_ACCDET2_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case VBUS_V:
- res = ADC_CH_CHG_V_MIN +
- (ADC_CH_CHG_V_MAX - ADC_CH_CHG_V_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case MAIN_CHARGER_C:
- case USB_CHARGER_C:
- res = ADC_CH_CHG_I_MIN +
- (ADC_CH_CHG_I_MAX - ADC_CH_CHG_I_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case BK_BAT_V:
- res = ADC_CH_BKBAT_MIN +
- (ADC_CH_BKBAT_MAX - ADC_CH_BKBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case IBAT_VIRTUAL_CHANNEL:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_IBAT].gain) {
- res = ADC_CH_IBAT_MIN + (ADC_CH_IBAT_MAX -
- ADC_CH_IBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_IBAT].gain +
- gpadc->cal_data[ADC_INPUT_IBAT].offset)
- >> CALIB_SHIFT_IBAT;
- break;
-
- default:
- dev_err(gpadc->dev,
- "unknown channel, not possible to convert\n");
- res = -EINVAL;
- break;
-
- }
- return res;
-}
-EXPORT_SYMBOL(ab8500_gpadc_ad_to_voltage);
-
-/**
- * ab8500_gpadc_sw_hw_convert() - gpadc conversion
- * @channel: analog channel to be converted to digital data
- * @avg_sample: number of ADC sample to average
- * @trig_egde: selected ADC trig edge
- * @trig_timer: selected ADC trigger delay timer
- * @conv_type: selected conversion type (HW or SW conversion)
- *
- * This function converts the selected analog i/p to digital
- * data.
- */
-int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type)
-{
- int ad_value;
- int voltage;
-
- ad_value = ab8500_gpadc_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type);
-
- /* On failure retry a second time */
- if (ad_value < 0)
- ad_value = ab8500_gpadc_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type);
- if (ad_value < 0) {
- dev_err(gpadc->dev, "GPADC raw value failed ch: %d\n",
- channel);
- return ad_value;
- }
-
- voltage = ab8500_gpadc_ad_to_voltage(gpadc, channel, ad_value);
- if (voltage < 0)
- dev_err(gpadc->dev,
- "GPADC to voltage conversion failed ch: %d AD: 0x%x\n",
- channel, ad_value);
-
- return voltage;
-}
-EXPORT_SYMBOL(ab8500_gpadc_sw_hw_convert);
-
-/**
- * ab8500_gpadc_read_raw() - gpadc read
- * @channel: analog channel to be read
- * @avg_sample: number of ADC sample to average
- * @trig_edge: selected trig edge
- * @trig_timer: selected ADC trigger delay timer
- * @conv_type: selected conversion type (HW or SW conversion)
- *
- * This function obtains the raw ADC value for an hardware conversion,
- * this then needs to be converted by calling ab8500_gpadc_ad_to_voltage()
- */
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type)
-{
- return ab8500_gpadc_double_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type,
- NULL);
-}
-
-int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
- int *ibat)
-{
- int ret;
- int looplimit = 0;
- unsigned long completion_timeout;
- u8 val, low_data, high_data, low_data2, high_data2;
- u8 val_reg1 = 0;
- unsigned int delay_min = 0;
- unsigned int delay_max = 0;
- u8 data_low_addr, data_high_addr;
-
- if (!gpadc)
- return -ENODEV;
-
- /* check if convertion is supported */
- if ((gpadc->irq_sw < 0) && (conv_type == ADC_SW))
- return -ENOTSUPP;
- if ((gpadc->irq_hw < 0) && (conv_type == ADC_HW))
- return -ENOTSUPP;
-
- mutex_lock(&gpadc->ab8500_gpadc_lock);
- /* Enable VTVout LDO this is required for GPADC */
- pm_runtime_get_sync(gpadc->dev);
-
- /* Check if ADC is not busy, lock and proceed */
- do {
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_STAT_REG, &val);
- if (ret < 0)
- goto out;
- if (!(val & GPADC_BUSY))
- break;
- msleep(20);
- } while (++looplimit < 10);
- if (looplimit >= 10 && (val & GPADC_BUSY)) {
- dev_err(gpadc->dev, "gpadc_conversion: GPADC busy");
- ret = -EINVAL;
- goto out;
- }
-
- /* Enable GPADC */
- val_reg1 |= EN_GPADC;
-
- /* Select the channel source and set average samples */
- switch (avg_sample) {
- case SAMPLE_1:
- val = channel | AVG_1;
- break;
- case SAMPLE_4:
- val = channel | AVG_4;
- break;
- case SAMPLE_8:
- val = channel | AVG_8;
- break;
- default:
- val = channel | AVG_16;
- break;
- }
-
- if (conv_type == ADC_HW) {
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL3_REG, val);
- val_reg1 |= EN_TRIG_EDGE;
- if (trig_edge)
- val_reg1 |= EN_FALLING;
- } else
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL2_REG, val);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: set avg samples failed\n");
- goto out;
- }
-
- /*
- * Enable ADC, buffering, select rising edge and enable ADC path
- * charging current sense if it needed, ABB 3.0 needs some special
- * treatment too.
- */
- switch (channel) {
- case MAIN_CHARGER_C:
- case USB_CHARGER_C:
- val_reg1 |= EN_BUF | EN_ICHAR;
- break;
- case BTEMP_BALL:
- if (!is_ab8500_2p0_or_earlier(gpadc->parent)) {
- val_reg1 |= EN_BUF | BTEMP_PULL_UP;
- /*
- * Delay might be needed for ABB8500 cut 3.0, if not,
- * remove when hardware will be availible
- */
- delay_min = 1000; /* Delay in micro seconds */
- delay_max = 10000; /* large range optimises sleepmode */
- break;
- }
- /* Intentional fallthrough */
- default:
- val_reg1 |= EN_BUF;
- break;
- }
-
- /* Write configuration to register */
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL1_REG, val_reg1);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: set Control register failed\n");
- goto out;
- }
-
- if (delay_min != 0)
- usleep_range(delay_min, delay_max);
-
- if (conv_type == ADC_HW) {
- /* Set trigger delay timer */
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_AUTO_TIMER_REG, trig_timer);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: trig timer failed\n");
- goto out;
- }
- completion_timeout = 2 * HZ;
- data_low_addr = AB8500_GPADC_AUTODATAL_REG;
- data_high_addr = AB8500_GPADC_AUTODATAH_REG;
- } else {
- /* Start SW conversion */
- ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
- ADC_SW_CONV, ADC_SW_CONV);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: start s/w conv failed\n");
- goto out;
- }
- completion_timeout = msecs_to_jiffies(CONVERSION_TIME);
- data_low_addr = AB8500_GPADC_MANDATAL_REG;
- data_high_addr = AB8500_GPADC_MANDATAH_REG;
- }
-
- /* wait for completion of conversion */
- if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete,
- completion_timeout)) {
- dev_err(gpadc->dev,
- "timeout didn't receive GPADC conv interrupt\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Read the converted RAW data */
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, data_low_addr, &low_data);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: read low data failed\n");
- goto out;
- }
-
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, data_high_addr, &high_data);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: read high data failed\n");
- goto out;
- }
-
- /* Check if double convertion is required */
- if ((channel == BAT_CTRL_AND_IBAT) ||
- (channel == VBAT_MEAS_AND_IBAT) ||
- (channel == VBAT_TRUE_MEAS_AND_IBAT) ||
- (channel == BAT_TEMP_AND_IBAT)) {
-
- if (conv_type == ADC_HW) {
- /* not supported */
- ret = -ENOTSUPP;
- dev_err(gpadc->dev,
- "gpadc_conversion: only SW double conversion supported\n");
- goto out;
- } else {
- /* Read the converted RAW data 2 */
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8540_GPADC_MANDATA2L_REG,
- &low_data2);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: read sw low data 2 failed\n");
- goto out;
- }
-
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8540_GPADC_MANDATA2H_REG,
- &high_data2);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: read sw high data 2 failed\n");
- goto out;
- }
- if (ibat != NULL) {
- *ibat = (high_data2 << 8) | low_data2;
- } else {
- dev_warn(gpadc->dev,
- "gpadc_conversion: ibat not stored\n");
- }
-
- }
- }
-
- /* Disable GPADC */
- ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
- AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
- goto out;
- }
-
- /* Disable VTVout LDO this is required for GPADC */
- pm_runtime_mark_last_busy(gpadc->dev);
- pm_runtime_put_autosuspend(gpadc->dev);
-
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
-
- return (high_data << 8) | low_data;
-
-out:
- /*
- * It has shown to be needed to turn off the GPADC if an error occurs,
- * otherwise we might have problem when waiting for the busy bit in the
- * GPADC status register to go low. In V1.1 there wait_for_completion
- * seems to timeout when waiting for an interrupt.. Not seen in V2.0
- */
- (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
- AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- pm_runtime_put(gpadc->dev);
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
- dev_err(gpadc->dev,
- "gpadc_conversion: Failed to AD convert channel %d\n", channel);
- return ret;
-}
-EXPORT_SYMBOL(ab8500_gpadc_read_raw);
-
-/**
- * ab8500_bm_gpadcconvend_handler() - isr for gpadc conversion completion
- * @irq: irq number
- * @data: pointer to the data passed during request irq
- *
- * This is a interrupt service routine for gpadc conversion completion.
- * Notifies the gpadc completion is completed and the converted raw value
- * can be read from the registers.
- * Returns IRQ status(IRQ_HANDLED)
- */
-static irqreturn_t ab8500_bm_gpadcconvend_handler(int irq, void *_gpadc)
-{
- struct ab8500_gpadc *gpadc = _gpadc;
-
- complete(&gpadc->ab8500_gpadc_complete);
-
- return IRQ_HANDLED;
-}
-
-static int otp_cal_regs[] = {
- AB8500_GPADC_CAL_1,
- AB8500_GPADC_CAL_2,
- AB8500_GPADC_CAL_3,
- AB8500_GPADC_CAL_4,
- AB8500_GPADC_CAL_5,
- AB8500_GPADC_CAL_6,
- AB8500_GPADC_CAL_7,
-};
-
-static int otp4_cal_regs[] = {
- AB8540_GPADC_OTP4_REG_7,
- AB8540_GPADC_OTP4_REG_6,
- AB8540_GPADC_OTP4_REG_5,
-};
-
-static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
-{
- int i;
- int ret[ARRAY_SIZE(otp_cal_regs)];
- u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
- int ret_otp4[ARRAY_SIZE(otp4_cal_regs)];
- u8 gpadc_otp4[ARRAY_SIZE(otp4_cal_regs)];
- int vmain_high, vmain_low;
- int btemp_high, btemp_low;
- int vbat_high, vbat_low;
- int ibat_high, ibat_low;
- s64 V_gain, V_offset, V2A_gain, V2A_offset;
- struct ab8500 *ab8500;
-
- ab8500 = gpadc->parent;
-
- /* First we read all OTP registers and store the error code */
- for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
- ret[i] = abx500_get_register_interruptible(gpadc->dev,
- AB8500_OTP_EMUL, otp_cal_regs[i], &gpadc_cal[i]);
- if (ret[i] < 0)
- dev_err(gpadc->dev, "%s: read otp reg 0x%02x failed\n",
- __func__, otp_cal_regs[i]);
- }
-
- /*
- * The ADC calibration data is stored in OTP registers.
- * The layout of the calibration data is outlined below and a more
- * detailed description can be found in UM0836
- *
- * vm_h/l = vmain_high/low
- * bt_h/l = btemp_high/low
- * vb_h/l = vbat_high/low
- *
- * Data bits 8500/9540:
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | vm_h9 | vm_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- *
- * Data bits 8540:
- * OTP2
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h9 | vm_h8 | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- *
- * Data bits 8540:
- * OTP4
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | ib_h9 | ib_h8 | ib_h7
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | ib_h6 | ib_h5 | ib_h4 | ib_h3 | ib_h2 | ib_h1 | ib_h0 | ib_l5
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | ib_l4 | ib_l3 | ib_l2 | ib_l1 | ib_l0 |
- *
- *
- * Ideal output ADC codes corresponding to injected input voltages
- * during manufacturing is:
- *
- * vmain_high: Vin = 19500mV / ADC ideal code = 997
- * vmain_low: Vin = 315mV / ADC ideal code = 16
- * btemp_high: Vin = 1300mV / ADC ideal code = 985
- * btemp_low: Vin = 21mV / ADC ideal code = 16
- * vbat_high: Vin = 4700mV / ADC ideal code = 982
- * vbat_low: Vin = 2380mV / ADC ideal code = 33
- */
-
- if (is_ab8540(ab8500)) {
- /* Calculate gain and offset for VMAIN if all reads succeeded*/
- if (!(ret[1] < 0 || ret[2] < 0)) {
- vmain_high = (((gpadc_cal[1] & 0xFF) << 2) |
- ((gpadc_cal[2] & 0xC0) >> 6));
- vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi =
- (u16)vmain_high;
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo =
- (u16)vmain_low;
-
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
- (19500 - 315) / (vmain_high - vmain_low);
- gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE *
- 19500 - (CALIB_SCALE * (19500 - 315) /
- (vmain_high - vmain_low)) * vmain_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
- }
-
- /* Read IBAT calibration Data */
- for (i = 0; i < ARRAY_SIZE(otp4_cal_regs); i++) {
- ret_otp4[i] = abx500_get_register_interruptible(
- gpadc->dev, AB8500_OTP_EMUL,
- otp4_cal_regs[i], &gpadc_otp4[i]);
- if (ret_otp4[i] < 0)
- dev_err(gpadc->dev,
- "%s: read otp4 reg 0x%02x failed\n",
- __func__, otp4_cal_regs[i]);
- }
-
- /* Calculate gain and offset for IBAT if all reads succeeded */
- if (!(ret_otp4[0] < 0 || ret_otp4[1] < 0 || ret_otp4[2] < 0)) {
- ibat_high = (((gpadc_otp4[0] & 0x07) << 7) |
- ((gpadc_otp4[1] & 0xFE) >> 1));
- ibat_low = (((gpadc_otp4[1] & 0x01) << 5) |
- ((gpadc_otp4[2] & 0xF8) >> 3));
-
- gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi =
- (u16)ibat_high;
- gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo =
- (u16)ibat_low;
-
- V_gain = ((IBAT_VDROP_H - IBAT_VDROP_L)
- << CALIB_SHIFT_IBAT) / (ibat_high - ibat_low);
-
- V_offset = (IBAT_VDROP_H << CALIB_SHIFT_IBAT) -
- (((IBAT_VDROP_H - IBAT_VDROP_L) <<
- CALIB_SHIFT_IBAT) / (ibat_high - ibat_low))
- * ibat_high;
- /*
- * Result obtained is in mV (at a scale factor),
- * we need to calculate gain and offset to get mA
- */
- V2A_gain = (ADC_CH_IBAT_MAX - ADC_CH_IBAT_MIN)/
- (ADC_CH_IBAT_MAX_V - ADC_CH_IBAT_MIN_V);
- V2A_offset = ((ADC_CH_IBAT_MAX_V * ADC_CH_IBAT_MIN -
- ADC_CH_IBAT_MAX * ADC_CH_IBAT_MIN_V)
- << CALIB_SHIFT_IBAT)
- / (ADC_CH_IBAT_MAX_V - ADC_CH_IBAT_MIN_V);
-
- gpadc->cal_data[ADC_INPUT_IBAT].gain =
- V_gain * V2A_gain;
- gpadc->cal_data[ADC_INPUT_IBAT].offset =
- V_offset * V2A_gain + V2A_offset;
- } else {
- gpadc->cal_data[ADC_INPUT_IBAT].gain = 0;
- }
-
- dev_dbg(gpadc->dev, "IBAT gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_IBAT].gain,
- gpadc->cal_data[ADC_INPUT_IBAT].offset);
- } else {
- /* Calculate gain and offset for VMAIN if all reads succeeded */
- if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
- vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
- ((gpadc_cal[1] & 0x3F) << 2) |
- ((gpadc_cal[2] & 0xC0) >> 6));
- vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi =
- (u16)vmain_high;
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo =
- (u16)vmain_low;
-
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
- (19500 - 315) / (vmain_high - vmain_low);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE *
- 19500 - (CALIB_SCALE * (19500 - 315) /
- (vmain_high - vmain_low)) * vmain_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
- }
- }
-
- /* Calculate gain and offset for BTEMP if all reads succeeded */
- if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
- btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
- (gpadc_cal[3] << 1) | ((gpadc_cal[4] & 0x80) >> 7));
- btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
-
- gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_hi = (u16)btemp_high;
- gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_lo = (u16)btemp_low;
-
- gpadc->cal_data[ADC_INPUT_BTEMP].gain =
- CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
- gpadc->cal_data[ADC_INPUT_BTEMP].offset = CALIB_SCALE * 1300 -
- (CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low))
- * btemp_high;
- } else {
- gpadc->cal_data[ADC_INPUT_BTEMP].gain = 0;
- }
-
- /* Calculate gain and offset for VBAT if all reads succeeded */
- if (!(ret[4] < 0 || ret[5] < 0 || ret[6] < 0)) {
- vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
- vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
-
- gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_hi = (u16)vbat_high;
- gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_lo = (u16)vbat_low;
-
- gpadc->cal_data[ADC_INPUT_VBAT].gain = CALIB_SCALE *
- (4700 - 2380) / (vbat_high - vbat_low);
- gpadc->cal_data[ADC_INPUT_VBAT].offset = CALIB_SCALE * 4700 -
- (CALIB_SCALE * (4700 - 2380) /
- (vbat_high - vbat_low)) * vbat_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VBAT].gain = 0;
- }
-
- dev_dbg(gpadc->dev, "VMAIN gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_VMAIN].gain,
- gpadc->cal_data[ADC_INPUT_VMAIN].offset);
-
- dev_dbg(gpadc->dev, "BTEMP gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_BTEMP].gain,
- gpadc->cal_data[ADC_INPUT_BTEMP].offset);
-
- dev_dbg(gpadc->dev, "VBAT gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_VBAT].gain,
- gpadc->cal_data[ADC_INPUT_VBAT].offset);
-}
-
-#ifdef CONFIG_PM
-static int ab8500_gpadc_runtime_suspend(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
-
- regulator_disable(gpadc->regu);
- return 0;
-}
-
-static int ab8500_gpadc_runtime_resume(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
- int ret;
-
- ret = regulator_enable(gpadc->regu);
- if (ret)
- dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
- return ret;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int ab8500_gpadc_suspend(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
-
- mutex_lock(&gpadc->ab8500_gpadc_lock);
-
- pm_runtime_get_sync(dev);
-
- regulator_disable(gpadc->regu);
- return 0;
-}
-
-static int ab8500_gpadc_resume(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
- int ret;
-
- ret = regulator_enable(gpadc->regu);
- if (ret)
- dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
-
- pm_runtime_mark_last_busy(gpadc->dev);
- pm_runtime_put_autosuspend(gpadc->dev);
-
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
- return ret;
-}
-#endif
-
-static int ab8500_gpadc_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct ab8500_gpadc *gpadc;
-
- gpadc = devm_kzalloc(&pdev->dev,
- sizeof(struct ab8500_gpadc), GFP_KERNEL);
- if (!gpadc)
- return -ENOMEM;
-
- gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
- if (gpadc->irq_sw < 0)
- dev_err(gpadc->dev, "failed to get platform sw_conv_end irq\n");
-
- gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
- if (gpadc->irq_hw < 0)
- dev_err(gpadc->dev, "failed to get platform hw_conv_end irq\n");
-
- gpadc->dev = &pdev->dev;
- gpadc->parent = dev_get_drvdata(pdev->dev.parent);
- mutex_init(&gpadc->ab8500_gpadc_lock);
-
- /* Initialize completion used to notify completion of conversion */
- init_completion(&gpadc->ab8500_gpadc_complete);
-
- /* Register interrupts */
- if (gpadc->irq_sw >= 0) {
- ret = request_threaded_irq(gpadc->irq_sw, NULL,
- ab8500_bm_gpadcconvend_handler,
- IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
- "ab8500-gpadc-sw",
- gpadc);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "Failed to register interrupt irq: %d\n",
- gpadc->irq_sw);
- goto fail;
- }
- }
-
- if (gpadc->irq_hw >= 0) {
- ret = request_threaded_irq(gpadc->irq_hw, NULL,
- ab8500_bm_gpadcconvend_handler,
- IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
- "ab8500-gpadc-hw",
- gpadc);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "Failed to register interrupt irq: %d\n",
- gpadc->irq_hw);
- goto fail_irq;
- }
- }
-
- /* VTVout LDO used to power up ab8500-GPADC */
- gpadc->regu = devm_regulator_get(&pdev->dev, "vddadc");
- if (IS_ERR(gpadc->regu)) {
- ret = PTR_ERR(gpadc->regu);
- dev_err(gpadc->dev, "failed to get vtvout LDO\n");
- goto fail_irq;
- }
-
- platform_set_drvdata(pdev, gpadc);
-
- ret = regulator_enable(gpadc->regu);
- if (ret) {
- dev_err(gpadc->dev, "Failed to enable vtvout LDO: %d\n", ret);
- goto fail_enable;
- }
-
- pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUDOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(gpadc->dev);
- pm_runtime_set_active(gpadc->dev);
- pm_runtime_enable(gpadc->dev);
-
- ab8500_gpadc_read_calibration_data(gpadc);
- list_add_tail(&gpadc->node, &ab8500_gpadc_list);
- dev_dbg(gpadc->dev, "probe success\n");
-
- return 0;
-
-fail_enable:
-fail_irq:
- free_irq(gpadc->irq_sw, gpadc);
- free_irq(gpadc->irq_hw, gpadc);
-fail:
- return ret;
-}
-
-static int ab8500_gpadc_remove(struct platform_device *pdev)
-{
- struct ab8500_gpadc *gpadc = platform_get_drvdata(pdev);
-
- /* remove this gpadc entry from the list */
- list_del(&gpadc->node);
- /* remove interrupt - completion of Sw ADC conversion */
- if (gpadc->irq_sw >= 0)
- free_irq(gpadc->irq_sw, gpadc);
- if (gpadc->irq_hw >= 0)
- free_irq(gpadc->irq_hw, gpadc);
-
- pm_runtime_get_sync(gpadc->dev);
- pm_runtime_disable(gpadc->dev);
-
- regulator_disable(gpadc->regu);
-
- pm_runtime_set_suspended(gpadc->dev);
-
- pm_runtime_put_noidle(gpadc->dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
- SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
- ab8500_gpadc_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(ab8500_gpadc_suspend,
- ab8500_gpadc_resume)
-
-};
-
-static struct platform_driver ab8500_gpadc_driver = {
- .probe = ab8500_gpadc_probe,
- .remove = ab8500_gpadc_remove,
- .driver = {
- .name = "ab8500-gpadc",
- .pm = &ab8500_gpadc_pm_ops,
- },
-};
-
-static int __init ab8500_gpadc_init(void)
-{
- return platform_driver_register(&ab8500_gpadc_driver);
-}
-subsys_initcall_sync(ab8500_gpadc_init);
-
-/**
- * ab8540_gpadc_get_otp() - returns OTP values
- *
- */
-void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
- u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
- u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h)
-{
- *vmain_l = gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo;
- *vmain_h = gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi;
- *btemp_l = gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_lo;
- *btemp_h = gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_hi;
- *vbat_l = gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_lo;
- *vbat_h = gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_hi;
- *ibat_l = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo;
- *ibat_h = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi;
-}
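For context on the pattern the removed probe above relied on, here is a minimal sketch of the runtime-PM autosuspend idiom (set the delay in probe, then mark_last_busy/put_autosuspend after each hardware access). The foo_* names and the delay value are illustrative placeholders, not taken from the original driver.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define FOO_AUTOSUSPEND_DELAY_MS	1000	/* hypothetical value */

static int foo_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        /* Let the device drop back to suspend after a period of inactivity */
        pm_runtime_set_autosuspend_delay(dev, FOO_AUTOSUSPEND_DELAY_MS);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
        return 0;
}

static int foo_access_hw(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);     /* resume the hardware */

        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... talk to the hardware here ... */

        pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev);
        return 0;
}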
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 4a31907a4525..f73cf76d1373 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -814,11 +814,7 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
int ret, i;
/* Handle old non-standard DT binding */
- pdata->reset = devm_gpiod_get_from_of_node(arizona->dev,
- arizona->dev->of_node,
- "wlf,reset", 0,
- GPIOD_OUT_LOW,
- "arizona /RESET");
+ pdata->reset = devm_gpiod_get(arizona->dev, "wlf,reset", GPIOD_OUT_LOW);
if (IS_ERR(pdata->reset)) {
ret = PTR_ERR(pdata->reset);
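The hunk above replaces the non-standard devm_gpiod_get_from_of_node() lookup with plain devm_gpiod_get(), which resolves the GPIO from the device's own firmware node. A minimal sketch of that style follows; the "reset" con_id and the foo_ prefix are illustrative, not part of the patch.

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc *foo_get_reset(struct device *dev)
{
        struct gpio_desc *reset;

        /* Resolves "reset-gpios" (or the ACPI equivalent) for this device */
        reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset))
                dev_err(dev, "failed to get reset GPIO: %ld\n",
                        PTR_ERR(reset));

        return reset;
}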
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 6e6dfd6c1871..c4b977a5dd96 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -78,6 +78,10 @@ static const struct mfd_cell cros_ec_rtc_cells[] = {
{ .name = "cros-ec-rtc", },
};
+static const struct mfd_cell cros_ec_sensorhub_cells[] = {
+ { .name = "cros-ec-sensorhub", },
+};
+
static const struct mfd_cell cros_usbpd_charger_cells[] = {
{ .name = "cros-usbpd-charger", },
{ .name = "cros-usbpd-logger", },
@@ -112,229 +116,11 @@ static const struct mfd_cell cros_ec_vbc_cells[] = {
{ .name = "cros-ec-vbc", }
};
-static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
-{
- struct cros_ec_command *msg;
- int ret;
-
- if (ec->features[0] == -1U && ec->features[1] == -1U) {
- /* features bitmap not read yet */
- msg = kzalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
- msg->insize = sizeof(ec->features);
-
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
- ret, msg->result);
- memset(ec->features, 0, sizeof(ec->features));
- } else {
- memcpy(ec->features, msg->data, sizeof(ec->features));
- }
-
- dev_dbg(ec->dev, "EC features %08x %08x\n",
- ec->features[0], ec->features[1]);
-
- kfree(msg);
- }
-
- return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
-}
-
static void cros_ec_class_release(struct device *dev)
{
kfree(to_cros_ec_dev(dev));
}
-static void cros_ec_sensors_register(struct cros_ec_dev *ec)
-{
- /*
- * Issue a command to get the number of sensor reported.
- * Build an array of sensors driver and register them all.
- */
- int ret, i, id, sensor_num;
- struct mfd_cell *sensor_cells;
- struct cros_ec_sensor_platform *sensor_platforms;
- int sensor_type[MOTIONSENSE_TYPE_MAX];
- struct ec_params_motion_sense *params;
- struct ec_response_motion_sense *resp;
- struct cros_ec_command *msg;
-
- msg = kzalloc(sizeof(struct cros_ec_command) +
- max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
- if (msg == NULL)
- return;
-
- msg->version = 2;
- msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
- msg->outsize = sizeof(*params);
- msg->insize = sizeof(*resp);
-
- params = (struct ec_params_motion_sense *)msg->data;
- params->cmd = MOTIONSENSE_CMD_DUMP;
-
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
- ret, msg->result);
- goto error;
- }
-
- resp = (struct ec_response_motion_sense *)msg->data;
- sensor_num = resp->dump.sensor_count;
- /*
- * Allocate 2 extra sensors if lid angle sensor and/or FIFO are needed.
- */
- sensor_cells = kcalloc(sensor_num + 2, sizeof(struct mfd_cell),
- GFP_KERNEL);
- if (sensor_cells == NULL)
- goto error;
-
- sensor_platforms = kcalloc(sensor_num,
- sizeof(struct cros_ec_sensor_platform),
- GFP_KERNEL);
- if (sensor_platforms == NULL)
- goto error_platforms;
-
- memset(sensor_type, 0, sizeof(sensor_type));
- id = 0;
- for (i = 0; i < sensor_num; i++) {
- params->cmd = MOTIONSENSE_CMD_INFO;
- params->info.sensor_num = i;
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
- i, ret, msg->result);
- continue;
- }
- switch (resp->info.type) {
- case MOTIONSENSE_TYPE_ACCEL:
- sensor_cells[id].name = "cros-ec-accel";
- break;
- case MOTIONSENSE_TYPE_BARO:
- sensor_cells[id].name = "cros-ec-baro";
- break;
- case MOTIONSENSE_TYPE_GYRO:
- sensor_cells[id].name = "cros-ec-gyro";
- break;
- case MOTIONSENSE_TYPE_MAG:
- sensor_cells[id].name = "cros-ec-mag";
- break;
- case MOTIONSENSE_TYPE_PROX:
- sensor_cells[id].name = "cros-ec-prox";
- break;
- case MOTIONSENSE_TYPE_LIGHT:
- sensor_cells[id].name = "cros-ec-light";
- break;
- case MOTIONSENSE_TYPE_ACTIVITY:
- sensor_cells[id].name = "cros-ec-activity";
- break;
- default:
- dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
- continue;
- }
- sensor_platforms[id].sensor_num = i;
- sensor_cells[id].id = sensor_type[resp->info.type];
- sensor_cells[id].platform_data = &sensor_platforms[id];
- sensor_cells[id].pdata_size =
- sizeof(struct cros_ec_sensor_platform);
-
- sensor_type[resp->info.type]++;
- id++;
- }
-
- if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
- ec->has_kb_wake_angle = true;
-
- if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
- sensor_cells[id].name = "cros-ec-ring";
- id++;
- }
- if (cros_ec_check_features(ec,
- EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
- sensor_cells[id].name = "cros-ec-lid-angle";
- id++;
- }
-
- ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
- NULL, 0, NULL);
- if (ret)
- dev_err(ec->dev, "failed to add EC sensors\n");
-
- kfree(sensor_platforms);
-error_platforms:
- kfree(sensor_cells);
-error:
- kfree(msg);
-}
-
-static struct cros_ec_sensor_platform sensor_platforms[] = {
- { .sensor_num = 0 },
- { .sensor_num = 1 }
-};
-
-static const struct mfd_cell cros_ec_accel_legacy_cells[] = {
- {
- .name = "cros-ec-accel-legacy",
- .platform_data = &sensor_platforms[0],
- .pdata_size = sizeof(struct cros_ec_sensor_platform),
- },
- {
- .name = "cros-ec-accel-legacy",
- .platform_data = &sensor_platforms[1],
- .pdata_size = sizeof(struct cros_ec_sensor_platform),
- }
-};
-
-static void cros_ec_accel_legacy_register(struct cros_ec_dev *ec)
-{
- struct cros_ec_device *ec_dev = ec->ec_dev;
- u8 status;
- int ret;
-
- /*
- * ECs that need legacy support are the main EC, directly connected to
- * the AP.
- */
- if (ec->cmd_offset != 0)
- return;
-
- /*
- * Check if EC supports direct memory reads and if EC has
- * accelerometers.
- */
- if (ec_dev->cmd_readmem) {
- ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS, 1,
- &status);
- if (ret < 0) {
- dev_warn(ec->dev, "EC direct read error.\n");
- return;
- }
-
- /* Check if EC has accelerometers. */
- if (!(status & EC_MEMMAP_ACC_STATUS_PRESENCE_BIT)) {
- dev_info(ec->dev, "EC does not have accelerometers.\n");
- return;
- }
- }
-
- /*
- * The device may still support accelerometers:
- * it would be an older ARM based device that do not suppor the
- * EC_CMD_GET_FEATURES command.
- *
- * Register 2 accelerometers, we will fail in the IIO driver if there
- * are no sensors.
- */
- ret = mfd_add_hotplug_devices(ec->dev, cros_ec_accel_legacy_cells,
- ARRAY_SIZE(cros_ec_accel_legacy_cells));
- if (ret)
- dev_err(ec_dev->dev, "failed to add EC sensors\n");
-}
-
static int ec_device_probe(struct platform_device *pdev)
{
int retval = -ENOMEM;
@@ -390,11 +176,14 @@ static int ec_device_probe(struct platform_device *pdev)
goto failed;
/* check whether this EC is a sensor hub. */
- if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
- cros_ec_sensors_register(ec);
- else
- /* Workaroud for older EC firmware */
- cros_ec_accel_legacy_register(ec);
+ if (cros_ec_get_sensor_count(ec) > 0) {
+ retval = mfd_add_hotplug_devices(ec->dev,
+ cros_ec_sensorhub_cells,
+ ARRAY_SIZE(cros_ec_sensorhub_cells));
+ if (retval)
+ dev_err(ec->dev, "failed to add %s subdevice: %d\n",
+ cros_ec_sensorhub_cells->name, retval);
+ }
/*
* The following subdevices can be detected by sending the
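The change above collapses the per-sensor registration into a single "cros-ec-sensorhub" cell added with mfd_add_hotplug_devices(). A minimal sketch of that registration pattern, using a hypothetical foo-sensorhub cell name:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>

static const struct mfd_cell foo_hub_cells[] = {
        { .name = "foo-sensorhub", },
};

static int foo_register_hub(struct device *dev)
{
        int ret;

        /* mfd_add_hotplug_devices() assigns PLATFORM_DEVID_AUTO ids */
        ret = mfd_add_hotplug_devices(dev, foo_hub_cells,
                                      ARRAY_SIZE(foo_hub_cells));
        if (ret)
                dev_err(dev, "failed to add %s: %d\n",
                        foo_hub_cells[0].name, ret);

        return ret;
}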
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index f1825c0ccbd0..d0fb2e52ee76 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -27,121 +27,106 @@ enum cs5535_mfd_bars {
NR_BARS,
};
-static int cs5535_mfd_res_enable(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't fetch device resource info\n");
- return -EIO;
- }
-
- if (!request_region(res->start, resource_size(res), DRV_NAME)) {
- dev_err(&pdev->dev, "can't request region\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static int cs5535_mfd_res_disable(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't fetch device resource info\n");
- return -EIO;
- }
-
- release_region(res->start, resource_size(res));
- return 0;
-}
-
static struct resource cs5535_mfd_resources[NR_BARS];
static struct mfd_cell cs5535_mfd_cells[] = {
{
- .id = SMB_BAR,
.name = "cs5535-smb",
.num_resources = 1,
.resources = &cs5535_mfd_resources[SMB_BAR],
},
{
- .id = GPIO_BAR,
.name = "cs5535-gpio",
.num_resources = 1,
.resources = &cs5535_mfd_resources[GPIO_BAR],
},
{
- .id = MFGPT_BAR,
.name = "cs5535-mfgpt",
.num_resources = 1,
.resources = &cs5535_mfd_resources[MFGPT_BAR],
},
{
- .id = PMS_BAR,
.name = "cs5535-pms",
.num_resources = 1,
.resources = &cs5535_mfd_resources[PMS_BAR],
+ },
+};
- .enable = cs5535_mfd_res_enable,
- .disable = cs5535_mfd_res_disable,
+static struct mfd_cell cs5535_olpc_mfd_cells[] = {
+ {
+ .name = "olpc-xo1-pm-acpi",
+ .num_resources = 1,
+ .resources = &cs5535_mfd_resources[ACPI_BAR],
},
{
- .id = ACPI_BAR,
- .name = "cs5535-acpi",
+ .name = "olpc-xo1-sci-acpi",
.num_resources = 1,
.resources = &cs5535_mfd_resources[ACPI_BAR],
-
- .enable = cs5535_mfd_res_enable,
- .disable = cs5535_mfd_res_disable,
},
};
-static const char *olpc_acpi_clones[] = {
- "olpc-xo1-pm-acpi",
- "olpc-xo1-sci-acpi"
-};
-
static int cs5535_mfd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int err, i;
+ int err, bar;
err = pci_enable_device(pdev);
if (err)
return err;
- /* fill in IO range for each cell; subdrivers handle the region */
- for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) {
- int bar = cs5535_mfd_cells[i].id;
+ for (bar = 0; bar < NR_BARS; bar++) {
struct resource *r = &cs5535_mfd_resources[bar];
r->flags = IORESOURCE_IO;
r->start = pci_resource_start(pdev, bar);
r->end = pci_resource_end(pdev, bar);
+ }
- /* id is used for temporarily storing BAR; unset it now */
- cs5535_mfd_cells[i].id = 0;
+ err = pci_request_region(pdev, PMS_BAR, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request PMS_BAR's IO region\n");
+ goto err_disable;
}
- err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells,
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, cs5535_mfd_cells,
ARRAY_SIZE(cs5535_mfd_cells), NULL, 0, NULL);
if (err) {
- dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
- goto err_disable;
+ dev_err(&pdev->dev,
+ "Failed to add CS5535 sub-devices: %d\n", err);
+ goto err_release_pms;
}
- if (machine_is_olpc())
- mfd_clone_cell("cs5535-acpi", olpc_acpi_clones, ARRAY_SIZE(olpc_acpi_clones));
+ if (machine_is_olpc()) {
+ err = pci_request_region(pdev, ACPI_BAR, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to request ACPI_BAR's IO region\n");
+ goto err_remove_devices;
+ }
+
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ cs5535_olpc_mfd_cells,
+ ARRAY_SIZE(cs5535_olpc_mfd_cells),
+ NULL, 0, NULL);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to add CS5535 OLPC sub-devices: %d\n",
+ err);
+ goto err_release_acpi;
+ }
+ }
dev_info(&pdev->dev, "%zu devices registered.\n",
ARRAY_SIZE(cs5535_mfd_cells));
return 0;
+err_release_acpi:
+ pci_release_region(pdev, ACPI_BAR);
+err_remove_devices:
+ mfd_remove_devices(&pdev->dev);
+err_release_pms:
+ pci_release_region(pdev, PMS_BAR);
err_disable:
pci_disable_device(pdev);
return err;
@@ -150,6 +135,11 @@ err_disable:
static void cs5535_mfd_remove(struct pci_dev *pdev)
{
mfd_remove_devices(&pdev->dev);
+
+ if (machine_is_olpc())
+ pci_release_region(pdev, ACPI_BAR);
+
+ pci_release_region(pdev, PMS_BAR);
pci_disable_device(pdev);
}
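With the .enable()/.disable() resource hooks gone, the probe above requests the PCI BAR regions itself and releases them in reverse order on failure. A minimal sketch of that request/unwind ordering; the BAR indices and the "foo" name are placeholders:

#include <linux/pci.h>

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = pci_request_region(pdev, 0, "foo");       /* first BAR */
        if (err)
                goto err_disable;

        err = pci_request_region(pdev, 1, "foo");       /* second BAR */
        if (err)
                goto err_release_bar0;

        /* ... add MFD sub-devices here ... */
        return 0;

err_release_bar0:
        pci_release_region(pdev, 0);
err_disable:
        pci_disable_device(pdev);
        return err;
}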
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index dfac6afa82ca..57ac58b4b5f3 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -27,6 +27,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
@@ -668,6 +669,14 @@ struct prcmu_fw_version *prcmu_get_fw_version(void)
return fw_info.valid ? &fw_info.version : NULL;
}
+static bool prcmu_is_ulppll_disabled(void)
+{
+ struct prcmu_fw_version *ver;
+
+ ver = prcmu_get_fw_version();
+ return ver && ver->project == PRCMU_FW_PROJECT_U8420_SYSCLK;
+}
+
bool prcmu_has_arm_maxopp(void)
{
return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
@@ -1308,10 +1317,23 @@ static int request_sysclk(bool enable)
static int request_timclk(bool enable)
{
- u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
+ u32 val;
+
+ /*
+ * On the U8420_CLKSEL firmware, the ULP (Ultra Low Power)
+ * PLL is disabled, so we cannot use doze mode: it would
+ * stop the clock on this firmware.
+ */
+ if (prcmu_is_ulppll_disabled())
+ val = 0;
+ else
+ val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
if (!enable)
- val |= PRCM_TCR_STOP_TIMERS;
+ val |= PRCM_TCR_STOP_TIMERS |
+ PRCM_TCR_DOZE_MODE |
+ PRCM_TCR_TENSEL_MASK;
+
writel(val, PRCM_TCR);
return 0;
@@ -1615,7 +1637,8 @@ unsigned long prcmu_clock_rate(u8 clock)
if (clock < PRCMU_NUM_REG_CLOCKS)
return clock_rate(clock);
else if (clock == PRCMU_TIMCLK)
- return ROOT_CLOCK_RATE / 16;
+ return prcmu_is_ulppll_disabled() ?
+ 32768 : ROOT_CLOCK_RATE / 16;
else if (clock == PRCMU_SYSCLK)
return ROOT_CLOCK_RATE;
else if (clock == PRCMU_PLLSOC0)
@@ -2646,6 +2669,8 @@ static char *fw_project_name(u32 project)
return "U8520 MBL";
case PRCMU_FW_PROJECT_U8420:
return "U8420";
+ case PRCMU_FW_PROJECT_U8420_SYSCLK:
+ return "U8420-sysclk";
case PRCMU_FW_PROJECT_U9540:
return "U9540";
case PRCMU_FW_PROJECT_A9420:
@@ -2693,27 +2718,18 @@ static int db8500_irq_init(struct device_node *np)
return 0;
}
-static void dbx500_fw_version_init(struct platform_device *pdev,
- u32 version_offset)
+static void dbx500_fw_version_init(struct device_node *np)
{
- struct resource *res;
void __iomem *tcpm_base;
u32 version;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "prcmu-tcpm");
- if (!res) {
- dev_err(&pdev->dev,
- "Error: no prcmu tcpm memory region provided\n");
- return;
- }
- tcpm_base = ioremap(res->start, resource_size(res));
+ tcpm_base = of_iomap(np, 1);
if (!tcpm_base) {
- dev_err(&pdev->dev, "no prcmu tcpm mem region provided\n");
+ pr_err("no prcmu tcpm mem region provided\n");
return;
}
- version = readl(tcpm_base + version_offset);
+ version = readl(tcpm_base + DB8500_PRCMU_FW_VERSION_OFFSET);
fw_info.version.project = (version & 0xFF);
fw_info.version.api_version = (version >> 8) & 0xFF;
fw_info.version.func_version = (version >> 16) & 0xFF;
@@ -2731,7 +2747,7 @@ static void dbx500_fw_version_init(struct platform_device *pdev,
iounmap(tcpm_base);
}
-void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
+void __init db8500_prcmu_early_init(void)
{
/*
* This is a temporary remap to bring up the clocks. It is
@@ -2740,9 +2756,17 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
* clock driver can probe independently. An early initcall will
* still be needed, but it can be diverted into drivers/clk/ux500.
*/
- prcmu_base = ioremap(phy_base, size);
- if (!prcmu_base)
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu");
+ prcmu_base = of_iomap(np, 0);
+ if (!prcmu_base) {
+ of_node_put(np);
pr_err("%s: ioremap() of prcmu registers failed!\n", __func__);
+ return;
+ }
+ dbx500_fw_version_init(np);
+ of_node_put(np);
spin_lock_init(&mb0_transfer.lock);
spin_lock_init(&mb0_transfer.dbb_irqs_lock);
@@ -3024,20 +3048,13 @@ static const struct mfd_cell common_prcmu_devs[] = {
};
static const struct mfd_cell db8500_prcmu_devs[] = {
- {
- .name = "db8500-prcmu-regulators",
- .of_compatible = "stericsson,db8500-prcmu-regulator",
- .platform_data = &db8500_regulators,
- .pdata_size = sizeof(db8500_regulators),
- },
- {
- .name = "cpuidle-dbx500",
- .of_compatible = "stericsson,cpuidle-dbx500",
- },
- {
- .name = "db8500-thermal",
- .of_compatible = "stericsson,db8500-thermal",
- },
+ OF_MFD_CELL("db8500-prcmu-regulators", NULL,
+ &db8500_regulators, sizeof(db8500_regulators), 0,
+ "stericsson,db8500-prcmu-regulator"),
+ OF_MFD_CELL("cpuidle-dbx500",
+ NULL, NULL, 0, 0, "stericsson,cpuidle-dbx500"),
+ OF_MFD_CELL("db8500-thermal",
+ NULL, NULL, 0, 0, "stericsson,db8500-thermal"),
};
static int db8500_prcmu_register_ab8500(struct device *parent)
@@ -3091,7 +3108,6 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
return -ENOMEM;
}
init_prcm_registers();
- dbx500_fw_version_init(pdev, DB8500_PRCMU_FW_VERSION_OFFSET);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
if (!res) {
dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 9355db29d2f9..b33030e3385c 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -122,13 +122,25 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
.properties = apl_i2c_properties,
};
+static struct property_entry glk_i2c_properties[] = {
+ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 313),
+ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
+ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 290),
+ { },
+};
+
+static const struct intel_lpss_platform_info glk_i2c_info = {
+ .clk_rate = 133000000,
+ .properties = glk_i2c_properties,
+};
+
static const struct intel_lpss_platform_info cnl_i2c_info = {
.clk_rate = 216000000,
.properties = spt_i2c_properties,
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
- /* CML */
+ /* CML-LP */
{ PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&spt_info },
@@ -141,6 +153,17 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02ea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&spt_info },
+ /* CML-H */
+ { PCI_VDEVICE(INTEL, 0x06a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x06a9), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x06c7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x06e8), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x06e9), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x06ea), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x06eb), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&spt_info },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
@@ -174,14 +197,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
/* GLK */
- { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&glk_i2c_info },
{ PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info },
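The GLK entries above gain a dedicated property table instead of reusing the BXT one. A minimal sketch of such a property_entry array; the property names mirror the ones in the hunk, but the values and the foo_ prefix are illustrative only:

#include <linux/property.h>

static struct property_entry foo_i2c_properties[] = {
        PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 300),
        PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 300),
        { }     /* terminating sentinel */
};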
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index bfe4ff337581..b0f0781a6b9c 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -384,7 +384,7 @@ int intel_lpss_probe(struct device *dev,
if (!lpss)
return -ENOMEM;
- lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET,
+ lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
LPSS_PRIV_SIZE);
if (!lpss->priv)
return -ENOMEM;
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index ab09b8225b76..429efa1f8e55 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -89,6 +89,9 @@ static struct mfd_cell crystal_cove_cht_dev[] = {
.resources = gpio_resources,
},
{
+ .name = "cht_crystal_cove_pmic",
+ },
+ {
.name = "crystal_cove_pwm",
},
};
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index a1d9be82734d..e92eeeb67a98 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -396,11 +396,7 @@ static int __init micro_probe(struct platform_device *pdev)
if (IS_ERR(micro->base))
return PTR_ERR(micro->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -EINVAL;
-
- micro->sdlc = devm_ioremap_resource(&pdev->dev, res);
+ micro->sdlc = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(micro->sdlc))
return PTR_ERR(micro->sdlc);
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 29540cbf7593..a8cfadc1fc01 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -450,6 +450,21 @@ int madera_dev_init(struct madera *madera)
sizeof(madera->pdata));
}
+ madera->mclk[MADERA_MCLK1].id = "mclk1";
+ madera->mclk[MADERA_MCLK2].id = "mclk2";
+ madera->mclk[MADERA_MCLK3].id = "mclk3";
+
+ ret = devm_clk_bulk_get_optional(madera->dev, ARRAY_SIZE(madera->mclk),
+ madera->mclk);
+ if (ret) {
+ dev_err(madera->dev, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
+
+ /* Not using devm_clk_get to prevent breakage of existing DTs */
+ if (!madera->mclk[MADERA_MCLK2].clk)
+ dev_warn(madera->dev, "Missing MCLK2, requires 32kHz clock\n");
+
ret = madera_get_reset_gpio(madera);
if (ret)
return ret;
@@ -660,13 +675,19 @@ int madera_dev_init(struct madera *madera)
}
/* Init 32k clock sourced from MCLK2 */
+ ret = clk_prepare_enable(madera->mclk[MADERA_MCLK2].clk);
+ if (ret) {
+ dev_err(madera->dev, "Failed to enable 32k clock: %d\n", ret);
+ goto err_reset;
+ }
+
ret = regmap_update_bits(madera->regmap,
MADERA_CLOCK_32K_1,
MADERA_CLK_32K_ENA_MASK | MADERA_CLK_32K_SRC_MASK,
MADERA_CLK_32K_ENA | MADERA_32KZ_MCLK2);
if (ret) {
dev_err(madera->dev, "Failed to init 32k clock: %d\n", ret);
- goto err_reset;
+ goto err_clock;
}
pm_runtime_set_active(madera->dev);
@@ -687,6 +708,8 @@ int madera_dev_init(struct madera *madera)
err_pm_runtime:
pm_runtime_disable(madera->dev);
+err_clock:
+ clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk);
err_reset:
madera_enable_hard_reset(madera);
regulator_disable(madera->dcvdd);
@@ -713,6 +736,8 @@ int madera_dev_exit(struct madera *madera)
*/
pm_runtime_disable(madera->dev);
+ clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk);
+
regulator_disable(madera->dcvdd);
regulator_put(madera->dcvdd);
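The madera change above fetches the MCLKs as an optional clock bulk and only warns when MCLK2 is absent. A minimal sketch of that pattern, assuming two hypothetical clocks; devm_clk_bulk_get_optional() leaves .clk NULL for missing entries, and clk_prepare_enable() on a NULL clk is a no-op:

#include <linux/clk.h>

static int foo_get_clocks(struct device *dev, struct clk_bulk_data *clks)
{
        int ret;

        clks[0].id = "mclk1";
        clks[1].id = "mclk2";

        /* Missing clocks are not an error; their .clk stays NULL */
        ret = devm_clk_bulk_get_optional(dev, 2, clks);
        if (ret)
                return ret;

        /* Safe even if "mclk2" was not provided */
        return clk_prepare_enable(clks[1].clk);
}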
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index a851ff473a44..c7ed5c353553 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -507,7 +507,6 @@ static int max77620_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
chip->dev = &client->dev;
- chip->irq_base = -1;
chip->chip_irq = client->irq;
chip->chip_id = (enum max77620_chip_id)id->driver_data;
@@ -545,8 +544,8 @@ static int max77620_probe(struct i2c_client *client,
max77620_top_irq_chip.irq_drv_data = chip;
ret = devm_regmap_add_irq_chip(chip->dev, chip->rmap, client->irq,
- IRQF_ONESHOT | IRQF_SHARED,
- chip->irq_base, &max77620_top_irq_chip,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77620_top_irq_chip,
&chip->top_irq_data);
if (ret < 0) {
dev_err(chip->dev, "Failed to add regmap irq: %d\n", ret);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 23276a80e3b4..f5a73af60dd4 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -26,54 +26,28 @@ static struct device_type mfd_dev_type = {
int mfd_cell_enable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- int err = 0;
- /* only call enable hook if the cell wasn't previously enabled */
- if (atomic_inc_return(cell->usage_count) == 1)
- err = cell->enable(pdev);
-
- /* if the enable hook failed, decrement counter to allow retries */
- if (err)
- atomic_dec(cell->usage_count);
+ if (!cell->enable) {
+ dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
+ return 0;
+ }
- return err;
+ return cell->enable(pdev);
}
EXPORT_SYMBOL(mfd_cell_enable);
int mfd_cell_disable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- int err = 0;
-
- /* only disable if no other clients are using it */
- if (atomic_dec_return(cell->usage_count) == 0)
- err = cell->disable(pdev);
-
- /* if the disable hook failed, increment to allow retries */
- if (err)
- atomic_inc(cell->usage_count);
-
- /* sanity check; did someone call disable too many times? */
- WARN_ON(atomic_read(cell->usage_count) < 0);
- return err;
-}
-EXPORT_SYMBOL(mfd_cell_disable);
-
-static int mfd_platform_add_cell(struct platform_device *pdev,
- const struct mfd_cell *cell,
- atomic_t *usage_count)
-{
- if (!cell)
+ if (!cell->disable) {
+ dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
return 0;
+ }
- pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
- if (!pdev->mfd_cell)
- return -ENOMEM;
-
- pdev->mfd_cell->usage_count = usage_count;
- return 0;
+ return cell->disable(pdev);
}
+EXPORT_SYMBOL(mfd_cell_disable);
#if IS_ENABLED(CONFIG_ACPI)
static void mfd_acpi_add_device(const struct mfd_cell *cell,
@@ -134,7 +108,7 @@ static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
#endif
static int mfd_add_device(struct device *parent, int id,
- const struct mfd_cell *cell, atomic_t *usage_count,
+ const struct mfd_cell *cell,
struct resource *mem_base,
int irq_base, struct irq_domain *domain)
{
@@ -154,6 +128,10 @@ static int mfd_add_device(struct device *parent, int id,
if (!pdev)
goto fail_alloc;
+ pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
+ if (!pdev->mfd_cell)
+ goto fail_device;
+
res = kcalloc(cell->num_resources, sizeof(*res), GFP_KERNEL);
if (!res)
goto fail_device;
@@ -174,6 +152,11 @@ static int mfd_add_device(struct device *parent, int id,
if (parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
if (of_device_is_compatible(np, cell->of_compatible)) {
+ if (!of_device_is_available(np)) {
+ /* Skip disabled devices without reporting an error */
+ ret = 0;
+ goto fail_alias;
+ }
pdev->dev.of_node = np;
pdev->dev.fwnode = &np->fwnode;
break;
@@ -196,10 +179,6 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_alias;
}
- ret = mfd_platform_add_cell(pdev, cell, usage_count);
- if (ret)
- goto fail_alias;
-
for (r = 0; r < cell->num_resources; r++) {
res[r].name = cell->resources[r].name;
res[r].flags = cell->resources[r].flags;
@@ -286,16 +265,9 @@ int mfd_add_devices(struct device *parent, int id,
{
int i;
int ret;
- atomic_t *cnts;
-
- /* initialize reference counting for all cells */
- cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
- if (!cnts)
- return -ENOMEM;
for (i = 0; i < n_devs; i++) {
- atomic_set(&cnts[i], 0);
- ret = mfd_add_device(parent, id, cells + i, cnts + i, mem_base,
+ ret = mfd_add_device(parent, id, cells + i, mem_base,
irq_base, domain);
if (ret)
goto fail;
@@ -306,17 +278,15 @@ int mfd_add_devices(struct device *parent, int id,
fail:
if (i)
mfd_remove_devices(parent);
- else
- kfree(cnts);
+
return ret;
}
EXPORT_SYMBOL(mfd_add_devices);
-static int mfd_remove_devices_fn(struct device *dev, void *c)
+static int mfd_remove_devices_fn(struct device *dev, void *data)
{
struct platform_device *pdev;
const struct mfd_cell *cell;
- atomic_t **usage_count = c;
if (dev->type != &mfd_dev_type)
return 0;
@@ -327,20 +297,13 @@ static int mfd_remove_devices_fn(struct device *dev, void *c)
regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
cell->num_parent_supplies);
- /* find the base address of usage_count pointers (for freeing) */
- if (!*usage_count || (cell->usage_count < *usage_count))
- *usage_count = cell->usage_count;
-
platform_device_unregister(pdev);
return 0;
}
void mfd_remove_devices(struct device *parent)
{
- atomic_t *cnts = NULL;
-
- device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn);
- kfree(cnts);
+ device_for_each_child_reverse(parent, NULL, mfd_remove_devices_fn);
}
EXPORT_SYMBOL(mfd_remove_devices);
@@ -382,38 +345,5 @@ int devm_mfd_add_devices(struct device *dev, int id,
}
EXPORT_SYMBOL(devm_mfd_add_devices);
-int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
-{
- struct mfd_cell cell_entry;
- struct device *dev;
- struct platform_device *pdev;
- int i;
-
- /* fetch the parent cell's device (should already be registered!) */
- dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
- if (!dev) {
- printk(KERN_ERR "failed to find device for cell %s\n", cell);
- return -ENODEV;
- }
- pdev = to_platform_device(dev);
- memcpy(&cell_entry, mfd_get_cell(pdev), sizeof(cell_entry));
-
- WARN_ON(!cell_entry.enable);
-
- for (i = 0; i < n_clones; i++) {
- cell_entry.name = clones[i];
- /* don't give up if a single call fails; just report error */
- if (mfd_add_device(pdev->dev.parent, -1, &cell_entry,
- cell_entry.usage_count, NULL, 0, NULL))
- dev_err(dev, "failed to create platform device '%s'\n",
- clones[i]);
- }
-
- put_device(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(mfd_clone_cell);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
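After this cleanup, mfd_cell_enable()/mfd_cell_disable() invoke the cell's hooks directly, with no reference counting, so the hooks must be safe to call once per registered sub-device. A minimal sketch of a cell supplying such hooks (all foo_* names are hypothetical):

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static int foo_cell_enable(struct platform_device *pdev)
{
        /* e.g. request the cell's I/O region or power it up */
        return 0;
}

static int foo_cell_disable(struct platform_device *pdev)
{
        /* undo whatever foo_cell_enable() did */
        return 0;
}

static const struct mfd_cell foo_cells[] = {
        {
                .name    = "foo-sub",
                .enable  = foo_cell_enable,
                .disable = foo_cell_disable,
        },
};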
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index b2c325ead1c8..0437c858d115 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -189,16 +189,16 @@ static int mt6397_probe(struct platform_device *pdev)
switch (pmic->chip_id) {
case MT6323_CHIP_ID:
- ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
- ARRAY_SIZE(mt6323_devs), NULL,
- 0, pmic->irq_domain);
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ mt6323_devs, ARRAY_SIZE(mt6323_devs),
+ NULL, 0, pmic->irq_domain);
break;
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
- ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
- ARRAY_SIZE(mt6397_devs), NULL,
- 0, pmic->irq_domain);
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ mt6397_devs, ARRAY_SIZE(mt6397_devs),
+ NULL, 0, pmic->irq_domain);
break;
default:
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index e8fe705073fa..1df1a2711328 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -31,6 +31,8 @@
#define PM8916_SUBTYPE 0x0b
#define PM8004_SUBTYPE 0x0c
#define PM8909_SUBTYPE 0x0d
+#define PM8950_SUBTYPE 0x10
+#define PMI8950_SUBTYPE 0x11
#define PM8998_SUBTYPE 0x14
#define PMI8998_SUBTYPE 0x15
#define PM8005_SUBTYPE 0x18
@@ -50,6 +52,8 @@ static const struct of_device_id pmic_spmi_id_table[] = {
{ .compatible = "qcom,pm8916", .data = (void *)PM8916_SUBTYPE },
{ .compatible = "qcom,pm8004", .data = (void *)PM8004_SUBTYPE },
{ .compatible = "qcom,pm8909", .data = (void *)PM8909_SUBTYPE },
+ { .compatible = "qcom,pm8950", .data = (void *)PM8950_SUBTYPE },
+ { .compatible = "qcom,pmi8950", .data = (void *)PMI8950_SUBTYPE },
{ .compatible = "qcom,pm8998", .data = (void *)PM8998_SUBTYPE },
{ .compatible = "qcom,pmi8998", .data = (void *)PMI8998_SUBTYPE },
{ .compatible = "qcom,pm8005", .data = (void *)PM8005_SUBTYPE },
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 050478cabc95..a69a6742ecdc 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -109,11 +109,7 @@ static const struct regmap_config rk817_regmap_config = {
};
static struct resource rtc_resources[] = {
- {
- .start = RK808_IRQ_RTC_ALARM,
- .end = RK808_IRQ_RTC_ALARM,
- .flags = IORESOURCE_IRQ,
- }
+ DEFINE_RES_IRQ(RK808_IRQ_RTC_ALARM),
};
static struct resource rk817_rtc_resources[] = {
@@ -121,16 +117,8 @@ static struct resource rk817_rtc_resources[] = {
};
static struct resource rk805_key_resources[] = {
- {
- .start = RK805_IRQ_PWRON_FALL,
- .end = RK805_IRQ_PWRON_FALL,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = RK805_IRQ_PWRON_RISE,
- .end = RK805_IRQ_PWRON_RISE,
- .flags = IORESOURCE_IRQ,
- }
+ DEFINE_RES_IRQ(RK805_IRQ_PWRON_RISE),
+ DEFINE_RES_IRQ(RK805_IRQ_PWRON_FALL),
};
static struct resource rk817_pwrkey_resources[] = {
@@ -167,7 +155,7 @@ static const struct mfd_cell rk817s[] = {
{ .name = "rk808-clkout",},
{ .name = "rk808-regulator",},
{
- .name = "rk8xx-pwrkey",
+ .name = "rk805-pwrkey",
.num_resources = ARRAY_SIZE(rk817_pwrkey_resources),
.resources = &rk817_pwrkey_resources[0],
},
@@ -215,7 +203,7 @@ static const struct rk808_reg_data rk808_pre_init_reg[] = {
static const struct rk808_reg_data rk817_pre_init_reg[] = {
{RK817_RTC_CTRL_REG, RTC_STOP, RTC_STOP},
- {RK817_GPIO_INT_CFG, RK817_INT_POL_MSK, RK817_INT_POL_H},
+ {RK817_GPIO_INT_CFG, RK817_INT_POL_MSK, RK817_INT_POL_L},
{RK817_SYS_CFG(1), RK817_HOTDIE_TEMP_MSK | RK817_TSD_TEMP_MSK,
RK817_HOTDIE_105 | RK817_TSD_140},
};
diff --git a/drivers/mfd/rohm-bd70528.c b/drivers/mfd/rohm-bd70528.c
index 55599d5c5c86..ef6786fd3b00 100644
--- a/drivers/mfd/rohm-bd70528.c
+++ b/drivers/mfd/rohm-bd70528.c
@@ -105,15 +105,14 @@ static struct regmap_config bd70528_regmap = {
* register.
*/
-/* bit [0] - Shutdown register */
-unsigned int bit0_offsets[] = {0}; /* Shutdown register */
-unsigned int bit1_offsets[] = {1}; /* Power failure register */
-unsigned int bit2_offsets[] = {2}; /* VR FAULT register */
-unsigned int bit3_offsets[] = {3}; /* PMU register interrupts */
-unsigned int bit4_offsets[] = {4, 5}; /* Charger 1 and Charger 2 registers */
-unsigned int bit5_offsets[] = {6}; /* RTC register */
-unsigned int bit6_offsets[] = {7}; /* GPIO register */
-unsigned int bit7_offsets[] = {8}; /* Invalid operation register */
+static unsigned int bit0_offsets[] = {0}; /* Shutdown */
+static unsigned int bit1_offsets[] = {1}; /* Power failure */
+static unsigned int bit2_offsets[] = {2}; /* VR FAULT */
+static unsigned int bit3_offsets[] = {3}; /* PMU interrupts */
+static unsigned int bit4_offsets[] = {4, 5}; /* Charger 1 and Charger 2 */
+static unsigned int bit5_offsets[] = {6}; /* RTC */
+static unsigned int bit6_offsets[] = {7}; /* GPIO */
+static unsigned int bit7_offsets[] = {8}; /* Invalid operation */
static struct regmap_irq_sub_irq_map bd70528_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 660723276481..e22197c832e8 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -105,7 +105,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
- syscon_config.name = of_node_full_name(np);
regmap = regmap_init_mmio(NULL, base, &syscon_config);
if (IS_ERR(regmap)) {
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index fd111296b959..926c289cb040 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -182,11 +182,11 @@ static int ti_tscadc_probe(struct platform_device *pdev)
tscadc->irq = err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tscadc->tscadc_phys_base = res->start;
tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(tscadc->tscadc_base))
return PTR_ERR(tscadc->tscadc_base);
+ tscadc->tscadc_phys_base = res->start;
tscadc->regmap = devm_regmap_init_mmio(&pdev->dev,
tscadc->tscadc_base, &tscadc_regmap_config);
if (IS_ERR(tscadc->regmap)) {
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
index ebf0eadd2075..9b34a6d76094 100644
--- a/drivers/mfd/wm8998-tables.c
+++ b/drivers/mfd/wm8998-tables.c
@@ -806,12 +806,6 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
{ 0x00000EF4, 0x0001 }, /* R3828 - ISRC 2 CTRL 2 */
{ 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
- { 0x00001700, 0x0000 }, /* R5888 - FRF_COEFF_1 */
- { 0x00001701, 0x0000 }, /* R5889 - FRF_COEFF_2 */
- { 0x00001702, 0x0000 }, /* R5890 - FRF_COEFF_3 */
- { 0x00001703, 0x0000 }, /* R5891 - FRF_COEFF_4 */
- { 0x00001704, 0x0000 }, /* R5892 - DAC_COMP_1 */
- { 0x00001705, 0x0000 }, /* R5893 - DAC_COMP_2 */
};
static bool wm8998_readable_register(struct device *dev, unsigned int reg)
@@ -1492,12 +1486,6 @@ static bool wm8998_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ISRC_2_CTRL_1:
case ARIZONA_ISRC_2_CTRL_2:
case ARIZONA_ISRC_2_CTRL_3:
- case ARIZONA_FRF_COEFF_1:
- case ARIZONA_FRF_COEFF_2:
- case ARIZONA_FRF_COEFF_3:
- case ARIZONA_FRF_COEFF_4:
- case ARIZONA_V2_DAC_COMP_1:
- case ARIZONA_V2_DAC_COMP_2:
return true;
default:
return false;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c55b63750757..7f0d48f406e3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -8,7 +8,6 @@ menu "Misc devices"
config SENSORS_LIS3LV02D
tristate
depends on INPUT
- select INPUT_POLLDEV
config AD525X_DPOT
tristate "Analog Devices Digital Potentiometers"
@@ -326,14 +325,14 @@ config SENSORS_TSL2550
will be called tsl2550.
config SENSORS_BH1770
- tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
- depends on I2C
- ---help---
- Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+ tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+ depends on I2C
+ ---help---
+ Say Y here if you want to build a driver for BH1770GLC (ROHM) or
SFH7770 (Osram) combined ambient light and proximity sensor chip.
- To compile this driver as a module, choose M here: the
- module will be called bh1770glc. If unsure, say N here.
+ To compile this driver as a module, choose M here: the
+ module will be called bh1770glc. If unsure, say N here.
config SENSORS_APDS990X
tristate "APDS990X combined als and proximity sensors"
@@ -438,8 +437,8 @@ config PCI_ENDPOINT_TEST
select CRC32
tristate "PCI Endpoint Test driver"
---help---
- Enable this configuration option to enable the host side test driver
- for PCI Endpoint.
+ Enable this configuration option to enable the host side test driver
+ for PCI Endpoint.
config XILINX_SDFEC
tristate "Xilinx SDFEC 16"
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 08b5b639d77f..7de7840f613c 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -109,7 +109,6 @@ static int __init tc_probe(struct platform_device *pdev)
struct atmel_tc *tc;
struct clk *clk;
int irq;
- struct resource *r;
unsigned int i;
if (of_get_child_count(pdev->dev.of_node))
@@ -133,8 +132,7 @@ static int __init tc_probe(struct platform_device *pdev)
if (IS_ERR(tc->slow_clk))
return PTR_ERR(tc->slow_clk);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tc->regs = devm_ioremap_resource(&pdev->dev, r);
+ tc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tc->regs))
return PTR_ERR(tc->regs);
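The conversion above uses the devm_platform_ioremap_resource() shorthand, which fetches MEM resource 0 and ioremaps it in a single call. A minimal sketch (the foo_ name is illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        void __iomem *regs;

        /* Equivalent to platform_get_resource() + devm_ioremap_resource() */
        regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        /* ... program the registers ... */
        return 0;
}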
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index d9bff5a2217e..1f56267ed2f4 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 40a6d199f2ea..4214f02a17fd 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -191,7 +191,6 @@ static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
{
- int err = 0;
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
@@ -231,7 +230,7 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
- return err;
+ return 0;
}
static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
new file mode 100644
index 000000000000..32dcec2e9dfd
--- /dev/null
+++ b/drivers/misc/cardreader/rts5261.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5261.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5261_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x13, 0x13, 0x13},
+ {0x96, 0x96, 0x96},
+ {0x7F, 0x7F, 0x7F},
+ {0x96, 0x96, 0x96},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0x3A, 0x3A, 0x3A},
+ {0xE6, 0xE6, 0xE6},
+ {0xB3, 0xB3, 0xB3},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+ /* 0x814~0x817 */
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+ if (!rts5261_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rts5261_reg_to_card_drive_sel(reg);
+
+ if (rts5261_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+
+ /* 0x724~0x727 */
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ pcr->aspm_en = rts5261_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg);
+}
+
+static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5261_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5261_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5261_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5261_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5261_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5261_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5261_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5261_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG1,
+ RTS5261_LDO1_TUNE_MASK, RTS5261_LDO1_33);
+ rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO1_POWERON, RTS5261_LDO1_POWERON);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO3318_POWERON, RTS5261_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5261_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5261_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u16 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5261_CARD_PWR_CTL,
+ RTS5261_PUPDC, RTS5261_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val |= PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
+ RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val &= ~PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
+ RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5261_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5261_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5261_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5261_stop_cmd(pcr);
+ rts5261_switch_output_voltage(pcr, OUTPUT_3V3);
+
+}
+
+static void rts5261_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+
+}
+
+static void rts5261_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
+
+}
+
+static int rts5261_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5261_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO_POWERON_MASK, 0);
+
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5261_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_LMT_THD_MASK,
+ RTS5261_LDO1_LMT_THD_2000);
+
+ mask = SD_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rts5261_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
+ }
+}
+
+static void rts5261_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+
+}
+
+static void rts5261_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rts5261_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ rts5261_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ }
+
+}
+
+static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
+{
+ int retval;
+ u32 lval, i;
+ u8 valid, efuse_valid, tmp;
+
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR | REG_EFUSE_POWER_MASK,
+ REG_EFUSE_POR | REG_EFUSE_POWERON);
+ udelay(1);
+ rtsx_pci_write_register(pcr, RTS5261_EFUSE_ADDR,
+ RTS5261_EFUSE_ADDR_MASK, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_EFUSE_CTL,
+ RTS5261_EFUSE_ENABLE | RTS5261_EFUSE_MODE_MASK,
+ RTS5261_EFUSE_ENABLE);
+
+ /* Wait transfer end */
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ rtsx_pci_read_register(pcr, RTS5261_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS5261_EFUSE_READ_DATA, &tmp);
+ efuse_valid = ((tmp & 0x0C) >> 2);
+ pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
+
+ if (efuse_valid == 0) {
+ retval = rtsx_pci_read_config_dword(pcr,
+ PCR_SETTING_REG2, &lval);
+ if (retval != 0)
+ pcr_dbg(pcr, "read 0x814 DW fail\n");
+ pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
+ /* 0x816 */
+ valid = (u8)((lval >> 16) & 0x03);
+ pcr_dbg(pcr, "0x816: %d\n", valid);
+ }
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR, 0);
+ pcr_dbg(pcr, "Disable efuse por!\n");
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &lval);
+ lval = lval & 0x00FFFFFF;
+ retval = rtsx_pci_write_config_dword(pcr, PCR_SETTING_REG2, lval);
+ if (retval != 0)
+ pcr_dbg(pcr, "write config fail\n");
+
+ return retval;
+}
+
+static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ u32 lval;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+
+ rts5261_init_from_cfg(pcr);
+ rts5261_init_from_hw(pcr);
+
+ /* power off efuse */
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_AUX_CLK_16M_EN, 0);
+
+ /* Release PRSNT# */
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_FORCE_PRSNT_LOW, 0);
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+ FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5261_fill_driving(pcr, OUTPUT_3V3);
+
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * low and the clock is forcibly requested.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+ /* Clear Enter RTD3_cold Information*/
+ rtsx_pci_write_register(pcr, RTS5261_FW_CTL,
+ RTS5261_INFORM_RTD3_COLD, 0);
+
+ return 0;
+}
+
+static void rts5261_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ val = FORCE_ASPM_CTL0;
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ }
+ pcr->aspm_enabled = enable;
+
+}
+
+static void rts5261_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ val = 0;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ val = 0;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ udelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5261_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5261_enable_aspm(pcr, true);
+ else
+ rts5261_disable_aspm(pcr, false);
+}
+
+static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5261_pcr_ops = {
+ .fetch_vendor_settings = rtsx5261_fetch_vendor_settings,
+ .turn_on_led = rts5261_turn_on_led,
+ .turn_off_led = rts5261_turn_off_led,
+ .extra_init_hw = rts5261_extra_init_hw,
+ .enable_auto_blink = rts5261_enable_auto_blink,
+ .disable_auto_blink = rts5261_disable_auto_blink,
+ .card_power_on = rts5261_card_power_on,
+ .card_power_off = rts5261_card_power_off,
+ .switch_output_voltage = rts5261_switch_output_voltage,
+ .force_power_down = rts5261_force_power_down,
+ .stop_cmd = rts5261_stop_cmd,
+ .set_aspm = rts5261_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5261_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5261_enable_ocp,
+ .disable_ocp = rts5261_disable_ocp,
+ .init_ocp = rts5261_init_ocp,
+ .process_ocp = rts5261_process_ocp,
+ .clear_ocpstat = rts5261_clear_ocpstat,
+};
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u8 n, clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5261_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5261_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+ /* Use roughly 250 kHz in the initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = (u8)(clk - 4);
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = (u8)(125/clk + 3);
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2);
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+ /* Wait for the SSC clock to stabilize */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+
+}
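The divider search in rts5261_pci_switch_clock() maps the requested card clock onto the SSC divider: with no conv_clk_and_div_n hook, n starts at clk - 4, is repeatedly doubled while the output divider is stepped up until n reaches the controller minimum, and the RTS5261 finally programs n/2 into SSC_DIV_N_0. A minimal standalone sketch of that search follows; it is illustrative only, since MIN_DIV_N_PCR and the CLK_DIV_* encoding live in rtsx_pcr.h and the numeric placeholders here are assumptions.

/* Hedged sketch only; 80 and the 0..3 divider steps are placeholders. */
static unsigned int sketch_ssc_div_n(unsigned int clk_mhz, unsigned int *div_steps)
{
        unsigned int n = clk_mhz - 4;           /* default clk -> N mapping */
        unsigned int steps = 0;                 /* 0 ~ CLK_DIV_1 ... 3 ~ CLK_DIV_8 */
        const unsigned int min_div_n = 80;      /* assumed MIN_DIV_N_PCR */

        while (n < min_div_n - 4 && steps < 3) {
                n = (n + 4) * 2 - 4;            /* double the internal clock */
                steps++;                        /* and divide it back down */
        }

        *div_steps = steps;
        return n / 2;                           /* RTS5261 programs N/2 into SSC_DIV_N_0 */
}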
+
+void rts5261_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5261_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 27, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5261_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5261_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5261_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5261_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5261_LDO1_OCP_THD_1040;
+}
diff --git a/drivers/misc/cardreader/rts5261.h b/drivers/misc/cardreader/rts5261.h
new file mode 100644
index 000000000000..ebfdd236a553
--- /dev/null
+++ b/drivers/misc/cardreader/rts5261.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+#ifndef RTS5261_H
+#define RTS5261_H
+
+/* New additions for RTS5261 */
+#define rts5261_vendor_setting_valid(reg) ((reg) & 0x010000)
+#define rts5261_reg_to_aspm(reg) (((reg) >> 28) ^ 0x03)
+#define rts5261_reg_check_reverse_socket(reg) ((reg) & 0x04)
+#define rts5261_reg_to_card_drive_sel(reg) ((((reg) >> 6) & 0x01) << 6)
+#define rts5261_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) ^ 0x03)
+#define rts5261_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) ^ 0x03)
+
+
+#define RTS5261_AUTOLOAD_CFG0 0xFF7B
+#define RTS5261_AUTOLOAD_CFG1 0xFF7C
+#define RTS5261_AUTOLOAD_CFG2 0xFF7D
+#define RTS5261_AUTOLOAD_CFG3 0xFF7E
+#define RTS5261_AUTOLOAD_CFG4 0xFF7F
+#define RTS5261_FORCE_PRSNT_LOW (1 << 6)
+#define RTS5261_AUX_CLK_16M_EN (1 << 5)
+
+#define RTS5261_REG_VREF 0xFE97
+#define RTS5261_PWD_SUSPND_EN (1 << 4)
+
+#define RTS5261_PAD_H3L1 0xFF79
+#define PAD_GPIO_H3L1 (1 << 3)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5261_SSC_DEPTH_MASK 0x07
+#define RTS5261_SSC_DEPTH_DISALBE 0x00
+#define RTS5261_SSC_DEPTH_8M 0x01
+#define RTS5261_SSC_DEPTH_4M 0x02
+#define RTS5261_SSC_DEPTH_2M 0x03
+#define RTS5261_SSC_DEPTH_1M 0x04
+#define RTS5261_SSC_DEPTH_512K 0x05
+#define RTS5261_SSC_DEPTH_256K 0x06
+#define RTS5261_SSC_DEPTH_128K 0x07
+
+/* efuse control register */
+#define RTS5261_EFUSE_CTL 0xFC30
+#define RTS5261_EFUSE_ENABLE 0x80
+/* EFUSE_MODE: 0=READ 1=PROGRAM */
+#define RTS5261_EFUSE_MODE_MASK 0x40
+#define RTS5261_EFUSE_PROGRAM 0x40
+
+#define RTS5261_EFUSE_ADDR 0xFC31
+#define RTS5261_EFUSE_ADDR_MASK 0x3F
+
+#define RTS5261_EFUSE_WRITE_DATA 0xFC32
+#define RTS5261_EFUSE_READ_DATA 0xFC34
+
+/* DMACTL 0xFE2C */
+#define RTS5261_DMA_PACK_SIZE_MASK 0xF0
+
+/* FW config info register */
+#define RTS5261_FW_CFG_INFO0 0xFF50
+#define RTS5261_FW_EXPRESS_TEST_MASK (0x01<<0)
+#define RTS5261_FW_EA_MODE_MASK (0x01<<5)
+
+/* FW config register */
+#define RTS5261_FW_CFG0 0xFF54
+#define RTS5261_FW_ENTER_EXPRESS (0x01<<0)
+
+#define RTS5261_FW_CFG1 0xFF55
+#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01<<7)
+#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01<<6)
+#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01<<5)
+/* MCU_bus_mode_sel: 0=real 8051, 1=fake MCU */
+#define RTS5261_MCU_BUS_SEL_MASK (0x01<<4)
+/* MCU_clock_sel: VerA 00=aux16M 01=aux400K 1x=REFCLK100M */
+/* MCU_clock_sel: VerB 00=aux400K 01=aux16M 10=REFCLK100M */
+#define RTS5261_MCU_CLOCK_SEL_MASK (0x03<<2)
+#define RTS5261_MCU_CLOCK_SEL_16M (0x01<<2)
+#define RTS5261_MCU_CLOCK_GATING (0x01<<1)
+#define RTS5261_DRIVER_ENABLE_FW (0x01<<0)
+
+/* FW status register */
+#define RTS5261_FW_STATUS 0xFF56
+#define RTS5261_EXPRESS_LINK_FAIL_MASK (0x01<<7)
+
+/* FW control register */
+#define RTS5261_FW_CTL 0xFF5F
+#define RTS5261_INFORM_RTD3_COLD (0x01<<5)
+
+#define RTS5261_REG_FPDCTL 0xFF60
+
+#define RTS5261_REG_LDO12_CFG 0xFF6E
+#define RTS5261_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO12_115 (0x03<<1)
+#define RTS5261_LDO12_120 (0x04<<1)
+#define RTS5261_LDO12_125 (0x05<<1)
+#define RTS5261_LDO12_130 (0x06<<1)
+#define RTS5261_LDO12_135 (0x07<<1)
+
+/* LDO control register */
+#define RTS5261_CARD_PWR_CTL 0xFD50
+#define RTS5261_SD_CLK_ISO (0x01<<7)
+#define RTS5261_PAD_SD_DAT_FW_CTRL (0x01<<6)
+#define RTS5261_PUPDC (0x01<<5)
+#define RTS5261_SD_CMD_ISO (0x01<<4)
+#define RTS5261_SD_DAT_ISO_MASK (0x0F<<0)
+
+#define RTS5261_LDO1233318_POW_CTL 0xFF70
+#define RTS5261_LDO3318_POWERON (0x01<<3)
+#define RTS5261_LDO3_POWERON (0x01<<2)
+#define RTS5261_LDO2_POWERON (0x01<<1)
+#define RTS5261_LDO1_POWERON (0x01<<0)
+#define RTS5261_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5261_DV3318_CFG 0xFF71
+#define RTS5261_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5261_DV3318_18 (0x02<<4)
+#define RTS5261_DV3318_19 (0x04<<4)
+#define RTS5261_DV3318_33 (0x07<<4)
+
+#define RTS5261_LDO1_CFG0 0xFF72
+#define RTS5261_LDO1_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO1_OCP_EN (0x01<<4)
+#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO1_OCP_LMT_EN (0x01<<1)
+
+/* CRD6603-433 190319 request changed */
+#define RTS5261_LDO1_OCP_THD_740 (0x00<<5)
+#define RTS5261_LDO1_OCP_THD_800 (0x01<<5)
+#define RTS5261_LDO1_OCP_THD_860 (0x02<<5)
+#define RTS5261_LDO1_OCP_THD_920 (0x03<<5)
+#define RTS5261_LDO1_OCP_THD_980 (0x04<<5)
+#define RTS5261_LDO1_OCP_THD_1040 (0x05<<5)
+#define RTS5261_LDO1_OCP_THD_1100 (0x06<<5)
+#define RTS5261_LDO1_OCP_THD_1160 (0x07<<5)
+
+#define RTS5261_LDO1_LMT_THD_450 (0x00<<2)
+#define RTS5261_LDO1_LMT_THD_1000 (0x01<<2)
+#define RTS5261_LDO1_LMT_THD_1500 (0x02<<2)
+#define RTS5261_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5261_LDO1_CFG1 0xFF73
+#define RTS5261_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO1_18 (0x05<<1)
+#define RTS5261_LDO1_33 (0x07<<1)
+#define RTS5261_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5261_LDO2_CFG0 0xFF74
+#define RTS5261_LDO2_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO2_OCP_EN (0x01<<4)
+#define RTS5261_LDO2_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO2_OCP_LMT_EN (0x01<<1)
+
+#define RTS5261_LDO2_OCP_THD_620 (0x00<<5)
+#define RTS5261_LDO2_OCP_THD_650 (0x01<<5)
+#define RTS5261_LDO2_OCP_THD_680 (0x02<<5)
+#define RTS5261_LDO2_OCP_THD_720 (0x03<<5)
+#define RTS5261_LDO2_OCP_THD_750 (0x04<<5)
+#define RTS5261_LDO2_OCP_THD_780 (0x05<<5)
+#define RTS5261_LDO2_OCP_THD_810 (0x06<<5)
+#define RTS5261_LDO2_OCP_THD_840 (0x07<<5)
+
+#define RTS5261_LDO2_CFG1 0xFF75
+#define RTS5261_LDO2_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO2_18 (0x05<<1)
+#define RTS5261_LDO2_33 (0x07<<1)
+#define RTS5261_LDO2_PWD_MASK (0x01<<0)
+
+#define RTS5261_LDO3_CFG0 0xFF76
+#define RTS5261_LDO3_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO3_OCP_EN (0x01<<4)
+#define RTS5261_LDO3_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO3_OCP_LMT_EN (0x01<<1)
+
+#define RTS5261_LDO3_OCP_THD_620 (0x00<<5)
+#define RTS5261_LDO3_OCP_THD_650 (0x01<<5)
+#define RTS5261_LDO3_OCP_THD_680 (0x02<<5)
+#define RTS5261_LDO3_OCP_THD_720 (0x03<<5)
+#define RTS5261_LDO3_OCP_THD_750 (0x04<<5)
+#define RTS5261_LDO3_OCP_THD_780 (0x05<<5)
+#define RTS5261_LDO3_OCP_THD_810 (0x06<<5)
+#define RTS5261_LDO3_OCP_THD_840 (0x07<<5)
+
+#define RTS5261_LDO3_CFG1 0xFF77
+#define RTS5261_LDO3_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO3_18 (0x05<<1)
+#define RTS5261_LDO3_33 (0x07<<1)
+#define RTS5261_LDO3_PWD_MASK (0x01<<0)
+
+#define RTS5261_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+#define REG_EFUSE_BYPASS 0x08
+#define REG_EFUSE_POR 0x04
+#define REG_EFUSE_POWER_MASK 0x03
+#define REG_EFUSE_POWERON 0x03
+#define REG_EFUSE_POWEROFF 0x00
+
+
+/* Single LUN, support SD/SD EXPRESS */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+#define SD_EXPRESS_LUN 2
+
+/* For Change_FPGA_SSCClock Function */
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5261_H */
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index b4a66b64f742..fd7b2167103d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -22,6 +22,7 @@
#include <asm/unaligned.h>
#include "rtsx_pcr.h"
+#include "rts5261.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -34,9 +35,6 @@ static struct mfd_cell rtsx_pcr_cells[] = {
[RTSX_SD_CARD] = {
.name = DRV_NAME_RTSX_PCI_SDMMC,
},
- [RTSX_MS_CARD] = {
- .name = DRV_NAME_RTSX_PCI_MS,
- },
};
static const struct pci_device_id rtsx_pci_ids[] = {
@@ -51,6 +49,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -438,8 +437,16 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
if (end)
option |= RTSX_SG_END;
- val = ((u64)addr << 32) | ((u64)len << 12) | option;
+ if (PCI_PID(pcr) == PID_5261) {
+ if (len > 0xFFFF)
+ val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
+ | (((u64)len >> 16) << 6) | option;
+ else
+ val = ((u64)addr << 32) | ((u64)len << 16) | option;
+ } else {
+ val = ((u64)addr << 32) | ((u64)len << 12) | option;
+ }
put_unaligned_le64(val, ptr);
pcr->sgi++;
}
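The hunk above changes how the 64-bit scatter-gather descriptor packs the transfer length on the RTS5261: lengths up to 16 bits sit at bit 16, and longer transfers split the length with the low 16 bits at bit 16 and the remaining bits at bit 6, while older parts keep the length at bit 12. An illustrative helper mirroring that packing (kernel integer types assumed; not part of the patch):

static u64 sketch_sg_val_5261(u32 addr, u32 len, u8 option)
{
        /* Mirrors rtsx_pci_add_sg_tbl() for PID_5261; illustration only. */
        if (len > 0xFFFF)
                return ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16) |
                       (((u64)len >> 16) << 6) | option;

        return ((u64)addr << 32) | ((u64)len << 16) | option;
}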
@@ -684,7 +691,6 @@ int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
else
return -EINVAL;
-
return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
@@ -735,6 +741,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
};
+ if (PCI_PID(pcr) == PID_5261)
+ return rts5261_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
+
if (initial_mode) {
/* We use 250k(around) here, in initial stage */
clk_divider = SD_CLK_DIVIDE_128;
@@ -1253,7 +1263,15 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_enable_bus_int(pcr);
/* Power on SSC */
- err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ if (PCI_PID(pcr) == PID_5261) {
+ /* Gate the real MCU clock */
+ err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
+ RTS5261_MCU_CLOCK_GATING, 0);
+ err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
+ SSC_POWER_DOWN, 0);
+ } else {
+ err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ }
if (err < 0)
return err;
@@ -1283,7 +1301,12 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
/* Enable SSC Clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
0xFF, SSC_8X_EN | SSC_SEL_4M);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+ if (PCI_PID(pcr) == PID_5261)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5261_SSC_DEPTH_2M);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+
/* Disable cd_pwr_save */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
/* Clear Link Ready Interrupt */
@@ -1314,6 +1337,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_524A:
case PID_525A:
case PID_5260:
+ case PID_5261:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1393,9 +1417,14 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+
case 0x5260:
rts5260_init_params(pcr);
break;
+
+ case 0x5261:
+ rts5261_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 98f729263dc1..ed391df52f4f 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -53,6 +53,7 @@ void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
+void rts5261_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 4d6836f19489..cb9cca35a226 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -473,12 +473,6 @@ static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
-static long device_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return device_ioctl(file, cmd, arg);
-}
-
static int device_close(struct inode *inode, struct file *file)
{
struct cxl *adapter = file->private_data;
@@ -514,7 +508,7 @@ static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = device_open,
.unlocked_ioctl = device_ioctl,
- .compat_ioctl = device_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.release = device_close,
};
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2cccd82a3106..0681d5fdd538 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -716,9 +716,12 @@ static int at24_probe(struct i2c_client *client)
return -ENODEV;
}
- dev_info(dev, "%u byte %s EEPROM, %s, %u bytes/write\n",
- byte_len, client->name,
- writable ? "writable" : "read-only", at24->write_max);
+ if (writable)
+ dev_info(dev, "%u byte %s EEPROM, writable, %u bytes/write\n",
+ byte_len, client->name, at24->write_max);
+ else
+ dev_info(dev, "%u byte %s EEPROM, read-only\n",
+ byte_len, client->name);
return 0;
}
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2cfe3d4ae144..226b5efa6a77 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -175,6 +175,10 @@ static int eeprom_probe(struct i2c_client *client,
}
}
+ /* Let users know they are using a deprecated driver */
+ dev_notice(&client->dev,
+ "eeprom driver is deprecated, please use at24 instead\n");
+
/* create the sysfs eeprom file */
return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 1b1a794d639d..ae4ee27a63c4 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -32,8 +32,9 @@
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_CTXID_MASK (0xFF0)
-#define INIT_FILELEN_MAX (64 * 1024 * 1024)
+#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
+#define ADSP_MMAP_ADD_PAGES 0x1000
 /* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
@@ -66,6 +67,8 @@
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH 0
#define FASTRPC_RMID_INIT_RELEASE 1
+#define FASTRPC_RMID_INIT_MMAP 4
+#define FASTRPC_RMID_INIT_MUNMAP 5
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
@@ -89,6 +92,23 @@ struct fastrpc_remote_arg {
u64 len;
};
+struct fastrpc_mmap_rsp_msg {
+ u64 vaddr;
+};
+
+struct fastrpc_mmap_req_msg {
+ s32 pgid;
+ u32 flags;
+ u64 vaddr;
+ s32 num;
+};
+
+struct fastrpc_munmap_req_msg {
+ s32 pgid;
+ u64 vaddr;
+ u64 size;
+};
+
struct fastrpc_msg {
int pid; /* process group id */
int tid; /* thread id */
@@ -123,6 +143,9 @@ struct fastrpc_buf {
/* Lock for dma buf attachments */
struct mutex lock;
struct list_head attachments;
+ /* mmap support */
+ struct list_head node; /* list of user requested mmaps */
+ uintptr_t raddr;
};
struct fastrpc_dma_buf_attachment {
@@ -192,6 +215,7 @@ struct fastrpc_user {
struct list_head user;
struct list_head maps;
struct list_head pending;
+ struct list_head mmaps;
struct fastrpc_channel_ctx *cctx;
struct fastrpc_session_ctx *sctx;
@@ -269,6 +293,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
return -ENOMEM;
INIT_LIST_HEAD(&buf->attachments);
+ INIT_LIST_HEAD(&buf->node);
mutex_init(&buf->lock);
buf->fl = fl;
@@ -276,6 +301,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
buf->phys = 0;
buf->size = size;
buf->dev = dev;
+ buf->raddr = 0;
buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
GFP_KERNEL);
@@ -934,8 +960,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (err)
goto bail;
- /* Wait for remote dsp to respond or time out */
- err = wait_for_completion_interruptible(&ctx->work);
+ if (kernel) {
+ if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
+ err = -ETIMEDOUT;
+ } else {
+ err = wait_for_completion_interruptible(&ctx->work);
+ }
+
if (err)
goto bail;
@@ -954,12 +985,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
}
bail:
- /* We are done with this compute context, remove it from pending list */
- spin_lock(&fl->lock);
- list_del(&ctx->node);
- spin_unlock(&fl->lock);
- fastrpc_context_put(ctx);
-
+ if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
+ /* We are done with this compute context */
+ spin_lock(&fl->lock);
+ list_del(&ctx->node);
+ spin_unlock(&fl->lock);
+ fastrpc_context_put(ctx);
+ }
if (err)
dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
@@ -1131,6 +1163,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
+ struct fastrpc_buf *buf, *b;
unsigned long flags;
fastrpc_release_current_dsp_process(fl);
@@ -1152,6 +1185,11 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
fastrpc_map_put(map);
}
+ list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ list_del(&buf->node);
+ fastrpc_buf_free(buf);
+ }
+
fastrpc_session_free(cctx, fl->sctx);
fastrpc_channel_ctx_put(cctx);
@@ -1180,6 +1218,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
INIT_LIST_HEAD(&fl->maps);
+ INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
fl->tgid = current->tgid;
fl->cctx = cctx;
@@ -1285,6 +1324,148 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
return err;
}
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
+ struct fastrpc_req_munmap *req)
+{
+ struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
+ struct fastrpc_buf *buf, *b;
+ struct fastrpc_munmap_req_msg req_msg;
+ struct device *dev = fl->sctx->dev;
+ int err;
+ u32 sc;
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+ break;
+ buf = NULL;
+ }
+ spin_unlock(&fl->lock);
+
+ if (!buf) {
+ dev_err(dev, "mmap not in list\n");
+ return -EINVAL;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.size = buf->size;
+ req_msg.vaddr = buf->raddr;
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (!err) {
+ dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
+ spin_lock(&fl->lock);
+ list_del(&buf->node);
+ spin_unlock(&fl->lock);
+ fastrpc_buf_free(buf);
+ } else {
+ dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
+ }
+
+ return err;
+}
+
+static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_req_munmap req;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ return fastrpc_req_munmap_impl(fl, &req);
+}
+
+static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
+ struct fastrpc_buf *buf = NULL;
+ struct fastrpc_mmap_req_msg req_msg;
+ struct fastrpc_mmap_rsp_msg rsp_msg;
+ struct fastrpc_req_munmap req_unmap;
+ struct fastrpc_phy_page pages;
+ struct fastrpc_req_mmap req;
+ struct device *dev = fl->sctx->dev;
+ int err;
+ u32 sc;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ if (req.flags != ADSP_MMAP_ADD_PAGES) {
+ dev_err(dev, "flag not supported 0x%x\n", req.flags);
+ return -EINVAL;
+ }
+
+ if (req.vaddrin) {
+ dev_err(dev, "adding user allocated pages is not supported\n");
+ return -EINVAL;
+ }
+
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
+ if (err) {
+ dev_err(dev, "failed to allocate buffer\n");
+ return err;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.flags = req.flags;
+ req_msg.vaddr = req.vaddrin;
+ req_msg.num = sizeof(pages);
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ pages.addr = buf->phys;
+ pages.size = buf->size;
+
+ args[1].ptr = (u64) (uintptr_t) &pages;
+ args[1].length = sizeof(pages);
+
+ args[2].ptr = (u64) (uintptr_t) &rsp_msg;
+ args[2].length = sizeof(rsp_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (err) {
+ dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
+ goto err_invoke;
+ }
+
+ /* update the buffer to be able to deallocate the memory on the DSP */
+ buf->raddr = (uintptr_t) rsp_msg.vaddr;
+
+ /* let the client know the address to use */
+ req.vaddrout = rsp_msg.vaddr;
+
+ spin_lock(&fl->lock);
+ list_add_tail(&buf->node, &fl->mmaps);
+ spin_unlock(&fl->lock);
+
+ if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+ /* unmap the memory and release the buffer */
+ req_unmap.vaddrout = buf->raddr;
+ req_unmap.size = buf->size;
+ fastrpc_req_munmap_impl(fl, &req_unmap);
+ return -EFAULT;
+ }
+
+ dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
+ buf->raddr, buf->size);
+
+ return 0;
+
+err_invoke:
+ fastrpc_buf_free(buf);
+
+ return err;
+}
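The two handlers above grow and shrink a driver-managed buffer on the DSP: FASTRPC_IOCTL_MMAP allocates pages, asks the remote side to map them via FASTRPC_RMID_INIT_MMAP and returns the remote address in vaddrout, while FASTRPC_IOCTL_MUNMAP reverses it. A hedged userspace sketch, assuming <sys/ioctl.h> plus the fastrpc uapi header for the request structs and ioctl numbers (neither is shown in this patch):

static int sketch_dsp_grow_and_release(int fd, unsigned long long size)
{
        struct fastrpc_req_mmap map = {
                .flags   = 0x1000,      /* ADSP_MMAP_ADD_PAGES, the only flag accepted above */
                .vaddrin = 0,           /* user-allocated pages are rejected by the handler */
                .size    = size,
        };
        struct fastrpc_req_munmap unmap;

        if (ioctl(fd, FASTRPC_IOCTL_MMAP, &map))
                return -1;

        /* map.vaddrout now holds the DSP-side address taken from rsp_msg.vaddr */
        unmap.vaddrout = map.vaddrout;
        unmap.size = map.size;

        return ioctl(fd, FASTRPC_IOCTL_MUNMAP, &unmap);
}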
+
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1305,6 +1486,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
err = fastrpc_dmabuf_alloc(fl, argp);
break;
+ case FASTRPC_IOCTL_MMAP:
+ err = fastrpc_req_mmap(fl, argp);
+ break;
+ case FASTRPC_IOCTL_MUNMAP:
+ err = fastrpc_req_munmap(fl, argp);
+ break;
default:
err = -ENOTTY;
break;
@@ -1430,8 +1617,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return -ENOMEM;
data->miscdev.minor = MISC_DYNAMIC_MINOR;
- data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
- domains[domain_id]);
+ data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
+ domains[domain_id]);
data->miscdev.fops = &fastrpc_fops;
err = misc_register(&data->miscdev);
if (err)
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 0e34c0568fed..040a0bda3125 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -1215,34 +1215,13 @@ static long genwqe_ioctl(struct file *filp, unsigned int cmd,
return rc;
}
-#if defined(CONFIG_COMPAT)
-/**
- * genwqe_compat_ioctl() - Compatibility ioctl
- *
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/genwqe<n>_card.
- *
- * @filp: file pointer.
- * @cmd: command.
- * @arg: user argument.
- * Return: zero on success or negative number on failure.
- */
-static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- return genwqe_ioctl(filp, cmd, arg);
-}
-#endif /* defined(CONFIG_COMPAT) */
-
static const struct file_operations genwqe_fops = {
.owner = THIS_MODULE,
.open = genwqe_open,
.fasync = genwqe_fasync,
.mmap = genwqe_mmap,
.unlocked_ioctl = genwqe_ioctl,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = genwqe_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.release = genwqe_release,
};
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index a9ac045dcfde..8850f475a413 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -65,6 +65,18 @@ static void cs_put(struct hl_cs *cs)
kref_put(&cs->refcount, cs_do_release);
}
+static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ /*
+ * A patched CB is created for external queue jobs, and for H/W queue
+ * jobs if the user CB was allocated by the driver and the MMU is disabled.
+ */
+ return (job->queue_type == QUEUE_TYPE_EXT ||
+ (job->queue_type == QUEUE_TYPE_HW &&
+ job->is_kernel_allocated_cb &&
+ !hdev->mmu_enable));
+}
+
/*
* cs_parser - parse the user command submission
*
@@ -91,11 +103,13 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
parser.patched_cb = NULL;
parser.user_cb = job->user_cb;
parser.user_cb_size = job->user_cb_size;
- parser.ext_queue = job->ext_queue;
+ parser.queue_type = job->queue_type;
+ parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
job->patched_cb = NULL;
rc = hdev->asic_funcs->cs_parser(hdev, &parser);
- if (job->ext_queue) {
+
+ if (is_cb_patched(hdev, job)) {
if (!rc) {
job->patched_cb = parser.patched_cb;
job->job_cb_size = parser.patched_cb_size;
@@ -124,7 +138,7 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_cs *cs = job->cs;
- if (job->ext_queue) {
+ if (is_cb_patched(hdev, job)) {
hl_userptr_delete_list(hdev, &job->userptr_list);
/*
@@ -140,6 +154,19 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
}
}
+ /* For H/W queue jobs, if a user CB was allocated by the driver and the
+ * MMU is enabled, the user CB isn't released in cs_parser() and thus should be
+ * released here.
+ */
+ if (job->queue_type == QUEUE_TYPE_HW &&
+ job->is_kernel_allocated_cb && hdev->mmu_enable) {
+ spin_lock(&job->user_cb->lock);
+ job->user_cb->cs_cnt--;
+ spin_unlock(&job->user_cb->lock);
+
+ hl_cb_put(job->user_cb);
+ }
+
/*
* This is the only place where there can be multiple threads
* modifying the list at the same time
@@ -150,7 +177,8 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
hl_debugfs_remove_job(hdev, job);
- if (job->ext_queue)
+ if (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW)
cs_put(cs);
kfree(job);
@@ -387,18 +415,13 @@ static void job_wq_completion(struct work_struct *work)
free_job(hdev, job);
}
-static struct hl_cb *validate_queue_index(struct hl_device *hdev,
- struct hl_cb_mgr *cb_mgr,
- struct hl_cs_chunk *chunk,
- bool *ext_queue)
+static int validate_queue_index(struct hl_device *hdev,
+ struct hl_cs_chunk *chunk,
+ enum hl_queue_type *queue_type,
+ bool *is_kernel_allocated_cb)
{
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
- u32 cb_handle;
- struct hl_cb *cb;
-
- /* Assume external queue */
- *ext_queue = true;
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
@@ -406,20 +429,29 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev,
(hw_queue_prop->type == QUEUE_TYPE_NA)) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
- return NULL;
+ return -EINVAL;
}
if (hw_queue_prop->driver_only) {
dev_err(hdev->dev,
"Queue index %d is restricted for the kernel driver\n",
chunk->queue_index);
- return NULL;
- } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
- *ext_queue = false;
- return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
+ return -EINVAL;
}
- /* Retrieve CB object */
+ *queue_type = hw_queue_prop->type;
+ *is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;
+
+ return 0;
+}
+
+static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
+ struct hl_cb_mgr *cb_mgr,
+ struct hl_cs_chunk *chunk)
+{
+ struct hl_cb *cb;
+ u32 cb_handle;
+
cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
cb = hl_cb_get(hdev, cb_mgr, cb_handle);
@@ -444,7 +476,8 @@ release_cb:
return NULL;
}
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+ enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
struct hl_cs_job *job;
@@ -452,12 +485,14 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
if (!job)
return NULL;
- job->ext_queue = ext_queue;
+ job->queue_type = queue_type;
+ job->is_kernel_allocated_cb = is_kernel_allocated_cb;
- if (job->ext_queue) {
+ if (is_cb_patched(hdev, job))
INIT_LIST_HEAD(&job->userptr_list);
+
+ if (job->queue_type == QUEUE_TYPE_EXT)
INIT_WORK(&job->finish_work, job_wq_completion);
- }
return job;
}
@@ -470,7 +505,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
- bool ext_queue_present = false;
+ bool int_queues_only = true;
u32 size_to_copy;
int rc, i, parse_cnt;
@@ -514,23 +549,33 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
- bool ext_queue;
+ enum hl_queue_type queue_type;
+ bool is_kernel_allocated_cb;
+
+ rc = validate_queue_index(hdev, chunk, &queue_type,
+ &is_kernel_allocated_cb);
+ if (rc)
+ goto free_cs_object;
- cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
- &ext_queue);
- if (ext_queue) {
- ext_queue_present = true;
+ if (is_kernel_allocated_cb) {
+ cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
rc = -EINVAL;
goto free_cs_object;
}
+ } else {
+ cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
}
- job = hl_cs_allocate_job(hdev, ext_queue);
+ if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
+ int_queues_only = false;
+
+ job = hl_cs_allocate_job(hdev, queue_type,
+ is_kernel_allocated_cb);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
- if (ext_queue)
+ if (is_kernel_allocated_cb)
goto release_cb;
else
goto free_cs_object;
@@ -540,7 +585,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
job->cs = cs;
job->user_cb = cb;
job->user_cb_size = chunk->cb_size;
- if (job->ext_queue)
+ if (is_kernel_allocated_cb)
job->job_cb_size = cb->size;
else
job->job_cb_size = chunk->cb_size;
@@ -553,10 +598,11 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/*
* Increment CS reference. When CS reference is 0, CS is
* done and can be signaled to user and free all its resources
- * Only increment for JOB on external queues, because only
- * for those JOBs we get completion
+ * Only increment for JOB on external or H/W queues, because
+ * only for those JOBs we get completion
*/
- if (job->ext_queue)
+ if (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW)
cs_get(cs);
hl_debugfs_add_job(hdev, job);
@@ -570,9 +616,9 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
}
}
- if (!ext_queue_present) {
+ if (int_queues_only) {
dev_err(hdev->dev,
- "Reject CS %d.%llu because no external queues jobs\n",
+ "Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
rc = -EINVAL;
goto free_cs_object;
@@ -580,9 +626,10 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
- dev_err(hdev->dev,
- "Failed to submit CS %d.%llu to H/W queues, error %d\n",
- cs->ctx->asid, cs->sequence, rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu to H/W queues, error %d\n",
+ cs->ctx->asid, cs->sequence, rc);
goto free_cs_object;
}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 87f37ac31ccd..20413e350343 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -307,45 +307,57 @@ static inline u64 get_hop0_addr(struct hl_ctx *ctx)
(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr, u64 mask, u64 shift)
{
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP0_MASK) >> HOP0_SHIFT);
+ ((virt_addr & mask) >> shift);
}
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP1_MASK) >> HOP1_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
+ mmu_specs->hop0_shift);
}
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP2_MASK) >> HOP2_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
+ mmu_specs->hop1_shift);
}
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP3_MASK) >> HOP3_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
+ mmu_specs->hop2_shift);
}
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP4_MASK) >> HOP4_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
+ mmu_specs->hop3_shift);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
+ mmu_specs->hop4_shift);
}
static inline u64 get_next_hop_addr(u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & PHYS_ADDR_MASK;
+ return curr_pte & HOP_PHYS_ADDR_MASK;
else
return ULLONG_MAX;
}
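The refactor above collapses the five per-hop helpers into one computation parameterized by the hop mask and shift taken from the hl_mmu_properties selected for the address (DRAM MMU vs. host PMMU). Its essence, as a standalone illustration:

/* Illustration of the generic hop-walk step used by get_hopN_pte_addr(). */
static inline u64 sketch_hop_pte_addr(u64 hop_addr, u64 pte_size,
                                      u64 vaddr, u64 mask, u64 shift)
{
        return hop_addr + pte_size * ((vaddr & mask) >> shift);
}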
@@ -355,7 +367,10 @@ static int mmu_show(struct seq_file *s, void *data)
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
struct hl_ctx *ctx;
+ bool is_dram_addr;
u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
@@ -377,33 +392,39 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
mutex_lock(&ctx->mmu_lock);
/* the following lookup is copied from unmap() in mmu.c */
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
hop1_addr = get_next_hop_addr(hop0_pte);
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
hop2_addr = get_next_hop_addr(hop1_pte);
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
hop3_addr = get_next_hop_addr(hop2_pte);
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
if (!(hop3_pte & LAST_MASK)) {
@@ -412,7 +433,8 @@ static int mmu_show(struct seq_file *s, void *data)
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
if (!(hop4_pte & PAGE_PRESENT_MASK))
goto not_mapped;
@@ -506,6 +528,12 @@ static int engines_show(struct seq_file *s, void *data)
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't check device idle during reset\n");
+ return 0;
+ }
+
hdev->asic_funcs->is_device_idle(hdev, NULL, s);
return 0;
@@ -534,41 +562,50 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
u64 *phys_addr)
{
struct hl_ctx *ctx = hdev->compute_ctx;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop_addr, hop_pte_addr, hop_pte;
- u64 offset_mask = HOP4_MASK | OFFSET_MASK;
+ u64 offset_mask = HOP4_MASK | FLAGS_MASK;
int rc = 0;
+ bool is_dram_addr;
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return -EINVAL;
}
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
mutex_lock(&ctx->mmu_lock);
/* hop 0 */
hop_addr = get_hop0_addr(ctx);
- hop_pte_addr = get_hop0_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 1 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop1_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 2 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop2_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 3 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop3_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
if (!(hop_pte & LAST_MASK)) {
@@ -576,10 +613,11 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
+ virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
- offset_mask = OFFSET_MASK;
+ offset_mask = FLAGS_MASK;
}
if (!(hop_pte & PAGE_PRESENT_MASK))
@@ -608,6 +646,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
u32 val;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
+ return 0;
+ }
+
if (*ppos)
return 0;
@@ -637,6 +680,11 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
u32 value;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
+ return 0;
+ }
+
rc = kstrtouint_from_user(buf, count, 16, &value);
if (rc)
return rc;
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 459fee70a597..b155e9549076 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -42,12 +42,10 @@ static void hpriv_release(struct kref *ref)
{
struct hl_fpriv *hpriv;
struct hl_device *hdev;
- struct hl_ctx *ctx;
hpriv = container_of(ref, struct hl_fpriv, refcount);
hdev = hpriv->hdev;
- ctx = hpriv->ctx;
put_pid(hpriv->taskpid);
@@ -889,13 +887,19 @@ again:
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
- /* Kill processes here after CS rollback. This is because the process
- * can't really exit until all its CSs are done, which is what we
- * do in cs rollback
- */
- if (from_hard_reset_thread)
+ if (hard_reset) {
+ /* Kill processes here after CS rollback. This is because the
+ * process can't really exit until all its CSs are done, which
+ * is what we do in cs rollback
+ */
device_kill_open_processes(hdev);
+ /* Flush the Event queue workers to make sure no other thread is
+ * reading or writing to registers during the reset
+ */
+ flush_workqueue(hdev->eq_wq);
+ }
+
/* Release kernel context */
if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
hdev->kernel_ctx = NULL;
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index ea2ca67fbfbf..f5bd03171dac 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -143,10 +143,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
if (!rc) {
- if (result == ARMCP_PACKET_FENCE_VAL)
- dev_info(hdev->dev,
- "queue test on CPU queue succeeded\n");
- else
+ if (result != ARMCP_PACKET_FENCE_VAL)
dev_err(hdev->dev,
"CPU queue test failed (0x%08lX)\n", result);
} else {
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 6fba14b81f90..c8d16aa4382c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -72,6 +72,9 @@
*
*/
+#define GOYA_UBOOT_FW_FILE "habanalabs/goya/goya-u-boot.bin"
+#define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
+
#define GOYA_MMU_REGS_NUM 63
#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
@@ -337,17 +340,20 @@ void goya_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 1;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
NUMBER_OF_INT_HW_QUEUES; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
}
for (; i < HL_MAX_QUEUES; i++)
@@ -377,6 +383,23 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
+ prop->dmmu.hop0_shift = HOP0_SHIFT;
+ prop->dmmu.hop1_shift = HOP1_SHIFT;
+ prop->dmmu.hop2_shift = HOP2_SHIFT;
+ prop->dmmu.hop3_shift = HOP3_SHIFT;
+ prop->dmmu.hop4_shift = HOP4_SHIFT;
+ prop->dmmu.hop0_mask = HOP0_MASK;
+ prop->dmmu.hop1_mask = HOP1_MASK;
+ prop->dmmu.hop2_mask = HOP2_MASK;
+ prop->dmmu.hop3_mask = HOP3_MASK;
+ prop->dmmu.hop4_mask = HOP4_MASK;
+ prop->dmmu.huge_page_size = PAGE_SIZE_2MB;
+
+ /* No difference between PMMU and DMMU except for page size */
+ memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
+ prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->pmmu.page_size = PAGE_SIZE_4KB;
+
prop->va_space_host_start_address = VA_HOST_SPACE_START;
prop->va_space_host_end_address = VA_HOST_SPACE_END;
prop->va_space_dram_start_address = VA_DDR_SPACE_START;
@@ -393,6 +416,9 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
+
+ strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
}
/*
@@ -1454,6 +1480,9 @@ static void goya_init_golden_registers(struct hl_device *hdev)
1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+
+ WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
+ ICACHE_FETCH_LINE_NUM, 2);
}
WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
@@ -1533,7 +1562,6 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
- u64 qman_base_addr;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
@@ -1545,9 +1573,6 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
- qman_base_addr = hdev->asic_prop.sram_base_address +
- MME_QMAN_BASE_OFFSET;
-
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
@@ -2141,13 +2166,11 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
*/
static int goya_push_uboot_to_device(struct hl_device *hdev)
{
- char fw_name[200];
void __iomem *dst;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+ return hl_fw_push_fw_to_device(hdev, GOYA_UBOOT_FW_FILE, dst);
}
/*
@@ -2160,13 +2183,11 @@ static int goya_push_uboot_to_device(struct hl_device *hdev)
*/
static int goya_push_linux_to_device(struct hl_device *hdev)
{
- char fw_name[200];
void __iomem *dst;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+ return hl_fw_push_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
}
static int goya_pldm_init_cpu(struct hl_device *hdev)
@@ -2291,6 +2312,10 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
10000,
cpu_timeout);
+ /* Read U-Boot version now in case we will later fail */
+ goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
+ goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
if (rc) {
dev_err(hdev->dev, "Error in ARM u-boot!");
switch (status) {
@@ -2328,6 +2353,11 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
"ARM status %d - u-boot stopped by user\n",
status);
break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "ARM status %d - Thermal Sensor initialization failed\n",
+ status);
+ break;
default:
dev_err(hdev->dev,
"ARM status %d - Invalid status code\n",
@@ -2337,10 +2367,6 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
return -EIO;
}
- /* Read U-Boot version now in case we will later fail */
- goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
- goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
-
if (!hdev->fw_loading) {
dev_info(hdev->dev, "Skip loading FW\n");
goto out;
@@ -2453,7 +2479,8 @@ int goya_mmu_init(struct hl_device *hdev)
WREG32_AND(mmSTLB_STLB_FEATURE_EN,
(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
WREG32(mmMMU_MMU_ENABLE, 1);
WREG32(mmMMU_SPI_MASK, 0xF);
@@ -2978,9 +3005,6 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
rc = -EIO;
- } else {
- dev_info(hdev->dev, "queue test on H/W queue %d succeeded\n",
- hw_queue_id);
}
free_pkt:
@@ -3925,7 +3949,7 @@ static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
return 0;
dev_err(hdev->dev,
- "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
+ "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
parser->user_cb, parser->user_cb_size);
return -EFAULT;
@@ -3935,7 +3959,7 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
struct goya_device *goya = hdev->asic_specific;
- if (!parser->ext_queue)
+ if (parser->queue_type == QUEUE_TYPE_INT)
return goya_parse_cb_no_ext_queue(hdev, parser);
if (goya->hw_cap_initialized & HW_CAP_MMU)
@@ -4606,7 +4630,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
lin_dma_pkt++;
} while (--lin_dma_pkts_cnt);
- job = hl_cs_allocate_job(hdev, true);
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -4835,13 +4859,15 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
-static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
+static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+ u32 flags)
{
struct goya_device *goya = hdev->asic_specific;
u32 status, timeout_usec;
int rc;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
return;
/* no need in L1 only invalidation in Goya */
@@ -4880,7 +4906,8 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
u32 status, timeout_usec, inv_data, pi;
int rc;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
return;
/* no need in L1 only invalidation in Goya */
@@ -5137,7 +5164,8 @@ static const struct hl_asic_funcs goya_funcs = {
.init_iatu = goya_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
- .halt_coresight = goya_halt_coresight
+ .halt_coresight = goya_halt_coresight,
+ .get_clk_rate = goya_get_clk_rate
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 89b6574f8e4f..c3230cb6e25c 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -233,4 +233,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
+int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index b4d406af1bed..c1ee6e2b5dff 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -8,6 +8,7 @@
#include "goyaP.h"
#include "include/goya/goya_coresight.h"
#include "include/goya/asic_reg/goya_regs.h"
+#include "include/goya/asic_reg/goya_masks.h"
#include <uapi/misc/habanalabs.h>
@@ -377,33 +378,32 @@ static int goya_config_etr(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etr *input;
- u64 base_reg = mmPSOC_ETR_BASE - CFG_BASE;
u32 val;
int rc;
- WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+ WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK);
- val = RREG32(base_reg + 0x304);
+ val = RREG32(mmPSOC_ETR_FFCR);
val |= 0x1000;
- WREG32(base_reg + 0x304, val);
+ WREG32(mmPSOC_ETR_FFCR, val);
val |= 0x40;
- WREG32(base_reg + 0x304, val);
+ WREG32(mmPSOC_ETR_FFCR, val);
- rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+ rc = goya_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
- rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+ rc = goya_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
- WREG32(base_reg + 0x20, 0);
+ WREG32(mmPSOC_ETR_CTL, 0);
if (params->enable) {
input = params->input;
@@ -423,25 +423,26 @@ static int goya_config_etr(struct hl_device *hdev,
return -EINVAL;
}
- WREG32(base_reg + 0x34, 0x3FFC);
- WREG32(base_reg + 0x4, input->buffer_size);
- WREG32(base_reg + 0x28, input->sink_mode);
- WREG32(base_reg + 0x110, 0x700);
- WREG32(base_reg + 0x118,
+ WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
+ WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
+ WREG32(mmPSOC_ETR_MODE, input->sink_mode);
+ WREG32(mmPSOC_ETR_AXICTL,
+ 0x700 | PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
- WREG32(base_reg + 0x11C,
+ WREG32(mmPSOC_ETR_DBAHI,
upper_32_bits(input->buffer_address));
- WREG32(base_reg + 0x304, 3);
- WREG32(base_reg + 0x308, 0xA);
- WREG32(base_reg + 0x20, 1);
+ WREG32(mmPSOC_ETR_FFCR, 3);
+ WREG32(mmPSOC_ETR_PSCR, 0xA);
+ WREG32(mmPSOC_ETR_CTL, 1);
} else {
- WREG32(base_reg + 0x34, 0);
- WREG32(base_reg + 0x4, 0x400);
- WREG32(base_reg + 0x118, 0);
- WREG32(base_reg + 0x11C, 0);
- WREG32(base_reg + 0x308, 0);
- WREG32(base_reg + 0x28, 0);
- WREG32(base_reg + 0x304, 0);
+ WREG32(mmPSOC_ETR_BUFWM, 0);
+ WREG32(mmPSOC_ETR_RSZ, 0x400);
+ WREG32(mmPSOC_ETR_DBALO, 0);
+ WREG32(mmPSOC_ETR_DBAHI, 0);
+ WREG32(mmPSOC_ETR_PSCR, 0);
+ WREG32(mmPSOC_ETR_MODE, 0);
+ WREG32(mmPSOC_ETR_FFCR, 0);
if (params->output_size >= sizeof(u64)) {
u32 rwp, rwphi;
@@ -451,8 +452,8 @@ static int goya_config_etr(struct hl_device *hdev,
* the buffer is set in the RWP register (lower 32
* bits), and in the RWPHI register (upper 8 bits).
*/
- rwp = RREG32(base_reg + 0x18);
- rwphi = RREG32(base_reg + 0x3c) & 0xff;
+ rwp = RREG32(mmPSOC_ETR_RWP);
+ rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
*(u64 *) params->output = ((u64) rwphi << 32) | rwp;
}
}
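
The hunks above replace raw offsets from the ETR block base (0xFB0, 0x304, 0xC, 0x20, ...) with the named registers introduced in psoc_etr_regs.h further down. As a quick standalone sanity check of that correspondence — assuming an ETR block base of 0x2C43000, which is only inferred from the new header (mmPSOC_ETR_RSZ = 0x2C43004 looks like base + 0x4) and is not stated in the patch — the named addresses line up with the old "base + offset" arithmetic:

/* Standalone sketch: the named PSOC ETR registers used in goya_config_etr()
 * match the old "base + offset" addresses. The 0x2C43000 base is an assumption
 * inferred from the new header, not a value taken from the driver.
 */
#include <assert.h>
#include <stdio.h>

#define PSOC_ETR_BASE_ASSUMED	0x2C43000u	/* hypothetical block base */

#define mmPSOC_ETR_LAR		0x2C43FB0u	/* was base + 0xFB0 (CoreSight unlock) */
#define mmPSOC_ETR_FFCR		0x2C43304u	/* was base + 0x304 (flush/flow control) */
#define mmPSOC_ETR_STS		0x2C4300Cu	/* was base + 0xC   (status) */
#define mmPSOC_ETR_CTL		0x2C43020u	/* was base + 0x20  (enable/disable) */

int main(void)
{
	assert(mmPSOC_ETR_LAR  == PSOC_ETR_BASE_ASSUMED + 0xFB0);
	assert(mmPSOC_ETR_FFCR == PSOC_ETR_BASE_ASSUMED + 0x304);
	assert(mmPSOC_ETR_STS  == PSOC_ETR_BASE_ASSUMED + 0xC);
	assert(mmPSOC_ETR_CTL  == PSOC_ETR_BASE_ASSUMED + 0x20);
	printf("named ETR registers match the old offsets\n");
	return 0;
}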
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index a2a700c3d597..b2ebc01e27f4 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -32,6 +32,37 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
}
}
+int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
+{
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ if (value < 0) {
+ dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
+ value);
+ return value;
+ }
+
+ *max_clk = (value / 1000 / 1000);
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ if (value < 0) {
+ dev_err(hdev->dev,
+ "Failed to retrieve device current clock %ld\n",
+ value);
+ return value;
+ }
+
+ *cur_clk = (value / 1000 / 1000);
+
+ return 0;
+}
+
static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 75862be53c60..00c949f4ccd1 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -40,8 +40,6 @@
#define HL_MAX_QUEUES 128
-#define HL_MAX_JOBS_PER_CS 64
-
/* MUST BE POWER OF 2 and larger than 1 */
#define HL_MAX_PENDING_CS 64
@@ -85,12 +83,15 @@ struct hl_fpriv;
* @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
* memories and/or operates the compute engines.
* @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
+ * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
+ * notifications are sent by H/W.
*/
enum hl_queue_type {
QUEUE_TYPE_NA,
QUEUE_TYPE_EXT,
QUEUE_TYPE_INT,
- QUEUE_TYPE_CPU
+ QUEUE_TYPE_CPU,
+ QUEUE_TYPE_HW
};
/**
@@ -98,10 +99,13 @@ enum hl_queue_type {
* @type: queue type.
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
+ * @requires_kernel_cb: true if a CB handle must be provided for jobs on this
+ * queue, false otherwise (a CB address must be provided).
*/
struct hw_queue_properties {
enum hl_queue_type type;
u8 driver_only;
+ u8 requires_kernel_cb;
};
/**
@@ -110,8 +114,8 @@ struct hw_queue_properties {
* @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
*/
enum vm_type_t {
- VM_TYPE_USERPTR,
- VM_TYPE_PHYS_PACK
+ VM_TYPE_USERPTR = 0x1,
+ VM_TYPE_PHYS_PACK = 0x2
};
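
VM_TYPE_USERPTR and VM_TYPE_PHYS_PACK move from implicit 0/1 values to distinct single-bit values so callers can OR them into the new flags argument of mmu_invalidate_cache(), as goya_mmu_init() does earlier in this patch with VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK. The Goya hunks shown here take the flags but do not yet act on them selectively, so the following is only a sketch of the kind of flag test the bit values make possible:

/* Why the enum values become distinct bits: one u32 can now say which
 * mappings (host userptr, DRAM phys pack, or both) to invalidate.
 */
#include <stdio.h>

enum vm_type_t {
	VM_TYPE_USERPTR = 0x1,
	VM_TYPE_PHYS_PACK = 0x2
};

static void invalidate_cache(unsigned int flags)
{
	if (flags & VM_TYPE_USERPTR)
		printf("invalidate host (userptr) mappings\n");
	if (flags & VM_TYPE_PHYS_PACK)
		printf("invalidate DRAM (phys pack) mappings\n");
}

int main(void)
{
	/* full invalidation, as requested from goya_mmu_init() */
	invalidate_cache(VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
	/* after unmapping a single host mapping */
	invalidate_cache(VM_TYPE_USERPTR);
	return 0;
}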
/**
@@ -127,12 +131,44 @@ enum hl_device_hw_state {
};
/**
+ * struct hl_mmu_properties - ASIC specific MMU address translation properties.
+ * @hop0_shift: shift of hop 0 mask.
+ * @hop1_shift: shift of hop 1 mask.
+ * @hop2_shift: shift of hop 2 mask.
+ * @hop3_shift: shift of hop 3 mask.
+ * @hop4_shift: shift of hop 4 mask.
+ * @hop0_mask: mask to get the PTE address in hop 0.
+ * @hop1_mask: mask to get the PTE address in hop 1.
+ * @hop2_mask: mask to get the PTE address in hop 2.
+ * @hop3_mask: mask to get the PTE address in hop 3.
+ * @hop4_mask: mask to get the PTE address in hop 4.
+ * @page_size: default page size used to allocate memory.
+ * @huge_page_size: page size used to allocate memory with huge pages.
+ */
+struct hl_mmu_properties {
+ u64 hop0_shift;
+ u64 hop1_shift;
+ u64 hop2_shift;
+ u64 hop3_shift;
+ u64 hop4_shift;
+ u64 hop0_mask;
+ u64 hop1_mask;
+ u64 hop2_mask;
+ u64 hop3_mask;
+ u64 hop4_mask;
+ u32 page_size;
+ u32 huge_page_size;
+};
+
+/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
* @armcp_info: received various information from ArmCP regarding the H/W, e.g.
* available sensors.
* @uboot_ver: F/W U-boot version.
* @preboot_ver: F/W Preboot version.
+ * @dmmu: DRAM MMU address translation properties.
+ * @pmmu: PCI (host) MMU address translation properties.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address - SRAM physical start address for user access.
@@ -169,53 +205,55 @@ enum hl_device_hw_state {
* @psoc_pci_pll_nf: PCI PLL NF value.
* @psoc_pci_pll_od: PCI PLL OD value.
* @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
- * @completion_queues_count: number of completion queues.
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
* @tpc_enabled_mask: which TPCs are enabled.
+ * @completion_queues_count: number of completion queues.
*/
struct asic_fixed_properties {
struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
- struct armcp_info armcp_info;
- char uboot_ver[VERSION_MAX_LEN];
- char preboot_ver[VERSION_MAX_LEN];
- u64 sram_base_address;
- u64 sram_end_address;
- u64 sram_user_base_address;
- u64 dram_base_address;
- u64 dram_end_address;
- u64 dram_user_base_address;
- u64 dram_size;
- u64 dram_pci_bar_size;
- u64 max_power_default;
- u64 va_space_host_start_address;
- u64 va_space_host_end_address;
- u64 va_space_dram_start_address;
- u64 va_space_dram_end_address;
- u64 dram_size_for_default_page_mapping;
- u64 pcie_dbi_base_address;
- u64 pcie_aux_dbi_reg_addr;
- u64 mmu_pgt_addr;
- u64 mmu_dram_default_page_addr;
- u32 mmu_pgt_size;
- u32 mmu_pte_size;
- u32 mmu_hop_table_size;
- u32 mmu_hop0_tables_total_size;
- u32 dram_page_size;
- u32 cfg_size;
- u32 sram_size;
- u32 max_asid;
- u32 num_of_events;
- u32 psoc_pci_pll_nr;
- u32 psoc_pci_pll_nf;
- u32 psoc_pci_pll_od;
- u32 psoc_pci_pll_div_factor;
- u32 high_pll;
- u32 cb_pool_cb_cnt;
- u32 cb_pool_cb_size;
- u8 completion_queues_count;
- u8 tpc_enabled_mask;
+ struct armcp_info armcp_info;
+ char uboot_ver[VERSION_MAX_LEN];
+ char preboot_ver[VERSION_MAX_LEN];
+ struct hl_mmu_properties dmmu;
+ struct hl_mmu_properties pmmu;
+ u64 sram_base_address;
+ u64 sram_end_address;
+ u64 sram_user_base_address;
+ u64 dram_base_address;
+ u64 dram_end_address;
+ u64 dram_user_base_address;
+ u64 dram_size;
+ u64 dram_pci_bar_size;
+ u64 max_power_default;
+ u64 va_space_host_start_address;
+ u64 va_space_host_end_address;
+ u64 va_space_dram_start_address;
+ u64 va_space_dram_end_address;
+ u64 dram_size_for_default_page_mapping;
+ u64 pcie_dbi_base_address;
+ u64 pcie_aux_dbi_reg_addr;
+ u64 mmu_pgt_addr;
+ u64 mmu_dram_default_page_addr;
+ u32 mmu_pgt_size;
+ u32 mmu_pte_size;
+ u32 mmu_hop_table_size;
+ u32 mmu_hop0_tables_total_size;
+ u32 dram_page_size;
+ u32 cfg_size;
+ u32 sram_size;
+ u32 max_asid;
+ u32 num_of_events;
+ u32 psoc_pci_pll_nr;
+ u32 psoc_pci_pll_nf;
+ u32 psoc_pci_pll_od;
+ u32 psoc_pci_pll_div_factor;
+ u32 high_pll;
+ u32 cb_pool_cb_cnt;
+ u32 cb_pool_cb_size;
+ u8 tpc_enabled_mask;
+ u8 completion_queues_count;
};
/**
@@ -236,8 +274,6 @@ struct hl_dma_fence {
* Command Buffers
*/
-#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
-
/**
* struct hl_cb_mgr - describes a Command Buffer Manager.
* @cb_lock: protects cb_handles.
@@ -481,8 +517,8 @@ enum hl_pll_frequency {
* @get_events_stat: retrieve event queue entries histogram.
* @read_pte: read MMU page table entry from DRAM.
* @write_pte: write MMU page table entry to DRAM.
- * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
- * hard (L0 & L1) flush.
+ * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
+ * (L1 only) or hard (L0 & L1) flush.
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
@@ -502,6 +538,7 @@ enum hl_pll_frequency {
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
+ * @get_clk_rate: Retrieve the ASIC's current and maximum clock rates, in MHz.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -562,7 +599,8 @@ struct hl_asic_funcs {
u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
- void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
+ void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
+ u32 flags);
void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
@@ -584,6 +622,7 @@ struct hl_asic_funcs {
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
+ int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
};
@@ -688,7 +727,7 @@ struct hl_ctx_mgr {
* @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions.
- * @addr: user-space virtual pointer to the start of the memory area.
+ * @addr: user-space virtual address of the start of the memory area.
* @size: size of the memory area to pin & map.
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/
@@ -752,11 +791,14 @@ struct hl_cs {
* @userptr_list: linked-list of userptr mappings that belong to this job and
* wait for completion.
* @debugfs_list: node in debugfs list of command submission jobs.
+ * @queue_type: the type of the H/W queue this job is submitted to.
* @id: the id of this job inside a CS.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @job_cb_size: the actual size of the CB that we put on the queue.
- * @ext_queue: whether the job is for external queue or internal queue.
+ * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
+ * handle to a kernel-allocated CB object, false
+ * otherwise (SRAM/DRAM/host address).
*/
struct hl_cs_job {
struct list_head cs_node;
@@ -766,39 +808,44 @@ struct hl_cs_job {
struct work_struct finish_work;
struct list_head userptr_list;
struct list_head debugfs_list;
+ enum hl_queue_type queue_type;
u32 id;
u32 hw_queue_id;
u32 user_cb_size;
u32 job_cb_size;
- u8 ext_queue;
+ u8 is_kernel_allocated_cb;
};
/**
- * struct hl_cs_parser - command submission paerser properties.
+ * struct hl_cs_parser - command submission parser properties.
* @user_cb: the CB we got from the user.
* @patched_cb: in case of patching, this is internal CB which is submitted on
* the queue instead of the CB we got from the IOCTL.
* @job_userptr_list: linked-list of userptr mappings that belong to the related
* job and wait for completion.
* @cs_sequence: the sequence number of the related CS.
+ * @queue_type: the type of the H/W queue this job is submitted to.
* @ctx_id: the ID of the context the related CS belongs to.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @patched_cb_size: the size of the CB after parsing.
- * @ext_queue: whether the job is for external queue or internal queue.
* @job_id: the id of the related job inside the related CS.
+ * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
+ * handle to a kernel-allocated CB object, false
+ * otherwise (SRAM/DRAM/host address).
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
struct hl_cb *patched_cb;
struct list_head *job_userptr_list;
u64 cs_sequence;
+ enum hl_queue_type queue_type;
u32 ctx_id;
u32 hw_queue_id;
u32 user_cb_size;
u32 patched_cb_size;
- u8 ext_queue;
u8 job_id;
+ u8 is_kernel_allocated_cb;
};
@@ -1048,9 +1095,10 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
-#define WREG32_FIELD(reg, field, val) \
- WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
- (val) << REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD(reg, offset, field, val) \
+ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
+ ~REG_FIELD_MASK(reg, field)) | \
+ (val) << REG_FIELD_SHIFT(reg, field))
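
The reworked WREG32_FIELD macro gains a per-instance offset so the same field definition can be applied to any copy of a register block, but it keeps the usual read-modify-write shape: read the register, clear the field's mask, OR in the shifted value. The same pattern in isolation, using an ordinary variable instead of a real MMIO register (a sketch of the pattern, not the macro verbatim):

/* Field update behind WREG32_FIELD: clear the field, OR in the new value. */
#include <assert.h>
#include <stdint.h>

#define FIELD_SHIFT	4
#define FIELD_MASK	0x000000F0u	/* 4-bit field at bits [7:4] */

static uint32_t reg32 = 0xDEADBEEFu;	/* stands in for RREG32/WREG32 */

static void wreg32_field(uint32_t val)
{
	reg32 = (reg32 & ~FIELD_MASK) | (val << FIELD_SHIFT);
}

int main(void)
{
	wreg32_field(0x5);
	assert(reg32 == 0xDEADBE5Fu);	/* only bits [7:4] changed */
	return 0;
}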
/* Timeout should be longer when working with simulator but cap the
* increased timeout to some maximum
@@ -1501,7 +1549,8 @@ int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
void hl_cs_rollback_all(struct hl_device *hdev);
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+ enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void goya_set_asic_funcs(struct hl_device *hdev);
@@ -1513,7 +1562,7 @@ void hl_vm_fini(struct hl_device *hdev);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr);
-int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
+void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_userptr_delete_list(struct hl_device *hdev,
struct list_head *userptr_list);
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 66d9c710073c..6474b868ef27 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -60,11 +60,16 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
hw_ip.dram_size = prop->dram_size - dram_kmd_size;
- if (hw_ip.dram_size > 0)
+ if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
hw_ip.num_of_events = prop->num_of_events;
- memcpy(hw_ip.armcp_version,
- prop->armcp_info.armcp_version, VERSION_MAX_LEN);
+
+ memcpy(hw_ip.armcp_version, prop->armcp_info.armcp_version,
+ min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));
+
+ memcpy(hw_ip.card_name, prop->armcp_info.card_name,
+ min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
+
hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
@@ -179,17 +184,14 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
goto out;
}
- if (output) {
- if (copy_to_user((void __user *) (uintptr_t) args->output_ptr,
- output,
- args->output_size)) {
- dev_err(hdev->dev,
- "copy to user failed in debug ioctl\n");
- rc = -EFAULT;
- goto out;
- }
+ if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
+ output, args->output_size)) {
+ dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
+ rc = -EFAULT;
+ goto out;
}
+
out:
kfree(params);
kfree(output);
@@ -221,6 +223,41 @@ static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}
+static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_clk_rate clk_rate = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
+ &clk_rate.max_clk_rate_mhz);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &clk_rate,
+ min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
+}
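
get_clk_rate() above (and get_reset_count() just below) follow the same INFO-ioctl convention: validate return_size and return_pointer, fill a small struct, then copy back at most min(return_size, sizeof(struct)) bytes so an older caller with a smaller struct still works. A standalone illustration of why the min() matters; the "v1" struct is hypothetical and memcpy() stands in for copy_to_user():

/* Copy min(user buffer size, sizeof(kernel struct)): an older caller only
 * receives the fields it knows about, with no overflow of its buffer.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct clk_rate_v1 {			/* what an old binary might pass */
	uint32_t cur_clk_rate_mhz;
};

struct clk_rate_v2 {			/* what the kernel fills today */
	uint32_t cur_clk_rate_mhz;
	uint32_t max_clk_rate_mhz;
};

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	struct clk_rate_v2 kernel = { .cur_clk_rate_mhz = 1575,
				      .max_clk_rate_mhz = 1600 };
	struct clk_rate_v1 user = { 0 };

	/* copy only as many bytes as the caller's buffer can hold */
	memcpy(&user, &kernel, min_size(sizeof(user), sizeof(kernel)));
	printf("cur %u MHz\n", user.cur_clk_rate_mhz);	/* 1575 */
	return 0;
}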
+
+static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_reset_count reset_count = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
+ reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
+
+ return copy_to_user(out, &reset_count,
+ min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -239,6 +276,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_DEVICE_STATUS:
return device_status_info(hdev, args);
+ case HL_INFO_RESET_COUNT:
+ return get_reset_count(hdev, args);
+
default:
break;
}
@@ -271,6 +311,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
rc = hw_events_info(hdev, true, args);
break;
+ case HL_INFO_CLK_RATE:
+ rc = get_clk_rate(hdev, args);
+ break;
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
@@ -406,9 +450,8 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
retcode = func(hpriv, kdata);
- if (cmd & IOC_OUT)
- if (copy_to_user((void __user *)arg, kdata, usize))
- retcode = -EFAULT;
+ if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
+ retcode = -EFAULT;
out_err:
if (retcode)
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 55b383b2a116..91579dde9262 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -58,8 +58,8 @@ out:
}
/*
- * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
- *
+ * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
+ * H/W queue.
* @hdev: pointer to habanalabs device structure
* @q: pointer to habanalabs queue structure
* @ctl: BD's control word
@@ -73,8 +73,8 @@ out:
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
- u32 ctl, u32 len, u64 ptr)
+static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
+ struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
@@ -174,6 +174,45 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
}
/*
+ * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
+ * @hdev: Pointer to hl_device structure.
+ * @q: Pointer to hl_hw_queue structure.
+ * @num_of_entries: How many entries to check for space.
+ *
+ * Perform the following:
+ * - Make sure we have enough space in the completion queue.
+ * This check also ensures that there is enough space in the h/w queue, as
+ * both queues are of the same size.
+ * - Reserve space in the completion queue (needs to be reversed if there
+ * is a failure down the road before the actual submission of work).
+ *
+ * Both operations are done using the "free_slots_cnt" field of the completion
+ * queue. The CI counters of the queue and the completion queue are not
+ * needed/used for the H/W queue type.
+ */
+static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
+ int num_of_entries)
+{
+ atomic_t *free_slots =
+ &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+
+ /*
+ * Check we have enough space in the completion queue.
+ * Add -1 to counter (decrement) unless counter was already 0.
+ * In that case, CQ is full so we can't submit a new CB.
+ * atomic_add_unless will return 0 if counter was already 0.
+ */
+ if (atomic_add_negative(num_of_entries * -1, free_slots)) {
+ dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
+ num_of_entries, q->hw_queue_id);
+ atomic_add(num_of_entries, free_slots);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
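
The comment in hw_queue_sanity_checks() above describes reserving completion-queue slots by decrementing a free-slot counter and rolling the decrement back if it would go negative. The same reserve/rollback idea in portable C11 atomics, where atomic_fetch_sub() plays the role the kernel's atomic_add_negative() plays above:

/* Reserve/rollback sketch of the free_slots_cnt scheme described above. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int free_slots = 4;	/* pretend the CQ has 4 free entries */

static int reserve_cq_slots(int num_of_entries)
{
	/* take the slots first ... */
	int before = atomic_fetch_sub(&free_slots, num_of_entries);

	/* ... and give them back if that overcommitted the queue */
	if (before - num_of_entries < 0) {
		atomic_fetch_add(&free_slots, num_of_entries);
		return -1;		/* -EAGAIN in the driver */
	}
	return 0;
}

int main(void)
{
	printf("reserve 3: %d\n", reserve_cq_slots(3));	/* ok, 1 slot left */
	printf("reserve 2: %d\n", reserve_cq_slots(2));	/* fails, rolled back */
	printf("free slots now: %d\n", atomic_load(&free_slots));	/* 1 */
	return 0;
}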
+
+/*
* hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
*
* @hdev: pointer to hl_device structure
@@ -188,7 +227,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
u32 cb_size, u64 cb_ptr)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
- int rc;
+ int rc = 0;
/*
* The CPU queue is a synchronous queue with an effective depth of
@@ -206,11 +245,18 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
goto out;
}
- rc = ext_queue_sanity_checks(hdev, q, 1, false);
- if (rc)
- goto out;
+ /*
+ * hl_hw_queue_send_cb_no_cmpl() is called on queues of the H/W type only
+ * during the init phase, when the queues are empty and being tested, so
+ * there is no need for sanity checks.
+ */
+ if (q->queue_type != QUEUE_TYPE_HW) {
+ rc = ext_queue_sanity_checks(hdev, q, 1, false);
+ if (rc)
+ goto out;
+ }
- ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+ ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
out:
if (q->queue_type != QUEUE_TYPE_CPU)
@@ -220,14 +266,14 @@ out:
}
/*
- * ext_hw_queue_schedule_job - submit an JOB to an external queue
+ * ext_queue_schedule_job - submit a JOB to an external queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
+static void ext_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
@@ -260,7 +306,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
* H/W queues is done under the scheduler mutex
*
* No need to check if CQ is full because it was already
- * checked in hl_queue_sanity_checks
+ * checked in ext_queue_sanity_checks
*/
cq = &hdev->completion_queue[q->hw_queue_id];
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
@@ -274,18 +320,18 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
- ext_queue_submit_bd(hdev, q, ctl, len, ptr);
+ ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
- * int_hw_queue_schedule_job - submit an JOB to an internal queue
+ * int_queue_schedule_job - submit a JOB to an internal queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
-static void int_hw_queue_schedule_job(struct hl_cs_job *job)
+static void int_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
@@ -308,6 +354,60 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
}
/*
+ * hw_queue_schedule_job - submit a JOB to a H/W queue
+ *
+ * @job: pointer to the job that needs to be submitted to the queue
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void hw_queue_schedule_job(struct hl_cs_job *job)
+{
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_cq *cq;
+ u64 ptr;
+ u32 offset, ctl, len;
+
+ /*
+ * Upon PQE completion, COMP_DATA is used as the write data to the
+ * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
+ * write address offset in the SM block (QMAN LBW message).
+ * The write address offset is calculated as "COMP_OFFSET << 2".
+ */
+ offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
+ ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
+ ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
+
+ len = job->job_cb_size;
+
+ /*
+ * A patched CB is created only if a user CB was allocated by driver and
+ * MMU is disabled. If MMU is enabled, the user CB should be used
+ * instead. If the user CB wasn't allocated by driver, assume that it
+ * holds an address.
+ */
+ if (job->patched_cb)
+ ptr = job->patched_cb->bus_address;
+ else if (job->is_kernel_allocated_cb)
+ ptr = job->user_cb->bus_address;
+ else
+ ptr = (u64) (uintptr_t) job->user_cb;
+
+ /*
+ * No need to protect pi_offset because scheduling to the
+ * H/W queues is done under the scheduler mutex
+ *
+ * No need to check if CQ is full because it was already
+ * checked in hw_queue_sanity_checks
+ */
+ cq = &hdev->completion_queue[q->hw_queue_id];
+ cq->pi = hl_cq_inc_ptr(cq->pi);
+
+ ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+}
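
hw_queue_schedule_job() above builds the BD control word from the two H/W CTL fields added later in qman_if.h: COMP_OFFSET in bits [23:16] (the CS sequence masked to HL_MAX_PENDING_CS - 1) and COMP_DATA in bits [15:0] (the queue PI). A standalone check of that packing, reusing the mask values from this patch:

/* Packing sketch for the H/W-queue BD control word assembled above. */
#include <assert.h>
#include <stdint.h>

#define BD_CTL_COMP_OFFSET_SHIFT	16
#define BD_CTL_COMP_OFFSET_MASK		0x00FF0000u
#define BD_CTL_COMP_DATA_SHIFT		0
#define BD_CTL_COMP_DATA_MASK		0x0000FFFFu

static uint32_t pack_hw_bd_ctl(uint32_t comp_offset, uint32_t comp_data)
{
	return ((comp_offset << BD_CTL_COMP_OFFSET_SHIFT) &
			BD_CTL_COMP_OFFSET_MASK) |
	       ((comp_data << BD_CTL_COMP_DATA_SHIFT) &
			BD_CTL_COMP_DATA_MASK);
}

int main(void)
{
	/* e.g. CS sequence slot 0x2A, queue PI 0x0123 */
	uint32_t ctl = pack_hw_bd_ctl(0x2A, 0x0123);

	assert(ctl == 0x002A0123u);
	return 0;
}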
+
+/*
* hl_hw_queue_schedule_cs - schedule a command submission
*
* @job : pointer to the CS
@@ -330,23 +430,34 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
q = &hdev->kernel_queues[0];
- /* This loop assumes all external queues are consecutive */
for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_EXT) {
- if (cs->jobs_in_queue_cnt[i]) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ switch (q->queue_type) {
+ case QUEUE_TYPE_EXT:
rc = ext_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i], true);
- if (rc)
- goto unroll_cq_resv;
- cq_cnt++;
- }
- } else if (q->queue_type == QUEUE_TYPE_INT) {
- if (cs->jobs_in_queue_cnt[i]) {
+ cs->jobs_in_queue_cnt[i], true);
+ break;
+ case QUEUE_TYPE_INT:
rc = int_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i]);
- if (rc)
- goto unroll_cq_resv;
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ default:
+ dev_err(hdev->dev, "Queue type %d is invalid\n",
+ q->queue_type);
+ rc = -EINVAL;
+ break;
}
+
+ if (rc)
+ goto unroll_cq_resv;
+
+ if (q->queue_type == QUEUE_TYPE_EXT ||
+ q->queue_type == QUEUE_TYPE_HW)
+ cq_cnt++;
}
}
@@ -373,21 +484,30 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
- if (job->ext_queue)
- ext_hw_queue_schedule_job(job);
- else
- int_hw_queue_schedule_job(job);
+ switch (job->queue_type) {
+ case QUEUE_TYPE_EXT:
+ ext_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_INT:
+ int_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_HW:
+ hw_queue_schedule_job(job);
+ break;
+ default:
+ break;
+ }
cs->submitted = true;
goto out;
unroll_cq_resv:
- /* This loop assumes all external queues are consecutive */
q = &hdev->kernel_queues[0];
for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
- if ((q->queue_type == QUEUE_TYPE_EXT) &&
- (cs->jobs_in_queue_cnt[i])) {
+ if ((q->queue_type == QUEUE_TYPE_EXT ||
+ q->queue_type == QUEUE_TYPE_HW) &&
+ cs->jobs_in_queue_cnt[i]) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
@@ -414,8 +534,8 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
q->ci = hl_queue_inc_ptr(q->ci);
}
-static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
- struct hl_hw_queue *q, bool is_cpu_queue)
+static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+ bool is_cpu_queue)
{
void *p;
int rc;
@@ -465,7 +585,7 @@ free_queue:
return rc;
}
-static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
void *p;
@@ -485,18 +605,38 @@ static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
return 0;
}
-static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ return ext_and_cpu_queue_init(hdev, q, true);
+}
+
+static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q, true);
+ return ext_and_cpu_queue_init(hdev, q, false);
}
-static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q, false);
+ void *p;
+
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->kernel_address = (u64) (uintptr_t) p;
+
+ /* Make sure read/write pointers are initialized to start of queue */
+ q->ci = 0;
+ q->pi = 0;
+
+ return 0;
}
/*
- * hw_queue_init - main initialization function for H/W queue object
+ * queue_init - main initialization function for H/W queue object
*
* @hdev: pointer to hl_device device structure
* @q: pointer to hl_hw_queue queue structure
@@ -505,7 +645,7 @@ static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
* Allocate dma-able memory for the queue and initialize fields
* Returns 0 on success
*/
-static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
u32 hw_queue_id)
{
int rc;
@@ -516,21 +656,20 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
- rc = ext_hw_queue_init(hdev, q);
+ rc = ext_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_INT:
- rc = int_hw_queue_init(hdev, q);
+ rc = int_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_CPU:
- rc = cpu_hw_queue_init(hdev, q);
+ rc = cpu_queue_init(hdev, q);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_NA:
q->valid = 0;
return 0;
-
default:
dev_crit(hdev->dev, "wrong queue type %d during init\n",
q->queue_type);
@@ -554,7 +693,7 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
*
* Free the queue memory
*/
-static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
+static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
if (!q->valid)
return;
@@ -612,7 +751,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
q->queue_type = asic->hw_queues_props[i].type;
- rc = hw_queue_init(hdev, q, i);
+ rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
"failed to initialize queue %d\n", i);
@@ -624,7 +763,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
release_queues:
for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
- hw_queue_fini(hdev, q);
+ queue_fini(hdev, q);
kfree(hdev->kernel_queues);
@@ -637,7 +776,7 @@ void hl_hw_queues_destroy(struct hl_device *hdev)
int i;
for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
- hw_queue_fini(hdev, q);
+ queue_fini(hdev, q);
kfree(hdev->kernel_queues);
}
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index 8618891d5afa..3c44ef3a23ed 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -260,4 +260,6 @@
#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+
#endif /* ASIC_REG_GOYA_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index 19b0f0ef1d0b..fce490e6a231 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -84,6 +84,7 @@
#include "tpc6_rtr_regs.h"
#include "tpc7_nrtr_regs.h"
#include "tpc0_eml_cfg_regs.h"
+#include "psoc_etr_regs.h"
#include "psoc_global_conf_masks.h"
#include "dma_macro_masks.h"
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000000..b7c33e025db5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x2C43004
+
+#define mmPSOC_ETR_STS 0x2C4300C
+
+#define mmPSOC_ETR_RRD 0x2C43010
+
+#define mmPSOC_ETR_RRP 0x2C43014
+
+#define mmPSOC_ETR_RWP 0x2C43018
+
+#define mmPSOC_ETR_TRG 0x2C4301C
+
+#define mmPSOC_ETR_CTL 0x2C43020
+
+#define mmPSOC_ETR_RWD 0x2C43024
+
+#define mmPSOC_ETR_MODE 0x2C43028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x2C4302C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x2C43030
+
+#define mmPSOC_ETR_BUFWM 0x2C43034
+
+#define mmPSOC_ETR_RRPHI 0x2C43038
+
+#define mmPSOC_ETR_RWPHI 0x2C4303C
+
+#define mmPSOC_ETR_AXICTL 0x2C43110
+
+#define mmPSOC_ETR_DBALO 0x2C43118
+
+#define mmPSOC_ETR_DBAHI 0x2C4311C
+
+#define mmPSOC_ETR_FFSR 0x2C43300
+
+#define mmPSOC_ETR_FFCR 0x2C43304
+
+#define mmPSOC_ETR_PSCR 0x2C43308
+
+#define mmPSOC_ETR_ITMISCOP0 0x2C43EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x2C43EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x2C43EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x2C43EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x2C43EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x2C43EF8
+
+#define mmPSOC_ETR_ITCTRL 0x2C43F00
+
+#define mmPSOC_ETR_CLAIMSET 0x2C43FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x2C43FA4
+
+#define mmPSOC_ETR_LAR 0x2C43FB0
+
+#define mmPSOC_ETR_LSR 0x2C43FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x2C43FB8
+
+#define mmPSOC_ETR_DEVID 0x2C43FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x2C43FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x2C43FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x2C43FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x2C43FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x2C43FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x2C43FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x2C43FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x2C43FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x2C43FEC
+
+#define mmPSOC_ETR_COMPID0 0x2C43FF0
+
+#define mmPSOC_ETR_COMPID1 0x2C43FF4
+
+#define mmPSOC_ETR_COMPID2 0x2C43FF8
+
+#define mmPSOC_ETR_COMPID3 0x2C43FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index 4cd04c090285..2853a2de8cf6 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -20,6 +20,8 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_DRAM_INIT_FAIL,
CPU_BOOT_STATUS_FIT_CORRUPTED,
CPU_BOOT_STATUS_UBOOT_NOT_READY,
+ CPU_BOOT_STATUS_RESERVED,
+ CPU_BOOT_STATUS_TS_INIT_FAIL,
};
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index 71ea3c3e8ba3..a6851a9d3f03 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -12,18 +12,16 @@
#define PAGE_SHIFT_2MB 21
#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB)
#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
-#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1))
#define PAGE_PRESENT_MASK 0x0000000000001ull
#define SWAP_OUT_MASK 0x0000000000004ull
#define LAST_MASK 0x0000000000800ull
-#define PHYS_ADDR_MASK 0xFFFFFFFFFFFFF000ull
#define HOP0_MASK 0x3000000000000ull
#define HOP1_MASK 0x0FF8000000000ull
#define HOP2_MASK 0x0007FC0000000ull
#define HOP3_MASK 0x000003FE00000ull
#define HOP4_MASK 0x00000001FF000ull
-#define OFFSET_MASK 0x0000000000FFFull
+#define FLAGS_MASK 0x0000000000FFFull
#define HOP0_SHIFT 48
#define HOP1_SHIFT 39
@@ -31,8 +29,7 @@
#define HOP3_SHIFT 21
#define HOP4_SHIFT 12
-#define PTE_PHYS_ADDR_SHIFT 12
-#define PTE_PHYS_ADDR_MASK ~OFFSET_MASK
+#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)
#define HL_PTE_SIZE sizeof(u64)
#define HOP_TABLE_SIZE PAGE_SIZE_4KB
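
The reworked masks above make the PTE layout explicit: the low 12 bits are flag bits (FLAGS_MASK) and everything above them is the next hop's physical address (HOP_PHYS_ADDR_MASK = ~FLAGS_MASK). A small standalone check of splitting a PTE into both parts, using the mask values from this header (the PTE value itself is hypothetical):

/* Sketch of the PTE split implied by FLAGS_MASK / HOP_PHYS_ADDR_MASK. */
#include <assert.h>
#include <stdint.h>

#define FLAGS_MASK		0x0000000000FFFull
#define HOP_PHYS_ADDR_MASK	(~FLAGS_MASK)
#define PAGE_PRESENT_MASK	0x0000000000001ull
#define LAST_MASK		0x0000000000800ull

int main(void)
{
	/* hypothetical PTE: next hop table at 0x8001c345000, present + last */
	uint64_t pte = 0x8001c345000ull | PAGE_PRESENT_MASK | LAST_MASK;

	assert((pte & HOP_PHYS_ADDR_MASK) == 0x8001c345000ull);
	assert(pte & PAGE_PRESENT_MASK);
	assert(pte & LAST_MASK);
	return 0;
}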
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h
index bf59bbe27fdc..0fdb49188ed7 100644
--- a/drivers/misc/habanalabs/include/qman_if.h
+++ b/drivers/misc/habanalabs/include/qman_if.h
@@ -23,6 +23,8 @@ struct hl_bd {
#define HL_BD_SIZE sizeof(struct hl_bd)
/*
+ * S/W CTL FIELDS.
+ *
* BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is
* valid. 1 means the repeat field is valid, 0 means not-valid,
* i.e. repeat == 1
@@ -34,6 +36,16 @@ struct hl_bd {
#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF
/*
+ * H/W CTL FIELDS
+ */
+
+#define BD_CTL_COMP_OFFSET_SHIFT 16
+#define BD_CTL_COMP_OFFSET_MASK 0x00FF0000
+
+#define BD_CTL_COMP_DATA_SHIFT 0
+#define BD_CTL_COMP_DATA_MASK 0x0000FFFF
+
+/*
* COMPLETION QUEUE
*/
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 365fb0cb8dff..6c72cb4eff54 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/genalloc.h>
-#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
#define HL_MMU_DEBUG 0
/*
@@ -159,20 +158,19 @@ pages_pack_err:
}
/*
- * get_userptr_from_host_va - initialize userptr structure from given host
- * virtual address
- *
- * @hdev : habanalabs device structure
- * @args : parameters containing the virtual address and size
- * @p_userptr : pointer to result userptr structure
+ * dma_map_host_va - DMA mapping of the given host virtual address.
+ * @hdev: habanalabs device structure
+ * @addr: the host virtual address of the memory area
+ * @size: the size of the memory area
+ * @p_userptr: pointer to result userptr structure
*
* This function does the following:
* - Allocate userptr structure
* - Pin the given host memory using the userptr structure
* - Perform DMA mapping to have the DMA addresses of the pages
*/
-static int get_userptr_from_host_va(struct hl_device *hdev,
- struct hl_mem_in *args, struct hl_userptr **p_userptr)
+static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
+ struct hl_userptr **p_userptr)
{
struct hl_userptr *userptr;
int rc;
@@ -183,8 +181,7 @@ static int get_userptr_from_host_va(struct hl_device *hdev,
goto userptr_err;
}
- rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
- args->map_host.mem_size, userptr);
+ rc = hl_pin_host_memory(hdev, addr, size, userptr);
if (rc) {
dev_err(hdev->dev, "Failed to pin host memory\n");
goto pin_err;
@@ -215,16 +212,16 @@ userptr_err:
}
/*
- * free_userptr - free userptr structure
- *
- * @hdev : habanalabs device structure
- * @userptr : userptr to free
+ * dma_unmap_host_va - DMA unmapping of the given host virtual address.
+ * @hdev: habanalabs device structure
+ * @userptr: userptr to free
*
* This function does the following:
* - Unpins the physical pages
* - Frees the userptr structure
*/
-static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+static void dma_unmap_host_va(struct hl_device *hdev,
+ struct hl_userptr *userptr)
{
hl_unpin_host_memory(hdev, userptr);
kfree(userptr);
@@ -253,10 +250,9 @@ static void dram_pg_pool_do_release(struct kref *ref)
}
/*
- * free_phys_pg_pack - free physical page pack
- *
- * @hdev : habanalabs device structure
- * @phys_pg_pack : physical page pack to free
+ * free_phys_pg_pack - free physical page pack
+ * @hdev: habanalabs device structure
+ * @phys_pg_pack: physical page pack to free
*
* This function does the following:
* - For DRAM memory only, iterate over the pack and free each physical block
@@ -264,7 +260,7 @@ static void dram_pg_pool_do_release(struct kref *ref)
* - Free the hl_vm_phys_pg_pack structure
*/
static void free_phys_pg_pack(struct hl_device *hdev,
- struct hl_vm_phys_pg_pack *phys_pg_pack)
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
u64 i;
@@ -519,8 +515,8 @@ static inline int add_va_block(struct hl_device *hdev,
* - Return the start address of the virtual block
*/
static u64 get_va_block(struct hl_device *hdev,
- struct hl_va_range *va_range, u64 size, u64 hint_addr,
- bool is_userptr)
+ struct hl_va_range *va_range, u64 size, u64 hint_addr,
+ bool is_userptr)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
u64 valid_start, valid_size, prev_start, prev_end, page_mask,
@@ -528,18 +524,17 @@ static u64 get_va_block(struct hl_device *hdev,
u32 page_size;
bool add_prev = false;
- if (is_userptr) {
+ if (is_userptr)
/*
* We cannot know if the user allocated memory with huge pages
* or not, hence we continue with the biggest possible
* granularity.
*/
- page_size = PAGE_SIZE_2MB;
- page_mask = PAGE_MASK_2MB;
- } else {
- page_size = hdev->asic_prop.dram_page_size;
- page_mask = ~((u64)page_size - 1);
- }
+ page_size = hdev->asic_prop.pmmu.huge_page_size;
+ else
+ page_size = hdev->asic_prop.dmmu.page_size;
+
+ page_mask = ~((u64)page_size - 1);
mutex_lock(&va_range->lock);
@@ -549,7 +544,6 @@ static u64 get_va_block(struct hl_device *hdev,
/* calc the first possible aligned addr */
valid_start = va_block->start;
-
if (valid_start & (page_size - 1)) {
valid_start &= page_mask;
valid_start += page_size;
@@ -561,7 +555,6 @@ static u64 get_va_block(struct hl_device *hdev,
if (valid_size >= size &&
(!new_va_block || valid_size < res_valid_size)) {
-
new_va_block = va_block;
res_valid_start = valid_start;
res_valid_size = valid_size;
@@ -631,11 +624,10 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
/*
* init_phys_pg_pack_from_userptr - initialize physical page pack from host
- * memory
- *
- * @ctx : current context
- * @userptr : userptr to initialize from
- * @pphys_pg_pack : res pointer
+ * memory
+ * @ctx: current context
+ * @userptr: userptr to initialize from
+ * @pphys_pg_pack: result pointer
*
* This function does the following:
* - Pin the physical pages related to the given virtual block
@@ -643,16 +635,19 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
* virtual block
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
- struct hl_userptr *userptr,
- struct hl_vm_phys_pg_pack **pphys_pg_pack)
+ struct hl_userptr *userptr,
+ struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
+ struct hl_mmu_properties *mmu_prop = &ctx->hdev->asic_prop.pmmu;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
u64 page_mask, total_npages;
- u32 npages, page_size = PAGE_SIZE;
+ u32 npages, page_size = PAGE_SIZE,
+ huge_page_size = mmu_prop->huge_page_size;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
+ u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack)
@@ -675,14 +670,14 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
total_npages += npages;
- if ((npages % PGS_IN_2MB_PAGE) ||
- (dma_addr & (PAGE_SIZE_2MB - 1)))
+ if ((npages % pgs_in_huge_page) ||
+ (dma_addr & (huge_page_size - 1)))
is_huge_page_opt = false;
}
if (is_huge_page_opt) {
- page_size = PAGE_SIZE_2MB;
- total_npages /= PGS_IN_2MB_PAGE;
+ page_size = huge_page_size;
+ do_div(total_npages, pgs_in_huge_page);
}
page_mask = ~(((u64) page_size) - 1);
@@ -714,7 +709,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
dma_addr += page_size;
if (is_huge_page_opt)
- npages -= PGS_IN_2MB_PAGE;
+ npages -= pgs_in_huge_page;
else
npages--;
}
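
init_phys_pg_pack_from_userptr() now derives its huge-page bookkeeping from the MMU properties: pgs_in_huge_page = huge_page_size >> __ffs(page_size), i.e. huge_page_size / page_size for a power-of-two page size, and the huge-page optimization is dropped if any SG chunk is not a whole multiple of that or not huge-page aligned. The same test restated as standalone C; the page sizes and chunk values are illustrative, and __builtin_ctz() stands in for __ffs():

/* Standalone version of the "can we map with huge pages?" test used above. */
#include <stdint.h>
#include <stdio.h>

struct chunk { uint64_t dma_addr; uint32_t npages; };

int main(void)
{
	const uint32_t page_size = 4096;			/* 4 KiB */
	const uint32_t huge_page_size = 2 * 1024 * 1024;	/* 2 MiB */
	/* __ffs(4096) == 12, so this is huge_page_size / page_size == 512 */
	const uint32_t pgs_in_huge_page =
			huge_page_size >> __builtin_ctz(page_size);

	struct chunk chunks[] = {
		{ 0x20000000ull, 1024 },	/* 2 MiB aligned, 2 whole huge pages */
		{ 0x20400000ull,  512 },	/* 2 MiB aligned, 1 whole huge page */
	};
	int is_huge_page_opt = 1;

	for (unsigned int i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		if ((chunks[i].npages % pgs_in_huge_page) ||
		    (chunks[i].dma_addr & (huge_page_size - 1)))
			is_huge_page_opt = 0;
	}

	printf("huge page mapping %s\n",
	       is_huge_page_opt ? "possible" : "not possible");
	return 0;
}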
@@ -731,19 +726,18 @@ page_pack_arr_mem_err:
}
/*
- * map_phys_page_pack - maps the physical page pack
- *
- * @ctx : current context
- * @vaddr : start address of the virtual area to map from
- * @phys_pg_pack : the pack of physical pages to map to
+ * map_phys_pg_pack - maps the physical page pack.
+ * @ctx: current context
+ * @vaddr: start address of the virtual area to map from
+ * @phys_pg_pack: the pack of physical pages to map to
*
* This function does the following:
* - Maps each chunk of virtual memory to matching physical chunk
* - Stores number of successful mappings in the given argument
- * - Returns 0 on success, error code otherwise.
+ * - Returns 0 on success, error code otherwise
*/
-static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
- struct hl_vm_phys_pg_pack *phys_pg_pack)
+static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
@@ -783,6 +777,36 @@ err:
return rc;
}
+/*
+ * unmap_phys_pg_pack - unmaps the physical page pack
+ * @ctx: current context
+ * @vaddr: start address of the virtual area to unmap
+ * @phys_pg_pack: the pack of physical pages to unmap
+ */
+static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 next_vaddr, i;
+ u32 page_size;
+
+ page_size = phys_pg_pack->page_size;
+ next_vaddr = vaddr;
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+ /*
+ * unmapping on Palladium can be really long, so avoid a CPU
+ * soft lockup bug by sleeping a little between unmapping pages
+ */
+ if (hdev->pldm)
+ usleep_range(500, 1000);
+ }
+}
+
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
u64 *paddr)
{
@@ -839,7 +863,10 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
*device_addr = 0;
if (is_userptr) {
- rc = get_userptr_from_host_va(hdev, args, &userptr);
+ u64 addr = args->map_host.host_virt_addr,
+ size = args->map_host.mem_size;
+
+ rc = dma_map_host_va(hdev, addr, size, &userptr);
if (rc) {
dev_err(hdev->dev, "failed to get userptr from va\n");
return rc;
@@ -850,7 +877,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
- args->map_host.host_virt_addr);
+ addr);
goto init_page_pack_err;
}
@@ -909,7 +936,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
mutex_lock(&ctx->mmu_lock);
- rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
+ rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
mutex_unlock(&ctx->mmu_lock);
dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
@@ -917,7 +944,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
mutex_unlock(&ctx->mmu_lock);
@@ -955,7 +982,7 @@ shared_err:
free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
if (is_userptr)
- free_userptr(hdev, userptr);
+ dma_unmap_host_va(hdev, userptr);
return rc;
}
@@ -965,20 +992,20 @@ init_page_pack_err:
*
* @ctx : current context
* @vaddr : device virtual address to unmap
+ * @ctx_free : true if in context free flow, false otherwise.
*
* This function does the following:
* - Unmap the physical pages related to the given virtual address
* - return the device virtual block to the virtual block list
*/
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
+ struct hl_va_range *va_range;
enum vm_type_t *vm_type;
- u64 next_vaddr, i;
- u32 page_size;
bool is_userptr;
int rc;
@@ -1003,9 +1030,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
+ va_range = &ctx->host_va_range;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+ &phys_pg_pack);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1014,6 +1042,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
}
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
+ va_range = &ctx->dram_va_range;
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,
@@ -1029,42 +1058,41 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
goto mapping_cnt_err;
}
- page_size = phys_pg_pack->page_size;
- vaddr &= ~(((u64) page_size) - 1);
-
- next_vaddr = vaddr;
+ vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
mutex_lock(&ctx->mmu_lock);
- for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
- if (hl_mmu_unmap(ctx, next_vaddr, page_size))
- dev_warn_ratelimited(hdev->dev,
- "unmap failed for vaddr: 0x%llx\n", next_vaddr);
-
- /* unmapping on Palladium can be really long, so avoid a CPU
- * soft lockup bug by sleeping a little between unmapping pages
- */
- if (hdev->pldm)
- usleep_range(500, 1000);
- }
+ unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+ /*
+ * During context free this function is called in a loop to clean all
+ * the context mappings. Hence the cache invalidation can be called once
+ * at the loop end rather than for each iteration
+ */
+ if (!ctx_free)
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, *vm_type);
mutex_unlock(&ctx->mmu_lock);
- if (add_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- vaddr,
- vaddr + phys_pg_pack->total_size - 1))
- dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
- vaddr);
+ /*
+ * No point in maintaining the free VA block list if the context is
+ * closing as the list will be freed anyway
+ */
+ if (!ctx_free) {
+ rc = add_va_block(hdev, va_range, vaddr,
+ vaddr + phys_pg_pack->total_size - 1);
+ if (rc)
+ dev_warn(hdev->dev,
+ "add va block failed for vaddr: 0x%llx\n",
+ vaddr);
+ }
atomic_dec(&phys_pg_pack->mapping_cnt);
kfree(hnode);
if (is_userptr) {
free_phys_pg_pack(hdev, phys_pg_pack);
- free_userptr(hdev, userptr);
+ dma_unmap_host_va(hdev, userptr);
}
return 0;
@@ -1189,8 +1217,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_MEM_OP_UNMAP:
- rc = unmap_device_va(ctx,
- args->in.unmap.device_virt_addr);
+ rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
+ false);
break;
default:
@@ -1203,20 +1231,72 @@ out:
return rc;
}
+static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
+ u32 npages, u64 start, u32 offset,
+ struct hl_userptr *userptr)
+{
+ int rc;
+
+ if (!access_ok((void __user *) (uintptr_t) addr, size)) {
+ dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
+ return -EFAULT;
+ }
+
+ userptr->vec = frame_vector_create(npages);
+ if (!userptr->vec) {
+ dev_err(hdev->dev, "Failed to create frame vector\n");
+ return -ENOMEM;
+ }
+
+ rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ userptr->vec);
+
+ if (rc != npages) {
+ dev_err(hdev->dev,
+ "Failed to map host memory, user ptr probably wrong\n");
+ if (rc < 0)
+ goto destroy_framevec;
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ if (frame_vector_to_pages(userptr->vec) < 0) {
+ dev_err(hdev->dev,
+ "Failed to translate frame vector to pages\n");
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ rc = sg_alloc_table_from_pages(userptr->sgt,
+ frame_vector_pages(userptr->vec),
+ npages, offset, size, GFP_ATOMIC);
+ if (rc < 0) {
+ dev_err(hdev->dev, "failed to create SG table from pages\n");
+ goto put_framevec;
+ }
+
+ return 0;
+
+put_framevec:
+ put_vaddr_frames(userptr->vec);
+destroy_framevec:
+ frame_vector_destroy(userptr->vec);
+ return rc;
+}
+
/*
- * hl_pin_host_memory - pins a chunk of host memory
- *
- * @hdev : pointer to the habanalabs device structure
- * @addr : the user-space virtual address of the memory area
- * @size : the size of the memory area
- * @userptr : pointer to hl_userptr structure
+ * hl_pin_host_memory - pins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure
+ * @addr: the host virtual address of the memory area
+ * @size: the size of the memory area
+ * @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Pins the physical pages
- * - Create a SG list from those pages
+ * - Create an SG list from those pages
*/
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
- struct hl_userptr *userptr)
+ struct hl_userptr *userptr)
{
u64 start, end;
u32 npages, offset;
@@ -1227,11 +1307,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
- if (!access_ok((void __user *) (uintptr_t) addr, size)) {
- dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
- return -EFAULT;
- }
-
/*
* If the combination of the address and size requested for this memory
* region causes an integer overflow, return error.
@@ -1244,6 +1319,14 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
+ /*
+ * This function can be called also from data path, hence use atomic
+ * always as it is not a big allocation.
+ */
+ userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
+ if (!userptr->sgt)
+ return -ENOMEM;
+
start = addr & PAGE_MASK;
offset = addr & ~PAGE_MASK;
end = PAGE_ALIGN(addr + size);
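
hl_pin_host_memory() rounds the user range out to whole pages before pinning: start is the address rounded down to a page boundary, offset is the in-page offset kept for the SG table, and the number of pages covers up to PAGE_ALIGN(addr + size). A small check of that arithmetic, assuming 4 KiB pages (the address and size are arbitrary example values):

/* Page-rounding arithmetic used before pinning, assuming 4 KiB pages. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096ull
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uint64_t addr = 0x7f12345678ull;	/* arbitrary user address */
	uint64_t size = 10000;			/* spans 3 pages once rounded out */

	uint64_t start  = addr & PAGE_MASK;
	uint64_t offset = addr & ~PAGE_MASK;
	uint64_t end    = PAGE_ALIGN(addr + size);
	uint64_t npages = (end - start) / PAGE_SIZE;

	assert(start  == 0x7f12345000ull);
	assert(offset == 0x678);
	assert(end    == 0x7f12348000ull);
	assert(npages == 3);
	return 0;
}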
@@ -1254,42 +1337,12 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
userptr->dma_mapped = false;
INIT_LIST_HEAD(&userptr->job_node);
- userptr->vec = frame_vector_create(npages);
- if (!userptr->vec) {
- dev_err(hdev->dev, "Failed to create frame vector\n");
- return -ENOMEM;
- }
-
- rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
- userptr->vec);
-
- if (rc != npages) {
- dev_err(hdev->dev,
- "Failed to map host memory, user ptr probably wrong\n");
- if (rc < 0)
- goto destroy_framevec;
- rc = -EFAULT;
- goto put_framevec;
- }
-
- if (frame_vector_to_pages(userptr->vec) < 0) {
+ rc = get_user_memory(hdev, addr, size, npages, start, offset,
+ userptr);
+ if (rc) {
dev_err(hdev->dev,
- "Failed to translate frame vector to pages\n");
- rc = -EFAULT;
- goto put_framevec;
- }
-
- userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
- if (!userptr->sgt) {
- rc = -ENOMEM;
- goto put_framevec;
- }
-
- rc = sg_alloc_table_from_pages(userptr->sgt,
- frame_vector_pages(userptr->vec),
- npages, offset, size, GFP_ATOMIC);
- if (rc < 0) {
- dev_err(hdev->dev, "failed to create SG table from pages\n");
+ "failed to get user memory for address 0x%llx\n",
+ addr);
goto free_sgt;
}
@@ -1299,34 +1352,28 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
free_sgt:
kfree(userptr->sgt);
-put_framevec:
- put_vaddr_frames(userptr->vec);
-destroy_framevec:
- frame_vector_destroy(userptr->vec);
return rc;
}
/*
- * hl_unpin_host_memory - unpins a chunk of host memory
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr : pointer to hl_userptr structure
+ * hl_unpin_host_memory - unpins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure
+ * @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Unpins the physical pages related to the host memory
* - Free the SG list
*/
-int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
+void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
struct page **pages;
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
- hdev->asic_funcs->hl_dma_unmap_sg(hdev,
- userptr->sgt->sgl,
- userptr->sgt->nents,
- userptr->dir);
+ hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents,
+ userptr->dir);
pages = frame_vector_pages(userptr->vec);
if (!IS_ERR(pages)) {
@@ -1342,8 +1389,6 @@ int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
sg_free_table(userptr->sgt);
kfree(userptr->sgt);
-
- return 0;
}
/*
@@ -1542,43 +1587,16 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
* @hdev : pointer to the habanalabs structure
* @va_range : pointer to virtual addresses range
*
- * This function initializes the following:
- * - Checks that the given range contains the whole initial range
+ * This function does the following:
* - Frees the virtual addresses block list and its lock
*/
static void hl_va_range_fini(struct hl_device *hdev,
struct hl_va_range *va_range)
{
- struct hl_vm_va_block *va_block;
-
- if (list_empty(&va_range->list)) {
- dev_warn(hdev->dev,
- "va list should not be empty on cleanup!\n");
- goto out;
- }
-
- if (!list_is_singular(&va_range->list)) {
- dev_warn(hdev->dev,
- "va list should not contain multiple blocks on cleanup!\n");
- goto free_va_list;
- }
-
- va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
-
- if (va_block->start != va_range->start_addr ||
- va_block->end != va_range->end_addr) {
- dev_warn(hdev->dev,
- "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
- va_block->start, va_block->end);
- goto free_va_list;
- }
-
-free_va_list:
mutex_lock(&va_range->lock);
clear_va_list_locked(hdev, &va_range->list);
mutex_unlock(&va_range->lock);
-out:
mutex_destroy(&va_range->lock);
}
@@ -1613,21 +1631,31 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
- if (!hash_empty(ctx->mem_hash))
- dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
+ /*
+ * If a hard reset is pending, something already went wrong, so there
+ * is no point in printing yet another side-effect error
+ */
+ if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
+ dev_notice(hdev->dev,
+ "ctx %d is freed while it has va in use\n",
+ ctx->asid);
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
- unmap_device_va(ctx, hnode->vaddr);
+ unmap_device_va(ctx, hnode->vaddr, true);
}
+ /* invalidate the cache once after the unmapping loop */
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+
spin_lock(&vm->idr_lock);
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) {
dev_dbg(hdev->dev,
- "page list 0x%p of asid %d is still alive\n",
+ "page list 0x%px of asid %d is still alive\n",
phys_pg_list, ctx->asid);
atomic64_sub(phys_pg_list->total_size,
&hdev->dram_used_mem);
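
Aside (illustration only, not part of the patch): the page-frame arithmetic hl_pin_host_memory() performs before calling the new get_user_memory() helper can be made concrete with a small worked example; the npages derivation below is the conventional one and is assumed here, not copied from the driver.

#include <linux/mm.h>	/* PAGE_MASK, PAGE_ALIGN, PAGE_SHIFT */

/*
 * Worked example, assuming 4KB pages: for addr = 0x12345678 and
 * size = 0x3000 this gives start = 0x12345000, offset = 0x678,
 * end = 0x12349000 and npages = 4.
 */
static u32 example_npages(u64 addr, u64 size)
{
	u64 start = addr & PAGE_MASK;
	u64 end = PAGE_ALIGN(addr + size);
	u32 offset = addr & ~PAGE_MASK;

	(void)offset;	/* offset is later handed to sg_alloc_table_from_pages() */
	return (end - start) >> PAGE_SHIFT;
}
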
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 176c315836f1..6262b26e2086 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -25,10 +25,9 @@ static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
return pgt_info;
}
-static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
struct hl_device *hdev = ctx->hdev;
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
hdev->asic_prop.mmu_hop_table_size);
@@ -37,6 +36,13 @@ static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
kfree(pgt_info);
}
+static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+
+ _free_hop(ctx, pgt_info);
+}
+
static u64 alloc_hop(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
@@ -105,8 +111,8 @@ static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
* clear the 12 LSBs and translate the shadow hop to its associated
* physical hop, and add back the original 12 LSBs.
*/
- u64 phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK) |
- (val & OFFSET_MASK);
+ u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
+ (val & FLAGS_MASK);
ctx->hdev->asic_funcs->write_pte(ctx->hdev,
get_phys_addr(ctx, shadow_pte_addr),
@@ -159,7 +165,7 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
*/
num_of_ptes_left = pgt_info->num_of_ptes;
if (!num_of_ptes_left)
- free_hop(ctx, hop_addr);
+ _free_hop(ctx, pgt_info);
return num_of_ptes_left;
}
@@ -171,35 +177,50 @@ static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
((virt_addr & mask) >> shift);
}
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
+ mmu_prop->hop0_shift);
}
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
+ mmu_prop->hop1_shift);
}
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
+ mmu_prop->hop2_shift);
}
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
+ mmu_prop->hop3_shift);
}
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
+ mmu_prop->hop4_shift);
}
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & PHYS_ADDR_MASK;
+ return curr_pte & HOP_PHYS_ADDR_MASK;
else
return ULLONG_MAX;
}
@@ -288,23 +309,23 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
}
/* need only pte 0 in hops 0 and 1 */
- pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_addr, pte_val);
- pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_addr, pte_val);
get_pte(ctx, hop1_addr);
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
- pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
+ pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, pte_val);
get_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
- pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
+ pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
LAST_MASK | PAGE_PRESENT_MASK;
for (i = 0 ; i < num_of_hop3 ; i++) {
@@ -400,8 +421,6 @@ int hl_mmu_init(struct hl_device *hdev)
if (!hdev->mmu_enable)
return 0;
- /* MMU H/W init was already done in device hw_init() */
-
hdev->mmu_pgt_pool =
gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
@@ -427,6 +446,8 @@ int hl_mmu_init(struct hl_device *hdev)
goto err_pool_add;
}
+ /* MMU H/W init will be done in device hw_init() */
+
return 0;
err_pool_add:
@@ -450,10 +471,10 @@ void hl_mmu_fini(struct hl_device *hdev)
if (!hdev->mmu_enable)
return;
+ /* MMU H/W fini was already done in device hw_fini() */
+
kvfree(hdev->mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_pgt_pool);
-
- /* MMU H/W fini will be done in device hw_fini() */
}
/**
@@ -501,36 +522,36 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
dram_default_mapping_fini(ctx);
if (!hash_empty(ctx->mmu_shadow_hash))
- dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
- free_hop(ctx, pgt_info->shadow_addr);
+ _free_hop(ctx, pgt_info);
}
mutex_destroy(&ctx->mmu_lock);
}
-static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
+static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop0_addr = 0, hop0_pte_addr = 0,
hop1_addr = 0, hop1_pte_addr = 0,
hop2_addr = 0, hop2_pte_addr = 0,
hop3_addr = 0, hop3_pte_addr = 0,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte;
- bool is_dram_addr, is_huge, clear_hop3 = true;
+ bool is_huge, clear_hop3 = true;
- is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
@@ -539,7 +560,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
@@ -548,7 +569,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
@@ -557,7 +578,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
@@ -575,7 +596,8 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
@@ -584,7 +606,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
if (curr_pte == default_pte) {
dev_err(hdev->dev,
@@ -667,25 +689,36 @@ not_mapped:
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr;
u32 real_page_size, npages;
int i, rc;
+ bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
- * is bigger, we break it to sub-pages and unmap them separately.
+ * The H/W handles mapping of specific page sizes. Hence if the page
+ * size is bigger, we break it into sub-pages and unmap them separately.
*/
- if ((page_size % PAGE_SIZE_2MB) == 0) {
- real_page_size = PAGE_SIZE_2MB;
- } else if ((page_size % PAGE_SIZE_4KB) == 0) {
- real_page_size = PAGE_SIZE_4KB;
+ if ((page_size % mmu_prop->huge_page_size) == 0) {
+ real_page_size = mmu_prop->huge_page_size;
+ } else if ((page_size % mmu_prop->page_size) == 0) {
+ real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
- page_size);
+ "page size of %u is not %uKB nor %uMB aligned, can't unmap\n",
+ page_size,
+ mmu_prop->page_size >> 10,
+ mmu_prop->huge_page_size >> 20);
return -EFAULT;
}
@@ -694,7 +727,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
real_virt_addr = virt_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_unmap(ctx, real_virt_addr);
+ rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
return rc;
@@ -705,10 +738,11 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
}
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
- u32 page_size)
+ u32 page_size, bool is_dram_addr)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop0_addr = 0, hop0_pte_addr = 0,
hop1_addr = 0, hop1_pte_addr = 0,
hop2_addr = 0, hop2_pte_addr = 0,
@@ -716,21 +750,19 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte = 0;
bool hop1_new = false, hop2_new = false, hop3_new = false,
- hop4_new = false, is_huge, is_dram_addr;
+ hop4_new = false, is_huge;
int rc = -ENOMEM;
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * This mapping function can map a 4KB/2MB page. For 2MB page there are
- * only 3 hops rather than 4. Currently the DRAM allocation uses 2MB
- * pages only but user memory could have been allocated with one of the
- * two page sizes. Since this is a common code for all the three cases,
- * we need this hugs page check.
+ * This mapping function can map a page or a huge page. For a huge page
+ * there are only 3 hops rather than 4. Currently the DRAM allocation
+ * uses huge pages only but user memory could have been allocated with
+ * one of the two page sizes. Since this is common code for all the
+ * three cases, we need this huge page check.
*/
- is_huge = page_size == PAGE_SIZE_2MB;
-
- is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ is_huge = page_size == mmu_prop->huge_page_size;
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
@@ -738,28 +770,28 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
}
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
if (hop1_addr == ULLONG_MAX)
goto err;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
if (hop2_addr == ULLONG_MAX)
goto err;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
if (hop3_addr == ULLONG_MAX)
goto err;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
if (!is_huge) {
@@ -767,13 +799,14 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop4_addr == ULLONG_MAX)
goto err;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
}
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
if (curr_pte != default_pte) {
@@ -813,7 +846,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
goto err;
}
- curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
+ curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
| PAGE_PRESENT_MASK;
if (is_huge)
@@ -823,25 +856,25 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop1_new) {
curr_pte =
- (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_pte_addr, curr_pte);
}
if (hop2_new) {
curr_pte =
- (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_pte_addr, curr_pte);
get_pte(ctx, hop1_addr);
}
if (hop3_new) {
curr_pte =
- (hop3_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, curr_pte);
get_pte(ctx, hop2_addr);
}
if (!is_huge) {
if (hop4_new) {
- curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
+ curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop3_pte_addr, curr_pte);
get_pte(ctx, hop3_addr);
@@ -890,25 +923,36 @@ err:
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr, real_phys_addr;
u32 real_page_size, npages;
int i, rc, mapped_cnt = 0;
+ bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
- * is bigger, we break it to sub-pages and map them separately.
+ * The H/W handles mapping of specific page sizes. Hence if the page
+ * size is bigger, we break it into sub-pages and map them separately.
*/
- if ((page_size % PAGE_SIZE_2MB) == 0) {
- real_page_size = PAGE_SIZE_2MB;
- } else if ((page_size % PAGE_SIZE_4KB) == 0) {
- real_page_size = PAGE_SIZE_4KB;
+ if ((page_size % mmu_prop->huge_page_size) == 0) {
+ real_page_size = mmu_prop->huge_page_size;
+ } else if ((page_size % mmu_prop->page_size) == 0) {
+ real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not 4KB nor 2MB aligned, can't map\n",
- page_size);
+ "page size of %u is not %dKB nor %dMB aligned, can't unmap\n",
+ page_size,
+ mmu_prop->page_size >> 10,
+ mmu_prop->huge_page_size >> 20);
return -EFAULT;
}
@@ -923,7 +967,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
for (i = 0 ; i < npages ; i++) {
rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
- real_page_size);
+ real_page_size, is_dram_addr);
if (rc)
goto err;
@@ -937,7 +981,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
err:
real_virt_addr = virt_addr;
for (i = 0 ; i < mapped_cnt ; i++) {
- if (_hl_mmu_unmap(ctx, real_virt_addr))
+ if (_hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va: 0x%llx\n", real_virt_addr);
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 94dfb9e40e29..1aa433a7f66c 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/drivers/char/hpilo.h
*
diff --git a/drivers/misc/ibmvmc.h b/drivers/misc/ibmvmc.h
index e140ada8fe2c..0e1756fffeae 100644
--- a/drivers/misc/ibmvmc.h
+++ b/drivers/misc/ibmvmc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* linux/drivers/misc/ibmvmc.h
*
* IBM Power Systems Virtual Management Channel Support.
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 057d7bbde402..dd65cedf3b12 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -16,7 +16,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
@@ -434,23 +434,23 @@ int lis3lv02d_poweron(struct lis3lv02d *lis3)
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+static void lis3lv02d_joystick_poll(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
int x, y, z;
mutex_lock(&lis3->mutex);
lis3lv02d_get_xyz(lis3, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x);
- input_report_abs(pidev->input, ABS_Y, y);
- input_report_abs(pidev->input, ABS_Z, z);
- input_sync(pidev->input);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_Z, z);
+ input_sync(input);
mutex_unlock(&lis3->mutex);
}
-static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
+static int lis3lv02d_joystick_open(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
if (lis3->pm_dev)
pm_runtime_get_sync(lis3->pm_dev);
@@ -461,12 +461,14 @@ static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
* Update coordinates for the case where poll interval is 0 and
* the chip is running purely under interrupt control
*/
- lis3lv02d_joystick_poll(pidev);
+ lis3lv02d_joystick_poll(input);
+
+ return 0;
}
-static void lis3lv02d_joystick_close(struct input_polled_dev *pidev)
+static void lis3lv02d_joystick_close(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
atomic_set(&lis3->wake_thread, 0);
if (lis3->pm_dev)
@@ -497,7 +499,7 @@ out:
static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
{
- struct input_dev *dev = lis3->idev->input;
+ struct input_dev *dev = lis3->idev;
u8 click_src;
mutex_lock(&lis3->mutex);
@@ -677,26 +679,19 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
if (lis3->idev)
return -EINVAL;
- lis3->idev = input_allocate_polled_device();
- if (!lis3->idev)
+ input_dev = input_allocate_device();
+ if (!input_dev)
return -ENOMEM;
- lis3->idev->poll = lis3lv02d_joystick_poll;
- lis3->idev->open = lis3lv02d_joystick_open;
- lis3->idev->close = lis3lv02d_joystick_close;
- lis3->idev->poll_interval = MDPS_POLL_INTERVAL;
- lis3->idev->poll_interval_min = MDPS_POLL_MIN;
- lis3->idev->poll_interval_max = MDPS_POLL_MAX;
- lis3->idev->private = lis3;
- input_dev = lis3->idev->input;
-
input_dev->name = "ST LIS3LV02DL Accelerometer";
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0;
input_dev->dev.parent = &lis3->pdev->dev;
- set_bit(EV_ABS, input_dev->evbit);
+ input_dev->open = lis3lv02d_joystick_open;
+ input_dev->close = lis3lv02d_joystick_close;
+
max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
if (lis3->whoami == WAI_12B) {
fuzz = LIS3_DEFAULT_FUZZ_12B;
@@ -712,17 +707,32 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+ input_set_drvdata(input_dev, lis3);
+ lis3->idev = input_dev;
+
+ err = input_setup_polling(input_dev, lis3lv02d_joystick_poll);
+ if (err)
+ goto err_free_input;
+
+ input_set_poll_interval(input_dev, MDPS_POLL_INTERVAL);
+ input_set_min_poll_interval(input_dev, MDPS_POLL_MIN);
+ input_set_max_poll_interval(input_dev, MDPS_POLL_MAX);
+
lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
- err = input_register_polled_device(lis3->idev);
- if (err) {
- input_free_polled_device(lis3->idev);
- lis3->idev = NULL;
- }
+ err = input_register_device(lis3->idev);
+ if (err)
+ goto err_free_input;
+ return 0;
+
+err_free_input:
+ input_free_device(input_dev);
+ lis3->idev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
@@ -738,8 +748,7 @@ void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
if (lis3->irq)
misc_deregister(&lis3->miscdev);
- input_unregister_polled_device(lis3->idev);
- input_free_polled_device(lis3->idev);
+ input_unregister_device(lis3->idev);
lis3->idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
@@ -895,10 +904,9 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *lis3,
(p->click_thresh_y << 4));
if (lis3->idev) {
- struct input_dev *input_dev = lis3->idev->input;
- input_set_capability(input_dev, EV_KEY, BTN_X);
- input_set_capability(input_dev, EV_KEY, BTN_Y);
- input_set_capability(input_dev, EV_KEY, BTN_Z);
+ input_set_capability(lis3->idev, EV_KEY, BTN_X);
+ input_set_capability(lis3->idev, EV_KEY, BTN_Y);
+ input_set_capability(lis3->idev, EV_KEY, BTN_Z);
}
}
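
Aside (a minimal sketch of the replacement API, with my_poll/my_register as made-up names): the conversion above moves from input_polled_dev to a regular input_dev driven by input_setup_polling(), which in general looks like this.

#include <linux/device.h>
#include <linux/input.h>

static void my_poll(struct input_dev *input)
{
	int x = 0;	/* read the hardware here */

	input_report_abs(input, ABS_X, x);
	input_sync(input);
}

static int my_register(struct device *parent)
{
	struct input_dev *input;
	int err;

	input = devm_input_allocate_device(parent);
	if (!input)
		return -ENOMEM;

	input_set_abs_params(input, ABS_X, -512, 512, 0, 0);

	err = input_setup_polling(input, my_poll);
	if (err)
		return err;

	input_set_poll_interval(input, 100);	/* ms */

	return input_register_device(input);
}
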
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index 1b0c99883c57..c394c0b08519 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -6,7 +6,7 @@
* Copyright (C) 2008-2009 Eric Piel
*/
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/regulator/consumer.h>
#include <linux/miscdevice.h>
@@ -281,7 +281,7 @@ struct lis3lv02d {
* (1/1000th of earth gravity)
*/
- struct input_polled_dev *idev; /* input device */
+ struct input_dev *idev; /* input device */
struct platform_device *pdev; /* platform device */
struct regulator_bulk_data regulators[2];
atomic_t count; /* interrupt count after last read */
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 7284a22b1a09..a4fdad04809a 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -12,6 +12,10 @@
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_X86_32
+#include <asm/desc.h>
+#endif
+
struct lkdtm_list {
struct list_head node;
};
@@ -337,3 +341,38 @@ void lkdtm_UNSET_SMEP(void)
pr_err("FAIL: this test is x86_64-only\n");
#endif
}
+
+#ifdef CONFIG_X86_32
+void lkdtm_DOUBLE_FAULT(void)
+{
+ /*
+ * Trigger #DF by setting the stack limit to zero. This clobbers
+ * a GDT TLS slot, which is okay because the current task will die
+ * anyway due to the double fault.
+ */
+ struct desc_struct d = {
+ .type = 3, /* expand-up, writable, accessed data */
+ .p = 1, /* present */
+ .d = 1, /* 32-bit */
+ .g = 0, /* limit in bytes */
+ .s = 1, /* not system */
+ };
+
+ local_irq_disable();
+ write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
+ GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);
+
+ /*
+ * Put our zero-limit segment in SS and then trigger a fault. The
+ * 4-byte access to (%esp) will fault with #SS, and the attempt to
+ * deliver the fault will recursively cause #SS and result in #DF.
+ * This whole process happens while NMIs and MCEs are blocked by the
+ * MOV SS window. This is nice because an NMI with an invalid SS
+ * would also double-fault, resulting in the NMI or MCE being lost.
+ */
+ asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
+ "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
+
+ panic("tried to double fault but didn't die\n");
+}
+#endif
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index cbc4c9045a99..ee0d6e721441 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -171,6 +171,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_KERNEL_DS),
CRASHTYPE(STACKLEAK_ERASING),
CRASHTYPE(CFI_FORWARD_PROTO),
+#ifdef CONFIG_X86_32
+ CRASHTYPE(DOUBLE_FAULT),
+#endif
};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index ab446e0bde97..c56d23e37643 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -28,6 +28,9 @@ void lkdtm_CORRUPT_USER_DS(void);
void lkdtm_STACK_GUARD_PAGE_LEADING(void);
void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
void lkdtm_UNSET_SMEP(void);
+#ifdef CONFIG_X86_32
+void lkdtm_DOUBLE_FAULT(void);
+#endif
/* lkdtm_heap.c */
void __init lkdtm_heap_init(void);
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 0a2b99e1af45..9ad9c01ddf41 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -46,8 +46,6 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
*/
static void number_of_connections(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
if (cldev->me_cl->props.max_number_of_connections > 1)
cldev->do_match = 0;
}
@@ -59,8 +57,6 @@ static void number_of_connections(struct mei_cl_device *cldev)
*/
static void blacklist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
cldev->do_match = 0;
}
@@ -71,8 +67,6 @@ static void blacklist(struct mei_cl_device *cldev)
*/
static void whitelist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
cldev->do_match = 1;
}
@@ -256,7 +250,6 @@ static void mei_wd(struct mei_cl_device *cldev)
{
struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
if (pdev->device == MEI_DEV_ID_WPT_LP ||
pdev->device == MEI_DEV_ID_SPT ||
pdev->device == MEI_DEV_ID_SPT_H)
@@ -410,8 +403,6 @@ static void mei_nfc(struct mei_cl_device *cldev)
bus = cldev->bus;
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
mutex_lock(&bus->device_lock);
/* we need to connect to INFO GUID */
cl = mei_cl_alloc_linked(bus);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 985bd4fd3328..a0a495c95e3c 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -791,11 +791,44 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", maxconn);
+}
+static DEVICE_ATTR_RO(max_conn);
+
+static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u8 fixed = mei_me_cl_fixed(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", fixed);
+}
+static DEVICE_ATTR_RO(fixed);
+
+static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%u", maxlen);
+}
+static DEVICE_ATTR_RO(max_len);
+
static struct attribute *mei_cldev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_uuid.attr,
&dev_attr_version.attr,
&dev_attr_modalias.attr,
+ &dev_attr_max_conn.attr,
+ &dev_attr_fixed.attr,
+ &dev_attr_max_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
@@ -873,15 +906,16 @@ static const struct device_type mei_cl_device_type = {
/**
* mei_cl_bus_set_name - set device name for me client device
+ * <controller>-<client device>
+ * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
*
* @cldev: me client device
*/
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
- dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
- cldev->name,
- mei_me_cl_uuid(cldev->me_cl),
- mei_me_cl_ver(cldev->me_cl));
+ dev_set_name(&cldev->dev, "%s-%pUl",
+ dev_name(cldev->bus->dev),
+ mei_me_cl_uuid(cldev->me_cl));
}
/**
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index c1f9e810cf81..2f8954def591 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -69,6 +69,42 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
return me_cl->props.protocol_version;
}
+/**
+ * mei_me_cl_max_conn - return me client max number of connections
+ *
+ * @me_cl: me client
+ *
+ * Return: me client max number of connections
+ */
+static inline u8 mei_me_cl_max_conn(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.max_number_of_connections;
+}
+
+/**
+ * mei_me_cl_fixed - return me client fixed address, if any
+ *
+ * @me_cl: me client
+ *
+ * Return: me client fixed address
+ */
+static inline u8 mei_me_cl_fixed(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.fixed_address;
+}
+
+/**
+ * mei_me_cl_max_len - return me client max msg length
+ *
+ * @me_cl: me client
+ *
+ * Return: me client max msg length
+ */
+static inline u32 mei_me_cl_max_len(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.max_msg_length;
+}
+
/*
* MEI IO Functions
*/
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index c681f6fab342..93027fd96c71 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -27,18 +27,6 @@
#include "mei_hdcp.h"
-static inline u8 mei_get_ddi_index(enum port port)
-{
- switch (port) {
- case PORT_A:
- return MEI_DDI_A;
- case PORT_B ... PORT_F:
- return (u8)port;
- default:
- return MEI_DDI_INVALID_PORT;
- }
-}
-
/**
* mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW
* @dev: device corresponding to the mei_cl_device
@@ -69,7 +57,8 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
session_init_in.port.integrated_port_type = data->port_type;
- session_init_in.port.physical_port = mei_get_ddi_index(data->port);
+ session_init_in.port.physical_port = (u8)data->fw_ddi;
+ session_init_in.port.attached_transcoder = (u8)data->fw_tc;
session_init_in.protocol = data->protocol;
byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
@@ -138,7 +127,8 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
verify_rxcert_in.port.integrated_port_type = data->port_type;
- verify_rxcert_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_rxcert_in.port.physical_port = (u8)data->fw_ddi;
+ verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc;
verify_rxcert_in.cert_rx = rx_cert->cert_rx;
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
@@ -208,7 +198,8 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
send_hprime_in.port.integrated_port_type = data->port_type;
- send_hprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ send_hprime_in.port.physical_port = (u8)data->fw_ddi;
+ send_hprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
@@ -265,7 +256,8 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
pairing_info_in.port.integrated_port_type = data->port_type;
- pairing_info_in.port.physical_port = mei_get_ddi_index(data->port);
+ pairing_info_in.port.physical_port = (u8)data->fw_ddi;
+ pairing_info_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
@@ -323,7 +315,8 @@ mei_hdcp_initiate_locality_check(struct device *dev,
lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
lc_init_in.port.integrated_port_type = data->port_type;
- lc_init_in.port.physical_port = mei_get_ddi_index(data->port);
+ lc_init_in.port.physical_port = (u8)data->fw_ddi;
+ lc_init_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
if (byte < 0) {
@@ -378,7 +371,8 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
verify_lprime_in.port.integrated_port_type = data->port_type;
- verify_lprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_lprime_in.port.physical_port = (u8)data->fw_ddi;
+ verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
@@ -435,7 +429,8 @@ static int mei_hdcp_get_session_key(struct device *dev,
get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
get_skey_in.port.integrated_port_type = data->port_type;
- get_skey_in.port.physical_port = mei_get_ddi_index(data->port);
+ get_skey_in.port.physical_port = (u8)data->fw_ddi;
+ get_skey_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
if (byte < 0) {
@@ -499,7 +494,8 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
verify_repeater_in.port.integrated_port_type = data->port_type;
- verify_repeater_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_repeater_in.port.physical_port = (u8)data->fw_ddi;
+ verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
HDCP_2_2_RXINFO_LEN);
@@ -569,7 +565,8 @@ static int mei_hdcp_verify_mprime(struct device *dev,
WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
verify_mprime_in.port.integrated_port_type = data->port_type;
- verify_mprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_mprime_in.port.physical_port = (u8)data->fw_ddi;
+ verify_mprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
HDCP_2_2_MPRIME_LEN);
@@ -630,7 +627,8 @@ static int mei_hdcp_enable_authentication(struct device *dev,
enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
enable_auth_in.port.integrated_port_type = data->port_type;
- enable_auth_in.port.physical_port = mei_get_ddi_index(data->port);
+ enable_auth_in.port.physical_port = (u8)data->fw_ddi;
+ enable_auth_in.port.attached_transcoder = (u8)data->fw_tc;
enable_auth_in.stream_type = data->streams[0].stream_type;
byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
@@ -684,7 +682,8 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
session_close_in.port.integrated_port_type = data->port_type;
- session_close_in.port.physical_port = mei_get_ddi_index(data->port);
+ session_close_in.port.physical_port = (u8)data->fw_ddi;
+ session_close_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
sizeof(session_close_in));
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index e4b1cd54c853..18ffc773fa18 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -184,8 +184,11 @@ struct hdcp_cmd_no_data {
/* Uniquely identifies the hdcp port being addressed for a given command. */
struct hdcp_port_id {
u8 integrated_port_type;
+ /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */
u8 physical_port;
- u16 reserved;
+ /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */
+ u8 attached_transcoder;
+ u8 reserved;
} __packed;
/*
@@ -362,16 +365,4 @@ struct wired_cmd_repeater_auth_stream_req_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
-
-enum mei_fw_ddi {
- MEI_DDI_INVALID_PORT = 0x0,
-
- MEI_DDI_B = 1,
- MEI_DDI_C,
- MEI_DDI_D,
- MEI_DDI_E,
- MEI_DDI_F,
- MEI_DDI_A = 7,
- MEI_DDI_RANGE_END = MEI_DDI_A,
-};
#endif /* __MEI_HDCP_H__ */
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c09f8bb49495..7cd67fb2365d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,7 @@
#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
+#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
@@ -162,7 +163,8 @@ access to ME_CBD */
#define ME_IS_HRA 0x00000002
/* ME Interrupt Enable HRA - host read only access to ME_IE */
#define ME_IE_HRA 0x00000001
-
+/* TRC control shadow register */
+#define ME_TRC 0x00000030
/* H_HPG_CSR register bits */
#define H_HPG_CSR_PGIHEXR 0x00000001
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index c4f6991d3028..668418d7ea77 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -173,6 +173,27 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
}
/**
+ * mei_me_trc_status - read trc status register
+ *
+ * @dev: mei device
+ * @trc: trc status register value
+ *
+ * Return: 0 on success, error otherwise
+ */
+static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (!hw->cfg->hw_trc_supported)
+ return -EOPNOTSUPP;
+
+ *trc = mei_me_reg_read(hw, ME_TRC);
+ trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
+
+ return 0;
+}
+
+/**
* mei_me_fw_status - read fw status register from pci config space
*
* @dev: mei device
@@ -183,20 +204,19 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
static int mei_me_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
int ret;
int i;
- if (!fw_status)
+ if (!fw_status || !hw->read_fws)
return -EINVAL;
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
- ret = pci_read_config_dword(pdev, fw_src->status[i],
- &fw_status->status[i]);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ ret = hw->read_fws(dev, fw_src->status[i],
+ &fw_status->status[i]);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
@@ -210,19 +230,26 @@ static int mei_me_fw_status(struct mei_device *dev,
* mei_me_hw_config - configure hw dependent settings
*
* @dev: mei device
+ *
+ * Return:
+ * * -EINVAL when read_fws is not set
+ * * 0 on success
+ *
*/
-static void mei_me_hw_config(struct mei_device *dev)
+static int mei_me_hw_config(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr, reg;
+ if (WARN_ON(!hw->read_fws))
+ return -EINVAL;
+
/* Doesn't change in runtime */
hcsr = mei_hcsr_read(dev);
hw->hbuf_depth = (hcsr & H_CBD) >> 24;
reg = 0;
- pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
@@ -233,6 +260,8 @@ static void mei_me_hw_config(struct mei_device *dev)
if (reg & H_D0I3C_I3)
hw->pg_state = MEI_PG_ON;
}
+
+ return 0;
}
/**
@@ -269,7 +298,7 @@ static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
}
/**
- * mei_me_intr_clear - clear and stop interrupts
+ * me_intr_clear - clear and stop interrupts
*
* @dev: the device structure
* @hcsr: supplied hcsr register value
@@ -323,9 +352,9 @@ static void mei_me_intr_disable(struct mei_device *dev)
*/
static void mei_me_synchronize_irq(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct mei_me_hw *hw = to_me_hw(dev);
- synchronize_irq(pdev->irq);
+ synchronize_irq(hw->irq);
}
/**
@@ -1294,6 +1323,7 @@ end:
static const struct mei_hw_ops mei_me_hw_ops = {
+ .trc_status = mei_me_trc_status,
.fw_status = mei_me_fw_status,
.pg_state = mei_me_pg_state,
@@ -1384,6 +1414,9 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
+#define MEI_CFG_TRC \
+ .hw_trc_supported = 1
+
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
MEI_CFG_ICH_HFS,
@@ -1432,6 +1465,14 @@ static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_DMA_128,
};
+/* Tiger Lake and newer devices */
+static const struct mei_cfg mei_me_pch15_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_TRC,
+};
+
/*
* mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1446,6 +1487,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
@@ -1461,19 +1503,19 @@ const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
- * @pdev: The pci device structure
+ * @parent: device associated with physical device (pci/platform)
* @cfg: per device generation config
*
* Return: The mei_device pointer on success, NULL on failure.
*/
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg)
{
struct mei_device *dev;
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
+ dev = devm_kzalloc(parent, sizeof(struct mei_device) +
sizeof(struct mei_me_hw), GFP_KERNEL);
if (!dev)
return NULL;
@@ -1483,7 +1525,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
for (i = 0; i < DMA_DSCR_NUM; i++)
dev->dr_dscr[i].size = cfg->dma_size[i];
- mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
+ mei_device_init(dev, parent, &mei_me_hw_ops);
hw->cfg = cfg;
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 1d8794828cbc..4a8d4dcd5a91 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -21,12 +21,14 @@
* @quirk_probe: device exclusion quirk
* @dma_size: device DMA buffers size
* @fw_ver_supported: is fw version retrievable from FW
+ * @hw_trc_supported: does the hw support trc register
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
bool (*quirk_probe)(struct pci_dev *pdev);
size_t dma_size[DMA_DSCR_NUM];
u32 fw_ver_supported:1;
+ u32 hw_trc_supported:1;
};
@@ -42,16 +44,20 @@ struct mei_cfg {
*
* @cfg: per device generation config and ops
* @mem_addr: io memory address
+ * @irq: irq number
* @pg_state: power gating state
* @d0i3_supported: d0i3 support
* @hbuf_depth: depth of hardware host/write buffer in slots
+ * @read_fws: read FW status register handler
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
+ int irq;
enum mei_pg_state pg_state;
bool d0i3_supported;
u8 hbuf_depth;
+ int (*read_fws)(const struct mei_device *dev, int where, u32 *val);
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
@@ -74,6 +80,7 @@ struct mei_me_hw {
* servers platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -86,12 +93,13 @@ enum mei_cfg_idx {
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH15_CFG,
MEI_ME_NUM_CFG,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg);
int mei_me_pg_enter_sync(struct mei_device *dev);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 5e58656b8e19..785b260b3ae9 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -660,14 +660,16 @@ static int mei_txe_fw_status(struct mei_device *dev,
}
/**
- * mei_txe_hw_config - configure hardware at the start of the devices
+ * mei_txe_hw_config - configure hardware at the start of the device
*
* @dev: the device structure
*
* Configuring the hardware at the start of the device should be done
* only once, at device probe time
+ *
+ * Return: always 0
*/
-static void mei_txe_hw_config(struct mei_device *dev)
+static int mei_txe_hw_config(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
@@ -677,6 +679,8 @@ static void mei_txe_hw_config(struct mei_device *dev)
dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
hw->aliveness, hw->readiness);
+
+ return 0;
}
/**
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index b9fef773e71b..bcee77768b91 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -190,7 +190,9 @@ int mei_start(struct mei_device *dev)
/* acknowledge interrupt and stop interrupts */
mei_clear_interrupts(dev);
- mei_hw_config(dev);
+ ret = mei_hw_config(dev);
+ if (ret)
+ goto err;
dev_dbg(dev->dev, "reset in start the mei device.\n");
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 7310b476323c..f17297f2943d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -533,24 +533,6 @@ out:
}
/**
- * mei_compat_ioctl - the compat IOCTL function
- *
- * @file: pointer to file structure
- * @cmd: ioctl command
- * @data: pointer to mei message structure
- *
- * Return: 0 on success , <0 on error
- */
-#ifdef CONFIG_COMPAT
-static long mei_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long data)
-{
- return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
-}
-#endif
-
-
-/**
* mei_poll - the poll function
*
* @file: pointer to file structure
@@ -701,6 +683,29 @@ static int mei_fasync(int fd, struct file *file, int band)
}
/**
+ * trc_show - mei device trc attribute show method
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf or error
+ */
+static ssize_t trc_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ u32 trc;
+ int ret;
+
+ ret = mei_trc_status(dev, &trc);
+ if (ret)
+ return ret;
+ return sprintf(buf, "%08X\n", trc);
+}
+static DEVICE_ATTR_RO(trc);
+
+/**
* fw_status_show - mei device fw_status attribute show method
*
* @device: device pointer
@@ -887,6 +892,7 @@ static struct attribute *mei_attrs[] = {
&dev_attr_tx_queue_limit.attr,
&dev_attr_fw_ver.attr,
&dev_attr_dev_state.attr,
+ &dev_attr_trc.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
@@ -898,9 +904,7 @@ static const struct file_operations mei_fops = {
.owner = THIS_MODULE,
.read = mei_read,
.unlocked_ioctl = mei_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mei_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = mei_open,
.release = mei_release,
.write = mei_write,
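
Aside (hypothetical usage, not part of the patch): the new trc attribute can be checked from userspace; the /sys/class/mei/mei0/trc path is an assumption (the device index varies), and the read fails on hardware whose config does not set hw_trc_supported, since trc_status() then returns -EOPNOTSUPP.

#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/class/mei/mei0/trc", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("TRC: %s", buf);	/* the 8 hex digits from trc_show() */
	fclose(f);
	return 0;
}
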
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 0f2141178299..76f8ff5ff974 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -260,6 +260,7 @@ struct mei_cl {
* @hw_config : configure hw
*
* @fw_status : get fw status registers
+ * @trc_status : get trc status register
* @pg_state : power gating state of the device
* @pg_in_transition : is device now in pg transition
* @pg_is_enabled : is power gating enabled
@@ -287,9 +288,11 @@ struct mei_hw_ops {
bool (*hw_is_ready)(struct mei_device *dev);
int (*hw_reset)(struct mei_device *dev, bool enable);
int (*hw_start)(struct mei_device *dev);
- void (*hw_config)(struct mei_device *dev);
+ int (*hw_config)(struct mei_device *dev);
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
+ int (*trc_status)(struct mei_device *dev, u32 *trc);
+
enum mei_pg_state (*pg_state)(struct mei_device *dev);
bool (*pg_in_transition)(struct mei_device *dev);
bool (*pg_is_enabled)(struct mei_device *dev);
@@ -614,9 +617,9 @@ void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);
*/
-static inline void mei_hw_config(struct mei_device *dev)
+static inline int mei_hw_config(struct mei_device *dev)
{
- dev->ops->hw_config(dev);
+ return dev->ops->hw_config(dev);
}
static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
@@ -711,6 +714,13 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
return dev->ops->rdbuf_full_slots(dev);
}
+static inline int mei_trc_status(struct mei_device *dev, u32 *trc)
+{
+ if (dev->ops->trc_status)
+ return dev->ops->trc_status(dev, trc);
+ return -EOPNOTSUPP;
+}
+
static inline int mei_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3dca63eddaa0..c845b7e40f26 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,12 +98,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
/* required last entry */
@@ -120,6 +121,13 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
+static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ return pci_read_config_dword(pdev, where, val);
+}
+
/**
* mei_me_quirk_probe - probe for devices that don't have a valid ME interface
*
@@ -191,13 +199,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(pdev, cfg);
+ dev = mei_me_dev_init(&pdev->dev, cfg);
if (!dev) {
err = -ENOMEM;
goto end;
}
hw = to_me_hw(dev);
hw->mem_addr = pcim_iomap_table(pdev)[0];
+ hw->irq = pdev->irq;
+ hw->read_fws = mei_me_read_fws;
pci_enable_msi(pdev);
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 948f45bbf135..b6841ba6d922 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Intel MIC & related support"
-comment "Intel MIC Bus Driver"
-
config INTEL_MIC_BUS
tristate "Intel MIC Bus Driver"
depends on 64BIT && PCI && X86
@@ -18,8 +16,6 @@ config INTEL_MIC_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "SCIF Bus Driver"
-
config SCIF_BUS
tristate "SCIF Bus Driver"
depends on 64BIT && PCI && X86
@@ -35,8 +31,6 @@ config SCIF_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "VOP Bus Driver"
-
config VOP_BUS
tristate "VOP Bus Driver"
help
@@ -51,8 +45,6 @@ config VOP_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Host Driver"
-
config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
depends on 64BIT && PCI && X86
@@ -71,8 +63,6 @@ config INTEL_MIC_HOST
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Card Driver"
-
config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
depends on 64BIT && X86
@@ -90,8 +80,6 @@ config INTEL_MIC_CARD
For more information see
<http://software.intel.com/en-us/mic-developer>.
-comment "SCIF Driver"
-
config SCIF
tristate "SCIF Driver"
depends on 64BIT && PCI && X86 && SCIF_BUS && IOMMU_SUPPORT
@@ -110,8 +98,6 @@ config SCIF
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Coprocessor State Management (COSM) Drivers"
-
config MIC_COSM
tristate "Intel MIC Coprocessor State Management (COSM) Drivers"
depends on 64BIT && PCI && X86 && SCIF
@@ -128,8 +114,6 @@ config MIC_COSM
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "VOP Driver"
-
config VOP
tristate "VOP Driver"
depends on VOP_BUS
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index 97415afd79f3..345bf843a38e 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#ifndef _OCXL_INTERNAL_H_
#define _OCXL_INTERNAL_H_
diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h
index 024f417e7e01..17e21cb2addd 100644
--- a/drivers/misc/ocxl/trace.h
+++ b/drivers/misc/ocxl/trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocxl
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 6e208a060a58..a5e317073d95 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -94,7 +94,7 @@ enum pci_barno {
struct pci_endpoint_test {
struct pci_dev *pdev;
void __iomem *base;
- void __iomem *bar[6];
+ void __iomem *bar[PCI_STD_NUM_BARS];
struct completion irq_raised;
int last_irq;
int num_irqs;
@@ -687,7 +687,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!pci_endpoint_test_request_irq(test))
goto err_disable_irq;
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
base = pci_ioremap_bar(pdev, bar);
if (!base) {
@@ -740,7 +740,7 @@ err_ida_remove:
ida_simple_remove(&pci_endpoint_test_ida, id);
err_iounmap:
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
@@ -771,7 +771,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
misc_deregister(&test->miscdev);
kfree(misc_device->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 3a8d76d1ccae..2817f4751306 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -119,7 +119,7 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
"cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
"tfh_write_restart", "tgh_invalidate"};
- seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+ seq_puts(s, "#id count aver-clks max-clks\n");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
@@ -165,8 +165,7 @@ static int cch_seq_show(struct seq_file *file, void *data)
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
- seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
- "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
+ seq_puts(file, "# gid bid ctx# asid pid cbrs dsbytes mode\n");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
@@ -191,10 +190,8 @@ static int gru_seq_show(struct seq_file *file, void *data)
struct gru_state *gru = GID_TO_GRU(gid);
if (gid == 0) {
- seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "gid", "nid",
- "ctx", "cbr", "dsr", "ctx", "cbr", "dsr");
- seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "", "", "busy",
- "busy", "busy", "free", "free", "free");
+ seq_puts(file, "# gid nid ctx cbr dsr ctx cbr dsr\n");
+ seq_puts(file, "# busy busy busy free free free\n");
}
if (gru) {
ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index f30448bf3a63..6c1a23cb3e8c 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -340,8 +340,6 @@ static const struct of_device_id sram_dt_ids[] = {
static int sram_probe(struct platform_device *pdev)
{
struct sram_dev *sram;
- struct resource *res;
- size_t size;
int ret;
int (*init_func)(void);
@@ -351,25 +349,14 @@ static int sram_probe(struct platform_device *pdev)
sram->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(sram->dev, "found no memory resource\n");
- return -EINVAL;
- }
-
- size = resource_size(res);
-
- if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
- dev_err(sram->dev, "could not request region for resource\n");
- return -EBUSY;
- }
-
if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
- sram->virt_base = devm_ioremap(sram->dev, res->start, size);
+ sram->virt_base = devm_platform_ioremap_resource(pdev, 0);
else
- sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
- if (!sram->virt_base)
- return -ENOMEM;
+ sram->virt_base = devm_platform_ioremap_resource_wc(pdev, 0);
+ if (IS_ERR(sram->virt_base)) {
+ dev_err(&pdev->dev, "could not map SRAM registers\n");
+ return PTR_ERR(sram->virt_base);
+ }
sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
NUMA_NO_NODE, NULL);
@@ -382,7 +369,8 @@ static int sram_probe(struct platform_device *pdev)
else
clk_prepare_enable(sram->clk);
- ret = sram_reserve_regions(sram, res);
+ ret = sram_reserve_regions(sram,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (ret)
goto err_disable_clk;
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 7d9e23aa0b92..2ae9948a91e1 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -708,7 +708,6 @@ EXPORT_SYMBOL_GPL(st_unregister);
*/
static int st_tty_open(struct tty_struct *tty)
{
- int err = 0;
struct st_data_s *st_gdata;
pr_info("%s ", __func__);
@@ -731,7 +730,8 @@ static int st_tty_open(struct tty_struct *tty)
*/
st_kim_complete(st_gdata->kim_data);
pr_debug("done %s", __func__);
- return err;
+
+ return 0;
}
static void st_tty_close(struct tty_struct *tty)
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index ff3c396146ff..ce16d6b99295 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -968,7 +968,7 @@ static const struct file_operations vmuser_fops = {
.release = vmci_host_close,
.poll = vmci_host_poll,
.unlocked_ioctl = vmci_host_unlocked_ioctl,
- .compat_ioctl = vmci_host_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice vmci_host_miscdev = {
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2c71a434c915..95b41c0891d0 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -408,38 +408,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
return 0;
}
-static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
- u32 retries_max)
-{
- int err;
- u32 retry_count = 0;
-
- if (!status || !retries_max)
- return -EINVAL;
-
- do {
- err = __mmc_send_status(card, status, 5);
- if (err)
- break;
-
- if (!R1_STATUS(*status) &&
- (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
- break; /* RPMB programming operation complete */
-
- /*
- * Rechedule to give the MMC device a chance to continue
- * processing the previous command without being polled too
- * frequently.
- */
- usleep_range(1000, 5000);
- } while (++retry_count < retries_max);
-
- if (retry_count == retries_max)
- err = -EPERM;
-
- return err;
-}
-
static int ioctl_do_sanitize(struct mmc_card *card)
{
int err;
@@ -468,6 +436,58 @@ out:
return err;
}
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+ u32 *resp_errs)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ int err = 0;
+ u32 status;
+
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ err = __mmc_send_status(card, &status, 5);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "error %d requesting status\n", err);
+ return err;
+ }
+
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
+
+ /*
+ * Timeout if the device never becomes ready for data and never
+ * leaves the program state.
+ */
+ if (done) {
+ dev_err(mmc_dev(card->host),
+ "Card stuck in wrong state! %s status: %#x\n",
+ __func__, status);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!mmc_blk_in_tran_state(status));
+
+ return err;
+}
+
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
@@ -477,7 +497,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct scatterlist sg;
int err;
unsigned int target_part;
- u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
@@ -611,16 +630,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (idata->rpmb) {
+ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
/*
- * Ensure RPMB command has completed by polling CMD13
+ * Ensure RPMB/R1B command has completed by polling CMD13
* "Send Status".
*/
- err = ioctl_rpmb_card_status_poll(card, &status, 5);
- if (err)
- dev_err(mmc_dev(card->host),
- "%s: Card Status=0x%08X, error %d\n",
- __func__, status, err);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
}
return err;
@@ -970,58 +985,6 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
return ms;
}
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
- /*
- * Some cards mishandle the status bits, so make sure to check both the
- * busy indication and the card state.
- */
- return status & R1_READY_FOR_DATA &&
- (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, u32 *resp_errs)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
- int err = 0;
- u32 status;
-
- do {
- bool done = time_after(jiffies, timeout);
-
- err = __mmc_send_status(card, &status, 5);
- if (err) {
- pr_err("%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- return err;
- }
-
- /* Accumulate any response error bits seen */
- if (resp_errs)
- *resp_errs |= status;
-
- /*
- * Timeout if the device never becomes ready for data and never
- * leaves the program state.
- */
- if (done) {
- pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
- mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__, status);
- return -ETIMEDOUT;
- }
-
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!mmc_blk_in_tran_state(status));
-
- return err;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1671,7 +1634,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
mmc_blk_send_stop(card, timeout);
- err = card_busy_detect(card, timeout, req, NULL);
+ err = card_busy_detect(card, timeout, NULL);
mmc_retune_release(card->host);
@@ -1895,7 +1858,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
return 0;
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
/*
* Do not assume data transferred correctly if there are any error bits
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 221127324709..abf8f5eb0a1c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1469,8 +1469,7 @@ void mmc_detach_bus(struct mmc_host *host)
mmc_bus_put(host);
}
-static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
- bool cd_irq)
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
{
/*
* If the device is configured as wakeup, we prevent a new sleep for
@@ -2129,7 +2128,7 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->hw_reset(host);
mmc_bus_put(host);
- if (ret)
+ if (ret < 0)
pr_warn("%s: tried to HW reset card, got error %d\n",
mmc_hostname(host), ret);
@@ -2297,11 +2296,8 @@ void mmc_rescan(struct work_struct *work)
mmc_bus_get(host);
- /*
- * if there is a _removable_ card registered, check whether it is
- * still present
- */
- if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
+ /* Verify that a registered card is functional, else remove it. */
+ if (host->bus_ops && !host->bus_dead)
host->bus_ops->detect(host);
host->detect_change = 0;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 328c78dbee66..575ac0257af2 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -70,6 +70,8 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+ bool cd_irq);
int _mmc_detect_card_removed(struct mmc_host *host);
int mmc_detect_card_removed(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c8804895595f..f6912ded652d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -297,7 +297,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
}
}
-static void mmc_part_add(struct mmc_card *card, unsigned int size,
+static void mmc_part_add(struct mmc_card *card, u64 size,
unsigned int part_cfg, char *name, int idx, bool ro,
int area_type)
{
@@ -313,7 +313,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
int idx;
u8 hc_erase_grp_sz, hc_wp_grp_sz;
- unsigned int part_size;
+ u64 part_size;
/*
* General purpose partition feature support --
@@ -343,8 +343,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
<< 8) +
ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
- part_size *= (size_t)(hc_erase_grp_sz *
- hc_wp_grp_sz);
+ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
"gp%d", idx, false,
@@ -362,7 +361,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
int err = 0, idx;
- unsigned int part_size;
+ u64 part_size;
struct device_node *np;
bool broken_hpi = false;
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 2d2d9ea8be4f..3dba15bccce2 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -119,7 +119,14 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
END_FIXUP
};
+
static const struct mmc_fixup sdio_fixup_methods[] = {
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 26cabd53ddc5..ebb387aa5158 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1048,9 +1048,35 @@ static int mmc_sdio_runtime_resume(struct mmc_host *host)
return ret;
}
+/*
+ * SDIO HW reset
+ *
+ * Returns 0 if the HW reset was executed synchronously, returns 1 if the HW
+ * reset was asynchronously scheduled, else a negative error code.
+ */
static int mmc_sdio_hw_reset(struct mmc_host *host)
{
- mmc_power_cycle(host, host->card->ocr);
+ struct mmc_card *card = host->card;
+
+ /*
+ * In case the card is shared among multiple func drivers, reset the
+ * card through a rescan work. In this way it will be removed and
+ * re-detected, thus all func drivers become informed about it.
+ */
+ if (atomic_read(&card->sdio_funcs_probed) > 1) {
+ if (mmc_card_removed(card))
+ return 1;
+ host->rescan_entered = 0;
+ mmc_card_set_removed(card);
+ _mmc_detect_change(host, 0, false);
+ return 1;
+ }
+
+ /*
+ * Only a single func driver has been probed, so let's skip the heavy
+ * hotplug dance above and execute the reset immediately.
+ */
+ mmc_power_cycle(host, card->ocr);
return mmc_sdio_reinit_card(host);
}
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 2963e6542958..3cc928282af7 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -138,6 +138,8 @@ static int sdio_bus_probe(struct device *dev)
if (ret)
return ret;
+ atomic_inc(&func->card->sdio_funcs_probed);
+
/* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@@ -153,7 +155,10 @@ static int sdio_bus_probe(struct device *dev)
/* Set the default block size so the driver is sure it's something
* sensible. */
sdio_claim_host(func);
- ret = sdio_set_block_size(func, 0);
+ if (mmc_card_removed(func->card))
+ ret = -ENOMEDIUM;
+ else
+ ret = sdio_set_block_size(func, 0);
sdio_release_host(func);
if (ret)
goto disable_runtimepm;
@@ -165,6 +170,7 @@ static int sdio_bus_probe(struct device *dev)
return 0;
disable_runtimepm:
+ atomic_dec(&func->card->sdio_funcs_probed);
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
dev_pm_domain_detach(dev, false);
@@ -181,6 +187,7 @@ static int sdio_bus_remove(struct device *dev)
pm_runtime_get_sync(dev);
drv->remove(func);
+ atomic_dec(&func->card->sdio_funcs_probed);
if (func->irq_handler) {
pr_warn("WARNING: driver %s did not remove its interrupt handler!\n",
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 49ea02c467bf..d06b2dfe3c95 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -159,6 +159,7 @@ config MMC_SDHCI_OF_ASPEED
tristate "SDHCI OF support for the ASPEED SDHCI controller"
depends on MMC_SDHCI_PLTFM
depends on OF && OF_ADDRESS
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the ASPEED Secure Digital Host Controller Interface.
@@ -368,6 +369,17 @@ config MMC_SDHCI_F_SDH30
If unsure, say N.
+config MMC_SDHCI_MILBEAUT
+ tristate "SDHCI support for Socionext Milbeaut Serieas using F_SDH30"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ needed by Milbeaut SoCs for MMC / SD / SDIO support.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_IPROC
tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
@@ -1011,6 +1023,7 @@ config MMC_SDHCI_AM654
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's AM654 SOCs. The controller supports
@@ -1019,3 +1032,11 @@ config MMC_SDHCI_AM654
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_OWL
+ tristate "Actions Semi Owl SD/MMC Host Controller support"
+ depends on HAS_DMA
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ help
+ This selects support for the SD/MMC Host Controller on
+ Actions Semi Owl SoCs.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 11c4598e91d9..21d9089e5eda 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o
+obj-$(CONFIG_MMC_SDHCI_MILBEAUT) += sdhci-milbeaut.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_SDHCI_AM654) += sdhci_am654.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -73,6 +74,7 @@ obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o
obj-$(CONFIG_MMC_BCM2835) += bcm2835.o
+obj-$(CONFIG_MMC_OWL) += owl-mmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index c26fbe5f2222..6f065bb5c55a 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -583,11 +583,11 @@ static void atmci_init_debugfs(struct atmel_mci_slot *slot)
debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops);
debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
- debugfs_create_x32("pending_events", S_IRUSR, root,
- (u32 *)&host->pending_events);
- debugfs_create_x32("completed_events", S_IRUSR, root,
- (u32 *)&host->completed_events);
+ debugfs_create_u32("state", S_IRUSR, root, &host->state);
+ debugfs_create_xul("pending_events", S_IRUSR, root,
+ &host->pending_events);
+ debugfs_create_xul("completed_events", S_IRUSR, root,
+ &host->completed_events);
}
#if defined(CONFIG_OF)
@@ -2347,8 +2347,7 @@ static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
static int atmci_configure_dma(struct atmel_mci *host)
{
- host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
- "rxtx");
+ host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
if (PTR_ERR(host->dma.chan) == -ENODEV) {
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 148414d7f0c9..99f61fd2a658 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1357,7 +1357,6 @@ static int bcm2835_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct clk *clk;
- struct resource *iomem;
struct bcm2835_host *host;
struct mmc_host *mmc;
const __be32 *regaddr_p;
@@ -1373,8 +1372,7 @@ static int bcm2835_probe(struct platform_device *pdev)
host->pdev = pdev;
spin_lock_init(&host->lock);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(dev, iomem);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 22aded1065ae..916746c6c2c7 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -148,7 +148,6 @@ static int octeon_mmc_probe(struct platform_device *pdev)
{
struct device_node *cn, *node = pdev->dev.of_node;
struct cvm_mmc_host *host;
- struct resource *res;
void __iomem *base;
int mmc_irq[9];
int i, ret = 0;
@@ -205,23 +204,13 @@ static int octeon_mmc_probe(struct platform_device *pdev)
host->last_slot = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Platform resource[0] is missing\n");
- return -ENXIO;
- }
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
host->base = (void __iomem *)base;
host->reg_off = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "Platform resource[1] is missing\n");
- return -EINVAL;
- }
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
host->dma_base = (void __iomem *)base;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 79c55c7b4afd..fc9d4d000f97 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -176,11 +176,11 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
- debugfs_create_x32("pending_events", S_IRUSR, root,
- (u32 *)&host->pending_events);
- debugfs_create_x32("completed_events", S_IRUSR, root,
- (u32 *)&host->completed_events);
+ debugfs_create_u32("state", S_IRUSR, root, &host->state);
+ debugfs_create_xul("pending_events", S_IRUSR, root,
+ &host->pending_events);
+ debugfs_create_xul("completed_events", S_IRUSR, root,
+ &host->completed_events);
}
#endif /* defined(CONFIG_DEBUG_FS) */
@@ -3441,8 +3441,8 @@ int dw_mci_runtime_resume(struct device *dev)
* Restore the initial value at FIFOTH register
* And Invalidate the prev_blksz with zero
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
- host->prev_blksz = 0;
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index f816c06ef916..78383f60a3dc 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -41,6 +41,7 @@
#define JZ_REG_MMC_RESP_FIFO 0x34
#define JZ_REG_MMC_RXFIFO 0x38
#define JZ_REG_MMC_TXFIFO 0x3C
+#define JZ_REG_MMC_LPM 0x40
#define JZ_REG_MMC_DMAC 0x44
#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
@@ -77,6 +78,8 @@
#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
+#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
+#define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
@@ -98,12 +101,20 @@
#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)
+#define JZ_MMC_LPM_DRV_RISING BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
+#define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
+#define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
+
#define JZ_MMC_CLK_RATE 24000000
enum jz4740_mmc_version {
JZ_MMC_JZ4740,
JZ_MMC_JZ4725B,
+ JZ_MMC_JZ4760,
JZ_MMC_JZ4780,
+ JZ_MMC_X1000,
};
enum jz4740_mmc_state {
@@ -852,6 +863,22 @@ static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
}
writew(div, host->base + JZ_REG_MMC_CLKRT);
+
+ if (real_rate > 25000000) {
+ if (host->version >= JZ_MMC_X1000) {
+ writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
+ JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
+ JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ } else if (host->version >= JZ_MMC_JZ4760) {
+ writel(JZ_MMC_LPM_DRV_RISING |
+ JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ } else if (host->version >= JZ_MMC_JZ4725B)
+ writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ }
+
return real_rate;
}
@@ -895,11 +922,16 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
- host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
break;
case MMC_BUS_WIDTH_4:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
+ host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
+ break;
default:
break;
}
@@ -924,7 +956,9 @@ static const struct mmc_host_ops jz4740_mmc_ops = {
static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
+ { .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
+ { .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
@@ -1025,11 +1059,12 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
goto err_release_dma;
}
- dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
+ dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
host->use_dma ? "DMA" : "PIO",
- (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+ (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
+ ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
return 0;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 66e354d51ee9..74c6cfbf9172 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1421,7 +1421,7 @@ static int mmc_spi_probe(struct spi_device *spi)
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
- status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1, NULL);
+ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status) {
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c37e70dbe250..40e72c30ea84 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -44,6 +44,7 @@
#define DRIVER_NAME "mmci-pl18x"
static void mmci_variant_init(struct mmci_host *host);
+static void ux500_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);
static unsigned int fmax = 515633;
@@ -184,7 +185,7 @@ static struct variant_data variant_ux500 = {
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
- .init = mmci_variant_init,
+ .init = ux500_variant_init,
};
static struct variant_data variant_ux500v2 = {
@@ -261,6 +262,10 @@ static struct variant_data variant_stm32_sdmmc = {
.datalength_bits = 25,
.datactrl_blocksz = 14,
.stm32_idmabsize_mask = GENMASK(12, 5),
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
.init = sdmmc_variant_init,
};
@@ -419,7 +424,7 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
mmci_write_clkreg(host, clk);
}
-void mmci_dma_release(struct mmci_host *host)
+static void mmci_dma_release(struct mmci_host *host)
{
if (host->ops && host->ops->dma_release)
host->ops->dma_release(host);
@@ -427,7 +432,7 @@ void mmci_dma_release(struct mmci_host *host)
host->use_dma = false;
}
-void mmci_dma_setup(struct mmci_host *host)
+static void mmci_dma_setup(struct mmci_host *host)
{
if (!host->ops || !host->ops->dma_setup)
return;
@@ -462,7 +467,7 @@ static int mmci_validate_data(struct mmci_host *host,
return 0;
}
-int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
int err;
@@ -478,7 +483,7 @@ int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
return err;
}
-void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
int err)
{
if (host->ops && host->ops->unprep_data)
@@ -487,7 +492,7 @@ void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
data->host_cookie = 0;
}
-void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
@@ -495,7 +500,7 @@ void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
host->ops->get_next_data(host, data);
}
-int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
struct mmc_data *data = host->data;
int ret;
@@ -530,7 +535,7 @@ int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
return 0;
}
-void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
if (!host->use_dma)
return;
@@ -539,7 +544,7 @@ void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
host->ops->dma_finalize(host, data);
}
-void mmci_dma_error(struct mmci_host *host)
+static void mmci_dma_error(struct mmci_host *host)
{
if (!host->use_dma)
return;
@@ -610,6 +615,67 @@ static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
+static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+
+ /*
+ * Before unmasking for the busy end IRQ, confirm that the
+ * command was sent successfully. To keep track of having a
+ * command in-progress, waiting for busy signaling to end,
+ * store the status in host->busy_status.
+ *
+ * Note that, the card may need a couple of clock cycles before
+ * it starts signaling busy on DAT0, hence re-read the
+ * MMCISTATUS register here, to allow the busy bit to be set.
+ * Potentially we may even need to poll the register for a
+ * while, to allow it to be set, but tests indicate that it
+ * isn't needed.
+ */
+ if (!host->busy_status && !(status & err_msk) &&
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+
+ host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent, then bail out if busy status is set and wait for the
+ * busy end IRQ.
+ *
+ * Note that, the HW triggers an IRQ on both edges while
+ * monitoring DAT0 for busy completion, but there is only one
+ * status bit in MMCISTATUS for the busy state. Therefore
+ * both the start and the end interrupts need to be cleared,
+ * one after the other. So, clear the busy start IRQ here.
+ */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag)) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent and the busy bit isn't set, it means we have received
+ * the busy end IRQ. Clear and mask the IRQ, then continue to
+ * process the command.
+ */
+ if (host->busy_status) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
@@ -948,14 +1014,21 @@ static struct mmci_host_ops mmci_variant_ops = {
};
#endif
-void mmci_variant_init(struct mmci_host *host)
+static void mmci_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+}
+
+static void ux500_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
}
-void ux500v2_variant_init(struct mmci_host *host)
+static void ux500v2_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
@@ -1075,6 +1148,7 @@ static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
void __iomem *base = host->base;
+ unsigned long long clks;
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
cmd->opcode, cmd->arg, cmd->flags);
@@ -1097,6 +1171,16 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
else
c |= host->variant->cmdreg_srsp;
}
+
+ if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
+ if (!cmd->busy_timeout)
+ cmd->busy_timeout = 10 * MSEC_PER_SEC;
+
+ clks = (unsigned long long)cmd->busy_timeout * host->cclk;
+ do_div(clks, MSEC_PER_SEC);
+ writel_relaxed(clks, host->base + MMCIDATATIMER);
+ }
+
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
@@ -1201,6 +1285,7 @@ static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
+ u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
void __iomem *base = host->base;
bool sbc, busy_resp;
@@ -1215,74 +1300,17 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
* handling. Note that we tag on any latent IRQs postponed
* due to waiting for busy status.
*/
- if (!((status|host->busy_status) &
- (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
+ if (host->variant->busy_timeout && busy_resp)
+ err_msk |= MCI_DATATIMEOUT;
+
+ if (!((status | host->busy_status) &
+ (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
return;
/* Handle busy detection on DAT0 if the variant supports it. */
- if (busy_resp && host->variant->busy_detect) {
-
- /*
- * Before unmasking for the busy end IRQ, confirm that the
- * command was sent successfully. To keep track of having a
- * command in-progress, waiting for busy signaling to end,
- * store the status in host->busy_status.
- *
- * Note that, the card may need a couple of clock cycles before
- * it starts signaling busy on DAT0, hence re-read the
- * MMCISTATUS register here, to allow the busy bit to be set.
- * Potentially we may even need to poll the register for a
- * while, to allow it to be set, but tests indicates that it
- * isn't needed.
- */
- if (!host->busy_status &&
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
-
- writel(readl(base + MMCIMASK0) |
- host->variant->busy_detect_mask,
- base + MMCIMASK0);
-
- host->busy_status =
- status & (MCI_CMDSENT|MCI_CMDRESPEND);
- return;
- }
-
- /*
- * If there is a command in-progress that has been successfully
- * sent, then bail out if busy status is set and wait for the
- * busy end IRQ.
- *
- * Note that, the HW triggers an IRQ on both edges while
- * monitoring DAT0 for busy completion, but there is only one
- * status bit in MMCISTATUS for the busy state. Therefore
- * both the start and the end interrupts needs to be cleared,
- * one after the other. So, clear the busy start IRQ here.
- */
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag)) {
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
+ if (busy_resp && host->variant->busy_detect)
+ if (!host->ops->busy_complete(host, status, err_msk))
return;
- }
-
- /*
- * If there is a command in-progress that has been successfully
- * sent and the busy bit isn't set, it means we have received
- * the busy end IRQ. Clear and mask the IRQ, then continue to
- * process the command.
- */
- if (host->busy_status) {
-
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
-
- writel(readl(base + MMCIMASK0) &
- ~host->variant->busy_detect_mask,
- base + MMCIMASK0);
- host->busy_status = 0;
- }
- }
host->cmd = NULL;
@@ -1290,6 +1318,9 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
cmd->error = -EILSEQ;
+ } else if (host->variant->busy_timeout && busy_resp &&
+ status & MCI_DATATIMEOUT) {
+ cmd->error = -ETIMEDOUT;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1583,6 +1614,20 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ u32 max_busy_timeout = 0;
+
+ if (!host->variant->busy_detect)
+ return;
+
+ if (host->variant->busy_timeout && mmc->actual_clock)
+ max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
+
+ mmc->max_busy_timeout = max_busy_timeout;
+}
+
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
@@ -1687,6 +1732,8 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
mmci_set_clkreg(host, ios->clock);
+ mmci_set_max_busy_timeout(mmc);
+
if (host->ops && host->ops->set_pwrreg)
host->ops->set_pwrreg(host, pwr);
else
@@ -1957,7 +2004,6 @@ static int mmci_probe(struct amba_device *dev,
mmci_write_datactrlreg(host,
host->variant->busy_dpsm_flag);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- mmc->max_busy_timeout = 0;
}
/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 833236ecb31e..158e1231aa23 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -164,6 +164,7 @@
#define MCI_ST_CARDBUSY (1 << 24)
/* Extended status bits for the STM32 variants */
#define MCI_STM32_BUSYD0 BIT(20)
+#define MCI_STM32_BUSYD0END BIT(21)
#define MMCICLEAR 0x038
#define MCI_CMDCRCFAILCLR (1 << 0)
@@ -287,6 +288,8 @@ struct mmci_host;
* @signal_direction: input/out direction of bus signals can be indicated
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
* @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_timeout: true if the variant starts the data timer when the DPSM
+ * enters the Wait_R or Busy state.
* @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
* @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
* indicating that the card is busy
@@ -333,6 +336,7 @@ struct variant_data {
u8 signal_direction:1;
u8 pwrreg_clkgate:1;
u8 busy_detect:1;
+ u8 busy_timeout:1;
u32 busy_dpsm_flag;
u32 busy_detect_flag;
u32 busy_detect_mask;
@@ -366,6 +370,7 @@ struct mmci_host_ops {
void (*dma_error)(struct mmci_host *host);
void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
+ bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
};
struct mmci_host {
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 8e83ae6920ae..a4f7e8e689d3 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -25,8 +25,8 @@ struct sdmmc_priv {
void *sg_cpu;
};
-int sdmmc_idma_validate_data(struct mmci_host *host,
- struct mmc_data *data)
+static int sdmmc_idma_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
{
struct scatterlist *sg;
int i;
@@ -282,6 +282,47 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
return datactrl;
}
+static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+ u32 busy_d0, busy_d0end, mask, sdmmc_status;
+
+ mask = readl_relaxed(base + MMCIMASK0);
+ sdmmc_status = readl_relaxed(base + MMCISTATUS);
+ busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
+ busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;
+
+ /* complete if there is an error or busy_d0end */
+ if ((status & err_msk) || busy_d0end)
+ goto complete;
+
+ /*
+ * On response, the busy signaling is reflected in the BUSYD0 flag.
+ * If busy_d0 is in progress, we must activate the busyd0end interrupt
+ * to wait for its completion. Otherwise this request has no busy step.
+ */
+ if (busy_d0) {
+ if (!host->busy_status) {
+ writel_relaxed(mask | host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_status = status &
+ (MCI_CMDSENT | MCI_CMDRESPEND);
+ }
+ return false;
+ }
+
+complete:
+ if (host->busy_status) {
+ writel_relaxed(mask & ~host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ writel_relaxed(host->variant->busy_detect_mask,
+ base + MMCICLEAR);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
static struct mmci_host_ops sdmmc_variant_ops = {
.validate_data = sdmmc_idma_validate_data,
.prep_data = sdmmc_idma_prep_data,
@@ -292,6 +333,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_finalize = sdmmc_idma_finalize,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
+ .busy_complete = sdmmc_busy_complete,
};
void sdmmc_variant_init(struct mmci_host *host)
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index a0670e9cd012..fc6b9cf27d0b 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -608,8 +608,8 @@ static int moxart_probe(struct platform_device *pdev)
host->timeout = msecs_to_jiffies(1000);
host->sysclk = clk_get_rate(clk);
host->fifo_width = readl(host->base + REG_FEATURE) << 2;
- host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx");
- host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx");
+ host->dma_chan_tx = dma_request_chan(dev, "tx");
+ host->dma_chan_rx = dma_request_chan(dev, "rx");
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 952fa4063ff8..767e964ca5a2 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1510,8 +1510,35 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- if (mmc_pdata(host)->init_card)
- mmc_pdata(host)->init_card(card);
+ if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ struct device_node *np = mmc_dev(mmc)->of_node;
+
+ /*
+ * REVISIT: should be moved to sdio core and made more
+ * general e.g. by expanding the DT bindings of child nodes
+ * to provide a mechanism for passing this information:
+ * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ */
+
+ np = of_get_compatible_child(np, "ti,wl1251");
+ if (np) {
+ /*
+ * We have TI wl1251 attached to MMC3. Pass this
+ * information to the SDIO core because it can't be
+ * probed by normal methods.
+ */
+
+ dev_info(host->dev, "found wl1251\n");
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 24000000;
+ card->ocr = 0x80;
+ of_node_put(np);
+ }
+ }
}
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
new file mode 100644
index 000000000000..771e3d00f1bb
--- /dev/null
+++ b/drivers/mmc/host/owl-mmc.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Actions Semi Owl SoCs SD/MMC driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * TODO: SDIO support
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+/*
+ * SDC registers
+ */
+#define OWL_REG_SD_EN 0x0000
+#define OWL_REG_SD_CTL 0x0004
+#define OWL_REG_SD_STATE 0x0008
+#define OWL_REG_SD_CMD 0x000c
+#define OWL_REG_SD_ARG 0x0010
+#define OWL_REG_SD_RSPBUF0 0x0014
+#define OWL_REG_SD_RSPBUF1 0x0018
+#define OWL_REG_SD_RSPBUF2 0x001c
+#define OWL_REG_SD_RSPBUF3 0x0020
+#define OWL_REG_SD_RSPBUF4 0x0024
+#define OWL_REG_SD_DAT 0x0028
+#define OWL_REG_SD_BLK_SIZE 0x002c
+#define OWL_REG_SD_BLK_NUM 0x0030
+#define OWL_REG_SD_BUF_SIZE 0x0034
+
+/* SD_EN Bits */
+#define OWL_SD_EN_RANE BIT(31)
+#define OWL_SD_EN_RAN_SEED(x) (((x) & 0x3f) << 24)
+#define OWL_SD_EN_S18EN BIT(12)
+#define OWL_SD_EN_RESE BIT(10)
+#define OWL_SD_EN_DAT1_S BIT(9)
+#define OWL_SD_EN_CLK_S BIT(8)
+#define OWL_SD_ENABLE BIT(7)
+#define OWL_SD_EN_BSEL BIT(6)
+#define OWL_SD_EN_SDIOEN BIT(3)
+#define OWL_SD_EN_DDREN BIT(2)
+#define OWL_SD_EN_DATAWID(x) (((x) & 0x3) << 0)
+
+/* SD_CTL Bits */
+#define OWL_SD_CTL_TOUTEN BIT(31)
+#define OWL_SD_CTL_TOUTCNT(x) (((x) & 0x7f) << 24)
+#define OWL_SD_CTL_DELAY_MSK GENMASK(23, 16)
+#define OWL_SD_CTL_RDELAY(x) (((x) & 0xf) << 20)
+#define OWL_SD_CTL_WDELAY(x) (((x) & 0xf) << 16)
+#define OWL_SD_CTL_CMDLEN BIT(13)
+#define OWL_SD_CTL_SCC BIT(12)
+#define OWL_SD_CTL_TCN(x) (((x) & 0xf) << 8)
+#define OWL_SD_CTL_TS BIT(7)
+#define OWL_SD_CTL_LBE BIT(6)
+#define OWL_SD_CTL_C7EN BIT(5)
+#define OWL_SD_CTL_TM(x) (((x) & 0xf) << 0)
+
+#define OWL_SD_DELAY_LOW_CLK 0x0f
+#define OWL_SD_DELAY_MID_CLK 0x0a
+#define OWL_SD_DELAY_HIGH_CLK 0x09
+#define OWL_SD_RDELAY_DDR50 0x0a
+#define OWL_SD_WDELAY_DDR50 0x08
+
+/* SD_STATE Bits */
+#define OWL_SD_STATE_DAT1BS BIT(18)
+#define OWL_SD_STATE_SDIOB_P BIT(17)
+#define OWL_SD_STATE_SDIOB_EN BIT(16)
+#define OWL_SD_STATE_TOUTE BIT(15)
+#define OWL_SD_STATE_BAEP BIT(14)
+#define OWL_SD_STATE_MEMRDY BIT(12)
+#define OWL_SD_STATE_CMDS BIT(11)
+#define OWL_SD_STATE_DAT1AS BIT(10)
+#define OWL_SD_STATE_SDIOA_P BIT(9)
+#define OWL_SD_STATE_SDIOA_EN BIT(8)
+#define OWL_SD_STATE_DAT0S BIT(7)
+#define OWL_SD_STATE_TEIE BIT(6)
+#define OWL_SD_STATE_TEI BIT(5)
+#define OWL_SD_STATE_CLNR BIT(4)
+#define OWL_SD_STATE_CLC BIT(3)
+#define OWL_SD_STATE_WC16ER BIT(2)
+#define OWL_SD_STATE_RC16ER BIT(1)
+#define OWL_SD_STATE_CRC7ER BIT(0)
+
+struct owl_mmc_host {
+ struct device *dev;
+ struct reset_control *reset;
+ void __iomem *base;
+ struct clk *clk;
+ struct completion sdc_complete;
+ spinlock_t lock;
+ int irq;
+ u32 clock;
+ bool ddr_50;
+
+ enum dma_data_direction dma_dir;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config dma_cfg;
+ struct completion dma_complete;
+
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+};
+
+static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
+{
+ unsigned int regval;
+
+ regval = readl(reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, reg);
+}
+
+static irqreturn_t owl_irq_handler(int irq, void *devid)
+{
+ struct owl_mmc_host *owl_host = devid;
+ unsigned long flags;
+ u32 state;
+
+ spin_lock_irqsave(&owl_host->lock, flags);
+
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ if (state & OWL_SD_STATE_TEI) {
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ state |= OWL_SD_STATE_TEI;
+ writel(state, owl_host->base + OWL_REG_SD_STATE);
+ complete(&owl_host->sdc_complete);
+ }
+
+ spin_unlock_irqrestore(&owl_host->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
+{
+ struct mmc_request *mrq = owl_host->mrq;
+ struct mmc_data *data = mrq->data;
+
+ /* Should never be NULL */
+ WARN_ON(!mrq);
+
+ owl_host->mrq = NULL;
+
+ if (data)
+ dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
+ owl_host->dma_dir);
+
+ /* Finally finish request */
+ mmc_request_done(owl_host->mmc, mrq);
+}
+
+static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
+ struct mmc_command *cmd,
+ struct mmc_data *data)
+{
+ u32 mode, state, resp[2];
+ u32 cmd_rsp_mask = 0;
+
+ init_completion(&owl_host->sdc_complete);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ mode = OWL_SD_CTL_TM(0);
+ break;
+
+ case MMC_RSP_R1:
+ if (data) {
+ if (data->flags & MMC_DATA_READ)
+ mode = OWL_SD_CTL_TM(4);
+ else
+ mode = OWL_SD_CTL_TM(5);
+ } else {
+ mode = OWL_SD_CTL_TM(1);
+ }
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+
+ break;
+
+ case MMC_RSP_R1B:
+ mode = OWL_SD_CTL_TM(3);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R2:
+ mode = OWL_SD_CTL_TM(2);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R3:
+ mode = OWL_SD_CTL_TM(1);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR;
+ break;
+
+ default:
+ dev_warn(owl_host->dev, "Unknown MMC command\n");
+ cmd->error = -EINVAL;
+ return;
+ }
+
+ /* Keep current WDELAY and RDELAY */
+ mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
+
+ /* Start to send corresponding command type */
+ writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
+ writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);
+
+ /* Set LBE to send clk at the end of last read block */
+ if (data) {
+ mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000);
+ } else {
+ mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
+ mode |= OWL_SD_CTL_TS;
+ }
+
+ owl_host->cmd = cmd;
+
+ /* Start transfer */
+ writel(mode, owl_host->base + OWL_REG_SD_CTL);
+
+ if (data)
+ return;
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, 30 * HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ cmd->error = -ETIMEDOUT;
+ return;
+ }
+
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
+ if (cmd_rsp_mask & state) {
+ if (state & OWL_SD_STATE_CLNR) {
+ dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
+ cmd->error = -EILSEQ;
+ return;
+ }
+
+ if (state & OWL_SD_STATE_CRC7ER) {
+ dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
+ cmd->error = -EILSEQ;
+ return;
+ }
+ }
+
+ if (mmc_resp_type(cmd) & MMC_RSP_136) {
+ cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
+ cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
+ cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
+ cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
+ } else {
+ resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
+ resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
+ cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
+ cmd->resp[1] = resp[1] >> 8;
+ }
+ }
+}
+
+static void owl_mmc_dma_complete(void *param)
+{
+ struct owl_mmc_host *owl_host = param;
+ struct mmc_data *data = owl_host->data;
+
+ if (data)
+ complete(&owl_host->dma_complete);
+}
+
+static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
+ struct mmc_data *data)
+{
+ u32 total;
+
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
+ true);
+ writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
+ writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
+ total = data->blksz * data->blocks;
+
+ if (total < 512)
+ writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
+ else
+ writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);
+
+ if (data->flags & MMC_DATA_WRITE) {
+ owl_host->dma_dir = DMA_TO_DEVICE;
+ owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
+ } else {
+ owl_host->dma_dir = DMA_FROM_DEVICE;
+ owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
+ }
+
+ dma_map_sg(owl_host->dma->device->dev, data->sg,
+ data->sg_len, owl_host->dma_dir);
+
+ dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
+ owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
+ data->sg_len,
+ owl_host->dma_cfg.direction,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!owl_host->desc) {
+ dev_err(owl_host->dev, "Can't prepare slave sg\n");
+ return -EBUSY;
+ }
+
+ owl_host->data = data;
+
+ owl_host->desc->callback = owl_mmc_dma_complete;
+ owl_host->desc->callback_param = (void *)owl_host;
+ data->error = 0;
+
+ return 0;
+}
+
+static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ int ret;
+
+ owl_host->mrq = mrq;
+ if (mrq->data) {
+ ret = owl_mmc_prepare_data(owl_host, data);
+ if (ret < 0) {
+ data->error = ret;
+ goto err_out;
+ }
+
+ init_completion(&owl_host->dma_complete);
+ dmaengine_submit(owl_host->desc);
+ dma_async_issue_pending(owl_host->dma);
+ }
+
+ owl_mmc_send_cmd(owl_host, mrq->cmd, data);
+
+ if (data) {
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete,
+ 10 * HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ dmaengine_terminate_all(owl_host->dma);
+ goto err_out;
+ }
+
+ if (!wait_for_completion_timeout(&owl_host->dma_complete,
+ 5 * HZ)) {
+ dev_err(owl_host->dev, "DMA interrupt timeout\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ dmaengine_terminate_all(owl_host->dma);
+ goto err_out;
+ }
+
+ if (data->stop)
+ owl_mmc_send_cmd(owl_host, data->stop, NULL);
+
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
+err_out:
+ owl_mmc_finish_request(owl_host);
+}
+
+static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
+ unsigned int rate)
+{
+ unsigned long clk_rate;
+ int ret;
+ u32 reg;
+
+ reg = readl(owl_host->base + OWL_REG_SD_CTL);
+ reg &= ~OWL_SD_CTL_DELAY_MSK;
+
+ /* Set RDELAY and WDELAY based on the clock */
+ if (rate <= 1000000) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else if ((rate > 1000000) && (rate <= 26000000)) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ /* DDR50 mode has special delay chain */
+ } else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
+ OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else {
+ dev_err(owl_host->dev, "SD clock rate not supported\n");
+ return -EINVAL;
+ }
+
+ clk_rate = clk_round_rate(owl_host->clk, rate << 1);
+ ret = clk_set_rate(owl_host->clk, clk_rate);
+
+ return ret;
+}
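Editor's note (assumption, not stated explicitly in the patch): the rate << 1 passed to clk_round_rate() suggests the SD module clock runs at twice the card clock, so a 25 MHz card clock requests a 50 MHz module clock. A trivial sketch of that relationship:

    #include <stdio.h>

    int main(void)
    {
            unsigned long card_hz = 25000000;         /* requested card clock */
            unsigned long module_hz = card_hz << 1;   /* value handed to clk_round_rate() */

            printf("card %lu Hz -> module %lu Hz\n", card_hz, module_hz);
            return 0;
    }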
+
+static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
+{
+ if (!ios->clock)
+ return;
+
+ owl_host->clock = ios->clock;
+ owl_mmc_set_clk_rate(owl_host, ios->clock);
+}
+
+static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+
+ reg = readl(owl_host->base + OWL_REG_SD_EN);
+ reg &= ~0x03;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ break;
+ case MMC_BUS_WIDTH_4:
+ reg |= OWL_SD_EN_DATAWID(1);
+ break;
+ case MMC_BUS_WIDTH_8:
+ reg |= OWL_SD_EN_DATAWID(2);
+ break;
+ }
+
+ writel(reg, owl_host->base + OWL_REG_SD_EN);
+}
+
+static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
+{
+ reset_control_assert(owl_host->reset);
+ udelay(20);
+ reset_control_deassert(owl_host->reset);
+}
+
+static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
+{
+ u32 mode;
+
+ init_completion(&owl_host->sdc_complete);
+
+ /* Enable transfer end IRQ */
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
+ OWL_SD_STATE_TEIE, true);
+
+ /* Send init clk */
+ mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
+ mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
+ writel(mode, owl_host->base + OWL_REG_SD_CTL);
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ return;
+ }
+}
+
+static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ dev_dbg(owl_host->dev, "Powering card up\n");
+
+ /* Reset the SDC controller to clear all previous states */
+ owl_mmc_ctr_reset(owl_host);
+ clk_prepare_enable(owl_host->clk);
+ writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
+ owl_host->base + OWL_REG_SD_EN);
+
+ break;
+
+ case MMC_POWER_ON:
+ dev_dbg(owl_host->dev, "Powering card on\n");
+ owl_mmc_power_on(owl_host);
+
+ break;
+
+ case MMC_POWER_OFF:
+ dev_dbg(owl_host->dev, "Powering card off\n");
+ clk_disable_unprepare(owl_host->clk);
+
+ return;
+
+ default:
+ dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
+ break;
+ }
+
+ if (ios->clock != owl_host->clock)
+ owl_mmc_set_clk(owl_host, ios);
+
+ owl_mmc_set_bus_width(owl_host, ios);
+
+ /* Enable DDR mode if requested */
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
+ owl_host->ddr_50 = 1;
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_DDREN, true);
+ } else {
+ owl_host->ddr_50 = 0;
+ }
+}
+
+static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ /* It is enough to change the pad ctrl bit for the voltage switch */
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_S18EN, false);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_S18EN, true);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct mmc_host_ops owl_mmc_ops = {
+ .request = owl_mmc_request,
+ .set_ios = owl_mmc_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+ .start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
+};
+
+static int owl_mmc_probe(struct platform_device *pdev)
+{
+ struct owl_mmc_host *owl_host;
+ struct mmc_host *mmc;
+ struct resource *res;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "mmc alloc host failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, mmc);
+
+ owl_host = mmc_priv(mmc);
+ owl_host->dev = &pdev->dev;
+ owl_host->mmc = mmc;
+ spin_lock_init(&owl_host->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ owl_host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(owl_host->base)) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = PTR_ERR(owl_host->base);
+ goto err_free_host;
+ }
+
+ owl_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(owl_host->clk)) {
+ dev_err(&pdev->dev, "No clock defined\n");
+ ret = PTR_ERR(owl_host->clk);
+ goto err_free_host;
+ }
+
+ owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(owl_host->reset)) {
+ dev_err(&pdev->dev, "Could not get reset control\n");
+ ret = PTR_ERR(owl_host->reset);
+ goto err_free_host;
+ }
+
+ mmc->ops = &owl_mmc_ops;
+ mmc->max_blk_count = 512;
+ mmc->max_blk_size = 512;
+ mmc->max_segs = 256;
+ mmc->max_seg_size = 262144;
+ mmc->max_req_size = 262144;
+ /* 100kHz ~ 52MHz */
+ mmc->f_min = 100000;
+ mmc->f_max = 52000000;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_4_BIT_DATA;
+ mmc->caps2 = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_165_195;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_free_host;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
+ if (!owl_host->dma) {
+ dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
+ ret = -ENXIO;
+ goto err_free_host;
+ }
+
+ dev_info(&pdev->dev, "Using %s for DMA transfers\n",
+ dma_chan_name(owl_host->dma));
+
+ owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
+ owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
+ owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ owl_host->dma_cfg.device_fc = false;
+
+ owl_host->irq = platform_get_irq(pdev, 0);
+ if (owl_host->irq < 0) {
+ ret = -EINVAL;
+ goto err_free_host;
+ }
+
+ ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
+ 0, dev_name(&pdev->dev), owl_host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n",
+ owl_host->irq);
+ goto err_free_host;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add host\n");
+ goto err_free_host;
+ }
+
+ dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
+
+ return 0;
+
+err_free_host:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int owl_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+ disable_irq(owl_host->irq);
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static const struct of_device_id owl_mmc_of_match[] = {
+ {.compatible = "actions,owl-mmc",},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
+
+static struct platform_driver owl_mmc_driver = {
+ .driver = {
+ .name = "owl_mmc",
+ .of_match_table = of_match_ptr(owl_mmc_of_match),
+ },
+ .probe = owl_mmc_probe,
+ .remove = owl_mmc_remove,
+};
+module_platform_driver(owl_mmc_driver);
+
+MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
+MODULE_AUTHOR("Actions Semi");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index a66f8d6d61d1..18839a10594c 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -308,6 +308,7 @@ static const struct soc_device_attribute soc_whitelist[] = {
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
/* generic ones */
{ .soc_id = "r8a774a1" },
+ { .soc_id = "r8a774b1" },
{ .soc_id = "r8a774c0" },
{ .soc_id = "r8a77470" },
{ .soc_id = "r8a7795" },
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 57b582bf73d9..9289bb4d633e 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -51,6 +51,11 @@
#define ESDHC_CLOCK_HCKEN 0x00000002
#define ESDHC_CLOCK_IPGEN 0x00000001
+/* System Control 2 Register */
+#define ESDHC_SYSTEM_CONTROL_2 0x3c
+#define ESDHC_SMPCLKSEL 0x00800000
+#define ESDHC_EXTN 0x00400000
+
/* Host Controller Capabilities Register 2 */
#define ESDHC_CAPABILITIES_1 0x114
@@ -59,7 +64,16 @@
#define ESDHC_HS400_WNDW_ADJUST 0x00000040
#define ESDHC_HS400_MODE 0x00000010
#define ESDHC_TB_EN 0x00000004
+#define ESDHC_TB_MODE_MASK 0x00000003
+#define ESDHC_TB_MODE_SW 0x00000003
+#define ESDHC_TB_MODE_3 0x00000002
+
+#define ESDHC_TBSTAT 0x124
+
#define ESDHC_TBPTR 0x128
+#define ESDHC_WNDW_STRT_PTR_SHIFT 8
+#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8)
+#define ESDHC_WNDW_END_PTR_MASK 0x7f
/* SD Clock Control Register */
#define ESDHC_SDCLKCTL 0x144
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
new file mode 100644
index 000000000000..a1aa21b9ae1c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
+ * Takao Orito <orito.takao@socionext.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+#include "sdhci-pltfm.h"
+#include "sdhci_f_sdh30.h"
+
+/* milbeaut bridge controller register */
+#define MLB_SOFT_RESET 0x0200
+#define MLB_SOFT_RESET_RSTX BIT(0)
+
+#define MLB_WP_CD_LED_SET 0x0210
+#define MLB_WP_CD_LED_SET_LED_INV BIT(2)
+
+#define MLB_CR_SET 0x0220
+#define MLB_CR_SET_CR_TOCLKUNIT BIT(24)
+#define MLB_CR_SET_CR_TOCLKFREQ_SFT (16)
+#define MLB_CR_SET_CR_TOCLKFREQ_MASK (0x3F << MLB_CR_SET_CR_TOCLKFREQ_SFT)
+#define MLB_CR_SET_CR_BCLKFREQ_SFT (8)
+#define MLB_CR_SET_CR_BCLKFREQ_MASK (0xFF << MLB_CR_SET_CR_BCLKFREQ_SFT)
+#define MLB_CR_SET_CR_RTUNTIMER_SFT (4)
+#define MLB_CR_SET_CR_RTUNTIMER_MASK (0xF << MLB_CR_SET_CR_RTUNTIMER_SFT)
+
+#define MLB_SD_TOCLK_I_DIV 16
+#define MLB_TOCLKFREQ_UNIT_THRES 16000000
+#define MLB_CAL_TOCLKFREQ_MHZ(rate) (rate / MLB_SD_TOCLK_I_DIV / 1000000)
+#define MLB_CAL_TOCLKFREQ_KHZ(rate) (rate / MLB_SD_TOCLK_I_DIV / 1000)
+#define MLB_TOCLKFREQ_MAX 63
+#define MLB_TOCLKFREQ_MIN 1
+
+#define MLB_SD_BCLK_I_DIV 4
+#define MLB_CAL_BCLKFREQ(rate) (rate / MLB_SD_BCLK_I_DIV / 1000000)
+#define MLB_BCLKFREQ_MAX 255
+#define MLB_BCLKFREQ_MIN 1
+
+#define MLB_CDR_SET 0x0230
+#define MLB_CDR_SET_CLK2POW16 3
+
+struct f_sdhost_priv {
+ struct clk *clk_iface;
+ struct clk *clk;
+ struct device *dev;
+ bool enable_cmd_dat_delay;
+};
+
+static void sdhci_milbeaut_soft_voltage_switch(struct sdhci_host *host)
+{
+ u32 ctrl = 0;
+
+ usleep_range(2500, 3000);
+ ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+
+ ctrl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ usleep_range(2500, 3000);
+
+ ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING);
+ ctrl |= F_SDH30_CMD_CHK_DIS;
+ sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING);
+}
+
+static unsigned int sdhci_milbeaut_get_min_clock(struct sdhci_host *host)
+{
+ return F_SDH30_MIN_CLOCK;
+}
+
+static void sdhci_milbeaut_reset(struct sdhci_host *host, u8 mask)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u16 clk;
+ u32 ctl;
+ ktime_t timeout;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk = (clk & ~SDHCI_CLOCK_CARD_EN) | SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ sdhci_reset(host, mask);
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ timeout = ktime_add_ms(ktime_get(), 10);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_INT_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ udelay(10);
+ }
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
+}
+
+static void sdhci_milbeaut_set_power(struct sdhci_host *host,
+ unsigned char mode, unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
+static const struct sdhci_ops sdhci_milbeaut_ops = {
+ .voltage_switch = sdhci_milbeaut_soft_voltage_switch,
+ .get_min_clock = sdhci_milbeaut_get_min_clock,
+ .reset = sdhci_milbeaut_reset,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_milbeaut_set_power,
+};
+
+static void sdhci_milbeaut_bridge_reset(struct sdhci_host *host,
+ int reset_flag)
+{
+ if (reset_flag)
+ sdhci_writel(host, 0, MLB_SOFT_RESET);
+ else
+ sdhci_writel(host, MLB_SOFT_RESET_RSTX, MLB_SOFT_RESET);
+}
+
+static void sdhci_milbeaut_bridge_init(struct sdhci_host *host,
+ int rate)
+{
+ u32 val, clk;
+
+ /* IO_SDIO_CR_SET should be set while in reset */
+ val = sdhci_readl(host, MLB_CR_SET);
+ val &= ~(MLB_CR_SET_CR_TOCLKFREQ_MASK | MLB_CR_SET_CR_TOCLKUNIT |
+ MLB_CR_SET_CR_BCLKFREQ_MASK);
+ if (rate >= MLB_TOCLKFREQ_UNIT_THRES) {
+ clk = MLB_CAL_TOCLKFREQ_MHZ(rate);
+ clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk);
+ val |= MLB_CR_SET_CR_TOCLKUNIT |
+ (clk << MLB_CR_SET_CR_TOCLKFREQ_SFT);
+ } else {
+ clk = MLB_CAL_TOCLKFREQ_KHZ(rate);
+ clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk);
+ clk = max_t(u32, MLB_TOCLKFREQ_MIN, clk);
+ val |= clk << MLB_CR_SET_CR_TOCLKFREQ_SFT;
+ }
+
+ clk = MLB_CAL_BCLKFREQ(rate);
+ clk = min_t(u32, MLB_BCLKFREQ_MAX, clk);
+ clk = max_t(u32, MLB_BCLKFREQ_MIN, clk);
+ val |= clk << MLB_CR_SET_CR_BCLKFREQ_SFT;
+ val &= ~MLB_CR_SET_CR_RTUNTIMER_MASK;
+ sdhci_writel(host, val, MLB_CR_SET);
+
+ sdhci_writel(host, MLB_CDR_SET_CLK2POW16, MLB_CDR_SET);
+
+ sdhci_writel(host, MLB_WP_CD_LED_SET_LED_INV, MLB_WP_CD_LED_SET);
+}
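Editor's illustration (not part of the patch): how the bridge clock fields above work out for a hypothetical 200 MHz core clock. Since 200 MHz is above MLB_TOCLKFREQ_UNIT_THRES, CR_TOCLKUNIT is set and the timeout clock field is expressed in MHz units.

    #include <stdio.h>

    #define MLB_SD_TOCLK_I_DIV  16
    #define MLB_SD_BCLK_I_DIV   4

    int main(void)
    {
            unsigned long rate = 200000000UL;   /* hypothetical core clock rate */

            /* Same arithmetic as MLB_CAL_TOCLKFREQ_MHZ()/MLB_CAL_BCLKFREQ() */
            unsigned long toclk = rate / MLB_SD_TOCLK_I_DIV / 1000000;  /* 12 */
            unsigned long bclk  = rate / MLB_SD_BCLK_I_DIV / 1000000;   /* 50 */

            printf("CR_TOCLKFREQ=%lu (MHz units), CR_BCLKFREQ=%lu\n", toclk, bclk);
            return 0;
    }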
+
+static void sdhci_milbeaut_vendor_init(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
+ ctl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+ ctl &= ~F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+ ctl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+
+ ctl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
+ ctl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 |
+ F_SDH30_AHB_INCR_4;
+ ctl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN);
+ sdhci_writew(host, ctl, F_SDH30_AHB_CONFIG);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
+}
+
+static const struct of_device_id mlb_dt_ids[] = {
+ {
+ .compatible = "socionext,milbeaut-m10v-sdhci-3.0",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mlb_dt_ids);
+
+static void sdhci_milbeaut_init(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ int rate = clk_get_rate(priv->clk);
+ u16 ctl;
+
+ sdhci_milbeaut_bridge_reset(host, 0);
+
+ ctl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ ctl &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
+ sdhci_writew(host, ctl, SDHCI_CLOCK_CONTROL);
+
+ sdhci_milbeaut_bridge_reset(host, 1);
+
+ sdhci_milbeaut_bridge_init(host, rate);
+ sdhci_milbeaut_bridge_reset(host, 0);
+
+ sdhci_milbeaut_vendor_init(host);
+}
+
+static int sdhci_milbeaut_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, ret = 0;
+ struct f_sdhost_priv *priv;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "%s: no irq specified\n", __func__);
+ return irq;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ priv = sdhci_priv(host);
+ priv->dev = dev;
+
+ host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_CLOCK_BEFORE_RESET |
+ SDHCI_QUIRK_DELAY_AFTER_POWER;
+ host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+ SDHCI_QUIRK2_TUNING_WORK_AROUND |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ host->hw_name = "f_sdh30";
+ host->ops = &sdhci_milbeaut_ops;
+ host->irq = irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ goto err;
+ }
+
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
+
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
+
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
+
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
+
+ sdhci_milbeaut_init(host);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+
+ return 0;
+
+err_add_host:
+ clk_disable_unprepare(priv->clk);
+err_clk:
+ clk_disable_unprepare(priv->clk_iface);
+err:
+ sdhci_free_host(host);
+ return ret;
+}
+
+static int sdhci_milbeaut_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+
+ sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ clk_disable_unprepare(priv->clk_iface);
+ clk_disable_unprepare(priv->clk);
+
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_milbeaut_driver = {
+ .driver = {
+ .name = "sdhci-milbeaut",
+ .of_match_table = of_match_ptr(mlb_dt_ids),
+ },
+ .probe = sdhci_milbeaut_probe,
+ .remove = sdhci_milbeaut_remove,
+};
+
+module_platform_driver(sdhci_milbeaut_driver);
+
+MODULE_DESCRIPTION("MILBEAUT SD Card Controller driver");
+MODULE_AUTHOR("Takao Orito <orito.takao@socionext.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci-milbeaut");
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 7023cbec4017..e49b44b4d82e 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -22,6 +22,7 @@
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "cqhci.h"
#include "sdhci-pltfm.h"
@@ -32,6 +33,10 @@
#define PHY_CLK_TOO_SLOW_HZ 400000
+/* Default settings for ZynqMP Clock Phases */
+#define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0}
+#define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0}
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -72,13 +77,38 @@ struct sdhci_arasan_soc_ctl_map {
};
/**
+ * struct sdhci_arasan_clk_data
+ * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
+ * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
+ * @sampleclk_hw: Struct for the sampling clock we might provide to a PHY.
+ * @sampleclk: Pointer to normal 'struct clock' for sampleclk_hw.
+ * @clk_phase_in: Array of Input Clock Phase Delays for all speed modes
+ * @clk_phase_out: Array of Output Clock Phase Delays for all speed modes
+ * @set_clk_delays: Function pointer for setting Clock Delays
+ * @clk_of_data: Platform specific runtime clock data storage pointer
+ */
+struct sdhci_arasan_clk_data {
+ struct clk_hw sdcardclk_hw;
+ struct clk *sdcardclk;
+ struct clk_hw sampleclk_hw;
+ struct clk *sampleclk;
+ int clk_phase_in[MMC_TIMING_MMC_HS400 + 1];
+ int clk_phase_out[MMC_TIMING_MMC_HS400 + 1];
+ void (*set_clk_delays)(struct sdhci_host *host);
+ void *clk_of_data;
+};
+
+struct sdhci_arasan_zynqmp_clk_data {
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+/**
* struct sdhci_arasan_data
* @host: Pointer to the main SDHCI host structure.
* @clk_ahb: Pointer to the AHB clock
* @phy: Pointer to the generic phy
* @is_phy_on: True if the PHY is on; false if not.
- * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
- * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
+ * @clk_data: Struct for the Arasan Controller Clock Data.
* @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
* @soc_ctl_map: Map to get offsets into soc_ctl registers.
*/
@@ -89,8 +119,7 @@ struct sdhci_arasan_data {
bool is_phy_on;
bool has_cqe;
- struct clk_hw sdcardclk_hw;
- struct clk *sdcardclk;
+ struct sdhci_arasan_clk_data clk_data;
struct regmap *soc_ctl_base;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
@@ -120,6 +149,12 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_emmc_soc_ctl_map = {
.hiword_update = false,
};
+static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0x80, .width = 8, .shift = 2 },
+ .clockmultiplier = { .reg = 0, .width = -1, .shift = -1 },
+ .hiword_update = false,
+};
+
/**
* sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
*
@@ -174,6 +209,7 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
bool ctrl_phy = false;
if (!IS_ERR(sdhci_arasan->phy)) {
@@ -215,6 +251,10 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_arasan->is_phy_on = false;
}
+ /* Set the Input and Output Clock Phase Delays */
+ if (clk_data->set_clk_delays)
+ clk_data->set_clk_delays(host);
+
sdhci_set_clock(host, clock);
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE)
@@ -384,6 +424,11 @@ static struct sdhci_arasan_of_data intel_lgm_emmc_data = {
.pdata = &sdhci_arasan_cqe_pdata,
};
+static struct sdhci_arasan_of_data intel_lgm_sdxc_data = {
+ .soc_ctl_map = &intel_lgm_sdxc_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -489,6 +534,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "intel,lgm-sdhci-5.1-emmc",
.data = &intel_lgm_emmc_data,
},
+ {
+ .compatible = "intel,lgm-sdhci-5.1-sdxc",
+ .data = &intel_lgm_sdxc_data,
+ },
/* Generic compatible below here */
{
.compatible = "arasan,sdhci-8.9a",
@@ -502,6 +551,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "arasan,sdhci-4.9a",
.data = &sdhci_arasan_data,
},
+ {
+ .compatible = "xlnx,zynqmp-8.9a",
+ .data = &sdhci_arasan_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -520,8 +573,10 @@ static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
struct sdhci_arasan_data *sdhci_arasan =
- container_of(hw, struct sdhci_arasan_data, sdcardclk_hw);
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
return host->mmc->actual_clock;
@@ -532,6 +587,177 @@ static const struct clk_ops arasan_sdcardclk_ops = {
};
/**
+ * sdhci_arasan_sampleclk_recalc_rate - Return the sampling clock rate
+ *
+ * Return the current actual rate of the sampling clock. This can be used
+ * to communicate with our PHY.
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be rate of clk_xin).
+ * Returns the sample clock rate.
+ */
+static unsigned long sdhci_arasan_sampleclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+
+ return host->mmc->actual_clock;
+}
+
+static const struct clk_ops arasan_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+};
+
+/**
+ * sdhci_zynqmp_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
+ *
+ * Set the SD Output Clock Tap Delays for Output path
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 - 359.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ clk_data->clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1;
+ u8 tap_delay, tap_max = 0;
+ int ret;
+
+ /*
+ * This is applicable for SDHCI_SPEC_300 and above.
+ * ZynqMP does not set phase for <=25MHz clock.
+ * If degrees is zero, no need to do anything.
+ */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 30 Taps are available */
+ tap_max = 30;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 15 Taps are available */
+ tap_max = 15;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 8 Taps are available */
+ tap_max = 8;
+ break;
+ default:
+ break;
+ }
+
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_OUTPUT, tap_delay, NULL);
+ if (ret)
+ pr_err("Error setting Output Tap Delay\n");
+
+ return ret;
+}
+
+static const struct clk_ops zynqmp_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+ .set_phase = sdhci_zynqmp_sdcardclk_set_phase,
+};
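Editor's illustration (not part of the patch): the tap-delay arithmetic used by the set_phase callbacks is tap = degrees * tap_max / 360, in integer math. For example, the default DDR50 output phase of 48 degrees with 30 available taps gives tap 4, and the mio-bank-2 SDR104 override of 90 degrees with 8 taps gives tap 2.

    #include <stdio.h>

    /* Integer tap computation used by the ZynqMP set_phase callbacks. */
    static unsigned int taps(unsigned int degrees, unsigned int tap_max)
    {
            return degrees * tap_max / 360;
    }

    int main(void)
    {
            printf("DDR50,  48 deg, 30 taps -> tap %u\n", taps(48, 30)); /* 4 */
            printf("SDR104, 90 deg,  8 taps -> tap %u\n", taps(90, 8));  /* 2 */
            return 0;
    }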
+
+/**
+ * sdhci_zynqmp_sampleclk_set_phase - Set the SD Input Clock Tap Delays
+ *
+ * Set the SD Input Clock Tap Delays for Input path
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 - 359.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ clk_data->clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 node_id = !strcmp(clk_name, "clk_in_sd0") ? NODE_SD_0 : NODE_SD_1;
+ u8 tap_delay, tap_max = 0;
+ int ret;
+
+ /*
+ * This is applicable for SDHCI_SPEC_300 and above.
+ * ZynqMP does not set phase for <=25MHz clock.
+ * If degrees is zero, no need to do anything.
+ */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 120 Taps are available */
+ tap_max = 120;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 60 Taps are available */
+ tap_max = 60;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 30 Taps are available */
+ tap_max = 30;
+ break;
+ default:
+ break;
+ }
+
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_INPUT, tap_delay, NULL);
+ if (ret)
+ pr_err("Error setting Input Tap Delay\n");
+
+ return ret;
+}
+
+static const struct clk_ops zynqmp_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+ .set_phase = sdhci_zynqmp_sampleclk_set_phase,
+};
+
+/**
* sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
*
* The corecfg_clockmultiplier is supposed to contain clock multiplier
@@ -609,39 +835,128 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, mhz);
}
+static void sdhci_arasan_set_clk_delays(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
+
+ clk_set_phase(clk_data->sampleclk,
+ clk_data->clk_phase_in[host->timing]);
+ clk_set_phase(clk_data->sdcardclk,
+ clk_data->clk_phase_out[host->timing]);
+}
+
+static void arasan_dt_read_clk_phase(struct device *dev,
+ struct sdhci_arasan_clk_data *clk_data,
+ unsigned int timing, const char *prop)
+{
+ struct device_node *np = dev->of_node;
+
+ int clk_phase[2] = {0};
+
+ /*
+ * Read Tap Delay values from DT; if the DT does not contain the
+ * Tap Values, then use the pre-defined values.
+ */
+ if (of_property_read_variable_u32_array(np, prop, &clk_phase[0],
+ 2, 0)) {
+ dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n",
+ prop, clk_data->clk_phase_in[timing],
+ clk_data->clk_phase_out[timing]);
+ return;
+ }
+
+ /* The values read are Input and Output Clock Delays in order */
+ clk_data->clk_phase_in[timing] = clk_phase[0];
+ clk_data->clk_phase_out[timing] = clk_phase[1];
+}
+
/**
- * sdhci_arasan_register_sdclk - Register the sdclk for a PHY to use
+ * arasan_dt_parse_clk_phases - Read Clock Delay values from DT
+ *
+ * Called at initialization to parse the values of Clock Delays.
+ *
+ * @dev: Pointer to our struct device.
+ * @clk_data: Pointer to the Clock Data structure
+ */
+static void arasan_dt_parse_clk_phases(struct device *dev,
+ struct sdhci_arasan_clk_data *clk_data)
+{
+ int *iclk_phase, *oclk_phase;
+ u32 mio_bank = 0;
+ int i;
+
+ /*
+ * This has been kept as a pointer and is assigned a function here.
+ * So that different controller variants can assign their own handling
+ * function.
+ */
+ clk_data->set_clk_delays = sdhci_arasan_set_clk_delays;
+
+ if (of_device_is_compatible(dev->of_node, "xlnx,zynqmp-8.9a")) {
+ iclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_ICLK_PHASE;
+ oclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_OCLK_PHASE;
+
+ of_property_read_u32(dev->of_node, "xlnx,mio-bank", &mio_bank);
+ if (mio_bank == 2) {
+ oclk_phase[MMC_TIMING_UHS_SDR104] = 90;
+ oclk_phase[MMC_TIMING_MMC_HS200] = 90;
+ }
+
+ for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
+ clk_data->clk_phase_in[i] = iclk_phase[i];
+ clk_data->clk_phase_out[i] = oclk_phase[i];
+ }
+ }
+
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_LEGACY,
+ "clk-phase-legacy");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS,
+ "clk-phase-mmc-hs");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_SD_HS,
+ "clk-phase-sd-hs");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR12,
+ "clk-phase-uhs-sdr12");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR25,
+ "clk-phase-uhs-sdr25");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR50,
+ "clk-phase-uhs-sdr50");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR104,
+ "clk-phase-uhs-sdr104");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_DDR50,
+ "clk-phase-uhs-ddr50");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_DDR52,
+ "clk-phase-mmc-ddr52");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS200,
+ "clk-phase-mmc-hs200");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS400,
+ "clk-phase-mmc-hs400");
+}
+
+/**
+ * sdhci_arasan_register_sdcardclk - Register the sdcardclk for a PHY to use
*
* Some PHY devices need to know what the actual card clock is. In order for
* them to find out, we'll provide a clock through the common clock framework
* for them to query.
*
- * Note: without seriously re-architecting SDHCI's clock code and testing on
- * all platforms, there's no way to create a totally beautiful clock here
- * with all clock ops implemented. Instead, we'll just create a clock that can
- * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
- * framework that we're doing things behind its back. This should be sufficient
- * to create nice clean device tree bindings and later (if needed) we can try
- * re-architecting SDHCI if we see some benefit to it.
- *
* @sdhci_arasan: Our private data structure.
* @clk_xin: Pointer to the functional clock
* @dev: Pointer to our struct device.
* Returns 0 on success and error value on error
*/
-static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
- struct clk *clk_xin,
- struct device *dev)
+static int
+sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
{
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
struct device_node *np = dev->of_node;
struct clk_init_data sdcardclk_init;
const char *parent_clk_name;
int ret;
- /* Providing a clock to the PHY is optional; no error if missing */
- if (!of_find_property(np, "#clock-cells", NULL))
- return 0;
-
ret = of_property_read_string_index(np, "clock-output-names", 0,
&sdcardclk_init.name);
if (ret) {
@@ -653,17 +968,72 @@ static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
sdcardclk_init.parent_names = &parent_clk_name;
sdcardclk_init.num_parents = 1;
sdcardclk_init.flags = CLK_GET_RATE_NOCACHE;
- sdcardclk_init.ops = &arasan_sdcardclk_ops;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
+ sdcardclk_init.ops = &zynqmp_sdcardclk_ops;
+ else
+ sdcardclk_init.ops = &arasan_sdcardclk_ops;
+
+ clk_data->sdcardclk_hw.init = &sdcardclk_init;
+ clk_data->sdcardclk =
+ devm_clk_register(dev, &clk_data->sdcardclk_hw);
+ clk_data->sdcardclk_hw.init = NULL;
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get,
+ clk_data->sdcardclk);
+ if (ret)
+ dev_err(dev, "Failed to add sdcard clock provider\n");
+
+ return ret;
+}
+
+/**
+ * sdhci_arasan_register_sampleclk - Register the sampleclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int
+sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data sampleclk_init;
+ const char *parent_clk_name;
+ int ret;
- sdhci_arasan->sdcardclk_hw.init = &sdcardclk_init;
- sdhci_arasan->sdcardclk =
- devm_clk_register(dev, &sdhci_arasan->sdcardclk_hw);
- sdhci_arasan->sdcardclk_hw.init = NULL;
+ ret = of_property_read_string_index(np, "clock-output-names", 1,
+ &sampleclk_init.name);
+ if (ret) {
+ dev_err(dev, "DT has #clock-cells but no clock-output-names\n");
+ return ret;
+ }
+
+ parent_clk_name = __clk_get_name(clk_xin);
+ sampleclk_init.parent_names = &parent_clk_name;
+ sampleclk_init.num_parents = 1;
+ sampleclk_init.flags = CLK_GET_RATE_NOCACHE;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
+ sampleclk_init.ops = &zynqmp_sampleclk_ops;
+ else
+ sampleclk_init.ops = &arasan_sampleclk_ops;
+
+ clk_data->sampleclk_hw.init = &sampleclk_init;
+ clk_data->sampleclk =
+ devm_clk_register(dev, &clk_data->sampleclk_hw);
+ clk_data->sampleclk_hw.init = NULL;
ret = of_clk_add_provider(np, of_clk_src_simple_get,
- sdhci_arasan->sdcardclk);
+ clk_data->sampleclk);
if (ret)
- dev_err(dev, "Failed to add clock provider\n");
+ dev_err(dev, "Failed to add sample clock provider\n");
return ret;
}
@@ -686,6 +1056,54 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+/**
+ * sdhci_arasan_register_sdclk - Register the sdcardclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * Note: without seriously re-architecting SDHCI's clock code and testing on
+ * all platforms, there's no way to create a totally beautiful clock here
+ * with all clock ops implemented. Instead, we'll just create a clock that can
+ * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
+ * framework that we're doing things behind its back. This should be sufficient
+ * to create nice clean device tree bindings and later (if needed) we can try
+ * re-architecting SDHCI if we see some benefit to it.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 num_clks = 0;
+ int ret;
+
+ /* Providing a clock to the PHY is optional; no error if missing */
+ if (of_property_read_u32(np, "#clock-cells", &num_clks) < 0)
+ return 0;
+
+ ret = sdhci_arasan_register_sdcardclk(sdhci_arasan, clk_xin, dev);
+ if (ret)
+ return ret;
+
+ if (num_clks) {
+ ret = sdhci_arasan_register_sampleclk(sdhci_arasan, clk_xin,
+ dev);
+ if (ret) {
+ sdhci_arasan_unregister_sdclk(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
{
struct sdhci_host *host = sdhci_arasan->host;
@@ -814,6 +1232,25 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (ret)
goto clk_disable_all;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data;
+ const struct zynqmp_eemi_ops *eemi_ops;
+
+ zynqmp_clk_data = devm_kzalloc(&pdev->dev,
+ sizeof(*zynqmp_clk_data),
+ GFP_KERNEL);
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops)) {
+ ret = PTR_ERR(eemi_ops);
+ goto unreg_clk;
+ }
+
+ zynqmp_clk_data->eemi_ops = eemi_ops;
+ sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
+ }
+
+ arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
+
ret = mmc_of_parse(host->mmc);
if (ret) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 8962f6664381..56912e30c47e 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -111,7 +111,19 @@ static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
+static u32 aspeed_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_PRESENT_STATE) &&
+ (host->mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH))
+ val ^= SDHCI_CARD_PRESENT;
+
+ return val;
+}
+
static const struct sdhci_ops aspeed_sdhci_ops = {
+ .read_l = aspeed_sdhci_readl,
.set_clock = aspeed_sdhci_set_clock,
.get_max_clock = aspeed_sdhci_get_max_clock,
.set_bus_width = aspeed_sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 0ae986c42bc8..5959e394b416 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -27,6 +27,9 @@
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
+#define SDMMC_CALCR 0x240
+#define SDMMC_CALCR_EN BIT(0)
+#define SDMMC_CALCR_ALWYSON BIT(4)
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
@@ -35,6 +38,7 @@ struct sdhci_at91_priv {
struct clk *gck;
struct clk *mainck;
bool restore_needed;
+ bool cal_always_on;
};
static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
@@ -116,10 +120,17 @@ static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
sdhci_reset(host, mask);
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
sdhci_at91_set_force_card_detect(host);
+
+ if (priv->cal_always_on && (mask & SDHCI_RESET_ALL))
+ sdhci_writel(host, SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
+ SDMMC_CALCR);
}
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
@@ -345,6 +356,14 @@ static int sdhci_at91_probe(struct platform_device *pdev)
priv->restore_needed = false;
+ /*
+ * if SDCAL pin is wrongly connected, we must enable
+ * the analog calibration cell permanently.
+ */
+ priv->cal_always_on =
+ device_property_read_bool(&pdev->dev,
+ "microchip,sdcal-inverted");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto clocks_disable_unprepare;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1d1953dfc54b..5cca3fa4610b 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -77,8 +77,10 @@ struct sdhci_esdhc {
bool quirk_incorrect_hostver;
bool quirk_limited_clk_division;
bool quirk_unreliable_pulse_detection;
- bool quirk_fixup_tuning;
+ bool quirk_tuning_erratum_type1;
+ bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
+ bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
u32 div_ratio;
@@ -408,6 +410,8 @@ static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
@@ -416,10 +420,24 @@ static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32be(ret, host->ioaddr + base);
+
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
+ * 1us after ESDHC_EXTN is set.
+ */
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
+ esdhc->in_sw_tuning) {
+ udelay(1);
+ ret |= ESDHC_SMPCLKSEL;
+ iowrite32be(ret, host->ioaddr + base);
+ }
+ }
}
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
@@ -428,6 +446,18 @@ static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32(ret, host->ioaddr + base);
+
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
+ * 1us after ESDHC_EXTN is set.
+ */
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
+ esdhc->in_sw_tuning) {
+ udelay(1);
+ ret |= ESDHC_SMPCLKSEL;
+ iowrite32(ret, host->ioaddr + base);
+ }
+ }
}
static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
@@ -560,6 +590,32 @@ static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
}
}
+static void esdhc_flush_async_fifo(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u32 val;
+
+ val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+ val |= ESDHC_FLUSH_ASYNC_FIFO;
+ sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
+ ESDHC_FLUSH_ASYNC_FIFO))
+ break;
+ if (timedout) {
+ pr_err("%s: flushing asynchronous FIFO timeout.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ usleep_range(10, 20);
+ }
+}
+
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -652,9 +708,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
esdhc_clock_enable(host, false);
- temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- temp |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
+ esdhc_flush_async_fifo(host);
}
/* Wait max 20 ms */
@@ -796,16 +850,21 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
}
-static struct soc_device_attribute soc_fixup_tuning[] = {
+static struct soc_device_attribute soc_tuning_erratum_type1[] = {
+ { .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ T1040", .revision = "1.0", },
{ .family = "QorIQ T2080", .revision = "1.0", },
- { .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ LS1021A", .revision = "1.0", },
- { .family = "QorIQ LS1080A", .revision = "1.0", },
- { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { },
+};
+
+static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1012A", .revision = "1.0", },
{ .family = "QorIQ LS1043A", .revision = "1.*", },
{ .family = "QorIQ LS1046A", .revision = "1.0", },
+ { .family = "QorIQ LS1080A", .revision = "1.0", },
+ { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { .family = "QorIQ LA1575A", .revision = "1.0", },
{ },
};
@@ -814,10 +873,7 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
u32 val;
esdhc_clock_enable(host, false);
-
- val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- val |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+ esdhc_flush_async_fifo(host);
val = sdhci_readl(host, ESDHC_TBCTL);
if (enable)
@@ -829,15 +885,97 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
esdhc_clock_enable(host, true);
}
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+ u8 *window_end)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 tbstat_15_8, tbstat_7_0;
+ u32 val;
+
+ if (esdhc->quirk_tuning_erratum_type1) {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ return;
+ }
+
+ /* Write TBCTL[11:8]=4'h8 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~(0xf << 8);
+ val |= 8 << 8;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ mdelay(1);
+
+ /* Read TBCTL[31:0] register and rewrite again */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ mdelay(1);
+
+ /* Read the TBSTAT[31:0] register twice */
+ val = sdhci_readl(host, ESDHC_TBSTAT);
+ val = sdhci_readl(host, ESDHC_TBSTAT);
+
+ /* Reset data lines by setting ESDHCCTL[RSTD] */
+ sdhci_reset(host, SDHCI_RESET_DATA);
+ /* Write 32'hFFFF_FFFF to IRQSTAT register */
+ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
+
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
+ * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+ * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
+ * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
+ */
+ tbstat_7_0 = val & 0xff;
+ tbstat_15_8 = (val >> 8) & 0xff;
+
+ if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+ *window_start = 8 * esdhc->div_ratio;
+ *window_end = 4 * esdhc->div_ratio;
+ } else {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ }
+}
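Editor's illustration (not part of the patch): the window selection above picks a wide window (8/4 times the divider ratio) when the two TBSTAT bytes disagree by more than 4 * div_ratio, and a narrow one (5/3 times) otherwise. The values below are hypothetical.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned int div_ratio = 2;                   /* hypothetical divider ratio */
            unsigned int tbstat_hi = 20, tbstat_lo = 5;   /* hypothetical TBSTAT[15:8], TBSTAT[7:0] */
            unsigned int start, end;

            if (abs((int)tbstat_hi - (int)tbstat_lo) > 4 * div_ratio) {
                    start = 8 * div_ratio;    /* 16 */
                    end   = 4 * div_ratio;    /*  8 */
            } else {
                    start = 5 * div_ratio;
                    end   = 3 * div_ratio;
            }

            printf("sw tuning window: start=%u end=%u\n", start, end);
            return 0;
    }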
+
+static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
+ u8 window_start, u8 window_end)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u32 val;
+ int ret;
+
+ /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
+ val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
+ ESDHC_WNDW_STRT_PTR_MASK;
+ val |= window_end & ESDHC_WNDW_END_PTR_MASK;
+ sdhci_writel(host, val, ESDHC_TBPTR);
+
+ /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_MODE_MASK;
+ val |= ESDHC_TB_MODE_SW;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ esdhc->in_sw_tuning = true;
+ ret = sdhci_execute_tuning(mmc, opcode);
+ esdhc->in_sw_tuning = false;
+ return ret;
+}
+
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 window_start, window_end;
+ int ret, retries = 1;
bool hs400_tuning;
unsigned int clk;
u32 val;
- int ret;
/* For tuning mode, the sd clock divisor value
* must be larger than 3 according to reference manual.
@@ -846,39 +984,73 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->clock > clk)
esdhc_of_set_clock(host, clk);
- if (esdhc->quirk_limited_clk_division &&
- host->flags & SDHCI_HS400_TUNING)
- esdhc_of_set_clock(host, host->clock);
-
esdhc_tuning_block_enable(host, true);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
- ret = sdhci_execute_tuning(mmc, opcode);
- if (hs400_tuning) {
- val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
- val |= ESDHC_FLW_CTL_BG;
- sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
- }
+ do {
+ if (esdhc->quirk_limited_clk_division &&
+ hs400_tuning)
+ esdhc_of_set_clock(host, host->clock);
- if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
+ /* Do HW tuning */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_MODE_MASK;
+ val |= ESDHC_TB_MODE_3;
+ sdhci_writel(host, val, ESDHC_TBCTL);
- /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
- * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
- */
- val = sdhci_readl(host, ESDHC_TBPTR);
- val = (val & ~((0x7f << 8) | 0x7f)) |
- (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
- sdhci_writel(host, val, ESDHC_TBPTR);
+ ret = sdhci_execute_tuning(mmc, opcode);
+ if (ret)
+ break;
- /* program the software tuning mode by setting
- * TBCTL[TB_MODE]=2'h3
+ /* If HW tuning fails and triggers erratum,
+ * try workaround.
*/
- val = sdhci_readl(host, ESDHC_TBCTL);
- val |= 0x3;
- sdhci_writel(host, val, ESDHC_TBCTL);
- sdhci_execute_tuning(mmc, opcode);
+ ret = host->tuning_err;
+ if (ret == -EAGAIN &&
+ (esdhc->quirk_tuning_erratum_type1 ||
+ esdhc->quirk_tuning_erratum_type2)) {
+ /* Recover HS400 tuning flag */
+ if (hs400_tuning)
+ host->flags |= SDHCI_HS400_TUNING;
+ pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
+ mmc_hostname(mmc));
+ /* Do SW tuning */
+ esdhc_prepare_sw_tuning(host, &window_start,
+ &window_end);
+ ret = esdhc_execute_sw_tuning(mmc, opcode,
+ window_start,
+ window_end);
+ if (ret)
+ break;
+
+ /* Retry both HW/SW tuning with reduced clock. */
+ ret = host->tuning_err;
+ if (ret == -EAGAIN && retries) {
+ /* Recover HS400 tuning flag */
+ if (hs400_tuning)
+ host->flags |= SDHCI_HS400_TUNING;
+
+ clk = host->max_clk / (esdhc->div_ratio + 1);
+ esdhc_of_set_clock(host, clk);
+ pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
+ mmc_hostname(mmc));
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ } while (retries--);
+
+ if (ret) {
+ esdhc_tuning_block_enable(host, false);
+ } else if (hs400_tuning) {
+ val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
+ val |= ESDHC_FLW_CTL_BG;
+ sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
}
+
return ret;
}
@@ -1114,10 +1286,15 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
esdhc = sdhci_pltfm_priv(pltfm_host);
- if (soc_device_match(soc_fixup_tuning))
- esdhc->quirk_fixup_tuning = true;
+ if (soc_device_match(soc_tuning_erratum_type1))
+ esdhc->quirk_tuning_erratum_type1 = true;
+ else
+ esdhc->quirk_tuning_erratum_type1 = false;
+
+ if (soc_device_match(soc_tuning_erratum_type2))
+ esdhc->quirk_tuning_erratum_type2 = true;
else
- esdhc->quirk_fixup_tuning = false;
+ esdhc->quirk_tuning_erratum_type2 = false;
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index eaffa85bc728..acefb76b4e15 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -21,6 +21,7 @@
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/slot-gpio.h>
@@ -1590,11 +1591,59 @@ static int amd_probe(struct sdhci_pci_chip *chip)
return 0;
}
+static u32 sdhci_read_present_state(struct sdhci_host *host)
+{
+ return sdhci_readl(host, SDHCI_PRESENT_STATE);
+}
+
+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 present_state;
+
+ /*
+ * SDHC 0x7906 requires a hard reset to clear all internal state.
+ * Otherwise it can get into a bad state where the DATA lines are always
+ * read as zeros.
+ */
+ if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
+ pci_clear_master(pdev);
+
+ pci_save_state(pdev);
+
+ pci_set_power_state(pdev, PCI_D3cold);
+ pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
+ pdev->current_state);
+ pci_set_power_state(pdev, PCI_D0);
+
+ pci_restore_state(pdev);
+
+ /*
+ * SDHCI_RESET_ALL says the card detect logic should not be
+ * reset, but since we need to reset the entire controller
+ * we should wait until the card detect logic has stabilized.
+ *
+ * This normally takes about 40ms.
+ */
+ readx_poll_timeout(
+ sdhci_read_present_state,
+ host,
+ present_state,
+ present_state & SDHCI_CD_STABLE,
+ 10000,
+ 100000
+ );
+ }
+
+ return sdhci_reset(host, mask);
+}
+
static const struct sdhci_ops amd_sdhci_pci_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -1673,6 +1722,8 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
+ SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
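
(Aside, not part of the patch: amd_sdhci_reset() above uses readx_poll_timeout() from <linux/iopoll.h> to wait for card-detect to stabilize after the D3cold/D0 power cycle. A minimal sketch of that polling idiom, under the assumption of the same sdhci register helpers; the example_* names are illustrative.)

/* Illustrative only: poll PRESENT_STATE until the CD_STABLE bit is set. */
static u32 example_read_present_state(struct sdhci_host *host)
{
	return sdhci_readl(host, SDHCI_PRESENT_STATE);
}

static int example_wait_cd_stable(struct sdhci_host *host)
{
	u32 state;

	/*
	 * readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us):
	 * repeatedly do val = op(addr), sleeping ~10 ms between reads,
	 * until cond is true or 100 ms elapse; returns 0 or -ETIMEDOUT.
	 */
	return readx_poll_timeout(example_read_present_state, host, state,
				  state & SDHCI_CD_STABLE, 10000, 100000);
}
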
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 558202fe64c6..981bbbe63aff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,8 @@
#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5
+#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4
+#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b056400e34b1..3140fe2e5dba 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -337,8 +337,19 @@ static void sdhci_init(struct sdhci_host *host, int soft)
static void sdhci_reinit(struct sdhci_host *host)
{
+ u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
sdhci_init(host, 0);
sdhci_enable_card_detection(host);
+
+ /*
+ * A change to the card detect bits indicates a change in present state,
+ * refer sdhci_set_card_detection(). A card detect interrupt might have
+ * been missed while the host controller was being reset, so trigger a
+ * rescan to check.
+ */
+ if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
static void __sdhci_led_activate(struct sdhci_host *host)
@@ -2202,7 +2213,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (!(ctrl & SDHCI_CTRL_VDD_180))
return 0;
- pr_warn("%s: 3.3V regulator output did not became stable\n",
+ pr_warn("%s: 3.3V regulator output did not become stable\n",
mmc_hostname(mmc));
return -EAGAIN;
@@ -2234,7 +2245,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (ctrl & SDHCI_CTRL_VDD_180)
return 0;
- pr_warn("%s: 1.8V regulator output did not became stable\n",
+ pr_warn("%s: 1.8V regulator output did not become stable\n",
mmc_hostname(mmc));
return -EAGAIN;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index bb90757ecace..b8e897e31e2e 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -12,6 +12,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
+#include "cqhci.h"
#include "sdhci-pltfm.h"
/* CTL_CFG Registers */
@@ -68,6 +69,9 @@
#define CLOCK_TOO_SLOW_HZ 400000
+/* Command Queue Host Controller Interface Base address */
+#define SDHCI_AM654_CQE_BASE_ADDR 0x200
+
static struct regmap_config sdhci_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -259,6 +263,19 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -267,6 +284,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
@@ -290,6 +308,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_j721e_4bit_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
@@ -304,6 +323,40 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.pdata = &sdhci_j721e_4bit_pdata,
.flags = IOMUX_PRESENT,
};
+
+static void sdhci_am654_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static const struct cqhci_host_ops sdhci_am654_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_am654_dumpregs,
+};
+
+static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
+{
+ struct cqhci_host *cq_host;
+ int ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent, sizeof(struct cqhci_host),
+ GFP_KERNEL);
+ if (!cq_host)
+ return -ENOMEM;
+
+ cq_host->mmio = host->ioaddr + SDHCI_AM654_CQE_BASE_ADDR;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ cq_host->ops = &sdhci_am654_cqhci_ops;
+
+ host->mmc->caps2 |= MMC_CAP2_CQE;
+
+ ret = cqhci_init(cq_host, host->mmc, 1);
+
+ return ret;
+}
+
static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -344,7 +397,23 @@ static int sdhci_am654_init(struct sdhci_host *host)
regmap_update_bits(sdhci_am654->base, CTL_CFG_2, SLOTTYPE_MASK,
ctl_cfg_2);
- return sdhci_add_host(host);
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ ret = sdhci_am654_cqe_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ return 0;
+
+err_cleanup_host:
+ sdhci_cleanup_host(host);
+ return ret;
}
static int sdhci_am654_get_of_property(struct platform_device *pdev,
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index f8b939e63e02..fa0dfc657c22 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -16,31 +16,7 @@
#include <linux/clk.h>
#include "sdhci-pltfm.h"
-
-/* F_SDH30 extended Controller registers */
-#define F_SDH30_AHB_CONFIG 0x100
-#define F_SDH30_AHB_BIGED 0x00000040
-#define F_SDH30_BUSLOCK_DMA 0x00000020
-#define F_SDH30_BUSLOCK_EN 0x00000010
-#define F_SDH30_SIN 0x00000008
-#define F_SDH30_AHB_INCR_16 0x00000004
-#define F_SDH30_AHB_INCR_8 0x00000002
-#define F_SDH30_AHB_INCR_4 0x00000001
-
-#define F_SDH30_TUNING_SETTING 0x108
-#define F_SDH30_CMD_CHK_DIS 0x00010000
-
-#define F_SDH30_IO_CONTROL2 0x114
-#define F_SDH30_CRES_O_DN 0x00080000
-#define F_SDH30_MSEL_O_1_8 0x00040000
-
-#define F_SDH30_ESD_CONTROL 0x124
-#define F_SDH30_EMMC_RST 0x00000002
-#define F_SDH30_EMMC_HS200 0x01000000
-
-#define F_SDH30_CMD_DAT_DELAY 0x200
-
-#define F_SDH30_MIN_CLOCK 400000
+#include "sdhci_f_sdh30.h"
struct f_sdhost_priv {
struct clk *clk_iface;
diff --git a/drivers/mmc/host/sdhci_f_sdh30.h b/drivers/mmc/host/sdhci_f_sdh30.h
new file mode 100644
index 000000000000..fc1ad28f7ca9
--- /dev/null
+++ b/drivers/mmc/host/sdhci_f_sdh30.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
+ *
+ */
+
+/* F_SDH30 extended Controller registers */
+#define F_SDH30_AHB_CONFIG 0x100
+#define F_SDH30_AHB_BIGED BIT(6)
+#define F_SDH30_BUSLOCK_DMA BIT(5)
+#define F_SDH30_BUSLOCK_EN BIT(4)
+#define F_SDH30_SIN BIT(3)
+#define F_SDH30_AHB_INCR_16 BIT(2)
+#define F_SDH30_AHB_INCR_8 BIT(1)
+#define F_SDH30_AHB_INCR_4 BIT(0)
+
+#define F_SDH30_TUNING_SETTING 0x108
+#define F_SDH30_CMD_CHK_DIS BIT(16)
+
+#define F_SDH30_IO_CONTROL2 0x114
+#define F_SDH30_CRES_O_DN BIT(19)
+#define F_SDH30_MSEL_O_1_8 BIT(18)
+
+#define F_SDH30_ESD_CONTROL 0x124
+#define F_SDH30_EMMC_RST BIT(1)
+#define F_SDH30_CMD_DAT_DELAY BIT(9)
+#define F_SDH30_EMMC_HS200 BIT(24)
+
+#define F_SDH30_MIN_CLOCK 400000
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 86b591100f16..c4a1d49fbea4 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1185,7 +1185,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (ret == -EPROBE_DEFER)
return ret;
- mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = pdata->max_segs ? : 32;
mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index a3680c900689..6ced1b7f642f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2070,18 +2070,11 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
-{ /* NOT irq */
- struct vub300_mmc_host *vub300 = mmc_priv(mmc);
- dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
-}
-
static const struct mmc_host_ops vub300_mmc_ops = {
.request = vub300_mmc_request,
.set_ios = vub300_mmc_set_ios,
.get_ro = vub300_mmc_get_ro,
.enable_sdio_irq = vub300_enable_sdio_irq,
- .init_card = vub300_init_card,
};
static int vub300_probe(struct usb_interface *interface,
diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile
index f8b624aca9cc..a27b635eb23a 100644
--- a/drivers/mtd/nand/onenand/Makefile
+++ b/drivers/mtd/nand/onenand/Makefile
@@ -9,6 +9,6 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
-obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung_mtd.o
onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/nand/onenand/samsung.c b/drivers/mtd/nand/onenand/samsung_mtd.c
index 55e5536a5850..55e5536a5850 100644
--- a/drivers/mtd/nand/onenand/samsung.c
+++ b/drivers/mtd/nand/onenand/samsung_mtd.c
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 1b77fff9f892..cc9a28cf9d82 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1078,36 +1078,6 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
return err;
}
-#ifdef CONFIG_COMPAT
-static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
- return vol_cdev_ioctl(file, cmd, translated_arg);
-}
-
-static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
- return ubi_cdev_ioctl(file, cmd, translated_arg);
-}
-
-static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
- return ctrl_cdev_ioctl(file, cmd, translated_arg);
-}
-#else
-#define vol_cdev_compat_ioctl NULL
-#define ubi_cdev_compat_ioctl NULL
-#define ctrl_cdev_compat_ioctl NULL
-#endif
-
/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
.owner = THIS_MODULE,
@@ -1118,7 +1088,7 @@ const struct file_operations ubi_vol_cdev_operations = {
.write = vol_cdev_write,
.fsync = vol_cdev_fsync,
.unlocked_ioctl = vol_cdev_ioctl,
- .compat_ioctl = vol_cdev_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/* UBI character device operations */
@@ -1126,13 +1096,13 @@ const struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = ubi_cdev_ioctl,
- .compat_ioctl = ubi_cdev_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = ctrl_cdev_ioctl,
- .compat_ioctl = ctrl_cdev_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 0f847d510950..54646c2c2744 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -107,6 +107,7 @@ void ubi_dump_vol_info(const struct ubi_volume *vol)
pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
pr_err("\tcorrupted %d\n", vol->corrupted);
pr_err("\tupd_marker %d\n", vol->upd_marker);
+ pr_err("\tskip_check %d\n", vol->skip_check);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index c44c8470247e..426820ab9afe 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -57,18 +57,6 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
-static int anchor_pebs_available(struct rb_root *root)
-{
- struct rb_node *p;
- struct ubi_wl_entry *e;
-
- ubi_rb_for_each_entry(p, e, root, u.rb)
- if (e->pnum < UBI_FM_MAX_START)
- return 1;
-
- return 0;
-}
-
/**
* ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
* @ubi: UBI device description object
@@ -277,8 +265,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
+
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* No luck, trigger wear leveling to produce a new anchor PEB */
+ ubi->fm_do_produce_anchor = 1;
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -294,7 +300,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
return -ENOMEM;
}
- wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
__schedule_ubi_work(ubi, wrk);
return 0;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 30621c67721a..1c7be4eb3ba6 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1540,14 +1540,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
return 0;
}
- ret = ubi_ensure_anchor_pebs(ubi);
- if (ret) {
- up_write(&ubi->fm_eba_sem);
- up_write(&ubi->work_sem);
- up_write(&ubi->fm_protect);
- return ret;
- }
-
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
@@ -1618,7 +1610,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
}
spin_lock(&ubi->wl_lock);
- tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ tmp_e = ubi->fm_anchor;
+ ubi->fm_anchor = NULL;
spin_unlock(&ubi->wl_lock);
if (old_fm) {
@@ -1670,6 +1663,9 @@ out_unlock:
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
+
+ ubi_ensure_anchor_pebs(ubi);
+
return ret;
err:
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 721b6aa7936c..9688b411c930 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -491,6 +491,8 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
+ * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -599,6 +601,8 @@ struct ubi_device {
struct work_struct fm_work;
int fm_work_scheduled;
int fast_attach;
+ struct ubi_wl_entry *fm_anchor;
+ int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -789,7 +793,6 @@ struct ubi_attach_info {
* @vol_id: the volume ID on which this erasure is being performed
* @lnum: the logical eraseblock number
* @torture: if the physical eraseblock has to be tortured
- * @anchor: produce a anchor PEB to by used by fastmap
*
* The @func pointer points to the worker function. If the @shutdown argument is
* not zero, the worker has to free the resources and exit immediately as the
@@ -805,7 +808,6 @@ struct ubi_work {
int vol_id;
int lnum;
int torture;
- int anchor;
};
#include "debug.h"
@@ -968,7 +970,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
-int static inline ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
+static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
#endif
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 3fcdefe2714d..5d77a38dba54 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -339,13 +339,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
}
}
- /* If no fastmap has been written and this WL entry can be used
- * as anchor PEB, hold it back and return the second best WL entry
- * such that fastmap can use the anchor PEB later. */
- if (prev_e && !ubi->fm_disabled &&
- !ubi->fm && e->pnum < UBI_FM_MAX_START)
- return prev_e;
-
return e;
}
@@ -656,9 +649,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int erase = 0, keep = 0, vol_id = -1, lnum = -1;
-#ifdef CONFIG_MTD_UBI_FASTMAP
- int anchor = wrk->anchor;
-#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
@@ -698,11 +688,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
}
#ifdef CONFIG_MTD_UBI_FASTMAP
- /* Check whether we need to produce an anchor PEB */
- if (!anchor)
- anchor = !anchor_pebs_available(&ubi->free);
-
- if (anchor) {
+ if (ubi->fm_do_produce_anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
if (!e1)
goto out_cancel;
@@ -719,6 +705,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ ubi->fm_do_produce_anchor = 0;
} else if (!ubi->scrub.rb_node) {
#else
if (!ubi->scrub.rb_node) {
@@ -1051,7 +1038,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
goto out_cancel;
}
- wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
if (nested)
__schedule_ubi_work(ubi, wrk);
@@ -1093,8 +1079,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
+
+ if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
+ ubi->fm_anchor = e;
+ ubi->fm_do_produce_anchor = 0;
+ } else {
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+
spin_unlock(&ubi->wl_lock);
/*
@@ -1882,6 +1875,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (err)
goto out_free;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi_ensure_anchor_pebs(ubi);
+#endif
return 0;
out_free:
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index a9e2d669acd8..c93a53293786 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,6 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 40b079162804..bd40b114d6cd 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -102,8 +102,8 @@ static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
&ser->rx_blob);
- debugfs_create_x32("ser_state", 0400, ser->debugfs_tty_dir,
- (u32 *)&ser->state);
+ debugfs_create_xul("ser_state", 0400, ser->debugfs_tty_dir,
+ &ser->state);
debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
&ser->tty_status);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d5ae2e1e0b0e..9c767ee252ac 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4422,6 +4422,7 @@ static int macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
+ tasklet_kill(&bp->hresp_err_tasklet);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 17d300ea9955..f51dca1526c6 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -49,4 +49,4 @@ config BE2NET_SKYHAWK
comment "WARNING: be2net is useless without any enabled chip"
depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
- BE2NET_SKYHAWK=n && BE2NET
+ BE2NET_SKYHAWK=n && BE2NET
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a6f2063f1475..8ed85037f021 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1858,7 +1858,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
}
/* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.txt)
+ * Documentation/networking/phy.rst)
*/
phy_support_asym_pause(phy);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index aca95f64bde8..9b7a8db9860f 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
}
qpl->id = id;
- qpl->num_entries = pages;
+ qpl->num_entries = 0;
qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->pages)
@@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
/* caller handles clean up */
if (err)
return -ENOMEM;
+ qpl->num_entries++;
}
priv->num_registered_pages += pages;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0686ded7ad3a..c90080781924 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -159,6 +159,40 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
return rc;
}
+/**
+ * ibmvnic_wait_for_completion - Check device state and wait for completion
+ * @adapter: private device data
+ * @comp_done: completion structure to wait for
+ * @timeout: time to wait in milliseconds
+ *
+ * Wait for a completion signal or until the timeout limit is reached
+ * while checking that the device is still active.
+ */
+static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
+ struct completion *comp_done,
+ unsigned long timeout)
+{
+ struct net_device *netdev;
+ unsigned long div_timeout;
+ u8 retry;
+
+ netdev = adapter->netdev;
+ retry = 5;
+ div_timeout = msecs_to_jiffies(timeout / retry);
+ while (true) {
+ if (!adapter->crq.active) {
+ netdev_err(netdev, "Device down!\n");
+ return -ENODEV;
+ }
+ if (!retry--)
+ break;
+ if (wait_for_completion_timeout(comp_done, div_timeout))
+ return 0;
+ }
+ netdev_err(netdev, "Operation timed out.\n");
+ return -ETIMEDOUT;
+}
+
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
@@ -176,21 +210,35 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
ltb->map_id = adapter->map_id;
adapter->map_id++;
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
rc = send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
if (rc) {
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev,
+ "Long term map request aborted or timed out,rc = %d\n",
+ rc);
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
return rc;
}
- wait_for_completion(&adapter->fw_done);
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
adapter->fw_done_rc);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
return -1;
}
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -211,22 +259,37 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb)
{
+ struct device *dev = &adapter->vdev->dev;
int rc;
memset(ltb->buff, 0, ltb->size);
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+
+ reinit_completion(&adapter->fw_done);
rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_info(dev,
+ "Reset failed, long term map request timed out or aborted\n");
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
if (adapter->fw_done_rc) {
- dev_info(&adapter->vdev->dev,
+ dev_info(dev,
"Reset failed, attempting to free and reallocate buffer\n");
free_long_term_buff(adapter, ltb);
+ mutex_unlock(&adapter->fw_lock);
return alloc_long_term_buff(adapter, ltb, ltb->size);
}
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -943,13 +1006,25 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (adapter->vpd->buff)
len = adapter->vpd->len;
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
crq.get_vpd_size.cmd = GET_VPD_SIZE;
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+ mutex_unlock(&adapter->fw_lock);
if (!adapter->vpd->len)
return -ENODATA;
@@ -976,7 +1051,10 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
return -ENOMEM;
}
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
reinit_completion(&adapter->fw_done);
+
crq.get_vpd.first = IBMVNIC_CRQ_CMD;
crq.get_vpd.cmd = GET_VPD;
crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
@@ -985,10 +1063,20 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (rc) {
kfree(adapter->vpd->buff);
adapter->vpd->buff = NULL;
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
+ kfree(adapter->vpd->buff);
+ adapter->vpd->buff = NULL;
+ mutex_unlock(&adapter->fw_lock);
return rc;
}
- wait_for_completion(&adapter->fw_done);
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -1689,20 +1777,25 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
rc = ibmvnic_send_crq(adapter, &crq);
if (rc) {
rc = -EIO;
+ mutex_unlock(&adapter->fw_lock);
goto err;
}
- wait_for_completion(&adapter->fw_done);
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- if (adapter->fw_done_rc) {
+ if (rc || adapter->fw_done_rc) {
rc = -EIO;
+ mutex_unlock(&adapter->fw_lock);
goto err;
}
-
+ mutex_unlock(&adapter->fw_lock);
return 0;
err:
ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
@@ -2316,12 +2409,19 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
- init_completion(&adapter->reset_done);
+ reinit_completion(&adapter->reset_done);
adapter->wait_for_reset = true;
rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
- if (rc)
- return rc;
- wait_for_completion(&adapter->reset_done);
+
+ if (rc) {
+ ret = rc;
+ goto out;
+ }
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
+ if (rc) {
+ ret = -ENODEV;
+ goto out;
+ }
ret = 0;
if (adapter->reset_done_rc) {
@@ -2332,13 +2432,21 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->desired.rx_entries = adapter->fallback.rx_entries;
adapter->desired.tx_entries = adapter->fallback.tx_entries;
- init_completion(&adapter->reset_done);
+ reinit_completion(&adapter->reset_done);
adapter->wait_for_reset = true;
rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
- if (rc)
- return ret;
- wait_for_completion(&adapter->reset_done);
+ if (rc) {
+ ret = rc;
+ goto out;
+ }
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
+ 60000);
+ if (rc) {
+ ret = -ENODEV;
+ goto out;
+ }
}
+out:
adapter->wait_for_reset = false;
return ret;
@@ -2603,11 +2711,13 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
cpu_to_be32(sizeof(struct ibmvnic_statistics));
/* Wait for data to be written */
- init_completion(&adapter->stats_done);
+ reinit_completion(&adapter->stats_done);
rc = ibmvnic_send_crq(adapter, &crq);
if (rc)
return;
- wait_for_completion(&adapter->stats_done);
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
+ if (rc)
+ return;
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
@@ -4408,11 +4518,24 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
memset(&crq, 0, sizeof(crq));
crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
- init_completion(&adapter->fw_done);
+
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ mutex_unlock(&adapter->fw_lock);
return adapter->fw_done_rc ? -EIO : 0;
}
@@ -4505,6 +4628,15 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_XPORT_EVENT:
netif_carrier_off(netdev);
adapter->crq.active = false;
+ /* terminate any thread waiting for a response
+ * from the device
+ */
+ if (!completion_done(&adapter->fw_done)) {
+ adapter->fw_done_rc = -EIO;
+ complete(&adapter->fw_done);
+ }
+ if (!completion_done(&adapter->stats_done))
+ complete(&adapter->stats_done);
if (test_bit(0, &adapter->resetting))
adapter->force_reset_recovery = true;
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
@@ -4959,7 +5091,11 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
__ibmvnic_delayed_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ mutex_init(&adapter->fw_lock);
init_completion(&adapter->init_done);
+ init_completion(&adapter->fw_done);
+ init_completion(&adapter->reset_done);
+ init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
do {
@@ -5017,6 +5153,7 @@ ibmvnic_stats_fail:
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+ mutex_destroy(&adapter->fw_lock);
free_netdev(netdev);
return rc;
@@ -5041,6 +5178,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
adapter->state = VNIC_REMOVED;
rtnl_unlock();
+ mutex_destroy(&adapter->fw_lock);
device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
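
(Aside, not part of the patch: ibmvnic_wait_for_completion() above replaces unbounded wait_for_completion() calls with a sliced, bounded wait that also notices when the CRQ goes inactive. A minimal sketch of that pattern follows; "device_alive" stands in for the adapter->crq.active check and the example_* name is illustrative. Needs <linux/completion.h> and <linux/jiffies.h>.)

/* Illustrative only: wait for a completion in slices, bailing out early
 * if the device disappears between slices.
 */
static int example_bounded_wait(struct completion *done, bool *device_alive,
				unsigned long timeout_ms)
{
	unsigned long slice = msecs_to_jiffies(timeout_ms / 5);
	u8 retry = 5;

	while (retry--) {
		if (!*device_alive)
			return -ENODEV;	/* device dropped out, stop waiting */
		if (wait_for_completion_timeout(done, slice))
			return 0;	/* completion fired within this slice */
	}
	return -ETIMEDOUT;		/* every slice elapsed without a signal */
}
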
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index ebc39248b334..60eccaf91b12 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1026,6 +1026,8 @@ struct ibmvnic_adapter {
int init_done_rc;
struct completion fw_done;
+ /* Used for serialization of device commands */
+ struct mutex fw_lock;
int fw_done_rc;
struct completion reset_done;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index c40729b2c184..7fad2f24dcad 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -45,7 +45,6 @@
#define BAR_0 0
#define BAR_1 1
-#define BAR_5 5
#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 416da9619928..aca97b084003 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -977,7 +977,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
if (adapter->need_ioport) {
- for (i = BAR_1; i <= BAR_5; i++) {
+ for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index e85271b68410..681d44cc9784 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -42,7 +42,6 @@
#define BAR_0 0
#define BAR_1 1
-#define BAR_5 5
struct ixgb_adapter;
#include "ixgb_hw.h"
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 0940a0da16f2..3d8c051dd327 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -412,7 +412,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
- for (i = BAR_1; i <= BAR_5; i++) {
+ for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 784b1e26f414..6ed87534d314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -130,42 +130,6 @@ static const char *mlx5e_netdev_kind(struct net_device *dev)
return "unknown";
}
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
-{
- struct dst_entry *dst;
- struct neighbour *n;
-
- int ret;
-
- ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
- fl6);
- if (ret < 0)
- return ret;
-
- if (!(*out_ttl))
- *out_ttl = ip6_dst_hoplimit(dst);
-
- ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
- if (ret < 0) {
- dst_release(dst);
- return ret;
- }
-
- n = dst_neigh_lookup(dst, &fl6->daddr);
- dst_release(dst);
- if (!n)
- return -ENOMEM;
-
- *out_n = n;
- return 0;
-}
-
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
struct mlx5e_encap_entry *e)
{
@@ -319,6 +283,43 @@ release_neigh:
return err;
}
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ u8 *out_ttl)
+{
+ struct dst_entry *dst;
+ struct neighbour *n;
+
+ int ret;
+
+ ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+ fl6);
+ if (ret < 0)
+ return ret;
+
+ if (!(*out_ttl))
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+ ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
+ if (ret < 0) {
+ dst_release(dst);
+ return ret;
+ }
+
+ n = dst_neigh_lookup(dst, &fl6->daddr);
+ dst_release(dst);
+ if (!n)
+ return -ENOMEM;
+
+ *out_n = n;
+ return 0;
+}
+
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e)
@@ -436,6 +437,7 @@ release_neigh:
neigh_release(n);
return err;
}
+#endif
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 584074bbf669..173e2c12e1c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -837,8 +837,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_qp_table(dev);
- mlx5_init_mkey_table(dev);
-
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
@@ -896,7 +894,6 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
- mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
@@ -924,7 +921,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
- mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index c501bf2a0252..42cc3c7ac5b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,16 +36,6 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
-{
- xa_init_flags(&dev->priv.mkey_table, XA_FLAGS_LOCK_IRQ);
-}
-
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
-{
- WARN_ON(!xa_empty(&dev->priv.mkey_table));
-}
-
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
struct mlx5_async_ctx *async_ctx, u32 *in,
@@ -54,7 +44,6 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_async_work *context)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
- struct xarray *mkeys = &dev->priv.mkey_table;
u32 mkey_index;
void *mkc;
int err;
@@ -84,16 +73,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
mkey_index, key, mkey->key);
-
- err = xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey,
- GFP_KERNEL));
- if (err) {
- mlx5_core_warn(dev, "failed xarray insert of mkey 0x%x, %d\n",
- mlx5_base_mkey(mkey->key), err);
- mlx5_core_destroy_mkey(dev, mkey);
- }
-
- return err;
+ return 0;
}
EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
@@ -111,12 +91,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
{
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
- struct xarray *mkeys = &dev->priv.mkey_table;
- unsigned long flags;
-
- xa_lock_irqsave(mkeys, flags);
- __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
- xa_unlock_irqrestore(mkeys, flags);
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0e96ffab3b05..2cccadc204fd 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -583,18 +583,10 @@ int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- struct ocelot_skb *oskb =
- kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
-
- if (unlikely(!oskb))
- return -ENOMEM;
-
shinfo->tx_flags |= SKBTX_IN_PROGRESS;
-
- oskb->skb = skb;
- oskb->id = ocelot_port->ts_id % 4;
-
- list_add_tail(&oskb->head, &ocelot_port->skbs);
+ /* Store timestamp ID in cb[0] of sk_buff */
+ skb->cb[0] = ocelot_port->ts_id % 4;
+ skb_queue_tail(&ocelot_port->tx_skbs, skb);
return 0;
}
return -ENODATA;
@@ -704,12 +696,11 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
int budget = OCELOT_PTP_QUEUE_SZ;
while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
- struct list_head *pos, *tmp;
- struct sk_buff *skb = NULL;
- struct ocelot_skb *entry;
struct ocelot_port *port;
struct timespec64 ts;
+ unsigned long flags;
u32 val, id, txport;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -727,21 +718,22 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Retrieve its associated skb */
port = ocelot->ports[txport];
- list_for_each_safe(pos, tmp, &port->skbs) {
- entry = list_entry(pos, struct ocelot_skb, head);
- if (entry->id != id)
- continue;
-
- skb = entry->skb;
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
- list_del(pos);
- kfree(entry);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (skb->cb[0] != id)
+ continue;
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
}
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
/* Next ts */
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
- if (unlikely(!skb))
+ if (unlikely(!skb_match))
continue;
/* Get the h/w timestamp */
@@ -750,9 +742,9 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb_match);
}
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
@@ -2205,7 +2197,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- INIT_LIST_HEAD(&ocelot_port->skbs);
+ skb_queue_head_init(&ocelot_port->tx_skbs);
/* Basic L2 initialization */
@@ -2490,9 +2482,7 @@ EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
- struct list_head *pos, *tmp;
struct ocelot_port *port;
- struct ocelot_skb *entry;
int i;
cancel_delayed_work(&ocelot->stats_work);
@@ -2502,14 +2492,7 @@ void ocelot_deinit(struct ocelot *ocelot)
for (i = 0; i < ocelot->num_phys_ports; i++) {
port = ocelot->ports[i];
-
- list_for_each_safe(pos, tmp, &port->skbs) {
- entry = list_entry(pos, struct ocelot_skb, head);
-
- list_del(pos);
- dev_kfree_skb_any(entry->skb);
- kfree(entry);
- }
+ skb_queue_purge(&port->tx_skbs);
}
}
EXPORT_SYMBOL(ocelot_deinit);
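
(Aside, not part of the patch: the ocelot change above moves the two-step PTP bookkeeping from a private list onto a struct sk_buff_head, stashing the timestamp ID in skb->cb[0] and matching it when the timestamp FIFO is drained. A minimal sketch of that idiom follows; the example_* names are illustrative, the skb-queue helpers are from <linux/skbuff.h>.)

/* Illustrative only: queue a TX skb tagged with a timestamp ID, then find it
 * again by ID while holding the queue lock.
 */
static void example_stash_tx_skb(struct sk_buff_head *q, struct sk_buff *skb,
				 u8 ts_id)
{
	skb->cb[0] = ts_id;		/* driver-private scratch space */
	skb_queue_tail(q, skb);		/* takes q->lock internally */
}

static struct sk_buff *example_match_tx_skb(struct sk_buff_head *q, u8 ts_id)
{
	struct sk_buff *skb, *tmp, *match = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	skb_queue_walk_safe(q, skb, tmp) {
		if (skb->cb[0] != ts_id)
			continue;
		__skb_unlink(skb, q);	/* caller now owns the skb */
		match = skb;
		break;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return match;
}
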
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index dbdb7c5ae8f1..39317cdfa6cf 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -596,8 +596,8 @@ enum ionic_txq_desc_opcode {
* the @encap is set, the device will
* offload the outer header checksums using
* LCO (local checksum offload) (see
- * Documentation/networking/checksum-
- * offloads.txt for more info).
+ * Documentation/networking/checksum-offloads.rst
+ * for more info).
*
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
*
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index d47a038cb8d0..38d212686123 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1542,6 +1542,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ tp->dev->wol_enabled = wolopts ? 1 : 0;
}
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3872,7 +3873,7 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
r8168dp_hw_jumbo_enable(tp);
break;
- case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
r8168e_hw_jumbo_enable(tp);
break;
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 292045f4581f..8237dbc3e991 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -489,7 +489,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
}
/* Get the base address of device */
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
@@ -532,7 +532,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
if (priv->plat->stmmac_clk)
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
pcim_iounmap_regions(pdev, BIT(i));
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
index 386bafe74c3f..fa8604d7b797 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -34,7 +34,7 @@ static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
return ret;
}
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pcidev, i) == 0)
continue;
ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 929f3d3354e3..ecdbde539eb7 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -384,7 +384,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
- int mcast_members;
+ int mcast_members = 0;
int idx;
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
@@ -397,11 +397,13 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
mcast_members = cpsw_ale_get_port_mask(ale_entry,
ale->port_mask_bits);
mcast_members &= ~port_mask;
+ }
+
+ if (mcast_members)
cpsw_ale_set_port_mask(ale_entry, mcast_members,
ale->port_mask_bits);
- } else {
+ else
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
- }
cpsw_ale_write(ale, idx, ale_entry);
return 0;
@@ -478,6 +480,10 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
members = cpsw_ale_get_vlan_member_list(ale_entry,
ale->vlan_field_bits);
members &= ~port_mask;
+ if (!members) {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ return;
+ }
untag = cpsw_ale_get_vlan_untag_force(ale_entry,
ale->vlan_field_bits);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 250bd90627a5..9caa876ce6e8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -955,6 +955,9 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
+
+ /* Used to temporarily save the config info across hibernation */
+ struct netvsc_device_info *saved_netvsc_dev_info;
};
/* Per channel data */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 868e22e286ca..eff8fef4f775 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2424,6 +2424,61 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
+static int netvsc_suspend(struct hv_device *dev)
+{
+ struct net_device_context *ndev_ctx;
+ struct net_device *vf_netdev, *net;
+ struct netvsc_device *nvdev;
+ int ret;
+
+ net = hv_get_drvdata(dev);
+
+ ndev_ctx = netdev_priv(net);
+ cancel_delayed_work_sync(&ndev_ctx->dwork);
+
+ rtnl_lock();
+
+ nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ if (nvdev == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ if (vf_netdev)
+ netvsc_unregister_vf(vf_netdev);
+
+ /* Save the current config info */
+ ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
+
+ ret = netvsc_detach(net, nvdev);
+out:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static int netvsc_resume(struct hv_device *dev)
+{
+ struct net_device *net = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device_info *device_info;
+ int ret;
+
+ rtnl_lock();
+
+ net_device_ctx = netdev_priv(net);
+ device_info = net_device_ctx->saved_netvsc_dev_info;
+
+ ret = netvsc_attach(net, device_info);
+
+ rtnl_unlock();
+
+ kfree(device_info);
+ net_device_ctx->saved_netvsc_dev_info = NULL;
+
+ return ret;
+}
static const struct hv_vmbus_device_id id_table[] = {
/* Network guid */
{ HV_NIC_GUID, },
@@ -2438,6 +2493,8 @@ static struct hv_driver netvsc_drv = {
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
+ .suspend = netvsc_suspend,
+ .resume = netvsc_resume,
.driver = {
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
diff --git a/drivers/net/phy/aquantia.h b/drivers/net/phy/aquantia.h
index 5a16caab7b2f..c684b65c642c 100644
--- a/drivers/net/phy/aquantia.h
+++ b/drivers/net/phy/aquantia.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * HWMON driver for Aquantia PHY
+/* SPDX-License-Identifier: GPL-2.0 */
+/* HWMON driver for Aquantia PHY
*
* Author: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
* Author: Andrew Lunn <andrew@lunn.ch>
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 5ecacb4e64f0..c86fb9d1240c 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
*/
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 1c7a7c57dec3..93021904c5e4 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -151,13 +151,13 @@ static int dp83869_config_port_mirroring(struct phy_device *phydev)
struct dp83869_private *dp83869 = phydev->priv;
if (dp83869->port_mirroring == DP83869_PORT_MIRRORING_EN)
- phy_set_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
- DP83869_CFG3_PORT_MIRROR_EN);
+ return phy_set_bits_mmd(phydev, DP83869_DEVADDR,
+ DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
else
- phy_clear_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
- DP83869_CFG3_PORT_MIRROR_EN);
-
- return 0;
+ return phy_clear_bits_mmd(phydev, DP83869_DEVADDR,
+ DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
}
#ifdef CONFIG_OF_MDIO
@@ -204,7 +204,7 @@ static int dp83869_of_init(struct phy_device *phydev)
&dp83869->tx_fifo_depth))
dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
- return 0;
+ return ret;
}
#else
static int dp83869_of_init(struct phy_device *phydev)
@@ -216,7 +216,7 @@ static int dp83869_of_init(struct phy_device *phydev)
static int dp83869_configure_rgmii(struct phy_device *phydev,
struct dp83869_private *dp83869)
{
- int ret, val;
+ int ret = 0, val;
if (phy_interface_is_rgmii(phydev)) {
val = phy_read(phydev, MII_DP83869_PHYCTRL);
@@ -233,13 +233,13 @@ static int dp83869_configure_rgmii(struct phy_device *phydev,
}
if (dp83869->io_impedance >= 0)
- phy_modify_mmd(phydev, DP83869_DEVADDR,
- DP83869_IO_MUX_CFG,
- DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
- dp83869->io_impedance &
- DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
+ ret = phy_modify_mmd(phydev, DP83869_DEVADDR,
+ DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+ dp83869->io_impedance &
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
- return 0;
+ return ret;
}
static int dp83869_configure_mode(struct phy_device *phydev,
@@ -284,9 +284,11 @@ static int dp83869_configure_mode(struct phy_device *phydev,
return ret;
break;
case DP83869_RGMII_SGMII_BRIDGE:
- phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
- DP83869_SGMII_RGMII_BRIDGE,
- DP83869_SGMII_RGMII_BRIDGE);
+ ret = phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+ DP83869_SGMII_RGMII_BRIDGE,
+ DP83869_SGMII_RGMII_BRIDGE);
+ if (ret)
+ return ret;
ret = phy_write_mmd(phydev, DP83869_DEVADDR,
DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
@@ -334,7 +336,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
return -EINVAL;
};
- return 0;
+ return ret;
}
static int dp83869_config_init(struct phy_device *phydev)
@@ -358,12 +360,13 @@ static int dp83869_config_init(struct phy_device *phydev)
/* Clock output selection if muxing property is set */
if (dp83869->clk_output_sel != DP83869_CLK_O_SEL_REF_CLK)
- phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_IO_MUX_CFG,
- DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
- dp83869->clk_output_sel <<
- DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+ ret = phy_modify_mmd(phydev,
+ DP83869_DEVADDR, DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
+ dp83869->clk_output_sel <<
+ DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
- return 0;
+ return ret;
}
static int dp83869_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h
index b7f89ad27465..e33d3ea9a907 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/phy/mdio-cavium.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2009-2016 Cavium, Inc.
*/
diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h
index 751dab281f57..b1d27f7cd23f 100644
--- a/drivers/net/phy/mdio-i2c.h
+++ b/drivers/net/phy/mdio-i2c.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MDIO I2C bridge
*
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index b1f5ccb4ad9c..8af93ada8b64 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/* Applied Micro X-Gene SoC MDIO Driver
*
* Copyright (c) 2016, Applied Micro Circuits Corporation
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 677c45985338..476db5345e1a 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -440,6 +440,15 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_MODEL(0x001cc880),
+ .name = "RTL8208 Fast Ethernet",
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
.config_aneg = rtl8211_config_aneg,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 61824bbb5588..0cb1c2d0a8bc 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -270,7 +270,7 @@ static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
-static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
+static int ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
@@ -554,36 +554,66 @@ static __poll_t ppp_poll(struct file *file, poll_table *wait)
}
#ifdef CONFIG_PPP_FILTER
-static int get_filter(void __user *arg, struct sock_filter **p)
+static struct bpf_prog *get_filter(struct sock_fprog *uprog)
+{
+ struct sock_fprog_kern fprog;
+ struct bpf_prog *res = NULL;
+ int err;
+
+ if (!uprog->len)
+ return NULL;
+
+ /* uprog->len is unsigned short, so no overflow here */
+ fprog.len = uprog->len * sizeof(struct sock_filter);
+ fprog.filter = memdup_user(uprog->filter, fprog.len);
+ if (IS_ERR(fprog.filter))
+ return ERR_CAST(fprog.filter);
+
+ err = bpf_prog_create(&res, &fprog);
+ kfree(fprog.filter);
+
+ return err ? ERR_PTR(err) : res;
+}
+
+static struct bpf_prog *ppp_get_filter(struct sock_fprog __user *p)
{
struct sock_fprog uprog;
- struct sock_filter *code = NULL;
- int len;
- if (copy_from_user(&uprog, arg, sizeof(uprog)))
- return -EFAULT;
+ if (copy_from_user(&uprog, p, sizeof(struct sock_fprog)))
+ return ERR_PTR(-EFAULT);
+ return get_filter(&uprog);
+}
- if (!uprog.len) {
- *p = NULL;
- return 0;
- }
+#ifdef CONFIG_COMPAT
+struct sock_fprog32 {
+ unsigned short len;
+ compat_caddr_t filter;
+};
- len = uprog.len * sizeof(struct sock_filter);
- code = memdup_user(uprog.filter, len);
- if (IS_ERR(code))
- return PTR_ERR(code);
+#define PPPIOCSPASS32 _IOW('t', 71, struct sock_fprog32)
+#define PPPIOCSACTIVE32 _IOW('t', 70, struct sock_fprog32)
+
+static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
+{
+ struct sock_fprog32 uprog32;
+ struct sock_fprog uprog;
- *p = code;
- return uprog.len;
+ if (copy_from_user(&uprog32, p, sizeof(struct sock_fprog32)))
+ return ERR_PTR(-EFAULT);
+ uprog.len = uprog32.len;
+ uprog.filter = compat_ptr(uprog32.filter);
+ return get_filter(&uprog);
}
-#endif /* CONFIG_PPP_FILTER */
+#endif
+#endif
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ppp_file *pf;
struct ppp *ppp;
int err = -EFAULT, val, val2, i;
- struct ppp_idle idle;
+ struct ppp_idle32 idle32;
+ struct ppp_idle64 idle64;
struct npioctl npi;
int unit, cflags;
struct slcompress *vj;
@@ -679,9 +709,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case PPPIOCSCOMPRESS:
- err = ppp_set_compress(ppp, arg);
+ {
+ struct ppp_option_data data;
+ if (copy_from_user(&data, argp, sizeof(data)))
+ err = -EFAULT;
+ else
+ err = ppp_set_compress(ppp, &data);
break;
-
+ }
case PPPIOCGUNIT:
if (put_user(ppp->file.index, p))
break;
@@ -701,10 +736,18 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = 0;
break;
- case PPPIOCGIDLE:
- idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
- idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
- if (copy_to_user(argp, &idle, sizeof(idle)))
+ case PPPIOCGIDLE32:
+ idle32.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
+ idle32.recv_idle = (jiffies - ppp->last_recv) / HZ;
+ if (copy_to_user(argp, &idle32, sizeof(idle32)))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGIDLE64:
+ idle64.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
+ idle64.recv_idle = (jiffies - ppp->last_recv) / HZ;
+ if (copy_to_user(argp, &idle64, sizeof(idle64)))
break;
err = 0;
break;
@@ -753,55 +796,25 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_PPP_FILTER
case PPPIOCSPASS:
- {
- struct sock_filter *code;
-
- err = get_filter(argp, &code);
- if (err >= 0) {
- struct bpf_prog *pass_filter = NULL;
- struct sock_fprog_kern fprog = {
- .len = err,
- .filter = code,
- };
-
- err = 0;
- if (fprog.filter)
- err = bpf_prog_create(&pass_filter, &fprog);
- if (!err) {
- ppp_lock(ppp);
- if (ppp->pass_filter)
- bpf_prog_destroy(ppp->pass_filter);
- ppp->pass_filter = pass_filter;
- ppp_unlock(ppp);
- }
- kfree(code);
- }
- break;
- }
case PPPIOCSACTIVE:
{
- struct sock_filter *code;
-
- err = get_filter(argp, &code);
- if (err >= 0) {
- struct bpf_prog *active_filter = NULL;
- struct sock_fprog_kern fprog = {
- .len = err,
- .filter = code,
- };
+ struct bpf_prog *filter = ppp_get_filter(argp);
+ struct bpf_prog **which;
- err = 0;
- if (fprog.filter)
- err = bpf_prog_create(&active_filter, &fprog);
- if (!err) {
- ppp_lock(ppp);
- if (ppp->active_filter)
- bpf_prog_destroy(ppp->active_filter);
- ppp->active_filter = active_filter;
- ppp_unlock(ppp);
- }
- kfree(code);
+ if (IS_ERR(filter)) {
+ err = PTR_ERR(filter);
+ break;
}
+ if (cmd == PPPIOCSPASS)
+ which = &ppp->pass_filter;
+ else
+ which = &ppp->active_filter;
+ ppp_lock(ppp);
+ if (*which)
+ bpf_prog_destroy(*which);
+ *which = filter;
+ ppp_unlock(ppp);
+ err = 0;
break;
}
#endif /* CONFIG_PPP_FILTER */
@@ -827,6 +840,77 @@ out:
return err;
}
+#ifdef CONFIG_COMPAT
+struct ppp_option_data32 {
+ compat_uptr_t ptr;
+ u32 length;
+ compat_int_t transmit;
+};
+#define PPPIOCSCOMPRESS32 _IOW('t', 77, struct ppp_option_data32)
+
+static long ppp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ppp_file *pf;
+ int err = -ENOIOCTLCMD;
+ void __user *argp = (void __user *)arg;
+
+ mutex_lock(&ppp_mutex);
+
+ pf = file->private_data;
+ if (pf && pf->kind == INTERFACE) {
+ struct ppp *ppp = PF_TO_PPP(pf);
+ switch (cmd) {
+#ifdef CONFIG_PPP_FILTER
+ case PPPIOCSPASS32:
+ case PPPIOCSACTIVE32:
+ {
+ struct bpf_prog *filter = compat_ppp_get_filter(argp);
+ struct bpf_prog **which;
+
+ if (IS_ERR(filter)) {
+ err = PTR_ERR(filter);
+ break;
+ }
+ if (cmd == PPPIOCSPASS32)
+ which = &ppp->pass_filter;
+ else
+ which = &ppp->active_filter;
+ ppp_lock(ppp);
+ if (*which)
+ bpf_prog_destroy(*which);
+ *which = filter;
+ ppp_unlock(ppp);
+ err = 0;
+ break;
+ }
+#endif /* CONFIG_PPP_FILTER */
+ case PPPIOCSCOMPRESS32:
+ {
+ struct ppp_option_data32 data32;
+ if (copy_from_user(&data32, argp, sizeof(data32))) {
+ err = -EFAULT;
+ } else {
+ struct ppp_option_data data = {
+ .ptr = compat_ptr(data32.ptr),
+ .length = data32.length,
+ .transmit = data32.transmit
+ };
+ err = ppp_set_compress(ppp, &data);
+ }
+ break;
+ }
+ }
+ }
+ mutex_unlock(&ppp_mutex);
+
+ /* all other commands have compatible arguments */
+ if (err == -ENOIOCTLCMD)
+ err = ppp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+
+ return err;
+}
+#endif
+
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg)
{
@@ -895,6 +979,9 @@ static const struct file_operations ppp_device_fops = {
.write = ppp_write,
.poll = ppp_poll,
.unlocked_ioctl = ppp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ppp_compat_ioctl,
+#endif
.open = ppp_open,
.release = ppp_release,
.llseek = noop_llseek,
@@ -2734,24 +2821,20 @@ ppp_output_wakeup(struct ppp_channel *chan)
/* Process the PPPIOCSCOMPRESS ioctl. */
static int
-ppp_set_compress(struct ppp *ppp, unsigned long arg)
+ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data)
{
- int err;
+ int err = -EFAULT;
struct compressor *cp, *ocomp;
- struct ppp_option_data data;
void *state, *ostate;
unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
- err = -EFAULT;
- if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
- goto out;
- if (data.length > CCP_MAX_OPTION_LENGTH)
+ if (data->length > CCP_MAX_OPTION_LENGTH)
goto out;
- if (copy_from_user(ccp_option, (void __user *) data.ptr, data.length))
+ if (copy_from_user(ccp_option, data->ptr, data->length))
goto out;
err = -EINVAL;
- if (data.length < 2 || ccp_option[1] < 2 || ccp_option[1] > data.length)
+ if (data->length < 2 || ccp_option[1] < 2 || ccp_option[1] > data->length)
goto out;
cp = try_then_request_module(
@@ -2761,8 +2844,8 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
goto out;
err = -ENOBUFS;
- if (data.transmit) {
- state = cp->comp_alloc(ccp_option, data.length);
+ if (data->transmit) {
+ state = cp->comp_alloc(ccp_option, data->length);
if (state) {
ppp_xmit_lock(ppp);
ppp->xstate &= ~SC_COMP_RUN;
@@ -2780,7 +2863,7 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
module_put(cp->owner);
} else {
- state = cp->decomp_alloc(ccp_option, data.length);
+ state = cp->decomp_alloc(ccp_option, data->length);
if (state) {
ppp_recv_lock(ppp);
ppp->rstate &= ~SC_DECOMP_RUN;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 3ae70c7e6860..a6d63665ad03 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1123,14 +1123,6 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
}
}
-#ifdef CONFIG_COMPAT
-static long tap_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations tap_fops = {
.owner = THIS_MODULE,
.open = tap_open,
@@ -1140,9 +1132,7 @@ static const struct file_operations tap_fops = {
.poll = tap_poll,
.llseek = no_llseek,
.unlocked_ioctl = tap_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = tap_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
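
Several hunks in this series (tap, nvdimm, nvme) replace hand-written compat handlers with the generic compat_ptr_ioctl() helper, which is appropriate whenever the only translation needed is the pointer conversion. A rough sketch of what the helper does; the real implementation lives in the VFS core:

/* Simplified behaviour of compat_ptr_ioctl(): forward to the native handler
 * after converting the 32-bit user pointer. */
static long compat_ptr_ioctl_sketch(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}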
diff --git a/drivers/net/usb/aqc111.h b/drivers/net/usb/aqc111.h
index 4d68b3a6067c..b562db4da337 100644
--- a/drivers/net/usb/aqc111.h
+++ b/drivers/net/usb/aqc111.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later
- * Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (C) 2002-2003 TiVo Inc.
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 74849da031fa..ca827802f291 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1214,8 +1214,9 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
* This needs to be a tasklet otherwise we will
* end up recursively calling this function.
*/
-static void hso_unthrottle_tasklet(struct hso_serial *serial)
+static void hso_unthrottle_tasklet(unsigned long data)
{
+ struct hso_serial *serial = (struct hso_serial *)data;
unsigned long flags;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1265,7 +1266,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
/* Force default termio settings */
_hso_serial_set_termios(tty, NULL);
tasklet_init(&serial->unthrottle_tasklet,
- (void (*)(unsigned long))hso_unthrottle_tasklet,
+ hso_unthrottle_tasklet,
(unsigned long)serial);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
if (result) {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dde05e2fdc3e..30e511c2c8d0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1573,6 +1573,13 @@ static void usbnet_bh (struct timer_list *t)
}
}
+static void usbnet_bh_tasklet(unsigned long data)
+{
+ struct timer_list *t = (struct timer_list *)data;
+
+ usbnet_bh(t);
+}
+
/*-------------------------------------------------------------------------
*
@@ -1700,7 +1707,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- dev->bh.func = (void (*)(unsigned long))usbnet_bh;
+ dev->bh.func = usbnet_bh_tasklet;
dev->bh.data = (unsigned long)&dev->delay;
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
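
The hso and usbnet hunks above remove casts of mismatched functions to the tasklet callback type and add correctly typed wrappers instead; calling through a function pointer of the wrong type is undefined behaviour and breaks control-flow-integrity checking. The general shape, with hypothetical foo_* names:

#include <linux/interrupt.h>

struct foo {
	struct tasklet_struct tl;
	/* ... driver state ... */
};

static void foo_do_work(struct foo *foo);  /* hypothetical worker */

/* Callback with the exact prototype tasklet_init() expects; the object is
 * recovered from the unsigned long cookie instead of casting the function. */
static void foo_tasklet_fn(unsigned long data)
{
	struct foo *foo = (struct foo *)data;

	foo_do_work(foo);
}

static void foo_setup(struct foo *foo)
{
	tasklet_init(&foo->tl, foo_tasklet_fn, (unsigned long)foo);
}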
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 32ae710d4f40..1081d171e477 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -421,8 +421,6 @@ extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
* Asynchronous Interfacing
*/
-#define SERIAL_MAGIC 0x5301
-
/*
* The size of the serial xmit buffer is 1 page, or 4096 bytes
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a4d325fcf94a..452da44a21e0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1421,6 +1421,7 @@ out_err:
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct napi_struct *napi;
struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;
@@ -1526,8 +1527,16 @@ out:
if (unlikely(emergency && count))
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
- if (rxq->napi.poll)
- napi_gro_flush(&rxq->napi, false);
+ napi = &rxq->napi;
+ if (napi->poll) {
+ if (napi->rx_count) {
+ netif_receive_skb_list(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+ }
+
+ napi_gro_flush(napi, false);
+ }
iwl_pcie_rxq_restock(trans, rxq);
}
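
The iwlwifi hunk above accounts for the GRO batching list that newer kernels attach to each NAPI instance: because this receive path never calls napi_complete_done(), any skbs parked on napi->rx_list must be delivered by hand before the GRO flush. Restated as a standalone helper (a sketch, not code from the driver):

#include <linux/netdevice.h>

/* Drain the rx_list batch, then flush GRO, for NAPI contexts that do not go
 * through napi_complete_done(). */
static void flush_pending_rx(struct napi_struct *napi)
{
	if (napi->rx_count) {
		netif_receive_skb_list(&napi->rx_list);
		INIT_LIST_HEAD(&napi->rx_list);
		napi->rx_count = 0;
	}

	napi_gro_flush(napi, false);
}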
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index a9657ae6d782..d14e55e3c9da 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -631,6 +631,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
+ adapter->is_up = true;
goto done;
err_add_intf:
@@ -1469,6 +1470,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
mwifiex_deauthenticate(priv, NULL);
mwifiex_uninit_sw(adapter);
+ adapter->is_up = false;
if (adapter->if_ops.down_dev)
adapter->if_ops.down_dev(adapter);
@@ -1730,7 +1732,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
if (!adapter)
return 0;
- mwifiex_uninit_sw(adapter);
+ if (adapter->is_up)
+ mwifiex_uninit_sw(adapter);
if (adapter->irq_wakeup >= 0)
device_init_wakeup(adapter->dev, false);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 095837fba300..547ff3c578ee 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1017,6 +1017,7 @@ struct mwifiex_adapter {
/* For synchronizing FW initialization with device lifecycle. */
struct completion *fw_done;
+ bool is_up;
bool ext_scan;
u8 fw_api_ver;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 24c041dad9f6..fec38b6e86ff 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -444,6 +444,9 @@ static int mwifiex_sdio_suspend(struct device *dev)
return 0;
}
+ if (!adapter->is_up)
+ return -EBUSY;
+
mwifiex_enable_wake(adapter);
/* Enable the Host Sleep */
@@ -2220,22 +2223,30 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
struct sdio_func *func = card->func;
int ret;
+ /* Prepare the adapter for the reset. */
mwifiex_shutdown_sw(adapter);
+ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
- /* power cycle the adapter */
+ /* Run a HW reset of the SDIO interface. */
sdio_claim_host(func);
- mmc_hw_reset(func->card->host);
+ ret = mmc_hw_reset(func->card->host);
sdio_release_host(func);
- /* Previous save_adapter won't be valid after this. We will cancel
- * pending work requests.
- */
- clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
- clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
-
- ret = mwifiex_reinit_sw(adapter);
- if (ret)
- dev_err(&func->dev, "reinit failed: %d\n", ret);
+ switch (ret) {
+ case 1:
+ dev_dbg(&func->dev, "SDIO HW reset asynchronous\n");
+ complete_all(adapter->fw_done);
+ break;
+ case 0:
+ ret = mwifiex_reinit_sw(adapter);
+ if (ret)
+ dev_err(&func->dev, "reinit failed: %d\n", ret);
+ break;
+ default:
+ dev_err(&func->dev, "SDIO HW reset failed: %d\n", ret);
+ break;
+ }
}
/* This function reads/writes firmware */
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 677f1146ccf0..94569cd695c8 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -16,17 +16,12 @@
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include "wl1251.h"
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x104c
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1251
-#define SDIO_DEVICE_ID_TI_WL1251 0x9066
-#endif
-
struct wl1251_sdio {
struct sdio_func *func;
u32 elp_val;
@@ -49,7 +44,7 @@ static void wl1251_sdio_interrupt(struct sdio_func *func)
}
static const struct sdio_device_id wl1251_devices[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1251) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1251_devices);
@@ -217,6 +212,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
const struct wl1251_platform_data *wl1251_board_data;
+ struct device_node *np = func->dev.of_node;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -248,6 +244,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl->power_gpio = wl1251_board_data->power_gpio;
wl->irq = wl1251_board_data->irq;
wl->use_eeprom = wl1251_board_data->use_eeprom;
+ } else if (np) {
+ wl->use_eeprom = of_property_read_bool(np,
+ "ti,wl1251-has-eeprom");
+ wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ wl->irq = of_irq_get(np, 0);
+
+ if (wl->power_gpio == -EPROBE_DEFER ||
+ wl->irq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto disable;
+ }
}
if (gpio_is_valid(wl->power_gpio)) {
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 7afaf35f2453..9fd8cf2d270c 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -26,14 +26,6 @@
#include "wl12xx_80211.h"
#include "io.h"
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
static bool dump = false;
struct wl12xx_sdio_glue {
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 65865e460ab8..04dd46647db3 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -354,13 +354,10 @@ static void pp_clear_ctx(struct pp_ctx *pp)
static void pp_setup_dbgfs(struct pp_ctx *pp)
{
struct pci_dev *pdev = pp->ntb->pdev;
- void *ret;
pp->dbgfs_dir = debugfs_create_dir(pci_name(pdev), pp_dbgfs_topdir);
- ret = debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
- if (!ret)
- dev_warn(&pp->ntb->dev, "DebugFS unsupported\n");
+ debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
}
static void pp_clear_dbgfs(struct pp_ctx *pp)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 3e9f45aec8d1..0d04ea3d9fd7 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1261,11 +1261,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
ret = btt_data_read(arena, page, off, postmap, cur_len);
if (ret) {
- int rc;
-
/* Media error - set the e_flag */
- rc = btt_map_write(arena, premap, postmap, 0, 1,
- NVDIMM_IO_ATOMIC);
+ if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
+ dev_warn_ratelimited(to_dev(arena),
+ "Error persistently tracking bad blocks at %#x\n",
+ premap);
goto out_rtt;
}
@@ -1674,7 +1674,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
struct nd_region *nd_region;
struct btt_sb *btt_sb;
struct btt *btt;
- size_t rawsize;
+ size_t size, rawsize;
+ int rc;
if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
@@ -1685,6 +1686,11 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
if (!btt_sb)
return -ENOMEM;
+ size = nvdimm_namespace_capacity(ndns);
+ rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
+ if (rc)
+ return rc;
+
/*
* If this returns < 0, that is ok as it just means there wasn't
* an existing BTT, and we're creating a new one. We still need to
@@ -1693,7 +1699,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
*/
nd_btt_version(nd_btt, ndns, btt_sb);
- rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
+ rawsize = size - nd_btt->initial_offset;
if (rawsize < ARENA_MIN_SIZE) {
dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
dev_name(&ndns->dev),
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 3508a79110c7..05feb97e11ce 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -25,17 +25,6 @@ static void nd_btt_release(struct device *dev)
kfree(nd_btt);
}
-static struct device_type nd_btt_device_type = {
- .name = "nd_btt",
- .release = nd_btt_release,
-};
-
-bool is_nd_btt(struct device *dev)
-{
- return dev->type == &nd_btt_device_type;
-}
-EXPORT_SYMBOL(is_nd_btt);
-
struct nd_btt *to_nd_btt(struct device *dev)
{
struct nd_btt *nd_btt = container_of(dev, struct nd_btt, dev);
@@ -178,6 +167,18 @@ static const struct attribute_group *nd_btt_attribute_groups[] = {
NULL,
};
+static const struct device_type nd_btt_device_type = {
+ .name = "nd_btt",
+ .release = nd_btt_release,
+ .groups = nd_btt_attribute_groups,
+};
+
+bool is_nd_btt(struct device *dev)
+{
+ return dev->type == &nd_btt_device_type;
+}
+EXPORT_SYMBOL(is_nd_btt);
+
static struct device *__nd_btt_create(struct nd_region *nd_region,
unsigned long lbasize, u8 *uuid,
struct nd_namespace_common *ndns)
@@ -204,7 +205,6 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
dev->parent = &nd_region->dev;
dev->type = &nd_btt_device_type;
- dev->groups = nd_btt_attribute_groups;
device_initialize(&nd_btt->dev);
if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
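
Most of the nvdimm changes below follow one conversion: instead of assigning dev->groups by hand before device registration, the default attribute groups are attached to a const struct device_type so the driver core creates and removes the sysfs files itself. A minimal sketch of the destination pattern, with hypothetical foo_* names:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

struct foo_device {
	struct device dev;
	/* ... */
};

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR_RO(example);            /* hypothetical attribute */

static struct attribute *foo_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);                     /* defines foo_groups[] */

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_device, dev));
}

static const struct device_type foo_device_type = {
	.name    = "foo",
	.release = foo_release,
	.groups  = foo_groups,             /* managed by the driver core */
};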
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index d47412dcdf38..a8b515968569 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -300,9 +300,14 @@ static void nvdimm_bus_release(struct device *dev)
kfree(nvdimm_bus);
}
+static const struct device_type nvdimm_bus_dev_type = {
+ .release = nvdimm_bus_release,
+ .groups = nvdimm_bus_attribute_groups,
+};
+
bool is_nvdimm_bus(struct device *dev)
{
- return dev->release == nvdimm_bus_release;
+ return dev->type == &nvdimm_bus_dev_type;
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
@@ -355,7 +360,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
badrange_init(&nvdimm_bus->badrange);
nvdimm_bus->nd_desc = nd_desc;
nvdimm_bus->dev.parent = parent;
- nvdimm_bus->dev.release = nvdimm_bus_release;
+ nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
nvdimm_bus->dev.groups = nd_desc->attr_groups;
nvdimm_bus->dev.bus = &nvdimm_bus_type;
nvdimm_bus->dev.of_node = nd_desc->of_node;
@@ -669,10 +674,9 @@ static struct attribute *nd_device_attributes[] = {
/*
* nd_device_attribute_group - generic attributes for all devices on an nd bus
*/
-struct attribute_group nd_device_attribute_group = {
+const struct attribute_group nd_device_attribute_group = {
.attrs = nd_device_attributes,
};
-EXPORT_SYMBOL_GPL(nd_device_attribute_group);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -681,28 +685,56 @@ static ssize_t numa_node_show(struct device *dev,
}
static DEVICE_ATTR_RO(numa_node);
+static int nvdimm_dev_to_target_node(struct device *dev)
+{
+ struct device *parent = dev->parent;
+ struct nd_region *nd_region = NULL;
+
+ if (is_nd_region(dev))
+ nd_region = to_nd_region(dev);
+ else if (parent && is_nd_region(parent))
+ nd_region = to_nd_region(parent);
+
+ if (!nd_region)
+ return NUMA_NO_NODE;
+ return nd_region->target_node;
+}
+
+static ssize_t target_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", nvdimm_dev_to_target_node(dev));
+}
+static DEVICE_ATTR_RO(target_node);
+
static struct attribute *nd_numa_attributes[] = {
&dev_attr_numa_node.attr,
+ &dev_attr_target_node.attr,
NULL,
};
static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
int n)
{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+
if (!IS_ENABLED(CONFIG_NUMA))
return 0;
+ if (a == &dev_attr_target_node.attr &&
+ nvdimm_dev_to_target_node(dev) == NUMA_NO_NODE)
+ return 0;
+
return a->mode;
}
/*
* nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
*/
-struct attribute_group nd_numa_attribute_group = {
+const struct attribute_group nd_numa_attribute_group = {
.attrs = nd_numa_attributes,
.is_visible = nd_numa_attr_visible,
};
-EXPORT_SYMBOL_GPL(nd_numa_attribute_group);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
@@ -1227,7 +1259,7 @@ static const struct file_operations nvdimm_bus_fops = {
.owner = THIS_MODULE,
.open = nd_open,
.unlocked_ioctl = bus_ioctl,
- .compat_ioctl = bus_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
@@ -1235,7 +1267,7 @@ static const struct file_operations nvdimm_fops = {
.owner = THIS_MODULE,
.open = nd_open,
.unlocked_ioctl = dimm_ioctl,
- .compat_ioctl = dimm_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2985ca949912..45964acba944 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -300,13 +300,14 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
return rc;
}
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
+ resource_size_t size)
{
struct resource *res = &nsio->res;
struct nd_namespace_common *ndns = &nsio->common;
- nsio->size = resource_size(res);
- if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ nsio->size = size;
+ if (!devm_request_mem_region(dev, res->start, size,
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
@@ -318,12 +319,10 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
&nsio->res);
- nsio->addr = devm_memremap(dev, res->start, resource_size(res),
- ARCH_MEMREMAP_PMEM);
+ nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
return PTR_ERR_OR_ZERO(nsio->addr);
}
-EXPORT_SYMBOL_GPL(devm_nsio_enable);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
@@ -331,6 +330,5 @@ void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
devm_memunmap(dev, nsio->addr);
devm_exit_badblocks(dev, &nsio->bb);
- devm_release_mem_region(dev, res->start, resource_size(res));
+ devm_release_mem_region(dev, res->start, nsio->size);
}
-EXPORT_SYMBOL_GPL(devm_nsio_disable);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index e592c4964674..fe9bd6febdd2 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -385,10 +385,14 @@ static struct attribute *nvdimm_bus_attributes[] = {
NULL,
};
-struct attribute_group nvdimm_bus_attribute_group = {
+static const struct attribute_group nvdimm_bus_attribute_group = {
.attrs = nvdimm_bus_attributes,
};
-EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);
+
+const struct attribute_group *nvdimm_bus_attribute_groups[] = {
+ &nvdimm_bus_attribute_group,
+ NULL,
+};
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 6d22b0f83b3b..99965077bac4 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -23,17 +23,6 @@ static void nd_dax_release(struct device *dev)
kfree(nd_dax);
}
-static struct device_type nd_dax_device_type = {
- .name = "nd_dax",
- .release = nd_dax_release,
-};
-
-bool is_nd_dax(struct device *dev)
-{
- return dev ? dev->type == &nd_dax_device_type : false;
-}
-EXPORT_SYMBOL(is_nd_dax);
-
struct nd_dax *to_nd_dax(struct device *dev)
{
struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
@@ -43,13 +32,18 @@ struct nd_dax *to_nd_dax(struct device *dev)
}
EXPORT_SYMBOL(to_nd_dax);
-static const struct attribute_group *nd_dax_attribute_groups[] = {
- &nd_pfn_attribute_group,
- &nd_device_attribute_group,
- &nd_numa_attribute_group,
- NULL,
+static const struct device_type nd_dax_device_type = {
+ .name = "nd_dax",
+ .release = nd_dax_release,
+ .groups = nd_pfn_attribute_groups,
};
+bool is_nd_dax(struct device *dev)
+{
+ return dev ? dev->type == &nd_dax_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_dax);
+
static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
{
struct nd_pfn *nd_pfn;
@@ -69,7 +63,6 @@ static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
dev = &nd_pfn->dev;
dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
- dev->groups = nd_dax_attribute_groups;
dev->type = &nd_dax_device_type;
dev->parent = &nd_region->dev;
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 196aa44c4936..94ea6dba6b4f 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -202,16 +202,6 @@ static void nvdimm_release(struct device *dev)
kfree(nvdimm);
}
-static struct device_type nvdimm_device_type = {
- .name = "nvdimm",
- .release = nvdimm_release,
-};
-
-bool is_nvdimm(struct device *dev)
-{
- return dev->type == &nvdimm_device_type;
-}
-
struct nvdimm *to_nvdimm(struct device *dev)
{
struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);
@@ -450,11 +440,27 @@ static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
return 0;
}
-struct attribute_group nvdimm_attribute_group = {
+static const struct attribute_group nvdimm_attribute_group = {
.attrs = nvdimm_attributes,
.is_visible = nvdimm_visible,
};
-EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
+
+static const struct attribute_group *nvdimm_attribute_groups[] = {
+ &nd_device_attribute_group,
+ &nvdimm_attribute_group,
+ NULL,
+};
+
+static const struct device_type nvdimm_device_type = {
+ .name = "nvdimm",
+ .release = nvdimm_release,
+ .groups = nvdimm_attribute_groups,
+};
+
+bool is_nvdimm(struct device *dev)
+{
+ return dev->type == &nvdimm_device_type;
+}
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 87f72f725e4f..e02f60ad6c99 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -8,17 +8,6 @@
#include <linux/libnvdimm.h>
#include <linux/module.h>
-static const struct attribute_group *e820_pmem_attribute_groups[] = {
- &nvdimm_bus_attribute_group,
- NULL,
-};
-
-static const struct attribute_group *e820_pmem_region_attribute_groups[] = {
- &nd_region_attribute_group,
- &nd_device_attribute_group,
- NULL,
-};
-
static int e820_pmem_remove(struct platform_device *pdev)
{
struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
@@ -46,7 +35,6 @@ static int e820_register_one(struct resource *res, void *data)
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.res = res;
- ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
ndr_desc.numa_node = e820_range_to_nid(res->start);
ndr_desc.target_node = ndr_desc.numa_node;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
@@ -62,7 +50,6 @@ static int e820_pmem_probe(struct platform_device *pdev)
struct nvdimm_bus *nvdimm_bus;
int rc = -ENXIO;
- nd_desc.attr_groups = e820_pmem_attribute_groups;
nd_desc.provider_name = "e820";
nd_desc.module = THIS_MODULE;
nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index cca0a3ba1d2c..032dc61725ff 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -44,35 +44,9 @@ static void namespace_blk_release(struct device *dev)
kfree(nsblk);
}
-static const struct device_type namespace_io_device_type = {
- .name = "nd_namespace_io",
- .release = namespace_io_release,
-};
-
-static const struct device_type namespace_pmem_device_type = {
- .name = "nd_namespace_pmem",
- .release = namespace_pmem_release,
-};
-
-static const struct device_type namespace_blk_device_type = {
- .name = "nd_namespace_blk",
- .release = namespace_blk_release,
-};
-
-static bool is_namespace_pmem(const struct device *dev)
-{
- return dev ? dev->type == &namespace_pmem_device_type : false;
-}
-
-static bool is_namespace_blk(const struct device *dev)
-{
- return dev ? dev->type == &namespace_blk_device_type : false;
-}
-
-static bool is_namespace_io(const struct device *dev)
-{
- return dev ? dev->type == &namespace_io_device_type : false;
-}
+static bool is_namespace_pmem(const struct device *dev);
+static bool is_namespace_blk(const struct device *dev);
+static bool is_namespace_io(const struct device *dev);
static int is_uuid_busy(struct device *dev, void *data)
{
@@ -1329,7 +1303,7 @@ static ssize_t resource_show(struct device *dev,
return -ENXIO;
return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
4096, 4104, 4160, 4224, 0 };
@@ -1510,16 +1484,20 @@ static ssize_t holder_show(struct device *dev,
}
static DEVICE_ATTR_RO(holder);
-static ssize_t __holder_class_store(struct device *dev, const char *buf)
+static int __holder_class_store(struct device *dev, const char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
if (dev->driver || ndns->claim)
return -EBUSY;
- if (sysfs_streq(buf, "btt"))
- ndns->claim_class = btt_claim_class(dev);
- else if (sysfs_streq(buf, "pfn"))
+ if (sysfs_streq(buf, "btt")) {
+ int rc = btt_claim_class(dev);
+
+ if (rc < NVDIMM_CCLASS_NONE)
+ return rc;
+ ndns->claim_class = rc;
+ } else if (sysfs_streq(buf, "pfn"))
ndns->claim_class = NVDIMM_CCLASS_PFN;
else if (sysfs_streq(buf, "dax"))
ndns->claim_class = NVDIMM_CCLASS_DAX;
@@ -1528,10 +1506,6 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
else
return -EINVAL;
- /* btt_claim_class() could've returned an error */
- if (ndns->claim_class < 0)
- return ndns->claim_class;
-
return 0;
}
@@ -1539,7 +1513,7 @@ static ssize_t holder_class_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
- ssize_t rc;
+ int rc;
nd_device_lock(dev);
nvdimm_bus_lock(dev);
@@ -1547,7 +1521,7 @@ static ssize_t holder_class_store(struct device *dev,
rc = __holder_class_store(dev, buf);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
- dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
+ dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -1645,11 +1619,8 @@ static umode_t namespace_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
- if (a == &dev_attr_resource.attr) {
- if (is_namespace_blk(dev))
- return 0;
- return 0400;
- }
+ if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
+ return 0;
if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
if (a == &dev_attr_size.attr)
@@ -1680,6 +1651,39 @@ static const struct attribute_group *nd_namespace_attribute_groups[] = {
NULL,
};
+static const struct device_type namespace_io_device_type = {
+ .name = "nd_namespace_io",
+ .release = namespace_io_release,
+ .groups = nd_namespace_attribute_groups,
+};
+
+static const struct device_type namespace_pmem_device_type = {
+ .name = "nd_namespace_pmem",
+ .release = namespace_pmem_release,
+ .groups = nd_namespace_attribute_groups,
+};
+
+static const struct device_type namespace_blk_device_type = {
+ .name = "nd_namespace_blk",
+ .release = namespace_blk_release,
+ .groups = nd_namespace_attribute_groups,
+};
+
+static bool is_namespace_pmem(const struct device *dev)
+{
+ return dev ? dev->type == &namespace_pmem_device_type : false;
+}
+
+static bool is_namespace_blk(const struct device *dev)
+{
+ return dev ? dev->type == &namespace_blk_device_type : false;
+}
+
+static bool is_namespace_io(const struct device *dev)
+{
+ return dev ? dev->type == &namespace_io_device_type : false;
+}
+
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
@@ -1759,6 +1763,23 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
+int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
+ resource_size_t size)
+{
+ if (is_namespace_blk(&ndns->dev))
+ return 0;
+ return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
+}
+EXPORT_SYMBOL_GPL(devm_namespace_enable);
+
+void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
+{
+ if (is_namespace_blk(&ndns->dev))
+ return;
+ devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
+}
+EXPORT_SYMBOL_GPL(devm_namespace_disable);
+
static struct device **create_namespace_io(struct nd_region *nd_region)
{
struct nd_namespace_io *nsio;
@@ -2078,7 +2099,6 @@ static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
dev->parent = &nd_region->dev;
- dev->groups = nd_namespace_attribute_groups;
return &nsblk->common.dev;
}
@@ -2109,7 +2129,6 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
return NULL;
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
- dev->groups = nd_namespace_attribute_groups;
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
return dev;
@@ -2608,7 +2627,6 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
if (id < 0)
break;
dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
- dev->groups = nd_namespace_attribute_groups;
nd_device_register(dev);
}
if (i)
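
The resource attribute above now declares its restricted 0400 mode directly with DEVICE_ATTR() instead of open-coding it in the visibility callback, which is left to decide only whether the file exists. The split looks roughly like this (foo_wants_resource() is a hypothetical predicate):

static ssize_t resource_show(struct device *dev,
			     struct device_attribute *attr, char *buf);
static DEVICE_ATTR(resource, 0400, resource_show, NULL);  /* root-only read */

static bool foo_wants_resource(struct device *dev);        /* hypothetical */

static umode_t foo_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (a == &dev_attr_resource.attr && !foo_wants_resource(dev))
		return 0;          /* hide the file entirely */

	return a->mode;            /* otherwise keep the declared mode */
}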
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index aa059439fca0..ddb9d97d9129 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -123,11 +123,7 @@ void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
-int nvdimm_bus_register_dimms(struct nvdimm_bus *nvdimm_bus);
-int nvdimm_bus_register_regions(struct nvdimm_bus *nvdimm_bus);
-int nvdimm_bus_init_interleave_sets(struct nvdimm_bus *nvdimm_bus);
void __nd_device_register(struct device *dev);
-int nd_match_dimm(struct device *dev, void *data);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
@@ -170,6 +166,23 @@ ssize_t nd_namespace_store(struct device *dev,
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);
+#if IS_ENABLED(CONFIG_ND_CLAIM)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
+ resource_size_t size);
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
+#else
+static inline int devm_nsio_enable(struct device *dev,
+ struct nd_namespace_io *nsio, resource_size_t size)
+{
+ return -ENXIO;
+}
+
+static inline void devm_nsio_disable(struct device *dev,
+ struct nd_namespace_io *nsio)
+{
+}
+#endif
+
#ifdef CONFIG_PROVE_LOCKING
extern struct class *nd_class;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ee5c04070ef9..c9f6a5b5253a 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -212,6 +212,11 @@ struct nd_dax {
struct nd_pfn nd_pfn;
};
+static inline u32 nd_info_block_reserve(void)
+{
+ return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
enum nd_async_mode {
ND_SYNC,
ND_ASYNC,
@@ -234,6 +239,9 @@ int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
+extern const struct attribute_group nd_device_attribute_group;
+extern const struct attribute_group nd_numa_attribute_group;
+extern const struct attribute_group *nvdimm_bus_attribute_groups[];
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
@@ -297,7 +305,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
-extern struct attribute_group nd_pfn_attribute_group;
+extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
static inline int nd_pfn_probe(struct device *dev,
struct nd_namespace_common *ndns)
@@ -370,29 +378,20 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res);
+int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
+ resource_size_t size);
+void devm_namespace_disable(struct device *dev,
+ struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
-
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64
-
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
-void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
struct dev_pagemap *pgmap)
{
return -ENXIO;
}
-static inline int devm_nsio_enable(struct device *dev,
- struct nd_namespace_io *nsio)
-{
- return -ENXIO;
-}
-static inline void devm_nsio_disable(struct device *dev,
- struct nd_namespace_io *nsio)
-{
-}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
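
With the prototypes above, the size of the raw mapping becomes an explicit argument, and claiming drivers enable the namespace only for as long and as large as they need. Based on the pmem and btt hunks later in this diff, a consumer looks roughly like the following sketch (error handling trimmed, names outside this series are hypothetical):

/* Assumes the declarations in drivers/nvdimm/nd.h shown above. */
static int foo_parse_info_block(struct device *dev,
				struct nd_namespace_common *ndns)
{
	int rc;

	/* Map just enough of the namespace to read the info block. */
	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* ... read and validate the info block here ... */

	/*
	 * Drop the raw mapping again; the attach path re-enables the
	 * namespace with the size it really needs.
	 */
	devm_namespace_disable(dev, ndns);

	return 0;
}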
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 97187d6c0bdb..8224d1431ea9 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -9,17 +9,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
-static const struct attribute_group *region_attr_groups[] = {
- &nd_region_attribute_group,
- &nd_device_attribute_group,
- NULL,
-};
-
-static const struct attribute_group *bus_attr_groups[] = {
- &nvdimm_bus_attribute_group,
- NULL,
-};
-
struct of_pmem_private {
struct nvdimm_bus_descriptor bus_desc;
struct nvdimm_bus *bus;
@@ -41,7 +30,6 @@ static int of_pmem_region_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->bus_desc.attr_groups = bus_attr_groups;
priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
priv->bus_desc.module = THIS_MODULE;
priv->bus_desc.of_node = np;
@@ -66,7 +54,6 @@ static int of_pmem_region_probe(struct platform_device *pdev)
* structures so passing a stack pointer is fine.
*/
memset(&ndr_desc, 0, sizeof(ndr_desc));
- ndr_desc.attr_groups = region_attr_groups;
ndr_desc.numa_node = dev_to_node(&pdev->dev);
ndr_desc.target_node = ndr_desc.numa_node;
ndr_desc.res = &pdev->resource[i];
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 60d81fae06ee..b94f7a7e94b8 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -26,17 +26,6 @@ static void nd_pfn_release(struct device *dev)
kfree(nd_pfn);
}
-static struct device_type nd_pfn_device_type = {
- .name = "nd_pfn",
- .release = nd_pfn_release,
-};
-
-bool is_nd_pfn(struct device *dev)
-{
- return dev ? dev->type == &nd_pfn_device_type : false;
-}
-EXPORT_SYMBOL(is_nd_pfn);
-
struct nd_pfn *to_nd_pfn(struct device *dev)
{
struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);
@@ -229,7 +218,7 @@ static ssize_t resource_show(struct device *dev,
return rc;
}
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -280,25 +269,29 @@ static struct attribute *nd_pfn_attributes[] = {
NULL,
};
-static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
-{
- if (a == &dev_attr_resource.attr)
- return 0400;
- return a->mode;
-}
-
-struct attribute_group nd_pfn_attribute_group = {
+static struct attribute_group nd_pfn_attribute_group = {
.attrs = nd_pfn_attributes,
- .is_visible = pfn_visible,
};
-static const struct attribute_group *nd_pfn_attribute_groups[] = {
+const struct attribute_group *nd_pfn_attribute_groups[] = {
&nd_pfn_attribute_group,
&nd_device_attribute_group,
&nd_numa_attribute_group,
NULL,
};
+static const struct device_type nd_pfn_device_type = {
+ .name = "nd_pfn",
+ .release = nd_pfn_release,
+ .groups = nd_pfn_attribute_groups,
+};
+
+bool is_nd_pfn(struct device *dev)
+{
+ return dev ? dev->type == &nd_pfn_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_pfn);
+
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns)
{
@@ -337,7 +330,6 @@ static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
dev = &nd_pfn->dev;
dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
- dev->groups = nd_pfn_attribute_groups;
dev->type = &nd_pfn_device_type;
dev->parent = &nd_region->dev;
@@ -382,6 +374,15 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
+ /*
+	 * Re-enable the namespace with the correct size so that we can access
+ * the device memmap area.
+ */
+ devm_namespace_disable(&nd_pfn->dev, ndns);
+ rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
+ if (rc)
+ return rc;
+
do {
unsigned long zero_len;
u64 nsoff;
@@ -591,7 +592,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -ENXIO;
}
- return nd_pfn_clear_memmap_errors(nd_pfn);
+ return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);
@@ -635,11 +636,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
}
EXPORT_SYMBOL(nd_pfn_probe);
-static u32 info_block_reserve(void)
-{
- return ALIGN(SZ_8K, PAGE_SIZE);
-}
-
/*
* We hotplug memory at sub-section granularity, pad the reserved area
* from the previous section base to the namespace base address.
@@ -653,7 +649,7 @@ static unsigned long init_altmap_base(resource_size_t base)
static unsigned long init_altmap_reserve(resource_size_t base)
{
- unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
+ unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
@@ -668,7 +664,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
u64 offset = le64_to_cpu(pfn_sb->dataoff);
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
- u32 reserve = info_block_reserve();
+ u32 reserve = nd_info_block_reserve();
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t base = nsio->res.start + start_pad;
@@ -729,6 +725,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
sig = PFN_SIG;
rc = nd_pfn_validate(nd_pfn, sig);
+ if (rc == 0)
+ return nd_pfn_clear_memmap_errors(nd_pfn);
if (rc != -ENODEV)
return rc;
@@ -796,6 +794,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
+ rc = nd_pfn_clear_memmap_errors(nd_pfn);
+ if (rc)
+ return rc;
+
return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f9f76f6ba07b..ad8e4df1282b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -28,7 +28,6 @@
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
-#include "nd-core.h"
static struct device *to_dev(struct pmem_device *pmem)
{
@@ -372,6 +371,10 @@ static int pmem_attach_disk(struct device *dev,
if (!pmem)
return -ENOMEM;
+ rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
+ if (rc)
+ return rc;
+
/* while nsio_rw_bytes is active, parse a pfn info block if present */
if (is_nd_pfn(dev)) {
nd_pfn = to_nd_pfn(dev);
@@ -381,7 +384,7 @@ static int pmem_attach_disk(struct device *dev,
}
/* we're attaching a block device, disable raw namespace access */
- devm_nsio_disable(dev, nsio);
+ devm_namespace_disable(dev, ndns);
dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
@@ -497,15 +500,16 @@ static int nd_pmem_probe(struct device *dev)
if (IS_ERR(ndns))
return PTR_ERR(ndns);
- if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
- return -ENXIO;
-
if (is_nd_btt(dev))
return nvdimm_namespace_attach_btt(ndns);
if (is_nd_pfn(dev))
return pmem_attach_disk(dev, ndns);
+ ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
+ if (ret)
+ return ret;
+
ret = nd_btt_probe(dev, ndns);
if (ret == 0)
return -ENXIO;
@@ -532,6 +536,10 @@ static int nd_pmem_probe(struct device *dev)
return -ENXIO;
else if (ret == -EOPNOTSUPP)
return ret;
+
+ /* probe complete, attach handles namespace enabling */
+ devm_namespace_disable(dev, ndns);
+
return pmem_attach_disk(dev, ndns);
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index fbf34cf688f4..a19e535830d9 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -140,36 +140,6 @@ static void nd_region_release(struct device *dev)
kfree(nd_region);
}
-static struct device_type nd_blk_device_type = {
- .name = "nd_blk",
- .release = nd_region_release,
-};
-
-static struct device_type nd_pmem_device_type = {
- .name = "nd_pmem",
- .release = nd_region_release,
-};
-
-static struct device_type nd_volatile_device_type = {
- .name = "nd_volatile",
- .release = nd_region_release,
-};
-
-bool is_nd_pmem(struct device *dev)
-{
- return dev ? dev->type == &nd_pmem_device_type : false;
-}
-
-bool is_nd_blk(struct device *dev)
-{
- return dev ? dev->type == &nd_blk_device_type : false;
-}
-
-bool is_nd_volatile(struct device *dev)
-{
- return dev ? dev->type == &nd_volatile_device_type : false;
-}
-
struct nd_region *to_nd_region(struct device *dev)
{
struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
@@ -583,7 +553,7 @@ static ssize_t resource_show(struct device *dev,
return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t persistence_domain_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -635,12 +605,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
return 0;
- if (a == &dev_attr_resource.attr) {
- if (is_memory(dev))
- return 0400;
- else
- return 0;
- }
+ if (a == &dev_attr_resource.attr && !is_memory(dev))
+ return 0;
if (a == &dev_attr_deep_flush.attr) {
int has_flush = nvdimm_has_flush(nd_region);
@@ -674,80 +640,6 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return 0;
}
-struct attribute_group nd_region_attribute_group = {
- .attrs = nd_region_attributes,
- .is_visible = region_visible,
-};
-EXPORT_SYMBOL_GPL(nd_region_attribute_group);
-
-u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
- struct nd_namespace_index *nsindex)
-{
- struct nd_interleave_set *nd_set = nd_region->nd_set;
-
- if (!nd_set)
- return 0;
-
- if (nsindex && __le16_to_cpu(nsindex->major) == 1
- && __le16_to_cpu(nsindex->minor) == 1)
- return nd_set->cookie1;
- return nd_set->cookie2;
-}
-
-u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
-{
- struct nd_interleave_set *nd_set = nd_region->nd_set;
-
- if (nd_set)
- return nd_set->altcookie;
- return 0;
-}
-
-void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
-{
- struct nd_label_ent *label_ent, *e;
-
- lockdep_assert_held(&nd_mapping->lock);
- list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
- list_del(&label_ent->list);
- kfree(label_ent);
- }
-}
-
-/*
- * When a namespace is activated create new seeds for the next
- * namespace, or namespace-personality to be configured.
- */
-void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
-{
- nvdimm_bus_lock(dev);
- if (nd_region->ns_seed == dev) {
- nd_region_create_ns_seed(nd_region);
- } else if (is_nd_btt(dev)) {
- struct nd_btt *nd_btt = to_nd_btt(dev);
-
- if (nd_region->btt_seed == dev)
- nd_region_create_btt_seed(nd_region);
- if (nd_region->ns_seed == &nd_btt->ndns->dev)
- nd_region_create_ns_seed(nd_region);
- } else if (is_nd_pfn(dev)) {
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
-
- if (nd_region->pfn_seed == dev)
- nd_region_create_pfn_seed(nd_region);
- if (nd_region->ns_seed == &nd_pfn->ndns->dev)
- nd_region_create_ns_seed(nd_region);
- } else if (is_nd_dax(dev)) {
- struct nd_dax *nd_dax = to_nd_dax(dev);
-
- if (nd_region->dax_seed == dev)
- nd_region_create_dax_seed(nd_region);
- if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
- nd_region_create_ns_seed(nd_region);
- }
- nvdimm_bus_unlock(dev);
-}
-
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -855,11 +747,124 @@ static struct attribute *mapping_attributes[] = {
NULL,
};
-struct attribute_group nd_mapping_attribute_group = {
+static const struct attribute_group nd_mapping_attribute_group = {
.is_visible = mapping_visible,
.attrs = mapping_attributes,
};
-EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
+
+static const struct attribute_group nd_region_attribute_group = {
+ .attrs = nd_region_attributes,
+ .is_visible = region_visible,
+};
+
+static const struct attribute_group *nd_region_attribute_groups[] = {
+ &nd_device_attribute_group,
+ &nd_region_attribute_group,
+ &nd_numa_attribute_group,
+ &nd_mapping_attribute_group,
+ NULL,
+};
+
+static const struct device_type nd_blk_device_type = {
+ .name = "nd_blk",
+ .release = nd_region_release,
+ .groups = nd_region_attribute_groups,
+};
+
+static const struct device_type nd_pmem_device_type = {
+ .name = "nd_pmem",
+ .release = nd_region_release,
+ .groups = nd_region_attribute_groups,
+};
+
+static const struct device_type nd_volatile_device_type = {
+ .name = "nd_volatile",
+ .release = nd_region_release,
+ .groups = nd_region_attribute_groups,
+};
+
+bool is_nd_pmem(struct device *dev)
+{
+ return dev ? dev->type == &nd_pmem_device_type : false;
+}
+
+bool is_nd_blk(struct device *dev)
+{
+ return dev ? dev->type == &nd_blk_device_type : false;
+}
+
+bool is_nd_volatile(struct device *dev)
+{
+ return dev ? dev->type == &nd_volatile_device_type : false;
+}
+
+u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
+ struct nd_namespace_index *nsindex)
+{
+ struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+ if (!nd_set)
+ return 0;
+
+ if (nsindex && __le16_to_cpu(nsindex->major) == 1
+ && __le16_to_cpu(nsindex->minor) == 1)
+ return nd_set->cookie1;
+ return nd_set->cookie2;
+}
+
+u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
+{
+ struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+ if (nd_set)
+ return nd_set->altcookie;
+ return 0;
+}
+
+void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
+{
+ struct nd_label_ent *label_ent, *e;
+
+ lockdep_assert_held(&nd_mapping->lock);
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+ list_del(&label_ent->list);
+ kfree(label_ent);
+ }
+}
+
+/*
+ * When a namespace is activated create new seeds for the next
+ * namespace, or namespace-personality to be configured.
+ */
+void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
+{
+ nvdimm_bus_lock(dev);
+ if (nd_region->ns_seed == dev) {
+ nd_region_create_ns_seed(nd_region);
+ } else if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ if (nd_region->btt_seed == dev)
+ nd_region_create_btt_seed(nd_region);
+ if (nd_region->ns_seed == &nd_btt->ndns->dev)
+ nd_region_create_ns_seed(nd_region);
+ } else if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+ if (nd_region->pfn_seed == dev)
+ nd_region_create_pfn_seed(nd_region);
+ if (nd_region->ns_seed == &nd_pfn->ndns->dev)
+ nd_region_create_ns_seed(nd_region);
+ } else if (is_nd_dax(dev)) {
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+
+ if (nd_region->dax_seed == dev)
+ nd_region_create_dax_seed(nd_region);
+ if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
+ nd_region_create_ns_seed(nd_region);
+ }
+ nvdimm_bus_unlock(dev);
+}
int nd_blk_region_init(struct nd_region *nd_region)
{
@@ -931,8 +936,8 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
EXPORT_SYMBOL(nd_region_release_lane);
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
- struct nd_region_desc *ndr_desc, struct device_type *dev_type,
- const char *caller)
+ struct nd_region_desc *ndr_desc,
+ const struct device_type *dev_type, const char *caller)
{
struct nd_region *nd_region;
struct device *dev;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8e8527408db3..dfe37a525f3a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2412,16 +2412,6 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
- },
- {
- /*
- * This Kingston E8FK11.T firmware version has no interrupt
- * after resume with actions related to suspend to idle
- * https://bugzilla.kernel.org/show_bug.cgi?id=204887
- */
- .vid = 0x2646,
- .fr = "E8FK11.T",
- .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
@@ -2998,7 +2988,7 @@ static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
.unlocked_ioctl = nvme_dev_ioctl,
- .compat_ioctl = nvme_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index c2ec750cae6e..73567e922491 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -50,6 +50,7 @@ config NVMEM_IMX_OCOTP
config NVMEM_IMX_OCOTP_SCU
tristate "i.MX8 SCU On-Chip OTP Controller support"
depends on IMX_SCU
+ depends on HAVE_ARM_SMCCC
help
This is a driver for the SCU On-Chip OTP Controller (OCOTP)
available on i.MX8 SoCs.
@@ -119,6 +120,17 @@ config ROCKCHIP_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
+config ROCKCHIP_OTP
+ tristate "Rockchip OTP controller support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Rockchip SoCs
+ from OTP, such as cpu-leakage.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_rockchip_otp.
+
config NVMEM_BCM_OCOTP
tristate "Broadcom On-Chip OTP Controller support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -230,4 +242,15 @@ config NVMEM_ZYNQMP
If sure, say yes. If unsure, say no.
+config SPRD_EFUSE
+ tristate "Spreadtrum SoC eFuse Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Spreadtrum
+ SoCs from eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sprd-efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index e5c153d99a67..9e667823edb3 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -30,6 +30,8 @@ obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
+nvmem-rockchip-otp-y := rockchip-otp.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_stm32_romem-y := stm32-romem.o
obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
@@ -50,3 +52,5 @@ obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
nvmem-sc27xx-efuse-y := sc27xx-efuse.o
obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
+obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o
+nvmem_sprd_efuse-y := sprd-efuse.o
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index 61a17f943f47..03f1ab23ad51 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -7,6 +7,7 @@
* Peng Fan <peng.fan@nxp.com>
*/
+#include <linux/arm-smccc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
@@ -14,14 +15,28 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#define IMX_SIP_OTP 0xC200000A
+#define IMX_SIP_OTP_WRITE 0x2
+
enum ocotp_devtype {
IMX8QXP,
IMX8QM,
};
+#define ECC_REGION BIT(0)
+#define HOLE_REGION BIT(1)
+
+struct ocotp_region {
+ u32 start;
+ u32 end;
+ u32 flag;
+};
+
struct ocotp_devtype_data {
int devtype;
int nregs;
+ u32 num_region;
+ struct ocotp_region region[];
};
struct ocotp_priv {
@@ -35,16 +50,63 @@ struct imx_sc_msg_misc_fuse_read {
u32 word;
} __packed;
+static DEFINE_MUTEX(scu_ocotp_mutex);
+
static struct ocotp_devtype_data imx8qxp_data = {
.devtype = IMX8QXP,
.nregs = 800,
+ .num_region = 3,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x110, 0x21F, HOLE_REGION},
+ {0x220, 0x31F, ECC_REGION},
+ },
};
static struct ocotp_devtype_data imx8qm_data = {
.devtype = IMX8QM,
.nregs = 800,
+ .num_region = 2,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x1a0, 0x1ff, ECC_REGION},
+ },
};
+static bool in_hole(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & HOLE_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool in_ecc(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & ECC_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
u32 *val)
{
@@ -88,18 +150,19 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
if (!p)
return -ENOMEM;
+ mutex_lock(&scu_ocotp_mutex);
+
buf = p;
for (i = index; i < (index + count); i++) {
- if (priv->data->devtype == IMX8QXP) {
- if ((i > 271) && (i < 544)) {
- *buf++ = 0;
- continue;
- }
+ if (in_hole(context, i)) {
+ *buf++ = 0;
+ continue;
}
ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf);
if (ret) {
+ mutex_unlock(&scu_ocotp_mutex);
kfree(p);
return ret;
}
@@ -108,18 +171,63 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
memcpy(val, (u8 *)p + offset % 4, bytes);
+ mutex_unlock(&scu_ocotp_mutex);
+
kfree(p);
return 0;
}
+static int imx_scu_ocotp_write(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ struct arm_smccc_res res;
+ u32 *buf = val;
+ u32 tmp;
+ u32 index;
+ int ret;
+
+ /* allow only writing one complete OTP word at a time */
+ if ((bytes != 4) || (offset % 4))
+ return -EINVAL;
+
+ index = offset >> 2;
+
+ if (in_hole(context, index))
+ return -EINVAL;
+
+ if (in_ecc(context, index)) {
+ pr_warn("ECC region, only program once\n");
+ mutex_lock(&scu_ocotp_mutex);
+ ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, index, &tmp);
+ mutex_unlock(&scu_ocotp_mutex);
+ if (ret)
+ return ret;
+ if (tmp) {
+ pr_warn("ECC region, already has value: %x\n", tmp);
+ return -EIO;
+ }
+ }
+
+ mutex_lock(&scu_ocotp_mutex);
+
+ arm_smccc_smc(IMX_SIP_OTP, IMX_SIP_OTP_WRITE, index, *buf,
+ 0, 0, 0, 0, &res);
+
+ mutex_unlock(&scu_ocotp_mutex);
+
+ return res.a0;
+}
+
static struct nvmem_config imx_scu_ocotp_nvmem_config = {
.name = "imx-scu-ocotp",
- .read_only = true,
+ .read_only = false,
.word_size = 4,
.stride = 1,
.owner = THIS_MODULE,
.reg_read = imx_scu_ocotp_read,
+ .reg_write = imx_scu_ocotp_write,
};
static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
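The new reg_write path above accepts exactly one naturally aligned 32-bit OTP word per call. A minimal consumer-side sketch (not part of this patch) of how such a word could be programmed through the generic nvmem consumer API follows; the lookup name "fuses" and the helper name are assumptions for illustration only.

#include <linux/nvmem-consumer.h>

/* Program one 32-bit fuse word; offset must be a multiple of 4 and len == 4. */
static int example_program_fuse_word(struct device *dev, u32 word_index, u32 value)
{
	struct nvmem_device *nvmem;
	int ret;

	/* "fuses" is a hypothetical nvmem lookup name, not defined by this patch */
	nvmem = devm_nvmem_device_get(dev, "fuses");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	ret = nvmem_device_write(nvmem, word_index * 4, sizeof(value), &value);

	return ret < 0 ? ret : 0;
}

Any other length or an unaligned offset is rejected with -EINVAL by imx_scu_ocotp_write() above, and writes into a hole region are refused as well.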
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index dff2f3c357f5..fc40555ca4cd 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -521,6 +521,10 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ clk_prepare_enable(priv->clk);
+ imx_ocotp_clr_err_if_set(priv->base);
+ clk_disable_unprepare(priv->clk);
+
priv->params = of_device_get_match_data(&pdev->dev);
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
new file mode 100644
index 000000000000..9f53bcce2f87
--- /dev/null
+++ b/drivers/nvmem/rockchip-otp.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip OTP Driver
+ *
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* OTP Register Offsets */
+#define OTPC_SBPI_CTRL 0x0020
+#define OTPC_SBPI_CMD_VALID_PRE 0x0024
+#define OTPC_SBPI_CS_VALID_PRE 0x0028
+#define OTPC_SBPI_STATUS 0x002C
+#define OTPC_USER_CTRL 0x0100
+#define OTPC_USER_ADDR 0x0104
+#define OTPC_USER_ENABLE 0x0108
+#define OTPC_USER_Q 0x0124
+#define OTPC_INT_STATUS 0x0304
+#define OTPC_SBPI_CMD0_OFFSET 0x1000
+#define OTPC_SBPI_CMD1_OFFSET 0x1004
+
+/* OTP Register bits and masks */
+#define OTPC_USER_ADDR_MASK GENMASK(31, 16)
+#define OTPC_USE_USER BIT(0)
+#define OTPC_USE_USER_MASK GENMASK(16, 16)
+#define OTPC_USER_FSM_ENABLE BIT(0)
+#define OTPC_USER_FSM_ENABLE_MASK GENMASK(16, 16)
+#define OTPC_SBPI_DONE BIT(1)
+#define OTPC_USER_DONE BIT(2)
+
+#define SBPI_DAP_ADDR 0x02
+#define SBPI_DAP_ADDR_SHIFT 8
+#define SBPI_DAP_ADDR_MASK GENMASK(31, 24)
+#define SBPI_CMD_VALID_MASK GENMASK(31, 16)
+#define SBPI_DAP_CMD_WRF 0xC0
+#define SBPI_DAP_REG_ECC 0x3A
+#define SBPI_ECC_ENABLE 0x00
+#define SBPI_ECC_DISABLE 0x09
+#define SBPI_ENABLE BIT(0)
+#define SBPI_ENABLE_MASK GENMASK(16, 16)
+
+#define OTPC_TIMEOUT 10000
+
+struct rockchip_otp {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *rst;
+};
+
+/* list of required clocks */
+static const char * const rockchip_otp_clocks[] = {
+ "otp", "apb_pclk", "phy",
+};
+
+struct rockchip_data {
+ int size;
+};
+
+static int rockchip_otp_reset(struct rockchip_otp *otp)
+{
+ int ret;
+
+ ret = reset_control_assert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to assert otp phy %d\n", ret);
+ return ret;
+ }
+
+ udelay(2);
+
+ ret = reset_control_deassert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to deassert otp phy %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rockchip_otp_wait_status(struct rockchip_otp *otp, u32 flag)
+{
+ u32 status = 0;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(otp->base + OTPC_INT_STATUS, status,
+ (status & flag), 1, OTPC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* clean int status */
+ writel(flag, otp->base + OTPC_INT_STATUS);
+
+ return 0;
+}
+
+static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
+{
+ int ret = 0;
+
+ writel(SBPI_DAP_ADDR_MASK | (SBPI_DAP_ADDR << SBPI_DAP_ADDR_SHIFT),
+ otp->base + OTPC_SBPI_CTRL);
+
+ writel(SBPI_CMD_VALID_MASK | 0x1, otp->base + OTPC_SBPI_CMD_VALID_PRE);
+ writel(SBPI_DAP_CMD_WRF | SBPI_DAP_REG_ECC,
+ otp->base + OTPC_SBPI_CMD0_OFFSET);
+ if (enable)
+ writel(SBPI_ECC_ENABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+ else
+ writel(SBPI_ECC_DISABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+
+ writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL);
+
+ ret = rockchip_otp_wait_status(otp, OTPC_SBPI_DONE);
+ if (ret < 0)
+ dev_err(otp->dev, "timeout during ecc_enable\n");
+
+ return ret;
+}
+
+static int rockchip_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ u8 *buf = val;
+ int ret = 0;
+
+ ret = clk_bulk_prepare_enable(otp->num_clks, otp->clks);
+ if (ret < 0) {
+ dev_err(otp->dev, "failed to prepare/enable clks\n");
+ return ret;
+ }
+
+ ret = rockchip_otp_reset(otp);
+ if (ret) {
+ dev_err(otp->dev, "failed to reset otp phy\n");
+ goto disable_clks;
+ }
+
+ ret = rockchip_otp_ecc_enable(otp, false);
+ if (ret < 0) {
+ dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
+ goto disable_clks;
+ }
+
+ writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+ udelay(5);
+ while (bytes--) {
+ writel(offset++ | OTPC_USER_ADDR_MASK,
+ otp->base + OTPC_USER_ADDR);
+ writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
+ otp->base + OTPC_USER_ENABLE);
+ ret = rockchip_otp_wait_status(otp, OTPC_USER_DONE);
+ if (ret < 0) {
+ dev_err(otp->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+ *buf++ = readb(otp->base + OTPC_USER_Q);
+ }
+
+read_end:
+ writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+disable_clks:
+ clk_bulk_disable_unprepare(otp->num_clks, otp->clks);
+
+ return ret;
+}
+
+static struct nvmem_config otp_config = {
+ .name = "rockchip-otp",
+ .owner = THIS_MODULE,
+ .read_only = true,
+ .stride = 1,
+ .word_size = 1,
+ .reg_read = rockchip_otp_read,
+};
+
+static const struct rockchip_data px30_data = {
+ .size = 0x40,
+};
+
+static const struct of_device_id rockchip_otp_match[] = {
+ {
+ .compatible = "rockchip,px30-otp",
+ .data = (void *)&px30_data,
+ },
+ {
+ .compatible = "rockchip,rk3308-otp",
+ .data = (void *)&px30_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_otp_match);
+
+static int rockchip_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_otp *otp;
+ const struct rockchip_data *data;
+ struct nvmem_device *nvmem;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "failed to get match data\n");
+ return -EINVAL;
+ }
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp),
+ GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ otp->num_clks = ARRAY_SIZE(rockchip_otp_clocks);
+ otp->clks = devm_kcalloc(dev, otp->num_clks,
+ sizeof(*otp->clks), GFP_KERNEL);
+ if (!otp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < otp->num_clks; ++i)
+ otp->clks[i].id = rockchip_otp_clocks[i];
+
+ ret = devm_clk_bulk_get(dev, otp->num_clks, otp->clks);
+ if (ret)
+ return ret;
+
+ otp->rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(otp->rst))
+ return PTR_ERR(otp->rst);
+
+ otp_config.size = data->size;
+ otp_config.priv = otp;
+ otp_config.dev = dev;
+ nvmem = devm_nvmem_register(dev, &otp_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver rockchip_otp_driver = {
+ .probe = rockchip_otp_probe,
+ .driver = {
+ .name = "rockchip-otp",
+ .of_match_table = rockchip_otp_match,
+ },
+};
+
+module_platform_driver(rockchip_otp_driver);
+MODULE_DESCRIPTION("Rockchip OTP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index c6ee21018d80..ab5e7e0bc3d8 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -211,7 +211,7 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
return ret;
}
- efuse->hwlock = hwspin_lock_request_specific(ret);
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!efuse->hwlock) {
dev_err(&pdev->dev, "failed to request hwspinlock\n");
return -ENXIO;
@@ -219,7 +219,6 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
mutex_init(&efuse->mutex);
efuse->dev = &pdev->dev;
- platform_set_drvdata(pdev, efuse);
econfig.stride = 1;
econfig.word_size = 1;
@@ -232,21 +231,12 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
nvmem = devm_nvmem_register(&pdev->dev, &econfig);
if (IS_ERR(nvmem)) {
dev_err(&pdev->dev, "failed to register nvmem config\n");
- hwspin_lock_free(efuse->hwlock);
return PTR_ERR(nvmem);
}
return 0;
}
-static int sc27xx_efuse_remove(struct platform_device *pdev)
-{
- struct sc27xx_efuse *efuse = platform_get_drvdata(pdev);
-
- hwspin_lock_free(efuse->hwlock);
- return 0;
-}
-
static const struct of_device_id sc27xx_efuse_of_match[] = {
{ .compatible = "sprd,sc2731-efuse" },
{ }
@@ -254,7 +244,6 @@ static const struct of_device_id sc27xx_efuse_of_match[] = {
static struct platform_driver sc27xx_efuse_driver = {
.probe = sc27xx_efuse_probe,
- .remove = sc27xx_efuse_remove,
.driver = {
.name = "sc27xx-efuse",
.of_match_table = sc27xx_efuse_of_match,
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
new file mode 100644
index 000000000000..2f1e0fbd1901
--- /dev/null
+++ b/drivers/nvmem/sprd-efuse.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define SPRD_EFUSE_ENABLE 0x20
+#define SPRD_EFUSE_ERR_FLAG 0x24
+#define SPRD_EFUSE_ERR_CLR 0x28
+#define SPRD_EFUSE_MAGIC_NUM 0x2c
+#define SPRD_EFUSE_FW_CFG 0x50
+#define SPRD_EFUSE_PW_SWT 0x54
+#define SPRD_EFUSE_MEM(val) (0x1000 + ((val) << 2))
+
+#define SPRD_EFUSE_VDD_EN BIT(0)
+#define SPRD_EFUSE_AUTO_CHECK_EN BIT(1)
+#define SPRD_EFUSE_DOUBLE_EN BIT(2)
+#define SPRD_EFUSE_MARGIN_RD_EN BIT(3)
+#define SPRD_EFUSE_LOCK_WR_EN BIT(4)
+
+#define SPRD_EFUSE_ERR_CLR_MASK GENMASK(13, 0)
+
+#define SPRD_EFUSE_ENK1_ON BIT(0)
+#define SPRD_EFUSE_ENK2_ON BIT(1)
+#define SPRD_EFUSE_PROG_EN BIT(2)
+
+#define SPRD_EFUSE_MAGIC_NUMBER 0x8810
+
+/* Block width (bytes) definitions */
+#define SPRD_EFUSE_BLOCK_WIDTH 4
+
+/*
+ * The Spreadtrum AP efuse contains 2 parts: normal efuse and secure efuse,
+ * and only the normal efuse is accessible from the kernel, so define the
+ * normal block offset and the number of normal blocks.
+ */
+#define SPRD_EFUSE_NORMAL_BLOCK_NUMS 24
+#define SPRD_EFUSE_NORMAL_BLOCK_OFFSET 72
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SPRD_EFUSE_HWLOCK_TIMEOUT 5000
+
+/*
+ * Different Spreadtrum SoCs can have different numbers of normal blocks and
+ * different block offsets. Some SoCs also support a block-double feature,
+ * which means that when reading or writing efuse memory the controller saves
+ * the data twice, in case one copy becomes corrupted over a long period.
+ *
+ * Thus we keep these parameters in the per-variant device data structure.
+ */
+struct sprd_efuse_variant_data {
+ u32 blk_nums;
+ u32 blk_offset;
+ bool blk_double;
+};
+
+struct sprd_efuse {
+ struct device *dev;
+ struct clk *clk;
+ struct hwspinlock *hwlock;
+ struct mutex mutex;
+ void __iomem *base;
+ const struct sprd_efuse_variant_data *data;
+};
+
+static const struct sprd_efuse_variant_data ums312_data = {
+ .blk_nums = SPRD_EFUSE_NORMAL_BLOCK_NUMS,
+ .blk_offset = SPRD_EFUSE_NORMAL_BLOCK_OFFSET,
+ .blk_double = false,
+};
+
+/*
+ * On Spreadtrum platforms multiple subsystems access the single efuse
+ * controller, so we need a hardware spinlock to synchronize between
+ * them.
+ */
+static int sprd_efuse_lock(struct sprd_efuse *efuse)
+{
+ int ret;
+
+ mutex_lock(&efuse->mutex);
+
+ ret = hwspin_lock_timeout_raw(efuse->hwlock,
+ SPRD_EFUSE_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timeout get the hwspinlock\n");
+ mutex_unlock(&efuse->mutex);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sprd_efuse_unlock(struct sprd_efuse *efuse)
+{
+ hwspin_unlock_raw(efuse->hwlock);
+ mutex_unlock(&efuse->mutex);
+}
+
+static void sprd_efuse_set_prog_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val &= ~SPRD_EFUSE_ENK2_ON;
+ else
+ val &= ~SPRD_EFUSE_ENK1_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+ /* Opening or closing the efuse power needs a 1000us wait to stabilize. */
+ usleep_range(1000, 1200);
+
+ if (en)
+ val |= SPRD_EFUSE_ENK1_ON;
+ else
+ val |= SPRD_EFUSE_ENK2_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+ /* Opening or closing the efuse power needs a 1000us wait to stabilize. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_read_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_VDD_EN;
+ else
+ val &= ~SPRD_EFUSE_VDD_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+
+ /* Opening or closing the efuse power needs a 1000us wait to stabilize. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_prog_lock(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_LOCK_WR_EN;
+ else
+ val &= ~SPRD_EFUSE_LOCK_WR_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_auto_check(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_AUTO_CHECK_EN;
+ else
+ val &= ~SPRD_EFUSE_AUTO_CHECK_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_data_double(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_DOUBLE_EN;
+ else
+ val &= ~SPRD_EFUSE_DOUBLE_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_prog_en(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val |= SPRD_EFUSE_PROG_EN;
+ else
+ val &= ~SPRD_EFUSE_PROG_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+}
+
+static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
+ bool lock, u32 *data)
+{
+ u32 status;
+ int ret = 0;
+
+ /*
+ * We need to set the correct magic number before writing the efuse to
+ * allow programming; other programming attempts stay blocked until we
+ * clear the magic number again.
+ */
+ writel(SPRD_EFUSE_MAGIC_NUMBER,
+ efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ /*
+ * Power on the efuse, enable programming and enable double data
+ * if asked.
+ */
+ sprd_efuse_set_prog_power(efuse, true);
+ sprd_efuse_set_prog_en(efuse, true);
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /*
+ * Enable the auto-check function to validate whether the programming
+ * was successful.
+ */
+ sprd_efuse_set_auto_check(efuse, true);
+
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable auto-check and data double after programming */
+ sprd_efuse_set_auto_check(efuse, false);
+ sprd_efuse_set_data_double(efuse, false);
+
+ /*
+ * Check the efuse error status; if the programming was successful,
+ * lock this efuse block to avoid it being programmed again.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "write error status %d of block %d\n", ret, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ ret = -EBUSY;
+ } else {
+ sprd_efuse_set_prog_lock(efuse, lock);
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+ sprd_efuse_set_prog_lock(efuse, false);
+ }
+
+ sprd_efuse_set_prog_power(efuse, false);
+ writel(0, efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ return ret;
+}
+
+static int sprd_efuse_raw_read(struct sprd_efuse *efuse, int blk, u32 *val,
+ bool doub)
+{
+ u32 status;
+
+ /*
+ * We need to power on the efuse before reading data from it, and
+ * power it off again once the read has finished.
+ */
+ sprd_efuse_set_read_power(efuse, true);
+
+ /* Enable double data if asked */
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /* Start to read data from efuse block */
+ *val = readl(efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable double data */
+ sprd_efuse_set_data_double(efuse, false);
+
+ /* Power off the efuse */
+ sprd_efuse_set_read_power(efuse, false);
+
+ /*
+ * Check the efuse error status and clear it if any errors have
+ * occurred.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "read error status %d of block %d\n", status, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ bool blk_double = efuse->data->blk_double;
+ u32 index = offset / SPRD_EFUSE_BLOCK_WIDTH + efuse->data->blk_offset;
+ u32 blk_offset = (offset % SPRD_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
+ u32 data;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_read(efuse, index, &data, blk_double);
+ if (!ret) {
+ data >>= blk_offset;
+ memcpy(val, &data, bytes);
+ }
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_prog(efuse, offset, false, false, val);
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = { };
+ struct sprd_efuse *efuse;
+ const struct sprd_efuse_variant_data *pdata;
+ int ret;
+
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwlock id\n");
+ return ret;
+ }
+
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
+ if (!efuse->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwlock\n");
+ return -ENXIO;
+ }
+
+ efuse->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(efuse->clk)) {
+ dev_err(&pdev->dev, "failed to get enable clock\n");
+ return PTR_ERR(efuse->clk);
+ }
+
+ mutex_init(&efuse->mutex);
+ efuse->dev = &pdev->dev;
+ efuse->data = pdata;
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = false;
+ econfig.name = "sprd-efuse";
+ econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH;
+ econfig.reg_read = sprd_efuse_read;
+ econfig.reg_write = sprd_efuse_write;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem\n");
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_efuse_of_match[] = {
+ { .compatible = "sprd,ums312-efuse", .data = &ums312_data },
+ { }
+};
+
+static struct platform_driver sprd_efuse_driver = {
+ .probe = sprd_efuse_probe,
+ .driver = {
+ .name = "sprd-efuse",
+ .of_match_table = sprd_efuse_of_match,
+ },
+};
+
+module_platform_driver(sprd_efuse_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum AP efuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 978427a9d5e6..99c1b8058559 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -14,6 +14,8 @@
#include <linux/slab.h>
#include <linux/string.h>
+#include "of_private.h"
+
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
@@ -241,6 +243,7 @@ static int parser_init(struct of_pci_range_parser *parser,
parser->node = node;
parser->pna = of_n_addr_cells(node);
parser->np = parser->pna + na + ns;
+ parser->dma = !strcmp(name, "dma-ranges");
parser->range = of_get_property(node, name, &rlen);
if (parser->range == NULL)
@@ -279,7 +282,11 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
range->pci_space = be32_to_cpup(parser->range);
range->flags = of_bus_pci_get_flags(parser->range);
range->pci_addr = of_read_number(parser->range + 1, ns);
- range->cpu_addr = of_translate_address(parser->node,
+ if (parser->dma)
+ range->cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ range->cpu_addr = of_translate_address(parser->node,
parser->range + na);
range->size = of_read_number(parser->range + parser->pna + na, ns);
@@ -292,8 +299,12 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
flags = of_bus_pci_get_flags(parser->range);
pci_addr = of_read_number(parser->range + 1, ns);
- cpu_addr = of_translate_address(parser->node,
- parser->range + na);
+ if (parser->dma)
+ cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ cpu_addr = of_translate_address(parser->node,
+ parser->range + na);
size = of_read_number(parser->range + parser->pna + na, ns);
if (flags != range->flags)
@@ -517,9 +528,13 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
*
* As far as we know, this damage only exists on Apple machines, so
* This code is only enabled on powerpc. --gcl
+ *
+ * This quirk also applies to 'dma-ranges', which frequently exists in
+ * child nodes whose parent nodes lack 'dma-ranges' themselves. --RobH
*/
ranges = of_get_property(parent, rprop, &rlen);
- if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
+ if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
+ strcmp(rprop, "dma-ranges")) {
pr_debug("no ranges; cannot translate\n");
return 1;
}
@@ -695,6 +710,16 @@ static struct device_node *__of_get_dma_parent(const struct device_node *np)
return of_node_get(args.np);
}
+static struct device_node *of_get_next_dma_parent(struct device_node *np)
+{
+ struct device_node *parent;
+
+ parent = __of_get_dma_parent(np);
+ of_node_put(np);
+
+ return parent;
+}
+
u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
struct device_node *host;
@@ -826,25 +851,6 @@ int of_address_to_resource(struct device_node *dev, int index,
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
-struct device_node *of_find_matching_node_by_address(struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address)
-{
- struct device_node *dn = of_find_matching_node(from, matches);
- struct resource res;
-
- while (dn) {
- if (!of_address_to_resource(dn, 0, &res) &&
- res.start == base_address)
- return dn;
-
- dn = of_find_matching_node(dn, matches);
- }
-
- return NULL;
-}
-
-
/**
* of_iomap - Maps the memory mapped IO for a given device_node
* @device: the device whose io range will be mapped
@@ -924,47 +930,39 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
const __be32 *ranges = NULL;
int len, naddr, nsize, pna;
int ret = 0;
+ bool found_dma_ranges = false;
u64 dmaaddr;
- if (!node)
- return -EINVAL;
-
- while (1) {
- struct device_node *parent;
-
- naddr = of_n_addr_cells(node);
- nsize = of_n_size_cells(node);
-
- parent = __of_get_dma_parent(node);
- of_node_put(node);
-
- node = parent;
- if (!node)
- break;
-
+ while (node) {
ranges = of_get_property(node, "dma-ranges", &len);
/* Ignore empty ranges, they imply no translation required */
if (ranges && len > 0)
break;
- /*
- * At least empty ranges has to be defined for parent node if
- * DMA is supported
- */
- if (!ranges)
- break;
+ /* Once we find 'dma-ranges', then a missing one is an error */
+ if (found_dma_ranges && !ranges) {
+ ret = -ENODEV;
+ goto out;
+ }
+ found_dma_ranges = true;
+
+ node = of_get_next_dma_parent(node);
}
- if (!ranges) {
+ if (!node || !ranges) {
pr_debug("no dma-ranges found for node(%pOF)\n", np);
ret = -ENODEV;
goto out;
}
- len /= sizeof(u32);
-
+ naddr = of_bus_n_addr_cells(node);
+ nsize = of_bus_n_size_cells(node);
pna = of_n_addr_cells(node);
+ if ((len / sizeof(__be32)) % (pna + naddr + nsize)) {
+ ret = -EINVAL;
+ goto out;
+ }
/* dma-ranges format:
* DMA addr : naddr cells
@@ -972,10 +970,10 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
* size : nsize cells
*/
dmaaddr = of_read_number(ranges, naddr);
- *paddr = of_translate_dma_address(np, ranges);
+ *paddr = of_translate_dma_address(node, ranges + naddr);
if (*paddr == OF_BAD_ADDR) {
- pr_err("translation of DMA address(%pad) to CPU address failed node(%pOF)\n",
- dma_addr, np);
+ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+ dmaaddr, np);
ret = -EINVAL;
goto out;
}
@@ -991,7 +989,6 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(of_dma_get_range);
/**
* of_dma_is_coherent - Check if device is coherent
@@ -1009,7 +1006,7 @@ bool of_dma_is_coherent(struct device_node *np)
of_node_put(node);
return true;
}
- node = of_get_next_parent(node);
+ node = of_get_next_dma_parent(node);
}
of_node_put(node);
return false;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 1d667eb730e1..db7fbc0c0893 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -86,34 +86,46 @@ static bool __of_node_is_type(const struct device_node *np, const char *type)
return np && match && type && !strcmp(match, type);
}
-int of_n_addr_cells(struct device_node *np)
+int of_bus_n_addr_cells(struct device_node *np)
{
u32 cells;
- do {
- if (np->parent)
- np = np->parent;
+ for (; np; np = np->parent)
if (!of_property_read_u32(np, "#address-cells", &cells))
return cells;
- } while (np->parent);
+
/* No #address-cells property for the root node */
return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}
+
+int of_n_addr_cells(struct device_node *np)
+{
+ if (np->parent)
+ np = np->parent;
+
+ return of_bus_n_addr_cells(np);
+}
EXPORT_SYMBOL(of_n_addr_cells);
-int of_n_size_cells(struct device_node *np)
+int of_bus_n_size_cells(struct device_node *np)
{
u32 cells;
- do {
- if (np->parent)
- np = np->parent;
+ for (; np; np = np->parent)
if (!of_property_read_u32(np, "#size-cells", &cells))
return cells;
- } while (np->parent);
+
/* No #size-cells property for the root node */
return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}
+
+int of_n_size_cells(struct device_node *np)
+{
+ if (np->parent)
+ np = np->parent;
+
+ return of_bus_n_size_cells(np);
+}
EXPORT_SYMBOL(of_n_size_cells);
#ifdef CONFIG_NUMA
diff --git a/drivers/of/device.c b/drivers/of/device.c
index da8158392010..e9127db7b067 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -93,7 +93,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
bool coherent;
unsigned long offset;
const struct iommu_ops *iommu;
- u64 mask;
+ u64 mask, end;
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
@@ -148,12 +148,13 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
+ end = dma_addr + size - 1;
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
- /* ...but only set bus mask if we found valid dma-ranges earlier */
+ /* ...but only set bus limit if we found valid dma-ranges earlier */
if (!ret)
- dev->bus_dma_mask = mask;
+ dev->bus_dma_limit = end;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
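A quick numeric restatement of the mask/limit change above, for illustration only; the example values come from the PCI test case added later in this diff.

#include <linux/dma-mapping.h>
#include <linux/log2.h>

/* Recomputes what of_dma_configure() derives from a dma-range. */
static u64 example_range_to_mask(u64 dma_addr, u64 size, u64 *bus_limit)
{
	u64 end = dma_addr + size - 1;

	*bus_limit = end;			/* new dev->bus_dma_limit */
	return DMA_BIT_MASK(ilog2(end) + 1);	/* trims the DMA masks */
}

For dma_addr 0x80000000 and size 0x10000000 this gives end 0x8fffffff and DMA_BIT_MASK(32); the bus limit is now the exact end address instead of the rounded-up mask.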
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index f1c23aad951e..2cdf64d2456f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -947,8 +947,8 @@ int __init early_init_dt_scan_chosen_stdout(void)
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
- of_setup_earlycon(match, offset, options);
- return 0;
+ if (of_setup_earlycon(match, offset, options) == 0)
+ return 0;
}
return -ENODEV;
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 24786818e32e..66294d29942a 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -158,4 +158,18 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
#define for_each_transaction_entry_reverse(_oft, _te) \
list_for_each_entry_reverse(_te, &(_oft)->te_list, node)
+extern int of_bus_n_addr_cells(struct device_node *np);
+extern int of_bus_n_size_cells(struct device_node *np);
+
+#ifdef CONFIG_OF_ADDRESS
+extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
+ u64 *paddr, u64 *size);
+#else
+static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
+ u64 *paddr, u64 *size)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index c423e94baf0f..9617b7df7c4d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -305,7 +305,6 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
{
struct property *new_prop = NULL, *prop;
int ret = 0;
- bool check_for_non_overlay_node = false;
if (target->in_livetree)
if (!of_prop_cmp(overlay_prop->name, "name") ||
@@ -318,6 +317,25 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
else
prop = NULL;
+ if (prop) {
+ if (!of_prop_cmp(prop->name, "#address-cells")) {
+ if (!of_prop_val_eq(prop, overlay_prop)) {
+ pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
+ target->np);
+ ret = -EINVAL;
+ }
+ return ret;
+
+ } else if (!of_prop_cmp(prop->name, "#size-cells")) {
+ if (!of_prop_val_eq(prop, overlay_prop)) {
+ pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
+ target->np);
+ ret = -EINVAL;
+ }
+ return ret;
+ }
+ }
+
if (is_symbols_prop) {
if (prop)
return -EINVAL;
@@ -330,33 +348,18 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
return -ENOMEM;
if (!prop) {
- check_for_non_overlay_node = true;
if (!target->in_livetree) {
new_prop->next = target->np->deadprops;
target->np->deadprops = new_prop;
}
ret = of_changeset_add_property(&ovcs->cset, target->np,
new_prop);
- } else if (!of_prop_cmp(prop->name, "#address-cells")) {
- if (!of_prop_val_eq(prop, new_prop)) {
- pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
- target->np);
- ret = -EINVAL;
- }
- } else if (!of_prop_cmp(prop->name, "#size-cells")) {
- if (!of_prop_val_eq(prop, new_prop)) {
- pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
- target->np);
- ret = -EINVAL;
- }
} else {
- check_for_non_overlay_node = true;
ret = of_changeset_update_property(&ovcs->cset, target->np,
new_prop);
}
- if (check_for_non_overlay_node &&
- !of_node_check_flag(target->np, OF_OVERLAY))
+ if (!of_node_check_flag(target->np, OF_OVERLAY))
pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
target->np, new_prop->name);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b47a2292fe8e..d93891a05f60 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -480,6 +480,7 @@ int of_platform_populate(struct device_node *root,
pr_debug("%s()\n", __func__);
pr_debug(" starting at: %pOF\n", root);
+ device_links_supplier_sync_state_pause();
for_each_child_of_node(root, child) {
rc = of_platform_bus_create(child, matches, lookup, parent, true);
if (rc) {
@@ -487,6 +488,8 @@ int of_platform_populate(struct device_node *root,
break;
}
}
+ device_links_supplier_sync_state_resume();
+
of_node_set_flag(root, OF_POPULATED_BUS);
of_node_put(root);
@@ -518,6 +521,7 @@ static int __init of_platform_default_populate_init(void)
if (!of_have_populated_dt())
return -ENODEV;
+ device_links_supplier_sync_state_pause();
/*
* Handle certain compatibles explicitly, since we don't want to create
* platform_devices for every node in /reserved-memory with a
@@ -538,6 +542,14 @@ static int __init of_platform_default_populate_init(void)
return 0;
}
arch_initcall_sync(of_platform_default_populate_init);
+
+static int __init of_platform_sync_state_init(void)
+{
+ if (of_have_populated_dt())
+ device_links_supplier_sync_state_resume();
+ return 0;
+}
+late_initcall_sync(of_platform_sync_state_init);
#endif
int of_platform_device_destroy(struct device *dev, void *data)
diff --git a/drivers/of/property.c b/drivers/of/property.c
index d7fa75e31f22..e851c57a15b0 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -25,6 +25,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/string.h>
+#include <linux/moduleparam.h>
#include "of_private.h"
@@ -164,7 +165,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u64_index);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -212,7 +213,7 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -260,7 +261,7 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -334,7 +335,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u64);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -872,6 +873,20 @@ of_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
of_property_count_strings(node, propname);
}
+static const char *of_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return kbasename(to_of_node(fwnode)->full_name);
+}
+
+static const char *of_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ /* Root needs no prefix here (its name is "/"). */
+ if (!to_of_node(fwnode)->parent)
+ return "";
+
+ return "/";
+}
+
static struct fwnode_handle *
of_fwnode_get_parent(const struct fwnode_handle *fwnode)
{
@@ -985,6 +1000,320 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
return of_device_get_match_data(dev);
}
+static bool of_is_ancestor_of(struct device_node *test_ancestor,
+ struct device_node *child)
+{
+ of_node_get(child);
+ while (child) {
+ if (child == test_ancestor) {
+ of_node_put(child);
+ return true;
+ }
+ child = of_get_next_parent(child);
+ }
+ return false;
+}
+
+/**
+ * of_link_to_phandle - Add device link to supplier from supplier phandle
+ * @dev: consumer device
+ * @sup_np: phandle to supplier device tree node
+ *
+ * Given a phandle to a supplier device tree node (@sup_np), this function
+ * finds the device that owns the supplier device tree node and creates a
+ * device link from @dev consumer device to the supplier device. This function
+ * doesn't create device links for invalid scenarios such as trying to create a
+ * link with a parent device as the consumer of its child device. In such
+ * cases, it returns an error.
+ *
+ * Returns:
+ * - 0 if link successfully created to supplier
+ * - -EAGAIN if linking to the supplier should be reattempted
+ * - -EINVAL if the supplier link is invalid and should not be created
+ * - -ENODEV if there is no device that corresponds to the supplier phandle
+ */
+static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
+ u32 dl_flags)
+{
+ struct device *sup_dev;
+ int ret = 0;
+ struct device_node *tmp_np = sup_np;
+ int is_populated;
+
+ of_node_get(sup_np);
+ /*
+ * Find the device node that contains the supplier phandle. It may be
+ * @sup_np or it may be an ancestor of @sup_np.
+ */
+ while (sup_np && !of_find_property(sup_np, "compatible", NULL))
+ sup_np = of_get_next_parent(sup_np);
+ if (!sup_np) {
+ dev_dbg(dev, "Not linking to %pOFP - No device\n", tmp_np);
+ return -ENODEV;
+ }
+
+ /*
+ * Don't allow linking a device node as a consumer of one of its
+ * descendant nodes. By definition, a child node can't be a functional
+ * dependency for the parent node.
+ */
+ if (of_is_ancestor_of(dev->of_node, sup_np)) {
+ dev_dbg(dev, "Not linking to %pOFP - is descendant\n", sup_np);
+ of_node_put(sup_np);
+ return -EINVAL;
+ }
+ sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+ is_populated = of_node_check_flag(sup_np, OF_POPULATED);
+ of_node_put(sup_np);
+ if (!sup_dev && is_populated) {
+ /* Early device without struct device. */
+ dev_dbg(dev, "Not linking to %pOFP - No struct device\n",
+ sup_np);
+ return -ENODEV;
+ } else if (!sup_dev) {
+ return -EAGAIN;
+ }
+ if (!device_link_add(dev, sup_dev, dl_flags))
+ ret = -EAGAIN;
+ put_device(sup_dev);
+ return ret;
+}
+
+/**
+ * parse_prop_cells - Property parsing function for suppliers
+ *
+ * @np: Pointer to device tree node containing a list
+ * @prop_name: Name of property to be parsed. Expected to hold phandle values
+ * @index: For properties holding a list of phandles, this is the index
+ * into the list.
+ * @list_name: Property name that is known to contain list of phandle(s) to
+ * supplier(s)
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * This is a helper function to parse properties that have a known fixed name
+ * and are a list of phandles and phandle arguments.
+ *
+ * Returns:
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+static struct device_node *parse_prop_cells(struct device_node *np,
+ const char *prop_name, int index,
+ const char *list_name,
+ const char *cells_name)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp(prop_name, list_name))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, list_name, cells_name, index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+#define DEFINE_SIMPLE_PROP(fname, name, cells) \
+static struct device_node *parse_##fname(struct device_node *np, \
+ const char *prop_name, int index) \
+{ \
+ return parse_prop_cells(np, prop_name, index, name, cells); \
+}
+
+static int strcmp_suffix(const char *str, const char *suffix)
+{
+ unsigned int len, suffix_len;
+
+ len = strlen(str);
+ suffix_len = strlen(suffix);
+ if (len <= suffix_len)
+ return -1;
+ return strcmp(str + len - suffix_len, suffix);
+}
+
+/**
+ * parse_suffix_prop_cells - Suffix property parsing function for suppliers
+ *
+ * @np: Pointer to device tree node containing a list
+ * @prop_name: Name of property to be parsed. Expected to hold phandle values
+ * @index: For properties holding a list of phandles, this is the index
+ * into the list.
+ * @suffix: Property suffix that is known to contain list of phandle(s) to
+ * supplier(s)
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * This is a helper function to parse properties that have a known fixed suffix
+ * and are a list of phandles and phandle arguments.
+ *
+ * Returns:
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+static struct device_node *parse_suffix_prop_cells(struct device_node *np,
+ const char *prop_name, int index,
+ const char *suffix,
+ const char *cells_name)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp_suffix(prop_name, suffix))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, prop_name, cells_name, index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+#define DEFINE_SUFFIX_PROP(fname, suffix, cells) \
+static struct device_node *parse_##fname(struct device_node *np, \
+ const char *prop_name, int index) \
+{ \
+ return parse_suffix_prop_cells(np, prop_name, index, suffix, cells); \
+}
+
+/**
+ * struct supplier_bindings - Property parsing functions for suppliers
+ *
+ * @parse_prop: function name
+ * parse_prop() finds the node corresponding to a supplier phandle
+ * @parse_prop.np: Pointer to device node holding supplier phandle property
+ * @parse_prop.prop_name: Name of property holding a phandle value
+ * @parse_prop.index: For properties holding a list of phandles, this is the
+ * index into the list
+ *
+ * Returns:
+ * parse_prop() return values are
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+struct supplier_bindings {
+ struct device_node *(*parse_prop)(struct device_node *np,
+ const char *prop_name, int index);
+};
+
+DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
+DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
+DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
+DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
+DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
+DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
+DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
+DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+static struct device_node *parse_iommu_maps(struct device_node *np,
+ const char *prop_name, int index)
+{
+ if (strcmp(prop_name, "iommu-map"))
+ return NULL;
+
+ return of_parse_phandle(np, prop_name, (index * 4) + 1);
+}
+
+static const struct supplier_bindings of_supplier_bindings[] = {
+ { .parse_prop = parse_clocks, },
+ { .parse_prop = parse_interconnects, },
+ { .parse_prop = parse_iommus, },
+ { .parse_prop = parse_iommu_maps, },
+ { .parse_prop = parse_mboxes, },
+ { .parse_prop = parse_io_channels, },
+ { .parse_prop = parse_interrupt_parent, },
+ { .parse_prop = parse_dmas, },
+ { .parse_prop = parse_regulators, },
+ { .parse_prop = parse_gpio, },
+ { .parse_prop = parse_gpios, },
+ {}
+};
+
+/**
+ * of_link_property - Create device links to suppliers listed in a property
+ * @dev: Consumer device
+ * @con_np: The consumer device tree node which contains the property
+ * @prop_name: Name of property to be parsed
+ *
+ * This function checks if the property @prop_name that is present in the
+ * @con_np device tree node is one of the known common device tree bindings
+ * that list phandles to suppliers. If @prop_name isn't one, this function
+ * doesn't do anything.
+ *
+ * If @prop_name is one, this function attempts to create device links from the
+ * consumer device @dev to all the devices of the suppliers listed in
+ * @prop_name.
+ *
+ * Any failed attempt to create a device link will NOT result in an immediate
+ * return. of_link_property() must create links to all the available supplier
+ * devices even when attempts to create a link to one or more suppliers fail.
+ */
+static int of_link_property(struct device *dev, struct device_node *con_np,
+ const char *prop_name)
+{
+ struct device_node *phandle;
+ const struct supplier_bindings *s = of_supplier_bindings;
+ unsigned int i = 0;
+ bool matched = false;
+ int ret = 0;
+ u32 dl_flags;
+
+ if (dev->of_node == con_np)
+ dl_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ else
+ dl_flags = DL_FLAG_SYNC_STATE_ONLY;
+
+ /* Do not stop at first failed link, link all available suppliers. */
+ while (!matched && s->parse_prop) {
+ while ((phandle = s->parse_prop(con_np, prop_name, i))) {
+ matched = true;
+ i++;
+ if (of_link_to_phandle(dev, phandle, dl_flags)
+ == -EAGAIN)
+ ret = -EAGAIN;
+ of_node_put(phandle);
+ }
+ s++;
+ }
+ return ret;
+}
+
+static int of_link_to_suppliers(struct device *dev,
+ struct device_node *con_np)
+{
+ struct device_node *child;
+ struct property *p;
+ int ret = 0;
+
+ for_each_property_of_node(con_np, p)
+ if (of_link_property(dev, con_np, p->name))
+ ret = -ENODEV;
+
+ for_each_child_of_node(con_np, child)
+ if (of_link_to_suppliers(dev, child) && !ret)
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static bool of_devlink;
+core_param(of_devlink, of_devlink, bool, 0);
+
+static int of_fwnode_add_links(const struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ if (!of_devlink)
+ return 0;
+
+ if (unlikely(!is_of_node(fwnode)))
+ return 0;
+
+ return of_link_to_suppliers(dev, to_of_node(fwnode));
+}
+
const struct fwnode_operations of_fwnode_ops = {
.get = of_fwnode_get,
.put = of_fwnode_put,
@@ -993,6 +1322,8 @@ const struct fwnode_operations of_fwnode_ops = {
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
+ .get_name = of_fwnode_get_name,
+ .get_name_prefix = of_fwnode_get_name_prefix,
.get_parent = of_fwnode_get_parent,
.get_next_child_node = of_fwnode_get_next_child_node,
.get_named_child_node = of_fwnode_get_named_child_node,
@@ -1001,5 +1332,6 @@ const struct fwnode_operations of_fwnode_ops = {
.graph_get_remote_endpoint = of_fwnode_graph_get_remote_endpoint,
.graph_get_port_parent = of_fwnode_graph_get_port_parent,
.graph_parse_endpoint = of_fwnode_graph_parse_endpoint,
+ .add_links = of_fwnode_add_links,
};
EXPORT_SYMBOL_GPL(of_fwnode_ops);
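To clarify what the DEFINE_SIMPLE_PROP() entries in the supplier table above generate, this is roughly what the clocks entry expands to; the expansion is shown only for illustration and follows directly from the macro definition earlier in the file.

/* Roughly the expansion of DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") */
static struct device_node *parse_clocks(struct device_node *np,
					const char *prop_name, int index)
{
	/* Only the "clocks" property is handled; other properties return NULL. */
	return parse_prop_cells(np, prop_name, index, "clocks", "#clock-cells");
}

of_link_property() calls each such parser for every property of the consumer node and, for every phandle returned, tries of_link_to_phandle() to create the corresponding device link.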
diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts
index 55fe0ee20109..a85b5e1c381a 100644
--- a/drivers/of/unittest-data/testcases.dts
+++ b/drivers/of/unittest-data/testcases.dts
@@ -15,5 +15,6 @@
#include "tests-phandle.dtsi"
#include "tests-interrupts.dtsi"
#include "tests-match.dtsi"
+#include "tests-address.dtsi"
#include "tests-platform.dtsi"
#include "tests-overlay.dtsi"
diff --git a/drivers/of/unittest-data/tests-address.dtsi b/drivers/of/unittest-data/tests-address.dtsi
new file mode 100644
index 000000000000..3fe5d3987beb
--- /dev/null
+++ b/drivers/of/unittest-data/tests-address.dtsi
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ testcase-data {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ address-tests {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* ranges is present here to make sure it is not used for
+ * dma-ranges translation */
+ ranges = <0x70000000 0x70000000 0x40000000>,
+ <0x00000000 0xd0000000 0x20000000>;
+ dma-ranges = <0x0 0x20000000 0x40000000>;
+
+ device@70000000 {
+ reg = <0x70000000 0x1000>;
+ };
+
+ bus@80000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x80000000 0x100000>;
+ dma-ranges = <0x10000000 0x0 0x40000000>;
+
+ device@1000 {
+ reg = <0x1000 0x1000>;
+ };
+ };
+
+ pci@90000000 {
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0x90000000 0x1000>;
+ ranges = <0x42000000 0x0 0x40000000 0x40000000 0x0 0x10000000>;
+ dma-ranges = <0x42000000 0x0 0x80000000 0x00000000 0x0 0x10000000>,
+ <0x42000000 0x0 0xc0000000 0x20000000 0x0 0x10000000>;
+ };
+
+ };
+ };
+};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 92e895d86458..68b87587b2ef 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -12,6 +12,7 @@
#include <linux/hashtable.h>
#include <linux/libfdt.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -779,6 +780,95 @@ static void __init of_unittest_changeset(void)
#endif
}
+static void __init of_unittest_dma_ranges_one(const char *path,
+ u64 expect_dma_addr, u64 expect_paddr, u64 expect_size)
+{
+ struct device_node *np;
+ u64 dma_addr, paddr, size;
+ int rc;
+
+ np = of_find_node_by_path(path);
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ rc = of_dma_get_range(np, &dma_addr, &paddr, &size);
+
+ unittest(!rc, "of_dma_get_range failed on node %pOF rc=%i\n", np, rc);
+ if (!rc) {
+ unittest(size == expect_size,
+ "of_dma_get_range wrong size on node %pOF size=%llx\n", np, size);
+ unittest(paddr == expect_paddr,
+ "of_dma_get_range wrong phys addr (%llx) on node %pOF", paddr, np);
+ unittest(dma_addr == expect_dma_addr,
+ "of_dma_get_range wrong DMA addr (%llx) on node %pOF", dma_addr, np);
+ }
+ of_node_put(np);
+}
+
+static void __init of_unittest_parse_dma_ranges(void)
+{
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000",
+ 0x0, 0x20000000, 0x40000000);
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
+ 0x10000000, 0x20000000, 0x40000000);
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000",
+ 0x80000000, 0x20000000, 0x10000000);
+}
+
+static void __init of_unittest_pci_dma_ranges(void)
+{
+ struct device_node *np;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int i = 0;
+
+ if (!IS_ENABLED(CONFIG_PCI))
+ return;
+
+ np = of_find_node_by_path("/testcase-data/address-tests/pci@90000000");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ pr_err("missing dma-ranges property\n");
+ return;
+ }
+
+ /*
+ * Get the dma-ranges from the device tree
+ */
+ for_each_of_pci_range(&parser, &range) {
+ if (!i) {
+ unittest(range.size == 0x10000000,
+ "for_each_of_pci_range wrong size on node %pOF size=%llx\n",
+ np, range.size);
+ unittest(range.cpu_addr == 0x20000000,
+ "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF",
+ range.cpu_addr, np);
+ unittest(range.pci_addr == 0x80000000,
+ "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF",
+ range.pci_addr, np);
+ } else {
+ unittest(range.size == 0x10000000,
+ "for_each_of_pci_range wrong size on node %pOF size=%llx\n",
+ np, range.size);
+ unittest(range.cpu_addr == 0x40000000,
+ "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF",
+ range.cpu_addr, np);
+ unittest(range.pci_addr == 0xc0000000,
+ "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF",
+ range.pci_addr, np);
+ }
+ i++;
+ }
+
+ of_node_put(np);
+}
+
static void __init of_unittest_parse_interrupts(void)
{
struct device_node *np;
@@ -1146,8 +1236,10 @@ static void attach_node_and_children(struct device_node *np)
full_name = kasprintf(GFP_KERNEL, "%pOF", np);
if (!strcmp(full_name, "/__local_fixups__") ||
- !strcmp(full_name, "/__fixups__"))
+ !strcmp(full_name, "/__fixups__")) {
+ kfree(full_name);
return;
+ }
dup = of_find_node_by_path(full_name);
kfree(full_name);
@@ -2555,6 +2647,8 @@ static int __init of_unittest(void)
of_unittest_changeset();
of_unittest_parse_interrupts();
of_unittest_parse_interrupts_extended();
+ of_unittest_parse_dma_ranges();
+ of_unittest_pci_dma_ranges();
of_unittest_match_node();
of_unittest_platform_populate();
of_unittest_overlay();
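
For reference, each dma-ranges entry in the tests-address.dtsi data above is a <child-bus-address parent-bus-address size> triplet, and of_unittest_parse_dma_ranges() expects of_dma_get_range() to resolve those triplets up to the root. A minimal freestanding sketch of the single-level translation arithmetic (illustrative only; the struct and function names are invented and are not part of this patch or the kernel of_address API):

	#include <stdbool.h>
	#include <stdint.h>

	struct dma_range {                    /* one dma-ranges triplet */
		uint64_t child_addr;          /* DMA (bus) address on the child bus */
		uint64_t parent_addr;         /* corresponding parent bus address */
		uint64_t size;
	};

	/* Translate addr from the child bus to the parent bus, one level up. */
	static bool demo_translate_one(const struct dma_range *r, uint64_t addr,
				       uint64_t *out)
	{
		if (addr < r->child_addr || addr - r->child_addr >= r->size)
			return false;         /* not covered by this entry */
		*out = r->parent_addr + (addr - r->child_addr);
		return true;
	}

For the address-tests node, dma-ranges = <0x0 0x20000000 0x40000000>, so DMA address 0x0 used by device@70000000 resolves to CPU address 0x20000000 with a 1 GiB size, which is exactly what the unittest above checks.
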
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 5484a46dafda..3b00e2c8e2e9 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -45,6 +45,7 @@ static struct daisydev {
static DEFINE_SPINLOCK(topology_lock);
static int numdevs;
+static bool daisy_init_done;
/* Forward-declaration of lower-level functions. */
static int mux_present(struct parport *port);
@@ -87,6 +88,24 @@ static struct parport *clone_parport(struct parport *real, int muxport)
return extra;
}
+static int daisy_drv_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+
+ if (strcmp(drv->name, "daisy_drv"))
+ return -ENODEV;
+ if (strcmp(par_dev->name, daisy_dev_name))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver daisy_driver = {
+ .name = "daisy_drv",
+ .probe = daisy_drv_probe,
+ .devmodel = true,
+};
+
/* Discover the IEEE1284.3 topology on a port -- muxes and daisy chains.
* Return value is number of devices actually detected. */
int parport_daisy_init(struct parport *port)
@@ -98,6 +117,23 @@ int parport_daisy_init(struct parport *port)
int i;
int last_try = 0;
+ if (!daisy_init_done) {
+ /*
+ * Mark the flag true before registering: parport_register_driver()
+ * may load the lowlevel driver, which announces new ports and
+ * re-enters parport_daisy_init().
+ */
+ daisy_init_done = true;
+ i = parport_register_driver(&daisy_driver);
+ if (i) {
+ pr_err("daisy registration failed\n");
+ daisy_init_done = false;
+ return i;
+ }
+ }
+
again:
/* Because this is called before any other devices exist,
* we don't have to claim exclusive access. */
@@ -213,10 +249,12 @@ void parport_daisy_fini(struct parport *port)
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
+ struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
+ memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
@@ -230,7 +268,7 @@ struct pardevice *parport_open(int devnum, const char *name)
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
- dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
+ dev = parport_register_dev_model(port, name, &par_cb, devnum);
parport_put_port(port);
if (!dev)
return NULL;
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e035174ba205..e5e6a463a941 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
- struct pardevice *dev = parport_open (devnum, "Device ID probe");
+ struct pardevice *dev = parport_open(devnum, daisy_dev_name);
if (!dev)
return -ENXIO;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 7b4ee33c1935..d6920ebeabcd 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv)
return 0;
}
+/*
+ * Callback for bus_for_each_dev(): returns 1 as soon as a device on
+ * the bus is a parallel port, so the caller knows one is registered.
+ */
+
+static int port_detect(struct device *dev, void *dev_drv)
+{
+ if (is_parport(dev))
+ return 1;
+ return 0;
+}
+
/**
* parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
@@ -266,9 +278,6 @@ static int port_check(struct device *dev, void *dev_drv)
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
const char *mod_name)
{
- if (list_empty(&portlist))
- get_lowlevel_driver();
-
if (drv->devmodel) {
/* using device model */
int ret;
@@ -282,6 +291,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
if (ret)
return ret;
+ /*
+ * Check whether the bus already has any parallel port registered;
+ * if none is found, load the lowlevel driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
mutex_lock(&registration_lock);
if (drv->match_port)
bus_for_each_dev(&parport_bus_type, NULL, drv,
@@ -292,6 +310,8 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
drv->devmodel = false;
+ if (list_empty(&portlist))
+ get_lowlevel_driver();
mutex_lock(&registration_lock);
list_for_each_entry(port, &portlist, list)
drv->attach(port);
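
As background for the ordering change above, a device-model parport client now registers on the parport bus first, and the lowlevel driver is loaded only when no port device is present yet. A hedged sketch of such a client (the demo_* names are invented; only struct parport_driver and the parport_register_driver()/parport_unregister_driver() calls come from the parport API):

	#include <linux/module.h>
	#include <linux/parport.h>

	static int demo_probe(struct pardevice *par_dev)
	{
		return 0;                /* accept any device announced on the bus */
	}

	static struct parport_driver demo_driver = {
		.name     = "demo_parport_client",
		.probe    = demo_probe,
		.devmodel = true,        /* takes the bus_for_each_dev() path above */
	};

	static int __init demo_init(void)
	{
		/*
		 * With the change above, this loads the lowlevel driver only
		 * if the parport bus has no port registered yet.
		 */
		return parport_register_driver(&demo_driver);
	}

	static void __exit demo_exit(void)
	{
		parport_unregister_driver(&demo_driver);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
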
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index a304f5ea11b9..4bef5c2bae9f 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -52,7 +52,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- def_bool ARC || ARM || ARM64 || X86 || RISCV
+ def_bool y
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
@@ -106,14 +106,14 @@ config PCI_PF_STUB
When in doubt, say N.
config XEN_PCIDEV_FRONTEND
- tristate "Xen PCI Frontend"
- depends on X86 && XEN
- select PCI_XEN
+ tristate "Xen PCI Frontend"
+ depends on X86 && XEN
+ select PCI_XEN
select XEN_XENBUS_FRONTEND
- default y
- help
- The PCI device frontend driver allows the kernel to import arbitrary
- PCI devices from a PCI backend to support PCI driver domains.
+ default y
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
config PCI_ATS
bool
@@ -180,12 +180,12 @@ config PCI_LABEL
select NLS
config PCI_HYPERV
- tristate "Hyper-V PCI Frontend"
- depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
+ tristate "Hyper-V PCI Frontend"
+ depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
select PCI_HYPERV_INTERFACE
- help
- The PCI device frontend driver allows the kernel to import arbitrary
- PCI devices from a PCI backend to support PCI driver domains.
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 28cdd8c0213a..522d2b974e91 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
setup-bus.o vc.o mmap.o setup-irq.o
+obj-$(CONFIG_PCI) += pcie/
+
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
@@ -15,7 +17,6 @@ endif
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
-obj-$(CONFIG_PCIEPORTBUS) += pcie/
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_PCI_ATS) += ats.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2fccb5762c76..79c4a2ef269a 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -355,7 +355,7 @@ static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}
-static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
+bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index e18499243f84..982b46f0a54d 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -60,8 +60,6 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
pdev = pci_physfn(dev);
if (pdev->ats_stu != ps)
return -EINVAL;
-
- atomic_inc(&pdev->ats_ref_cnt); /* count enabled VFs */
} else {
dev->ats_stu = ps;
ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
@@ -71,7 +69,6 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
dev->ats_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_ats);
/**
* pci_disable_ats - disable the ATS capability
@@ -79,27 +76,17 @@ EXPORT_SYMBOL_GPL(pci_enable_ats);
*/
void pci_disable_ats(struct pci_dev *dev)
{
- struct pci_dev *pdev;
u16 ctrl;
if (WARN_ON(!dev->ats_enabled))
return;
- if (atomic_read(&dev->ats_ref_cnt))
- return; /* VFs still enabled */
-
- if (dev->is_virtfn) {
- pdev = pci_physfn(dev);
- atomic_dec(&pdev->ats_ref_cnt);
- }
-
pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl);
ctrl &= ~PCI_ATS_CTRL_ENABLE;
pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
dev->ats_enabled = 0;
}
-EXPORT_SYMBOL_GPL(pci_disable_ats);
void pci_restore_ats_state(struct pci_dev *dev)
{
@@ -113,7 +100,6 @@ void pci_restore_ats_state(struct pci_dev *dev)
ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
}
-EXPORT_SYMBOL_GPL(pci_restore_ats_state);
/**
* pci_ats_queue_depth - query the ATS Invalidate Queue Depth
@@ -140,7 +126,6 @@ int pci_ats_queue_depth(struct pci_dev *dev)
pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap);
return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
}
-EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
/**
* pci_ats_page_aligned - Return Page Aligned Request bit status.
@@ -167,9 +152,22 @@ int pci_ats_page_aligned(struct pci_dev *pdev)
return 0;
}
-EXPORT_SYMBOL_GPL(pci_ats_page_aligned);
#ifdef CONFIG_PCI_PRI
+void pci_pri_init(struct pci_dev *pdev)
+{
+ u16 status;
+
+ pdev->pri_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+
+ if (!pdev->pri_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->pri_cap + PCI_PRI_STATUS, &status);
+ if (status & PCI_PRI_STATUS_PASID)
+ pdev->pasid_required = 1;
+}
+
/**
* pci_enable_pri - Enable PRI capability
* @ pdev: PCI device structure
@@ -180,32 +178,41 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
{
u16 control, status;
u32 max_requests;
- int pos;
+ int pri = pdev->pri_cap;
+
+ /*
+ * VFs must not implement the PRI Capability. If their PF
+ * implements PRI, it is shared by the VFs, so if the PF PRI is
+ * enabled, it is also enabled for the VF.
+ */
+ if (pdev->is_virtfn) {
+ if (pci_physfn(pdev)->pri_enabled)
+ return 0;
+ return -EINVAL;
+ }
if (WARN_ON(pdev->pri_enabled))
return -EBUSY;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+ pci_read_config_word(pdev, pri + PCI_PRI_STATUS, &status);
if (!(status & PCI_PRI_STATUS_STOPPED))
return -EBUSY;
- pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
+ pci_read_config_dword(pdev, pri + PCI_PRI_MAX_REQ, &max_requests);
reqs = min(max_requests, reqs);
pdev->pri_reqs_alloc = reqs;
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+ pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs);
control = PCI_PRI_CTRL_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
pdev->pri_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_pri);
/**
* pci_disable_pri - Disable PRI capability
@@ -216,18 +223,21 @@ EXPORT_SYMBOL_GPL(pci_enable_pri);
void pci_disable_pri(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pri = pdev->pri_cap;
+
+ /* VFs share the PF PRI */
+ if (pdev->is_virtfn)
+ return;
if (WARN_ON(!pdev->pri_enabled))
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pri + PCI_PRI_CTRL, &control);
control &= ~PCI_PRI_CTRL_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
pdev->pri_enabled = 0;
}
@@ -241,19 +251,20 @@ void pci_restore_pri_state(struct pci_dev *pdev)
{
u16 control = PCI_PRI_CTRL_ENABLE;
u32 reqs = pdev->pri_reqs_alloc;
- int pos;
+ int pri = pdev->pri_cap;
+
+ if (pdev->is_virtfn)
+ return;
if (!pdev->pri_enabled)
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return;
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
}
-EXPORT_SYMBOL_GPL(pci_restore_pri_state);
/**
* pci_reset_pri - Resets device's PRI state
@@ -265,24 +276,45 @@ EXPORT_SYMBOL_GPL(pci_restore_pri_state);
int pci_reset_pri(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pri = pdev->pri_cap;
+
+ if (pdev->is_virtfn)
+ return 0;
if (WARN_ON(pdev->pri_enabled))
return -EBUSY;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return -EINVAL;
control = PCI_PRI_CTRL_RESET;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
return 0;
}
-EXPORT_SYMBOL_GPL(pci_reset_pri);
+
+/**
+ * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
+ * status.
+ * @pdev: PCI device structure
+ *
+ * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
+ */
+int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ return pdev->pasid_required;
+}
#endif /* CONFIG_PCI_PRI */
#ifdef CONFIG_PCI_PASID
+void pci_pasid_init(struct pci_dev *pdev)
+{
+ pdev->pasid_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+}
+
/**
* pci_enable_pasid - Enable the PASID capability
* @pdev: PCI device structure
@@ -295,7 +327,17 @@ EXPORT_SYMBOL_GPL(pci_reset_pri);
int pci_enable_pasid(struct pci_dev *pdev, int features)
{
u16 control, supported;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ /*
+ * VFs must not implement the PASID Capability, but if a PF
+ * supports PASID, its VFs share the PF PASID configuration.
+ */
+ if (pdev->is_virtfn) {
+ if (pci_physfn(pdev)->pasid_enabled)
+ return 0;
+ return -EINVAL;
+ }
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
@@ -303,11 +345,10 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (!pdev->eetlp_prefix_path)
return -EINVAL;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
/* User wants to enable anything unsupported? */
@@ -317,13 +358,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
control = PCI_PASID_CTRL_ENABLE | features;
pdev->pasid_features = features;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
pdev->pasid_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_pasid);
/**
* pci_disable_pasid - Disable the PASID capability
@@ -332,20 +372,22 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid);
void pci_disable_pasid(struct pci_dev *pdev)
{
u16 control = 0;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ /* VFs share the PF PASID configuration */
+ if (pdev->is_virtfn)
+ return;
if (WARN_ON(!pdev->pasid_enabled))
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
pdev->pasid_enabled = 0;
}
-EXPORT_SYMBOL_GPL(pci_disable_pasid);
/**
* pci_restore_pasid_state - Restore PASID capabilities
@@ -354,19 +396,20 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
void pci_restore_pasid_state(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ if (pdev->is_virtfn)
+ return;
if (!pdev->pasid_enabled)
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return;
control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
}
-EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
/**
* pci_pasid_features - Check which PASID features are supported
@@ -381,49 +424,20 @@ EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
int pci_pasid_features(struct pci_dev *pdev)
{
u16 supported;
- int pos;
+ int pasid = pdev->pasid_cap;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
return supported;
}
-EXPORT_SYMBOL_GPL(pci_pasid_features);
-
-/**
- * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
- * status.
- * @pdev: PCI device structure
- *
- * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
- *
- * Even though the PRG response PASID status is read from PRI Status
- * Register, since this API will mainly be used by PASID users, this
- * function is defined within #ifdef CONFIG_PCI_PASID instead of
- * CONFIG_PCI_PRI.
- */
-int pci_prg_resp_pasid_required(struct pci_dev *pdev)
-{
- u16 status;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return 0;
-
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
- if (status & PCI_PRI_STATUS_PASID)
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
#define PASID_NUMBER_SHIFT 8
#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
@@ -437,17 +451,18 @@ EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
int pci_max_pasids(struct pci_dev *pdev)
{
u16 supported;
- int pos;
+ int pasid = pdev->pasid_cap;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
return (1 << supported);
}
-EXPORT_SYMBOL_GPL(pci_max_pasids);
#endif /* CONFIG_PCI_PASID */
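
For illustration only (this is not code from the series): with the PRI/PASID capability offsets now cached in struct pci_dev and the interfaces unexported, a built-in IOMMU driver would enable the features roughly as below. The demo_* wrapper and the request count of 32 are arbitrary choices.

	#include <linux/pci.h>
	#include <linux/pci-ats.h>

	static int demo_enable_iommu_features(struct pci_dev *pdev)
	{
		int ret;

		ret = pci_enable_pri(pdev, 32);       /* 32 outstanding page requests */
		if (ret)
			return ret;

		ret = pci_enable_pasid(pdev, 0);      /* no exec/priv PASID features */
		if (ret)
			goto err_pri;

		if (pci_prg_resp_pasid_required(pdev))
			pci_info(pdev, "PRG responses must carry a PASID\n");

		ret = pci_enable_ats(pdev, PAGE_SHIFT);
		if (ret)
			goto err_pasid;

		return 0;

	err_pasid:
		pci_disable_pasid(pdev);
	err_pri:
		pci_disable_pri(pdev);
		return ret;
	}
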
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 70e078238899..c77069c8ee5d 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -22,34 +22,6 @@ config PCI_AARDVARK
controller is part of the South Bridge of the Marvel Armada
3700 SoC.
-menu "Cadence PCIe controllers support"
-
-config PCIE_CADENCE
- bool
-
-config PCIE_CADENCE_HOST
- bool "Cadence PCIe host controller"
- depends on OF
- depends on PCI
- select IRQ_DOMAIN
- select PCIE_CADENCE
- help
- Say Y here if you want to support the Cadence PCIe controller in host
- mode. This PCIe controller may be embedded into many different vendors
- SoCs.
-
-config PCIE_CADENCE_EP
- bool "Cadence PCIe endpoint controller"
- depends on OF
- depends on PCI_ENDPOINT
- select PCIE_CADENCE
- help
- Say Y here if you want to support the Cadence PCIe controller in
- endpoint mode. This PCIe controller may be embedded into many
- different vendors SoCs.
-
-endmenu
-
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
depends on ARCH_ZYNQMP || COMPILE_TEST
@@ -135,7 +107,7 @@ config PCI_V3_SEMI
config PCI_VERSATILE
bool "ARM Versatile PB PCI controller"
- depends on ARCH_VERSATILE
+ depends on ARCH_VERSATILE || COMPILE_TEST
config PCIE_IPROC
tristate
@@ -289,4 +261,5 @@ config PCI_HYPERV_INTERFACE
have a common interface with the Hyper-V PCI frontend driver.
source "drivers/pci/controller/dwc/Kconfig"
+source "drivers/pci/controller/cadence/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index a2a22c9d91af..3d4f597f15ce 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
-obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
-obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE) += cadence/
obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
new file mode 100644
index 000000000000..b76b3cf55ce5
--- /dev/null
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Cadence PCIe controllers support"
+ depends on PCI
+
+config PCIE_CADENCE
+ bool
+
+config PCIE_CADENCE_HOST
+ bool
+ depends on OF
+ select IRQ_DOMAIN
+ select PCIE_CADENCE
+
+config PCIE_CADENCE_EP
+ bool
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE
+
+config PCIE_CADENCE_PLAT
+ bool
+
+config PCIE_CADENCE_PLAT_HOST
+ bool "Cadence PCIe platform host controller"
+ depends on OF
+ select PCIE_CADENCE_HOST
+ select PCIE_CADENCE_PLAT
+ help
+ Say Y here if you want to support the Cadence PCIe platform controller in
+ host mode. This PCIe controller may be embedded into many different
+ vendors' SoCs.
+
+config PCIE_CADENCE_PLAT_EP
+ bool "Cadence PCIe platform endpoint controller"
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE_EP
+ select PCIE_CADENCE_PLAT
+ help
+ Say Y here if you want to support the Cadence PCIe platform controller in
+ endpoint mode. This PCIe controller may be embedded into many
+ different vendors' SoCs.
+
+endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
new file mode 100644
index 000000000000..232a3f20876a
--- /dev/null
+++ b/drivers/pci/controller/cadence/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index def7820cb824..1c173dad67d1 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -17,35 +17,6 @@
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-/**
- * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
- * @pcie: Cadence PCIe controller
- * @max_regions: maximum number of regions supported by hardware
- * @ob_region_map: bitmask of mapped outbound regions
- * @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
- * dedicated outbound regions is mapped.
- * @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
- * IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
- * dedicated outbound region.
- * @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
- */
-struct cdns_pcie_ep {
- struct cdns_pcie pcie;
- u32 max_regions;
- unsigned long ob_region_map;
- phys_addr_t *ob_addr;
- phys_addr_t irq_phys_addr;
- void __iomem *irq_cpu_addr;
- u64 irq_pci_addr;
- u8 irq_pci_fn;
- u8 irq_pending;
-};
-
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
struct pci_epf_header *hdr)
{
@@ -424,28 +395,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
-static const struct of_device_id cdns_pcie_ep_of_match[] = {
- { .compatible = "cdns,cdns-pcie-ep" },
-
- { },
-};
-static int cdns_pcie_ep_probe(struct platform_device *pdev)
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = ep->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- struct cdns_pcie_ep *ep;
- struct cdns_pcie *pcie;
- struct pci_epc *epc;
+ struct cdns_pcie *pcie = &ep->pcie;
struct resource *res;
+ struct pci_epc *epc;
int ret;
- int phy_count;
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep)
- return -ENOMEM;
- pcie = &ep->pcie;
pcie->is_rc = false;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
@@ -474,19 +434,6 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
if (!ep->ob_addr)
return -ENOMEM;
- ret = cdns_pcie_init_phy(dev, pcie);
- if (ret) {
- dev_err(dev, "failed to init phy\n");
- return ret;
- }
- platform_set_drvdata(pdev, pcie);
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- goto err_get_sync;
- }
-
/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
@@ -528,38 +475,5 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
err_init:
pm_runtime_put_sync(dev);
- err_get_sync:
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
- phy_count = pcie->phy_count;
- while (phy_count--)
- device_link_del(pcie->link[phy_count]);
-
return ret;
}
-
-static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct cdns_pcie *pcie = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_put_sync(dev);
- if (ret < 0)
- dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
- pm_runtime_disable(dev);
-
- cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_ep_driver = {
- .driver = {
- .name = "cdns-pcie-ep",
- .of_match_table = cdns_pcie_ep_of_match,
- .pm = &cdns_pcie_pm_ops,
- },
- .probe = cdns_pcie_ep_probe,
- .shutdown = cdns_pcie_ep_shutdown,
-};
-builtin_platform_driver(cdns_pcie_ep_driver);
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 97e251090b4f..9b1c3966414b 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -11,33 +11,6 @@
#include "pcie-cadence.h"
-/**
- * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
- * @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
- * @cfg_res: start/end offsets in the physical system memory to map PCI
- * configuration space accesses
- * @bus_range: first/last buses behind the PCIe host controller
- * @cfg_base: IO mapped window to access the PCI configuration space of a
- * single function at a time
- * @max_regions: maximum number of regions supported by the hardware
- * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
- * translation (nbits sets into the "no BAR match" register)
- * @vendor_id: PCI vendor ID
- * @device_id: PCI device ID
- */
-struct cdns_pcie_rc {
- struct cdns_pcie pcie;
- struct device *dev;
- struct resource *cfg_res;
- struct resource *bus_range;
- void __iomem *cfg_base;
- u32 max_regions;
- u32 no_bar_nbits;
- u16 vendor_id;
- u16 device_id;
-};
-
static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -92,11 +65,6 @@ static struct pci_ops cdns_pcie_host_ops = {
.write = pci_generic_config_write,
};
-static const struct of_device_id cdns_pcie_host_of_match[] = {
- { .compatible = "cdns,cdns-pcie-host" },
-
- { },
-};
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
@@ -136,10 +104,10 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
- struct resource *cfg_res = rc->cfg_res;
struct resource *mem_res = pcie->mem_res;
struct resource *bus_range = rc->bus_range;
- struct device *dev = rc->dev;
+ struct resource *cfg_res = rc->cfg_res;
+ struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
struct of_pci_range_parser parser;
struct of_pci_range range;
@@ -211,7 +179,7 @@ static int cdns_pcie_host_init(struct device *dev,
int err;
/* Parse our PCI ranges and request their resources */
- err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+ err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
if (err)
return err;
@@ -233,25 +201,21 @@ static int cdns_pcie_host_init(struct device *dev,
return err;
}
-static int cdns_pcie_host_probe(struct platform_device *pdev)
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = rc->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
struct list_head resources;
- struct cdns_pcie_rc *rc;
struct cdns_pcie *pcie;
struct resource *res;
int ret;
- int phy_count;
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ bridge = pci_host_bridge_from_priv(rc);
if (!bridge)
return -ENOMEM;
- rc = pci_host_bridge_priv(bridge);
- rc->dev = dev;
-
pcie = &rc->pcie;
pcie->is_rc = true;
@@ -287,21 +251,8 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
dev_err(dev, "missing \"mem\"\n");
return -EINVAL;
}
- pcie->mem_res = res;
- ret = cdns_pcie_init_phy(dev, pcie);
- if (ret) {
- dev_err(dev, "failed to init phy\n");
- return ret;
- }
- platform_set_drvdata(pdev, pcie);
-
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- goto err_get_sync;
- }
+ pcie->mem_res = res;
ret = cdns_pcie_host_init(dev, &resources, rc);
if (ret)
@@ -326,37 +277,5 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
err_init:
pm_runtime_put_sync(dev);
- err_get_sync:
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
- phy_count = pcie->phy_count;
- while (phy_count--)
- device_link_del(pcie->link[phy_count]);
-
return ret;
}
-
-static void cdns_pcie_shutdown(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct cdns_pcie *pcie = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_put_sync(dev);
- if (ret < 0)
- dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_host_driver = {
- .driver = {
- .name = "cdns-pcie-host",
- .of_match_table = cdns_pcie_host_of_match,
- .pm = &cdns_pcie_pm_ops,
- },
- .probe = cdns_pcie_host_probe,
- .shutdown = cdns_pcie_shutdown,
-};
-builtin_platform_driver(cdns_pcie_host_driver);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
new file mode 100644
index 000000000000..f5c6bf6dfcb8
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe platform driver.
+ *
+ * Copyright (c) 2019, Cadence Design Systems
+ * Author: Tom Joseph <tjoseph@cadence.com>
+ */
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include "pcie-cadence.h"
+
+/**
+ * struct cdns_plat_pcie - private data for this PCIe platform driver
+ * @pcie: Cadence PCIe controller
+ * @is_rc: true if the PCIe controller is used in Root Complex mode,
+ * false if it is used in Endpoint mode.
+ */
+struct cdns_plat_pcie {
+ struct cdns_pcie *pcie;
+ bool is_rc;
+};
+
+struct cdns_plat_pcie_of_data {
+ bool is_rc;
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[];
+
+static int cdns_plat_pcie_probe(struct platform_device *pdev)
+{
+ const struct cdns_plat_pcie_of_data *data;
+ struct cdns_plat_pcie *cdns_plat_pcie;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+ int phy_count;
+ bool is_rc;
+ int ret;
+
+ match = of_match_device(cdns_plat_pcie_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_plat_pcie_of_data *)match->data;
+ is_rc = data->is_rc;
+
+ pr_debug(" Started %s with is_rc: %d\n", __func__, is_rc);
+ cdns_plat_pcie = devm_kzalloc(dev, sizeof(*cdns_plat_pcie), GFP_KERNEL);
+ if (!cdns_plat_pcie)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cdns_plat_pcie);
+ if (is_rc) {
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ rc = pci_host_bridge_priv(bridge);
+ rc->pcie.dev = dev;
+ cdns_plat_pcie->pcie = &rc->pcie;
+ cdns_plat_pcie->is_rc = is_rc;
+
+ ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret)
+ goto err_init;
+ } else {
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->pcie.dev = dev;
+ cdns_plat_pcie->pcie = &ep->pcie;
+ cdns_plat_pcie->is_rc = is_rc;
+
+ ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_ep_setup(ep);
+ if (ret)
+ goto err_init;
+ }
+
+ return 0;
+
+ err_init:
+ pm_runtime_put_sync(dev);
+
+ err_get_sync:
+ pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(cdns_plat_pcie->pcie);
+ phy_count = cdns_plat_pcie->pcie->phy_count;
+ while (phy_count--)
+ device_link_del(cdns_plat_pcie->pcie->link[phy_count]);
+
+ return ret;
+}
+
+static void cdns_plat_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+
+ cdns_pcie_disable_phy(pcie);
+}
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_host_of_data = {
+ .is_rc = true,
+};
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_ep_of_data = {
+ .is_rc = false,
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[] = {
+ {
+ .compatible = "cdns,cdns-pcie-host",
+ .data = &cdns_plat_pcie_host_of_data,
+ },
+ {
+ .compatible = "cdns,cdns-pcie-ep",
+ .data = &cdns_plat_pcie_ep_of_data,
+ },
+ {},
+};
+
+static struct platform_driver cdns_plat_pcie_driver = {
+ .driver = {
+ .name = "cdns-pcie",
+ .of_match_table = cdns_plat_pcie_of_match,
+ .pm = &cdns_pcie_pm_ops,
+ },
+ .probe = cdns_plat_pcie_probe,
+ .shutdown = cdns_plat_pcie_shutdown,
+};
+builtin_platform_driver(cdns_plat_pcie_driver);
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index cd795f6fc1e2..cd795f6fc1e2 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
diff --git a/drivers/pci/controller/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index ae6bf2a2b3d3..a2b28b912ca4 100644
--- a/drivers/pci/controller/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
@@ -190,6 +190,8 @@ enum cdns_pcie_rp_bar {
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+struct cdns_pcie;
+
enum cdns_pcie_msg_code {
MSG_CODE_ASSERT_INTA = 0x20,
MSG_CODE_ASSERT_INTB = 0x21,
@@ -231,13 +233,71 @@ enum cdns_pcie_msg_routing {
struct cdns_pcie {
void __iomem *reg_base;
struct resource *mem_res;
+ struct device *dev;
bool is_rc;
u8 bus;
int phy_count;
struct phy **phy;
struct device_link **link;
+ const struct cdns_pcie_common_ops *ops;
+};
+
+/**
+ * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
+ * @pcie: Cadence PCIe controller
+ * @cfg_res: start/end offsets in the physical system memory to map PCI
+ * configuration space accesses
+ * @bus_range: first/last buses behind the PCIe host controller
+ * @cfg_base: IO mapped window to access the PCI configuration space of a
+ * single function at a time
+ * @max_regions: maximum number of regions supported by the hardware
+ * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
+ * translation (nbits sets into the "no BAR match" register)
+ * @vendor_id: PCI vendor ID
+ * @device_id: PCI device ID
+ */
+struct cdns_pcie_rc {
+ struct cdns_pcie pcie;
+ struct resource *cfg_res;
+ struct resource *bus_range;
+ void __iomem *cfg_base;
+ u32 max_regions;
+ u32 no_bar_nbits;
+ u16 vendor_id;
+ u16 device_id;
};
+/**
+ * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
+ * @pcie: Cadence PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * dedicated outbound regions is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ * the sending of a memory write (MSI) / normal message (legacy
+ * IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ * the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct cdns_pcie_ep {
+ struct cdns_pcie pcie;
+ u32 max_regions;
+ unsigned long ob_region_map;
+ phys_addr_t *ob_addr;
+ phys_addr_t irq_phys_addr;
+ void __iomem *irq_cpu_addr;
+ u64 irq_pci_addr;
+ u8 irq_pci_fn;
+ u8 irq_pending;
+};
+
/* Register access */
static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
{
@@ -306,6 +366,23 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
+#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+#else
+static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PCIE_CADENCE_EP
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+#else
+static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+{
+ return 0;
+}
+#endif
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 0ba988b5b5bc..625a031b2193 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -7,9 +7,9 @@ config PCIE_DW
bool
config PCIE_DW_HOST
- bool
+ bool
depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_DW
+ select PCIE_DW
config PCIE_DW_EP
bool
@@ -224,7 +224,7 @@ config PCIE_HISI_STB
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs
config PCI_MESON
bool "MESON PCIe controller"
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 4234ddb4722f..b20651cea09f 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -353,7 +353,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index ca9aa4501e7e..0d151cead1b7 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -58,7 +58,7 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 3a5fa26d5e56..f24f79a70d9a 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -263,6 +263,7 @@ static const struct ls_pcie_drvdata ls2088_drvdata = {
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index e35e9eaa50ee..3772b02a5c55 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -16,6 +16,7 @@
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
+#include <linux/phy/phy.h>
#include "pcie-designware.h"
@@ -96,12 +97,18 @@ struct meson_pcie_rc_reset {
struct reset_control *apb;
};
+struct meson_pcie_param {
+ bool has_shared_phy;
+};
+
struct meson_pcie {
struct dw_pcie pci;
struct meson_pcie_mem_res mem_res;
struct meson_pcie_clk_res clk_res;
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
+ struct phy *phy;
+ const struct meson_pcie_param *param;
};
static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
@@ -123,10 +130,12 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
- mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
- if (IS_ERR(mrst->phy))
- return PTR_ERR(mrst->phy);
- reset_control_deassert(mrst->phy);
+ if (!mp->param->has_shared_phy) {
+ mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->phy))
+ return PTR_ERR(mrst->phy);
+ reset_control_deassert(mrst->phy);
+ }
mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
if (IS_ERR(mrst->port))
@@ -180,27 +189,52 @@ static int meson_pcie_get_mems(struct platform_device *pdev,
if (IS_ERR(mp->mem_res.cfg_base))
return PTR_ERR(mp->mem_res.cfg_base);
- /* Meson SoC has two PCI controllers use same phy register*/
- mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
- if (IS_ERR(mp->mem_res.phy_base))
- return PTR_ERR(mp->mem_res.phy_base);
+ /* Meson AXG SoC has two PCI controllers that use the same phy register */
+ if (!mp->param->has_shared_phy) {
+ mp->mem_res.phy_base =
+ meson_pcie_get_mem_shared(pdev, mp, "phy");
+ if (IS_ERR(mp->mem_res.phy_base))
+ return PTR_ERR(mp->mem_res.phy_base);
+ }
return 0;
}
-static void meson_pcie_power_on(struct meson_pcie *mp)
+static int meson_pcie_power_on(struct meson_pcie *mp)
{
- writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+ int ret = 0;
+
+ if (mp->param->has_shared_phy) {
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
+ } else
+ writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+
+ return 0;
}
-static void meson_pcie_reset(struct meson_pcie *mp)
+static int meson_pcie_reset(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
-
- reset_control_assert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- reset_control_deassert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
+ int ret = 0;
+
+ if (mp->param->has_shared_phy) {
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
+ } else {
+ reset_control_assert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+ }
reset_control_assert(mrst->port);
reset_control_assert(mrst->apb);
@@ -208,6 +242,8 @@ static void meson_pcie_reset(struct meson_pcie *mp)
reset_control_deassert(mrst->port);
reset_control_deassert(mrst->apb);
udelay(PCIE_RESET_DELAY);
+
+ return 0;
}
static inline struct clk *meson_pcie_probe_clock(struct device *dev,
@@ -250,15 +286,17 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
- res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
- if (IS_ERR(res->mipi_gate))
- return PTR_ERR(res->mipi_gate);
+ if (!mp->param->has_shared_phy) {
+ res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
+ if (IS_ERR(res->mipi_gate))
+ return PTR_ERR(res->mipi_gate);
+ }
- res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
+ res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
- res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
+ res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
if (IS_ERR(res->clk))
return PTR_ERR(res->clk);
@@ -287,9 +325,9 @@ static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
static void meson_pcie_assert_reset(struct meson_pcie *mp)
{
- gpiod_set_value_cansleep(mp->reset_gpio, 0);
- udelay(500);
gpiod_set_value_cansleep(mp->reset_gpio, 1);
+ udelay(500);
+ gpiod_set_value_cansleep(mp->reset_gpio, 0);
}
static void meson_pcie_init_dw(struct meson_pcie *mp)
@@ -524,6 +562,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int meson_pcie_probe(struct platform_device *pdev)
{
+ const struct meson_pcie_param *match_data;
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct meson_pcie *mp;
@@ -537,6 +576,19 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ match_data = of_device_get_match_data(dev);
+ if (!match_data) {
+ dev_err(dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+ mp->param = match_data;
+
+ if (mp->param->has_shared_phy) {
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy))
+ return PTR_ERR(mp->phy);
+ }
+
mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(mp->reset_gpio)) {
dev_err(dev, "get reset gpio failed\n");
@@ -555,13 +607,22 @@ static int meson_pcie_probe(struct platform_device *pdev)
return ret;
}
- meson_pcie_power_on(mp);
- meson_pcie_reset(mp);
+ ret = meson_pcie_power_on(mp);
+ if (ret) {
+ dev_err(dev, "phy power on failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_reset(mp);
+ if (ret) {
+ dev_err(dev, "reset failed, %d\n", ret);
+ goto err_phy;
+ }
ret = meson_pcie_probe_clocks(mp);
if (ret) {
dev_err(dev, "init clock resources failed, %d\n", ret);
- return ret;
+ goto err_phy;
}
platform_set_drvdata(pdev, mp);
@@ -569,15 +630,36 @@ static int meson_pcie_probe(struct platform_device *pdev)
ret = meson_add_pcie_port(mp, pdev);
if (ret < 0) {
dev_err(dev, "Add PCIe port failed, %d\n", ret);
- return ret;
+ goto err_phy;
}
return 0;
+
+err_phy:
+ if (mp->param->has_shared_phy) {
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
+ }
+
+ return ret;
}
+static struct meson_pcie_param meson_pcie_axg_param = {
+ .has_shared_phy = false,
+};
+
+static struct meson_pcie_param meson_pcie_g12a_param = {
+ .has_shared_phy = true,
+};
+
static const struct of_device_id meson_pcie_of_match[] = {
{
.compatible = "amlogic,axg-pcie",
+ .data = &meson_pcie_axg_param,
+ },
+ {
+ .compatible = "amlogic,g12a-pcie",
+ .data = &meson_pcie_g12a_param,
},
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index d00252bd8fae..9e2482bd7b6d 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -422,7 +422,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
artpec6_pcie_wait_for_phy(artpec6_pcie);
artpec6_pcie_set_nfts(artpec6_pcie);
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 0f36a926059a..395feb8ca051 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -10,6 +10,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
@@ -78,7 +79,8 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
int i, pos, irq;
- u32 val, num_ctrls;
+ unsigned long val;
+ u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
@@ -86,14 +88,14 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
for (i = 0; i < num_ctrls; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
(i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &val);
- if (!val)
+ 4, &status);
+ if (!status)
continue;
ret = IRQ_HANDLED;
+ val = status;
pos = 0;
- while ((pos = find_next_bit((unsigned long *) &val,
- MAX_MSI_IRQS_PER_CTRL,
+ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
irq = irq_find_mapping(pp->irq_domain,
(i * MAX_MSI_IRQS_PER_CTRL) +
@@ -319,7 +321,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- struct resource_entry *win, *tmp;
+ struct resource_entry *win;
struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
@@ -342,31 +344,20 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (!bridge)
return -ENOMEM;
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &bridge->windows, &pp->io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret)
return ret;
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
+ resource_list_for_each_entry(win, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- ret = devm_pci_remap_iospace(dev, win->res,
- pp->io_base);
- if (ret) {
- dev_warn(dev, "Error %d: failed to map resource %pR\n",
- ret, win->res);
- resource_list_destroy_entry(win);
- } else {
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- }
+ pp->io = win->res;
+ pp->io->name = "I/O";
+ pp->io_size = resource_size(pp->io);
+ pp->io_bus_addr = pp->io->start - win->offset;
+ pp->io_base = pci_pio_to_address(pp->io->start);
break;
case IORESOURCE_MEM:
pp->mem = win->res;
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index b58fdcbc664b..73646b677aff 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -70,7 +70,7 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 5a18e94e52c8..5accdd6bc388 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -214,7 +214,7 @@ struct dw_pcie_ep {
phys_addr_t phys_base;
size_t addr_size;
size_t page_size;
- u8 bar_to_atu[6];
+ u8 bar_to_atu[PCI_STD_NUM_BARS];
phys_addr_t *outbound_addr;
unsigned long *ib_window_map;
unsigned long *ob_window_map;
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index f89f5acee72d..cbe95f0ea0ca 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -40,8 +40,6 @@
#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
-#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN BIT(9)
-#define APPL_PINMUX_CLKREQ_OUT_OVRD BIT(10)
#define APPL_CTRL 0x4
#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
@@ -1193,8 +1191,8 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
if (!pcie->supports_clkreq) {
val = appl_readl(pcie, APPL_PINMUX);
- val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN;
- val |= APPL_PINMUX_CLKREQ_OUT_OVRD;
+ val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
appl_writel(pcie, val, APPL_PINMUX);
}
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 3f30ee4a00b3..8fd7badd59c2 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -33,6 +33,10 @@
#define PCL_PIPEMON 0x0044
#define PCL_PCLK_ALIVE BIT(15)
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
#define PCL_APP_READY_CTRL 0x8008
#define PCL_APP_LTSSM_ENABLE BIT(0)
@@ -85,6 +89,12 @@ static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
{
u32 val;
+ /* set RC MODE */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN;
+ val &= ~PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
/* use auxiliary power detection */
val = readl(priv->base + PCL_APP_PM0);
val |= PCL_SYS_AUX_PWR_DET;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index fc0fe4d4de49..2a20b649f40c 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -175,18 +176,20 @@
(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
-#define PIO_TIMEOUT_MS 1
+#define PIO_RETRY_CNT 500
+#define PIO_RETRY_DELAY 2 /* 2 us */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000
+#define RETRAIN_WAIT_MAX_RETRIES 10
+#define RETRAIN_WAIT_USLEEP_US 2000
#define MSI_IRQ_NUM 32
struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
- struct list_head resources;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
struct irq_domain *msi_domain;
@@ -239,6 +242,17 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
+{
+ size_t retries;
+
+ for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
+ if (!advk_pcie_link_up(pcie))
+ break;
+ udelay(RETRAIN_WAIT_USLEEP_US);
+ }
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
@@ -324,6 +338,14 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
+ /*
+ * The PERST# signal could have been asserted by the pinctrl subsystem
+ * before the probe() callback was called, putting the endpoint into
+ * fundamental reset. The PCI Express spec requires a delay of at least
+ * 100 ms after such a reset before link training may start.
+ */
+ msleep(PCI_PM_D3COLD_WAIT);
+
/* Start link training */
reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
reg |= PCIE_CORE_LINK_TRAINING;
@@ -383,17 +405,16 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
+ int i;
- while (time_before(jiffies, timeout)) {
+ for (i = 0; i < PIO_RETRY_CNT; i++) {
u32 start, isr;
start = advk_readl(pcie, PIO_START);
isr = advk_readl(pcie, PIO_ISR);
if (!start && isr)
return 0;
+ udelay(PIO_RETRY_DELAY);
}
dev_err(dev, "config read/write timed out\n");
@@ -415,7 +436,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
+ *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -426,11 +447,20 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
return PCI_BRIDGE_EMUL_HANDLED;
}
+ case PCI_EXP_LNKCTL: {
+ /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
+ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
+ ~(PCI_EXP_LNKSTA_LT << 16);
+ if (!advk_pcie_link_up(pcie))
+ val |= (PCI_EXP_LNKSTA_LT << 16);
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
case PCI_CAP_LIST_ID:
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_LNKCAP:
- case PCI_EXP_LNKCTL:
*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
return PCI_BRIDGE_EMUL_HANDLED;
default:
@@ -447,14 +477,24 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
switch (reg) {
case PCI_EXP_DEVCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
case PCI_EXP_LNKCTL:
advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ if (new & PCI_EXP_LNKCTL_RL)
+ advk_pcie_wait_for_retrain(pcie);
break;
- case PCI_EXP_RTCTL:
- new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
- advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
+ case PCI_EXP_RTCTL: {
+ /* Only mask/unmask PME interrupt */
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
+ ~PCIE_MSG_PM_PME_MASK;
+ if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
+ val |= PCIE_MSG_PM_PME_MASK;
+ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
break;
+ }
case PCI_EXP_RTSTA:
new = (new & PCI_EXP_RTSTA_PME) >> 9;
@@ -479,18 +519,20 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
- bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
- bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
+ bridge->conf.vendor =
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
+ bridge->conf.device =
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
bridge->conf.class_revision =
- advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
+ cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
/* Support 32 bits I/O addressing */
bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
/* Support 64 bits memory pref */
- bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
- bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
+ bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+ bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
@@ -910,63 +952,11 @@ static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
-{
- int err, res_valid = 0;
- struct device *dev = &pcie->pdev->dev;
- struct resource_entry *win, *tmp;
- resource_size_t iobase;
-
- INIT_LIST_HEAD(&pcie->resources);
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
- struct resource *res = win->res;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- break;
- case IORESOURCE_BUS:
- pcie->root_bus_nr = res->start;
- break;
- }
- }
-
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
- }
-
- return 0;
-
-out_release_res:
- pci_free_resource_list(&pcie->resources);
- return err;
-}
-
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
- struct resource *res;
+ struct resource *res, *bus;
struct pci_host_bridge *bridge;
int ret, irq;
@@ -991,11 +981,13 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- ret = advk_pcie_parse_request_of_pci_ranges(pcie);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, &bus);
if (ret) {
dev_err(dev, "Failed to parse resources\n");
return ret;
}
+ pcie->root_bus_nr = bus->start;
advk_pcie_setup_hw(pcie);
@@ -1014,7 +1006,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = 0;
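The conversion above from devm_of_pci_get_host_bridge_resources() plus devm_request_pci_bus_resources() to a single pci_parse_request_of_pci_ranges() call recurs in most of the host drivers touched below. As a rough sketch only (hypothetical driver name and ops table, error handling trimmed, the usual <linux/pci.h>/<linux/platform_device.h> includes assumed), a converted probe path reduces to:

static int example_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct resource *bus;
	int ret;

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	/*
	 * One call parses the DT "ranges"/"dma-ranges", maps any I/O window
	 * and requests the bus resources, replacing the per-driver loops
	 * removed by these hunks.
	 */
	ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
					      &bridge->dma_ranges, &bus);
	if (ret)
		return ret;

	bridge->busnr = bus->start;
	bridge->dev.parent = dev;
	bridge->sysdata = NULL;
	bridge->ops = &example_pci_ops;	/* hypothetical pci_ops */

	return pci_scan_root_bus_bridge(bridge);
}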
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index bf5ece5d9291..1b67564de7af 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -375,12 +375,11 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return 0;
}
-static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
- struct device_node *np)
+static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
struct device *dev = p->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
+ struct resource_entry *entry;
u32 confreg[3] = {
FARADAY_PCI_MEM1_BASE_SIZE,
FARADAY_PCI_MEM2_BASE_SIZE,
@@ -389,19 +388,13 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
int i = 0;
u32 val;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /*
- * Get the dma-ranges from the device tree
- */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.pci_addr + range.size - 1;
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ u64 pci_addr = entry->res->start - entry->offset;
+ u64 end = entry->res->end - entry->offset;
int ret;
- ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val);
+ ret = faraday_res_to_memcfg(pci_addr,
+ resource_size(entry->res), &val);
if (ret) {
dev_err(dev,
"DMA range %d: illegal MEM resource size\n", i);
@@ -409,7 +402,7 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
}
dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
- i + 1, range.pci_addr, end, val);
+ i + 1, pci_addr, end, val);
if (i <= 2) {
faraday_raw_pci_write_config(p, 0, 0, confreg[i],
4, val);
@@ -430,10 +423,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
const struct faraday_pci_variant *variant =
of_device_get_match_data(dev);
struct resource *regs;
- resource_size_t io_base;
struct resource_entry *win;
struct faraday_pci *p;
- struct resource *mem;
struct resource *io;
struct pci_host_bridge *host;
struct clk *clk;
@@ -441,7 +432,6 @@ static int faraday_pci_probe(struct platform_device *pdev)
unsigned char cur_bus_speed = PCI_SPEED_33MHz;
int ret;
u32 val;
- LIST_HEAD(res);
host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
if (!host)
@@ -480,44 +470,21 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &res, &io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &res);
+ ret = pci_parse_request_of_pci_ranges(dev, &host->windows,
+ &host->dma_ranges, NULL);
if (ret)
return ret;
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "Gemini PCI I/O";
- if (!faraday_res_to_memcfg(io->start - win->offset,
- resource_size(io), &val)) {
- /* setup I/O space size */
- writel(val, p->base + PCI_IOSIZE);
- } else {
- dev_err(dev, "illegal IO mem size\n");
- return -EINVAL;
- }
- ret = devm_pci_remap_iospace(dev, io, io_base);
- if (ret) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- ret, io);
- continue;
- }
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "Gemini PCI MEM";
- break;
- case IORESOURCE_BUS:
- break;
- default:
- break;
+ win = resource_list_first_type(&host->windows, IORESOURCE_IO);
+ if (win) {
+ io = win->res;
+ if (!faraday_res_to_memcfg(io->start - win->offset,
+ resource_size(io), &val)) {
+ /* setup I/O space size */
+ writel(val, p->base + PCI_IOSIZE);
+ } else {
+ dev_err(dev, "illegal IO mem size\n");
+ return -EINVAL;
}
}
@@ -565,11 +532,10 @@ static int faraday_pci_probe(struct platform_device *pdev)
cur_bus_speed = PCI_SPEED_66MHz;
}
- ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
+ ret = faraday_pci_parse_map_dma_ranges(p);
if (ret)
return ret;
- list_splice_init(&res, &host->windows);
ret = pci_scan_root_bus_bridge(host);
if (ret) {
dev_err(dev, "failed to scan host: %d\n", ret);
@@ -581,7 +547,6 @@ static int faraday_pci_probe(struct platform_device *pdev)
pci_bus_assign_resources(p->bus);
pci_bus_add_devices(p->bus);
- pci_free_resource_list(&res);
return 0;
}
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index c8cb9c5188a4..250a3fc80ec6 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -27,7 +27,7 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
struct pci_config_window *cfg;
/* Parse our PCI ranges and request their resources */
- err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+ err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
if (err)
return ERR_PTR(err);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index f1f300218fab..9977abff92fc 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -76,11 +76,6 @@ static enum pci_protocol_version_t pci_protocol_versions[] = {
PCI_PROTOCOL_VERSION_1_1,
};
-/*
- * Protocol version negotiated by hv_pci_protocol_negotiation().
- */
-static enum pci_protocol_version_t pci_protocol_version;
-
#define PCI_CONFIG_MMIO_LENGTH 0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -307,7 +302,7 @@ struct pci_bus_relations {
struct pci_q_res_req_response {
struct vmpacket_descriptor hdr;
s32 status; /* negative values are failures */
- u32 probed_bar[6];
+ u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;
struct pci_set_power {
@@ -455,12 +450,15 @@ enum hv_pcibus_state {
hv_pcibus_init = 0,
hv_pcibus_probed,
hv_pcibus_installed,
+ hv_pcibus_removing,
hv_pcibus_removed,
hv_pcibus_maximum
};
struct hv_pcibus_device {
struct pci_sysdata sysdata;
+ /* Protocol version negotiated with the host */
+ enum pci_protocol_version_t protocol_version;
enum hv_pcibus_state state;
refcount_t remove_lock;
struct hv_device *hdev;
@@ -539,7 +537,7 @@ struct hv_pci_dev {
* What would be observed if one wrote 0xFFFFFFFF to a BAR and then
* read it back, for each of the BAR offsets within config space.
*/
- u32 probed_bar[6];
+ u32 probed_bar[PCI_STD_NUM_BARS];
};
struct hv_pci_compl {
@@ -1224,7 +1222,7 @@ static void hv_irq_unmask(struct irq_data *data)
* negative effect (yet?).
*/
- if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+ if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
/*
* PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
* HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
@@ -1394,7 +1392,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
ctxt.pci_pkt.compl_ctxt = &comp;
- switch (pci_protocol_version) {
+ switch (hbus->protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
@@ -1610,7 +1608,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
* so it's sufficient to just add them up without tracking alignment.
*/
list_for_each_entry(hpdev, &hbus->children, list_entry) {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device,
"There's an I/O BAR in this list!\n");
@@ -1681,10 +1679,27 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
spin_lock_irqsave(&hbus->device_list_lock, flags);
+ /*
+ * Clear the memory enable bit, in case it's already set. This occurs
+ * in the suspend path of hibernation, where the device is suspended,
+ * resumed and suspended again: see hibernation_snapshot() and
+ * hibernation_platform_enter().
+ *
+ * If the memory enable bit is already set, Hyper-V silently ignores
+ * the BAR updates below, and the related PCI device driver cannot
+ * work, because reading from the device register(s) always returns
+ * 0xFFFFFFFF.
+ */
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
+ command &= ~PCI_COMMAND_MEMORY;
+ _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
+ }
+
/* Pick addresses for the BARs. */
do {
list_for_each_entry(hpdev, &hbus->children, list_entry) {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_val = hpdev->probed_bar[i];
if (bar_val == 0)
continue;
@@ -1841,7 +1856,7 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
"query resource requirements failed: %x\n",
resp->status);
} else {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
completion->hpdev->probed_bar[i] =
q_res_req->probed_bar[i];
}
@@ -2107,6 +2122,12 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
unsigned long flags;
bool pending_dr;
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hbus->hdev->device,
+ "PCI VMBus BUS_RELATIONS: ignored\n");
+ return;
+ }
+
dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
if (!dr_wrk)
return;
@@ -2223,11 +2244,19 @@ static void hv_eject_device_work(struct work_struct *work)
*/
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct hv_device *hdev = hbus->hdev;
+
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
+ return;
+ }
+
hpdev->state = hv_pcichild_ejecting;
get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
- get_hvpcibus(hpdev->hbus);
- queue_work(hpdev->hbus->wq, &hpdev->wrk);
+ get_hvpcibus(hbus);
+ queue_work(hbus->wq, &hpdev->wrk);
}
/**
@@ -2379,8 +2408,11 @@ static void hv_pci_onchannelcallback(void *context)
* failing if the host doesn't support the necessary protocol
* level.
*/
-static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+static int hv_pci_protocol_negotiation(struct hv_device *hdev,
+ enum pci_protocol_version_t version[],
+ int num_version)
{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct pci_version_request *version_req;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
@@ -2403,8 +2435,8 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
version_req = (struct pci_version_request *)&pkt->message;
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
- for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
- version_req->protocol_version = pci_protocol_versions[i];
+ for (i = 0; i < num_version; i++) {
+ version_req->protocol_version = version[i];
ret = vmbus_sendpacket(hdev->channel, version_req,
sizeof(struct pci_version_request),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
@@ -2420,10 +2452,10 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
}
if (comp_pkt.completion_status >= 0) {
- pci_protocol_version = pci_protocol_versions[i];
+ hbus->protocol_version = version[i];
dev_info(&hdev->device,
"PCI VMBus probing: Using version %#x\n",
- pci_protocol_version);
+ hbus->protocol_version);
goto exit;
}
@@ -2707,7 +2739,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
u32 wslot;
int ret;
- size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+ size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
? sizeof(*res_assigned) : sizeof(*res_assigned2);
pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
@@ -2726,7 +2758,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+ if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
(struct pci_resources_assigned *)&pkt->message;
res_assigned->message_type.type =
@@ -2870,9 +2902,27 @@ static int hv_pci_probe(struct hv_device *hdev,
* hv_pcibus_device contains the hypercall arguments for retargeting in
* hv_irq_unmask(). Those must not cross a page boundary.
*/
- BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
- hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
+ /*
+ * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
+ * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
+ * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
+ * alignment of hbus is important because hbus's field
+ * retarget_msi_interrupt_params must not cross a 4KB page boundary.
+ *
+ * Here we prefer kzalloc to get_zeroed_page(), because a buffer
+ * allocated by the latter is not tracked and scanned by kmemleak, and
+ * hence kmemleak reports the pointer contained in the hbus buffer
+ * (i.e. the hpdev struct, which is created in new_pcichild_device() and
+ * is tracked by hbus->children) as a memory leak (false positive).
+ *
+ * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
+ * used to allocate the hbus buffer and we can avoid the kmemleak false
+ * positive by using kmemleak_alloc() and kmemleak_free() to ask
+ * kmemleak to track and scan the hbus buffer.
+ */
+ hbus = (struct hv_pcibus_device *)kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hbus)
return -ENOMEM;
hbus->state = hv_pcibus_init;
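The comment above points to a fallback for kernels that lack 59bb47985c1d, but the hunk only shows the kzalloc() path. A minimal sketch of that alternative (not part of this patch; kmemleak_alloc()/kmemleak_free() come from <linux/kmemleak.h>) would keep the page allocator and register the buffer with kmemleak explicitly:

	hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;
	/*
	 * Ask kmemleak to track and scan the buffer so the hpdev pointers
	 * stored in it are not reported as leaked.
	 */
	kmemleak_alloc(hbus, PAGE_SIZE, 1, GFP_KERNEL);

	/* ...and on the teardown path, instead of kfree(hbus): */
	kmemleak_free(hbus);
	free_page((unsigned long)hbus);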
@@ -2930,7 +2980,8 @@ static int hv_pci_probe(struct hv_device *hdev,
hv_set_drvdata(hdev, hbus);
- ret = hv_pci_protocol_negotiation(hdev);
+ ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
+ ARRAY_SIZE(pci_protocol_versions));
if (ret)
goto close;
@@ -3011,7 +3062,7 @@ free_bus:
return ret;
}
-static void hv_pci_bus_exit(struct hv_device *hdev)
+static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct {
@@ -3027,16 +3078,20 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
* access the per-channel ringbuffer any longer.
*/
if (hdev->channel->rescind)
- return;
+ return 0;
- /* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
+ if (!hibernating) {
+ /* Delete any children which might still exist. */
+ memset(&relations, 0, sizeof(relations));
+ hv_pci_devices_present(hbus, &relations);
+ }
ret = hv_send_resources_released(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->device,
"Couldn't send resources released packet(s)\n");
+ return ret;
+ }
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
@@ -3049,8 +3104,13 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
(unsigned long)&pkt.teardown_packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (!ret)
- wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+ if (ret)
+ return ret;
+
+ if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
+ return -ETIMEDOUT;
+
+ return 0;
}
/**
@@ -3062,6 +3122,7 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
static int hv_pci_remove(struct hv_device *hdev)
{
struct hv_pcibus_device *hbus;
+ int ret;
hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
@@ -3074,7 +3135,7 @@ static int hv_pci_remove(struct hv_device *hdev)
hbus->state = hv_pcibus_removed;
}
- hv_pci_bus_exit(hdev);
+ ret = hv_pci_bus_exit(hdev, false);
vmbus_close(hdev->channel);
@@ -3090,10 +3151,97 @@ static int hv_pci_remove(struct hv_device *hdev)
hv_put_dom_num(hbus->sysdata.domain);
- free_page((unsigned long)hbus);
+ kfree(hbus);
+ return ret;
+}
+
+static int hv_pci_suspend(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum hv_pcibus_state old_state;
+ int ret;
+
+ /*
+ * hv_pci_suspend() must make sure there are no pending work items
+ * before calling vmbus_close(), since it runs in a process context
+ * as a callback in dpm_suspend(). When it starts to run, the channel
+ * callback hv_pci_onchannelcallback(), which runs in a tasklet
+ * context, can still be running concurrently and scheduling new work
+ * items onto hbus->wq in hv_pci_devices_present() and
+ * hv_pci_eject_device(), and the work item handlers can access the
+ * vmbus channel, which hv_pci_suspend() may be closing at the same time, e.g.
+ * the work item handler pci_devices_present_work() ->
+ * new_pcichild_device() writes to the vmbus channel.
+ *
+ * To eliminate the race, hv_pci_suspend() disables the channel
+ * callback tasklet, sets hbus->state to hv_pcibus_removing, and
+ * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
+ * it knows that no new work item can be scheduled, and then it flushes
+ * hbus->wq and safely closes the vmbus channel.
+ */
+ tasklet_disable(&hdev->channel->callback_event);
+
+ /* Change the hbus state to prevent new work items. */
+ old_state = hbus->state;
+ if (hbus->state == hv_pcibus_installed)
+ hbus->state = hv_pcibus_removing;
+
+ tasklet_enable(&hdev->channel->callback_event);
+
+ if (old_state != hv_pcibus_installed)
+ return -EINVAL;
+
+ flush_workqueue(hbus->wq);
+
+ ret = hv_pci_bus_exit(hdev, true);
+ if (ret)
+ return ret;
+
+ vmbus_close(hdev->channel);
+
return 0;
}
+static int hv_pci_resume(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum pci_protocol_version_t version[1];
+ int ret;
+
+ hbus->state = hv_pcibus_init;
+
+ ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+ hv_pci_onchannelcallback, hbus);
+ if (ret)
+ return ret;
+
+ /* Only use the version that was in use before hibernation. */
+ version[0] = hbus->protocol_version;
+ ret = hv_pci_protocol_negotiation(hdev, version, 1);
+ if (ret)
+ goto out;
+
+ ret = hv_pci_query_relations(hdev);
+ if (ret)
+ goto out;
+
+ ret = hv_pci_enter_d0(hdev);
+ if (ret)
+ goto out;
+
+ ret = hv_send_resources_allocated(hdev);
+ if (ret)
+ goto out;
+
+ prepopulate_bars(hbus);
+
+ hbus->state = hv_pcibus_installed;
+ return 0;
+out:
+ vmbus_close(hdev->channel);
+ return ret;
+}
+
static const struct hv_vmbus_device_id hv_pci_id_table[] = {
/* PCI Pass-through Class ID */
/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
@@ -3108,6 +3256,8 @@ static struct hv_driver hv_pci_drv = {
.id_table = hv_pci_id_table,
.probe = hv_pci_probe,
.remove = hv_pci_remove,
+ .suspend = hv_pci_suspend,
+ .resume = hv_pci_resume,
};
static void __exit exit_hv_pci_drv(void)
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index d3a0419e42f2..153a64676bc9 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -554,7 +554,7 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
-struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
.write_base = mvebu_pci_bridge_emul_base_conf_write,
.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
@@ -713,7 +713,7 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
ret = of_address_to_resource(np, 0, &regs);
if (ret)
- return ERR_PTR(ret);
+ return (void __iomem *)ERR_PTR(ret);
return devm_ioremap_resource(&pdev->dev, &regs);
}
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index f127ce8bd4ef..9491e266b1ea 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-acpi.h>
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index d219404bad92..bd05221f5a22 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -241,10 +241,8 @@ struct v3_pci {
void __iomem *config_base;
struct pci_bus *bus;
u32 config_mem;
- u32 io_mem;
u32 non_pre_mem;
u32 pre_mem;
- phys_addr_t io_bus_addr;
phys_addr_t non_pre_bus_addr;
phys_addr_t pre_bus_addr;
struct regmap *map;
@@ -520,35 +518,22 @@ static int v3_integrator_init(struct v3_pci *v3)
}
static int v3_pci_setup_resource(struct v3_pci *v3,
- resource_size_t io_base,
struct pci_host_bridge *host,
struct resource_entry *win)
{
struct device *dev = v3->dev;
struct resource *mem;
struct resource *io;
- int ret;
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
- io->name = "V3 PCI I/O";
- v3->io_mem = io_base;
- v3->io_bus_addr = io->start - win->offset;
- dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
- io, &v3->io_bus_addr);
- ret = devm_pci_remap_iospace(dev, io, io_base);
- if (ret) {
- dev_warn(dev,
- "error %d: failed to map resource %pR\n",
- ret, io);
- return ret;
- }
+
/* Setup window 2 - PCI I/O */
- writel(v3_addr_to_lb_base2(v3->io_mem) |
+ writel(v3_addr_to_lb_base2(pci_pio_to_address(io->start)) |
V3_LB_BASE2_ENABLE,
v3->base + V3_LB_BASE2);
- writew(v3_addr_to_lb_map2(v3->io_bus_addr),
+ writew(v3_addr_to_lb_map2(io->start - win->offset),
v3->base + V3_LB_MAP2);
break;
case IORESOURCE_MEM:
@@ -613,28 +598,30 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
}
static int v3_get_dma_range_config(struct v3_pci *v3,
- struct of_pci_range *range,
+ struct resource_entry *entry,
u32 *pci_base, u32 *pci_map)
{
struct device *dev = v3->dev;
- u64 cpu_end = range->cpu_addr + range->size - 1;
- u64 pci_end = range->pci_addr + range->size - 1;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_end = cpu_end - entry->offset;
+ u64 pci_addr = entry->res->start - entry->offset;
u32 val;
- if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
+ if (pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
return -EINVAL;
}
- val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE;
+ val = ((u32)pci_addr) & V3_PCI_BASE_M_ADR_BASE;
*pci_base = val;
- if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
+ if (cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
return -EINVAL;
}
- val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
+ val = ((u32)cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
- switch (range->size) {
+ switch (resource_size(entry->res)) {
case SZ_1M:
val |= V3_LB_BASE_ADR_SIZE_1MB;
break;
@@ -682,8 +669,8 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
dev_dbg(dev,
"DMA MEM CPU: 0x%016llx -> 0x%016llx => "
"PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
- range->cpu_addr, cpu_end,
- range->pci_addr, pci_end,
+ cpu_addr, cpu_end,
+ pci_addr, pci_end,
*pci_base, *pci_map);
return 0;
@@ -692,24 +679,16 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
struct device_node *np)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(v3);
struct device *dev = v3->dev;
+ struct resource_entry *entry;
int i = 0;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /*
- * Get the dma-ranges from the device tree
- */
- for_each_of_pci_range(&parser, &range) {
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
int ret;
u32 pci_base, pci_map;
- ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map);
+ ret = v3_get_dma_range_config(v3, entry, &pci_base, &pci_map);
if (ret)
return ret;
@@ -732,7 +711,6 @@ static int v3_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- resource_size_t io_base;
struct resource *regs;
struct resource_entry *win;
struct v3_pci *v3;
@@ -741,7 +719,6 @@ static int v3_pci_probe(struct platform_device *pdev)
u16 val;
int irq;
int ret;
- LIST_HEAD(res);
host = pci_alloc_host_bridge(sizeof(*v3));
if (!host)
@@ -793,12 +770,8 @@ static int v3_pci_probe(struct platform_device *pdev)
if (IS_ERR(v3->config_base))
return PTR_ERR(v3->config_base);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &res);
+ ret = pci_parse_request_of_pci_ranges(dev, &host->windows,
+ &host->dma_ranges, NULL);
if (ret)
return ret;
@@ -852,8 +825,8 @@ static int v3_pci_probe(struct platform_device *pdev)
writew(val, v3->base + V3_PCI_CMD);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- ret = v3_pci_setup_resource(v3, io_base, host, win);
+ resource_list_for_each_entry(win, &host->windows) {
+ ret = v3_pci_setup_resource(v3, host, win);
if (ret) {
dev_err(dev, "error setting up resources\n");
return ret;
@@ -931,7 +904,6 @@ static int v3_pci_probe(struct platform_device *pdev)
val |= V3_SYSTEM_M_LOCK;
writew(val, v3->base + V3_SYSTEM);
- list_splice_init(&res, &host->windows);
ret = pci_scan_root_bus_bridge(host);
if (ret) {
dev_err(dev, "failed to register host: %d\n", ret);
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index f59ad2728c0b..b911359b6d81 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -62,65 +62,16 @@ static struct pci_ops pci_versatile_ops = {
.write = pci_generic_config_write,
};
-static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *res)
-{
- int err, mem = 1, res_valid = 0;
- resource_size_t iobase;
- struct resource_entry *win, *tmp;
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, res);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, res) {
- struct resource *res = win->res;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
-
- writel(res->start >> 28, PCI_IMAP(mem));
- writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
- mem++;
-
- break;
- }
- }
-
- if (res_valid)
- return 0;
-
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
-
-out_release_res:
- pci_free_resource_list(res);
- return err;
-}
-
static int versatile_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
- int ret, i, myslot = -1;
+ struct resource_entry *entry;
+ int ret, i, myslot = -1, mem = 1;
u32 val;
void __iomem *local_pci_cfg_base;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
- LIST_HEAD(pci_res);
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
@@ -141,10 +92,19 @@ static int versatile_pci_probe(struct platform_device *pdev)
if (IS_ERR(versatile_cfg_base[1]))
return PTR_ERR(versatile_cfg_base[1]);
- ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ NULL, NULL);
if (ret)
return ret;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ writel(entry->res->start >> 28, PCI_IMAP(mem));
+ writel(__pa(PAGE_OFFSET) >> 28, PCI_SMAP(mem));
+ mem++;
+ }
+ }
+
/*
* We need to discover the PCI core first to configure itself
* before the main PCI probing is performed
@@ -177,9 +137,9 @@ static int versatile_pci_probe(struct platform_device *pdev)
/*
* Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
*/
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_2);
/*
* For many years the kernel and QEMU were symbiotically buggy
@@ -197,7 +157,6 @@ static int versatile_pci_probe(struct platform_device *pdev)
pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
- list_splice_init(&pci_res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = NULL;
bridge->busnr = 0;
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index ffda3e8b4742..de195fd430dc 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -405,15 +405,13 @@ static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
-static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
- struct list_head *res,
- resource_size_t io_base)
+static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *window;
struct device *dev = port->dev;
- int ret;
- resource_list_for_each_entry(window, res) {
+ resource_list_for_each_entry(window, &bridge->windows) {
struct resource *res = window->res;
u64 restype = resource_type(res);
@@ -421,11 +419,9 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
switch (restype) {
case IORESOURCE_IO:
- xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
+ xgene_pcie_setup_ob_reg(port, res, OMR3BARL,
+ pci_pio_to_address(res->start),
res->start - window->offset);
- ret = devm_pci_remap_iospace(dev, res, io_base);
- if (ret < 0)
- return ret;
break;
case IORESOURCE_MEM:
if (res->flags & IORESOURCE_PREFETCH)
@@ -485,27 +481,28 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
}
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
- struct of_pci_range *range, u8 *ib_reg_mask)
+ struct resource_entry *entry,
+ u8 *ib_reg_mask)
{
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void *bar_addr;
u32 pim_reg;
- u64 cpu_addr = range->cpu_addr;
- u64 pci_addr = range->pci_addr;
- u64 size = range->size;
+ u64 cpu_addr = entry->res->start;
+ u64 pci_addr = cpu_addr - entry->offset;
+ u64 size = resource_size(entry->res);
u64 mask = ~(size - 1) | EN_REG;
u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
u32 bar_low;
int region;
- region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
+ region = xgene_pcie_select_ib_reg(ib_reg_mask, size);
if (region < 0) {
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
- if (range->flags & IORESOURCE_PREFETCH)
+ if (entry->res->flags & IORESOURCE_PREFETCH)
flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
@@ -536,25 +533,13 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
- struct device_node *np = port->node;
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- struct device *dev = port->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
+ struct resource_entry *entry;
u8 ib_reg_mask = 0;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /* Get the dma-ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.cpu_addr + range.size - 1;
+ resource_list_for_each_entry(entry, &bridge->dma_ranges)
+ xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask);
- dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
- range.flags, range.cpu_addr, end, range.pci_addr);
- xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
- }
return 0;
}
@@ -567,8 +552,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
xgene_pcie_writel(port, i, 0);
}
-static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
- resource_size_t io_base)
+static int xgene_pcie_setup(struct xgene_pcie_port *port)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
@@ -580,7 +564,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
xgene_pcie_writel(port, BRIDGE_CFG_0, val);
- ret = xgene_pcie_map_ranges(port, res, io_base);
+ ret = xgene_pcie_map_ranges(port);
if (ret)
return ret;
@@ -607,11 +591,9 @@ static int xgene_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
- resource_size_t iobase = 0;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int ret;
- LIST_HEAD(res);
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
if (!bridge)
@@ -634,20 +616,15 @@ static int xgene_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret)
return ret;
- ret = devm_request_pci_bus_resources(dev, &res);
- if (ret)
- goto error;
-
- ret = xgene_pcie_setup(port, &res, iobase);
+ ret = xgene_pcie_setup(port);
if (ret)
- goto error;
+ return ret;
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = port;
bridge->busnr = 0;
@@ -657,7 +634,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
ret = pci_scan_root_bus_bridge(bridge);
if (ret < 0)
- goto error;
+ return ret;
bus = bridge->bus;
@@ -666,10 +643,6 @@ static int xgene_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return ret;
}
static const struct of_device_id xgene_pcie_match_table[] = {
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index d2497ca43828..b447c3e4abad 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -92,7 +92,6 @@ struct altera_pcie {
u8 root_bus_nr;
struct irq_domain *irq_domain;
struct resource bus_range;
- struct list_head resources;
const struct altera_pcie_data *pcie_data;
};
@@ -670,39 +669,6 @@ static void altera_pcie_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
-{
- int err, res_valid = 0;
- struct device *dev = &pcie->pdev->dev;
- struct resource_entry *win;
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, NULL);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry(win, &pcie->resources) {
- struct resource *res = win->res;
-
- if (resource_type(res) == IORESOURCE_MEM)
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- }
-
- if (res_valid)
- return 0;
-
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
-
-out_release_res:
- pci_free_resource_list(&pcie->resources);
- return err;
-}
-
static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
@@ -833,9 +799,8 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- INIT_LIST_HEAD(&pcie->resources);
-
- ret = altera_pcie_parse_request_of_pci_ranges(pcie);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "Failed add resources\n");
return ret;
@@ -853,7 +818,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -884,7 +848,6 @@ static int altera_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
- pci_free_resource_list(&pcie->resources);
altera_pcie_irq_teardown(pcie);
return 0;
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 0a3f61be5625..3176ad3ab0e5 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -293,11 +293,12 @@ static const struct irq_domain_ops msi_domain_ops = {
static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
- u32 *msg, hwirq;
+ u32 __iomem *msg;
+ u32 hwirq;
unsigned int offs;
offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
- msg = (u32 *)(msi->eq_cpu + offs);
+ msg = (u32 __iomem *)(msi->eq_cpu + offs);
hwirq = readl(msg);
hwirq = (hwirq >> 5) + (hwirq & 0x1f);
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index 9ee6200a66f4..ff0a81a632a1 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -43,8 +43,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
struct iproc_pcie *pcie;
struct device_node *np = dev->of_node;
struct resource reg;
- resource_size_t iobase = 0;
- LIST_HEAD(resources);
struct pci_host_bridge *bridge;
int ret;
@@ -97,8 +95,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
if (IS_ERR(pcie->phy))
return PTR_ERR(pcie->phy);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
- &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
@@ -113,10 +111,9 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->map_irq = of_irq_parse_and_map_pci;
}
- ret = iproc_pcie_setup(pcie, &resources);
+ ret = iproc_pcie_setup(pcie, &bridge->windows);
if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
- pci_free_resource_list(&resources);
return ret;
}
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 2d457bfdaf66..0a468c73bae3 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -1122,15 +1122,16 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
}
static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
- struct of_pci_range *range,
+ struct resource_entry *entry,
enum iproc_pcie_ib_map_type type)
{
struct device *dev = pcie->dev;
struct iproc_pcie_ib *ib = &pcie->ib;
int ret;
unsigned int region_idx, size_idx;
- u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
- resource_size_t size = range->size;
+ u64 axi_addr = entry->res->start;
+ u64 pci_addr = entry->res->start - entry->offset;
+ resource_size_t size = resource_size(entry->res);
/* iterate through all IARR mapping regions */
for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
@@ -1182,67 +1183,46 @@ err_ib:
return ret;
}
-static int iproc_pcie_add_dma_range(struct device *dev,
- struct list_head *resources,
- struct of_pci_range *range)
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
- struct resource *res;
- struct resource_entry *entry, *tmp;
- struct list_head *head = resources;
-
- res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct resource_entry *entry;
+ int ret = 0;
- resource_list_for_each_entry(tmp, resources) {
- if (tmp->res->start < range->cpu_addr)
- head = &tmp->node;
+ resource_list_for_each_entry(entry, &host->dma_ranges) {
+ /* Each range entry corresponds to an inbound mapping region */
+ ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM);
+ if (ret)
+ break;
}
- res->start = range->cpu_addr;
- res->end = res->start + range->size - 1;
-
- entry = resource_list_create_entry(res, 0);
- if (!entry)
- return -ENOMEM;
-
- entry->offset = res->start - range->cpu_addr;
- resource_list_add(entry, head);
-
- return 0;
+ return ret;
}
-static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie)
{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- int ret;
- LIST_HEAD(resources);
+ struct iproc_pcie_ib *ib = &pcie->ib;
+ struct iproc_pcie_ob *ob = &pcie->ob;
+ int idx;
- /* Get the dma-ranges from DT */
- ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
- if (ret)
- return ret;
+ if (pcie->ep_is_internal)
+ return;
- for_each_of_pci_range(&parser, &range) {
- ret = iproc_pcie_add_dma_range(pcie->dev,
- &resources,
- &range);
- if (ret)
- goto out;
- /* Each range entry corresponds to an inbound mapping region */
- ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
- if (ret)
- goto out;
+ if (pcie->need_ob_cfg) {
+ /* iterate through all OARR mapping regions */
+ for (idx = ob->nr_windows - 1; idx >= 0; idx--) {
+ iproc_pcie_write_reg(pcie,
+ MAP_REG(IPROC_PCIE_OARR0, idx), 0);
+ }
}
- list_splice_init(&resources, &host->dma_ranges);
-
- return 0;
-out:
- pci_free_resource_list(&resources);
- return ret;
+ if (pcie->need_ib_cfg) {
+ /* iterate through all IARR mapping regions */
+ for (idx = 0; idx < ib->nr_regions; idx++) {
+ iproc_pcie_write_reg(pcie,
+ MAP_REG(IPROC_PCIE_IARR0, idx), 0);
+ }
+ }
}
static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
@@ -1276,13 +1256,16 @@ static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
{
int ret;
- struct of_pci_range range;
+ struct resource_entry entry;
- memset(&range, 0, sizeof(range));
- range.size = SZ_32K;
- range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
+ memset(&entry, 0, sizeof(entry));
+ entry.res = &entry.__res;
- ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
+ msi_addr &= ~(SZ_32K - 1);
+ entry.res->start = msi_addr;
+ entry.res->end = msi_addr + SZ_32K - 1;
+
+ ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO);
return ret;
}
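The on-stack resource_entry used above works because struct resource_entry carries an embedded resource that entry.res can be pointed at. For reference, the layout (from include/linux/resource_ext.h, reproduced approximately) is:

struct resource_entry {
	struct list_head	node;
	struct resource		*res;	/* usually points at an external resource... */
	resource_size_t		offset;
	struct resource		__res;	/* ...or at this embedded one */
};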
@@ -1498,10 +1481,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
return ret;
}
- ret = devm_request_pci_bus_resources(dev, res);
- if (ret)
- return ret;
-
ret = phy_init(pcie->phy);
if (ret) {
dev_err(dev, "unable to initialize PCIe PHY\n");
@@ -1517,6 +1496,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
iproc_pcie_perst_ctrl(pcie, true);
iproc_pcie_perst_ctrl(pcie, false);
+ iproc_pcie_invalidate_mapping(pcie);
+
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
if (ret) {
@@ -1543,7 +1524,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (iproc_pcie_msi_enable(pcie))
dev_info(dev, "not using iProc MSI\n");
- list_splice_init(res, &host->windows);
host->busnr = 0;
host->dev.parent = dev;
host->ops = &iproc_pcie_ops;
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 626a7c352dfd..cb982891b22b 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -216,7 +216,6 @@ struct mtk_pcie {
void __iomem *base;
struct clk *free_ck;
- struct resource mem;
struct list_head ports;
const struct mtk_pcie_soc *soc;
unsigned int busnr;
@@ -661,11 +660,19 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
- struct resource *mem = &pcie->mem;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct resource *mem = NULL;
+ struct resource_entry *entry;
const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
int err;
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (entry)
+ mem = entry->res;
+ if (!mem)
+ return -EINVAL;
+
/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
if (pcie->base) {
val = readl(pcie->base + PCIE_SYS_CFG_V2);
@@ -1023,39 +1030,15 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
struct mtk_pcie_port *port, *tmp;
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct list_head *windows = &host->windows;
- struct resource_entry *win, *tmp_win;
- resource_size_t io_base;
+ struct resource *bus;
int err;
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- windows, &io_base);
+ err = pci_parse_request_of_pci_ranges(dev, windows,
+ &host->dma_ranges, &bus);
if (err)
return err;
- err = devm_request_pci_bus_resources(dev, windows);
- if (err < 0)
- return err;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp_win, windows) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, win->res, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, win->res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- memcpy(&pcie->mem, win->res, sizeof(*win->res));
- pcie->mem.name = "non-prefetchable";
- break;
- case IORESOURCE_BUS:
- pcie->busnr = win->res->start;
- break;
- }
- }
+ pcie->busnr = bus->start;
for_each_available_child_of_node(node, child) {
int slot;
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index a45a6447b01d..3a696ca45bfa 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -140,7 +140,6 @@ struct mobiveil_msi { /* MSI information */
struct mobiveil_pcie {
struct platform_device *pdev;
- struct list_head resources;
void __iomem *config_axi_slave_base; /* endpoint config base */
void __iomem *csr_axi_slave_base; /* root port config base */
void __iomem *apb_csr_base; /* MSI register base */
@@ -235,7 +234,7 @@ static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
{
void *addr;
u32 val;
@@ -250,7 +249,8 @@ static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
return val;
}
-static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
+static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size)
{
void *addr;
int ret;
@@ -262,19 +262,19 @@ static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
dev_err(&pcie->pdev->dev, "write CSR address failed\n");
}
-static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
+static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
{
- return csr_read(pcie, off, 0x4);
+ return mobiveil_csr_read(pcie, off, 0x4);
}
-static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
{
- csr_write(pcie, val, off, 0x4);
+ mobiveil_csr_write(pcie, val, off, 0x4);
}
static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
{
- return (csr_readl(pcie, LTSSM_STATUS) &
+ return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
}
@@ -323,7 +323,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
- csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
return pcie->config_axi_slave_base + where;
}
@@ -353,13 +353,14 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
chained_irq_enter(chip, desc);
/* read INTx status */
- val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
- mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
intr_status = val & mask;
/* Handle INTx */
if (intr_status & PAB_INTP_INTX_MASK) {
- shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
do {
@@ -373,12 +374,13 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
bit);
/* clear interrupt handled */
- csr_writel(pcie, 1 << (PAB_INTX_START + bit),
- PAB_INTP_AMBA_MISC_STAT);
+ mobiveil_csr_writel(pcie,
+ 1 << (PAB_INTX_START + bit),
+ PAB_INTP_AMBA_MISC_STAT);
}
- shifted_status = csr_readl(pcie,
- PAB_INTP_AMBA_MISC_STAT);
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
} while (shifted_status != 0);
@@ -413,7 +415,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
}
/* Clear the interrupt status */
- csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
chained_irq_exit(chip, desc);
}
@@ -474,24 +476,24 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
return;
}
- value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+ mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
- csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
- csr_writel(pcie, lower_32_bits(cpu_addr),
- PAB_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+ PAB_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_H(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
pcie->ib_wins_configured++;
}
@@ -515,27 +517,29 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
* program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
* to 4 KB in PAB_AXI_AMAP_CTRL register
*/
- value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
- csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
/*
* program AXI window base with appropriate value in
* PAB_AXI_AMAP_AXI_WIN0 register
*/
- csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie,
+ lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
pcie->ob_wins_configured++;
}
@@ -575,46 +579,47 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u32 value, pab_ctrl, type;
struct resource_entry *win;
/* setup bus numbers */
- value = csr_readl(pcie, PCI_PRIMARY_BUS);
+ value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
value &= 0xff000000;
value |= 0x00ff0100;
- csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
/*
* program Bus Master Enable Bit in Command Register in PAB Config
* Space
*/
- value = csr_readl(pcie, PCI_COMMAND);
+ value = mobiveil_csr_readl(pcie, PCI_COMMAND);
value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- csr_writel(pcie, value, PCI_COMMAND);
+ mobiveil_csr_writel(pcie, value, PCI_COMMAND);
/*
* program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
* register
*/
- pab_ctrl = csr_readl(pcie, PAB_CTRL);
+ pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
- csr_writel(pcie, pab_ctrl, PAB_CTRL);
+ mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
- csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
*/
- value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
value |= APIO_EN_MASK;
- csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+ mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
/* Enable PCIe PIO master */
- value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
value |= 1 << PIO_ENABLE_SHIFT;
- csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
+ mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
/*
* we'll program one outbound window for config reads and
@@ -631,7 +636,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &pcie->resources) {
+ resource_list_for_each_entry(win, &bridge->windows) {
if (resource_type(win->res) == IORESOURCE_MEM)
type = MEM_WINDOW_TYPE;
else if (resource_type(win->res) == IORESOURCE_IO)
@@ -647,10 +652,10 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
}
/* fixup for PCIe class register */
- value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+ value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
value &= 0xff;
value |= (PCI_CLASS_BRIDGE_PCI << 16);
- csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+ mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
/* setup MSI hardware registers */
mobiveil_pcie_enable_msi(pcie);
@@ -668,9 +673,9 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
- shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val &= ~mask;
- csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -684,9 +689,9 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
- shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val |= mask;
- csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -857,7 +862,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
struct device *dev = &pdev->dev;
- resource_size_t iobase;
int ret;
/* allocate the PCIe port */
@@ -875,11 +879,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return ret;
}
- INIT_LIST_HEAD(&pcie->resources);
-
/* parse the host bridge base addresses from the device tree file */
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "Getting bridge resources failed\n");
return ret;
@@ -892,24 +894,19 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_host_init(pcie);
if (ret) {
dev_err(dev, "Failed to initialize host\n");
- goto error;
+ return ret;
}
/* initialize the IRQ domains */
ret = mobiveil_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(dev, "Failed creating IRQ Domain\n");
- goto error;
+ return ret;
}
irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
- ret = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (ret)
- goto error;
-
/* Initialize bridge */
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -920,13 +917,13 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_bringup_link(pcie);
if (ret) {
dev_info(dev, "link bring-up failed\n");
- goto error;
+ return ret;
}
/* setup the kernel resources for the newly added PCIe root bus */
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
- goto error;
+ return ret;
bus = bridge->bus;
@@ -936,9 +933,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
-error:
- pci_free_resource_list(&pcie->resources);
- return ret;
}
static const struct of_device_id mobiveil_pcie_of_match[] = {
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index f6a669a9af41..759c6542c5c8 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -30,8 +30,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include "../pci.h"
-
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE BIT(31)
@@ -93,8 +91,11 @@
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
+#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
#define SPEED_CHANGE BIT(24)
#define SCRAMBLE_DISABLE BIT(27)
+#define LTSMDIS BIT(31)
+#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
#define PMSR 0x01105c
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
@@ -615,6 +616,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
if (IS_ENABLED(CONFIG_PCI_MSI))
rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
/* Finish initialization - establish a PCI Express link */
rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
@@ -1014,40 +1017,43 @@ err_irq1:
}
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
- struct of_pci_range *range,
+ struct resource_entry *entry,
int *index)
{
- u64 restype = range->flags;
- u64 cpu_addr = range->cpu_addr;
- u64 cpu_end = range->cpu_addr + range->size;
- u64 pci_addr = range->pci_addr;
+ u64 restype = entry->res->flags;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_addr = entry->res->start - entry->offset;
u32 flags = LAM_64BIT | LAR_ENABLE;
u64 mask;
- u64 size;
+ u64 size = resource_size(entry->res);
int idx = *index;
if (restype & IORESOURCE_PREFETCH)
flags |= LAM_PREFETCH;
- /*
- * If the size of the range is larger than the alignment of the start
- * address, we have to use multiple entries to perform the mapping.
- */
- if (cpu_addr > 0) {
- unsigned long nr_zeros = __ffs64(cpu_addr);
- u64 alignment = 1ULL << nr_zeros;
+ while (cpu_addr < cpu_end) {
+ if (idx >= MAX_NR_INBOUND_MAPS - 1) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ /*
+ * If the size of the range is larger than the alignment of
+ * the start address, we have to use multiple entries to
+ * perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
- size = min(range->size, alignment);
- } else {
- size = range->size;
- }
- /* Hardware supports max 4GiB inbound region */
- size = min(size, 1ULL << 32);
+ size = min(size, alignment);
+ }
+ /* Hardware supports max 4GiB inbound region */
+ size = min(size, 1ULL << 32);
- mask = roundup_pow_of_two(size) - 1;
- mask &= ~0xf;
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
- while (cpu_addr < cpu_end) {
/*
* Set up 64-bit inbound regions as the range parser doesn't
* distinguish between 32 and 64-bit types.
@@ -1067,41 +1073,25 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
pci_addr += size;
cpu_addr += size;
idx += 2;
-
- if (idx > MAX_NR_INBOUND_MAPS) {
- dev_err(pcie->dev, "Failed to map inbound regions!\n");
- return -EINVAL;
- }
}
*index = idx;
return 0;
}
-static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
- struct device_node *np)
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- int index = 0;
- int err;
-
- if (of_pci_dma_range_parser_init(&parser, np))
- return -EINVAL;
-
- /* Get the dma-ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.cpu_addr + range.size - 1;
-
- dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
- range.flags, range.cpu_addr, end, range.pci_addr);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct resource_entry *entry;
+ int index = 0, err = 0;
- err = rcar_pcie_inbound_ranges(pcie, &range, &index);
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = rcar_pcie_inbound_ranges(pcie, entry, &index);
if (err)
- return err;
+ break;
}
- return 0;
+ return err;
}
static const struct of_device_id rcar_pcie_of_match[] = {
@@ -1138,7 +1128,8 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
platform_set_drvdata(pdev, pcie);
- err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
+ err = pci_parse_request_of_pci_ranges(dev, &pcie->resources,
+ &bridge->dma_ranges, NULL);
if (err)
goto err_free_bridge;
@@ -1161,7 +1152,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
goto err_unmap_msi_irqs;
}
- err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
+ err = rcar_pcie_parse_map_dma_ranges(pcie);
if (err)
goto err_clk_disable;
@@ -1237,6 +1228,7 @@ static int rcar_pcie_resume_noirq(struct device *dev)
return 0;
/* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
return rcar_pcie_wait_for_dl(pcie);
}
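The restructured rcar_pcie_inbound_ranges() loop above caps each programmed chunk at both the alignment of the current CPU address and the controller's 4 GiB inbound limit. A minimal stand-alone sketch of that size calculation follows (plain C with made-up addresses, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_addr = 0x48000000ULL;             /* example start */
	uint64_t cpu_end  = cpu_addr + 0x180000000ULL; /* 6 GiB region */

	while (cpu_addr < cpu_end) {
		uint64_t size = cpu_end - cpu_addr;
		uint64_t align;

		/* chunk may not exceed the alignment of the start address */
		if (cpu_addr) {
			align = cpu_addr & -cpu_addr;
			if (size > align)
				size = align;
		}

		/* hardware supports at most a 4 GiB inbound region */
		if (size > (1ULL << 32))
			size = 1ULL << 32;

		printf("map 0x%llx size 0x%llx\n",
		       (unsigned long long)cpu_addr,
		       (unsigned long long)size);
		cpu_addr += size;
	}
	return 0;
}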
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index ef8e677ce9d1..d9b63bfa5dd7 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -620,19 +620,13 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
dev_info(dev, "no vpcie3v3 regulator found\n");
}
- rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
- if (IS_ERR(rockchip->vpcie1v8)) {
- if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
- return PTR_ERR(rockchip->vpcie1v8);
- dev_info(dev, "no vpcie1v8 regulator found\n");
- }
+ rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8))
+ return PTR_ERR(rockchip->vpcie1v8);
- rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
- if (IS_ERR(rockchip->vpcie0v9)) {
- if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
- return PTR_ERR(rockchip->vpcie0v9);
- dev_info(dev, "no vpcie0v9 regulator found\n");
- }
+ rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9))
+ return PTR_ERR(rockchip->vpcie0v9);
return 0;
}
@@ -658,27 +652,22 @@ static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
}
}
- if (!IS_ERR(rockchip->vpcie1v8)) {
- err = regulator_enable(rockchip->vpcie1v8);
- if (err) {
- dev_err(dev, "fail to enable vpcie1v8 regulator\n");
- goto err_disable_3v3;
- }
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
}
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- goto err_disable_1v8;
- }
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
}
return 0;
err_disable_1v8:
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie1v8);
err_disable_3v3:
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
@@ -806,19 +795,28 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
+ struct resource_entry *entry;
+ u64 pci_addr, size;
int offset;
int err;
int reg_no;
rockchip_pcie_cfg_configuration_accesses(rockchip,
AXI_WRAPPER_TYPE0_CFG);
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ size = resource_size(entry->res);
+ pci_addr = entry->res->start - entry->offset;
+ rockchip->msg_bus_addr = pci_addr;
- for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
AXI_WRAPPER_MEM_WRITE,
20 - 1,
- rockchip->mem_bus_addr +
- (reg_no << 20),
+ pci_addr + (reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC mem outbound ATU failed\n");
@@ -832,14 +830,20 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
return err;
}
- offset = rockchip->mem_size >> 20;
- for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (!entry)
+ return -ENODEV;
+
+ size = resource_size(entry->res);
+ pci_addr = entry->res->start - entry->offset;
+
+ offset = size >> 20;
+ for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip,
reg_no + 1 + offset,
AXI_WRAPPER_IO_WRITE,
20 - 1,
- rockchip->io_bus_addr +
- (reg_no << 20),
+ pci_addr + (reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC io outbound ATU failed\n");
@@ -852,8 +856,7 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
AXI_WRAPPER_NOR_MSG,
20 - 1, 0, 0);
- rockchip->msg_bus_addr = rockchip->mem_bus_addr +
- ((reg_no + offset) << 20);
+ rockchip->msg_bus_addr += ((reg_no + offset) << 20);
return err;
}
@@ -897,8 +900,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
rockchip_pcie_disable_clocks(rockchip);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie0v9);
return ret;
}
@@ -908,12 +910,10 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int err;
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- return err;
- }
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ return err;
}
err = rockchip_pcie_enable_clocks(rockchip);
@@ -939,8 +939,7 @@ err_err_deinit_port:
err_pcie_resume:
rockchip_pcie_disable_clocks(rockchip);
err_disable_0v9:
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie0v9);
return err;
}
@@ -950,14 +949,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
- struct resource_entry *win;
- resource_size_t io_base;
- struct resource *mem;
- struct resource *io;
+ struct resource *bus_res;
int err;
- LIST_HEAD(res);
-
if (!dev->of_node)
return -ENODEV;
@@ -995,56 +989,23 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err < 0)
goto err_deinit_port;
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &res, &io_base);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, &bus_res);
if (err)
goto err_remove_irq_domain;
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto err_free_res;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "I/O";
- rockchip->io_size = resource_size(io);
- rockchip->io_bus_addr = io->start - win->offset;
- err = pci_remap_iospace(io, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, io);
- continue;
- }
- rockchip->io = io;
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "MEM";
- rockchip->mem_size = resource_size(mem);
- rockchip->mem_bus_addr = mem->start - win->offset;
- break;
- case IORESOURCE_BUS:
- rockchip->root_bus_nr = win->res->start;
- break;
- default:
- continue;
- }
- }
+ rockchip->root_bus_nr = bus_res->start;
err = rockchip_pcie_cfg_atu(rockchip);
if (err)
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
if (!rockchip->msg_region) {
err = -ENOMEM;
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
}
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = rockchip;
bridge->busnr = 0;
@@ -1054,7 +1015,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
bus = bridge->bus;
@@ -1068,10 +1029,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
-err_unmap_iospace:
- pci_unmap_iospace(rockchip->io);
-err_free_res:
- pci_free_resource_list(&res);
err_remove_irq_domain:
irq_domain_remove(rockchip->irq_domain);
err_deinit_port:
@@ -1081,10 +1038,8 @@ err_vpcie:
regulator_disable(rockchip->vpcie12v);
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
rockchip_pcie_disable_clocks(rockchip);
return err;
@@ -1097,7 +1052,6 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(rockchip->root_bus);
pci_remove_root_bus(rockchip->root_bus);
- pci_unmap_iospace(rockchip->io);
irq_domain_remove(rockchip->irq_domain);
rockchip_pcie_deinit_phys(rockchip);
@@ -1108,10 +1062,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
regulator_disable(rockchip->vpcie12v);
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie0v9);
return 0;
}
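rockchip_pcie_cfg_atu() now takes the window geometry straight from the bridge's resource entries: the PCI bus address is the resource start minus the entry offset, and one 1 MiB outbound region is programmed per megabyte of the window. A small stand-alone illustration of that arithmetic, with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t res_start = 0xf8000000ULL; /* CPU address of MEM window */
	uint64_t res_size  = 0x00400000ULL; /* 4 MiB window */
	uint64_t offset    = 0xf8000000ULL; /* CPU-to-PCI offset */
	uint64_t pci_addr  = res_start - offset;
	int reg_no;

	/* one outbound ATU region per 1 MiB of the window */
	for (reg_no = 0; reg_no < (int)(res_size >> 20); reg_no++)
		printf("region %d -> bus address 0x%llx\n", reg_no,
		       (unsigned long long)(pci_addr + ((uint64_t)reg_no << 20)));
	return 0;
}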
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 8e87a059ce73..d90dfb354573 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Rockchip AXI PCIe controller driver
*
@@ -304,13 +304,8 @@ struct rockchip_pcie {
struct irq_domain *irq_domain;
int offset;
struct pci_bus *root_bus;
- struct resource *io;
- phys_addr_t io_bus_addr;
- u32 io_size;
void __iomem *msg_region;
- u32 mem_size;
phys_addr_t msg_bus_addr;
- phys_addr_t mem_bus_addr;
bool is_rc;
struct resource *mem_res;
};
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 45c0f344ccd1..9bd1427f2fd6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -821,8 +821,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
int err;
- resource_size_t iobase = 0;
- LIST_HEAD(res);
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
@@ -845,24 +843,19 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto error;
-
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- goto error;
+ return err;
}
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_busno;
@@ -874,13 +867,13 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- goto error;
+ return err;
}
}
err = pci_scan_root_bus_bridge(bridge);
if (err)
- goto error;
+ return err;
bus = bridge->bus;
@@ -889,10 +882,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return err;
}
static struct platform_driver nwl_pcie_driver = {
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 5bf3af3b28e6..98e55297815b 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -619,8 +619,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int err;
- resource_size_t iobase = 0;
- LIST_HEAD(res);
if (!dev->of_node)
return -ENODEV;
@@ -647,19 +645,13 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto error;
-
-
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = port;
bridge->busnr = 0;
@@ -673,7 +665,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
#endif
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
- goto error;
+ return err;
bus = bridge->bus;
@@ -682,10 +674,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return err;
}
static const struct of_device_id xilinx_pcie_of_match[] = {
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a35d3f3996d7..212842263f55 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -602,16 +602,30 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
/*
* Certain VMD devices may have a root port configuration option which
- * limits the bus range to between 0-127 or 128-255
+ * limits the bus range to between 0-127, 128-255, or 224-255
*/
if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
- u32 vmcap, vmconfig;
-
- pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
- pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
- if (BUS_RESTRICT_CAP(vmcap) &&
- (BUS_RESTRICT_CFG(vmconfig) == 0x1))
- vmd->busn_start = 128;
+ u16 reg16;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCAP, &reg16);
+ if (BUS_RESTRICT_CAP(reg16)) {
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG,
+ &reg16);
+
+ switch (BUS_RESTRICT_CFG(reg16)) {
+ case 1:
+ vmd->busn_start = 128;
+ break;
+ case 2:
+ vmd->busn_start = 224;
+ break;
+ case 3:
+ pci_err(vmd->dev, "Unknown Bus Offset Setting\n");
+ return -ENODEV;
+ default:
+ break;
+ }
+ }
}
res = &vmd->dev->resource[VMD_CFGBAR];
@@ -823,7 +837,7 @@ static int vmd_suspend(struct device *dev)
int i;
for (i = 0; i < vmd->msix_count; i++)
- devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+ devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
pci_save_state(pdev);
return 0;
@@ -854,6 +868,8 @@ static const struct pci_device_id vmd_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
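The VMD change above extends the bus-restriction decode from a single flag to a 2-bit field. A stand-alone sketch of the resulting mapping (values taken from the switch statement above; the helper name is made up):

#include <stdio.h>

/* Returns the first usable bus number, or -1 for the reserved setting. */
static int vmd_bus_start(unsigned int cfg)
{
	switch (cfg) {
	case 1:  return 128;
	case 2:  return 224;
	case 3:  return -1;   /* unknown/reserved bus offset setting */
	default: return 0;
	}
}

int main(void)
{
	unsigned int cfg;

	for (cfg = 0; cfg < 4; cfg++)
		printf("cfg %u -> bus start %d\n", cfg, vmd_bus_start(cfg));
	return 0;
}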
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 1cfe3687a211..5d74f81ddfe4 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -44,7 +44,7 @@
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
- void *reg[6];
+ void *reg[PCI_STD_NUM_BARS];
struct pci_epf *epf;
enum pci_barno test_reg_bar;
struct delayed_work cmd_handler;
@@ -377,7 +377,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
cancel_delayed_work(&epf_test->cmd_handler);
pci_epc_stop(epc);
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
@@ -400,7 +400,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
epc_features = epf_test->epc_features;
- for (bar = BAR_0; bar <= BAR_5; bar += add) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
epf_bar = &epf->bar[bar];
/*
* pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
@@ -450,7 +450,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
epf_test->reg[test_reg_bar] = base;
- for (bar = BAR_0; bar <= BAR_5; bar += add) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
epf_bar = &epf->bar[bar];
add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
@@ -478,7 +478,7 @@ static void pci_epf_configure_bar(struct pci_epf *epf,
bool bar_fixed_64bit;
int i;
- for (i = BAR_0; i <= BAR_5; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
epf_bar = &epf->bar[i];
bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
if (bar_fixed_64bit)
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index 2bf8bd1f0563..d2b174ce15de 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -134,7 +134,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
if (pageno < 0)
return NULL;
- *phys_addr = mem->phys_base + (pageno << page_shift);
+ *phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
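The pci-epc-mem fix widens the page number before shifting so the physical address cannot wrap in 32-bit arithmetic. A stand-alone demonstration of the wrap the cast avoids (an unsigned 32-bit page index is used so the truncation is well defined; the numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int pageno = 0x30000;    /* page index within the window */
	unsigned int page_shift = 16;     /* 64 KiB pages */
	uint64_t phys_base = 0x80000000ULL;

	uint64_t wrong = phys_base + (pageno << page_shift);          /* shift done in 32 bits */
	uint64_t right = phys_base + ((uint64_t)pageno << page_shift); /* shift done in 64 bits */

	printf("32-bit shift:  0x%llx\n", (unsigned long long)wrong);
	printf("widened shift: 0x%llx\n", (unsigned long long)right);
	return 0;
}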
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index e7b493c22bf3..32455a79372d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -83,7 +83,7 @@ config HOTPLUG_PCI_CPCI_ZT5550
depends on HOTPLUG_PCI_CPCI && X86
help
Say Y here if you have an Performance Technologies (formerly Intel,
- formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
+ formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
To compile this driver as a module, choose M here: the
module will be called cpcihp_zt5550.
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index e4c46637f32f..b3869951c0eb 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -449,8 +449,15 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
/* Scan non-hotplug bridges that need to be reconfigured */
for_each_pci_bridge(dev, bus) {
- if (!hotplug_is_native(dev))
- max = pci_scan_bridge(bus, dev, max, 1);
+ if (hotplug_is_native(dev))
+ continue;
+
+ max = pci_scan_bridge(bus, dev, max, 1);
+ if (dev->subordinate) {
+ pcibios_resource_survey_bus(dev->subordinate);
+ pci_bus_size_bridges(dev->subordinate);
+ pci_bus_assign_resources(dev->subordinate);
+ }
}
}
@@ -480,7 +487,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (PCI_SLOT(dev->devfn) == slot->device)
acpiphp_native_scan_bridge(dev);
}
- pci_assign_unassigned_bridge_resources(bus->self);
} else {
LIST_HEAD(add_list);
int max, pass;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 654c972b8ea0..aa61d4c219d7 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,6 +72,7 @@ extern int pciehp_poll_time;
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
+ * @ist_running: flag to keep user request waiting while IRQ thread is running
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
* used for synchronous slot enable/disable request via sysfs
@@ -101,6 +102,7 @@ struct controller {
struct hotplug_slot hotplug_slot; /* hotplug core interface */
struct rw_semaphore reset_lock;
+ unsigned int ist_running;
int request_result;
wait_queue_head_t requester;
};
@@ -172,10 +174,10 @@ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn);
void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
int pciehp_query_power_fault(struct controller *ctrl);
-bool pciehp_card_present(struct controller *ctrl);
-bool pciehp_card_present_or_link_active(struct controller *ctrl);
+int pciehp_card_present(struct controller *ctrl);
+int pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
-bool pciehp_check_link_active(struct controller *ctrl);
+int pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index b3122c151b80..312cc45c44c7 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -139,10 +139,15 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl->pcie->port;
+ int ret;
pci_config_pm_runtime_get(pdev);
- *value = pciehp_card_present_or_link_active(ctrl);
+ ret = pciehp_card_present_or_link_active(ctrl);
pci_config_pm_runtime_put(pdev);
+ if (ret < 0)
+ return ret;
+
+ *value = ret;
return 0;
}
@@ -158,13 +163,13 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static void pciehp_check_presence(struct controller *ctrl)
{
- bool occupied;
+ int occupied;
down_read(&ctrl->reset_lock);
mutex_lock(&ctrl->state_lock);
occupied = pciehp_card_present_or_link_active(ctrl);
- if ((occupied && (ctrl->state == OFF_STATE ||
+ if ((occupied > 0 && (ctrl->state == OFF_STATE ||
ctrl->state == BLINKINGON_STATE)) ||
(!occupied && (ctrl->state == ON_STATE ||
ctrl->state == BLINKINGOFF_STATE)))
@@ -253,7 +258,7 @@ static bool pme_is_native(struct pcie_device *dev)
return pcie_ports_native || host->native_pme;
}
-static int pciehp_suspend(struct pcie_device *dev)
+static void pciehp_disable_interrupt(struct pcie_device *dev)
{
/*
* Disable hotplug interrupt so that it does not trigger
@@ -261,7 +266,19 @@ static int pciehp_suspend(struct pcie_device *dev)
*/
if (pme_is_native(dev))
pcie_disable_interrupt(get_service_data(dev));
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pciehp_suspend(struct pcie_device *dev)
+{
+ /*
+ * If the port is already runtime suspended we can keep it that
+ * way.
+ */
+ if (dev_pm_smart_suspend_and_suspended(&dev->port->dev))
+ return 0;
+ pciehp_disable_interrupt(dev);
return 0;
}
@@ -279,6 +296,7 @@ static int pciehp_resume_noirq(struct pcie_device *dev)
return 0;
}
+#endif
static int pciehp_resume(struct pcie_device *dev)
{
@@ -292,6 +310,12 @@ static int pciehp_resume(struct pcie_device *dev)
return 0;
}
+static int pciehp_runtime_suspend(struct pcie_device *dev)
+{
+ pciehp_disable_interrupt(dev);
+ return 0;
+}
+
static int pciehp_runtime_resume(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
@@ -318,10 +342,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
.remove = pciehp_remove,
#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = pciehp_suspend,
.resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
- .runtime_suspend = pciehp_suspend,
+#endif
+ .runtime_suspend = pciehp_runtime_suspend,
.runtime_resume = pciehp_runtime_resume,
#endif /* PM */
};
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 21af7b16d7a4..6503d15effbb 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -226,7 +226,7 @@ void pciehp_handle_disable_request(struct controller *ctrl)
void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
{
- bool present, link_active;
+ int present, link_active;
/*
* If the slot is on and presence or link has changed, turn it off.
@@ -257,7 +257,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
mutex_lock(&ctrl->state_lock);
present = pciehp_card_present(ctrl);
link_active = pciehp_check_link_active(ctrl);
- if (!present && !link_active) {
+ if (present <= 0 && link_active <= 0) {
mutex_unlock(&ctrl->state_lock);
return;
}
@@ -375,7 +375,8 @@ int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot)
ctrl->request_result = -ENODEV;
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
wait_event(ctrl->requester,
- !atomic_read(&ctrl->pending_events));
+ !atomic_read(&ctrl->pending_events) &&
+ !ctrl->ist_running);
return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
@@ -408,7 +409,8 @@ int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot)
mutex_unlock(&ctrl->state_lock);
pciehp_request(ctrl, DISABLE_SLOT);
wait_event(ctrl->requester,
- !atomic_read(&ctrl->pending_events));
+ !atomic_read(&ctrl->pending_events) &&
+ !ctrl->ist_running);
return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 1a522c1c4177..8a2cb1764386 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -68,7 +68,7 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- while (true) {
+ do {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
if (slot_status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n",
@@ -81,11 +81,9 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
PCI_EXP_SLTSTA_CC);
return 1;
}
- if (timeout < 0)
- break;
msleep(10);
timeout -= 10;
- }
+ } while (timeout >= 0);
return 0; /* timeout */
}
@@ -201,17 +199,29 @@ static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
pcie_do_write_cmd(ctrl, cmd, mask, false);
}
-bool pciehp_check_link_active(struct controller *ctrl)
+/**
+ * pciehp_check_link_active() - Is the link active
+ * @ctrl: PCIe hotplug controller
+ *
+ * Check whether the downstream link is currently active. Note it is
+ * possible that the card is removed immediately after this so the
+ * caller may need to take it into account.
+ *
+ * If the hotplug controller itself is not available anymore returns
+ * %-ENODEV.
+ */
+int pciehp_check_link_active(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_status;
- bool ret;
+ int ret;
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
+ return -ENODEV;
- if (ret)
- ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
return ret;
}
@@ -373,13 +383,29 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}
-bool pciehp_card_present(struct controller *ctrl)
+/**
+ * pciehp_card_present() - Is the card present
+ * @ctrl: PCIe hotplug controller
+ *
+ * Function checks whether the card is currently present in the slot and
+ * in that case returns true. Note it is possible that the card is
+ * removed immediately after the check so the caller may need to take
+ * this into account.
+ *
+ * If the hotplug controller itself is not available anymore returns
+ * %-ENODEV.
+ */
+int pciehp_card_present(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
+ int ret;
- pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- return slot_status & PCI_EXP_SLTSTA_PDS;
+ ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
+ return -ENODEV;
+
+ return !!(slot_status & PCI_EXP_SLTSTA_PDS);
}
/**
@@ -390,10 +416,19 @@ bool pciehp_card_present(struct controller *ctrl)
* Presence Detect State bit, this helper also returns true if the Link Active
* bit is set. This is a concession to broken hotplug ports which hardwire
* Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ *
+ * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
+ * port is not present anymore returns %-ENODEV.
*/
-bool pciehp_card_present_or_link_active(struct controller *ctrl)
+int pciehp_card_present_or_link_active(struct controller *ctrl)
{
- return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+ int ret;
+
+ ret = pciehp_card_present(ctrl);
+ if (ret)
+ return ret;
+
+ return pciehp_check_link_active(ctrl);
}
int pciehp_query_power_fault(struct controller *ctrl)
@@ -583,6 +618,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
irqreturn_t ret;
u32 events;
+ ctrl->ist_running = true;
pci_config_pm_runtime_get(pdev);
/* rerun pciehp_isr() if the port was inaccessible on interrupt */
@@ -629,6 +665,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
up_read(&ctrl->reset_lock);
pci_config_pm_runtime_put(pdev);
+ ctrl->ist_running = false;
wake_up(&ctrl->requester);
return IRQ_HANDLED;
}
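With the presence helpers above returning an int, callers distinguish three cases: a negative errno when the controller itself is gone, 0 for an empty slot, and a positive value for an occupied one. A stand-alone sketch of that caller pattern (the fake_card_present() stub is invented for illustration):

#include <errno.h>
#include <stdio.h>

static int fake_card_present(void)
{
	return -ENODEV;  /* pretend the hotplug port has vanished */
}

int main(void)
{
	int ret = fake_card_present();

	if (ret < 0)
		printf("controller not accessible: %d\n", ret);
	else if (ret == 0)
		printf("slot empty\n");
	else
		printf("slot occupied\n");
	return 0;
}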
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 18627bb21e9e..e408e4021cee 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -154,11 +154,11 @@ static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
return speed;
}
-static int get_children_props(struct device_node *dn, const int **drc_indexes,
- const int **drc_names, const int **drc_types,
- const int **drc_power_domains)
+static int get_children_props(struct device_node *dn, const __be32 **drc_indexes,
+ const __be32 **drc_names, const __be32 **drc_types,
+ const __be32 **drc_power_domains)
{
- const int *indexes, *names, *types, *domains;
+ const __be32 *indexes, *names, *types, *domains;
indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
names = of_get_property(dn, "ibm,drc-names", NULL);
@@ -185,8 +185,8 @@ static int get_children_props(struct device_node *dn, const int **drc_indexes,
/* Verify the existence of 'drc_name' and/or 'drc_type' within the
- * current node. First obtain it's my-drc-index property. Next,
- * obtain the DRC info from it's parent. Use the my-drc-index for
+ * current node. First obtain its my-drc-index property. Next,
+ * obtain the DRC info from its parent. Use the my-drc-index for
* correlation, and obtain/validate the requested properties.
*/
@@ -194,8 +194,8 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
char *drc_type, unsigned int my_index)
{
char *name_tmp, *type_tmp;
- const int *indexes, *names;
- const int *types, *domains;
+ const __be32 *indexes, *names;
+ const __be32 *types, *domains;
int i, rc;
rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
@@ -208,7 +208,7 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
/* Iterate through parent properties, looking for my-drc-index */
for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
- if ((unsigned int) indexes[i + 1] == my_index)
+ if (be32_to_cpu(indexes[i + 1]) == my_index)
break;
name_tmp += (strlen(name_tmp) + 1);
@@ -239,6 +239,8 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
value = of_prop_next_u32(info, NULL, &entries);
if (!value)
return -EINVAL;
+ else
+ value++;
for (j = 0; j < entries; j++) {
of_read_drc_info_cell(&info, &value, &drc);
@@ -246,9 +248,10 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
/* Should now know end of current entry */
/* Found it */
- if (my_index <= drc.last_drc_index) {
+ if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) {
+ int index = my_index - drc.drc_index_start;
sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
- my_index);
+ drc.drc_name_suffix_start + index);
break;
}
}
@@ -265,7 +268,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
char *drc_type)
{
- const unsigned int *my_index;
+ const __be32 *my_index;
my_index = of_get_property(dn, "ibm,my-drc-index", NULL);
if (!my_index) {
@@ -273,12 +276,12 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
return -EINVAL;
}
- if (firmware_has_feature(FW_FEATURE_DRC_INFO))
+ if (of_find_property(dn->parent, "ibm,drc-info", NULL))
return rpaphp_check_drc_props_v2(dn, drc_name, drc_type,
- *my_index);
+ be32_to_cpu(*my_index));
else
return rpaphp_check_drc_props_v1(dn, drc_name, drc_type,
- *my_index);
+ be32_to_cpu(*my_index));
}
EXPORT_SYMBOL_GPL(rpaphp_check_drc_props);
@@ -309,10 +312,11 @@ static int is_php_type(char *drc_type)
* for built-in pci slots (even when the built-in slots are
* dlparable.)
*/
-static int is_php_dn(struct device_node *dn, const int **indexes,
- const int **names, const int **types, const int **power_domains)
+static int is_php_dn(struct device_node *dn, const __be32 **indexes,
+ const __be32 **names, const __be32 **types,
+ const __be32 **power_domains)
{
- const int *drc_types;
+ const __be32 *drc_types;
int rc;
rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
@@ -326,33 +330,55 @@ static int is_php_dn(struct device_node *dn, const int **indexes,
return 1;
}
-/**
- * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
- * @dn: device node of slot
- *
- * This subroutine will register a hotpluggable slot with the
- * PCI hotplug infrastructure. This routine is typically called
- * during boot time, if the hotplug slots are present at boot time,
- * or is called later, by the dlpar add code, if the slot is
- * being dynamically added during runtime.
- *
- * If the device node points at an embedded (built-in) slot, this
- * routine will just return without doing anything, since embedded
- * slots cannot be hotplugged.
- *
- * To remove a slot, it suffices to call rpaphp_deregister_slot().
- */
-int rpaphp_add_slot(struct device_node *dn)
+static int rpaphp_drc_info_add_slot(struct device_node *dn)
{
struct slot *slot;
+ struct property *info;
+ struct of_drc_info drc;
+ char drc_name[MAX_DRC_NAME_LEN];
+ const __be32 *cur;
+ u32 count;
int retval = 0;
- int i;
- const int *indexes, *names, *types, *power_domains;
- char *name, *type;
- if (!dn->name || strcmp(dn->name, "pci"))
+ info = of_find_property(dn, "ibm,drc-info", NULL);
+ if (!info)
+ return 0;
+
+ cur = of_prop_next_u32(info, NULL, &count);
+ if (cur)
+ cur++;
+ else
return 0;
+ of_read_drc_info_cell(&info, &cur, &drc);
+ if (!is_php_type(drc.drc_type))
+ return 0;
+
+ sprintf(drc_name, "%s%d", drc.drc_name_prefix, drc.drc_name_suffix_start);
+
+ slot = alloc_slot_struct(dn, drc.drc_index_start, drc_name, drc.drc_power_domain);
+ if (!slot)
+ return -ENOMEM;
+
+ slot->type = simple_strtoul(drc.drc_type, NULL, 10);
+ retval = rpaphp_enable_slot(slot);
+ if (!retval)
+ retval = rpaphp_register_slot(slot);
+
+ if (retval)
+ dealloc_slot_struct(slot);
+
+ return retval;
+}
+
+static int rpaphp_drc_add_slot(struct device_node *dn)
+{
+ struct slot *slot;
+ int retval = 0;
+ int i;
+ const __be32 *indexes, *names, *types, *power_domains;
+ char *name, *type;
+
/* If this is not a hotplug slot, return without doing anything. */
if (!is_php_dn(dn, &indexes, &names, &types, &power_domains))
return 0;
@@ -391,6 +417,33 @@ int rpaphp_add_slot(struct device_node *dn)
/* XXX FIXME: reports a failure only if last entry in loop failed */
return retval;
}
+
+/**
+ * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
+ * @dn: device node of slot
+ *
+ * This subroutine will register a hotpluggable slot with the
+ * PCI hotplug infrastructure. This routine is typically called
+ * during boot time, if the hotplug slots are present at boot time,
+ * or is called later, by the dlpar add code, if the slot is
+ * being dynamically added during runtime.
+ *
+ * If the device node points at an embedded (built-in) slot, this
+ * routine will just return without doing anything, since embedded
+ * slots cannot be hotplugged.
+ *
+ * To remove a slot, it suffices to call rpaphp_deregister_slot().
+ */
+int rpaphp_add_slot(struct device_node *dn)
+{
+ if (!dn->name || strcmp(dn->name, "pci"))
+ return 0;
+
+ if (of_find_property(dn, "ibm,drc-info", NULL))
+ return rpaphp_drc_info_add_slot(dn);
+ else
+ return rpaphp_drc_add_slot(dn);
+}
EXPORT_SYMBOL_GPL(rpaphp_add_slot);
static void __exit cleanup_slots(void)
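The rpaphp changes treat the device-tree cells as __be32 and byte-swap them before use. A stand-alone reminder of why that matters on a little-endian host (plain C stand-ins replace be32_to_cpu() and of_get_property(); the cell value is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t be32_to_host(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* an "ibm,my-drc-index" cell as stored in the flattened tree */
	uint8_t cell[4] = { 0x10, 0x00, 0x00, 0x02 };
	uint32_t raw;

	memcpy(&raw, cell, sizeof(raw));
	printf("raw host-order read: 0x%08x (endian dependent)\n", raw);
	printf("byte-swapped value:  0x%08x\n", be32_to_host(cell));
	return 0;
}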
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b3f972e8cfed..1e88fd427757 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -9,7 +9,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
@@ -254,8 +253,14 @@ static ssize_t sriov_numvfs_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ u16 num_vfs;
+
+ /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
+ device_lock(&pdev->dev);
+ num_vfs = pdev->sriov->num_VFs;
+ device_unlock(&pdev->dev);
- return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+ return sprintf(buf, "%u\n", num_vfs);
}
/*
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0884bedcfc7a..c7709e49f0e4 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -213,12 +213,13 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
if (pci_msi_ignore_mask)
return 0;
+
desc_addr = pci_msix_desc_addr(desc);
if (!desc_addr)
return 0;
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
- if (flag)
+ if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
@@ -861,7 +862,7 @@ static int pci_msi_supported(struct pci_dev *dev, int nvec)
if (!pci_msi_enable)
return 0;
- if (!dev || dev->no_msi || dev->current_state != PCI_D0)
+ if (!dev || dev->no_msi)
return 0;
/*
@@ -972,7 +973,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
int nr_entries;
int i, j;
- if (!pci_msi_supported(dev, nvec))
+ if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
return -EINVAL;
nr_entries = pci_msix_vec_count(dev);
@@ -1058,7 +1059,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
int nvec;
int rc;
- if (!pci_msi_supported(dev, minvec))
+ if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
return -EINVAL;
/* Check whether driver already requested MSI-X IRQs */
@@ -1315,22 +1316,6 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
}
EXPORT_SYMBOL(pci_irq_get_affinity);
-/**
- * pci_irq_get_node - return the NUMA node of a particular MSI vector
- * @pdev: PCI device to operate on
- * @vec: device-relative interrupt vector index (0-based).
- */
-int pci_irq_get_node(struct pci_dev *pdev, int vec)
-{
- const struct cpumask *mask;
-
- mask = pci_irq_get_affinity(pdev, vec);
- if (mask)
- return local_memory_node(cpu_to_node(cpumask_first(mask)));
- return dev_to_node(&pdev->dev);
-}
-EXPORT_SYMBOL(pci_irq_get_node);
-
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
return to_pci_dev(desc->dev);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 36891e7deee3..81ceeaa6f1d5 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -236,7 +236,6 @@ void of_pci_check_probe_only(void)
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
-#if defined(CONFIG_OF_ADDRESS)
/**
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
@@ -255,16 +254,18 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* It returns zero if the range parsing has been successful or a standard error
* value if it failed.
*/
-int devm_of_pci_get_host_bridge_resources(struct device *dev,
+static int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
+ struct list_head *resources,
+ struct list_head *ib_resources,
+ resource_size_t *io_base)
{
struct device_node *dev_node = dev->of_node;
struct resource *res, tmp_res;
struct resource *bus_range;
struct of_pci_range range;
struct of_pci_range_parser parser;
- char range_type[4];
+ const char *range_type;
int err;
if (io_base)
@@ -298,12 +299,12 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
- snprintf(range_type, 4, " IO");
+ range_type = "IO";
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
- snprintf(range_type, 4, "MEM");
+ range_type = "MEM";
else
- snprintf(range_type, 4, "err");
- dev_info(dev, " %s %#010llx..%#010llx -> %#010llx\n",
+ range_type = "err";
+ dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
range_type, range.cpu_addr,
range.cpu_addr + range.size - 1, range.pci_addr);
@@ -340,14 +341,54 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
}
+ /* Check for dma-ranges property */
+ if (!ib_resources)
+ return 0;
+ err = of_pci_dma_range_parser_init(&parser, dev_node);
+ if (err)
+ return 0;
+
+ dev_dbg(dev, "Parsing dma-ranges property...\n");
+ for_each_of_pci_range(&parser, &range) {
+ struct resource_entry *entry;
+ /*
+ * If we failed translation or got a zero-sized region
+ * then skip this range
+ */
+ if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
+ range.cpu_addr == OF_BAD_ADDR || range.size == 0)
+ continue;
+
+ dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
+ "IB MEM", range.cpu_addr,
+ range.cpu_addr + range.size - 1, range.pci_addr);
+
+
+ err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
+ if (err)
+ continue;
+
+ res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
+ if (!res) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Keep the resource list sorted */
+ resource_list_for_each_entry(entry, ib_resources)
+ if (entry->res->start > res->start)
+ break;
+
+ pci_add_resource_offset(&entry->node, res,
+ res->start - range.pci_addr);
+ }
+
return 0;
failed:
pci_free_resource_list(resources);
return err;
}
-EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
-#endif /* CONFIG_OF_ADDRESS */
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
@@ -482,6 +523,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
+ struct list_head *ib_resources,
struct resource **bus_range)
{
int err, res_valid = 0;
@@ -489,8 +531,10 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(resources);
+ if (ib_resources)
+ INIT_LIST_HEAD(ib_resources);
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
- &iobase);
+ ib_resources, &iobase);
if (err)
return err;
@@ -530,6 +574,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
pci_free_resource_list(resources);
return err;
}
+EXPORT_SYMBOL_GPL(pci_parse_request_of_pci_ranges);
#endif /* CONFIG_PCI */
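pci_parse_request_of_pci_ranges() now also fills an inbound (dma-ranges) list and is exported, which is what lets the controller drivers above drop their private resource lists. A sketch of the resulting probe pattern, assuming the usual <linux/pci.h>/<linux/platform_device.h> environment and a hypothetical foo_pcie driver (not a complete, buildable driver):

struct foo_pcie {
	void __iomem *base;
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct foo_pcie));
	if (!bridge)
		return -ENOMEM;

	/* parse ranges and dma-ranges straight into the bridge lists */
	err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
					      &bridge->dma_ranges, NULL);
	if (err)
		return err;

	bridge->dev.parent = dev;
	bridge->sysdata = pci_host_bridge_priv(bridge);

	return pci_scan_root_bus_bridge(bridge);
}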
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 5fd90105510d..fffa77093c08 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -270,10 +270,10 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
unsigned int flags)
{
- bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
+ bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->conf.cache_line_size = 0x10;
- bridge->conf.status = PCI_STATUS_CAP_LIST;
+ bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST);
bridge->pci_regs_behavior = kmemdup(pci_regs_behavior,
sizeof(pci_regs_behavior),
GFP_KERNEL);
@@ -284,8 +284,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
/* Set PCIe v2, root port, slot support */
- bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT;
+ bridge->pcie_conf.cap =
+ cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT);
bridge->pcie_cap_regs_behavior =
kmemdup(pcie_cap_regs_behavior,
sizeof(pcie_cap_regs_behavior),
@@ -327,7 +328,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
int reg = where & ~3;
pci_bridge_emul_read_status_t (*read_op)(struct pci_bridge_emul *bridge,
int reg, u32 *value);
- u32 *cfgspace;
+ __le32 *cfgspace;
const struct pci_bridge_reg_behavior *behavior;
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END) {
@@ -343,11 +344,11 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
reg -= PCI_CAP_PCIE_START;
read_op = bridge->ops->read_pcie;
- cfgspace = (u32 *) &bridge->pcie_conf;
+ cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
} else {
read_op = bridge->ops->read_base;
- cfgspace = (u32 *) &bridge->conf;
+ cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
}
@@ -357,7 +358,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
ret = PCI_BRIDGE_EMUL_NOT_HANDLED;
if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED)
- *value = cfgspace[reg / 4];
+ *value = le32_to_cpu(cfgspace[reg / 4]);
/*
* Make sure we never return any reserved bit with a value
@@ -387,7 +388,7 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
int mask, ret, old, new, shift;
void (*write_op)(struct pci_bridge_emul *bridge, int reg,
u32 old, u32 new, u32 mask);
- u32 *cfgspace;
+ __le32 *cfgspace;
const struct pci_bridge_reg_behavior *behavior;
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END)
@@ -414,11 +415,11 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
reg -= PCI_CAP_PCIE_START;
write_op = bridge->ops->write_pcie;
- cfgspace = (u32 *) &bridge->pcie_conf;
+ cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
} else {
write_op = bridge->ops->write_base;
- cfgspace = (u32 *) &bridge->conf;
+ cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
}
@@ -431,7 +432,7 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
/* Clear the W1C bits */
new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));
- cfgspace[reg / 4] = new;
+ cfgspace[reg / 4] = cpu_to_le32(new);
if (write_op)
write_op(bridge, reg, old, new, mask);
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index e65b1b79899d..b31883022a8e 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -6,65 +6,65 @@
/* PCI configuration space of a PCI-to-PCI bridge. */
struct pci_bridge_emul_conf {
- u16 vendor;
- u16 device;
- u16 command;
- u16 status;
- u32 class_revision;
+ __le16 vendor;
+ __le16 device;
+ __le16 command;
+ __le16 status;
+ __le32 class_revision;
u8 cache_line_size;
u8 latency_timer;
u8 header_type;
u8 bist;
- u32 bar[2];
+ __le32 bar[2];
u8 primary_bus;
u8 secondary_bus;
u8 subordinate_bus;
u8 secondary_latency_timer;
u8 iobase;
u8 iolimit;
- u16 secondary_status;
- u16 membase;
- u16 memlimit;
- u16 pref_mem_base;
- u16 pref_mem_limit;
- u32 prefbaseupper;
- u32 preflimitupper;
- u16 iobaseupper;
- u16 iolimitupper;
+ __le16 secondary_status;
+ __le16 membase;
+ __le16 memlimit;
+ __le16 pref_mem_base;
+ __le16 pref_mem_limit;
+ __le32 prefbaseupper;
+ __le32 preflimitupper;
+ __le16 iobaseupper;
+ __le16 iolimitupper;
u8 capabilities_pointer;
u8 reserve[3];
- u32 romaddr;
+ __le32 romaddr;
u8 intline;
u8 intpin;
- u16 bridgectrl;
+ __le16 bridgectrl;
};
/* PCI configuration space of the PCIe capabilities */
struct pci_bridge_emul_pcie_conf {
u8 cap_id;
u8 next;
- u16 cap;
- u32 devcap;
- u16 devctl;
- u16 devsta;
- u32 lnkcap;
- u16 lnkctl;
- u16 lnksta;
- u32 slotcap;
- u16 slotctl;
- u16 slotsta;
- u16 rootctl;
- u16 rsvd;
- u32 rootsta;
- u32 devcap2;
- u16 devctl2;
- u16 devsta2;
- u32 lnkcap2;
- u16 lnkctl2;
- u16 lnksta2;
- u32 slotcap2;
- u16 slotctl2;
- u16 slotsta2;
+ __le16 cap;
+ __le32 devcap;
+ __le16 devctl;
+ __le16 devsta;
+ __le32 lnkcap;
+ __le16 lnkctl;
+ __le16 lnksta;
+ __le32 slotcap;
+ __le16 slotctl;
+ __le16 slotsta;
+ __le16 rootctl;
+ __le16 rsvd;
+ __le32 rootsta;
+ __le32 devcap2;
+ __le16 devctl2;
+ __le16 devsta2;
+ __le32 lnkcap2;
+ __le16 lnkctl2;
+ __le16 lnksta2;
+ __le32 slotcap2;
+ __le16 slotctl2;
+ __le16 slotsta2;
};
struct pci_bridge_emul;
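The struct changes above follow from the accessor changes earlier in the file: PCI config space is defined as little-endian, so storing the emulated space as __le16/__le32 and converting only at the conf_read/conf_write boundaries (le32_to_cpu()/cpu_to_le32()) keeps the in-memory layout byte-identical on big-endian hosts. As a worked example, on a big-endian machine cpu_to_le16(PCI_STATUS_CAP_LIST) stores the bytes 0x10 0x00 (the order a device would put on the wire) instead of the native 0x00 0x10.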
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a8124e47bf6e..0454ca0e4e3f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -315,7 +315,8 @@ static long local_pci_probe(void *_ddi)
* Probe function should return < 0 for failure, 0 for success
* Treat values > 0 as success, but warn.
*/
- dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
+ pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n",
+ rc);
return 0;
}
@@ -517,6 +518,12 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
return 0;
}
+static void pci_pm_default_resume(struct pci_dev *pci_dev)
+{
+ pci_fixup_device(pci_fixup_resume, pci_dev);
+ pci_enable_wake(pci_dev, PCI_D0, false);
+}
+
#endif
#ifdef CONFIG_PM_SLEEP
@@ -524,6 +531,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_power_up(pci_dev);
+ pci_update_current_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
pci_pme_restore(pci_dev);
}
@@ -578,9 +586,9 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: Device state not saved by %pS\n",
- drv->suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: Device state not saved by %pS\n",
+ drv->suspend);
}
}
@@ -592,46 +600,17 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
-
- if (drv && drv->suspend_late) {
- pci_power_t prev = pci_dev->current_state;
- int error;
-
- error = drv->suspend_late(pci_dev, state);
- suspend_report_result(drv->suspend_late, error);
- if (error)
- return error;
-
- if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
- && pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: Device state not saved by %pS\n",
- drv->suspend_late);
- goto Fixup;
- }
- }
if (!pci_dev->state_saved)
pci_save_state(pci_dev);
pci_pm_set_unknown_state(pci_dev);
-Fixup:
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
return 0;
}
-static int pci_legacy_resume_early(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
-
- return drv && drv->resume_early ?
- drv->resume_early(pci_dev) : 0;
-}
-
static int pci_legacy_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -645,12 +624,6 @@ static int pci_legacy_resume(struct device *dev)
/* Auxiliary functions used by the new power management framework */
-static void pci_pm_default_resume(struct pci_dev *pci_dev)
-{
- pci_fixup_device(pci_fixup_resume, pci_dev);
- pci_enable_wake(pci_dev, PCI_D0, false);
-}
-
static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
/* Disable non-bridge devices without PM support */
@@ -661,16 +634,15 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
struct pci_driver *drv = pci_dev->driver;
- bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
- || drv->resume_early);
+ bool ret = drv && (drv->suspend || drv->resume);
/*
* Legacy PM support is used by default, so warn if the new framework is
* supported as well. Drivers are supposed to support either the
* former, or the latter, but not both at the same time.
*/
- WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
- drv->name, pci_dev->vendor, pci_dev->device);
+ pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n",
+ pci_dev->vendor, pci_dev->device);
return ret;
}
@@ -679,11 +651,11 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
static int pci_pm_prepare(struct device *dev)
{
- struct device_driver *drv = dev->driver;
struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (drv && drv->pm && drv->pm->prepare) {
- int error = drv->pm->prepare(dev);
+ if (pm && pm->prepare) {
+ int error = pm->prepare(dev);
if (error < 0)
return error;
@@ -793,9 +765,9 @@ static int pci_pm_suspend(struct device *dev)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->suspend);
}
}
@@ -841,9 +813,9 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->suspend_noirq);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->suspend_noirq);
goto Fixup;
}
}
@@ -865,7 +837,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_prepare_to_sleep(pci_dev);
}
- dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
+ pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n",
pci_power_name(pci_dev->current_state));
if (pci_dev->current_state == PCI_D0) {
@@ -880,7 +852,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
}
if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
- dev_dbg(dev, "PCI PM: Skipped\n");
+ pci_dbg(pci_dev, "PCI PM: Skipped\n");
goto Fixup;
}
@@ -917,8 +889,9 @@ Fixup:
static int pci_pm_resume_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_power_t prev_state = pci_dev->current_state;
+ bool skip_bus_pm = pci_dev->skip_bus_pm;
if (dev_pm_may_skip_resume(dev))
return 0;
@@ -937,27 +910,28 @@ static int pci_pm_resume_noirq(struct device *dev)
* configuration here and attempting to put them into D0 again is
* pointless, so avoid doing that.
*/
- if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
+ if (!(skip_bus_pm && pm_suspend_no_platform()))
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
+ pcie_pme_root_status_cleanup(pci_dev);
- if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
+ if (!skip_bus_pm && prev_state == PCI_D3cold)
+ pci_bridge_wait_for_secondary_bus(pci_dev);
- pcie_pme_root_status_cleanup(pci_dev);
+ if (pci_has_legacy_pm_support(pci_dev))
+ return 0;
- if (drv && drv->pm && drv->pm->resume_noirq)
- error = drv->pm->resume_noirq(dev);
+ if (pm && pm->resume_noirq)
+ return pm->resume_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error = 0;
/*
* This is necessary for the suspend error path in which resume is
@@ -973,12 +947,12 @@ static int pci_pm_resume(struct device *dev)
if (pm) {
if (pm->resume)
- error = pm->resume(dev);
+ return pm->resume(dev);
} else {
pci_pm_reenable_device(pci_dev);
}
- return error;
+ return 0;
}
#else /* !CONFIG_SUSPEND */
@@ -993,7 +967,6 @@ static int pci_pm_resume(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
-
/*
* pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
* a hibernate transition
@@ -1039,16 +1012,16 @@ static int pci_pm_freeze(struct device *dev)
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
- if (drv && drv->pm && drv->pm->freeze_noirq) {
+ if (pm && pm->freeze_noirq) {
int error;
- error = drv->pm->freeze_noirq(dev);
- suspend_report_result(drv->pm->freeze_noirq, error);
+ error = pm->freeze_noirq(dev);
+ suspend_report_result(pm->freeze_noirq, error);
if (error)
return error;
}
@@ -1067,8 +1040,8 @@ static int pci_pm_freeze_noirq(struct device *dev)
static int pci_pm_thaw_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error;
if (pcibios_pm_ops.thaw_noirq) {
error = pcibios_pm_ops.thaw_noirq(dev);
@@ -1076,21 +1049,25 @@ static int pci_pm_thaw_noirq(struct device *dev)
return error;
}
- if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
-
/*
- * pci_restore_state() requires the device to be in D0 (because of MSI
- * restoration among other things), so force it into D0 in case the
- * driver's "freeze" callbacks put it into a low-power state directly.
+ * The pm->thaw_noirq() callback assumes the device has been
+ * returned to D0 and its config state has been restored.
+ *
+ * In addition, pci_restore_state() restores MSI-X state in MMIO
+ * space, which requires the device to be in D0, so return it to D0
+ * in case the driver's "freeze" callbacks put it into a low-power
+ * state.
*/
pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
- if (drv && drv->pm && drv->pm->thaw_noirq)
- error = drv->pm->thaw_noirq(dev);
+ if (pci_has_legacy_pm_support(pci_dev))
+ return 0;
+
+ if (pm && pm->thaw_noirq)
+ return pm->thaw_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_thaw(struct device *dev)
@@ -1161,24 +1138,24 @@ static int pci_pm_poweroff_late(struct device *dev)
static int pci_pm_poweroff_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- if (pci_has_legacy_pm_support(to_pci_dev(dev)))
+ if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
- if (!drv || !drv->pm) {
+ if (!pm) {
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
return 0;
}
- if (drv->pm->poweroff_noirq) {
+ if (pm->poweroff_noirq) {
int error;
- error = drv->pm->poweroff_noirq(dev);
- suspend_report_result(drv->pm->poweroff_noirq, error);
+ error = pm->poweroff_noirq(dev);
+ suspend_report_result(pm->poweroff_noirq, error);
if (error)
return error;
}
@@ -1204,8 +1181,8 @@ static int pci_pm_poweroff_noirq(struct device *dev)
static int pci_pm_restore_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error;
if (pcibios_pm_ops.restore_noirq) {
error = pcibios_pm_ops.restore_noirq(dev);
@@ -1217,19 +1194,18 @@ static int pci_pm_restore_noirq(struct device *dev)
pci_fixup_device(pci_fixup_resume_early, pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
+ return 0;
- if (drv && drv->pm && drv->pm->restore_noirq)
- error = drv->pm->restore_noirq(dev);
+ if (pm && pm->restore_noirq)
+ return pm->restore_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_restore(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error = 0;
/*
* This is necessary for the hibernation error path in which restore is
@@ -1245,12 +1221,12 @@ static int pci_pm_restore(struct device *dev)
if (pm) {
if (pm->restore)
- error = pm->restore(dev);
+ return pm->restore(dev);
} else {
pci_pm_reenable_device(pci_dev);
}
- return error;
+ return 0;
}
#else /* !CONFIG_HIBERNATE_CALLBACKS */
@@ -1295,11 +1271,11 @@ static int pci_pm_runtime_suspend(struct device *dev)
* log level.
*/
if (error == -EBUSY || error == -EAGAIN) {
- dev_dbg(dev, "can't suspend now (%ps returned %d)\n",
+ pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n",
pm->runtime_suspend, error);
return error;
} else if (error) {
- dev_err(dev, "can't suspend (%ps returned %d)\n",
+ pci_err(pci_dev, "can't suspend (%ps returned %d)\n",
pm->runtime_suspend, error);
return error;
}
@@ -1310,9 +1286,9 @@ static int pci_pm_runtime_suspend(struct device *dev)
if (pm && pm->runtime_suspend
&& !pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->runtime_suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->runtime_suspend);
return 0;
}
@@ -1326,9 +1302,10 @@ static int pci_pm_runtime_suspend(struct device *dev)
static int pci_pm_runtime_resume(struct device *dev)
{
- int rc = 0;
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_power_t prev_state = pci_dev->current_state;
+ int error = 0;
/*
* Restoring config space is necessary even if the device is not bound
@@ -1341,22 +1318,23 @@ static int pci_pm_runtime_resume(struct device *dev)
return 0;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
- pci_enable_wake(pci_dev, PCI_D0, false);
- pci_fixup_device(pci_fixup_resume, pci_dev);
+ pci_pm_default_resume(pci_dev);
+
+ if (prev_state == PCI_D3cold)
+ pci_bridge_wait_for_secondary_bus(pci_dev);
if (pm && pm->runtime_resume)
- rc = pm->runtime_resume(dev);
+ error = pm->runtime_resume(dev);
pci_dev->runtime_d3cold = false;
- return rc;
+ return error;
}
static int pci_pm_runtime_idle(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret = 0;
/*
* If pci_dev->driver is not set (unbound), the device should
@@ -1369,9 +1347,9 @@ static int pci_pm_runtime_idle(struct device *dev)
return -ENOSYS;
if (pm->runtime_idle)
- ret = pm->runtime_idle(dev);
+ return pm->runtime_idle(dev);
- return ret;
+ return 0;
}
static const struct dev_pm_ops pci_dev_pm_ops = {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 793412954529..13f766db0684 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1122,7 +1122,7 @@ static void pci_remove_resource_files(struct pci_dev *pdev)
{
int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct bin_attribute *res_attr;
res_attr = pdev->res_attr[i];
@@ -1193,7 +1193,7 @@ static int pci_create_resource_files(struct pci_dev *pdev)
int retval;
/* Expose the PCI resources from this device as files */
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
/* skip empty resources */
if (!pci_resource_len(pdev, i))
@@ -1330,7 +1330,6 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
int retval;
pcie_vpd_create_sysfs_dev_files(dev);
- pcie_aspm_create_sysfs_dev_files(dev);
if (dev->reset_fn) {
retval = device_create_file(&dev->dev, &dev_attr_reset);
@@ -1340,7 +1339,6 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
return 0;
error:
- pcie_aspm_remove_sysfs_dev_files(dev);
pcie_vpd_remove_sysfs_dev_files(dev);
return retval;
}
@@ -1416,7 +1414,6 @@ err:
static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
pcie_vpd_remove_sysfs_dev_files(dev);
- pcie_aspm_remove_sysfs_dev_files(dev);
if (dev->reset_fn) {
device_remove_file(&dev->dev, &dev_attr_reset);
dev->reset_fn = 0;
@@ -1539,24 +1536,6 @@ const struct attribute_group *pci_dev_groups[] = {
NULL,
};
-static const struct attribute_group pci_bridge_group = {
- .attrs = pci_bridge_attrs,
-};
-
-const struct attribute_group *pci_bridge_groups[] = {
- &pci_bridge_group,
- NULL,
-};
-
-static const struct attribute_group pcie_dev_group = {
- .attrs = pcie_dev_attrs,
-};
-
-const struct attribute_group *pcie_dev_groups[] = {
- &pcie_dev_group,
- NULL,
-};
-
static const struct attribute_group pci_dev_hp_attr_group = {
.attrs = pci_dev_hp_attrs,
.is_visible = pci_dev_hp_attrs_are_visible,
@@ -1588,6 +1567,9 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCIEAER
&aer_stats_attr_group,
#endif
+#ifdef CONFIG_PCIEASPM
+ &aspm_ctrl_attr_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a97e2571a527..e87196cc1a7f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -85,10 +86,17 @@ unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
#define DEFAULT_HOTPLUG_IO_SIZE (256)
-#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
-/* pci=hpmemsize=nnM,hpiosize=nn can override this */
+#define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024)
+#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024)
+/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
-unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+/*
+ * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
+ * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
+ * pci=hpmemsize=nnM overrides both
+ */
+unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
+unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
#define DEFAULT_HOTPLUG_BUS_SIZE 1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
@@ -674,7 +682,7 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *r = &dev->resource[i];
if (r->start && resource_contains(r, res))
@@ -834,14 +842,16 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EINVAL;
/*
- * Validate current state:
- * Can enter D0 from any state, but if we can only go deeper
- * to sleep if we're already in a low power state
+ * Validate transition: We can enter D0 from any state, but if
+ * we're already in a low-power state, we can only go deeper. E.g.,
+ * we can go from D1 to D3, but we can't go directly from D3 to D1;
+ * we'd have to go from D3 to D0, then to D1.
*/
if (state != PCI_D0 && dev->current_state <= PCI_D3cold
&& dev->current_state > state) {
- pci_err(dev, "invalid power transition (from state %d to %d)\n",
- dev->current_state, state);
+ pci_err(dev, "invalid power transition (from %s to %s)\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
return -EINVAL;
}
@@ -851,6 +861,12 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EIO;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (pmcsr == (u16) ~0) {
+ pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+ return -EIO;
+ }
/*
* If we're (effectively) in D3, force entire word to 0.
@@ -886,13 +902,14 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
- udelay(PCI_PM_D2_DELAY);
+ msleep(PCI_PM_D2_DELAY);
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
if (dev->current_state != state)
- pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
- dev->current_state);
+ pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
/*
* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
@@ -963,7 +980,7 @@ void pci_refresh_power_state(struct pci_dev *dev)
* @dev: PCI device to handle.
* @state: State to put the device into.
*/
-static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
int error;
@@ -979,6 +996,7 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
return error;
}
+EXPORT_SYMBOL_GPL(pci_platform_power_transition);
/**
* pci_wakeup - Wake up a PCI device
@@ -1002,34 +1020,70 @@ void pci_wakeup_bus(struct pci_bus *bus)
pci_walk_bus(bus, pci_wakeup, NULL);
}
+static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+{
+ int delay = 1;
+ u32 id;
+
+ /*
+ * After reset, the device should not silently discard config
+ * requests, but it may still indicate that it needs more time by
+ * responding to them with CRS completions. The Root Port will
+ * generally synthesize ~0 data to complete the read (except when
+ * CRS SV is enabled and the read was for the Vendor ID; in that
+ * case it synthesizes 0x0001 data).
+ *
+ * Wait for the device to return a non-CRS completion. Read the
+ * Command register instead of Vendor ID so we don't have to
+ * contend with the CRS SV value.
+ */
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ while (id == ~0) {
+ if (delay > timeout) {
+ pci_warn(dev, "not ready %dms after %s; giving up\n",
+ delay - 1, reset_type);
+ return -ENOTTY;
+ }
+
+ if (delay > 1000)
+ pci_info(dev, "not ready %dms after %s; waiting\n",
+ delay - 1, reset_type);
+
+ msleep(delay);
+ delay *= 2;
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ }
+
+ if (delay > 1000)
+ pci_info(dev, "ready %dms after %s\n", delay - 1,
+ reset_type);
+
+ return 0;
+}
+
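A quick sanity check of the wait loop above: the sleep starts at 1 ms and doubles on every retry, and the loop gives up once the next delay would exceed the timeout, so the total time spent sleeping is bounded by 1 + 2 + 4 + ... + d < 2 * timeout (where d is the last delay actually slept). Assuming the usual 60 s PCIE_RESET_READY_POLL_MS, the worst case is 65535 ms of polling, i.e. a little over a minute, before -ENOTTY is returned.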
/**
- * __pci_start_power_transition - Start power transition of a PCI device
- * @dev: PCI device to handle.
- * @state: State to put the device into.
+ * pci_power_up - Put the given device into D0
+ * @dev: PCI device to power up
*/
-static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+int pci_power_up(struct pci_dev *dev)
{
- if (state == PCI_D0) {
- pci_platform_power_transition(dev, PCI_D0);
+ pci_platform_power_transition(dev, PCI_D0);
+
+ /*
+ * Mandatory power management transition delays are handled in
+ * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
+ * corresponding bridge.
+ */
+ if (dev->runtime_d3cold) {
/*
- * Mandatory power management transition delays, see
- * PCI Express Base Specification Revision 2.0 Section
- * 6.6.1: Conventional Reset. Do not delay for
- * devices powered on/off by corresponding bridge,
- * because have already delayed for the bridge.
+		 * When powering on a bridge from D3cold, the whole hierarchy
+		 * may be powered on into the D0uninitialized state; resume the
+		 * devices to give them a chance to suspend again.

*/
- if (dev->runtime_d3cold) {
- if (dev->d3cold_delay && !dev->imm_ready)
- msleep(dev->d3cold_delay);
- /*
- * When powering on a bridge from D3cold, the
- * whole hierarchy may be powered on into
- * D0uninitialized state, resume them to give
- * them a chance to suspend again
- */
- pci_wakeup_bus(dev->subordinate);
- }
+ pci_wakeup_bus(dev->subordinate);
}
+
+ return pci_raw_set_power_state(dev, PCI_D0);
}
/**
@@ -1057,27 +1111,6 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
}
/**
- * __pci_complete_power_transition - Complete power transition of a PCI device
- * @dev: PCI device to handle.
- * @state: State to put the device into.
- *
- * This function should not be called directly by device drivers.
- */
-int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
-{
- int ret;
-
- if (state <= PCI_D0)
- return -EINVAL;
- ret = pci_platform_power_transition(dev, state);
- /* Power off the bridge may power off the whole hierarchy */
- if (!ret && state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
- return ret;
-}
-EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
-
-/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
@@ -1117,7 +1150,8 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (dev->current_state == state)
return 0;
- __pci_start_power_transition(dev, state);
+ if (state == PCI_D0)
+ return pci_power_up(dev);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1133,23 +1167,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
PCI_D3hot : state);
- if (!__pci_complete_power_transition(dev, state))
- error = 0;
+ if (pci_platform_power_transition(dev, state))
+ return error;
- return error;
-}
-EXPORT_SYMBOL(pci_set_power_state);
+ /* Powering off a bridge may power off the whole hierarchy */
+ if (state == PCI_D3cold)
+ pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
-/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
- __pci_start_power_transition(dev, PCI_D0);
- pci_raw_set_power_state(dev, PCI_D0);
- pci_update_current_state(dev, PCI_D0);
+ return 0;
}
+EXPORT_SYMBOL(pci_set_power_state);
/**
* pci_choose_state - Choose the power state of a PCI device
@@ -1359,6 +1386,7 @@ int pci_save_state(struct pci_dev *dev)
pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
+ pci_save_aer_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
@@ -1472,6 +1500,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_dpc_state(dev);
pci_cleanup_aer_error_status_regs(dev);
+ pci_restore_aer_state(dev);
pci_restore_config_space(dev);
@@ -3766,7 +3795,7 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
int i;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
pci_release_region(pdev, i);
}
@@ -3777,7 +3806,7 @@ static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
{
int i;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
if (__pci_request_region(pdev, i, res_name, excl))
goto err_out;
@@ -3825,7 +3854,7 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
void pci_release_regions(struct pci_dev *pdev)
{
- pci_release_selected_regions(pdev, (1 << 6) - 1);
+ pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
}
EXPORT_SYMBOL(pci_release_regions);
@@ -3844,7 +3873,8 @@ EXPORT_SYMBOL(pci_release_regions);
*/
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
- return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
+ return pci_request_selected_regions(pdev,
+ ((1 << PCI_STD_NUM_BARS) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions);
@@ -3866,7 +3896,7 @@ EXPORT_SYMBOL(pci_request_regions);
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
return pci_request_selected_regions_exclusive(pdev,
- ((1 << 6) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
@@ -4428,47 +4458,6 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
-static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
-{
- int delay = 1;
- u32 id;
-
- /*
- * After reset, the device should not silently discard config
- * requests, but it may still indicate that it needs more time by
- * responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 data to complete the read (except when
- * CRS SV is enabled and the read was for the Vendor ID; in that
- * case it synthesizes 0x0001 data).
- *
- * Wait for the device to return a non-CRS completion. Read the
- * Command register instead of Vendor ID so we don't have to
- * contend with the CRS SV value.
- */
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- while (id == ~0) {
- if (delay > timeout) {
- pci_warn(dev, "not ready %dms after %s; giving up\n",
- delay - 1, reset_type);
- return -ENOTTY;
- }
-
- if (delay > 1000)
- pci_info(dev, "not ready %dms after %s; waiting\n",
- delay - 1, reset_type);
-
- msleep(delay);
- delay *= 2;
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- }
-
- if (delay > 1000)
- pci_info(dev, "ready %dms after %s\n", delay - 1,
- reset_type);
-
- return 0;
-}
-
/**
* pcie_has_flr - check if a device supports function level resets
* @dev: device to check
@@ -4603,16 +4592,19 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
pci_dev_d3_sleep(dev);
- return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
+ return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
}
+
/**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
+ int delay)
{
int timeout = 1000;
bool ret;
@@ -4649,13 +4641,144 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
timeout -= 10;
}
if (active && ret)
- msleep(100);
+ msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
+/*
+ * Find maximum D3cold delay required by all the devices on the bus. The
+ * spec says 100 ms, but firmware can lower it and we allow drivers to
+ * increase it as well.
+ *
+ * Called with @pci_bus_sem locked for reading.
+ */
+static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+{
+ const struct pci_dev *pdev;
+ int min_delay = 100;
+ int max_delay = 0;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (pdev->d3cold_delay < min_delay)
+ min_delay = pdev->d3cold_delay;
+ if (pdev->d3cold_delay > max_delay)
+ max_delay = pdev->d3cold_delay;
+ }
+
+ return max(min_delay, max_delay);
+}
+
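A worked example of the helper above: if firmware has lowered two children's d3cold_delay to 20 ms and 50 ms, min_delay ends up 20 and max_delay 50, so the function returns 50; if instead a driver has raised one child's delay to 150 ms, max_delay wins and 150 is returned; and only when every child reports 0 does the function return 0, in which case the caller below skips the wait entirely.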
+/**
+ * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
+ * @dev: PCI bridge
+ *
+ * Handle necessary delays before access to devices on the secondary
+ * side of the bridge is permitted after a D3cold to D0 transition.
+ *
+ * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
+ * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
+ * 4.3.2.
+ */
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+{
+ struct pci_dev *child;
+ int delay;
+
+ if (pci_dev_is_disconnected(dev))
+ return;
+
+ if (!pci_is_bridge(dev) || !dev->bridge_d3)
+ return;
+
+ down_read(&pci_bus_sem);
+
+ /*
+	 * We only deal with devices that are currently present on the bus.
+	 * For any hot-added devices the access delay is handled in pciehp
+	 * board_added(). In the case of ACPI hotplug the firmware is
+	 * expected to configure the devices before the OS is notified.
+ */
+ if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
+ up_read(&pci_bus_sem);
+ return;
+ }
+
+ /* Take d3cold_delay requirements into account */
+ delay = pci_bus_max_d3cold_delay(dev->subordinate);
+ if (!delay) {
+ up_read(&pci_bus_sem);
+ return;
+ }
+
+ child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+ bus_list);
+ up_read(&pci_bus_sem);
+
+ /*
+	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
+	 * accessing the device after reset (that is, 1000 ms + 100 ms). In
+ * practice this should not be needed because we don't do power
+ * management for them (see pci_bridge_d3_possible()).
+ */
+ if (!pci_is_pcie(dev)) {
+ pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
+ msleep(1000 + delay);
+ return;
+ }
+
+ /*
+	 * PCIe downstream and root ports that do not support speeds greater
+	 * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
+	 * (gen3) we first need to wait for the data link layer to become
+	 * active.
+ *
+	 * However, 100 ms is only the minimum; the PCIe spec says software
+	 * must allow at least 1 s before it may conclude that a device which
+	 * does not respond is broken. There is evidence that 100 ms is not
+	 * always enough; for example, a certain Titan Ridge xHCI controller
+	 * does not always respond to configuration requests if we only wait
+	 * for 100 ms (see
+ * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
+ *
+	 * Therefore we wait for 100 ms and check for the device's presence.
+	 * If it is still not present, give it an additional 100 ms.
+ */
+ if (!pcie_downstream_port(dev))
+ return;
+
+ if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+ pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+ msleep(delay);
+ } else {
+ pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
+ delay);
+ if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ /* Did not train, no need to wait any further */
+ return;
+ }
+ }
+
+ if (!pci_device_is_present(child)) {
+ pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+ msleep(delay);
+ }
+}
+
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
@@ -5854,6 +5977,24 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+#ifdef CONFIG_ACPI
+bool pci_pr3_present(struct pci_dev *pdev)
+{
+ struct acpi_device *adev;
+
+ if (acpi_disabled)
+ return false;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return false;
+
+ return adev->power.flags.power_resources &&
+ acpi_has_method(adev->handle, "_PR3");
+}
+EXPORT_SYMBOL_GPL(pci_pr3_present);
+#endif
+
/**
* pci_add_dma_alias - Add a DMA devfn alias for a device
* @dev: the PCI device for which alias is added
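The new pci_pr3_present() helper above only reports whether the device has an ACPI _PR3 power resource; a minimal sketch of how a driver might use it (the function name below is hypothetical, not part of this patch) is to check the upstream bridge before relying on D3cold:

static bool example_d3cold_usable(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	/* only trust D3cold if the parent port exposes _PR3 power resources */
	return bridge && pci_pr3_present(bridge);
}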
@@ -6286,8 +6427,13 @@ static int __init pci_setup(char *str)
pcie_ecrc_get_policy(str + 5);
} else if (!strncmp(str, "hpiosize=", 9)) {
pci_hotplug_io_size = memparse(str + 9, &str);
+ } else if (!strncmp(str, "hpmmiosize=", 11)) {
+ pci_hotplug_mmio_size = memparse(str + 11, &str);
+ } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
+ pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
- pci_hotplug_mem_size = memparse(str + 10, &str);
+ pci_hotplug_mmio_size = memparse(str + 10, &str);
+ pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
} else if (!strncmp(str, "hpbussize=", 10)) {
pci_hotplug_bus_size =
simple_strtoul(str + 10, &str, 0);
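As the parsing above shows, the new parameters can be combined on the kernel command line: booting with pci=hpmmiosize=4M,hpmmioprefsize=128M (the values here are only illustrative) reserves 4 MB of non-prefetchable and 128 MB of prefetchable MMIO window per hotplug bridge, while the old pci=hpmemsize=64M spelling keeps working and now sets both sizes to 64 MB.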
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 3f6947ee3324..a0a53bd05a0b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -12,6 +12,7 @@ extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
+bool pcie_cap_has_rtctl(const struct pci_dev *dev);
/* Functions internal to the PCI core code */
@@ -85,7 +86,7 @@ struct pci_platform_pm_ops {
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_refresh_power_state(struct pci_dev *dev);
-void pci_power_up(struct pci_dev *dev);
+int pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
@@ -104,6 +105,7 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -218,7 +220,8 @@ extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
extern unsigned long pci_hotplug_io_size;
-extern unsigned long pci_hotplug_mem_size;
+extern unsigned long pci_hotplug_mmio_size;
+extern unsigned long pci_hotplug_mmio_pref_size;
extern unsigned long pci_hotplug_bus_size;
/**
@@ -456,6 +459,22 @@ static inline void pci_ats_init(struct pci_dev *d) { }
static inline void pci_restore_ats_state(struct pci_dev *dev) { }
#endif /* CONFIG_PCI_ATS */
+#ifdef CONFIG_PCI_PRI
+void pci_pri_init(struct pci_dev *dev);
+void pci_restore_pri_state(struct pci_dev *pdev);
+#else
+static inline void pci_pri_init(struct pci_dev *dev) { }
+static inline void pci_restore_pri_state(struct pci_dev *pdev) { }
+#endif
+
+#ifdef CONFIG_PCI_PASID
+void pci_pasid_init(struct pci_dev *dev);
+void pci_restore_pasid_state(struct pci_dev *pdev);
+#else
+static inline void pci_pasid_init(struct pci_dev *dev) { }
+static inline void pci_restore_pasid_state(struct pci_dev *pdev) { }
+#endif
+
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
@@ -541,14 +560,6 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
-#ifdef CONFIG_PCIEASPM_DEBUG
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
-#else
-static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { }
-static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { }
-#endif
-
#ifdef CONFIG_PCIE_ECRC
void pcie_set_ecrc_checking(struct pci_dev *dev);
void pcie_ecrc_get_policy(char *str);
@@ -630,19 +641,6 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
#endif /* CONFIG_OF */
-#if defined(CONFIG_OF_ADDRESS)
-int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base);
-#else
-static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
-{
- return -EINVAL;
-}
-#endif
-
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
@@ -667,4 +665,8 @@ static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
}
#endif
+#ifdef CONFIG_PCIEASPM
+extern const struct attribute_group aspm_ctrl_attr_group;
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 362eb8cfa53b..6e3c04b46fb1 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -4,7 +4,6 @@
#
config PCIEPORTBUS
bool "PCI Express Port Bus support"
- depends on PCI
help
This enables PCI Express Port Bus support. Users can then enable
support for Native Hot-Plug, Advanced Error Reporting, Power
@@ -63,7 +62,6 @@ config PCIE_ECRC
#
config PCIEASPM
bool "PCI Express ASPM control" if EXPERT
- depends on PCI && PCIEPORTBUS
default y
help
This enables OS control over PCI Express ASPM (Active State
@@ -79,13 +77,6 @@ config PCIEASPM
When in doubt, say Y.
-config PCIEASPM_DEBUG
- bool "Debug PCI Express ASPM"
- depends on PCIEASPM
- help
- This enables PCI Express ASPM debug support. It will add per-device
- interface to control ASPM.
-
choice
prompt "Default ASPM policy"
default PCIEASPM_DEFAULT
@@ -135,7 +126,6 @@ config PCIE_DPC
config PCIE_PTM
bool "PCI Express Precision Time Measurement support"
- depends on PCIEPORTBUS
help
This enables PCI Express Precision Time Measurement (PTM)
support.
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index b45bc47d04fe..1ca86f2e0166 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "AER: " fmt
#define dev_fmt pr_fmt
+#include <linux/bitops.h>
#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
@@ -36,7 +37,7 @@
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
-#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/
+#define AER_MAX_TYPEOF_UNCOR_ERRS	27	/* as per PCI_ERR_UNCOR_STATUS */
struct aer_err_source {
unsigned int status;
@@ -201,6 +202,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
/**
* pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ * @str: ECRC policy from kernel command line to use
*/
void pcie_ecrc_get_policy(char *str)
{
@@ -448,12 +450,70 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
+void pci_save_aer_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+ int pos;
+
+ pos = dev->aer_cap;
+ if (!pos)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++);
+ if (pcie_cap_has_rtctl(dev))
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++);
+}
+
+void pci_restore_aer_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+ int pos;
+
+ pos = dev->aer_cap;
+ if (!pos)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++);
+ if (pcie_cap_has_rtctl(dev))
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++);
+}
+
void pci_aer_init(struct pci_dev *dev)
{
+ int n;
+
dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!dev->aer_cap)
+ return;
- if (dev->aer_cap)
- dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+ dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+
+ /*
+ * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
+	 * PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root Ports and Root Complex
+	 * Event Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0,
+	 * sec 7.8.4).
+ */
+ n = pcie_cap_has_rtctl(dev) ? 5 : 4;
+ pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
pci_cleanup_aer_error_status_regs(dev);
}
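To connect the pieces above: pci_aer_init() reserves sizeof(u32) * n bytes, i.e. 20 bytes when pcie_cap_has_rtctl() is true (Root Ports and Root Complex Event Collectors, which also get PCI_ERR_ROOT_COMMAND) and 16 bytes otherwise, and pci_save_aer_state()/pci_restore_aer_state() walk exactly the same registers in the same order, so the save-buffer layout and its consumers stay in sync.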
@@ -560,6 +620,7 @@ static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
"BlockedTLP", /* Bit Position 23 */
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
+ "PoisonTLPBlocked", /* Bit Position 26 */
};
static const char *aer_agent_string[] = {
@@ -657,7 +718,8 @@ const struct attribute_group aer_stats_attr_group = {
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
- int status, i, max = -1;
+ unsigned long status = info->status & ~info->mask;
+ int i, max = -1;
u64 *counter = NULL;
struct aer_stats *aer_stats = pdev->aer_stats;
@@ -682,10 +744,8 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
break;
}
- status = (info->status & ~info->mask);
- for (i = 0; i < max; i++)
- if (status & (1 << i))
- counter[i]++;
+ for_each_set_bit(i, &status, max)
+ counter[i]++;
}
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
@@ -717,14 +777,11 @@ static void __print_tlp_header(struct pci_dev *dev,
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
- int i, status;
+ unsigned long status = info->status & ~info->mask;
const char *errmsg = NULL;
- status = (info->status & ~info->mask);
-
- for (i = 0; i < 32; i++) {
- if (!(status & (1 << i)))
- continue;
+ int i;
+ for_each_set_bit(i, &status, 32) {
if (info->severity == AER_CORRECTABLE)
errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
aer_correctable_error_string[i] : NULL;
@@ -1204,7 +1261,8 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
/**
* aer_isr - consume errors detected by root port
- * @work: definition of this work item
+ * @irq: IRQ assigned to Root Port
+ * @context: pointer to Root Port data structure
*
* Invoked, as DPC, when root port records new detected error
*/
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 652ef23bba35..0dcd44308228 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -64,6 +64,7 @@ struct pcie_link_state {
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ u32 clkpm_disable:1; /* Clock PM disabled */
/* Exit latencies */
struct aspm_latency latency_up; /* Upstream direction exit latency */
@@ -161,8 +162,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- /* Don't enable Clock PM if the link is not Clock PM capable */
- if (!link->clkpm_capable)
+ /*
+ * Don't enable Clock PM if the link is not Clock PM capable
+ * or Clock PM is disabled
+ */
+ if (!link->clkpm_capable || link->clkpm_disable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
@@ -192,7 +196,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- link->clkpm_capable = (blacklist) ? 0 : capable;
+ link->clkpm_capable = capable;
+ link->clkpm_disable = blacklist ? 1 : 0;
}
static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -894,6 +899,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
return link;
}
+static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
+{
+ struct pci_dev *child;
+
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
+ sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
+}
+
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
* It is called after the pcie and its children devices are scanned.
@@ -955,6 +968,8 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
+ pcie_aspm_update_sysfs_visibility(pdev);
+
unlock:
mutex_unlock(&aspm_lock);
out:
@@ -1061,19 +1076,26 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
{
- struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link;
+ struct pci_dev *bridge;
if (!pci_is_pcie(pdev))
- return 0;
+ return NULL;
- if (pcie_downstream_port(pdev))
- parent = pdev;
- if (!parent || !parent->link_state)
- return -EINVAL;
+ bridge = pci_upstream_bridge(pdev);
+ if (!bridge || !pci_is_pcie(bridge))
+ return NULL;
+ return bridge->link_state;
+}
+
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+{
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+
+ if (!link)
+ return -EINVAL;
/*
* A driver requested that ASPM be disabled on this device, but
* if we don't have permission to manage ASPM (e.g., on ACPI
@@ -1090,17 +1112,24 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
if (sem)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link = parent->link_state;
if (state & PCIE_LINK_STATE_L0S)
link->aspm_disable |= ASPM_STATE_L0S;
if (state & PCIE_LINK_STATE_L1)
- link->aspm_disable |= ASPM_STATE_L1;
+ /* L1 PM substates require L1 */
+ link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1_1)
+ link->aspm_disable |= ASPM_STATE_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2)
+ link->aspm_disable |= ASPM_STATE_L1_2;
+ if (state & PCIE_LINK_STATE_L1_1_PCIPM)
+ link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
+ if (state & PCIE_LINK_STATE_L1_2_PCIPM)
+ link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
- if (state & PCIE_LINK_STATE_CLKPM) {
- link->clkpm_capable = 0;
- pcie_set_clkpm(link, 0);
- }
+ if (state & PCIE_LINK_STATE_CLKPM)
+ link->clkpm_disable = 1;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
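With the L1 PM substate flags now folded into link->aspm_disable above, a latency-sensitive driver could opt out of just the L1.2 substates through the existing pci_disable_link_state() wrapper; the probe function below is purely illustrative and not part of this patch:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* hypothetical driver probe; real error handling omitted */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2 |
				     PCIE_LINK_STATE_L1_2_PCIPM);

	return 0;
}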
@@ -1172,127 +1201,161 @@ module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
/**
* pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
* @pdev: Target device.
+ *
+ * Relies on the upstream bridge's link_state being valid. The link_state
+ * is deallocated only when the last child of the bridge (i.e., @pdev or a
+ * sibling) is removed, and the caller should be holding a reference to
+ * @pdev, so this should be safe.
*/
bool pcie_aspm_enabled(struct pci_dev *pdev)
{
- struct pci_dev *bridge = pci_upstream_bridge(pdev);
- bool ret;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- if (!bridge)
+ if (!link)
return false;
- mutex_lock(&aspm_lock);
- ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
- mutex_unlock(&aspm_lock);
-
- return ret;
+ return link->aspm_enabled;
}
EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
-#ifdef CONFIG_PCIEASPM_DEBUG
-static ssize_t link_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t aspm_attr_show_common(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, u8 state)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- return sprintf(buf, "%d\n", link_state->aspm_enabled);
+ return sprintf(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}
-static ssize_t link_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
+static ssize_t aspm_attr_store_common(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, u8 state)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pcie_link_state *link, *root = pdev->link_state->root;
- u32 state;
-
- if (aspm_disabled)
- return -EPERM;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ bool state_enable;
- if (kstrtouint(buf, 10, &state))
- return -EINVAL;
- if ((state & ~ASPM_STATE_ALL) != 0)
+ if (strtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- list_for_each_entry(link, &link_list, sibling) {
- if (link->root != root)
- continue;
- pcie_config_aspm_link(link, state);
+
+ if (state_enable) {
+ link->aspm_disable &= ~state;
+ /* need to enable L1 for substates */
+ if (state & ASPM_STATE_L1SS)
+ link->aspm_disable &= ~ASPM_STATE_L1;
+ } else {
+ link->aspm_disable |= state;
}
+
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
- return n;
+
+ return len;
}
-static ssize_t clk_ctl_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+#define ASPM_ATTR(_f, _s) \
+static ssize_t _f##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
+ \
+static ssize_t _f##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
+
+ASPM_ATTR(l0s_aspm, L0S)
+ASPM_ATTR(l1_aspm, L1)
+ASPM_ATTR(l1_1_aspm, L1_1)
+ASPM_ATTR(l1_2_aspm, L1_2)
+ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
+ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
+
+static ssize_t clkpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- return sprintf(buf, "%d\n", link_state->clkpm_enabled);
+ return sprintf(buf, "%d\n", link->clkpm_enabled);
}
-static ssize_t clk_ctl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
+static ssize_t clkpm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct pci_dev *pdev = to_pci_dev(dev);
- bool state;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ bool state_enable;
- if (strtobool(buf, &state))
+ if (strtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clkpm_nocheck(pdev->link_state, state);
+
+ link->clkpm_disable = !state_enable;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
- return n;
+ return len;
}
-static DEVICE_ATTR_RW(link_state);
-static DEVICE_ATTR_RW(clk_ctl);
+static DEVICE_ATTR_RW(clkpm);
+static DEVICE_ATTR_RW(l0s_aspm);
+static DEVICE_ATTR_RW(l1_aspm);
+static DEVICE_ATTR_RW(l1_1_aspm);
+static DEVICE_ATTR_RW(l1_2_aspm);
+static DEVICE_ATTR_RW(l1_1_pcipm);
+static DEVICE_ATTR_RW(l1_2_pcipm);
+
+static struct attribute *aspm_ctrl_attrs[] = {
+ &dev_attr_clkpm.attr,
+ &dev_attr_l0s_aspm.attr,
+ &dev_attr_l1_aspm.attr,
+ &dev_attr_l1_1_aspm.attr,
+ &dev_attr_l1_2_aspm.attr,
+ &dev_attr_l1_1_pcipm.attr,
+ &dev_attr_l1_2_pcipm.attr,
+ NULL
+};
-static char power_group[] = "power";
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
+static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (!link_state)
- return;
-
- if (link_state->aspm_support)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clkpm_capable)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
-}
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ static const u8 aspm_state_map[] = {
+ ASPM_STATE_L0S,
+ ASPM_STATE_L1,
+ ASPM_STATE_L1_1,
+ ASPM_STATE_L1_2,
+ ASPM_STATE_L1_1_PCIPM,
+ ASPM_STATE_L1_2_PCIPM,
+ };
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
+ if (aspm_disabled || !link)
+ return 0;
- if (!link_state)
- return;
+ if (n == 0)
+ return link->clkpm_capable ? a->mode : 0;
- if (link_state->aspm_support)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clkpm_capable)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
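+ /*
+ * Attributes 1..6 follow aspm_ctrl_attrs[] order and map 1:1 onto
+ * aspm_state_map[]; e.g. n == 3 (l1_1_aspm) is visible only when the
+ * link is ASPM L1.1 capable.
+ */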
+ return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
}
-#endif
+
+const struct attribute_group aspm_ctrl_attr_group = {
+ .name = "link",
+ .attrs = aspm_ctrl_attrs,
+ .is_visible = aspm_ctrl_attrs_are_visible,
+};
static int __init pcie_aspm_disable(char *str)
{
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index a32ec3487a8d..e06f42f58d3d 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -291,7 +291,7 @@ static int dpc_probe(struct pcie_device *dev)
int status;
u16 ctl, cap;
- if (pcie_aer_get_firmware_first(pdev))
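+ /* With pcie_ports=dpc-native, keep DPC even when firmware owns AER */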
+ if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 944827a8c7d3..1e673619b101 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,6 +25,8 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 5
+extern bool pcie_ports_dpc_native;
+
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
#else
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 1b330129089f..5075cb9e850c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -250,8 +250,13 @@ static int get_port_device_capability(struct pci_dev *dev)
pcie_pme_interrupt_enable(dev, false);
}
+ /*
+ * With dpc-native, allow Linux to use DPC even if it doesn't have
+ * permission to use AER.
+ */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) &&
- pci_aer_available() && services & PCIE_PORT_SERVICE_AER)
+ pci_aer_available() &&
+ (pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 0a87091a0800..160d67c59310 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -29,12 +29,20 @@ bool pcie_ports_disabled;
*/
bool pcie_ports_native;
+/*
+ * If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe
+ * service even if the platform hasn't given us permission.
+ */
+bool pcie_ports_dpc_native;
+
static int __init pcie_port_setup(char *str)
{
if (!strncmp(str, "compat", 6))
pcie_ports_disabled = true;
else if (!strncmp(str, "native", 6))
pcie_ports_native = true;
+ else if (!strncmp(str, "dpc-native", 10))
+ pcie_ports_dpc_native = true;
return 1;
}
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 98cfa30f3fae..9361f3aa26ab 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -21,7 +21,7 @@ static void pci_ptm_info(struct pci_dev *dev)
snprintf(clock_desc, sizeof(clock_desc), ">254ns");
break;
default:
- snprintf(clock_desc, sizeof(clock_desc), "%udns",
+ snprintf(clock_desc, sizeof(clock_desc), "%uns",
dev->ptm_granularity);
break;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3d5271a7a849..512cb4312ddd 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/msi.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
@@ -572,6 +573,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
bridge->release_fn(bridge);
pci_free_resource_list(&bridge->windows);
+ pci_free_resource_list(&bridge->dma_ranges);
}
static void pci_release_host_bridge_dev(struct device *dev)
@@ -897,6 +899,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
else
pr_info("PCI host bridge to bus %s\n", name);
+ if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
+ dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
+
/* Add initial resources to the bus */
resource_list_for_each_entry_safe(window, n, &resources) {
list_move_tail(&window->node, &bridge->windows);
@@ -1089,14 +1094,15 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* @sec: updated with secondary bus number from EA
* @sub: updated with subordinate bus number from EA
*
- * If @dev is a bridge with EA capability, update @sec and @sub with
- * fixed bus numbers from the capability and return true. Otherwise,
- * return false.
+ * If @dev is a bridge with EA capability that specifies valid secondary
+ * and subordinate bus numbers, return true with the bus numbers in @sec
+ * and @sub. Otherwise return false.
*/
static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
{
int ea, offset;
u32 dw;
+ u8 ea_sec, ea_sub;
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
return false;
@@ -1108,8 +1114,13 @@ static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
offset = ea + PCI_EA_FIRST_ENT;
pci_read_config_dword(dev, offset, &dw);
- *sec = dw & PCI_EA_SEC_BUS_MASK;
- *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ ea_sec = dw & PCI_EA_SEC_BUS_MASK;
+ ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
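+ /* Reject entries with an unset secondary bus or an inverted bus range */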
+ if (ea_sec == 0 || ea_sub < ea_sec)
+ return false;
+
+ *sec = ea_sec;
+ *sub = ea_sub;
return true;
}
@@ -2300,8 +2311,7 @@ void pcie_report_downtraining(struct pci_dev *dev)
static void pci_init_capabilities(struct pci_dev *dev)
{
- /* Enhanced Allocation */
- pci_ea_init(dev);
+ pci_ea_init(dev); /* Enhanced Allocation */
/* Setup MSI caps & disable MSI/MSI-X interrupts */
pci_msi_setup_pci_dev(dev);
@@ -2309,29 +2319,16 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Buffers for saving PCIe and PCI-X capabilities */
pci_allocate_cap_save_buffers(dev);
- /* Power Management */
- pci_pm_init(dev);
-
- /* Vital Product Data */
- pci_vpd_init(dev);
-
- /* Alternative Routing-ID Forwarding */
- pci_configure_ari(dev);
-
- /* Single Root I/O Virtualization */
- pci_iov_init(dev);
-
- /* Address Translation Services */
- pci_ats_init(dev);
-
- /* Enable ACS P2P upstream forwarding */
- pci_enable_acs(dev);
-
- /* Precision Time Measurement */
- pci_ptm_init(dev);
-
- /* Advanced Error Reporting */
- pci_aer_init(dev);
+ pci_pm_init(dev); /* Power Management */
+ pci_vpd_init(dev); /* Vital Product Data */
+ pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */
+ pci_iov_init(dev); /* Single Root I/O Virtualization */
+ pci_ats_init(dev); /* Address Translation Services */
+ pci_pri_init(dev); /* Page Request Interface */
+ pci_pasid_init(dev); /* Process Address Space ID */
+ pci_enable_acs(dev); /* Enable ACS P2P upstream forwarding */
+ pci_ptm_init(dev); /* Precision Time Measurement */
+ pci_aer_init(dev); /* Advanced Error Reporting */
pcie_report_downtraining(dev);
@@ -2403,13 +2400,10 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
- /* Moved out from quirk header fixup code */
pci_reassigndev_resource_alignment(dev);
- /* Clear the state_saved flag */
dev->state_saved = false;
- /* Initialize various capabilities */
pci_init_capabilities(dev);
/*
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 5495537c60c2..6ef74bf5013f 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -258,13 +258,13 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Make sure the caller is mapping a real resource for this device */
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (dev->resource[i].flags & res_bit &&
pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
- if (i >= PCI_ROM_RESOURCE)
+ if (i >= PCI_STD_NUM_BARS)
return -ENODEV;
if (fpriv->mmap_state == pci_mmap_mem &&
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 320255e5e8f8..4937a088d7d8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -474,7 +474,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
int i;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *r = &dev->resource[i];
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
@@ -1809,7 +1809,7 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
* The next five BARs all seem to be rubbish, so just clean
* them out.
*/
- for (i = 1; i < 6; i++)
+ for (i = 1; i < PCI_STD_NUM_BARS; i++)
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
@@ -4033,7 +4033,6 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
if (id)
pci_add_dma_alias(dev, id->driver_data);
}
-
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
/*
@@ -4081,6 +4080,40 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
/*
+ * Intel Visual Compute Accelerator (VCA) is a family of PCIe add-in devices
+ * exposing computational units via Non Transparent Bridges (NTB, PEX 87xx).
+ *
+ * Similarly to MIC x200, we need to add DMA aliases to allow buffer access
+ * when the IOMMU is enabled. These aliases allow computational unit access to
+ * host memory. These aliases mark the whole VCA device as one IOMMU
+ * group.
+ *
+ * All possible slot numbers (0x20) are used, since we are unable to tell
+ * which slot is used on the other side. This quirk is intended for both host
+ * and computational unit sides. The VCA devices have up to five functions
+ * (four for DMA channels and one additional).
+ */
+static void quirk_pex_vca_alias(struct pci_dev *pdev)
+{
+ const unsigned int num_pci_slots = 0x20;
+ unsigned int slot;
+
+ for (slot = 0; slot < num_pci_slots; slot++) {
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x1));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x2));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x3));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x4));
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
+
+/*
* The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are
* associated not at the root bus, but at a bridge below. This quirk avoids
* generating invalid DMA aliases.
@@ -4263,6 +4296,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
quirk_chelsio_T5_disable_root_port_attributes);
/*
+ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
+ * by a device
+ * @acs_ctrl_req: Bitmask of desired ACS controls
+ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
+ * the hardware design
+ *
+ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
+ * in @acs_ctrl_ena, i.e., the device provides all the access controls the
+ * caller desires. Return 0 otherwise.
+ */
+static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
+{
+ if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
+ return 1;
+ return 0;
+}
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
@@ -4305,7 +4356,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
- return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
return -ENODEV;
#endif
@@ -4313,33 +4364,38 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
{
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ switch (dev->device) {
/*
- * Effectively selects all downstream ports for whole ThunderX 1
- * family by 0xf800 mask (which represents 8 SoCs), while the lower
- * bits of device ID are used to indicate which subdevice is used
- * within the SoC.
+ * Effectively selects all downstream ports for the whole ThunderX1
+ * family (which represents 8 SoCs).
*/
- return (pci_is_pcie(dev) &&
- (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
- ((dev->device & 0xf800) == 0xa000));
+ case 0xa000 ... 0xa7ff: /* ThunderX1 */
+ case 0xaf84: /* ThunderX2 */
+ case 0xb884: /* ThunderX3 */
+ return true;
+ default:
+ return false;
+ }
}
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
+ if (!pci_quirk_cavium_acs_match(dev))
+ return -ENOTTY;
+
/*
- * Cavium root ports don't advertise an ACS capability. However,
+ * Cavium Root Ports don't advertise an ACS capability. However,
* the RTL internally implements similar protection as if ACS had
- * Request Redirection, Completion Redirection, Source Validation,
+ * Source Validation, Request Redirection, Completion Redirection,
* and Upstream Forwarding features enabled. Assert that the
* hardware implements and enables equivalent ACS functionality for
* these flags.
*/
- acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
-
- if (!pci_quirk_cavium_acs_match(dev))
- return -ENOTTY;
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4349,13 +4405,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
- * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
* into that category as provided by Intel in Red Hat bugzilla 1037684.
@@ -4403,37 +4458,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
return false;
}
-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
-
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
- INTEL_PCH_ACS_FLAGS : 0;
-
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
- return acs_flags & ~flags ? 0 : 1;
+ if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+
+ return pci_acs_ctrl_enabled(acs_flags, 0);
}
/*
- * These QCOM root ports do provide ACS-like features to disable peer
+ * These QCOM Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. Hardware supports source validation but it
* will report the issue as Completer Abort instead of ACS Violation.
- * Hardware doesn't support peer-to-peer and each root port is a root
- * complex with unique segment numbers. It is not possible for one root
- * port to pass traffic to another root port. All PCIe transactions are
- * terminated inside the root port.
+ * Hardware doesn't support peer-to-peer and each Root Port is a Root
+ * Complex with unique segment numbers. It is not possible for one Root
+ * Port to pass traffic to another Root Port. All PCIe transactions are
+ * terminated inside the Root Port.
*/
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
- int ret = acs_flags & ~flags ? 0 : 1;
-
- pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
-
- return ret;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4534,7 +4584,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
- return acs_flags & ~ctrl ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4548,10 +4598,9 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
* perform peer-to-peer with other functions, allowing us to mask out
* these bits as if they were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4562,9 +4611,8 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
* Allow each Root Port to be in a separate IOMMU group by masking
* SV/RR/CR/UF bits.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static const struct pci_dev_acs_enabled {
@@ -4666,6 +4714,17 @@ static const struct pci_dev_acs_enabled {
{ 0 }
};
+/*
+ * pci_dev_specific_acs_enabled - check whether device provides ACS controls
+ * @dev: PCI device
+ * @acs_flags: Bitmask of desired ACS controls
+ *
+ * Returns:
+ * -ENOTTY: No quirk applies to this device; we can't tell whether the
+ * device provides the desired controls
+ * 0: Device does not provide all the desired controls
+ * >0: Device provides all the controls in @acs_flags
+ */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
@@ -4706,7 +4765,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
#define INTEL_BSPR_REG_BPPD (1 << 9)
/* Upstream Peer Decode Configuration Register */
-#define INTEL_UPDCR_REG 0x1114
+#define INTEL_UPDCR_REG 0x1014
/* 5:0 Peer Decode Enable bits */
#define INTEL_UPDCR_REG_MASK 0x3f
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index e7dbe21705ba..f279826204eb 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -752,24 +752,32 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
/*
- * Helper function for sizing routines: find first available bus resource
- * of a given type. Note: we intentionally skip the bus resources which
- * have already been assigned (that is, have non-NULL parent resource).
+ * Helper function for sizing routines. Assigned resources have a non-NULL
+ * parent resource.
+ *
+ * Return the first unassigned resource of the correct type. If there is
+ * none, return the first assigned resource of the correct type. If neither
+ * exists, return NULL.
+ *
+ * Returning an assigned resource of the correct type lets the caller
+ * distinguish "already assigned" from "no resource of the correct type".
*/
-static struct resource *find_free_bus_resource(struct pci_bus *bus,
- unsigned long type_mask,
- unsigned long type)
+static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
+ unsigned long type_mask,
+ unsigned long type)
{
+ struct resource *r, *r_assigned = NULL;
int i;
- struct resource *r;
pci_bus_for_each_resource(bus, r, i) {
if (r == &ioport_resource || r == &iomem_resource)
continue;
if (r && (r->flags & type_mask) == type && !r->parent)
return r;
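+ /* Remember the first assigned resource of this type as a fallback */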
+ if (r && (r->flags & type_mask) == type && !r_assigned)
+ r_assigned = r;
}
- return NULL;
+ return r_assigned;
}
static resource_size_t calculate_iosize(resource_size_t size,
@@ -866,8 +874,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
- IORESOURCE_IO);
+ struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
+ IORESOURCE_IO);
resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
resource_size_t min_align, align;
@@ -875,6 +883,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (!b_res)
return;
+ /* If resource is already assigned, nothing more to do */
+ if (b_res->parent)
+ return;
+
min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -978,7 +990,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
resource_size_t min_align, align, size, size0, size1;
resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
int order, max_order;
- struct resource *b_res = find_free_bus_resource(bus,
+ struct resource *b_res = find_bus_resource_of_type(bus,
mask | IORESOURCE_PREFETCH, type);
resource_size_t children_add_size = 0;
resource_size_t children_add_align = 0;
@@ -987,6 +999,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (!b_res)
return -ENOSPC;
+ /* If resource is already assigned, nothing more to do */
+ if (b_res->parent)
+ return 0;
+
memset(aligns, 0, sizeof(aligns));
max_order = 0;
size = 0;
@@ -1178,7 +1194,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
struct pci_dev *dev;
unsigned long mask, prefmask, type2 = 0, type3 = 0;
- resource_size_t additional_mem_size = 0, additional_io_size = 0;
+ resource_size_t additional_io_size = 0, additional_mmio_size = 0,
+ additional_mmio_pref_size = 0;
struct resource *b_res;
int ret;
@@ -1212,7 +1229,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
pci_bridge_check_ranges(bus);
if (bus->self->is_hotplug_bridge) {
additional_io_size = pci_hotplug_io_size;
- additional_mem_size = pci_hotplug_mem_size;
+ additional_mmio_size = pci_hotplug_mmio_size;
+ additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
}
/* Fall through */
default:
@@ -1230,9 +1248,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (b_res[2].flags & IORESOURCE_MEM_64) {
prefmask |= IORESOURCE_MEM_64;
ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
/*
* If successful, all non-prefetchable resources
@@ -1254,9 +1272,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (!type2) {
prefmask &= ~IORESOURCE_MEM_64;
ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
/*
* If successful, only non-prefetchable resources
@@ -1265,7 +1283,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (ret == 0)
mask = prefmask;
else
- additional_mem_size += additional_mem_size;
+ additional_mmio_size += additional_mmio_pref_size;
type2 = type3 = IORESOURCE_MEM;
}
@@ -1285,8 +1303,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
* prefetchable resource in a 64-bit prefetchable window.
*/
pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ realloc_head ? 0 : additional_mmio_size,
+ additional_mmio_size, realloc_head);
break;
}
}
@@ -2066,6 +2084,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
unsigned int i;
int ret;
+ down_read(&pci_bus_sem);
+
/* Walk to the root hub, releasing bridge BARs when possible */
next = bridge;
do {
@@ -2100,8 +2120,10 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
next = bridge->bus ? bridge->bus->self : NULL;
} while (next);
- if (list_empty(&saved))
+ if (list_empty(&saved)) {
+ up_read(&pci_bus_sem);
return -ENOENT;
+ }
__pci_bus_size_bridges(bridge->subordinate, &added);
__pci_bridge_assign_resources(bridge, &added, &failed);
@@ -2122,6 +2144,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
}
free_list(&saved);
+ up_read(&pci_bus_sem);
return 0;
cleanup:
@@ -2150,6 +2173,7 @@ cleanup:
pci_setup_bridge(bridge->subordinate);
}
free_list(&saved);
+ up_read(&pci_bus_sem);
return ret;
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 8c94cd3fd1f2..88091bbfe77f 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -675,7 +675,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
return -ENOMEM;
s->global = ioread32(&stdev->mmio_sw_event->global_summary);
- s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
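+ /* part_event_bitmap is 64 bits wide; a 32-bit read truncated it */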
+ s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
@@ -1025,7 +1025,7 @@ static const struct file_operations switchtec_fops = {
.read = switchtec_dev_read,
.poll = switchtec_dev_poll,
.unlocked_ioctl = switchtec_dev_ioctl,
- .compat_ioctl = switchtec_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static void link_event_work(struct work_struct *work)
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index 215425296c77..3dab79e9d52b 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -45,3 +45,14 @@ config PHY_SUN9I_USB
sun9i SoCs.
This driver controls each individual USB 2 host PHY.
+
+config PHY_SUN50I_USB3
+ tristate "Allwinner H6 SoC USB3 PHY driver"
+ depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on RESET_CONTROLLER
+ select GENERIC_PHY
+ help
+ Enable this to support the USB3.0-capable transceiver that is
+ part of the Allwinner H6 SoC.
+
+ This driver controls each individual USB 2+3 host PHY combo.
diff --git a/drivers/phy/allwinner/Makefile b/drivers/phy/allwinner/Makefile
index 799a65c0b58d..bd74901a1255 100644
--- a/drivers/phy/allwinner/Makefile
+++ b/drivers/phy/allwinner/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
obj-$(CONFIG_PHY_SUN6I_MIPI_DPHY) += phy-sun6i-mipi-dphy.o
obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o
+obj-$(CONFIG_PHY_SUN50I_USB3) += phy-sun50i-usb3.o
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
new file mode 100644
index 000000000000..1169f3e83a6f
--- /dev/null
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Allwinner sun50i(H6) USB 3.0 phy driver
+ *
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on phy-sun9i-usb.c, which is:
+ *
+ * Copyright (C) 2014-2015 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on code from Allwinner BSP, which is:
+ *
+ * Copyright (c) 2010-2015 Allwinner Technology Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* Interface Status and Control Registers */
+#define SUNXI_ISCR 0x00
+#define SUNXI_PIPE_CLOCK_CONTROL 0x14
+#define SUNXI_PHY_TUNE_LOW 0x18
+#define SUNXI_PHY_TUNE_HIGH 0x1c
+#define SUNXI_PHY_EXTERNAL_CONTROL 0x20
+
+/* USB2.0 Interface Status and Control Register */
+#define SUNXI_ISCR_FORCE_VBUS (3 << 12)
+
+/* PIPE Clock Control Register */
+#define SUNXI_PCC_PIPE_CLK_OPEN (1 << 6)
+
+/* PHY External Control Register */
+#define SUNXI_PEC_EXTERN_VBUS (3 << 1)
+#define SUNXI_PEC_SSC_EN (1 << 24)
+#define SUNXI_PEC_REF_SSP_EN (1 << 26)
+
+/* PHY Tune High Register */
+#define SUNXI_TX_DEEMPH_3P5DB(n) ((n) << 19)
+#define SUNXI_TX_DEEMPH_3P5DB_MASK GENMASK(24, 19)
+#define SUNXI_TX_DEEMPH_6DB(n) ((n) << 13)
+#define SUNXI_TX_DEEMPH_6GB_MASK GENMASK(18, 13)
+#define SUNXI_TX_SWING_FULL(n) ((n) << 6)
+#define SUNXI_TX_SWING_FULL_MASK GENMASK(12, 6)
+#define SUNXI_LOS_BIAS(n) ((n) << 3)
+#define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
+#define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
+#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
+
+struct sun50i_usb3_phy {
+ struct phy *phy;
+ void __iomem *regs;
+ struct reset_control *reset;
+ struct clk *clk;
+};
+
+static void sun50i_usb3_phy_open(struct sun50i_usb3_phy *phy)
+{
+ u32 val;
+
+ val = readl(phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
+ val |= SUNXI_PEC_EXTERN_VBUS;
+ val |= SUNXI_PEC_SSC_EN | SUNXI_PEC_REF_SSP_EN;
+ writel(val, phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
+
+ val = readl(phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
+ val |= SUNXI_PCC_PIPE_CLK_OPEN;
+ writel(val, phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
+
+ val = readl(phy->regs + SUNXI_ISCR);
+ val |= SUNXI_ISCR_FORCE_VBUS;
+ writel(val, phy->regs + SUNXI_ISCR);
+
+ /*
+ * All the magic numbers written to the PHY_TUNE_{LOW,HIGH}
+ * registers are directly taken from the BSP USB3 driver from
+ * Allwinner.
+ */
+ writel(0x0047fc87, phy->regs + SUNXI_PHY_TUNE_LOW);
+
+ val = readl(phy->regs + SUNXI_PHY_TUNE_HIGH);
+ val &= ~(SUNXI_TXVBOOSTLVL_MASK | SUNXI_LOS_BIAS_MASK |
+ SUNXI_TX_SWING_FULL_MASK | SUNXI_TX_DEEMPH_6GB_MASK |
+ SUNXI_TX_DEEMPH_3P5DB_MASK);
+ val |= SUNXI_TXVBOOSTLVL(0x7);
+ val |= SUNXI_LOS_BIAS(0x7);
+ val |= SUNXI_TX_SWING_FULL(0x55);
+ val |= SUNXI_TX_DEEMPH_6DB(0x20);
+ val |= SUNXI_TX_DEEMPH_3P5DB(0x15);
+ writel(val, phy->regs + SUNXI_PHY_TUNE_HIGH);
+}
+
+static int sun50i_usb3_phy_init(struct phy *_phy)
+{
+ struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
+ int ret;
+
+ ret = clk_prepare_enable(phy->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(phy->reset);
+ if (ret) {
+ clk_disable_unprepare(phy->clk);
+ return ret;
+ }
+
+ sun50i_usb3_phy_open(phy);
+ return 0;
+}
+
+static int sun50i_usb3_phy_exit(struct phy *_phy)
+{
+ struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
+
+ reset_control_assert(phy->reset);
+ clk_disable_unprepare(phy->clk);
+
+ return 0;
+}
+
+static const struct phy_ops sun50i_usb3_phy_ops = {
+ .init = sun50i_usb3_phy_init,
+ .exit = sun50i_usb3_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int sun50i_usb3_phy_probe(struct platform_device *pdev)
+{
+ struct sun50i_usb3_phy *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(phy->clk)) {
+ if (PTR_ERR(phy->clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get phy clock\n");
+ return PTR_ERR(phy->clk);
+ }
+
+ phy->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(phy->reset)) {
+ dev_err(dev, "failed to get reset control\n");
+ return PTR_ERR(phy->reset);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->regs))
+ return PTR_ERR(phy->regs);
+
+ phy->phy = devm_phy_create(dev, NULL, &sun50i_usb3_phy_ops);
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy->phy);
+ }
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id sun50i_usb3_phy_of_match[] = {
+ { .compatible = "allwinner,sun50i-h6-usb3-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sun50i_usb3_phy_of_match);
+
+static struct platform_driver sun50i_usb3_phy_driver = {
+ .probe = sun50i_usb3_phy_probe,
+ .driver = {
+ .of_match_table = sun50i_usb3_phy_of_match,
+ .name = "sun50i-usb3-phy",
+ }
+};
+module_platform_driver(sun50i_usb3_phy_driver);
+
+MODULE_DESCRIPTION("Allwinner H6 USB 3.0 phy driver");
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index ac322d643c7a..08e322789e59 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -50,6 +50,8 @@
#define PHY_R5_PHY_CR_ACK BIT(16)
#define PHY_R5_PHY_BS_OUT BIT(17)
+#define PCIE_RESET_DELAY 500 /* microseconds */
+
struct phy_g12a_usb3_pcie_priv {
struct regmap *regmap;
struct regmap *regmap_cr;
@@ -196,6 +198,10 @@ static int phy_g12a_usb3_init(struct phy *phy)
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
int data, ret;
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
+
/* Switch PHY to USB3 */
/* TODO figure out how to handle when PCIe was set in the bootloader */
regmap_update_bits(priv->regmap, PHY_R0,
@@ -272,24 +278,64 @@ static int phy_g12a_usb3_init(struct phy *phy)
return 0;
}
-static int phy_g12a_usb3_pcie_init(struct phy *phy)
+static int phy_g12a_usb3_pcie_power_on(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ regmap_update_bits(priv->regmap, PHY_R0,
+ PHY_R0_PCIE_POWER_STATE,
+ FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_power_off(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ regmap_update_bits(priv->regmap, PHY_R0,
+ PHY_R0_PCIE_POWER_STATE,
+ FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1d));
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_reset(struct phy *phy)
{
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
int ret;
- ret = reset_control_reset(priv->reset);
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ ret = reset_control_assert(priv->reset);
if (ret)
return ret;
+ udelay(PCIE_RESET_DELAY);
+
+ ret = reset_control_deassert(priv->reset);
+ if (ret)
+ return ret;
+
+ udelay(PCIE_RESET_DELAY);
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_init(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
if (priv->mode == PHY_TYPE_USB3)
return phy_g12a_usb3_init(phy);
- /* Power UP PCIE */
- /* TODO figure out when the bootloader has set USB3 mode before */
- regmap_update_bits(priv->regmap, PHY_R0,
- PHY_R0_PCIE_POWER_STATE,
- FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
-
return 0;
}
@@ -297,7 +343,10 @@ static int phy_g12a_usb3_pcie_exit(struct phy *phy)
{
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
- return reset_control_reset(priv->reset);
+ if (priv->mode == PHY_TYPE_USB3)
+ return reset_control_reset(priv->reset);
+
+ return 0;
}
static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
@@ -326,6 +375,9 @@ static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
static const struct phy_ops phy_g12a_usb3_pcie_ops = {
.init = phy_g12a_usb3_pcie_init,
.exit = phy_g12a_usb3_pcie_exit,
+ .power_on = phy_g12a_usb3_pcie_power_on,
+ .power_off = phy_g12a_usb3_pcie_power_off,
+ .reset = phy_g12a_usb3_pcie_reset,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 3c53625f8bc2..91b5b09589d6 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -126,8 +126,8 @@ enum {
USB_CTRL_SELECTOR_COUNT,
};
-#define USB_CTRL_REG(base, reg) ((void *)base + USB_CTRL_##reg)
-#define USB_XHCI_EC_REG(base, reg) ((void *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
#define USB_CTRL_MASK(reg, field) \
USB_CTRL_##reg##_##field##_MASK
#define USB_CTRL_MASK_FAMILY(params, reg, field) \
@@ -416,7 +416,7 @@ void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void *reg;
+ void __iomem *reg;
mask = params->usb_reg_bits_map[field];
reg = params->ctrl_regs + reg_offset;
@@ -428,7 +428,7 @@ void usb_ctrl_set_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void *reg;
+ void __iomem *reg;
mask = params->usb_reg_bits_map[field];
reg = params->ctrl_regs + reg_offset;
@@ -707,7 +707,7 @@ static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
void __iomem *xhci_ec_base = params->xhci_ec_regs;
u32 val;
- if (params->family_id != 0x74371000 || xhci_ec_base == 0)
+ if (params->family_id != 0x74371000 || !xhci_ec_base)
return;
brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index 9b16f13b5ab2..34a6a9a1ceb2 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -114,7 +114,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
struct hisi_inno_phy_priv *priv;
struct phy_provider *provider;
struct device_node *child;
- struct resource *res;
int i = 0;
int ret;
@@ -122,8 +121,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index 62d10ef20296..f1cb3e4d2add 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -195,7 +195,6 @@ static int histb_combphy_probe(struct platform_device *pdev)
struct histb_combphy_priv *priv;
struct device_node *np = dev->of_node;
struct histb_combphy_mode *mode;
- struct resource *res;
u32 vals[3];
int ret;
@@ -203,8 +202,7 @@ static int histb_combphy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 544d64a84cc0..6e457967653e 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -323,7 +323,8 @@ static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
goto err_disable_pdi_clk;
/* Check if we are in "startup ready" status */
- if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
+ ret = ltq_vrx200_pcie_phy_wait_for_pll(phy);
+ if (ret)
goto err_disable_phy_clk;
ltq_vrx200_pcie_phy_apply_workarounds(phy);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index ded900b06f5a..23bc3bf5c4c0 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -216,20 +216,13 @@ static int mvebu_a3700_utmi_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mvebu_a3700_utmi *utmi;
struct phy_provider *provider;
- struct resource *res;
utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL);
if (!utmi)
return -ENOMEM;
/* Get UTMI memory region */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "Missing UTMI PHY memory resource\n");
- return -ENODEV;
- }
-
- utmi->regs = devm_ioremap_resource(dev, res);
+ utmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(utmi->regs))
return PTR_ERR(utmi->regs);
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 3c9189473407..7a33ec12f71b 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1342,7 +1342,7 @@ static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx,
static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane)
{
int i;
- struct {
+ static const struct {
u32 reg;
u32 val;
} serdes_reg[] = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 39e8deb8001e..091e20303a14 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -165,6 +165,11 @@ static const unsigned int sdm845_ufsphy_regs_layout[] = {
[QPHY_PCS_READY_STATUS] = 0x160,
};
+static const unsigned int sm8150_ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x180,
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -879,6 +884,93 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
+static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V4_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V4_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V4_MULTI_LANE_CTRL1, 0x02),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
@@ -1276,6 +1368,31 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8150_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
+ .tx_tbl = sm8150_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_tx_tbl),
+ .rx_tbl = sm8150_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_rx_tbl),
+ .pcs_tbl = sm8150_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .is_dual_lane_phy = true,
+ .no_pcs_sw_reset = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -1998,6 +2115,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,msm8998-qmp-usb3-phy",
.data = &msm8998_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 335ea5d7ef40..ab6ff9b45a32 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -313,4 +313,100 @@
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
+/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_PLL_IVCO 0x058
+#define QSERDES_V4_COM_CMN_IPTRIM 0x060
+#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074
+#define QSERDES_V4_COM_CP_CTRL_MODE1 0x078
+#define QSERDES_V4_COM_PLL_RCTRL_MODE0 0x07c
+#define QSERDES_V4_COM_PLL_RCTRL_MODE1 0x080
+#define QSERDES_V4_COM_PLL_CCTRL_MODE0 0x084
+#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088
+#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
+#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
+#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
+#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4
+#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc
+#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8
+#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V4_COM_HSCLK_SEL 0x158
+#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
+#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+
+/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_LANE_MODE_1 0x84
+#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
+#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdc
+#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0xe0
+#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0xe4
+#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
+
+/* Only for QMP V4 PHY - RX registers */
+#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V4_RX_RX_TERM_BW 0x080
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V4_RX_SIGDET_LVL 0x120
+#define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V4_RX_RX_BAND 0x128
+#define QSERDES_V4_RX_RX_MODE_00_LOW 0x170
+#define QSERDES_V4_RX_RX_MODE_00_HIGH 0x174
+#define QSERDES_V4_RX_RX_MODE_00_HIGH2 0x178
+#define QSERDES_V4_RX_RX_MODE_00_HIGH3 0x17c
+#define QSERDES_V4_RX_RX_MODE_00_HIGH4 0x180
+#define QSERDES_V4_RX_RX_MODE_01_LOW 0x184
+#define QSERDES_V4_RX_RX_MODE_01_HIGH 0x188
+#define QSERDES_V4_RX_RX_MODE_01_HIGH2 0x18c
+#define QSERDES_V4_RX_RX_MODE_01_HIGH3 0x190
+#define QSERDES_V4_RX_RX_MODE_01_HIGH4 0x194
+#define QSERDES_V4_RX_RX_MODE_10_LOW 0x198
+#define QSERDES_V4_RX_RX_MODE_10_HIGH 0x19c
+#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
+#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
+#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
+
+/* Only for QMP V4 PHY - PCS registers */
+#define QPHY_V4_PHY_START 0x000
+#define QPHY_V4_POWER_DOWN_CONTROL 0x004
+#define QPHY_V4_SW_RESET 0x008
+#define QPHY_V4_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V4_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V4_PLL_CNTL 0x02c
+#define QPHY_V4_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V4_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V4_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V4_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V4_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V4_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V4_LINECFG_DISABLE 0x148
+#define QPHY_V4_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V4_RX_SIGDET_CTRL2 0x158
+#define QPHY_V4_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V4_TX_HS_GEAR_BAND 0x168
+#define QPHY_V4_PCS_READY_STATUS 0x180
+#define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V4_MULTI_LANE_CTRL1 0x1e0
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index b163b3a1558d..61054272a7c8 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -158,8 +158,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy)
/* setup initial state */
qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state,
uphy->vbus_edev);
- ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev,
- EXTCON_USB, &uphy->vbus_notify);
+ ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
if (ret)
goto err_ulpi;
}
@@ -180,6 +180,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy)
{
struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy);
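+ /* Undo the notifier registration done in qcom_usb_hs_phy_power_on() */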
+ if (uphy->vbus_edev)
+ extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
regulator_disable(uphy->v3p3);
regulator_disable(uphy->v1p8);
clk_disable_unprepare(uphy->sleep_clk);
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 2926e4937301..2e279ac0fa4d 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -71,6 +71,7 @@ struct rcar_gen2_phy_driver {
struct rcar_gen2_phy_data {
const struct phy_ops *gen2_phy_ops;
const u32 (*select_value)[PHYS_PER_CHANNEL];
+ const u32 num_channels;
};
static int rcar_gen2_phy_init(struct phy *p)
@@ -271,11 +272,13 @@ static const u32 usb20_select_value[][PHYS_PER_CHANNEL] = {
static const struct rcar_gen2_phy_data rcar_gen2_usb_phy_data = {
.gen2_phy_ops = &rcar_gen2_phy_ops,
.select_value = pci_select_value,
+ .num_channels = ARRAY_SIZE(pci_select_value),
};
static const struct rcar_gen2_phy_data rz_g1c_usb_phy_data = {
.gen2_phy_ops = &rz_g1c_phy_ops,
.select_value = usb20_select_value,
+ .num_channels = ARRAY_SIZE(usb20_select_value),
};
static const struct of_device_id rcar_gen2_phy_match_table[] = {
@@ -389,7 +392,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
channel->selected_phy = -1;
error = of_property_read_u32(np, "reg", &channel_num);
- if (error || channel_num > 2) {
+ if (error || channel_num >= data->num_channels) {
dev_err(dev, "Invalid \"reg\" property\n");
of_node_put(np);
return error;
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index b7f6b1324395..bfb22f868857 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/string.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
@@ -320,9 +321,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
return -EIO;
- if (!strncmp(buf, "host", strlen("host")))
+ if (sysfs_streq(buf, "host"))
new_mode = PHY_MODE_USB_HOST;
- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ else if (sysfs_streq(buf, "peripheral"))
new_mode = PHY_MODE_USB_DEVICE;
else
return -EINVAL;
@@ -614,7 +615,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
return PTR_ERR(channel->base);
/* call request_irq for OTG */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index c454c90cd99e..dbd2de4d28b1 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -35,6 +35,14 @@ config PHY_ROCKCHIP_INNO_USB2
help
Support for Rockchip USB2.0 PHY with Innosilicon IP block.
+config PHY_ROCKCHIP_INNO_DSIDPHY
+ tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip MIPI/LVDS/TTL PHY with
+ Innosilicon IP block.
+
config PHY_ROCKCHIP_PCIE
tristate "Rockchip PCIe PHY Driver"
depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index fd21cbaf40dd..9f59a81e4e0d 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY) += phy-rockchip-inno-dsidphy.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
new file mode 100644
index 000000000000..fc729ecd3fe9
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Wyon Bi <bivvy.bi@rock-chips.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/mfd/syscon.h>
+
+#define PSEC_PER_SEC 1000000000000LL
+
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+
+/*
+ * The offset address[7:0] is split into two parts: bits [7:5] hold the first
+ * address and bits [4:0] hold the second address. When you configure the
+ * registers, you must set both of them. The Clock Lane and Data Lane use the
+ * same registers with the same second address, but the first address is
+ * different.
+ */
+#define FIRST_ADDRESS(x) (((x) & 0x7) << 5)
+#define SECOND_ADDRESS(x) (((x) & 0x1f) << 0)
+#define PHY_REG(first, second) (FIRST_ADDRESS(first) | \
+ SECOND_ADDRESS(second))
+
+/* Analog Register Part: reg00 */
+#define BANDGAP_POWER_MASK BIT(7)
+#define BANDGAP_POWER_DOWN BIT(7)
+#define BANDGAP_POWER_ON 0
+#define LANE_EN_MASK GENMASK(6, 2)
+#define LANE_EN_CK BIT(6)
+#define LANE_EN_3 BIT(5)
+#define LANE_EN_2 BIT(4)
+#define LANE_EN_1 BIT(3)
+#define LANE_EN_0 BIT(2)
+#define POWER_WORK_MASK GENMASK(1, 0)
+#define POWER_WORK_ENABLE UPDATE(1, 1, 0)
+#define POWER_WORK_DISABLE UPDATE(2, 1, 0)
+/* Analog Register Part: reg01 */
+#define REG_SYNCRST_MASK BIT(2)
+#define REG_SYNCRST_RESET BIT(2)
+#define REG_SYNCRST_NORMAL 0
+#define REG_LDOPD_MASK BIT(1)
+#define REG_LDOPD_POWER_DOWN BIT(1)
+#define REG_LDOPD_POWER_ON 0
+#define REG_PLLPD_MASK BIT(0)
+#define REG_PLLPD_POWER_DOWN BIT(0)
+#define REG_PLLPD_POWER_ON 0
+/* Analog Register Part: reg03 */
+#define REG_FBDIV_HI_MASK BIT(5)
+#define REG_FBDIV_HI(x) UPDATE((x >> 8), 5, 5)
+#define REG_PREDIV_MASK GENMASK(4, 0)
+#define REG_PREDIV(x) UPDATE(x, 4, 0)
+/* Analog Register Part: reg04 */
+#define REG_FBDIV_LO_MASK GENMASK(7, 0)
+#define REG_FBDIV_LO(x) UPDATE(x, 7, 0)
+/* Analog Register Part: reg05 */
+#define SAMPLE_CLOCK_PHASE_MASK GENMASK(6, 4)
+#define SAMPLE_CLOCK_PHASE(x) UPDATE(x, 6, 4)
+#define CLOCK_LANE_SKEW_PHASE_MASK GENMASK(2, 0)
+#define CLOCK_LANE_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg06 */
+#define DATA_LANE_3_SKEW_PHASE_MASK GENMASK(6, 4)
+#define DATA_LANE_3_SKEW_PHASE(x) UPDATE(x, 6, 4)
+#define DATA_LANE_2_SKEW_PHASE_MASK GENMASK(2, 0)
+#define DATA_LANE_2_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg07 */
+#define DATA_LANE_1_SKEW_PHASE_MASK GENMASK(6, 4)
+#define DATA_LANE_1_SKEW_PHASE(x) UPDATE(x, 6, 4)
+#define DATA_LANE_0_SKEW_PHASE_MASK GENMASK(2, 0)
+#define DATA_LANE_0_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg08 */
+#define SAMPLE_CLOCK_DIRECTION_MASK BIT(4)
+#define SAMPLE_CLOCK_DIRECTION_REVERSE BIT(4)
+#define SAMPLE_CLOCK_DIRECTION_FORWARD 0
+/* Digital Register Part: reg00 */
+#define REG_DIG_RSTN_MASK BIT(0)
+#define REG_DIG_RSTN_NORMAL BIT(0)
+#define REG_DIG_RSTN_RESET 0
+/* Digital Register Part: reg01 */
+#define INVERT_TXCLKESC_MASK BIT(1)
+#define INVERT_TXCLKESC_ENABLE BIT(1)
+#define INVERT_TXCLKESC_DISABLE 0
+#define INVERT_TXBYTECLKHS_MASK BIT(0)
+#define INVERT_TXBYTECLKHS_ENABLE BIT(0)
+#define INVERT_TXBYTECLKHS_DISABLE 0
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg05 */
+#define T_LPX_CNT_MASK GENMASK(5, 0)
+#define T_LPX_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg06 */
+#define T_HS_PREPARE_CNT_MASK GENMASK(6, 0)
+#define T_HS_PREPARE_CNT(x) UPDATE(x, 6, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg07 */
+#define T_HS_ZERO_CNT_MASK GENMASK(5, 0)
+#define T_HS_ZERO_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg08 */
+#define T_HS_TRAIL_CNT_MASK GENMASK(6, 0)
+#define T_HS_TRAIL_CNT(x) UPDATE(x, 6, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg09 */
+#define T_HS_EXIT_CNT_MASK GENMASK(4, 0)
+#define T_HS_EXIT_CNT(x) UPDATE(x, 4, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0a */
+#define T_CLK_POST_CNT_MASK GENMASK(3, 0)
+#define T_CLK_POST_CNT(x) UPDATE(x, 3, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0c */
+#define LPDT_TX_PPI_SYNC_MASK BIT(2)
+#define LPDT_TX_PPI_SYNC_ENABLE BIT(2)
+#define LPDT_TX_PPI_SYNC_DISABLE 0
+#define T_WAKEUP_CNT_HI_MASK GENMASK(1, 0)
+#define T_WAKEUP_CNT_HI(x) UPDATE(x, 1, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0d */
+#define T_WAKEUP_CNT_LO_MASK GENMASK(7, 0)
+#define T_WAKEUP_CNT_LO(x) UPDATE(x, 7, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0e */
+#define T_CLK_PRE_CNT_MASK GENMASK(3, 0)
+#define T_CLK_PRE_CNT(x) UPDATE(x, 3, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg10 */
+#define T_TA_GO_CNT_MASK GENMASK(5, 0)
+#define T_TA_GO_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg11 */
+#define T_TA_SURE_CNT_MASK GENMASK(5, 0)
+#define T_TA_SURE_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg12 */
+#define T_TA_WAIT_CNT_MASK GENMASK(5, 0)
+#define T_TA_WAIT_CNT(x) UPDATE(x, 5, 0)
+/* LVDS Register Part: reg00 */
+#define LVDS_DIGITAL_INTERNAL_RESET_MASK BIT(2)
+#define LVDS_DIGITAL_INTERNAL_RESET_DISABLE BIT(2)
+#define LVDS_DIGITAL_INTERNAL_RESET_ENABLE 0
+/* LVDS Register Part: reg01 */
+#define LVDS_DIGITAL_INTERNAL_ENABLE_MASK BIT(7)
+#define LVDS_DIGITAL_INTERNAL_ENABLE BIT(7)
+#define LVDS_DIGITAL_INTERNAL_DISABLE 0
+/* LVDS Register Part: reg03 */
+#define MODE_ENABLE_MASK GENMASK(2, 0)
+#define TTL_MODE_ENABLE BIT(2)
+#define LVDS_MODE_ENABLE BIT(1)
+#define MIPI_MODE_ENABLE BIT(0)
+/* LVDS Register Part: reg0b */
+#define LVDS_LANE_EN_MASK GENMASK(7, 3)
+#define LVDS_DATA_LANE0_EN BIT(7)
+#define LVDS_DATA_LANE1_EN BIT(6)
+#define LVDS_DATA_LANE2_EN BIT(5)
+#define LVDS_DATA_LANE3_EN BIT(4)
+#define LVDS_CLK_LANE_EN BIT(3)
+#define LVDS_PLL_POWER_MASK BIT(2)
+#define LVDS_PLL_POWER_OFF BIT(2)
+#define LVDS_PLL_POWER_ON 0
+#define LVDS_BANDGAP_POWER_MASK BIT(0)
+#define LVDS_BANDGAP_POWER_DOWN BIT(0)
+#define LVDS_BANDGAP_POWER_ON 0
+
+#define DSI_PHY_RSTZ 0xa0
+#define PHY_ENABLECLK BIT(2)
+#define DSI_PHY_STATUS 0xb0
+#define PHY_LOCK BIT(0)
+
+struct mipi_dphy_timing {
+ unsigned int clkmiss;
+ unsigned int clkpost;
+ unsigned int clkpre;
+ unsigned int clkprepare;
+ unsigned int clksettle;
+ unsigned int clktermen;
+ unsigned int clktrail;
+ unsigned int clkzero;
+ unsigned int dtermen;
+ unsigned int eot;
+ unsigned int hsexit;
+ unsigned int hsprepare;
+ unsigned int hszero;
+ unsigned int hssettle;
+ unsigned int hsskip;
+ unsigned int hstrail;
+ unsigned int init;
+ unsigned int lpx;
+ unsigned int taget;
+ unsigned int tago;
+ unsigned int tasure;
+ unsigned int wakeup;
+};
+
+struct inno_dsidphy {
+ struct device *dev;
+ struct clk *ref_clk;
+ struct clk *pclk_phy;
+ struct clk *pclk_host;
+ void __iomem *phy_base;
+ void __iomem *host_base;
+ struct reset_control *rst;
+ enum phy_mode mode;
+
+ struct {
+ struct clk_hw hw;
+ u8 prediv;
+ u16 fbdiv;
+ unsigned long rate;
+ } pll;
+};
+
+enum {
+ REGISTER_PART_ANALOG,
+ REGISTER_PART_DIGITAL,
+ REGISTER_PART_CLOCK_LANE,
+ REGISTER_PART_DATA0_LANE,
+ REGISTER_PART_DATA1_LANE,
+ REGISTER_PART_DATA2_LANE,
+ REGISTER_PART_DATA3_LANE,
+ REGISTER_PART_LVDS,
+};
+
+static inline struct inno_dsidphy *hw_to_inno(struct clk_hw *hw)
+{
+ return container_of(hw, struct inno_dsidphy, pll.hw);
+}
+
+static void phy_update_bits(struct inno_dsidphy *inno,
+ u8 first, u8 second, u8 mask, u8 val)
+{
+ u32 reg = PHY_REG(first, second) << 2;
+ unsigned int tmp, orig;
+
+ orig = readl(inno->phy_base + reg);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ writel(tmp, inno->phy_base + reg);
+}
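To make the addressing scheme described above concrete, here is the arithmetic for one register, using the REGISTER_PART_* enum and the PHY_REG()/phy_update_bits() helpers above (a worked example, not additional driver code):

/* Clock-lane reg05 (the T_LPX field): REGISTER_PART_CLOCK_LANE is 2 in the enum. */
u8 packed = PHY_REG(REGISTER_PART_CLOCK_LANE, 0x05);    /* (2 << 5) | 0x05 = 0x45 */
u32 byte_offset = packed << 2;                           /* 0x114, the offset phy_update_bits() writes */

/* Data lane 0 uses the same second address with a different first address: */
u8 packed_d0 = PHY_REG(REGISTER_PART_DATA0_LANE, 0x05);  /* (3 << 5) | 0x05 = 0x65 */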
+
+static void mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ /* Global Operation Timing Parameters */
+ timing->clkmiss = 0;
+ timing->clkpost = 70000 + 52 * period;
+ timing->clkpre = 8 * period;
+ timing->clkprepare = 65000;
+ timing->clksettle = 95000;
+ timing->clktermen = 0;
+ timing->clktrail = 80000;
+ timing->clkzero = 260000;
+ timing->dtermen = 0;
+ timing->eot = 0;
+ timing->hsexit = 120000;
+ timing->hsprepare = 65000 + 4 * period;
+ timing->hszero = 145000 + 6 * period;
+ timing->hssettle = 85000 + 6 * period;
+ timing->hsskip = 40000;
+ timing->hstrail = max(8 * period, 60000 + 4 * period);
+ timing->init = 100000000;
+ timing->lpx = 60000;
+ timing->taget = 5 * timing->lpx;
+ timing->tago = 4 * timing->lpx;
+ timing->tasure = 2 * timing->lpx;
+ timing->wakeup = 1000000000;
+}
+
+static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
+{
+ struct mipi_dphy_timing gotp;
+ const struct {
+ unsigned long rate;
+ u8 hs_prepare;
+ u8 clk_lane_hs_zero;
+ u8 data_lane_hs_zero;
+ u8 hs_trail;
+ } timings[] = {
+ { 110000000, 0x20, 0x16, 0x02, 0x22},
+ { 150000000, 0x06, 0x16, 0x03, 0x45},
+ { 200000000, 0x18, 0x17, 0x04, 0x0b},
+ { 250000000, 0x05, 0x17, 0x05, 0x16},
+ { 300000000, 0x51, 0x18, 0x06, 0x2c},
+ { 400000000, 0x64, 0x19, 0x07, 0x33},
+ { 500000000, 0x20, 0x1b, 0x07, 0x4e},
+ { 600000000, 0x6a, 0x1d, 0x08, 0x3a},
+ { 700000000, 0x3e, 0x1e, 0x08, 0x6a},
+ { 800000000, 0x21, 0x1f, 0x09, 0x29},
+ {1000000000, 0x09, 0x20, 0x09, 0x27},
+ };
+ u32 t_txbyteclkhs, t_txclkesc, ui;
+ u32 txbyteclkhs, txclkesc, esc_clk_div;
+ u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
+ u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
+ unsigned int i;
+
+ /* Select MIPI mode */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
+ MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
+ /* Configure PLL */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_PREDIV_MASK, REG_PREDIV(inno->pll.prediv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_FBDIV_HI_MASK, REG_FBDIV_HI(inno->pll.fbdiv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
+ REG_FBDIV_LO_MASK, REG_FBDIV_LO(inno->pll.fbdiv));
+ /* Enable PLL and LDO */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_LDOPD_MASK | REG_PLLPD_MASK,
+ REG_LDOPD_POWER_ON | REG_PLLPD_POWER_ON);
+ /* Reset analog */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_SYNCRST_MASK, REG_SYNCRST_RESET);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_SYNCRST_MASK, REG_SYNCRST_NORMAL);
+ /* Reset digital */
+ phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
+ REG_DIG_RSTN_MASK, REG_DIG_RSTN_RESET);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
+ REG_DIG_RSTN_MASK, REG_DIG_RSTN_NORMAL);
+
+ txbyteclkhs = inno->pll.rate / 8;
+ t_txbyteclkhs = div_u64(PSEC_PER_SEC, txbyteclkhs);
+
+ esc_clk_div = DIV_ROUND_UP(txbyteclkhs, 20000000);
+ txclkesc = txbyteclkhs / esc_clk_div;
+ t_txclkesc = div_u64(PSEC_PER_SEC, txclkesc);
+
+ ui = div_u64(PSEC_PER_SEC, inno->pll.rate);
+
+ memset(&gotp, 0, sizeof(gotp));
+ mipi_dphy_timing_get_default(&gotp, ui);
+
+ /*
+ * The value of counter for HS Ths-exit
+ * Ths-exit = Tpin_txbyteclkhs * value
+ */
+ hs_exit = DIV_ROUND_UP(gotp.hsexit, t_txbyteclkhs);
+ /*
+ * The value of counter for HS Tclk-post
+ * Tclk-post = Tpin_txbyteclkhs * value
+ */
+ clk_post = DIV_ROUND_UP(gotp.clkpost, t_txbyteclkhs);
+ /*
+ * The value of counter for HS Tclk-pre
+ * Tclk-pre = Tpin_txbyteclkhs * value
+ */
+ clk_pre = DIV_ROUND_UP(gotp.clkpre, t_txbyteclkhs);
+
+ /*
+ * The value of counter for HS Tlpx Time
+ * Tlpx = Tpin_txbyteclkhs * (2 + value)
+ */
+ lpx = DIV_ROUND_UP(gotp.lpx, t_txbyteclkhs);
+ if (lpx >= 2)
+ lpx -= 2;
+
+ /*
+ * The value of counter for HS Tta-go
+ * Tta-go for turnaround
+ * Tta-go = Ttxclkesc * value
+ */
+ ta_go = DIV_ROUND_UP(gotp.tago, t_txclkesc);
+ /*
+ * The value of counter for HS Tta-sure
+ * Tta-sure for turnaround
+ * Tta-sure = Ttxclkesc * value
+ */
+ ta_sure = DIV_ROUND_UP(gotp.tasure, t_txclkesc);
+ /*
+ * The value of counter for HS Tta-wait
+ * Tta-wait for turnaround
+ * Tta-wait = Ttxclkesc * value
+ */
+ ta_wait = DIV_ROUND_UP(gotp.taget, t_txclkesc);
+
+ for (i = 0; i < ARRAY_SIZE(timings); i++)
+ if (inno->pll.rate <= timings[i].rate)
+ break;
+
+ if (i == ARRAY_SIZE(timings))
+ --i;
+
+ hs_prepare = timings[i].hs_prepare;
+ hs_trail = timings[i].hs_trail;
+ clk_lane_hs_zero = timings[i].clk_lane_hs_zero;
+ data_lane_hs_zero = timings[i].data_lane_hs_zero;
+ wakeup = 0x3ff;
+
+ for (i = REGISTER_PART_CLOCK_LANE; i <= REGISTER_PART_DATA3_LANE; i++) {
+ if (i == REGISTER_PART_CLOCK_LANE)
+ hs_zero = clk_lane_hs_zero;
+ else
+ hs_zero = data_lane_hs_zero;
+
+ phy_update_bits(inno, i, 0x05, T_LPX_CNT_MASK,
+ T_LPX_CNT(lpx));
+ phy_update_bits(inno, i, 0x06, T_HS_PREPARE_CNT_MASK,
+ T_HS_PREPARE_CNT(hs_prepare));
+ phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_MASK,
+ T_HS_ZERO_CNT(hs_zero));
+ phy_update_bits(inno, i, 0x08, T_HS_TRAIL_CNT_MASK,
+ T_HS_TRAIL_CNT(hs_trail));
+ phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_MASK,
+ T_HS_EXIT_CNT(hs_exit));
+ phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_MASK,
+ T_CLK_POST_CNT(clk_post));
+ phy_update_bits(inno, i, 0x0e, T_CLK_PRE_CNT_MASK,
+ T_CLK_PRE_CNT(clk_pre));
+ phy_update_bits(inno, i, 0x0c, T_WAKEUP_CNT_HI_MASK,
+ T_WAKEUP_CNT_HI(wakeup >> 8));
+ phy_update_bits(inno, i, 0x0d, T_WAKEUP_CNT_LO_MASK,
+ T_WAKEUP_CNT_LO(wakeup));
+ phy_update_bits(inno, i, 0x10, T_TA_GO_CNT_MASK,
+ T_TA_GO_CNT(ta_go));
+ phy_update_bits(inno, i, 0x11, T_TA_SURE_CNT_MASK,
+ T_TA_SURE_CNT(ta_sure));
+ phy_update_bits(inno, i, 0x12, T_TA_WAIT_CNT_MASK,
+ T_TA_WAIT_CNT(ta_wait));
+ }
+
+ /* Enable all lanes on analog part */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ LANE_EN_MASK, LANE_EN_CK | LANE_EN_3 | LANE_EN_2 |
+ LANE_EN_1 | LANE_EN_0);
+}
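The counter values programmed above are just the picosecond timing targets from mipi_dphy_timing_get_default() divided by the relevant clock period. An illustrative walk-through, assuming pll.rate = 500 MHz (one of the rows in the timings[] table; the numbers are for illustration only):

/* 500 MHz HS bit clock -> 62.5 MHz byte clock -> 16000 ps per byte-clock cycle */
txbyteclkhs   = 500000000 / 8;
t_txbyteclkhs = div_u64(PSEC_PER_SEC, txbyteclkhs);       /* 16000 ps */
ui            = div_u64(PSEC_PER_SEC, 500000000);         /* 2000 ps  */

hs_exit  = DIV_ROUND_UP(120000, t_txbyteclkhs);           /* Ths-exit  120 ns -> 8 cycles  */
clk_post = DIV_ROUND_UP(70000 + 52 * ui, t_txbyteclkhs);  /* Tclk-post 174 ns -> 11 cycles */
lpx      = DIV_ROUND_UP(60000, t_txbyteclkhs) - 2;        /* Tlpx 60 ns -> 4, minus 2 -> 2 */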
+
+static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
+{
+ u8 prediv = 2;
+ u16 fbdiv = 28;
+
+ /* Sample clock reverse direction */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
+ SAMPLE_CLOCK_DIRECTION_MASK,
+ SAMPLE_CLOCK_DIRECTION_REVERSE);
+
+ /* Select LVDS mode */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
+ MODE_ENABLE_MASK, LVDS_MODE_ENABLE);
+ /* Configure PLL */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_PREDIV_MASK, REG_PREDIV(prediv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_FBDIV_HI_MASK, REG_FBDIV_HI(fbdiv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
+ REG_FBDIV_LO_MASK, REG_FBDIV_LO(fbdiv));
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x08, 0xff, 0xfc);
+ /* Enable PLL and Bandgap */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_PLL_POWER_MASK | LVDS_BANDGAP_POWER_MASK,
+ LVDS_PLL_POWER_ON | LVDS_BANDGAP_POWER_ON);
+
+ msleep(20);
+
+ /* Reset LVDS digital logic */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
+ LVDS_DIGITAL_INTERNAL_RESET_MASK,
+ LVDS_DIGITAL_INTERNAL_RESET_ENABLE);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
+ LVDS_DIGITAL_INTERNAL_RESET_MASK,
+ LVDS_DIGITAL_INTERNAL_RESET_DISABLE);
+ /* Enable LVDS digital logic */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
+ LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
+ LVDS_DIGITAL_INTERNAL_ENABLE);
+ /* Enable LVDS analog driver */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_LANE_EN_MASK, LVDS_CLK_LANE_EN |
+ LVDS_DATA_LANE0_EN | LVDS_DATA_LANE1_EN |
+ LVDS_DATA_LANE2_EN | LVDS_DATA_LANE3_EN);
+}
+
+static int inno_dsidphy_power_on(struct phy *phy)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ clk_prepare_enable(inno->pclk_phy);
+ pm_runtime_get_sync(inno->dev);
+
+ /* Bandgap power on */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ BANDGAP_POWER_MASK, BANDGAP_POWER_ON);
+ /* Enable power work */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ POWER_WORK_MASK, POWER_WORK_ENABLE);
+
+ switch (inno->mode) {
+ case PHY_MODE_MIPI_DPHY:
+ inno_dsidphy_mipi_mode_enable(inno);
+ break;
+ case PHY_MODE_LVDS:
+ inno_dsidphy_lvds_mode_enable(inno);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int inno_dsidphy_power_off(struct phy *phy)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00, LANE_EN_MASK, 0);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_LDOPD_MASK | REG_PLLPD_MASK,
+ REG_LDOPD_POWER_DOWN | REG_PLLPD_POWER_DOWN);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ POWER_WORK_MASK, POWER_WORK_DISABLE);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ BANDGAP_POWER_MASK, BANDGAP_POWER_DOWN);
+
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b, LVDS_LANE_EN_MASK, 0);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
+ LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
+ LVDS_DIGITAL_INTERNAL_DISABLE);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_PLL_POWER_MASK | LVDS_BANDGAP_POWER_MASK,
+ LVDS_PLL_POWER_OFF | LVDS_BANDGAP_POWER_DOWN);
+
+ pm_runtime_put(inno->dev);
+ clk_disable_unprepare(inno->pclk_phy);
+
+ return 0;
+}
+
+static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ switch (mode) {
+ case PHY_MODE_MIPI_DPHY:
+ case PHY_MODE_LVDS:
+ inno->mode = mode;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct phy_ops inno_dsidphy_ops = {
+ .set_mode = inno_dsidphy_set_mode,
+ .power_on = inno_dsidphy_power_on,
+ .power_off = inno_dsidphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static unsigned long inno_dsidphy_pll_round_rate(struct inno_dsidphy *inno,
+ unsigned long prate,
+ unsigned long rate,
+ u8 *prediv, u16 *fbdiv)
+{
+ unsigned long best_freq = 0;
+ unsigned long fref, fout;
+ u8 min_prediv, max_prediv;
+ u8 _prediv, best_prediv = 1;
+ u16 _fbdiv, best_fbdiv = 1;
+ u32 min_delta = UINT_MAX;
+
+ /*
+ * The PLL output frequency can be calculated using a simple formula:
+ * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
+ * PLL_Output_Frequency: it is equal to DDR-Clock-Frequency * 2
+ */
+ fref = prate / 2;
+ if (rate > 1000000000UL)
+ fout = 1000000000UL;
+ else
+ fout = rate;
+
+ /* 5 MHz < Fref / prediv < 40 MHz */
+ min_prediv = DIV_ROUND_UP(fref, 40000000);
+ max_prediv = fref / 5000000;
+
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fref);
+ _fbdiv = tmp;
+
+ /*
+ * The possible settings of feedback divider are
+ * 12, 13, 14, 16, ~ 511
+ */
+ if (_fbdiv == 15)
+ continue;
+
+ if (_fbdiv < 12 || _fbdiv > 511)
+ continue;
+
+ tmp = (u64)_fbdiv * fref;
+ do_div(tmp, _prediv);
+
+ delta = abs(fout - tmp);
+ if (!delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ break;
+ } else if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ min_delta = delta;
+ }
+ }
+
+ if (best_freq) {
+ *prediv = best_prediv;
+ *fbdiv = best_fbdiv;
+ }
+
+ return best_freq;
+}
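A concrete pass through the search above, assuming a 24 MHz reference clock (an assumption for illustration; the actual reference is the "ref" clock from the device tree) and a 1 GHz rate request:

/* fref = 24 MHz / 2 = 12 MHz */
/* min_prediv = DIV_ROUND_UP(12 MHz, 40 MHz) = 1, max_prediv = 12 MHz / 5 MHz = 2 */
/* _prediv = 1: _fbdiv = 1 GHz * 1 / 12 MHz = 83  -> fout = 83 * 12 MHz / 1 = 996 MHz  */
/* _prediv = 2: _fbdiv = 166                      -> fout = 166 * 12 MHz / 2 = 996 MHz (no closer) */
/* result: *prediv = 1, *fbdiv = 83, best_freq = 996000000 */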
+
+static long inno_dsidphy_pll_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+ unsigned long fout;
+ u16 fbdiv = 1;
+ u8 prediv = 1;
+
+ fout = inno_dsidphy_pll_round_rate(inno, *prate, rate,
+ &prediv, &fbdiv);
+
+ return fout;
+}
+
+static int inno_dsidphy_pll_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+ unsigned long fout;
+ u16 fbdiv = 1;
+ u8 prediv = 1;
+
+ fout = inno_dsidphy_pll_round_rate(inno, parent_rate, rate,
+ &prediv, &fbdiv);
+
+ dev_dbg(inno->dev, "fin=%lu, fout=%lu, prediv=%u, fbdiv=%u\n",
+ parent_rate, fout, prediv, fbdiv);
+
+ inno->pll.prediv = prediv;
+ inno->pll.fbdiv = fbdiv;
+ inno->pll.rate = fout;
+
+ return 0;
+}
+
+static unsigned long
+inno_dsidphy_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+
+ /* PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2 */
+ return (prate / inno->pll.prediv * inno->pll.fbdiv) / 2;
+}
+
+static const struct clk_ops inno_dsidphy_pll_clk_ops = {
+ .round_rate = inno_dsidphy_pll_clk_round_rate,
+ .set_rate = inno_dsidphy_pll_clk_set_rate,
+ .recalc_rate = inno_dsidphy_pll_clk_recalc_rate,
+};
+
+static int inno_dsidphy_pll_register(struct inno_dsidphy *inno)
+{
+ struct device *dev = inno->dev;
+ struct clk *clk;
+ const char *parent_name;
+ struct clk_init_data init;
+ int ret;
+
+ parent_name = __clk_get_name(inno->ref_clk);
+
+ init.name = "mipi_dphy_pll";
+ ret = of_property_read_string(dev->of_node, "clock-output-names",
+ &init.name);
+ if (ret < 0)
+ dev_dbg(dev, "phy should set clock-output-names property\n");
+
+ init.ops = &inno_dsidphy_pll_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ inno->pll.hw.init = &init;
+ clk = devm_clk_register(dev, &inno->pll.hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &inno->pll.hw);
+}
+
+static int inno_dsidphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct inno_dsidphy *inno;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int ret;
+
+ inno = devm_kzalloc(dev, sizeof(*inno), GFP_KERNEL);
+ if (!inno)
+ return -ENOMEM;
+
+ inno->dev = dev;
+ platform_set_drvdata(pdev, inno);
+
+ inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(inno->phy_base))
+ return PTR_ERR(inno->phy_base);
+
+ inno->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(inno->ref_clk)) {
+ ret = PTR_ERR(inno->ref_clk);
+ dev_err(dev, "failed to get ref clock: %d\n", ret);
+ return ret;
+ }
+
+ inno->pclk_phy = devm_clk_get(dev, "pclk");
+ if (IS_ERR(inno->pclk_phy)) {
+ ret = PTR_ERR(inno->pclk_phy);
+ dev_err(dev, "failed to get phy pclk: %d\n", ret);
+ return ret;
+ }
+
+ inno->rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(inno->rst)) {
+ ret = PTR_ERR(inno->rst);
+ dev_err(dev, "failed to get system reset control: %d\n", ret);
+ return ret;
+ }
+
+ phy = devm_phy_create(dev, NULL, &inno_dsidphy_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ dev_err(dev, "failed to create phy: %d\n", ret);
+ return ret;
+ }
+
+ phy_set_drvdata(phy, inno);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ dev_err(dev, "failed to register phy provider: %d\n", ret);
+ return ret;
+ }
+
+ ret = inno_dsidphy_pll_register(inno);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int inno_dsidphy_remove(struct platform_device *pdev)
+{
+ struct inno_dsidphy *inno = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(inno->dev);
+
+ return 0;
+}
+
+static const struct of_device_id inno_dsidphy_of_match[] = {
+ { .compatible = "rockchip,px30-dsi-dphy", },
+ { .compatible = "rockchip,rk3128-dsi-dphy", },
+ { .compatible = "rockchip,rk3368-dsi-dphy", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, inno_dsidphy_of_match);
+
+static struct platform_driver inno_dsidphy_driver = {
+ .driver = {
+ .name = "inno-dsidphy",
+ .of_match_table = of_match_ptr(inno_dsidphy_of_match),
+ },
+ .probe = inno_dsidphy_probe,
+ .remove = inno_dsidphy_remove,
+};
+module_platform_driver(inno_dsidphy_driver);
+
+MODULE_AUTHOR("Wyon Bi <bivvy.bi@rock-chips.com>");
+MODULE_DESCRIPTION("Innosilicon MIPI/LVDS/TTL Video Combo PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index eae865ff312c..680cc0c8825c 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1423,6 +1423,7 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
};
static const struct of_device_id rockchip_usb2phy_dt_match[] = {
+ { .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 6f3afaf9398f..84c27394c181 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -857,9 +857,32 @@ static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
+static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, USB2_VBUS_ID);
+
+ if (status) {
+ value |= VBUS_OVERRIDE;
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ } else {
+ value &= ~VBUS_OVERRIDE;
+ }
+
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ return 0;
+}
+
static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.probe = tegra186_xusb_padctl_probe,
.remove = tegra186_xusb_padctl_remove,
+ .vbus_override = tegra186_xusb_padctl_vbus_override,
};
static const char * const tegra186_xusb_padctl_supply_names[] = {
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
index 0c0df6897a3b..394913bb2f20 100644
--- a/drivers/phy/tegra/xusb-tegra210.c
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -39,7 +39,10 @@
#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB 0x1
#define XUSB_PADCTL_USB2_PORT_CAP 0x008
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(x) (0x0 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(x) (0x1 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(x) (0x2 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_SS_PORT_MAP 0x014
@@ -47,6 +50,7 @@
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 5)
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 5))
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 5))
+#define XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED 0x7
#define XUSB_PADCTL_ELPG_PROGRAM1 0x024
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31)
@@ -61,9 +65,14 @@
#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (8 + (x)))
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(x) (0x080 + (x) * 0x40)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP (1 << 18)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN (1 << 22)
+
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(x) (0x084 + (x) * 0x40)
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT 7
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK 0x3
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL 0x1
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18 (1 << 6)
#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x088 + (x) * 0x40)
@@ -222,6 +231,12 @@
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(x) (0xa74 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL 0xfcf01368
+#define XUSB_PADCTL_USB2_VBUS_ID 0xc60
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON (1 << 14)
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT 18
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK 0xf
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING 8
+
struct tegra210_xusb_fuse_calibration {
u32 hs_curr_level[4];
u32 hs_term_range_adj;
@@ -940,6 +955,34 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
priv = to_tegra210_xusb_padctl(padctl);
+ if (port->usb3_port_fake != -1) {
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(
+ port->usb3_port_fake);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(
+ port->usb3_port_fake, index);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+ }
+
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
@@ -957,7 +1000,14 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
value &= ~XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(index);
- value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ if (port->mode == USB_DR_MODE_UNKNOWN)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(index);
+ else if (port->mode == USB_DR_MODE_PERIPHERAL)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(index);
+ else if (port->mode == USB_DR_MODE_HOST)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ else if (port->mode == USB_DR_MODE_OTG)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(index);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -989,7 +1039,12 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
value &= ~(XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK <<
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT);
- value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ if (port->mode == USB_DR_MODE_HOST)
+ value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ else
+ value |=
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL <<
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT;
padctl_writel(padctl, value,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
@@ -1062,6 +1117,32 @@ static int tegra210_usb2_phy_power_off(struct phy *phy)
mutex_lock(&padctl->lock);
+ if (port->usb3_port_fake != -1) {
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(250, 350);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(port->usb3_port_fake,
+ XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+ }
+
if (WARN_ON(pad->enable == 0))
goto out;
@@ -1225,13 +1306,10 @@ static int tegra210_hsic_phy_power_on(struct phy *phy)
struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- struct tegra210_xusb_padctl *priv;
unsigned int index = lane->index;
u32 value;
int err;
- priv = to_tegra210_xusb_padctl(padctl);
-
err = regulator_enable(pad->supply);
if (err)
return err;
@@ -1945,6 +2023,52 @@ static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = {
.map = tegra210_usb3_port_map,
};
+static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
+
+ if (status) {
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
+ } else {
+ value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
+
+ return 0;
+}
+
+static int tegra210_utmi_port_reset(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl;
+ struct tegra_xusb_lane *lane;
+ u32 value;
+
+ lane = phy_get_drvdata(phy);
+ padctl = lane->pad->padctl;
+
+ value = padctl_readl(padctl,
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(lane->index));
+
+ if ((value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP) ||
+ (value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN)) {
+ tegra210_xusb_padctl_vbus_override(padctl, false);
+ tegra210_xusb_padctl_vbus_override(padctl, true);
+ return 1;
+ }
+
+ return 0;
+}
+
static int
tegra210_xusb_read_fuse_calibration(struct tegra210_xusb_fuse_calibration *fuse)
{
@@ -2007,6 +2131,8 @@ static const struct tegra_xusb_padctl_ops tegra210_xusb_padctl_ops = {
.remove = tegra210_xusb_padctl_remove,
.usb3_set_lfps_detect = tegra210_usb3_set_lfps_detect,
.hsic_set_idle = tegra210_hsic_set_idle,
+ .vbus_override = tegra210_xusb_padctl_vbus_override,
+ .utmi_port_reset = tegra210_utmi_port_reset,
};
static const char * const tegra210_xusb_padctl_supply_names[] = {
@@ -2036,6 +2162,7 @@ const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc = {
.ops = &tegra210_xusb_padctl_ops,
.supply_names = tegra210_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra210_xusb_padctl_supply_names),
+ .need_fake_usb3_port = true,
};
EXPORT_SYMBOL_GPL(tegra210_xusb_padctl_soc);
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 2ea8497af82a..f98ec3922c02 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -800,9 +800,62 @@ static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
}
}
+static int tegra_xusb_find_unused_usb3_port(struct tegra_xusb_padctl *padctl)
+{
+ struct device_node *np;
+ unsigned int i;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ np = tegra_xusb_find_port_node(padctl, "usb3", i);
+ if (!np || !of_device_is_available(np))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static bool tegra_xusb_port_is_companion(struct tegra_xusb_usb2_port *usb2)
+{
+ unsigned int i;
+ struct tegra_xusb_usb3_port *usb3;
+ struct tegra_xusb_padctl *padctl = usb2->base.padctl;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ usb3 = tegra_xusb_find_usb3_port(padctl, i);
+ if (usb3 && usb3->port == usb2->base.index)
+ return true;
+ }
+
+ return false;
+}
+
+static int tegra_xusb_update_usb3_fake_port(struct tegra_xusb_usb2_port *usb2)
+{
+ int fake;
+
+ /* Disable usb3_port_fake usage by default and assign if needed */
+ usb2->usb3_port_fake = -1;
+
+ if ((usb2->mode == USB_DR_MODE_OTG ||
+ usb2->mode == USB_DR_MODE_PERIPHERAL) &&
+ !tegra_xusb_port_is_companion(usb2)) {
+ fake = tegra_xusb_find_unused_usb3_port(usb2->base.padctl);
+ if (fake < 0) {
+ dev_err(&usb2->base.dev, "no unused USB3 ports available\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&usb2->base.dev, "Found unused usb3 port: %d\n", fake);
+ usb2->usb3_port_fake = fake;
+ }
+
+ return 0;
+}
+
static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_port *port;
+ struct tegra_xusb_usb2_port *usb2;
unsigned int i;
int err = 0;
@@ -832,6 +885,18 @@ static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
goto remove_ports;
}
+ if (padctl->soc->need_fake_usb3_port) {
+ for (i = 0; i < padctl->soc->ports.usb2.count; i++) {
+ usb2 = tegra_xusb_find_usb2_port(padctl, i);
+ if (!usb2)
+ continue;
+
+ err = tegra_xusb_update_usb3_fake_port(usb2);
+ if (err < 0)
+ goto remove_ports;
+ }
+ }
+
list_for_each_entry(port, &padctl->ports, list) {
err = port->ops->enable(port);
if (err < 0)
@@ -862,7 +927,6 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
struct resource *res;
- unsigned int i;
int err;
/* for backwards compatibility with old device trees */
@@ -907,8 +971,9 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
goto remove;
}
- for (i = 0; i < padctl->soc->num_supplies; i++)
- padctl->supplies[i].supply = padctl->soc->supply_names[i];
+ regulator_bulk_set_supply_names(padctl->supplies,
+ padctl->soc->supply_names,
+ padctl->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, padctl->soc->num_supplies,
padctl->supplies);
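regulator_bulk_set_supply_names() replaces the removed open-coded loop; it simply copies each name into the .supply field of the corresponding regulator_bulk_data entry. Roughly (a sketch of the helper's behaviour, not a quote of its source):

void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
				     const char *const *supply_names,
				     unsigned int num_supplies)
{
	unsigned int i;

	for (i = 0; i < num_supplies; i++)
		consumers[i].supply = supply_names[i];
}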
@@ -1056,6 +1121,28 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_set_lfps_detect);
+int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool val)
+{
+ if (padctl->soc->ops->vbus_override)
+ return padctl->soc->ops->vbus_override(padctl, val);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_set_vbus_override);
+
+int tegra_phy_xusb_utmi_port_reset(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+
+ if (padctl->soc->ops->utmi_port_reset)
+ return padctl->soc->ops->utmi_port_reset(phy);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index 093076ca27fd..da94fcce6307 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -291,6 +291,7 @@ struct tegra_xusb_usb2_port {
struct regulator *supply;
enum usb_dr_mode mode;
bool internal;
+ int usb3_port_fake;
};
static inline struct tegra_xusb_usb2_port *
@@ -372,6 +373,8 @@ struct tegra_xusb_padctl_ops {
unsigned int index, bool idle);
int (*usb3_set_lfps_detect)(struct tegra_xusb_padctl *padctl,
unsigned int index, bool enable);
+ int (*vbus_override)(struct tegra_xusb_padctl *padctl, bool set);
+ int (*utmi_port_reset)(struct phy *phy);
};
struct tegra_xusb_padctl_soc {
@@ -389,6 +392,7 @@ struct tegra_xusb_padctl_soc {
const char * const *supply_names;
unsigned int num_supplies;
+ bool need_fake_usb3_port;
};
struct tegra_xusb_padctl {
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index cbcce7cf0028..26f194779064 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -189,7 +189,6 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct usb_otg *otg;
const struct of_device_id *of_id;
- const struct usb_phy_data *phy_data;
int error;
of_id = of_match_device(of_match_ptr(dm816x_usb_phy_id_table),
@@ -220,8 +219,6 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
if (phy->usbphy_ctrl == 0x2c)
phy->instance = 1;
- phy_data = of_id->data;
-
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index a52c5bb35033..a28bd15297f5 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -69,11 +69,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
rgmii_id = 1;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b372419d61f2..3bfbf2ff6e2b 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -32,15 +32,15 @@ config DEBUG_PINCTRL
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
config PINCTRL_ARTPEC6
- bool "Axis ARTPEC-6 pin controller driver"
- depends on MACH_ARTPEC6
- select PINMUX
- select GENERIC_PINCONF
- help
- This is the driver for the Axis ARTPEC-6 pin controller. This driver
- supports pin function multiplexing as well as pin bias and drive
- strength configuration. Device tree integration instructions can be
- found in Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+ bool "Axis ARTPEC-6 pin controller driver"
+ depends on MACH_ARTPEC6
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This is the driver for the Axis ARTPEC-6 pin controller. This driver
+ supports pin function multiplexing as well as pin bias and drive
+ strength configuration. Device tree integration instructions can be
+ found in Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
config PINCTRL_AS3722
tristate "Pinctrl and GPIO driver for ams AS3722 PMIC"
@@ -420,4 +420,22 @@ config PINCTRL_TB10X
depends on OF && ARC_PLAT_TB10X
select GPIOLIB
+config PINCTRL_EQUILIBRIUM
+ tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
+ select PINMUX
+ select PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ help
+ The Equilibrium pinctrl driver is a pinctrl and GPIO driver for the Intel
+ Lightning Mountain network processor SoC. It supports both the Linux GPIO
+ and pin control frameworks, and provides interfaces to set up pinmux,
+ assign desired pin functions and configure GPIO attributes for LGM SoC
+ pins. Pinmux and pinconf settings are retrieved from the device tree.
+
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index ac537fdbc998..879f312bfb75 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
+obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 5dfe7188a5f8..5a0c8e87aa7c 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -915,7 +915,6 @@ static int owl_gpio_init(struct owl_pinctrl *pctrl)
int owl_pinctrl_probe(struct platform_device *pdev,
struct owl_pinctrl_soc_data *soc_data)
{
- struct resource *res;
struct owl_pinctrl *pctrl;
int ret, i;
@@ -923,8 +922,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
if (!pctrl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
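Several probe functions in this series are converted to devm_platform_ioremap_resource(), which folds the platform_get_resource() + devm_ioremap_resource() pair into one call. Its behaviour is roughly (a sketch, not the helper's exact source):

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res); /* ERR_PTR on failure, never NULL */
}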
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index bc3b232a727a..f690fc5cd688 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1400,12 +1400,10 @@ static struct pinctrl_desc bcm281xx_pinctrl_desc = {
static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
{
struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl;
- struct resource *res;
struct pinctrl_dev *pctl;
/* So far We can assume there is only 1 bank of registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base)) {
dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
return -ENODEV;
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index dcab2204c60c..4344c5732400 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -940,7 +940,6 @@ static int cygnus_mux_log_init(struct cygnus_pinctrl *pinctrl)
static int cygnus_pinmux_probe(struct platform_device *pdev)
{
struct cygnus_pinctrl *pinctrl;
- struct resource *res;
int i, ret;
struct pinctrl_pin_desc *pins;
unsigned num_pins = ARRAY_SIZE(cygnus_pins);
@@ -953,15 +952,13 @@ static int cygnus_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0)) {
dev_err(&pdev->dev, "unable to map I/O space\n");
return PTR_ERR(pinctrl->base0);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pinctrl->base1 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pinctrl->base1)) {
dev_err(&pdev->dev, "unable to map I/O space\n");
return PTR_ERR(pinctrl->base1);
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 42f7ab383ad9..831a9318c384 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -795,8 +795,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
chip->dev = dev;
platform_set_drvdata(pdev, chip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->base);
@@ -850,7 +849,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
irqc = &chip->irqchip;
- irqc->name = "bcm-iproc-gpio";
+ irqc->name = dev_name(dev);
irqc->irq_ack = iproc_gpio_irq_ack;
irqc->irq_mask = iproc_gpio_irq_mask;
irqc->irq_unmask = iproc_gpio_irq_unmask;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 9fabc451550e..32f268f173d1 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1042,8 +1042,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
@@ -1057,8 +1056,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- pinctrl->pinconf_base = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->pinconf_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(pinctrl->pinconf_base))
return PTR_ERR(pinctrl->pinconf_base);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index e67ae52023ad..bed0124388c0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -64,17 +64,16 @@
* @gc: GPIO chip
* @pctl: pointer to pinctrl_dev
* @pctldesc: pinctrl descriptor
- * @irq_domain: pointer to irq domain
* @lock: lock to protect access to I/O registers
*/
struct nsp_gpio {
struct device *dev;
void __iomem *base;
void __iomem *io_ctrl;
+ struct irq_chip irqchip;
struct gpio_chip gc;
struct pinctrl_dev *pctl;
struct pinctrl_desc pctldesc;
- struct irq_domain *irq_domain;
raw_spinlock_t lock;
};
@@ -136,8 +135,8 @@ static inline bool nsp_get_bit(struct nsp_gpio *chip, enum base_type address,
static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
{
- struct nsp_gpio *chip = (struct nsp_gpio *)data;
- struct gpio_chip gc = chip->gc;
+ struct gpio_chip *gc = (struct gpio_chip *)data;
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
int bit;
unsigned long int_bits = 0;
u32 int_status;
@@ -155,14 +154,14 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
level &= readl(chip->base + NSP_GPIO_INT_MASK);
int_bits = level | event;
- for_each_set_bit(bit, &int_bits, gc.ngpio) {
+ for_each_set_bit(bit, &int_bits, gc->ngpio) {
/*
* Clear the interrupt before invoking the
* handler, so we do not leave any window
*/
writel(BIT(bit), chip->base + NSP_GPIO_EVENT);
generic_handle_irq(
- irq_linear_revmap(chip->irq_domain, bit));
+ irq_linear_revmap(gc->irq.domain, bit));
}
}
@@ -171,7 +170,8 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
static void nsp_gpio_irq_ack(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
u32 val = BIT(gpio);
u32 trigger_type;
@@ -189,7 +189,8 @@ static void nsp_gpio_irq_ack(struct irq_data *d)
*/
static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
u32 trigger_type;
@@ -202,7 +203,8 @@ static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask)
static void nsp_gpio_irq_mask(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&chip->lock, flags);
@@ -212,7 +214,8 @@ static void nsp_gpio_irq_mask(struct irq_data *d)
static void nsp_gpio_irq_unmask(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&chip->lock, flags);
@@ -222,7 +225,8 @@ static void nsp_gpio_irq_unmask(struct irq_data *d)
static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
bool level_low;
bool falling;
@@ -265,16 +269,6 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip nsp_gpio_irq_chip = {
- .name = "gpio-a",
- .irq_enable = nsp_gpio_irq_unmask,
- .irq_disable = nsp_gpio_irq_mask,
- .irq_ack = nsp_gpio_irq_ack,
- .irq_mask = nsp_gpio_irq_mask,
- .irq_unmask = nsp_gpio_irq_unmask,
- .irq_set_type = nsp_gpio_irq_set_type,
-};
-
static int nsp_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
@@ -303,30 +297,36 @@ static int nsp_gpio_direction_output(struct gpio_chip *gc, unsigned gpio,
return 0;
}
-static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int nsp_gpio_get_direction(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
+ int val;
raw_spin_lock_irqsave(&chip->lock, flags);
- nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val));
+ val = nsp_get_bit(chip, REG, NSP_GPIO_OUT_EN, gpio);
raw_spin_unlock_irqrestore(&chip->lock, flags);
- dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+ return !val;
}
-static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
+static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
+ unsigned long flags;
- return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio));
+ raw_spin_lock_irqsave(&chip->lock, flags);
+ nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val));
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+
+ dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
}
-static int nsp_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
- return irq_linear_revmap(chip->irq_domain, offset);
+ return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio));
}
static int nsp_get_groups_count(struct pinctrl_dev *pctldev)
@@ -613,10 +613,9 @@ static const struct of_device_id nsp_gpio_of_match[] = {
static int nsp_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct nsp_gpio *chip;
struct gpio_chip *gc;
- u32 val, count;
+ u32 val;
int irq, ret;
if (of_property_read_u32(pdev->dev.of_node, "ngpios", &val)) {
@@ -631,15 +630,13 @@ static int nsp_gpio_probe(struct platform_device *pdev)
chip->dev = dev;
platform_set_drvdata(pdev, chip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->base);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- chip->io_ctrl = devm_ioremap_resource(dev, res);
+ chip->io_ctrl = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->io_ctrl)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->io_ctrl);
@@ -657,46 +654,47 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
+ gc->get_direction = nsp_gpio_get_direction;
gc->set = nsp_gpio_set;
gc->get = nsp_gpio_get;
- gc->to_irq = nsp_gpio_to_irq;
/* optional GPIO interrupt support */
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- /* Create irq domain so that each pin can be assigned an IRQ.*/
- chip->irq_domain = irq_domain_add_linear(gc->of_node, gc->ngpio,
- &irq_domain_simple_ops,
- chip);
- if (!chip->irq_domain) {
- dev_err(&pdev->dev, "Couldn't allocate IRQ domain\n");
- return -ENXIO;
- }
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqc;
- /* Map each gpio to an IRQ and set the handler for gpiolib. */
- for (count = 0; count < gc->ngpio; count++) {
- int irq = irq_create_mapping(chip->irq_domain, count);
+ irqc = &chip->irqchip;
+ irqc->name = "gpio-a";
+ irqc->irq_ack = nsp_gpio_irq_ack;
+ irqc->irq_mask = nsp_gpio_irq_mask;
+ irqc->irq_unmask = nsp_gpio_irq_unmask;
+ irqc->irq_set_type = nsp_gpio_irq_set_type;
- irq_set_chip_and_handler(irq, &nsp_gpio_irq_chip,
- handle_simple_irq);
- irq_set_chip_data(irq, chip);
- }
+ val = readl(chip->base + NSP_CHIP_A_INT_MASK);
+ val = val | NSP_CHIP_A_GPIO_INT_BIT;
+ writel(val, (chip->base + NSP_CHIP_A_INT_MASK));
/* Install ISR for this GPIO controller. */
- ret = devm_request_irq(&pdev->dev, irq, nsp_gpio_irq_handler,
- IRQF_SHARED, "gpio-a", chip);
+ ret = devm_request_irq(dev, irq, nsp_gpio_irq_handler,
+ IRQF_SHARED, "gpio-a", &chip->gc);
if (ret) {
dev_err(&pdev->dev, "Unable to request IRQ%d: %d\n",
irq, ret);
- goto err_rm_gpiochip;
+ return ret;
}
- val = readl(chip->base + NSP_CHIP_A_INT_MASK);
- val = val | NSP_CHIP_A_GPIO_INT_BIT;
- writel(val, (chip->base + NSP_CHIP_A_INT_MASK));
+ girq = &chip->gc.irq;
+ girq->chip = irqc;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
}
- ret = gpiochip_add_data(gc, chip);
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret < 0) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
@@ -705,15 +703,10 @@ static int nsp_gpio_probe(struct platform_device *pdev)
ret = nsp_gpio_register_pinconf(chip);
if (ret) {
dev_err(dev, "unable to register pinconf\n");
- goto err_rm_gpiochip;
+ return ret;
}
return 0;
-
-err_rm_gpiochip:
- gpiochip_remove(gc);
-
- return ret;
}
static struct platform_driver nsp_gpio_driver = {
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 87618a4e90e4..3756fc9d5826 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -571,8 +571,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
@@ -586,8 +585,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- pinctrl->base2 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base2 = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(pinctrl->base2))
return PTR_ERR(pinctrl->base2);
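Several hunks in this series (nsp-gpio and nsp-mux above, mediatek below) replace the two-step platform_get_resource() + devm_ioremap_resource() sequence with devm_platform_ioremap_resource(). As a rough sketch of what that helper wraps (essentially its in-tree definition, reproduced here for context rather than copied from this patch):

#include <linux/io.h>
#include <linux/platform_device.h>

/* Roughly what devm_platform_ioremap_resource(pdev, index) does. */
static void __iomem *example_ioremap(struct platform_device *pdev,
				     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}

Error handling on the caller side stays the same: the return value is either a valid mapping or an ERR_PTR() to test with IS_ERR().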
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 5d6d8b1e9062..674920daac26 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -29,6 +29,13 @@ struct pinctrl_dt_map {
static void dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
+ int i;
+
+ for (i = 0; i < num_maps; ++i) {
+ kfree_const(map[i].dev_name);
+ map[i].dev_name = NULL;
+ }
+
if (pctldev) {
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
if (ops->dt_free_map)
@@ -63,7 +70,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
/* Initialize common mapping table entry fields */
for (i = 0; i < num_maps; i++) {
- map[i].dev_name = dev_name(p->dev);
+ const char *devname;
+
+ devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
+ if (!devname)
+ goto err_free_map;
+
+ map[i].dev_name = devname;
map[i].name = statename;
if (pctldev)
map[i].ctrl_dev_name = dev_name(pctldev->dev);
@@ -71,10 +84,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
/* Remember the converted mapping table entries */
dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
- if (!dt_map) {
- dt_free_map(pctldev, map, num_maps);
- return -ENOMEM;
- }
+ if (!dt_map)
+ goto err_free_map;
dt_map->pctldev = pctldev;
dt_map->map = map;
@@ -82,6 +93,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
list_add_tail(&dt_map->node, &p->dt_maps);
return pinctrl_register_map(map, num_maps, false);
+
+err_free_map:
+ dt_free_map(pctldev, map, num_maps);
+ return -ENOMEM;
}
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
@@ -147,6 +162,16 @@ static int dt_to_map_one_config(struct pinctrl *p,
ret = ops->dt_node_to_map(pctldev, np_config, &map, &num_maps);
if (ret < 0)
return ret;
+ else if (num_maps == 0) {
+ /*
+ * If we have no valid maps (maybe caused by empty pinctrl node
+ * or a typo), there is no need to remember this, so just
+ * return.
+ */
+ dev_info(p->dev,
+ "there is not valid maps for state %s\n", statename);
+ return 0;
+ }
/* Stash the mapping table chunk away for later use */
return dt_remember_or_free_map(p, statename, pctldev, map, num_maps);
@@ -166,21 +191,6 @@ static int dt_remember_dummy_state(struct pinctrl *p, const char *statename)
return dt_remember_or_free_map(p, statename, NULL, map, 1);
}
-bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
-{
- struct device_node *np;
- struct property *prop;
- int size;
-
- np = pctldev->dev->of_node;
- if (!np)
- return false;
-
- prop = of_find_property(np, "pinctrl-0", &size);
-
- return prop ? true : false;
-}
-
int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
{
struct device_node *np = p->dev->of_node;
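The devicetree.c change above makes each mapping entry own its dev_name string instead of borrowing the pointer returned by dev_name(), so dt_free_map() can release it safely. kstrdup_const()/kfree_const() keep that cheap: strings that live in .rodata are not copied, and kfree_const() only frees what was really allocated. A small self-contained sketch of the pairing (the struct and helpers are illustrative, not part of the pinctrl API):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_map {
	const char *dev_name;
};

static int example_map_set_name(struct example_map *m, const char *name)
{
	/* No copy is made if 'name' lives in .rodata. */
	m->dev_name = kstrdup_const(name, GFP_KERNEL);
	return m->dev_name ? 0 : -ENOMEM;
}

static void example_map_release(struct example_map *m)
{
	/* Frees only if kstrdup_const() actually duplicated the string. */
	kfree_const(m->dev_name);
	m->dev_name = NULL;
}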
diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h
index 00e645d7fac7..efa80779de4f 100644
--- a/drivers/pinctrl/devicetree.h
+++ b/drivers/pinctrl/devicetree.h
@@ -9,8 +9,6 @@ struct of_phandle_args;
#ifdef CONFIG_OF
-bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev);
-
void pinctrl_dt_free_maps(struct pinctrl *p);
int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev);
@@ -23,11 +21,6 @@ int pinctrl_parse_index_with_args(const struct device_node *np,
#else
-static inline bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
-{
- return false;
-}
-
static inline int pinctrl_dt_to_map(struct pinctrl *p,
struct pinctrl_dev *pctldev)
{
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 5f4058033ec6..3ea9ce3e0cd9 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -39,12 +39,12 @@ config PINCTRL_IMX27
config PINCTRL_IMX25
- bool "IMX25 pinctrl driver"
- depends on OF
- depends on SOC_IMX25
- select PINCTRL_IMX
- help
- Say Y here to enable the imx25 pinctrl driver
+ bool "IMX25 pinctrl driver"
+ depends on OF
+ depends on SOC_IMX25
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx25 pinctrl driver
config PINCTRL_IMX35
bool "IMX35 pinctrl driver"
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 452a14f78707..6091947a8f51 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -115,4 +115,11 @@ config PINCTRL_SUNRISEPOINT
provides an interface that allows configuring of PCH pins and
using them as GPIOs.
+config PINCTRL_TIGERLAKE
+ tristate "Intel Tiger Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Tiger Lake PCH pins and using them as GPIOs.
endif
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index cb491e655749..7e620b471ef6 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
+obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2c419fa5d1c1..582fa8a75559 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -165,7 +165,7 @@ struct chv_pinctrl {
struct gpio_chip chip;
struct irq_chip irqchip;
void __iomem *regs;
- unsigned intr_lines[16];
+ unsigned int intr_lines[16];
const struct chv_community *community;
u32 saved_intmask;
struct chv_pin_context *saved_pin_context;
@@ -379,7 +379,7 @@ static const struct chv_community southwest_community = {
.gpio_ranges = southwest_gpio_ranges,
.ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
/*
- * Southwest community can benerate GPIO interrupts only for the
+ * Southwest community can generate GPIO interrupts only for the
* first 8 interrupts. The upper half (8-15) can only be used to
* trigger GPEs.
*/
@@ -1480,7 +1480,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
pending = readl(pctrl->regs + CHV_INTSTAT);
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
- unsigned irq, offset;
+ unsigned int irq, offset;
offset = pctrl->intr_lines[intr_line];
irq = irq_find_mapping(gc->irq.domain, offset);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 83981ad66a71..4860bc9a4e48 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1131,7 +1131,7 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
pending &= enabled;
for_each_set_bit(gpp_offset, &pending, padgrp->size) {
- unsigned irq;
+ unsigned int irq;
irq = irq_find_mapping(gc->irq.domain,
padgrp->gpio_base + gpp_offset);
@@ -1181,7 +1181,7 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
return ret;
}
-static unsigned intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
+static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
unsigned int ngpio = 0;
@@ -1595,16 +1595,65 @@ intel_gpio_is_requested(struct gpio_chip *chip, int base, unsigned int size)
return requested;
}
-static u32
-intel_gpio_update_pad_mode(void __iomem *hostown, u32 mask, u32 value)
+static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
{
u32 curr, updated;
- curr = readl(hostown);
+ curr = readl(reg);
+
updated = (curr & ~mask) | (value & mask);
- writel(updated, hostown);
+ if (curr == updated)
+ return false;
+
+ writel(updated, reg);
+ return true;
+}
+
+static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c,
+ void __iomem *base, unsigned int gpp, u32 saved)
+{
+ const struct intel_community *community = &pctrl->communities[c];
+ const struct intel_padgroup *padgrp = &community->gpps[gpp];
+ struct device *dev = pctrl->dev;
+ u32 requested;
+
+ if (padgrp->gpio_base < 0)
+ return;
- return curr;
+ requested = intel_gpio_is_requested(&pctrl->chip, padgrp->gpio_base, padgrp->size);
+ if (!intel_gpio_update_reg(base + gpp * 4, requested, saved))
+ return;
+
+ dev_dbg(dev, "restored hostown %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
+}
+
+static void intel_restore_intmask(struct intel_pinctrl *pctrl, unsigned int c,
+ void __iomem *base, unsigned int gpp, u32 saved)
+{
+ struct device *dev = pctrl->dev;
+
+ if (!intel_gpio_update_reg(base + gpp * 4, ~0U, saved))
+ return;
+
+ dev_dbg(dev, "restored mask %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
+}
+
+static void intel_restore_padcfg(struct intel_pinctrl *pctrl, unsigned int pin,
+ unsigned int reg, u32 saved)
+{
+ u32 mask = (reg == PADCFG0) ? PADCFG0_GPIORXSTATE : 0;
+ unsigned int n = reg / sizeof(u32);
+ struct device *dev = pctrl->dev;
+ void __iomem *padcfg;
+
+ padcfg = intel_get_padcfg(pctrl, pin, reg);
+ if (!padcfg)
+ return;
+
+ if (!intel_gpio_update_reg(padcfg, ~mask, saved))
+ return;
+
+ dev_dbg(dev, "restored pin %u padcfg%u %#08x\n", pin, n, readl(padcfg));
}
int intel_pinctrl_resume_noirq(struct device *dev)
@@ -1620,37 +1669,13 @@ int intel_pinctrl_resume_noirq(struct device *dev)
pads = pctrl->context.pads;
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
- void __iomem *padcfg;
- u32 val;
if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
- val = readl(padcfg) & ~PADCFG0_GPIORXSTATE;
- if (val != pads[i].padcfg0) {
- writel(pads[i].padcfg0, padcfg);
- dev_dbg(dev, "restored pin %u padcfg0 %#08x\n",
- desc->number, readl(padcfg));
- }
-
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG1);
- val = readl(padcfg);
- if (val != pads[i].padcfg1) {
- writel(pads[i].padcfg1, padcfg);
- dev_dbg(dev, "restored pin %u padcfg1 %#08x\n",
- desc->number, readl(padcfg));
- }
-
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG2);
- if (padcfg) {
- val = readl(padcfg);
- if (val != pads[i].padcfg2) {
- writel(pads[i].padcfg2, padcfg);
- dev_dbg(dev, "restored pin %u padcfg2 %#08x\n",
- desc->number, readl(padcfg));
- }
- }
+ intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
+ intel_restore_padcfg(pctrl, desc->number, PADCFG1, pads[i].padcfg1);
+ intel_restore_padcfg(pctrl, desc->number, PADCFG2, pads[i].padcfg2);
}
communities = pctrl->context.communities;
@@ -1660,30 +1685,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
unsigned int gpp;
base = community->regs + community->ie_offset;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- writel(communities[i].intmask[gpp], base + gpp * 4);
- dev_dbg(dev, "restored mask %d/%u %#08x\n", i, gpp,
- readl(base + gpp * 4));
- }
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ intel_restore_intmask(pctrl, i, base, gpp, communities[i].intmask[gpp]);
base = community->regs + community->hostown_offset;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- const struct intel_padgroup *padgrp = &community->gpps[gpp];
- u32 requested = 0, value = 0;
- u32 saved = communities[i].hostown[gpp];
-
- if (padgrp->gpio_base < 0)
- continue;
-
- requested = intel_gpio_is_requested(&pctrl->chip,
- padgrp->gpio_base, padgrp->size);
- value = intel_gpio_update_pad_mode(base + gpp * 4,
- requested, saved);
- if ((value ^ saved) & requested) {
- dev_warn(dev, "restore hostown %d/%u %#8x->%#8x\n",
- i, gpp, value, saved);
- }
- }
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ intel_restore_hostown(pctrl, i, base, gpp, communities[i].hostown[gpp]);
}
return 0;
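The intel_gpio_update_reg() helper introduced above centralises the resume-path pattern of "read, merge the saved bits under a mask, write back only if something changed", which lets the padcfg/hostown/intmask restore paths share one code shape and only emit dev_dbg() when a register was really rewritten. A hedged sketch of the same read-modify-write idiom outside the driver (names are illustrative):

#include <linux/io.h>
#include <linux/types.h>

/* Update only the bits covered by 'mask'; skip the write when the
 * register already holds the wanted value. Returns true if written.
 */
static bool example_update_reg(void __iomem *reg, u32 mask, u32 value)
{
	u32 curr = readl(reg);
	u32 updated = (curr & ~mask) | (value & mask);

	if (curr == updated)
		return false;

	writel(updated, reg);
	return true;
}

/* In the hunk above, callers pick the mask: ~0U restores the whole
 * register (intmask), while PADCFG0 is restored with
 * ~PADCFG0_GPIORXSTATE so the live RX state bit is never written back.
 */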
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 2e06fb1464ab..7fdf4257df1e 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -33,6 +33,7 @@
.npins = ((e) - (s) + 1), \
}
+/* Lewisburg */
static const struct pinctrl_pin_desc lbg_pins[] = {
/* GPP_A */
PINCTRL_PIN(0, "RCINB"),
@@ -72,7 +73,7 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
PINCTRL_PIN(33, "SRCCLKREQB_4"),
PINCTRL_PIN(34, "SRCCLKREQB_5"),
PINCTRL_PIN(35, "GPP_B_11"),
- PINCTRL_PIN(36, "GLB_RST_WARN_N"),
+ PINCTRL_PIN(36, "SLP_S0B"),
PINCTRL_PIN(37, "PLTRSTB"),
PINCTRL_PIN(38, "SPKR"),
PINCTRL_PIN(39, "GPP_B_15"),
@@ -185,96 +186,96 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
PINCTRL_PIN(141, "GBE_PCI_DIS"),
PINCTRL_PIN(142, "GBE_LAN_DIS"),
PINCTRL_PIN(143, "GPP_I_10"),
- PINCTRL_PIN(144, "GPIO_RCOMP_3P3"),
/* GPP_J */
- PINCTRL_PIN(145, "GBE_LED_0_0"),
- PINCTRL_PIN(146, "GBE_LED_0_1"),
- PINCTRL_PIN(147, "GBE_LED_1_0"),
- PINCTRL_PIN(148, "GBE_LED_1_1"),
- PINCTRL_PIN(149, "GBE_LED_2_0"),
- PINCTRL_PIN(150, "GBE_LED_2_1"),
- PINCTRL_PIN(151, "GBE_LED_3_0"),
- PINCTRL_PIN(152, "GBE_LED_3_1"),
- PINCTRL_PIN(153, "GBE_SCL_0"),
- PINCTRL_PIN(154, "GBE_SDA_0"),
- PINCTRL_PIN(155, "GBE_SCL_1"),
- PINCTRL_PIN(156, "GBE_SDA_1"),
- PINCTRL_PIN(157, "GBE_SCL_2"),
- PINCTRL_PIN(158, "GBE_SDA_2"),
- PINCTRL_PIN(159, "GBE_SCL_3"),
- PINCTRL_PIN(160, "GBE_SDA_3"),
- PINCTRL_PIN(161, "GBE_SDP_0_0"),
- PINCTRL_PIN(162, "GBE_SDP_0_1"),
- PINCTRL_PIN(163, "GBE_SDP_1_0"),
- PINCTRL_PIN(164, "GBE_SDP_1_1"),
- PINCTRL_PIN(165, "GBE_SDP_2_0"),
- PINCTRL_PIN(166, "GBE_SDP_2_1"),
- PINCTRL_PIN(167, "GBE_SDP_3_0"),
- PINCTRL_PIN(168, "GBE_SDP_3_1"),
+ PINCTRL_PIN(144, "GBE_LED_0_0"),
+ PINCTRL_PIN(145, "GBE_LED_0_1"),
+ PINCTRL_PIN(146, "GBE_LED_1_0"),
+ PINCTRL_PIN(147, "GBE_LED_1_1"),
+ PINCTRL_PIN(148, "GBE_LED_2_0"),
+ PINCTRL_PIN(149, "GBE_LED_2_1"),
+ PINCTRL_PIN(150, "GBE_LED_3_0"),
+ PINCTRL_PIN(151, "GBE_LED_3_1"),
+ PINCTRL_PIN(152, "GBE_SCL_0"),
+ PINCTRL_PIN(153, "GBE_SDA_0"),
+ PINCTRL_PIN(154, "GBE_SCL_1"),
+ PINCTRL_PIN(155, "GBE_SDA_1"),
+ PINCTRL_PIN(156, "GBE_SCL_2"),
+ PINCTRL_PIN(157, "GBE_SDA_2"),
+ PINCTRL_PIN(158, "GBE_SCL_3"),
+ PINCTRL_PIN(159, "GBE_SDA_3"),
+ PINCTRL_PIN(160, "GBE_SDP_0_0"),
+ PINCTRL_PIN(161, "GBE_SDP_0_1"),
+ PINCTRL_PIN(162, "GBE_SDP_1_0"),
+ PINCTRL_PIN(163, "GBE_SDP_1_1"),
+ PINCTRL_PIN(164, "GBE_SDP_2_0"),
+ PINCTRL_PIN(165, "GBE_SDP_2_1"),
+ PINCTRL_PIN(166, "GBE_SDP_3_0"),
+ PINCTRL_PIN(167, "GBE_SDP_3_1"),
/* GPP_K */
- PINCTRL_PIN(169, "GBE_RMIICLK"),
- PINCTRL_PIN(170, "GBE_RMII_TXD_0"),
- PINCTRL_PIN(171, "GBE_RMII_TXD_1"),
+ PINCTRL_PIN(168, "GBE_RMIICLK"),
+ PINCTRL_PIN(169, "GBE_RMII_RXD_0"),
+ PINCTRL_PIN(170, "GBE_RMII_RXD_1"),
+ PINCTRL_PIN(171, "GBE_RMII_CRS_DV"),
PINCTRL_PIN(172, "GBE_RMII_TX_EN"),
- PINCTRL_PIN(173, "GBE_RMII_CRS_DV"),
- PINCTRL_PIN(174, "GBE_RMII_RXD_0"),
- PINCTRL_PIN(175, "GBE_RMII_RXD_1"),
- PINCTRL_PIN(176, "GBE_RMII_RX_ER"),
- PINCTRL_PIN(177, "GBE_RMII_ARBIN"),
- PINCTRL_PIN(178, "GBE_RMII_ARB_OUT"),
- PINCTRL_PIN(179, "PE_RST_N"),
- PINCTRL_PIN(180, "GPIO_RCOMP_1P8_3P3"),
+ PINCTRL_PIN(173, "GBE_RMII_TXD_0"),
+ PINCTRL_PIN(174, "GBE_RMII_TXD_1"),
+ PINCTRL_PIN(175, "GBE_RMII_RX_ER"),
+ PINCTRL_PIN(176, "GBE_RMII_ARBIN"),
+ PINCTRL_PIN(177, "GBE_RMII_ARB_OUT"),
+ PINCTRL_PIN(178, "PE_RST_N"),
/* GPP_G */
- PINCTRL_PIN(181, "FAN_TACH_0"),
- PINCTRL_PIN(182, "FAN_TACH_1"),
- PINCTRL_PIN(183, "FAN_TACH_2"),
- PINCTRL_PIN(184, "FAN_TACH_3"),
- PINCTRL_PIN(185, "FAN_TACH_4"),
- PINCTRL_PIN(186, "FAN_TACH_5"),
- PINCTRL_PIN(187, "FAN_TACH_6"),
- PINCTRL_PIN(188, "FAN_TACH_7"),
- PINCTRL_PIN(189, "FAN_PWM_0"),
- PINCTRL_PIN(190, "FAN_PWM_1"),
- PINCTRL_PIN(191, "FAN_PWM_2"),
- PINCTRL_PIN(192, "FAN_PWM_3"),
- PINCTRL_PIN(193, "GSXDOUT"),
- PINCTRL_PIN(194, "GSXSLOAD"),
- PINCTRL_PIN(195, "GSXDIN"),
- PINCTRL_PIN(196, "GSXSRESETB"),
- PINCTRL_PIN(197, "GSXCLK"),
- PINCTRL_PIN(198, "ADR_COMPLETE"),
- PINCTRL_PIN(199, "NMIB"),
- PINCTRL_PIN(200, "SMIB"),
- PINCTRL_PIN(201, "SSATA_DEVSLP_0"),
- PINCTRL_PIN(202, "SSATA_DEVSLP_1"),
- PINCTRL_PIN(203, "SSATA_DEVSLP_2"),
- PINCTRL_PIN(204, "SSATAXPCIE0_SSATAGP0"),
+ PINCTRL_PIN(179, "FAN_TACH_0"),
+ PINCTRL_PIN(180, "FAN_TACH_1"),
+ PINCTRL_PIN(181, "FAN_TACH_2"),
+ PINCTRL_PIN(182, "FAN_TACH_3"),
+ PINCTRL_PIN(183, "FAN_TACH_4"),
+ PINCTRL_PIN(184, "FAN_TACH_5"),
+ PINCTRL_PIN(185, "FAN_TACH_6"),
+ PINCTRL_PIN(186, "FAN_TACH_7"),
+ PINCTRL_PIN(187, "FAN_PWM_0"),
+ PINCTRL_PIN(188, "FAN_PWM_1"),
+ PINCTRL_PIN(189, "FAN_PWM_2"),
+ PINCTRL_PIN(190, "FAN_PWM_3"),
+ PINCTRL_PIN(191, "GSXDOUT"),
+ PINCTRL_PIN(192, "GSXSLOAD"),
+ PINCTRL_PIN(193, "GSXDIN"),
+ PINCTRL_PIN(194, "GSXSRESETB"),
+ PINCTRL_PIN(195, "GSXCLK"),
+ PINCTRL_PIN(196, "ADR_COMPLETE"),
+ PINCTRL_PIN(197, "NMIB"),
+ PINCTRL_PIN(198, "SMIB"),
+ PINCTRL_PIN(199, "SSATA_DEVSLP_0"),
+ PINCTRL_PIN(200, "SSATA_DEVSLP_1"),
+ PINCTRL_PIN(201, "SSATA_DEVSLP_2"),
+ PINCTRL_PIN(202, "SSATAXPCIE0_SSATAGP0"),
/* GPP_H */
- PINCTRL_PIN(205, "SRCCLKREQB_6"),
- PINCTRL_PIN(206, "SRCCLKREQB_7"),
- PINCTRL_PIN(207, "SRCCLKREQB_8"),
- PINCTRL_PIN(208, "SRCCLKREQB_9"),
- PINCTRL_PIN(209, "SRCCLKREQB_10"),
- PINCTRL_PIN(210, "SRCCLKREQB_11"),
- PINCTRL_PIN(211, "SRCCLKREQB_12"),
- PINCTRL_PIN(212, "SRCCLKREQB_13"),
- PINCTRL_PIN(213, "SRCCLKREQB_14"),
- PINCTRL_PIN(214, "SRCCLKREQB_15"),
- PINCTRL_PIN(215, "SML2CLK"),
- PINCTRL_PIN(216, "SML2DATA"),
- PINCTRL_PIN(217, "SML2ALERTB"),
- PINCTRL_PIN(218, "SML3CLK"),
- PINCTRL_PIN(219, "SML3DATA"),
- PINCTRL_PIN(220, "SML3ALERTB"),
- PINCTRL_PIN(221, "SML4CLK"),
- PINCTRL_PIN(222, "SML4DATA"),
- PINCTRL_PIN(223, "SML4ALERTB"),
- PINCTRL_PIN(224, "SSATAXPCIE1_SSATAGP1"),
- PINCTRL_PIN(225, "SSATAXPCIE2_SSATAGP2"),
- PINCTRL_PIN(226, "SSATAXPCIE3_SSATAGP3"),
- PINCTRL_PIN(227, "SSATAXPCIE4_SSATAGP4"),
- PINCTRL_PIN(228, "SSATAXPCIE5_SSATAGP5"),
+ PINCTRL_PIN(203, "SRCCLKREQB_6"),
+ PINCTRL_PIN(204, "SRCCLKREQB_7"),
+ PINCTRL_PIN(205, "SRCCLKREQB_8"),
+ PINCTRL_PIN(206, "SRCCLKREQB_9"),
+ PINCTRL_PIN(207, "SRCCLKREQB_10"),
+ PINCTRL_PIN(208, "SRCCLKREQB_11"),
+ PINCTRL_PIN(209, "SRCCLKREQB_12"),
+ PINCTRL_PIN(210, "SRCCLKREQB_13"),
+ PINCTRL_PIN(211, "SRCCLKREQB_14"),
+ PINCTRL_PIN(212, "SRCCLKREQB_15"),
+ PINCTRL_PIN(213, "SML2CLK"),
+ PINCTRL_PIN(214, "SML2DATA"),
+ PINCTRL_PIN(215, "SML2ALERTB"),
+ PINCTRL_PIN(216, "SML3CLK"),
+ PINCTRL_PIN(217, "SML3DATA"),
+ PINCTRL_PIN(218, "SML3ALERTB"),
+ PINCTRL_PIN(219, "SML4CLK"),
+ PINCTRL_PIN(220, "SML4DATA"),
+ PINCTRL_PIN(221, "SML4ALERTB"),
+ PINCTRL_PIN(222, "SSATAXPCIE1_SSATAGP1"),
+ PINCTRL_PIN(223, "SSATAXPCIE2_SSATAGP2"),
+ PINCTRL_PIN(224, "SSATAXPCIE3_SSATAGP3"),
+ PINCTRL_PIN(225, "SSATAXPCIE4_SSATAGP4"),
+ PINCTRL_PIN(226, "SSATAXPCIE5_SSATAGP5"),
/* GPP_L */
+ PINCTRL_PIN(227, "GPP_L_0"),
+ PINCTRL_PIN(228, "EC_CSME_INTR_OUT"),
PINCTRL_PIN(229, "VISA2CH0_D0"),
PINCTRL_PIN(230, "VISA2CH0_D1"),
PINCTRL_PIN(231, "VISA2CH0_D2"),
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
new file mode 100644
index 000000000000..58572b15b3ce
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Tiger Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define TGL_PAD_OWN 0x020
+#define TGL_PADCFGLOCK 0x080
+#define TGL_HOSTSW_OWN 0x0b0
+#define TGL_GPI_IS 0x100
+#define TGL_GPI_IE 0x120
+
+#define TGL_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define TGL_COMMUNITY(s, e, g) \
+ { \
+ .padown_offset = TGL_PAD_OWN, \
+ .padcfglock_offset = TGL_PADCFGLOCK, \
+ .hostown_offset = TGL_HOSTSW_OWN, \
+ .is_offset = TGL_GPI_IS, \
+ .ie_offset = TGL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Tiger Lake-LP */
+static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+ /* GPP_B */
+ PINCTRL_PIN(0, "CORE_VID_0"),
+ PINCTRL_PIN(1, "CORE_VID_1"),
+ PINCTRL_PIN(2, "VRALERTB"),
+ PINCTRL_PIN(3, "CPU_GP_2"),
+ PINCTRL_PIN(4, "CPU_GP_3"),
+ PINCTRL_PIN(5, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(6, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(7, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(8, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(9, "I2C5_SDA"),
+ PINCTRL_PIN(10, "I2C5_SCL"),
+ PINCTRL_PIN(11, "PMCALERTB"),
+ PINCTRL_PIN(12, "SLP_S0B"),
+ PINCTRL_PIN(13, "PLTRSTB"),
+ PINCTRL_PIN(14, "SPKR"),
+ PINCTRL_PIN(15, "GSPI0_CS0B"),
+ PINCTRL_PIN(16, "GSPI0_CLK"),
+ PINCTRL_PIN(17, "GSPI0_MISO"),
+ PINCTRL_PIN(18, "GSPI0_MOSI"),
+ PINCTRL_PIN(19, "GSPI1_CS0B"),
+ PINCTRL_PIN(20, "GSPI1_CLK"),
+ PINCTRL_PIN(21, "GSPI1_MISO"),
+ PINCTRL_PIN(22, "GSPI1_MOSI"),
+ PINCTRL_PIN(23, "SML1ALERTB"),
+ PINCTRL_PIN(24, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(25, "GSPI1_CLK_LOOPBK"),
+ /* GPP_T */
+ PINCTRL_PIN(26, "I2C6_SDA"),
+ PINCTRL_PIN(27, "I2C6_SCL"),
+ PINCTRL_PIN(28, "I2C7_SDA"),
+ PINCTRL_PIN(29, "I2C7_SCL"),
+ PINCTRL_PIN(30, "UART4_RXD"),
+ PINCTRL_PIN(31, "UART4_TXD"),
+ PINCTRL_PIN(32, "UART4_RTSB"),
+ PINCTRL_PIN(33, "UART4_CTSB"),
+ PINCTRL_PIN(34, "UART5_RXD"),
+ PINCTRL_PIN(35, "UART5_TXD"),
+ PINCTRL_PIN(36, "UART5_RTSB"),
+ PINCTRL_PIN(37, "UART5_CTSB"),
+ PINCTRL_PIN(38, "UART6_RXD"),
+ PINCTRL_PIN(39, "UART6_TXD"),
+ PINCTRL_PIN(40, "UART6_RTSB"),
+ PINCTRL_PIN(41, "UART6_CTSB"),
+ /* GPP_A */
+ PINCTRL_PIN(42, "ESPI_IO_0"),
+ PINCTRL_PIN(43, "ESPI_IO_1"),
+ PINCTRL_PIN(44, "ESPI_IO_2"),
+ PINCTRL_PIN(45, "ESPI_IO_3"),
+ PINCTRL_PIN(46, "ESPI_CSB"),
+ PINCTRL_PIN(47, "ESPI_CLK"),
+ PINCTRL_PIN(48, "ESPI_RESETB"),
+ PINCTRL_PIN(49, "I2S2_SCLK"),
+ PINCTRL_PIN(50, "I2S2_SFRM"),
+ PINCTRL_PIN(51, "I2S2_TXD"),
+ PINCTRL_PIN(52, "I2S2_RXD"),
+ PINCTRL_PIN(53, "PMC_I2C_SDA"),
+ PINCTRL_PIN(54, "SATAXPCIE_1"),
+ PINCTRL_PIN(55, "PMC_I2C_SCL"),
+ PINCTRL_PIN(56, "USB2_OCB_1"),
+ PINCTRL_PIN(57, "USB2_OCB_2"),
+ PINCTRL_PIN(58, "USB2_OCB_3"),
+ PINCTRL_PIN(59, "DDSP_HPD_C"),
+ PINCTRL_PIN(60, "DDSP_HPD_B"),
+ PINCTRL_PIN(61, "DDSP_HPD_1"),
+ PINCTRL_PIN(62, "DDSP_HPD_2"),
+ PINCTRL_PIN(63, "GPPC_A_21"),
+ PINCTRL_PIN(64, "GPPC_A_22"),
+ PINCTRL_PIN(65, "I2S1_SCLK"),
+ PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup tgllp_community0_gpps[] = {
+ TGL_GPP(0, 0, 25), /* GPP_B */
+ TGL_GPP(1, 26, 41), /* GPP_T */
+ TGL_GPP(2, 42, 66), /* GPP_A */
+};
+
+static const struct intel_community tgllp_community0[] = {
+ TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
+ .uid = "0",
+ .pins = tgllp_community0_pins,
+ .npins = ARRAY_SIZE(tgllp_community0_pins),
+ .communities = tgllp_community0,
+ .ncommunities = ARRAY_SIZE(tgllp_community0),
+};
+
+static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
+ /* GPP_S */
+ PINCTRL_PIN(0, "SNDW0_CLK"),
+ PINCTRL_PIN(1, "SNDW0_DATA"),
+ PINCTRL_PIN(2, "SNDW1_CLK"),
+ PINCTRL_PIN(3, "SNDW1_DATA"),
+ PINCTRL_PIN(4, "SNDW2_CLK"),
+ PINCTRL_PIN(5, "SNDW2_DATA"),
+ PINCTRL_PIN(6, "SNDW3_CLK"),
+ PINCTRL_PIN(7, "SNDW3_DATA"),
+ /* GPP_H */
+ PINCTRL_PIN(8, "GPPC_H_0"),
+ PINCTRL_PIN(9, "GPPC_H_1"),
+ PINCTRL_PIN(10, "GPPC_H_2"),
+ PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(12, "I2C2_SDA"),
+ PINCTRL_PIN(13, "I2C2_SCL"),
+ PINCTRL_PIN(14, "I2C3_SDA"),
+ PINCTRL_PIN(15, "I2C3_SCL"),
+ PINCTRL_PIN(16, "I2C4_SDA"),
+ PINCTRL_PIN(17, "I2C4_SCL"),
+ PINCTRL_PIN(18, "SRCCLKREQB_4"),
+ PINCTRL_PIN(19, "SRCCLKREQB_5"),
+ PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(24, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(25, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(26, "CPU_C10_GATEB"),
+ PINCTRL_PIN(27, "TIME_SYNC_0"),
+ PINCTRL_PIN(28, "IMGCLKOUT_1"),
+ PINCTRL_PIN(29, "IMGCLKOUT_2"),
+ PINCTRL_PIN(30, "IMGCLKOUT_3"),
+ PINCTRL_PIN(31, "IMGCLKOUT_4"),
+ /* GPP_D */
+ PINCTRL_PIN(32, "ISH_GP_0"),
+ PINCTRL_PIN(33, "ISH_GP_1"),
+ PINCTRL_PIN(34, "ISH_GP_2"),
+ PINCTRL_PIN(35, "ISH_GP_3"),
+ PINCTRL_PIN(36, "IMGCLKOUT_0"),
+ PINCTRL_PIN(37, "SRCCLKREQB_0"),
+ PINCTRL_PIN(38, "SRCCLKREQB_1"),
+ PINCTRL_PIN(39, "SRCCLKREQB_2"),
+ PINCTRL_PIN(40, "SRCCLKREQB_3"),
+ PINCTRL_PIN(41, "ISH_SPI_CSB"),
+ PINCTRL_PIN(42, "ISH_SPI_CLK"),
+ PINCTRL_PIN(43, "ISH_SPI_MISO"),
+ PINCTRL_PIN(44, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(45, "ISH_UART0_RXD"),
+ PINCTRL_PIN(46, "ISH_UART0_TXD"),
+ PINCTRL_PIN(47, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(48, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(49, "ISH_GP_4"),
+ PINCTRL_PIN(50, "ISH_GP_5"),
+ PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
+ PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
+ /* GPP_U */
+ PINCTRL_PIN(53, "UART3_RXD"),
+ PINCTRL_PIN(54, "UART3_TXD"),
+ PINCTRL_PIN(55, "UART3_RTSB"),
+ PINCTRL_PIN(56, "UART3_CTSB"),
+ PINCTRL_PIN(57, "GSPI3_CS0B"),
+ PINCTRL_PIN(58, "GSPI3_CLK"),
+ PINCTRL_PIN(59, "GSPI3_MISO"),
+ PINCTRL_PIN(60, "GSPI3_MOSI"),
+ PINCTRL_PIN(61, "GSPI4_CS0B"),
+ PINCTRL_PIN(62, "GSPI4_CLK"),
+ PINCTRL_PIN(63, "GSPI4_MISO"),
+ PINCTRL_PIN(64, "GSPI4_MOSI"),
+ PINCTRL_PIN(65, "GSPI5_CS0B"),
+ PINCTRL_PIN(66, "GSPI5_CLK"),
+ PINCTRL_PIN(67, "GSPI5_MISO"),
+ PINCTRL_PIN(68, "GSPI5_MOSI"),
+ PINCTRL_PIN(69, "GSPI6_CS0B"),
+ PINCTRL_PIN(70, "GSPI6_CLK"),
+ PINCTRL_PIN(71, "GSPI6_MISO"),
+ PINCTRL_PIN(72, "GSPI6_MOSI"),
+ PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
+ PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
+ PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
+ PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
+ /* vGPIO */
+ PINCTRL_PIN(77, "CNV_BTEN"),
+ PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(88, "vUART0_TXD"),
+ PINCTRL_PIN(89, "vUART0_RXD"),
+ PINCTRL_PIN(90, "vUART0_CTS_B"),
+ PINCTRL_PIN(91, "vUART0_RTS_B"),
+ PINCTRL_PIN(92, "vISH_UART0_TXD"),
+ PINCTRL_PIN(93, "vISH_UART0_RXD"),
+ PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(100, "vI2S2_SCLK"),
+ PINCTRL_PIN(101, "vI2S2_SFRM"),
+ PINCTRL_PIN(102, "vI2S2_TXD"),
+ PINCTRL_PIN(103, "vI2S2_RXD"),
+};
+
+static const struct intel_padgroup tgllp_community1_gpps[] = {
+ TGL_GPP(0, 0, 7), /* GPP_S */
+ TGL_GPP(1, 8, 31), /* GPP_H */
+ TGL_GPP(2, 32, 52), /* GPP_D */
+ TGL_GPP(3, 53, 76), /* GPP_U */
+ TGL_GPP(4, 77, 103), /* vGPIO */
+};
+
+static const struct intel_community tgllp_community1[] = {
+ TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
+ .uid = "1",
+ .pins = tgllp_community1_pins,
+ .npins = ARRAY_SIZE(tgllp_community1_pins),
+ .communities = tgllp_community1,
+ .ncommunities = ARRAY_SIZE(tgllp_community1),
+};
+
+static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
+ /* GPP_C */
+ PINCTRL_PIN(0, "SMBCLK"),
+ PINCTRL_PIN(1, "SMBDATA"),
+ PINCTRL_PIN(2, "SMBALERTB"),
+ PINCTRL_PIN(3, "SML0CLK"),
+ PINCTRL_PIN(4, "SML0DATA"),
+ PINCTRL_PIN(5, "SML0ALERTB"),
+ PINCTRL_PIN(6, "SML1CLK"),
+ PINCTRL_PIN(7, "SML1DATA"),
+ PINCTRL_PIN(8, "UART0_RXD"),
+ PINCTRL_PIN(9, "UART0_TXD"),
+ PINCTRL_PIN(10, "UART0_RTSB"),
+ PINCTRL_PIN(11, "UART0_CTSB"),
+ PINCTRL_PIN(12, "UART1_RXD"),
+ PINCTRL_PIN(13, "UART1_TXD"),
+ PINCTRL_PIN(14, "UART1_RTSB"),
+ PINCTRL_PIN(15, "UART1_CTSB"),
+ PINCTRL_PIN(16, "I2C0_SDA"),
+ PINCTRL_PIN(17, "I2C0_SCL"),
+ PINCTRL_PIN(18, "I2C1_SDA"),
+ PINCTRL_PIN(19, "I2C1_SCL"),
+ PINCTRL_PIN(20, "UART2_RXD"),
+ PINCTRL_PIN(21, "UART2_TXD"),
+ PINCTRL_PIN(22, "UART2_RTSB"),
+ PINCTRL_PIN(23, "UART2_CTSB"),
+ /* GPP_F */
+ PINCTRL_PIN(24, "CNV_BRI_DT"),
+ PINCTRL_PIN(25, "CNV_BRI_RSP"),
+ PINCTRL_PIN(26, "CNV_RGI_DT"),
+ PINCTRL_PIN(27, "CNV_RGI_RSP"),
+ PINCTRL_PIN(28, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(29, "GPPC_F_5"),
+ PINCTRL_PIN(30, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(31, "GPPC_F_7"),
+ PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
+ PINCTRL_PIN(33, "BOOTMPC"),
+ PINCTRL_PIN(34, "GPPC_F_10"),
+ PINCTRL_PIN(35, "GPPC_F_11"),
+ PINCTRL_PIN(36, "GSXDOUT"),
+ PINCTRL_PIN(37, "GSXSLOAD"),
+ PINCTRL_PIN(38, "GSXDIN"),
+ PINCTRL_PIN(39, "GSXSRESETB"),
+ PINCTRL_PIN(40, "GSXCLK"),
+ PINCTRL_PIN(41, "GMII_MDC"),
+ PINCTRL_PIN(42, "GMII_MDIO"),
+ PINCTRL_PIN(43, "SRCCLKREQB_6"),
+ PINCTRL_PIN(44, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
+ PINCTRL_PIN(46, "VNN_CTRL"),
+ PINCTRL_PIN(47, "V1P05_CTRL"),
+ PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
+ /* HVCMOS */
+ PINCTRL_PIN(49, "L_BKLTEN"),
+ PINCTRL_PIN(50, "L_BKLTCTL"),
+ PINCTRL_PIN(51, "L_VDDEN"),
+ PINCTRL_PIN(52, "SYS_PWROK"),
+ PINCTRL_PIN(53, "SYS_RESETB"),
+ PINCTRL_PIN(54, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(55, "SATAXPCIE_0"),
+ PINCTRL_PIN(56, "SPI1_IO_2"),
+ PINCTRL_PIN(57, "SPI1_IO_3"),
+ PINCTRL_PIN(58, "CPU_GP_0"),
+ PINCTRL_PIN(59, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(60, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(61, "GPPC_E_6"),
+ PINCTRL_PIN(62, "CPU_GP_1"),
+ PINCTRL_PIN(63, "SPI1_CS1B"),
+ PINCTRL_PIN(64, "USB2_OCB_0"),
+ PINCTRL_PIN(65, "SPI1_CSB"),
+ PINCTRL_PIN(66, "SPI1_CLK"),
+ PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(69, "DDSP_HPD_A"),
+ PINCTRL_PIN(70, "ISH_GP_6"),
+ PINCTRL_PIN(71, "ISH_GP_7"),
+ PINCTRL_PIN(72, "GPPC_E_17"),
+ PINCTRL_PIN(73, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(74, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(75, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(76, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(77, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(78, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
+ /* JTAG */
+ PINCTRL_PIN(80, "JTAG_TDO"),
+ PINCTRL_PIN(81, "JTAGX"),
+ PINCTRL_PIN(82, "PRDYB"),
+ PINCTRL_PIN(83, "PREQB"),
+ PINCTRL_PIN(84, "CPU_TRSTB"),
+ PINCTRL_PIN(85, "JTAG_TDI"),
+ PINCTRL_PIN(86, "JTAG_TMS"),
+ PINCTRL_PIN(87, "JTAG_TCK"),
+ PINCTRL_PIN(88, "DBG_PMODE"),
+};
+
+static const struct intel_padgroup tgllp_community4_gpps[] = {
+ TGL_GPP(0, 0, 23), /* GPP_C */
+ TGL_GPP(1, 24, 48), /* GPP_F */
+ TGL_GPP(2, 49, 54), /* HVCMOS */
+ TGL_GPP(3, 55, 79), /* GPP_E */
+ TGL_GPP(4, 80, 88), /* JTAG */
+};
+
+static const struct intel_community tgllp_community4[] = {
+ TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
+ .uid = "4",
+ .pins = tgllp_community4_pins,
+ .npins = ARRAY_SIZE(tgllp_community4_pins),
+ .communities = tgllp_community4,
+ .ncommunities = ARRAY_SIZE(tgllp_community4),
+};
+
+static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
+ /* GPP_R */
+ PINCTRL_PIN(0, "HDA_BCLK"),
+ PINCTRL_PIN(1, "HDA_SYNC"),
+ PINCTRL_PIN(2, "HDA_SDO"),
+ PINCTRL_PIN(3, "HDA_SDI_0"),
+ PINCTRL_PIN(4, "HDA_RSTB"),
+ PINCTRL_PIN(5, "HDA_SDI_1"),
+ PINCTRL_PIN(6, "GPP_R_6"),
+ PINCTRL_PIN(7, "GPP_R_7"),
+ /* SPI */
+ PINCTRL_PIN(8, "SPI0_IO_2"),
+ PINCTRL_PIN(9, "SPI0_IO_3"),
+ PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(12, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(15, "SPI0_CLK"),
+ PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup tgllp_community5_gpps[] = {
+ TGL_GPP(0, 0, 7), /* GPP_R */
+ TGL_GPP(1, 8, 16), /* SPI */
+};
+
+static const struct intel_community tgllp_community5[] = {
+ TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
+ .uid = "5",
+ .pins = tgllp_community5_pins,
+ .npins = ARRAY_SIZE(tgllp_community5_pins),
+ .communities = tgllp_community5,
+ .ncommunities = ARRAY_SIZE(tgllp_community5),
+};
+
+static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
+ &tgllp_community0_soc_data,
+ &tgllp_community1_soc_data,
+ &tgllp_community4_soc_data,
+ &tgllp_community5_soc_data,
+ NULL
+};
+
+static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
+ { "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
+
+static struct platform_driver tgl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_uid,
+ .driver = {
+ .name = "tigerlake-pinctrl",
+ .acpi_match_table = tgl_pinctrl_acpi_match,
+ .pm = &tgl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(tgl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Tiger Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 53f52b9a0acd..67f8444f7a0c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -982,7 +982,6 @@ static const struct mtk_eint_xt mtk_eint_xt = {
static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
if (!of_property_read_bool(np, "interrupt-controller"))
return -ENODEV;
@@ -991,8 +990,7 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
if (!pctl->eint)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ pctl->eint->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->eint->base))
return PTR_ERR(pctl->eint->base);
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index df55f617aa98..3cb119105ddb 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -54,4 +54,10 @@ config PINCTRL_MESON_G12A
select PINCTRL_MESON_AXG_PMX
default y
+config PINCTRL_MESON_A1
+ bool "Meson a1 Soc pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON_AXG_PMX
+ default y
+
endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index a69c565f2f13..1a5bffe953f9 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_PINCTRL_MESON_GXL) += pinctrl-meson-gxl.o
obj-$(CONFIG_PINCTRL_MESON_AXG_PMX) += pinctrl-meson-axg-pmx.o
obj-$(CONFIG_PINCTRL_MESON_AXG) += pinctrl-meson-axg.o
obj-$(CONFIG_PINCTRL_MESON_G12A) += pinctrl-meson-g12a.o
+obj-$(CONFIG_PINCTRL_MESON_A1) += pinctrl-meson-a1.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
new file mode 100644
index 000000000000..0bcec03f344a
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Pin controller and GPIO driver for Amlogic Meson A1 SoC.
+ *
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#include <dt-bindings/gpio/meson-a1-gpio.h>
+#include "pinctrl-meson.h"
+#include "pinctrl-meson-axg-pmx.h"
+
+static const struct pinctrl_pin_desc meson_a1_periphs_pins[] = {
+ MESON_PIN(GPIOP_0),
+ MESON_PIN(GPIOP_1),
+ MESON_PIN(GPIOP_2),
+ MESON_PIN(GPIOP_3),
+ MESON_PIN(GPIOP_4),
+ MESON_PIN(GPIOP_5),
+ MESON_PIN(GPIOP_6),
+ MESON_PIN(GPIOP_7),
+ MESON_PIN(GPIOP_8),
+ MESON_PIN(GPIOP_9),
+ MESON_PIN(GPIOP_10),
+ MESON_PIN(GPIOP_11),
+ MESON_PIN(GPIOP_12),
+ MESON_PIN(GPIOB_0),
+ MESON_PIN(GPIOB_1),
+ MESON_PIN(GPIOB_2),
+ MESON_PIN(GPIOB_3),
+ MESON_PIN(GPIOB_4),
+ MESON_PIN(GPIOB_5),
+ MESON_PIN(GPIOB_6),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOF_0),
+ MESON_PIN(GPIOF_1),
+ MESON_PIN(GPIOF_2),
+ MESON_PIN(GPIOF_3),
+ MESON_PIN(GPIOF_4),
+ MESON_PIN(GPIOF_5),
+ MESON_PIN(GPIOF_6),
+ MESON_PIN(GPIOF_7),
+ MESON_PIN(GPIOF_8),
+ MESON_PIN(GPIOF_9),
+ MESON_PIN(GPIOF_10),
+ MESON_PIN(GPIOF_11),
+ MESON_PIN(GPIOF_12),
+ MESON_PIN(GPIOA_0),
+ MESON_PIN(GPIOA_1),
+ MESON_PIN(GPIOA_2),
+ MESON_PIN(GPIOA_3),
+ MESON_PIN(GPIOA_4),
+ MESON_PIN(GPIOA_5),
+ MESON_PIN(GPIOA_6),
+ MESON_PIN(GPIOA_7),
+ MESON_PIN(GPIOA_8),
+ MESON_PIN(GPIOA_9),
+ MESON_PIN(GPIOA_10),
+ MESON_PIN(GPIOA_11),
+};
+
+/* psram */
+static const unsigned int psram_clkn_pins[] = { GPIOP_0 };
+static const unsigned int psram_clkp_pins[] = { GPIOP_1 };
+static const unsigned int psram_ce_n_pins[] = { GPIOP_2 };
+static const unsigned int psram_rst_n_pins[] = { GPIOP_3 };
+static const unsigned int psram_adq0_pins[] = { GPIOP_4 };
+static const unsigned int psram_adq1_pins[] = { GPIOP_5 };
+static const unsigned int psram_adq2_pins[] = { GPIOP_6 };
+static const unsigned int psram_adq3_pins[] = { GPIOP_7 };
+static const unsigned int psram_adq4_pins[] = { GPIOP_8 };
+static const unsigned int psram_adq5_pins[] = { GPIOP_9 };
+static const unsigned int psram_adq6_pins[] = { GPIOP_10 };
+static const unsigned int psram_adq7_pins[] = { GPIOP_11 };
+static const unsigned int psram_dqs_dm_pins[] = { GPIOP_12 };
+
+/* sdcard */
+static const unsigned int sdcard_d0_b_pins[] = { GPIOB_0 };
+static const unsigned int sdcard_d1_b_pins[] = { GPIOB_1 };
+static const unsigned int sdcard_d2_b_pins[] = { GPIOB_2 };
+static const unsigned int sdcard_d3_b_pins[] = { GPIOB_3 };
+static const unsigned int sdcard_clk_b_pins[] = { GPIOB_4 };
+static const unsigned int sdcard_cmd_b_pins[] = { GPIOB_5 };
+
+static const unsigned int sdcard_d0_x_pins[] = { GPIOX_0 };
+static const unsigned int sdcard_d1_x_pins[] = { GPIOX_1 };
+static const unsigned int sdcard_d2_x_pins[] = { GPIOX_2 };
+static const unsigned int sdcard_d3_x_pins[] = { GPIOX_3 };
+static const unsigned int sdcard_clk_x_pins[] = { GPIOX_4 };
+static const unsigned int sdcard_cmd_x_pins[] = { GPIOX_5 };
+
+/* spif */
+static const unsigned int spif_mo_pins[] = { GPIOB_0 };
+static const unsigned int spif_mi_pins[] = { GPIOB_1 };
+static const unsigned int spif_wp_n_pins[] = { GPIOB_2 };
+static const unsigned int spif_hold_n_pins[] = { GPIOB_3 };
+static const unsigned int spif_clk_pins[] = { GPIOB_4 };
+static const unsigned int spif_cs_pins[] = { GPIOB_5 };
+
+/* i2c0 */
+static const unsigned int i2c0_sck_f9_pins[] = { GPIOF_9 };
+static const unsigned int i2c0_sda_f10_pins[] = { GPIOF_10 };
+static const unsigned int i2c0_sck_f11_pins[] = { GPIOF_11 };
+static const unsigned int i2c0_sda_f12_pins[] = { GPIOF_12 };
+
+/* i2c1 */
+static const unsigned int i2c1_sda_x_pins[] = { GPIOX_9 };
+static const unsigned int i2c1_sck_x_pins[] = { GPIOX_10 };
+static const unsigned int i2c1_sda_a_pins[] = { GPIOA_10 };
+static const unsigned int i2c1_sck_a_pins[] = { GPIOA_11 };
+
+/* i2c2 */
+static const unsigned int i2c2_sck_x0_pins[] = { GPIOX_0 };
+static const unsigned int i2c2_sda_x1_pins[] = { GPIOX_1 };
+static const unsigned int i2c2_sck_x15_pins[] = { GPIOX_15 };
+static const unsigned int i2c2_sda_x16_pins[] = { GPIOX_16 };
+static const unsigned int i2c2_sck_a4_pins[] = { GPIOA_4 };
+static const unsigned int i2c2_sda_a5_pins[] = { GPIOA_5 };
+static const unsigned int i2c2_sck_a8_pins[] = { GPIOA_8 };
+static const unsigned int i2c2_sda_a9_pins[] = { GPIOA_9 };
+
+/* i2c3 */
+static const unsigned int i2c3_sck_f_pins[] = { GPIOF_4 };
+static const unsigned int i2c3_sda_f_pins[] = { GPIOF_5 };
+static const unsigned int i2c3_sck_x_pins[] = { GPIOX_11 };
+static const unsigned int i2c3_sda_x_pins[] = { GPIOX_12 };
+
+/* i2c slave */
+static const unsigned int i2c_slave_sck_a_pins[] = { GPIOA_10 };
+static const unsigned int i2c_slave_sda_a_pins[] = { GPIOA_11 };
+static const unsigned int i2c_slave_sck_f_pins[] = { GPIOF_11 };
+static const unsigned int i2c_slave_sda_f_pins[] = { GPIOF_12 };
+
+/* uart_a */
+static const unsigned int uart_a_tx_pins[] = { GPIOX_11 };
+static const unsigned int uart_a_rx_pins[] = { GPIOX_12 };
+static const unsigned int uart_a_cts_pins[] = { GPIOX_13 };
+static const unsigned int uart_a_rts_pins[] = { GPIOX_14 };
+
+/* uart_b */
+static const unsigned int uart_b_tx_x_pins[] = { GPIOX_7 };
+static const unsigned int uart_b_rx_x_pins[] = { GPIOX_8 };
+static const unsigned int uart_b_tx_f_pins[] = { GPIOF_0 };
+static const unsigned int uart_b_rx_f_pins[] = { GPIOF_1 };
+
+/* uart_c */
+static const unsigned int uart_c_tx_x0_pins[] = { GPIOX_0 };
+static const unsigned int uart_c_rx_x1_pins[] = { GPIOX_1 };
+static const unsigned int uart_c_cts_pins[] = { GPIOX_2 };
+static const unsigned int uart_c_rts_pins[] = { GPIOX_3 };
+static const unsigned int uart_c_tx_x15_pins[] = { GPIOX_15 };
+static const unsigned int uart_c_rx_x16_pins[] = { GPIOX_16 };
+
+/* pwm_a */
+static const unsigned int pwm_a_x6_pins[] = { GPIOX_6 };
+static const unsigned int pwm_a_x7_pins[] = { GPIOX_7 };
+static const unsigned int pwm_a_f6_pins[] = { GPIOF_6 };
+static const unsigned int pwm_a_f10_pins[] = { GPIOF_10 };
+static const unsigned int pwm_a_a_pins[] = { GPIOA_5 };
+
+/* pwm_b */
+static const unsigned int pwm_b_x_pins[] = { GPIOX_8 };
+static const unsigned int pwm_b_f_pins[] = { GPIOF_7 };
+static const unsigned int pwm_b_a_pins[] = { GPIOA_11 };
+
+/* pwm_c */
+static const unsigned int pwm_c_x_pins[] = { GPIOX_9 };
+static const unsigned int pwm_c_f3_pins[] = { GPIOF_3 };
+static const unsigned int pwm_c_f8_pins[] = { GPIOF_8 };
+static const unsigned int pwm_c_a_pins[] = { GPIOA_10 };
+
+/* pwm_d */
+static const unsigned int pwm_d_x10_pins[] = { GPIOX_10 };
+static const unsigned int pwm_d_x13_pins[] = { GPIOX_13 };
+static const unsigned int pwm_d_x15_pins[] = { GPIOX_15 };
+static const unsigned int pwm_d_f_pins[] = { GPIOF_11 };
+
+/* pwm_e */
+static const unsigned int pwm_e_p_pins[] = { GPIOP_3 };
+static const unsigned int pwm_e_x2_pins[] = { GPIOX_2 };
+static const unsigned int pwm_e_x14_pins[] = { GPIOX_14 };
+static const unsigned int pwm_e_x16_pins[] = { GPIOX_16 };
+static const unsigned int pwm_e_f_pins[] = { GPIOF_3 };
+static const unsigned int pwm_e_a_pins[] = { GPIOA_0 };
+
+/* pwm_f */
+static const unsigned int pwm_f_b_pins[] = { GPIOB_6 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_3 };
+static const unsigned int pwm_f_f4_pins[] = { GPIOF_4 };
+static const unsigned int pwm_f_f12_pins[] = { GPIOF_12 };
+
+/* pwm_a_hiz */
+static const unsigned int pwm_a_hiz_f8_pins[] = { GPIOF_8 };
+static const unsigned int pwm_a_hiz_f10_pins[] = { GPIOF_10 };
+static const unsigned int pwm_a_hiz_f6_pins[] = { GPIOF_6 };
+
+/* pwm_b_hiz */
+static const unsigned int pwm_b_hiz_pins[] = { GPIOF_7 };
+
+/* pwm_c_hiz */
+static const unsigned int pwm_c_hiz_pins[] = { GPIOF_8 };
+
+/* tdm_a */
+static const unsigned int tdm_a_dout1_pins[] = { GPIOX_7 };
+static const unsigned int tdm_a_dout0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_fs_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_sclk_pins[] = { GPIOX_10 };
+static const unsigned int tdm_a_din1_pins[] = { GPIOX_7 };
+static const unsigned int tdm_a_din0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_slv_fs_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_slv_sclk_pins[] = { GPIOX_10 };
+
+/* spi_a */
+static const unsigned int spi_a_mosi_x2_pins[] = { GPIOX_2 };
+static const unsigned int spi_a_ss0_x3_pins[] = { GPIOX_3 };
+static const unsigned int spi_a_sclk_x4_pins[] = { GPIOX_4 };
+static const unsigned int spi_a_miso_x5_pins[] = { GPIOX_5 };
+static const unsigned int spi_a_mosi_x7_pins[] = { GPIOX_7 };
+static const unsigned int spi_a_miso_x8_pins[] = { GPIOX_8 };
+static const unsigned int spi_a_ss0_x9_pins[] = { GPIOX_9 };
+static const unsigned int spi_a_sclk_x10_pins[] = { GPIOX_10 };
+
+static const unsigned int spi_a_mosi_a_pins[] = { GPIOA_6 };
+static const unsigned int spi_a_miso_a_pins[] = { GPIOA_7 };
+static const unsigned int spi_a_ss0_a_pins[] = { GPIOA_8 };
+static const unsigned int spi_a_sclk_a_pins[] = { GPIOA_9 };
+
+/* pdm */
+static const unsigned int pdm_din0_x_pins[] = { GPIOX_7 };
+static const unsigned int pdm_din1_x_pins[] = { GPIOX_8 };
+static const unsigned int pdm_din2_x_pins[] = { GPIOX_9 };
+static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
+
+static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
+static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
+static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
+static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+
+/* gen_clk */
+static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
+static const unsigned int gen_clk_f8_pins[] = { GPIOF_8 };
+static const unsigned int gen_clk_f10_pins[] = { GPIOF_10 };
+static const unsigned int gen_clk_a_pins[] = { GPIOA_11 };
+
+/* jtag_a */
+static const unsigned int jtag_a_clk_pins[] = { GPIOF_4 };
+static const unsigned int jtag_a_tms_pins[] = { GPIOF_5 };
+static const unsigned int jtag_a_tdi_pins[] = { GPIOF_6 };
+static const unsigned int jtag_a_tdo_pins[] = { GPIOF_7 };
+
+/* clk_32k_in */
+static const unsigned int clk_32k_in_pins[] = { GPIOF_2 };
+
+/* ir in */
+static const unsigned int remote_input_f_pins[] = { GPIOF_3 };
+static const unsigned int remote_input_a_pins[] = { GPIOA_11 };
+
+/* ir out */
+static const unsigned int remote_out_pins[] = { GPIOF_5 };
+
+/* spdif */
+static const unsigned int spdif_in_f6_pins[] = { GPIOF_6 };
+static const unsigned int spdif_in_f7_pins[] = { GPIOF_7 };
+
+/* sw */
+static const unsigned int swclk_pins[] = { GPIOF_4 };
+static const unsigned int swdio_pins[] = { GPIOF_5 };
+
+/* clk_25 */
+static const unsigned int clk25_pins[] = { GPIOF_10 };
+
+/* cec_a */
+static const unsigned int cec_a_pins[] = { GPIOF_2 };
+
+/* cec_b */
+static const unsigned int cec_b_pins[] = { GPIOF_2 };
+
+/* clk12_24 */
+static const unsigned int clk12_24_pins[] = { GPIOF_10 };
+
+/* mclk_0 */
+static const unsigned int mclk_0_pins[] = { GPIOA_0 };
+
+/* tdm_b */
+static const unsigned int tdm_b_sclk_pins[] = { GPIOA_1 };
+static const unsigned int tdm_b_fs_pins[] = { GPIOA_2 };
+static const unsigned int tdm_b_dout0_pins[] = { GPIOA_3 };
+static const unsigned int tdm_b_dout1_pins[] = { GPIOA_4 };
+static const unsigned int tdm_b_dout2_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_dout3_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_dout4_pins[] = { GPIOA_7 };
+static const unsigned int tdm_b_dout5_pins[] = { GPIOA_8 };
+static const unsigned int tdm_b_slv_sclk_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_slv_fs_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_din0_pins[] = { GPIOA_7 };
+static const unsigned int tdm_b_din1_pins[] = { GPIOA_8 };
+static const unsigned int tdm_b_din2_pins[] = { GPIOA_9 };
+
+/* mclk_vad */
+static const unsigned int mclk_vad_pins[] = { GPIOA_0 };
+
+/* tdm_vad */
+static const unsigned int tdm_vad_sclk_a1_pins[] = { GPIOA_1 };
+static const unsigned int tdm_vad_fs_a2_pins[] = { GPIOA_2 };
+static const unsigned int tdm_vad_sclk_a5_pins[] = { GPIOA_5 };
+static const unsigned int tdm_vad_fs_a6_pins[] = { GPIOA_6 };
+
+/* tst_out */
+static const unsigned int tst_out0_pins[] = { GPIOA_0 };
+static const unsigned int tst_out1_pins[] = { GPIOA_1 };
+static const unsigned int tst_out2_pins[] = { GPIOA_2 };
+static const unsigned int tst_out3_pins[] = { GPIOA_3 };
+static const unsigned int tst_out4_pins[] = { GPIOA_4 };
+static const unsigned int tst_out5_pins[] = { GPIOA_5 };
+static const unsigned int tst_out6_pins[] = { GPIOA_6 };
+static const unsigned int tst_out7_pins[] = { GPIOA_7 };
+static const unsigned int tst_out8_pins[] = { GPIOA_8 };
+static const unsigned int tst_out9_pins[] = { GPIOA_9 };
+static const unsigned int tst_out10_pins[] = { GPIOA_10 };
+static const unsigned int tst_out11_pins[] = { GPIOA_11 };
+
+/* mute */
+static const unsigned int mute_key_pins[] = { GPIOA_4 };
+static const unsigned int mute_en_pins[] = { GPIOA_5 };
+
+static struct meson_pmx_group meson_a1_periphs_groups[] = {
+ GPIO_GROUP(GPIOP_0),
+ GPIO_GROUP(GPIOP_1),
+ GPIO_GROUP(GPIOP_2),
+ GPIO_GROUP(GPIOP_3),
+ GPIO_GROUP(GPIOP_4),
+ GPIO_GROUP(GPIOP_5),
+ GPIO_GROUP(GPIOP_6),
+ GPIO_GROUP(GPIOP_7),
+ GPIO_GROUP(GPIOP_8),
+ GPIO_GROUP(GPIOP_9),
+ GPIO_GROUP(GPIOP_10),
+ GPIO_GROUP(GPIOP_11),
+ GPIO_GROUP(GPIOP_12),
+ GPIO_GROUP(GPIOB_0),
+ GPIO_GROUP(GPIOB_1),
+ GPIO_GROUP(GPIOB_2),
+ GPIO_GROUP(GPIOB_3),
+ GPIO_GROUP(GPIOB_4),
+ GPIO_GROUP(GPIOB_5),
+ GPIO_GROUP(GPIOB_6),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOF_0),
+ GPIO_GROUP(GPIOF_1),
+ GPIO_GROUP(GPIOF_2),
+ GPIO_GROUP(GPIOF_3),
+ GPIO_GROUP(GPIOF_4),
+ GPIO_GROUP(GPIOF_5),
+ GPIO_GROUP(GPIOF_6),
+ GPIO_GROUP(GPIOF_7),
+ GPIO_GROUP(GPIOF_8),
+ GPIO_GROUP(GPIOF_9),
+ GPIO_GROUP(GPIOF_10),
+ GPIO_GROUP(GPIOF_11),
+ GPIO_GROUP(GPIOF_12),
+ GPIO_GROUP(GPIOA_0),
+ GPIO_GROUP(GPIOA_1),
+ GPIO_GROUP(GPIOA_2),
+ GPIO_GROUP(GPIOA_3),
+ GPIO_GROUP(GPIOA_4),
+ GPIO_GROUP(GPIOA_5),
+ GPIO_GROUP(GPIOA_6),
+ GPIO_GROUP(GPIOA_7),
+ GPIO_GROUP(GPIOA_8),
+ GPIO_GROUP(GPIOA_9),
+ GPIO_GROUP(GPIOA_10),
+ GPIO_GROUP(GPIOA_11),
+
+ /* bank P func1 */
+ GROUP(psram_clkn, 1),
+ GROUP(psram_clkp, 1),
+ GROUP(psram_ce_n, 1),
+ GROUP(psram_rst_n, 1),
+ GROUP(psram_adq0, 1),
+ GROUP(psram_adq1, 1),
+ GROUP(psram_adq2, 1),
+ GROUP(psram_adq3, 1),
+ GROUP(psram_adq4, 1),
+ GROUP(psram_adq5, 1),
+ GROUP(psram_adq6, 1),
+ GROUP(psram_adq7, 1),
+ GROUP(psram_dqs_dm, 1),
+
+ /*bank P func2 */
+ GROUP(pwm_e_p, 2),
+
+ /*bank B func1 */
+ GROUP(spif_mo, 1),
+ GROUP(spif_mi, 1),
+ GROUP(spif_wp_n, 1),
+ GROUP(spif_hold_n, 1),
+ GROUP(spif_clk, 1),
+ GROUP(spif_cs, 1),
+ GROUP(pwm_f_b, 1),
+
+ /*bank B func2 */
+ GROUP(sdcard_d0_b, 2),
+ GROUP(sdcard_d1_b, 2),
+ GROUP(sdcard_d2_b, 2),
+ GROUP(sdcard_d3_b, 2),
+ GROUP(sdcard_clk_b, 2),
+ GROUP(sdcard_cmd_b, 2),
+
+ /*bank X func1 */
+ GROUP(sdcard_d0_x, 1),
+ GROUP(sdcard_d1_x, 1),
+ GROUP(sdcard_d2_x, 1),
+ GROUP(sdcard_d3_x, 1),
+ GROUP(sdcard_clk_x, 1),
+ GROUP(sdcard_cmd_x, 1),
+ GROUP(pwm_a_x6, 1),
+ GROUP(tdm_a_dout1, 1),
+ GROUP(tdm_a_dout0, 1),
+ GROUP(tdm_a_fs, 1),
+ GROUP(tdm_a_sclk, 1),
+ GROUP(uart_a_tx, 1),
+ GROUP(uart_a_rx, 1),
+ GROUP(uart_a_cts, 1),
+ GROUP(uart_a_rts, 1),
+ GROUP(pwm_d_x15, 1),
+ GROUP(pwm_e_x16, 1),
+
+ /*bank X func2 */
+ GROUP(i2c2_sck_x0, 2),
+ GROUP(i2c2_sda_x1, 2),
+ GROUP(spi_a_mosi_x2, 2),
+ GROUP(spi_a_ss0_x3, 2),
+ GROUP(spi_a_sclk_x4, 2),
+ GROUP(spi_a_miso_x5, 2),
+ GROUP(tdm_a_din1, 2),
+ GROUP(tdm_a_din0, 2),
+ GROUP(tdm_a_slv_fs, 2),
+ GROUP(tdm_a_slv_sclk, 2),
+ GROUP(i2c3_sck_x, 2),
+ GROUP(i2c3_sda_x, 2),
+ GROUP(pwm_d_x13, 2),
+ GROUP(pwm_e_x14, 2),
+ GROUP(i2c2_sck_x15, 2),
+ GROUP(i2c2_sda_x16, 2),
+
+ /*bank X func3 */
+ GROUP(uart_c_tx_x0, 3),
+ GROUP(uart_c_rx_x1, 3),
+ GROUP(uart_c_cts, 3),
+ GROUP(uart_c_rts, 3),
+ GROUP(pdm_din0_x, 3),
+ GROUP(pdm_din1_x, 3),
+ GROUP(pdm_din2_x, 3),
+ GROUP(pdm_dclk_x, 3),
+ GROUP(uart_c_tx_x15, 3),
+ GROUP(uart_c_rx_x16, 3),
+
+ /*bank X func4 */
+ GROUP(pwm_e_x2, 4),
+ GROUP(pwm_f_x, 4),
+ GROUP(spi_a_mosi_x7, 4),
+ GROUP(spi_a_miso_x8, 4),
+ GROUP(spi_a_ss0_x9, 4),
+ GROUP(spi_a_sclk_x10, 4),
+
+ /*bank X func5 */
+ GROUP(uart_b_tx_x, 5),
+ GROUP(uart_b_rx_x, 5),
+ GROUP(i2c1_sda_x, 5),
+ GROUP(i2c1_sck_x, 5),
+
+ /*bank X func6 */
+ GROUP(pwm_a_x7, 6),
+ GROUP(pwm_b_x, 6),
+ GROUP(pwm_c_x, 6),
+ GROUP(pwm_d_x10, 6),
+
+ /*bank X func7 */
+ GROUP(gen_clk_x, 7),
+
+ /*bank F func1 */
+ GROUP(uart_b_tx_f, 1),
+ GROUP(uart_b_rx_f, 1),
+ GROUP(remote_input_f, 1),
+ GROUP(jtag_a_clk, 1),
+ GROUP(jtag_a_tms, 1),
+ GROUP(jtag_a_tdi, 1),
+ GROUP(jtag_a_tdo, 1),
+ GROUP(gen_clk_f8, 1),
+ GROUP(pwm_a_f10, 1),
+ GROUP(i2c0_sck_f11, 1),
+ GROUP(i2c0_sda_f12, 1),
+
+ /*bank F func2 */
+ GROUP(clk_32k_in, 2),
+ GROUP(pwm_e_f, 2),
+ GROUP(pwm_f_f4, 2),
+ GROUP(remote_out, 2),
+ GROUP(spdif_in_f6, 2),
+ GROUP(spdif_in_f7, 2),
+ GROUP(pwm_a_hiz_f8, 2),
+ GROUP(pwm_a_hiz_f10, 2),
+ GROUP(pwm_d_f, 2),
+ GROUP(pwm_f_f12, 2),
+
+ /*bank F func3 */
+ GROUP(pwm_c_f3, 3),
+ GROUP(swclk, 3),
+ GROUP(swdio, 3),
+ GROUP(pwm_a_f6, 3),
+ GROUP(pwm_b_f, 3),
+ GROUP(pwm_c_f8, 3),
+ GROUP(clk25, 3),
+ GROUP(i2c_slave_sck_f, 3),
+ GROUP(i2c_slave_sda_f, 3),
+
+ /*bank F func4 */
+ GROUP(cec_a, 4),
+ GROUP(i2c3_sck_f, 4),
+ GROUP(i2c3_sda_f, 4),
+ GROUP(pwm_a_hiz_f6, 4),
+ GROUP(pwm_b_hiz, 4),
+ GROUP(pwm_c_hiz, 4),
+ GROUP(i2c0_sck_f9, 4),
+ GROUP(i2c0_sda_f10, 4),
+
+ /*bank F func5 */
+ GROUP(cec_b, 5),
+ GROUP(clk12_24, 5),
+
+ /*bank F func7 */
+ GROUP(gen_clk_f10, 7),
+
+ /*bank A func1 */
+ GROUP(mclk_0, 1),
+ GROUP(tdm_b_sclk, 1),
+ GROUP(tdm_b_fs, 1),
+ GROUP(tdm_b_dout0, 1),
+ GROUP(tdm_b_dout1, 1),
+ GROUP(tdm_b_dout2, 1),
+ GROUP(tdm_b_dout3, 1),
+ GROUP(tdm_b_dout4, 1),
+ GROUP(tdm_b_dout5, 1),
+ GROUP(remote_input_a, 1),
+
+ /*bank A func2 */
+ GROUP(pwm_e_a, 2),
+ GROUP(tdm_b_slv_sclk, 2),
+ GROUP(tdm_b_slv_fs, 2),
+ GROUP(tdm_b_din0, 2),
+ GROUP(tdm_b_din1, 2),
+ GROUP(tdm_b_din2, 2),
+ GROUP(i2c1_sda_a, 2),
+ GROUP(i2c1_sck_a, 2),
+
+ /*bank A func3 */
+ GROUP(i2c2_sck_a4, 3),
+ GROUP(i2c2_sda_a5, 3),
+ GROUP(pdm_din2_a, 3),
+ GROUP(pdm_din1_a, 3),
+ GROUP(pdm_din0_a, 3),
+ GROUP(pdm_dclk, 3),
+ GROUP(pwm_c_a, 3),
+ GROUP(pwm_b_a, 3),
+
+ /*bank A func4 */
+ GROUP(pwm_a_a, 4),
+ GROUP(spi_a_mosi_a, 4),
+ GROUP(spi_a_miso_a, 4),
+ GROUP(spi_a_ss0_a, 4),
+ GROUP(spi_a_sclk_a, 4),
+ GROUP(i2c_slave_sck_a, 4),
+ GROUP(i2c_slave_sda_a, 4),
+
+ /*bank A func5 */
+ GROUP(mclk_vad, 5),
+ GROUP(tdm_vad_sclk_a1, 5),
+ GROUP(tdm_vad_fs_a2, 5),
+ GROUP(tdm_vad_sclk_a5, 5),
+ GROUP(tdm_vad_fs_a6, 5),
+ GROUP(i2c2_sck_a8, 5),
+ GROUP(i2c2_sda_a9, 5),
+
+ /*bank A func6 */
+ GROUP(tst_out0, 6),
+ GROUP(tst_out1, 6),
+ GROUP(tst_out2, 6),
+ GROUP(tst_out3, 6),
+ GROUP(tst_out4, 6),
+ GROUP(tst_out5, 6),
+ GROUP(tst_out6, 6),
+ GROUP(tst_out7, 6),
+ GROUP(tst_out8, 6),
+ GROUP(tst_out9, 6),
+ GROUP(tst_out10, 6),
+ GROUP(tst_out11, 6),
+
+ /*bank A func7 */
+ GROUP(mute_key, 7),
+ GROUP(mute_en, 7),
+ GROUP(gen_clk_a, 7),
+};
+
+static const char * const gpio_periphs_groups[] = {
+ "GPIOP_0", "GPIOP_1", "GPIOP_2", "GPIOP_3", "GPIOP_4",
+ "GPIOP_5", "GPIOP_6", "GPIOP_7", "GPIOP_8", "GPIOP_9",
+ "GPIOP_10", "GPIOP_11", "GPIOP_12",
+
+ "GPIOB_0", "GPIOB_1", "GPIOB_2", "GPIOB_3", "GPIOB_4",
+ "GPIOB_5", "GPIOB_6",
+
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+ "GPIOX_15", "GPIOX_16",
+
+ "GPIOF_0", "GPIOF_1", "GPIOF_2", "GPIOF_3", "GPIOF_4",
+ "GPIOF_5", "GPIOF_6", "GPIOF_7", "GPIOF_8", "GPIOF_9",
+ "GPIOF_10", "GPIOF_11", "GPIOF_12",
+
+ "GPIOA_0", "GPIOA_1", "GPIOA_2", "GPIOA_3", "GPIOA_4",
+ "GPIOA_5", "GPIOA_6", "GPIOA_7", "GPIOA_8", "GPIOA_9",
+ "GPIOA_10", "GPIOA_11",
+};
+
+static const char * const psram_groups[] = {
+ "psram_clkn", "psram_clkp", "psram_ce_n", "psram_rst_n", "psram_adq0",
+ "psram_adq1", "psram_adq2", "psram_adq3", "psram_adq4", "psram_adq5",
+ "psram_adq6", "psram_adq7", "psram_dqs_dm",
+};
+
+static const char * const pwm_a_groups[] = {
+ "pwm_a_x6", "pwm_a_x7", "pwm_a_f10", "pwm_a_f6", "pwm_a_a",
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b_x", "pwm_b_f", "pwm_b_a",
+};
+
+static const char * const pwm_c_groups[] = {
+ "pwm_c_x", "pwm_c_f3", "pwm_c_f8", "pwm_c_a",
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d_x15", "pwm_d_x13", "pwm_d_x10", "pwm_d_f",
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e_p", "pwm_e_x16", "pwm_e_x14", "pwm_e_x2", "pwm_e_f",
+ "pwm_e_a",
+};
+
+static const char * const pwm_f_groups[] = {
+ "pwm_f_b", "pwm_f_x", "pwm_f_f4", "pwm_f_f12",
+};
+
+static const char * const pwm_a_hiz_groups[] = {
+ "pwm_a_hiz_f8", "pwm_a_hiz_f10", "pwm_a_hiz_f6",
+};
+
+static const char * const pwm_b_hiz_groups[] = {
+ "pwm_b_hiz",
+};
+
+static const char * const pwm_c_hiz_groups[] = {
+ "pwm_c_hiz",
+};
+
+static const char * const spif_groups[] = {
+ "spif_mo", "spif_mi", "spif_wp_n", "spif_hold_n", "spif_clk",
+ "spif_cs",
+};
+
+static const char * const sdcard_groups[] = {
+ "sdcard_d0_b", "sdcard_d1_b", "sdcard_d2_b", "sdcard_d3_b",
+ "sdcard_clk_b", "sdcard_cmd_b",
+
+ "sdcard_d0_x", "sdcard_d1_x", "sdcard_d2_x", "sdcard_d3_x",
+ "sdcard_clk_x", "sdcard_cmd_x",
+};
+
+static const char * const tdm_a_groups[] = {
+ "tdm_a_din0", "tdm_a_din1", "tdm_a_fs", "tdm_a_sclk",
+ "tdm_a_slv_fs", "tdm_a_slv_sclk", "tdm_a_dout0", "tdm_a_dout1",
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_a_tx", "uart_a_rx", "uart_a_cts", "uart_a_rts",
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_b_tx_x", "uart_b_rx_x", "uart_b_tx_f", "uart_b_rx_f",
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_c_tx_x0", "uart_c_rx_x1", "uart_c_cts", "uart_c_rts",
+ "uart_c_tx_x15", "uart_c_rx_x16",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0_sck_f11", "i2c0_sda_f12", "i2c0_sck_f9", "i2c0_sda_f10",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_sda_x", "i2c1_sck_x", "i2c1_sda_a", "i2c1_sck_a",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_sck_x0", "i2c2_sda_x1", "i2c2_sck_x15", "i2c2_sda_x16",
+ "i2c2_sck_a4", "i2c2_sda_a5", "i2c2_sck_a8", "i2c2_sda_a9",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3_sck_x", "i2c3_sda_x", "i2c3_sck_f", "i2c3_sda_f",
+};
+
+static const char * const i2c_slave_groups[] = {
+ "i2c_slave_sda_a", "i2c_slave_sck_a",
+ "i2c_slave_sda_f", "i2c_slave_sck_f",
+};
+
+static const char * const spi_a_groups[] = {
+ "spi_a_mosi_x2", "spi_a_ss0_x3", "spi_a_sclk_x4", "spi_a_miso_x5",
+ "spi_a_mosi_x7", "spi_a_miso_x8", "spi_a_ss0_x9", "spi_a_sclk_x10",
+
+ "spi_a_mosi_a", "spi_a_miso_a", "spi_a_ss0_a", "spi_a_sclk_a",
+};
+
+static const char * const pdm_groups[] = {
+ "pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+};
+
+static const char * const gen_clk_groups[] = {
+ "gen_clk_x", "gen_clk_f8", "gen_clk_f10", "gen_clk_a",
+};
+
+static const char * const remote_input_groups[] = {
+ "remote_input_f",
+ "remote_input_a",
+};
+
+static const char * const jtag_a_groups[] = {
+ "jtag_a_clk", "jtag_a_tms", "jtag_a_tdi", "jtag_a_tdo",
+};
+
+static const char * const clk_32k_in_groups[] = {
+ "clk_32k_in",
+};
+
+static const char * const remote_out_groups[] = {
+ "remote_out",
+};
+
+static const char * const spdif_in_groups[] = {
+ "spdif_in_f6", "spdif_in_f7",
+};
+
+static const char * const sw_groups[] = {
+ "swclk", "swdio",
+};
+
+static const char * const clk25_groups[] = {
+ "clk_25",
+};
+
+static const char * const cec_a_groups[] = {
+ "cec_a",
+};
+
+static const char * const cec_b_groups[] = {
+ "cec_b",
+};
+
+static const char * const clk12_24_groups[] = {
+ "clk12_24",
+};
+
+static const char * const mclk_0_groups[] = {
+ "mclk_0",
+};
+
+static const char * const tdm_b_groups[] = {
+ "tdm_b_din0", "tdm_b_din1", "tdm_b_din2",
+ "tdm_b_sclk", "tdm_b_fs", "tdm_b_dout0", "tdm_b_dout1",
+ "tdm_b_dout2", "tdm_b_dout3", "tdm_b_dout4", "tdm_b_dout5",
+ "tdm_b_slv_sclk", "tdm_b_slv_fs",
+};
+
+static const char * const mclk_vad_groups[] = {
+ "mclk_vad",
+};
+
+static const char * const tdm_vad_groups[] = {
+ "tdm_vad_sclk_a1", "tdm_vad_fs_a2", "tdm_vad_sclk_a5", "tdm_vad_fs_a6",
+};
+
+static const char * const tst_out_groups[] = {
+ "tst_out0", "tst_out1", "tst_out2", "tst_out3",
+ "tst_out4", "tst_out5", "tst_out6", "tst_out7",
+ "tst_out8", "tst_out9", "tst_out10", "tst_out11",
+};
+
+static const char * const mute_groups[] = {
+ "mute_key", "mute_en",
+};
+
+static struct meson_pmx_func meson_a1_periphs_functions[] = {
+ FUNCTION(gpio_periphs),
+ FUNCTION(psram),
+ FUNCTION(pwm_a),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_c),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(pwm_f),
+ FUNCTION(pwm_a_hiz),
+ FUNCTION(pwm_b_hiz),
+ FUNCTION(pwm_c_hiz),
+ FUNCTION(spif),
+ FUNCTION(sdcard),
+ FUNCTION(tdm_a),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(uart_c),
+ FUNCTION(i2c0),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(spi_a),
+ FUNCTION(pdm),
+ FUNCTION(gen_clk),
+ FUNCTION(remote_input),
+ FUNCTION(jtag_a),
+ FUNCTION(clk_32k_in),
+ FUNCTION(remote_out),
+ FUNCTION(spdif_in),
+ FUNCTION(sw),
+ FUNCTION(clk25),
+ FUNCTION(cec_a),
+ FUNCTION(cec_b),
+ FUNCTION(clk12_24),
+ FUNCTION(mclk_0),
+ FUNCTION(tdm_b),
+ FUNCTION(mclk_vad),
+ FUNCTION(tdm_vad),
+ FUNCTION(tst_out),
+ FUNCTION(mute),
+};
+
+static struct meson_bank meson_a1_periphs_banks[] = {
+ /* name first last irq pullen pull dir out in ds */
+ BANK_DS("P", GPIOP_0, GPIOP_12, 0, 12, 0x3, 0, 0x4, 0,
+ 0x2, 0, 0x1, 0, 0x0, 0, 0x5, 0),
+ BANK_DS("B", GPIOB_0, GPIOB_6, 13, 19, 0x13, 0, 0x14, 0,
+ 0x12, 0, 0x11, 0, 0x10, 0, 0x15, 0),
+ BANK_DS("X", GPIOX_0, GPIOX_16, 20, 36, 0x23, 0, 0x24, 0,
+ 0x22, 0, 0x21, 0, 0x20, 0, 0x25, 0),
+ BANK_DS("F", GPIOF_0, GPIOF_12, 37, 49, 0x33, 0, 0x34, 0,
+ 0x32, 0, 0x31, 0, 0x30, 0, 0x35, 0),
+ BANK_DS("A", GPIOA_0, GPIOA_11, 50, 61, 0x43, 0, 0x44, 0,
+ 0x42, 0, 0x41, 0, 0x40, 0, 0x45, 0),
+};
+
+static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
+ /* name first last reg offset */
+ BANK_PMX("P", GPIOP_0, GPIOP_12, 0x0, 0),
+ BANK_PMX("B", GPIOB_0, GPIOB_6, 0x2, 0),
+ BANK_PMX("X", GPIOX_0, GPIOX_16, 0x3, 0),
+ BANK_PMX("F", GPIOF_0, GPIOF_12, 0x6, 0),
+ BANK_PMX("A", GPIOA_0, GPIOA_11, 0x8, 0),
+};
+
+static struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = {
+ .pmx_banks = meson_a1_periphs_pmx_banks,
+ .num_pmx_banks = ARRAY_SIZE(meson_a1_periphs_pmx_banks),
+};
+
+static struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = {
+ .name = "periphs-banks",
+ .pins = meson_a1_periphs_pins,
+ .groups = meson_a1_periphs_groups,
+ .funcs = meson_a1_periphs_functions,
+ .banks = meson_a1_periphs_banks,
+ .num_pins = ARRAY_SIZE(meson_a1_periphs_pins),
+ .num_groups = ARRAY_SIZE(meson_a1_periphs_groups),
+ .num_funcs = ARRAY_SIZE(meson_a1_periphs_functions),
+ .num_banks = ARRAY_SIZE(meson_a1_periphs_banks),
+ .pmx_ops = &meson_axg_pmx_ops,
+ .pmx_data = &meson_a1_periphs_pmx_banks_data,
+ .parse_dt = &meson_a1_parse_dt_extra,
+};
+
+static const struct of_device_id meson_a1_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-a1-periphs-pinctrl",
+ .data = &meson_a1_periphs_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_a1_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-a1-pinctrl",
+ .of_match_table = meson_a1_pinctrl_dt_match,
+ },
+};
+
+builtin_platform_driver(meson_a1_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index ad502eda4afa..072765db93d7 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -1066,6 +1066,7 @@ static struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = {
.num_banks = ARRAY_SIZE(meson_axg_aobus_banks),
.pmx_ops = &meson_axg_pmx_ops,
.pmx_data = &meson_axg_aobus_pmx_banks_data,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_axg_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 582665fd362a..41850e3c0091 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -1362,6 +1362,14 @@ static struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
.num_pmx_banks = ARRAY_SIZE(meson_g12a_aobus_pmx_banks),
};
+static int meson_g12a_aobus_parse_dt_extra(struct meson_pinctrl *pc)
+{
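+ /* the AO bank has no separate pull/pull-enable regions; both live in the gpio register region */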
+ pc->reg_pull = pc->reg_gpio;
+ pc->reg_pullen = pc->reg_gpio;
+
+ return 0;
+}
+
static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_g12a_periphs_pins,
@@ -1388,6 +1396,7 @@ static struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
.num_banks = ARRAY_SIZE(meson_g12a_aobus_banks),
.pmx_ops = &meson_axg_pmx_ops,
.pmx_data = &meson_g12a_aobus_pmx_banks_data,
+ .parse_dt = meson_g12a_aobus_parse_dt_extra,
};
static const struct of_device_id meson_g12a_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 5bfa56f3847e..926b9997159a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -851,6 +851,7 @@ static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_gxbb_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 72c5373c8dc1..1b6e8646700f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -820,6 +820,7 @@ static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxl_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_gxl_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 8bba9d053d9f..3c80828a5e50 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -625,7 +625,7 @@ static struct regmap *meson_map_resource(struct meson_pinctrl *pc,
i = of_property_match_string(node, "reg-names", name);
if (of_address_to_resource(node, i, &res))
- return ERR_PTR(-ENOENT);
+ return NULL;
base = devm_ioremap_resource(pc->dev, &res);
if (IS_ERR(base))
@@ -665,26 +665,24 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
pc->of_node = gpio_np;
pc->reg_mux = meson_map_resource(pc, gpio_np, "mux");
- if (IS_ERR(pc->reg_mux)) {
+ if (IS_ERR_OR_NULL(pc->reg_mux)) {
dev_err(pc->dev, "mux registers not found\n");
- return PTR_ERR(pc->reg_mux);
+ return pc->reg_mux ? PTR_ERR(pc->reg_mux) : -ENOENT;
}
pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
- if (IS_ERR(pc->reg_gpio)) {
+ if (IS_ERR_OR_NULL(pc->reg_gpio)) {
dev_err(pc->dev, "gpio registers not found\n");
- return PTR_ERR(pc->reg_gpio);
+ return pc->reg_gpio ? PTR_ERR(pc->reg_gpio) : -ENOENT;
}
pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
- /* Use gpio region if pull one is not present */
if (IS_ERR(pc->reg_pull))
- pc->reg_pull = pc->reg_gpio;
+ pc->reg_pull = NULL;
pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable");
- /* Use pull region if pull-enable one is not present */
if (IS_ERR(pc->reg_pullen))
- pc->reg_pullen = pc->reg_pull;
+ pc->reg_pullen = NULL;
pc->reg_ds = meson_map_resource(pc, gpio_np, "ds");
if (IS_ERR(pc->reg_ds)) {
@@ -692,6 +690,28 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
pc->reg_ds = NULL;
}
+ if (pc->data->parse_dt)
+ return pc->data->parse_dt(pc);
+
+ return 0;
+}
+
+int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc)
+{
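+ /* the AO bank requires a pull region; its pull-enable bits live in that same region */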
+ if (!pc->reg_pull)
+ return -EINVAL;
+
+ pc->reg_pullen = pc->reg_pull;
+
+ return 0;
+}
+
+int meson_a1_parse_dt_extra(struct meson_pinctrl *pc)
+{
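+ /* A1 keeps its pull, pull-enable and drive-strength bits in the gpio register region */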
+ pc->reg_pull = pc->reg_gpio;
+ pc->reg_pullen = pc->reg_gpio;
+ pc->reg_ds = pc->reg_gpio;
+
return 0;
}
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index c696f3241a36..f8b0ff9d419a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -11,6 +11,8 @@
#include <linux/regmap.h>
#include <linux/types.h>
+struct meson_pinctrl;
+
/**
* struct meson_pmx_group - a pinmux group
*
@@ -114,6 +116,7 @@ struct meson_pinctrl_data {
unsigned int num_banks;
const struct pinmux_ops *pmx_ops;
void *pmx_data;
+ int (*parse_dt)(struct meson_pinctrl *pc);
};
struct meson_pinctrl {
@@ -171,3 +174,7 @@ int meson_pmx_get_groups(struct pinctrl_dev *pcdev,
/* Common probe function */
int meson_pinctrl_probe(struct platform_device *pdev);
+/* Common ao groups extra dt parse function for SoCs before g12a */
+int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc);
+/* Common extra dt parse function for SoCs like A1 */
+int meson_a1_parse_dt_extra(struct meson_pinctrl *pc);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 0b97befa6335..dd17100efdcf 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1103,6 +1103,7 @@ static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson8_aobus_functions),
.num_banks = ARRAY_SIZE(meson8_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = &meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson8_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index a7de388388e6..2d5339edd0b7 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -962,6 +962,7 @@ static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
.num_banks = ARRAY_SIZE(meson8b_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = &meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson8b_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index d69c25798871..0d12894d3ee1 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -46,8 +46,8 @@ config PINCTRL_ORION
select PINCTRL_MVEBU
config PINCTRL_ARMADA_37XX
- bool
- select GENERIC_PINCONF
- select MFD_SYSCON
- select PINCONF
- select PINMUX
+ bool
+ select GENERIC_PINCONF
+ select MFD_SYSCON
+ select PINCONF
+ select PINMUX
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index f2f5fcd9a237..aa9dcde0f069 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -595,10 +595,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
regmap_read(info->regmap, in_reg, &in_val);
/* Set initial polarity based on current input level. */
- if (in_val & d->mask)
- val |= d->mask; /* falling */
+ if (in_val & BIT(d->hwirq % GPIO_PER_REG))
+ val |= BIT(d->hwirq % GPIO_PER_REG); /* falling */
else
- val &= ~d->mask; /* rising */
+ val &= ~(BIT(d->hwirq % GPIO_PER_REG)); /* rising */
break;
}
default:
@@ -722,6 +722,8 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct device_node *np = info->dev->of_node;
struct gpio_chip *gc = &info->gpio_chip;
struct irq_chip *irqchip = &info->irq_chip;
+ struct gpio_irq_chip *girq = &gc->irq;
+ struct device *dev = &pdev->dev;
struct resource res;
int ret = -ENODEV, i, nr_irq_parent;
@@ -732,19 +734,21 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
break;
}
};
- if (ret)
+ if (ret) {
+ dev_err(dev, "no gpio-controller child node\n");
return ret;
+ }
nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
- dev_err(&pdev->dev, "Invalid or no IRQ\n");
+ dev_err(dev, "invalid or no IRQ\n");
return 0;
}
if (of_address_to_resource(info->dev->of_node, 1, &res)) {
- dev_err(info->dev, "cannot find IO resource\n");
+ dev_err(dev, "cannot find IO resource\n");
return -ENOENT;
}
@@ -759,27 +763,27 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
irqchip->irq_set_type = armada_37xx_irq_set_type;
irqchip->irq_startup = armada_37xx_irq_startup;
irqchip->name = info->data->name;
- ret = gpiochip_irqchip_add(gc, irqchip, 0,
- handle_edge_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_info(&pdev->dev, "could not add irqchip\n");
- return ret;
- }
-
+ girq->chip = irqchip;
+ girq->parent_handler = armada_37xx_irq_handler;
/*
* Many interrupts are connected to the parent interrupt
* controller. But we do not take advantage of this and use
* the chained irq with all of them.
*/
+ girq->num_parents = nr_irq_parent;
+ girq->parents = devm_kcalloc(&pdev->dev, nr_irq_parent,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
int irq = irq_of_parse_and_map(np, i);
if (irq < 0)
continue;
-
- gpiochip_set_chained_irqchip(gc, irqchip, irq,
- armada_37xx_irq_handler);
+ girq->parents[i] = irq;
}
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
return 0;
}
@@ -809,10 +813,10 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
gc->of_node = np;
gc->label = info->data->name;
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
+ ret = armada_37xx_irqchip_register(pdev, info);
if (ret)
return ret;
- ret = armada_37xx_irqchip_register(pdev, info);
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 00cfaf2c9d4a..a1f93859e7ca 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -759,12 +759,10 @@ int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
struct mvebu_mpp_ctrl_data *mpp_data;
- struct resource *res;
void __iomem *base;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 29bb9d8cbbb5..cc97d270be61 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -220,17 +220,14 @@ static int orion_pinctrl_probe(struct platform_device *pdev)
{
const struct of_device_id *match =
of_match_device(orion_pinctrl_of_match, &pdev->dev);
- struct resource *res;
pdev->dev.platform_data = (void*)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ mpp_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mpp_base))
return PTR_ERR(mpp_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- high_mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ high_mpp_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(high_mpp_base))
return PTR_ERR(high_mpp_base);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index 726c0b5501fa..b9246e0b4fe2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -391,6 +391,15 @@ static const unsigned mc0_a_1_pins[] = { DB8500_PIN_AC2, /* MC0_CMDDIR */
DB8500_PIN_AA2, /* MC0_DAT2 */
DB8500_PIN_AA1 /* MC0_DAT3 */
};
+/* MMC/SD card 0 interface without CMD/DAT0/DAT2 direction control */
+static const unsigned mc0_a_2_pins[] = { DB8500_PIN_AA3, /* MC0_FBCLK */
+ DB8500_PIN_AA4, /* MC0_CLK */
+ DB8500_PIN_AB2, /* MC0_CMD */
+ DB8500_PIN_Y4, /* MC0_DAT0 */
+ DB8500_PIN_Y2, /* MC0_DAT1 */
+ DB8500_PIN_AA2, /* MC0_DAT2 */
+ DB8500_PIN_AA1 /* MC0_DAT3 */
+};
/* Often only 4 bits are used, then these are not needed (only used for MMC) */
static const unsigned mc0_dat47_a_1_pins[] = { DB8500_PIN_W2, /* MC0_DAT4 */
DB8500_PIN_W3, /* MC0_DAT5 */
@@ -670,6 +679,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc0_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
@@ -828,7 +838,7 @@ DB8500_FUNC_GROUPS(ipi2c, "ipi2c_a_1", "ipi2c_a_2");
*/
DB8500_FUNC_GROUPS(msp0, "msp0txrx_a_1", "msp0tfstck_a_1", "msp0rfstck_a_1",
"msp0txrx_b_1", "msp0sck_b_1");
-DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_dat47_a_1", "mc0dat31dir_a_1");
+DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_a_2", "mc0_dat47_a_1", "mc0dat31dir_a_1");
/* MSP1 can swap RX/TX like MSP0 but has no SCK pin available */
DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");
DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 2a8190b11d10..95f864dfdef4 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -248,9 +248,6 @@ struct nmk_gpio_chip {
void __iomem *addr;
struct clk *clk;
unsigned int bank;
- unsigned int parent_irq;
- int latent_parent_irq;
- u32 (*get_latent_status)(unsigned int bank);
void (*set_ioforce)(bool enable);
spinlock_t lock;
bool sleepmode;
@@ -802,13 +799,19 @@ static void nmk_gpio_irq_shutdown(struct irq_data *d)
clk_disable(nmk_chip->clk);
}
-static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
+static void nmk_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *host_chip = irq_desc_get_chip(desc);
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ u32 status;
chained_irq_enter(host_chip, desc);
+ clk_enable(nmk_chip->clk);
+ status = readl(nmk_chip->addr + NMK_GPIO_IS);
+ clk_disable(nmk_chip->clk);
+
while (status) {
int bit = __ffs(status);
@@ -819,28 +822,6 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
chained_irq_exit(host_chip, desc);
}
-static void nmk_gpio_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status;
-
- clk_enable(nmk_chip->clk);
- status = readl(nmk_chip->addr + NMK_GPIO_IS);
- clk_disable(nmk_chip->clk);
-
- __nmk_gpio_irq_handler(desc, status);
-}
-
-static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
-
- __nmk_gpio_irq_handler(desc, status);
-}
-
/* I/O Functions */
static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
@@ -1103,8 +1084,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
struct irq_chip *irqchip;
- int latent_irq;
bool supports_sleepmode;
int irq;
int ret;
@@ -1125,15 +1106,10 @@ static int nmk_gpio_probe(struct platform_device *dev)
if (irq < 0)
return irq;
- /* It's OK for this IRQ not to be present */
- latent_irq = platform_get_irq(dev, 1);
-
/*
* The virt address in nmk_chip->addr is in the nomadik register space,
* so we can simply convert the resource address, without remapping
*/
- nmk_chip->parent_irq = irq;
- nmk_chip->latent_parent_irq = latent_irq;
nmk_chip->sleepmode = supports_sleepmode;
spin_lock_init(&nmk_chip->lock);
@@ -1163,6 +1139,19 @@ static int nmk_gpio_probe(struct platform_device *dev)
chip->base,
chip->base + chip->ngpio - 1);
+ girq = &chip->irq;
+ girq->chip = irqchip;
+ girq->parent_handler = nmk_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&dev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
clk_enable(nmk_chip->clk);
nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
clk_disable(nmk_chip->clk);
@@ -1174,33 +1163,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, nmk_chip);
- /*
- * Let the generic code handle this edge IRQ, the the chained
- * handler will perform the actual work of handling the parent
- * interrupt.
- */
- ret = gpiochip_irqchip_add(chip,
- irqchip,
- 0,
- handle_edge_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&dev->dev, "could not add irqchip\n");
- gpiochip_remove(&nmk_chip->chip);
- return -ENODEV;
- }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(chip,
- irqchip,
- nmk_chip->parent_irq,
- nmk_gpio_irq_handler);
- if (nmk_chip->latent_parent_irq > 0)
- gpiochip_set_chained_irqchip(chip,
- irqchip,
- nmk_chip->latent_parent_irq,
- nmk_gpio_latent_irq_handler);
-
- dev_info(&dev->dev, "at address %p\n", nmk_chip->addr);
+ dev_info(&dev->dev, "chip registered\n");
return 0;
}
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 17f909d8b63a..22077cbe6880 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1954,6 +1954,22 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
int ret, id;
for (id = 0 ; id < pctrl->bank_num ; id++) {
+ struct gpio_irq_chip *girq;
+
+ girq = &pctrl->gpio_bank[id].gc.irq;
+ girq->chip = &pctrl->gpio_bank[id].irq_chip;
+ girq->parent_handler = npcmgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctrl->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
+ goto err_register;
+ }
+ girq->parents[0] = pctrl->gpio_bank[id].irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
ret = devm_gpiochip_add_data(pctrl->dev,
&pctrl->gpio_bank[id].gc,
&pctrl->gpio_bank[id]);
@@ -1972,22 +1988,6 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
gpiochip_remove(&pctrl->gpio_bank[id].gc);
goto err_register;
}
-
- ret = gpiochip_irqchip_add(&pctrl->gpio_bank[id].gc,
- &pctrl->gpio_bank[id].irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(pctrl->dev,
- "Failed to add IRQ chip %u\n", id);
- gpiochip_remove(&pctrl->gpio_bank[id].gc);
- goto err_register;
- }
-
- gpiochip_set_chained_irqchip(&pctrl->gpio_bank[id].gc,
- &pctrl->gpio_bank[id].irq_chip,
- pctrl->gpio_bank[id].irq,
- npcmgpio_irq_handler);
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 2c61141519f8..eab078244a4c 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -540,7 +540,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
unsigned int i, irqnr;
unsigned long flags;
- u32 *regs, regval;
+ u32 __iomem *regs;
+ u32 regval;
u64 status, mask;
/* Read the wake status */
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index e3239cf926f9..986e04ac6b5b 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -936,7 +936,6 @@ static void artpec6_pmx_reset(struct artpec6_pmx *pmx)
static int artpec6_pmx_probe(struct platform_device *pdev)
{
struct artpec6_pmx *pmx;
- struct resource *res;
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx)
@@ -944,8 +943,7 @@ static int artpec6_pmx_probe(struct platform_device *pdev)
pmx->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmx->base = devm_ioremap_resource(&pdev->dev, res);
+ pmx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmx->base))
return PTR_ERR(pmx->base);
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index d6de4d360cd4..694912409fd9 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -328,6 +328,33 @@ static int atmel_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(reg & BIT(pin->line));
}
+static int atmel_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+ unsigned int bank;
+
+ bitmap_zero(bits, atmel_pioctrl->npins);
+
+ for (bank = 0; bank < atmel_pioctrl->nbanks; bank++) {
+ unsigned int word = bank;
+ unsigned int offset = 0;
+ unsigned int reg;
+
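+ /* on 64-bit builds one bitmap word covers two 32-pin banks, so recompute word and bit offset */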
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ word = BIT_WORD(bank * ATMEL_PIO_NPINS_PER_BANK);
+ offset = bank * ATMEL_PIO_NPINS_PER_BANK % BITS_PER_LONG;
+#endif
+ if (!mask[word])
+ continue;
+
+ reg = atmel_gpio_read(atmel_pioctrl, bank, ATMEL_PIO_PDSR);
+ bits[word] |= mask[word] & (reg << offset);
+ }
+
+ return 0;
+}
+
static int atmel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
@@ -358,11 +385,46 @@ static void atmel_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
BIT(pin->line));
}
+static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+ unsigned int bank;
+
+ for (bank = 0; bank < atmel_pioctrl->nbanks; bank++) {
+ unsigned int bitmask;
+ unsigned int word = bank;
+
+/*
+ * On a 64-bit platform, BITS_PER_LONG is 64 so it is necessary to iterate over
+ * two 32bit words to handle the whole bitmask
+ */
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ word = BIT_WORD(bank * ATMEL_PIO_NPINS_PER_BANK);
+#endif
+ if (!mask[word])
+ continue;
+
+ bitmask = mask[word] & bits[word];
+ atmel_gpio_write(atmel_pioctrl, bank, ATMEL_PIO_SODR, bitmask);
+
+ bitmask = mask[word] & ~bits[word];
+ atmel_gpio_write(atmel_pioctrl, bank, ATMEL_PIO_CODR, bitmask);
+
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ mask[word] >>= ATMEL_PIO_NPINS_PER_BANK;
+ bits[word] >>= ATMEL_PIO_NPINS_PER_BANK;
+#endif
+ }
+}
+
static struct gpio_chip atmel_gpio_chip = {
.direction_input = atmel_gpio_direction_input,
.get = atmel_gpio_get,
+ .get_multiple = atmel_gpio_get_multiple,
.direction_output = atmel_gpio_direction_output,
.set = atmel_gpio_set,
+ .set_multiple = atmel_gpio_set_multiple,
.to_irq = atmel_gpio_to_irq,
.base = 0,
};
@@ -955,8 +1017,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->nbanks = atmel_pioctrl_data->nbanks;
atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- atmel_pioctrl->reg_base = devm_ioremap_resource(dev, res);
+ atmel_pioctrl->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pioctrl->reg_base))
return -EINVAL;
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d6e7e9f0ddec..207f266e9cf2 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -85,8 +85,8 @@ enum drive_strength_bit {
DRIVE_STRENGTH_SHIFT)
enum slewrate_bit {
- SLEWRATE_BIT_DIS,
SLEWRATE_BIT_ENA,
+ SLEWRATE_BIT_DIS,
};
#define SLEWRATE_BIT_MSK(name) (SLEWRATE_BIT_##name << SLEWRATE_SHIFT)
@@ -669,7 +669,7 @@ static void at91_mux_sam9x60_set_slewrate(void __iomem *pio, unsigned pin,
{
unsigned int tmp;
- if (setting < SLEWRATE_BIT_DIS || setting > SLEWRATE_BIT_ENA)
+ if (setting < SLEWRATE_BIT_ENA || setting > SLEWRATE_BIT_DIS)
return;
tmp = readl_relaxed(pio + SAM9X60_PIO_SLEWR);
@@ -1723,9 +1723,11 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
struct irq_chip *gpio_irqchip;
- int ret, i;
+ struct gpio_irq_chip *girq;
+ int i;
- gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip), GFP_KERNEL);
+ gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip),
+ GFP_KERNEL);
if (!gpio_irqchip)
return -ENOMEM;
@@ -1747,33 +1749,30 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
* handler will perform the actual work of handling the parent
* interrupt.
*/
- ret = gpiochip_irqchip_add(&at91_gpio->chip,
- gpio_irqchip,
- 0,
- handle_edge_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev, "at91_gpio.%d: Couldn't add irqchip to gpiochip.\n",
- at91_gpio->pioc_idx);
- return ret;
- }
+ girq = &at91_gpio->chip.irq;
+ girq->chip = gpio_irqchip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
- /* The top level handler handles one bank of GPIOs, except
+ /*
+ * The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
if (!gpiochip_prev) {
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ girq->parent_handler = gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = at91_gpio->pioc_virq;
return 0;
}
prev = gpiochip_get_data(gpiochip_prev);
-
/* we can only have 2 banks before */
for (i = 0; i < 2; i++) {
if (prev->next) {
@@ -1812,7 +1811,6 @@ static const struct of_device_id at91_gpio_of_match[] = {
static int at91_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct at91_gpio_chip *at91_chip = NULL;
struct gpio_chip *chip;
struct pinctrl_gpio_range *range;
@@ -1840,8 +1838,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
goto err;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_chip->regbase = devm_ioremap_resource(&pdev->dev, res);
+ at91_chip->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(at91_chip->regbase)) {
ret = PTR_ERR(at91_chip->regbase);
goto err;
@@ -1903,6 +1900,10 @@ static int at91_gpio_probe(struct platform_device *pdev)
range->npins = chip->ngpio;
range->gc = chip;
+ ret = at91_gpio_of_irq_setup(pdev, at91_chip);
+ if (ret)
+ goto gpiochip_add_err;
+
ret = gpiochip_add_data(chip, at91_chip);
if (ret)
goto gpiochip_add_err;
@@ -1910,16 +1911,10 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- ret = at91_gpio_of_irq_setup(pdev, at91_chip);
- if (ret)
- goto irq_setup_err;
-
dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase);
return 0;
-irq_setup_err:
- gpiochip_remove(chip);
gpiochip_add_err:
clk_enable_err:
clk_disable_unprepare(at91_chip->clock);
diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c
index 63b130cb1ffb..f7dff4f14101 100644
--- a/drivers/pinctrl/pinctrl-bm1880.c
+++ b/drivers/pinctrl/pinctrl-bm1880.c
@@ -1308,15 +1308,13 @@ static struct pinctrl_desc bm1880_desc = {
static int bm1880_pinctrl_probe(struct platform_device *pdev)
{
- struct resource *res;
struct bm1880_pinctrl *pctrl;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 08b9e909e917..2905348ff430 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -615,7 +615,7 @@ static struct coh901_pinpair coh901_pintable[] = {
static int __init u300_gpio_probe(struct platform_device *pdev)
{
struct u300_gpio *gpio;
- struct resource *memres;
+ struct gpio_irq_chip *girq;
int err = 0;
int portno;
u32 val;
@@ -632,8 +632,7 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->chip.base = 0;
gpio->dev = &pdev->dev;
- memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base = devm_ioremap_resource(&pdev->dev, memres);
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
@@ -672,26 +671,17 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->base + U300_GPIO_CR);
u300_gpio_init_coh901571(gpio);
-#ifdef CONFIG_OF_GPIO
- gpio->chip.of_node = pdev->dev.of_node;
-#endif
- err = gpiochip_add_data(&gpio->chip, gpio);
- if (err) {
- dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
- goto err_no_chip;
- }
-
- err = gpiochip_irqchip_add(&gpio->chip,
- &u300_gpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_EDGE_FALLING);
- if (err) {
- dev_err(gpio->dev, "no GPIO irqchip\n");
- goto err_no_irqchip;
+ girq = &gpio->chip.irq;
+ girq->chip = &u300_gpio_irqchip;
+ girq->parent_handler = u300_gpio_irq_handler;
+ girq->num_parents = U300_GPIO_NUM_PORTS;
+ girq->parents = devm_kcalloc(gpio->dev, U300_GPIO_NUM_PORTS,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ err = -ENOMEM;
+ goto err_dis_clk;
}
-
- /* Add each port with its IRQ separately */
for (portno = 0 ; portno < U300_GPIO_NUM_PORTS; portno++) {
struct u300_gpio_port *port = &gpio->ports[portno];
@@ -700,16 +690,21 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
port->gpio = gpio;
port->irq = platform_get_irq(pdev, portno);
-
- gpiochip_set_chained_irqchip(&gpio->chip,
- &u300_gpio_irqchip,
- port->irq,
- u300_gpio_irq_handler);
+ girq->parents[portno] = port->irq;
/* Turns off irq force (test register) for this port */
writel(0x0, gpio->base + portno * gpio->stride + ifr);
}
- dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
+ girq->default_type = IRQ_TYPE_EDGE_FALLING;
+ girq->handler = handle_simple_irq;
+#ifdef CONFIG_OF_GPIO
+ gpio->chip.of_node = pdev->dev.of_node;
+#endif
+ err = gpiochip_add_data(&gpio->chip, gpio);
+ if (err) {
+ dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
+ goto err_dis_clk;
+ }
/*
* Add pinctrl pin ranges, the pin controller must be registered
@@ -729,9 +724,8 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
return 0;
err_no_range:
-err_no_irqchip:
gpiochip_remove(&gpio->chip);
-err_no_chip:
+err_dis_clk:
clk_disable_unprepare(gpio->clk);
dev_err(&pdev->dev, "module ERROR:%d\n", err);
return err;
diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c
index d06f13a79740..5a0a1f20c843 100644
--- a/drivers/pinctrl/pinctrl-da850-pupd.c
+++ b/drivers/pinctrl/pinctrl-da850-pupd.c
@@ -146,14 +146,12 @@ static int da850_pupd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct da850_pupd_data *data;
- struct resource *res;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base)) {
dev_err(dev, "Could not map resource\n");
return PTR_ERR(data->base);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 7e1ceee5895b..ff702cfbaa28 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -270,7 +270,6 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
static int dc_pinctrl_probe(struct platform_device *pdev)
{
struct dc_pinmap *pmap;
- struct resource *r;
struct pinctrl_pin_desc *pins;
struct pinctrl_desc *pctl_desc;
char *pin_names;
@@ -281,8 +280,7 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
if (!pmap)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmap->regs = devm_ioremap_resource(&pdev->dev, r);
+ pmap->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmap->regs))
return PTR_ERR(pmap->regs);
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
new file mode 100644
index 000000000000..067271b7d35a
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -0,0 +1,945 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Intel Corporation */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinmux.h"
+#include "pinctrl-equilibrium.h"
+
+#define PIN_NAME_FMT "io-%d"
+#define PIN_NAME_LEN 10
+#define PAD_REG_OFF 0x100
+
+static void eqbr_gpio_disable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNENCLR);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_enable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
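+ /* make sure the line is configured as an input before unmasking its interrupt */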
+ gc->direction_input(gc, offset);
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNRNSET);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_ack_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNCR);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_mask_ack_irq(struct irq_data *d)
+{
+ eqbr_gpio_disable_irq(d);
+ eqbr_gpio_ack_irq(d);
+}
+
+static inline void eqbr_cfg_bit(void __iomem *addr,
+ unsigned int offset, unsigned int set)
+{
+ if (set)
+ writel(readl(addr) | BIT(offset), addr);
+ else
+ writel(readl(addr) & ~BIT(offset), addr);
+}
+
+static int eqbr_irq_type_cfg(struct gpio_irq_type *type,
+ struct eqbr_gpio_ctrl *gctrl,
+ unsigned int offset)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ eqbr_cfg_bit(gctrl->membase + GPIO_IRNCFG, offset, type->trig_type);
+ eqbr_cfg_bit(gctrl->membase + GPIO_EXINTCR1, offset, type->trig_type);
+ eqbr_cfg_bit(gctrl->membase + GPIO_EXINTCR0, offset, type->logic_type);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+
+ return 0;
+}
+
+static int eqbr_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ struct gpio_irq_type it;
+
+ memset(&it, 0, sizeof(it));
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE)
+ return 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_NEGATIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_BOTH_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ it.trig_type = GPIO_LEVEL_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ it.trig_type = GPIO_LEVEL_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_NEGATIVE_TRIG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ eqbr_irq_type_cfg(&it, gctrl, offset);
+ if (it.trig_type == GPIO_EDGE_TRIG)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
+ return 0;
+}
+
+static void eqbr_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ unsigned long pins, offset;
+
+ chained_irq_enter(ic, desc);
+ pins = readl(gctrl->membase + GPIO_IRNCR);
+
+ for_each_set_bit(offset, &pins, gc->ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+
+ chained_irq_exit(ic, desc);
+}
+
+static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
+{
+ struct gpio_irq_chip *girq;
+ struct gpio_chip *gc;
+
+ gc = &gctrl->chip;
+ gc->label = gctrl->name;
+#if defined(CONFIG_OF_GPIO)
+ gc->of_node = gctrl->node;
+#endif
+
+ if (!of_property_read_bool(gctrl->node, "interrupt-controller")) {
+ dev_dbg(dev, "gc %s: doesn't act as interrupt controller!\n",
+ gctrl->name);
+ return 0;
+ }
+
+ gctrl->ic.name = "gpio_irq";
+ gctrl->ic.irq_mask = eqbr_gpio_disable_irq;
+ gctrl->ic.irq_unmask = eqbr_gpio_enable_irq;
+ gctrl->ic.irq_ack = eqbr_gpio_ack_irq;
+ gctrl->ic.irq_mask_ack = eqbr_gpio_mask_ack_irq;
+ gctrl->ic.irq_set_type = eqbr_gpio_set_irq_type;
+
+ girq = &gctrl->chip.irq;
+ girq->chip = &gctrl->ic;
+ girq->parent_handler = eqbr_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parents[0] = gctrl->virq;
+
+ return 0;
+}
+
+static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct eqbr_gpio_ctrl *gctrl;
+ struct device_node *np;
+ struct resource res;
+ int i, ret;
+
+ for (i = 0; i < drvdata->nr_gpio_ctrls; i++) {
+ gctrl = drvdata->gpio_ctrls + i;
+ np = gctrl->node;
+
+ gctrl->name = devm_kasprintf(dev, GFP_KERNEL, "gpiochip%d", i);
+ if (!gctrl->name)
+ return -ENOMEM;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ dev_err(dev, "Failed to get GPIO register address\n");
+ return -ENXIO;
+ }
+
+ gctrl->membase = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(gctrl->membase))
+ return PTR_ERR(gctrl->membase);
+
+ gctrl->virq = irq_of_parse_and_map(np, 0);
+ if (!gctrl->virq) {
+ dev_err(dev, "%s: failed to parse and map irq\n",
+ gctrl->name);
+ return -ENXIO;
+ }
+ raw_spin_lock_init(&gctrl->lock);
+
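+ /* bgpio_init() takes the register width in bytes, hence nr_pins / 8 */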
+ ret = bgpio_init(&gctrl->chip, dev, gctrl->bank->nr_pins / 8,
+ gctrl->membase + GPIO_IN,
+ gctrl->membase + GPIO_OUTSET,
+ gctrl->membase + GPIO_OUTCLR,
+ gctrl->membase + GPIO_DIR,
+ NULL, 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ ret = gpiochip_setup(dev, gctrl);
+ if (ret)
+ return ret;
+
+ ret = devm_gpiochip_add_data(dev, &gctrl->chip, gctrl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct eqbr_pin_bank
+*find_pinbank_via_pin(struct eqbr_pinctrl_drv_data *pctl, unsigned int pin)
+{
+ struct eqbr_pin_bank *bank;
+ int i;
+
+ for (i = 0; i < pctl->nr_banks; i++) {
+ bank = &pctl->pin_banks[i];
+ if (pin >= bank->pin_base &&
+ (pin - bank->pin_base) < bank->nr_pins)
+ return bank;
+ }
+
+ return NULL;
+}
+
+static const struct pinctrl_ops eqbr_pctl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int eqbr_set_pin_mux(struct eqbr_pinctrl_drv_data *pctl,
+ unsigned int pmx, unsigned int pin)
+{
+ struct eqbr_pin_bank *bank;
+ unsigned long flags;
+ unsigned int offset;
+ void __iomem *mem;
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev, "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ if (!(bank->aval_pinmap & BIT(offset))) {
+ dev_err(pctl->dev,
+ "PIN: %u is not valid, pinbase: %u, bitmap: %u\n",
+ pin, bank->pin_base, bank->aval_pinmap);
+ return -ENODEV;
+ }
+
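+ /* each pin has its own 32-bit mux register at a 4-byte stride from the bank base */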
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ writel(pmx, mem + (offset * 4));
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return 0;
+}
+
+static int eqbr_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ struct group_desc *grp;
+ unsigned int *pinmux;
+ int i;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ pinmux = grp->data;
+ for (i = 0; i < grp->num_pins; i++)
+ eqbr_set_pin_mux(pctl, pinmux[i], grp->pins[i]);
+
+ return 0;
+}
+
+static int eqbr_pinmux_gpio_request(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return eqbr_set_pin_mux(pctl, EQBR_GPIO_MODE, pin);
+}
+
+static const struct pinmux_ops eqbr_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = eqbr_pinmux_set_mux,
+ .gpio_request_enable = eqbr_pinmux_gpio_request,
+ .strict = true,
+};
+
+static int get_drv_cur(void __iomem *mem, unsigned int offset)
+{
+ unsigned int idx = offset / DRV_CUR_PINS; /* 0-15, 16-31 per register*/
+ unsigned int pin_offset = offset % DRV_CUR_PINS;
+
+ return PARSE_DRV_CURRENT(readl(mem + REG_DRCC(idx)), pin_offset);
+}
+
+static struct eqbr_gpio_ctrl
+*get_gpio_ctrls_via_bank(struct eqbr_pinctrl_drv_data *pctl,
+ struct eqbr_pin_bank *bank)
+{
+ int i;
+
+ for (i = 0; i < pctl->nr_gpio_ctrls; i++) {
+ if (pctl->gpio_ctrls[i].bank == bank)
+ return &pctl->gpio_ctrls[i];
+ }
+
+ return NULL;
+}
+
+static int eqbr_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct eqbr_gpio_ctrl *gctrl;
+ struct eqbr_pin_bank *bank;
+ unsigned long flags;
+ unsigned int offset;
+ void __iomem *mem;
+ u32 val;
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev, "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ if (!(bank->aval_pinmap & BIT(offset))) {
+ dev_err(pctl->dev,
+ "PIN: %u is not valid, pinbase: %u, bitmap: %u\n",
+ pin, bank->pin_base, bank->aval_pinmap);
+ return -ENODEV;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = !!(readl(mem + REG_PUEN) & BIT(offset));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = !!(readl(mem + REG_PDEN) & BIT(offset));
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ val = !!(readl(mem + REG_OD) & BIT(offset));
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ val = get_drv_cur(mem, offset);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ val = !!(readl(mem + REG_SRC) & BIT(offset));
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ gctrl = get_gpio_ctrls_via_bank(pctl, bank);
+ if (!gctrl) {
+ dev_err(pctl->dev, "Failed to find gpio via bank pinbase: %u, pin: %u\n",
+ bank->pin_base, pin);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return -ENODEV;
+ }
+ val = !!(readl(gctrl->membase + GPIO_DIR) & BIT(offset));
+ break;
+ default:
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return -ENOTSUPP;
+ }
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ *config = pinconf_to_config_packed(param, val);
+
+ return 0;
+}
+
+static int eqbr_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct eqbr_gpio_ctrl *gctrl;
+ enum pin_config_param param;
+ struct eqbr_pin_bank *bank;
+ unsigned int val, offset;
+ struct gpio_chip *gc;
+ unsigned long flags;
+ void __iomem *mem;
+ u32 regval, mask;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ val = pinconf_to_config_argument(configs[i]);
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev,
+ "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mem += REG_PUEN;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mem += REG_PDEN;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ mem += REG_OD;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ mem += REG_DRCC(offset / DRV_CUR_PINS);
+ offset = (offset % DRV_CUR_PINS) * 2;
+ mask = GENMASK(1, 0) << offset;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ mem += REG_SRC;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ gctrl = get_gpio_ctrls_via_bank(pctl, bank);
+ if (!gctrl) {
+ dev_err(pctl->dev, "Failed to find gpio via bank pinbase: %u, pin: %u\n",
+ bank->pin_base, pin);
+ return -ENODEV;
+ }
+ gc = &gctrl->chip;
+ gc->direction_output(gc, offset, 0);
+ continue;
+ default:
+ return -ENOTSUPP;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ regval = readl(mem);
+ regval = (regval & ~mask) | ((val << offset) & mask);
+ writel(regval, mem);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ }
+
+ return 0;
+}
+
+static int eqbr_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *config)
+{
+ unsigned int i, npins, old = 0;
+ const unsigned int *pins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ if (eqbr_pinconf_get(pctldev, pins[i], config))
+ return -ENOTSUPP;
+
+ if (i && old != *config)
+ return -ENOTSUPP;
+
+ old = *config;
+ }
+ return 0;
+}
+
+static int eqbr_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = eqbr_pinconf_set(pctldev, pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct pinconf_ops eqbr_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = eqbr_pinconf_get,
+ .pin_config_set = eqbr_pinconf_set,
+ .pin_config_group_get = eqbr_pinconf_group_get,
+ .pin_config_group_set = eqbr_pinconf_group_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static bool is_func_exist(struct eqbr_pmx_func *funcs, const char *name,
+ unsigned int nr_funcs, unsigned int *idx)
+{
+ int i;
+
+ if (!funcs)
+ return false;
+
+ for (i = 0; i < nr_funcs; i++) {
+ if (funcs[i].name && !strcmp(funcs[i].name, name)) {
+ *idx = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
+ unsigned int *nr_funcs, funcs_util_ops op)
+{
+ struct device_node *node = dev->of_node;
+ struct device_node *np;
+ struct property *prop;
+ const char *fn_name;
+ unsigned int fid;
+ int i, j;
+
+ i = 0;
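+ /* walk every child node that has a "groups" property; the action taken depends on op */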
+ for_each_child_of_node(node, np) {
+ prop = of_find_property(np, "groups", NULL);
+ if (!prop)
+ continue;
+
+ if (of_property_read_string(np, "function", &fn_name)) {
+ /* some groups may not have a function; that's OK */
+ dev_dbg(dev, "Group %s: no function bound\n",
+ (char *)prop->value);
+ continue;
+ }
+
+ switch (op) {
+ case OP_COUNT_NR_FUNCS:
+ if (!is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ *nr_funcs = *nr_funcs + 1;
+ break;
+
+ case OP_ADD_FUNCS:
+ if (!is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ funcs[i].name = fn_name;
+ break;
+
+ case OP_COUNT_NR_FUNC_GRPS:
+ if (is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ funcs[fid].nr_groups++;
+ break;
+
+ case OP_ADD_FUNC_GRPS:
+ if (is_func_exist(funcs, fn_name, *nr_funcs, &fid)) {
+ for (j = 0; j < funcs[fid].nr_groups; j++)
+ if (!funcs[fid].groups[j])
+ break;
+ funcs[fid].groups[j] = prop->value;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static int eqbr_build_functions(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct eqbr_pmx_func *funcs = NULL;
+ unsigned int nr_funcs = 0;
+ int i, ret;
+
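+ /* two passes to collect function names, then two more to count and attach their groups */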
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_COUNT_NR_FUNCS);
+ if (ret)
+ return ret;
+
+ funcs = devm_kcalloc(dev, nr_funcs, sizeof(*funcs), GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_ADD_FUNCS);
+ if (ret)
+ return ret;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_COUNT_NR_FUNC_GRPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_funcs; i++) {
+ if (!funcs[i].nr_groups)
+ continue;
+ funcs[i].groups = devm_kcalloc(dev, funcs[i].nr_groups,
+ sizeof(*(funcs[i].groups)),
+ GFP_KERNEL);
+ if (!funcs[i].groups)
+ return -ENOMEM;
+ }
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_ADD_FUNC_GRPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_funcs; i++) {
+ ret = pinmux_generic_add_function(drvdata->pctl_dev,
+ funcs[i].name,
+ funcs[i].groups,
+ funcs[i].nr_groups,
+ drvdata);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register function %s\n",
+ funcs[i].name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct device_node *node = dev->of_node;
+ unsigned int *pinmux, pin_id, pinmux_id;
+ struct group_desc group;
+ struct device_node *np;
+ struct property *prop;
+ int j, err;
+
+ for_each_child_of_node(node, np) {
+ prop = of_find_property(np, "groups", NULL);
+ if (!prop)
+ continue;
+
+ group.num_pins = of_property_count_u32_elems(np, "pins");
+ if (group.num_pins < 0) {
+ dev_err(dev, "No pins in the group: %s\n", prop->name);
+ return -EINVAL;
+ }
+ group.name = prop->value;
+ group.pins = devm_kcalloc(dev, group.num_pins,
+ sizeof(*(group.pins)), GFP_KERNEL);
+ if (!group.pins)
+ return -ENOMEM;
+
+ pinmux = devm_kcalloc(dev, group.num_pins, sizeof(*pinmux),
+ GFP_KERNEL);
+ if (!pinmux)
+ return -ENOMEM;
+
+ for (j = 0; j < group.num_pins; j++) {
+ if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
+ dev_err(dev, "Group %s: Read intel pins id failed\n",
+ group.name);
+ return -EINVAL;
+ }
+ if (pin_id >= drvdata->pctl_desc.npins) {
+ dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
+ group.name, j, pin_id);
+ return -EINVAL;
+ }
+ group.pins[j] = pin_id;
+ if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
+ dev_err(dev, "Group %s: Read intel pinmux id failed\n",
+ group.name);
+ return -EINVAL;
+ }
+ pinmux[j] = pinmux_id;
+ }
+
+ err = pinctrl_generic_add_group(drvdata->pctl_dev, group.name,
+ group.pins, group.num_pins,
+ pinmux);
+ if (err < 0) {
+ dev_err(dev, "Failed to register group %s\n", group.name);
+ return err;
+ }
+ memset(&group, 0, sizeof(group));
+ pinmux = NULL;
+ }
+
+ return 0;
+}
+
+static int pinctrl_reg(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct pinctrl_desc *pctl_desc;
+ struct pinctrl_pin_desc *pdesc;
+ struct device *dev;
+ unsigned int nr_pins;
+ char *pin_names;
+ int i, ret;
+
+ dev = drvdata->dev;
+ pctl_desc = &drvdata->pctl_desc;
+ pctl_desc->name = "eqbr-pinctrl";
+ pctl_desc->owner = THIS_MODULE;
+ pctl_desc->pctlops = &eqbr_pctl_ops;
+ pctl_desc->pmxops = &eqbr_pinmux_ops;
+ pctl_desc->confops = &eqbr_pinconf_ops;
+ raw_spin_lock_init(&drvdata->lock);
+
+ for (i = 0, nr_pins = 0; i < drvdata->nr_banks; i++)
+ nr_pins += drvdata->pin_banks[i].nr_pins;
+
+ pdesc = devm_kcalloc(dev, nr_pins, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc)
+ return -ENOMEM;
+ pin_names = devm_kcalloc(dev, nr_pins, PIN_NAME_LEN, GFP_KERNEL);
+ if (!pin_names)
+ return -ENOMEM;
+
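+ /* Carve the shared buffer into one fixed PIN_NAME_LEN slice per pin. */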
+ for (i = 0; i < nr_pins; i++) {
+ sprintf(pin_names, PIN_NAME_FMT, i);
+ pdesc[i].number = i;
+ pdesc[i].name = pin_names;
+ pin_names += PIN_NAME_LEN;
+ }
+ pctl_desc->pins = pdesc;
+ pctl_desc->npins = nr_pins;
+ dev_dbg(dev, "pinctrl total pin number: %u\n", nr_pins);
+
+ ret = devm_pinctrl_register_and_init(dev, pctl_desc, drvdata,
+ &drvdata->pctl_dev);
+ if (ret)
+ return ret;
+
+ ret = eqbr_build_groups(drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to build groups\n");
+ return ret;
+ }
+
+ ret = eqbr_build_functions(drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to build groups\n");
+ return ret;
+ }
+
+ return pinctrl_enable(drvdata->pctl_dev);
+}
+
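+/*
+ * Derive the bank's pin range from its standard "gpio-ranges" property
+ * (<&pinctrl gpio-offset pin-base npins>) and read the bitmap of available
+ * pads from the bank's REG_AVAIL register.
+ */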
+static int pinbank_init(struct device_node *np,
+ struct eqbr_pinctrl_drv_data *drvdata,
+ struct eqbr_pin_bank *bank, unsigned int id)
+{
+ struct device *dev = drvdata->dev;
+ struct of_phandle_args spec;
+ int ret;
+
+ bank->membase = drvdata->membase + id * PAD_REG_OFF;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &spec);
+ if (ret) {
+ dev_err(dev, "gpio-range not available!\n");
+ return ret;
+ }
+
+ bank->pin_base = spec.args[1];
+ bank->nr_pins = spec.args[2];
+
+ bank->aval_pinmap = readl(bank->membase + REG_AVAIL);
+ bank->id = id;
+
+ dev_dbg(dev, "pinbank id: %d, reg: %px, pinbase: %u, pin number: %u, pinmap: 0x%x\n",
+ id, bank->membase, bank->pin_base,
+ bank->nr_pins, bank->aval_pinmap);
+
+ return ret;
+}
+
+static int pinbank_probe(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct device_node *np_gpio;
+ struct eqbr_gpio_ctrl *gctrls;
+ struct eqbr_pin_bank *banks;
+ int i, nr_gpio;
+
+ /* Count gpio bank number */
+ nr_gpio = 0;
+ for_each_node_by_name(np_gpio, "gpio") {
+ if (of_device_is_available(np_gpio))
+ nr_gpio++;
+ }
+
+ if (!nr_gpio) {
+ dev_err(dev, "NO pin bank available!\n");
+ return -ENODEV;
+ }
+
+ /* Allocate pin banks and gpio controllers */
+ banks = devm_kcalloc(dev, nr_gpio, sizeof(*banks), GFP_KERNEL);
+ if (!banks)
+ return -ENOMEM;
+
+ gctrls = devm_kcalloc(dev, nr_gpio, sizeof(*gctrls), GFP_KERNEL);
+ if (!gctrls)
+ return -ENOMEM;
+
+ dev_dbg(dev, "found %d gpio controller!\n", nr_gpio);
+
+ /* Initialize Pin bank */
+ i = 0;
+ for_each_node_by_name(np_gpio, "gpio") {
+ if (!of_device_is_available(np_gpio))
+ continue;
+
+ pinbank_init(np_gpio, drvdata, banks + i, i);
+
+ gctrls[i].node = np_gpio;
+ gctrls[i].bank = banks + i;
+ i++;
+ }
+
+ drvdata->pin_banks = banks;
+ drvdata->nr_banks = nr_gpio;
+ drvdata->gpio_ctrls = gctrls;
+ drvdata->nr_gpio_ctrls = nr_gpio;
+
+ return 0;
+}
+
+static int eqbr_pinctrl_probe(struct platform_device *pdev)
+{
+ struct eqbr_pinctrl_drv_data *drvdata;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ drvdata->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drvdata->membase))
+ return PTR_ERR(drvdata->membase);
+
+ ret = pinbank_probe(drvdata);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_reg(drvdata);
+ if (ret)
+ return ret;
+
+ ret = gpiolib_reg(drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drvdata);
+ return 0;
+}
+
+static const struct of_device_id eqbr_pinctrl_dt_match[] = {
+ { .compatible = "intel,lgm-io" },
+ {}
+};
+
+static struct platform_driver eqbr_pinctrl_driver = {
+ .probe = eqbr_pinctrl_probe,
+ .driver = {
+ .name = "eqbr-pinctrl",
+ .of_match_table = eqbr_pinctrl_dt_match,
+ },
+};
+
+module_platform_driver(eqbr_pinctrl_driver);
+
+MODULE_AUTHOR("Zhu Yixin <yixin.zhu@intel.com>, Rahul Tanwar <rahul.tanwar@intel.com>");
+MODULE_DESCRIPTION("Pinctrl Driver for LGM SoC (Equilibrium)");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-equilibrium.h b/drivers/pinctrl/pinctrl-equilibrium.h
new file mode 100644
index 000000000000..83cb7dafc657
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-equilibrium.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef __PINCTRL_EQUILIBRIUM_H
+#define __PINCTRL_EQUILIBRIUM_H
+
+/* PINPAD register offset */
+#define REG_PMX_BASE 0x0 /* Port Multiplexer Control Register */
+#define REG_PUEN 0x80 /* PULL UP Enable Register */
+#define REG_PDEN 0x84 /* PULL DOWN Enable Register */
+#define REG_SRC 0x88 /* Slew Rate Control Register */
+#define REG_DCC0 0x8C /* Drive Current Control Register 0 */
+#define REG_DCC1 0x90 /* Drive Current Control Register 1 */
+#define REG_OD 0x94 /* Open Drain Enable Register */
+#define REG_AVAIL 0x98 /* Pad Control Availability Register */
+#define DRV_CUR_PINS 16 /* Number of drive current pins per register */
+#define REG_DRCC(x) (REG_DCC0 + (x) * 4) /* Drive current register macro */
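+/* e.g. pin 20 would be configured via REG_DRCC(20 / DRV_CUR_PINS), i.e. REG_DCC1 */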
+
+/* GPIO register offset */
+#define GPIO_OUT 0x0 /* Data Output Register */
+#define GPIO_IN 0x4 /* Data Input Register */
+#define GPIO_DIR 0x8 /* Direction Register */
+#define GPIO_EXINTCR0 0x18 /* External Interrupt Control Register 0 */
+#define GPIO_EXINTCR1 0x1C /* External Interrupt Control Register 1 */
+#define GPIO_IRNCR 0x20 /* IRN Capture Register */
+#define GPIO_IRNICR 0x24 /* IRN Interrupt Control Register */
+#define GPIO_IRNEN 0x28 /* IRN Interrupt Enable Register */
+#define GPIO_IRNCFG 0x2C /* IRN Interrupt Configuration Register */
+#define GPIO_IRNRNSET 0x30 /* IRN Interrupt Enable Set Register */
+#define GPIO_IRNENCLR 0x34 /* IRN Interrupt Enable Clear Register */
+#define GPIO_OUTSET 0x40 /* Output Set Register */
+#define GPIO_OUTCLR 0x44 /* Output Clear Register */
+#define GPIO_DIRSET 0x48 /* Direction Set Register */
+#define GPIO_DIRCLR 0x4C /* Direction Clear Register */
+
+/* Parse the drive current value of a given pin */
+#define PARSE_DRV_CURRENT(val, pin) (((val) >> ((pin) * 2)) & 0x3)
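+/* e.g. PARSE_DRV_CURRENT(0xC0, 3) extracts bits [7:6] and yields 0x3 */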
+
+#define GPIO_EDGE_TRIG 0
+#define GPIO_LEVEL_TRIG 1
+#define GPIO_SINGLE_EDGE 0
+#define GPIO_BOTH_EDGE 1
+#define GPIO_POSITIVE_TRIG 0
+#define GPIO_NEGATIVE_TRIG 1
+
+#define EQBR_GPIO_MODE 0
+
+typedef enum {
+ OP_COUNT_NR_FUNCS,
+ OP_ADD_FUNCS,
+ OP_COUNT_NR_FUNC_GRPS,
+ OP_ADD_FUNC_GRPS,
+ OP_NONE,
+} funcs_util_ops;
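+/*
+ * These ops select which pass over the DT child nodes is performed: the
+ * OP_COUNT_* ops size the allocations, the OP_ADD_* ops populate them.
+ */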
+
+/**
+ * struct gpio_irq_type: gpio irq configuration
+ * @trig_type: level trigger or edge trigger
+ * @edge_type: single edge or both edges
+ * @logic_type: positive trigger or negative trigger
+ */
+struct gpio_irq_type {
+ unsigned int trig_type;
+ unsigned int edge_type;
+ unsigned int logic_type;
+};
+
+/**
+ * struct eqbr_pmx_func: represent a pin function.
+ * @name: name of the pin function, used to lookup the function.
+ * @groups: one or more names of pin groups that provide this function.
+ * @nr_groups: number of groups included in @groups.
+ */
+struct eqbr_pmx_func {
+ const char *name;
+ const char **groups;
+ unsigned int nr_groups;
+};
+
+/**
+ * struct eqbr_pin_bank: represent a pin bank.
+ * @membase: base address of the pin bank register.
+ * @id: bank id, to uniquely identify the bank.
+ * @pin_base: starting pin number of the pin bank.
+ * @nr_pins: number of the pins of the pin bank.
+ * @aval_pinmap: available pin bitmap of the pin bank.
+ */
+struct eqbr_pin_bank {
+ void __iomem *membase;
+ unsigned int id;
+ unsigned int pin_base;
+ unsigned int nr_pins;
+ u32 aval_pinmap;
+};
+
+/**
+ * struct eqbr_gpio_ctrl: represent a gpio controller.
+ * @node: device node of gpio controller.
+ * @bank: pointer to corresponding pin bank.
+ * @membase: base address of the gpio controller.
+ * @chip: gpio chip.
+ * @ic: irq chip.
+ * @name: gpio chip name.
+ * @virq: irq number of the gpio chip in the parent's irq domain.
+ * @lock: spin lock to protect gpio register write.
+ */
+struct eqbr_gpio_ctrl {
+ struct device_node *node;
+ struct eqbr_pin_bank *bank;
+ void __iomem *membase;
+ struct gpio_chip chip;
+ struct irq_chip ic;
+ const char *name;
+ unsigned int virq;
+ raw_spinlock_t lock; /* protect gpio register */
+};
+
+/**
+ * struct eqbr_pinctrl_drv_data: represent the pinctrl driver data.
+ * @dev: device instance representing the controller.
+ * @pctl_desc: pin controller descriptor.
+ * @pctl_dev: pin control class device
+ * @membase: base address of pin controller
+ * @pin_banks: list of pin banks of the driver.
+ * @nr_banks: number of pin banks.
+ * @gpio_ctrls: list of gpio controllers.
+ * @nr_gpio_ctrls: number of gpio controllers.
+ * @lock: protect pinctrl register write
+ */
+struct eqbr_pinctrl_drv_data {
+ struct device *dev;
+ struct pinctrl_desc pctl_desc;
+ struct pinctrl_dev *pctl_dev;
+ void __iomem *membase;
+ struct eqbr_pin_bank *pin_banks;
+ unsigned int nr_banks;
+ struct eqbr_gpio_ctrl *gpio_ctrls;
+ unsigned int nr_gpio_ctrls;
+ raw_spinlock_t lock; /* protect pinpad register */
+};
+
+#endif /* __PINCTRL_EQUILIBRIUM_H */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 6e2683016c1f..24e0e2ef47a4 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -686,6 +686,7 @@ static int jz4770_mac_rmii_pins[] = {
0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
};
static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
+static int jz4770_otg_pins[] = { 0x8a, };
static int jz4770_uart0_data_funcs[] = { 0, 0, };
static int jz4770_uart0_hwflow_funcs[] = { 0, 0, };
@@ -744,6 +745,7 @@ static int jz4770_pwm_pwm6_funcs[] = { 0, };
static int jz4770_pwm_pwm7_funcs[] = { 0, };
static int jz4770_mac_rmii_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
static int jz4770_mac_mii_funcs[] = { 0, 0, };
+static int jz4770_otg_funcs[] = { 0, };
static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
@@ -799,6 +801,7 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii),
INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii),
+ INGENIC_PIN_GROUP("otg-vbus", jz4770_otg),
};
static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -841,6 +844,7 @@ static const char *jz4770_pwm5_groups[] = { "pwm5", };
static const char *jz4770_pwm6_groups[] = { "pwm6", };
static const char *jz4770_pwm7_groups[] = { "pwm7", };
static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
+static const char *jz4770_otg_groups[] = { "otg-vbus", };
static const struct function_desc jz4770_functions[] = {
{ "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
@@ -871,6 +875,7 @@ static const struct function_desc jz4770_functions[] = {
{ "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
{ "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
{ "mac", jz4770_mac_groups, ARRAY_SIZE(jz4770_mac_groups), },
+ { "otg", jz4770_otg_groups, ARRAY_SIZE(jz4770_otg_groups), },
};
static const struct ingenic_chip_info jz4770_chip_info = {
@@ -1801,19 +1806,30 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
}
+static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, bool high)
+{
+ if (jzpc->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ else
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
+}
+
static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs)
{
struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- unsigned int cfg;
+ unsigned int cfg, arg;
+ int ret;
for (cfg = 0; cfg < num_configs; cfg++) {
switch (pinconf_to_config_param(configs[cfg])) {
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_OUTPUT:
continue;
default:
return -ENOTSUPP;
@@ -1821,6 +1837,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
}
for (cfg = 0; cfg < num_configs; cfg++) {
+ arg = pinconf_to_config_argument(configs[cfg]);
+
switch (pinconf_to_config_param(configs[cfg])) {
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
@@ -1844,6 +1862,14 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_bias(jzpc, pin, true);
break;
+ case PIN_CONFIG_OUTPUT:
+ ret = pinctrl_gpio_direction_output(pin);
+ if (ret)
+ return ret;
+
+ ingenic_set_output_level(jzpc, pin, arg);
+ break;
+
default:
unreachable();
}
@@ -1940,6 +1966,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
{
struct ingenic_gpio_chip *jzgc;
struct device *dev = jzpc->dev;
+ struct gpio_irq_chip *girq;
unsigned int bank;
int err;
@@ -1982,10 +2009,6 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.free = gpiochip_generic_free;
}
- err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
- if (err)
- return err;
-
jzgc->irq = irq_of_parse_and_map(node, 0);
if (!jzgc->irq)
return -EINVAL;
@@ -2000,13 +2023,22 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
- err = gpiochip_irqchip_add(&jzgc->gc, &jzgc->irq_chip, 0,
- handle_level_irq, IRQ_TYPE_NONE);
+ girq = &jzgc->gc.irq;
+ girq->chip = &jzgc->irq_chip;
+ girq->parent_handler = ingenic_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = jzgc->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
+ err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
if (err)
return err;
- gpiochip_set_chained_irqchip(&jzgc->gc, &jzgc->irq_chip,
- jzgc->irq, ingenic_gpio_irq_handler);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 06be55dab341..e4677546aec4 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -1324,15 +1324,13 @@ static int lpc18xx_create_group_func_map(struct device *dev,
static int lpc18xx_scu_probe(struct platform_device *pdev)
{
struct lpc18xx_scu_data *scu;
- struct resource *res;
int ret;
scu = devm_kzalloc(&pdev->dev, sizeof(*scu), GFP_KERNEL);
if (!scu)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scu->base = devm_ioremap_resource(&pdev->dev, res);
+ scu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(scu->base))
return PTR_ERR(scu->base);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index fb76fb2e9ea5..eb3dd0d46d6c 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -736,6 +736,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
struct ocelot_pinctrl *info)
{
struct gpio_chip *gc;
+ struct gpio_irq_chip *girq;
int ret, irq;
info->gpio_chip = ocelot_gpiolib_chip;
@@ -747,22 +748,26 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
gc->of_node = info->dev->of_node;
gc->label = "ocelot-gpio";
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
- if (ret)
- return ret;
-
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq <= 0)
return irq;
- ret = gpiochip_irqchip_add(gc, &ocelot_irqchip, 0, handle_edge_irq,
- IRQ_TYPE_NONE);
+ girq = &gc->irq;
+ girq->chip = &ocelot_irqchip;
+ girq->parent_handler = ocelot_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
- gpiochip_set_chained_irqchip(gc, &ocelot_irqchip, irq,
- ocelot_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 55488ca246f1..674b7b5919df 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -1196,7 +1196,7 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
struct oxnas_gpio_bank *bank;
unsigned int id, ngpios;
int irq, ret;
- struct resource *res;
+ struct gpio_irq_chip *girq;
if (of_parse_phandle_with_fixed_args(np, "gpio-ranges",
3, 0, &pinspec)) {
@@ -1219,8 +1219,7 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank = &oxnas_gpio_banks[id];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ bank->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
@@ -1232,6 +1231,18 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank->gpio_chip.parent = &pdev->dev;
bank->gpio_chip.of_node = np;
bank->gpio_chip.ngpio = ngpios;
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = oxnas_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
@@ -1239,18 +1250,6 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
- id, ret);
- gpiochip_remove(&bank->gpio_chip);
- return ret;
- }
-
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, oxnas_gpio_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index e7f6dd5ab578..e5d6d3f9753e 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -2202,7 +2202,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
struct pic32_gpio_bank *bank;
u32 id;
int irq, ret;
- struct resource *res;
+ struct gpio_irq_chip *girq;
if (of_property_read_u32(np, "microchip,gpio-bank", &id)) {
dev_err(&pdev->dev, "microchip,gpio-bank property not found\n");
@@ -2216,8 +2216,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
bank = &pic32_gpio_banks[id];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ bank->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
@@ -2240,25 +2239,23 @@ static int pic32_gpio_probe(struct platform_device *pdev)
bank->gpio_chip.parent = &pdev->dev;
bank->gpio_chip.of_node = np;
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = pic32_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->parents[0] = irq;
ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
id, ret);
return ret;
}
-
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
- id, ret);
- gpiochip_remove(&bank->gpio_chip);
- return ret;
- }
-
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, pic32_gpio_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 379e9a6a6d89..fa370c171cad 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1352,6 +1352,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
for (i = 0; i < pctl->nbanks; i++) {
char child_name[sizeof("gpioXX")];
struct device_node *child;
+ struct gpio_irq_chip *girq;
snprintf(child_name, sizeof(child_name), "gpio%d", i);
child = of_get_child_by_name(node, child_name);
@@ -1383,23 +1384,28 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
bank->gpio_chip.parent = pctl->dev;
bank->gpio_chip.of_node = child;
- ret = gpiochip_add_data(&bank->gpio_chip, bank);
- if (ret < 0) {
- dev_err(pctl->dev, "Failed to add GPIO chip %u: %d\n",
- i, ret);
+
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = pistachio_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctl->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
goto err;
}
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
- dev_err(pctl->dev, "Failed to add IRQ chip %u: %d\n",
+ dev_err(pctl->dev, "Failed to add GPIO chip %u: %d\n",
i, ret);
- gpiochip_remove(&bank->gpio_chip);
goto err;
}
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, pistachio_gpio_irq_handler);
ret = gpiochip_add_pin_range(&bank->gpio_chip,
dev_name(pctl->dev), 0,
@@ -1429,7 +1435,6 @@ static const struct of_device_id pistachio_pinctrl_of_match[] = {
static int pistachio_pinctrl_probe(struct platform_device *pdev)
{
struct pistachio_pinctrl *pctl;
- struct resource *res;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
@@ -1437,8 +1442,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
pctl->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, pctl);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->base))
return PTR_ERR(pctl->base);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index dc0bbf198cbc..fc9a2a9959d9 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -58,6 +58,7 @@ enum rockchip_pinctrl_type {
RK3128,
RK3188,
RK3288,
+ RK3308,
RK3368,
RK3399,
};
@@ -70,6 +71,7 @@ enum rockchip_pinctrl_type {
#define IOMUX_SOURCE_PMU BIT(2)
#define IOMUX_UNROUTED BIT(3)
#define IOMUX_WIDTH_3BIT BIT(4)
+#define IOMUX_WIDTH_2BIT BIT(5)
/**
* @type: iomux variant using IOMUX_* constants
@@ -656,6 +658,100 @@ static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
},
};
+static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
+ {
+ .num = 1,
+ .pin = 14,
+ .reg = 0x28,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 15,
+ .reg = 0x2c,
+ .bit = 0,
+ .mask = 0x3
+ }, {
+ .num = 1,
+ .pin = 18,
+ .reg = 0x30,
+ .bit = 4,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 19,
+ .reg = 0x30,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 20,
+ .reg = 0x30,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 21,
+ .reg = 0x34,
+ .bit = 0,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 22,
+ .reg = 0x34,
+ .bit = 4,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 23,
+ .reg = 0x34,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 3,
+ .pin = 12,
+ .reg = 0x68,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 3,
+ .pin = 13,
+ .reg = 0x68,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 2,
+ .pin = 2,
+ .reg = 0x608,
+ .bit = 0,
+ .mask = 0x7
+ }, {
+ .num = 2,
+ .pin = 3,
+ .reg = 0x608,
+ .bit = 4,
+ .mask = 0x7
+ }, {
+ .num = 2,
+ .pin = 16,
+ .reg = 0x610,
+ .bit = 8,
+ .mask = 0x7
+ }, {
+ .num = 3,
+ .pin = 10,
+ .reg = 0x610,
+ .bit = 0,
+ .mask = 0x7
+ }, {
+ .num = 3,
+ .pin = 11,
+ .reg = 0x610,
+ .bit = 4,
+ .mask = 0x7
+ },
+};
+
static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
{
.num = 2,
@@ -982,6 +1078,192 @@ static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
},
};
+static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
+ {
+ /* rtc_clk */
+ .bank_num = 0,
+ .pin = 19,
+ .func = 1,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 0) | BIT(0),
+ }, {
+ /* uart2_rxm0 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 2) | BIT(16 + 3),
+ }, {
+ /* uart2_rxm1 */
+ .bank_num = 4,
+ .pin = 26,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 2) | BIT(16 + 3) | BIT(2),
+ }, {
+ /* i2c3_sdam0 */
+ .bank_num = 0,
+ .pin = 15,
+ .func = 2,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9),
+ }, {
+ /* i2c3_sdam1 */
+ .bank_num = 3,
+ .pin = 12,
+ .func = 2,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(8),
+ }, {
+ /* i2c3_sdam2 */
+ .bank_num = 2,
+ .pin = 0,
+ .func = 3,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(9),
+ }, {
+ /* i2s-8ch-1-sclktxm0 */
+ .bank_num = 1,
+ .pin = 3,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* i2s-8ch-1-sclkrxm0 */
+ .bank_num = 1,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* i2s-8ch-1-sclktxm1 */
+ .bank_num = 1,
+ .pin = 13,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* i2s-8ch-1-sclkrxm1 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* pdm-clkm0 */
+ .bank_num = 1,
+ .pin = 4,
+ .func = 3,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13),
+ }, {
+ /* pdm-clkm1 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 4,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
+ }, {
+ /* pdm-clkm2 */
+ .bank_num = 2,
+ .pin = 6,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
+ }, {
+ /* pdm-clkm-m2 */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 3,
+ .route_offset = 0x600,
+ .route_val = BIT(16 + 2) | BIT(2),
+ }, {
+ /* spi1_miso */
+ .bank_num = 3,
+ .pin = 10,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 9),
+ }, {
+ /* spi1_miso_m1 */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 9) | BIT(9),
+ }, {
+ /* owire_m0 */
+ .bank_num = 0,
+ .pin = 11,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11),
+ }, {
+ /* owire_m1 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 7,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
+ }, {
+ /* owire_m2 */
+ .bank_num = 2,
+ .pin = 2,
+ .func = 5,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
+ }, {
+ /* can_rxd_m0 */
+ .bank_num = 0,
+ .pin = 11,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13),
+ }, {
+ /* can_rxd_m1 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 5,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
+ }, {
+ /* can_rxd_m2 */
+ .bank_num = 2,
+ .pin = 2,
+ .func = 4,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
+ }, {
+ /* mac_rxd0_m0 */
+ .bank_num = 1,
+ .pin = 20,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 14),
+ }, {
+ /* mac_rxd0_m1 */
+ .bank_num = 4,
+ .pin = 2,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 14) | BIT(14),
+ }, {
+ /* uart3_rx */
+ .bank_num = 3,
+ .pin = 12,
+ .func = 4,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 15),
+ }, {
+ /* uart3_rx_m1 */
+ .bank_num = 0,
+ .pin = 17,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 15) | BIT(15),
+ },
+};
+
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
{
/* uart2dbg_rxm0 */
@@ -1475,6 +1757,26 @@ static int rv1108_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3308_SCHMITT_PINS_PER_REG 8
+#define RK3308_SCHMITT_BANK_STRIDE 16
+#define RK3308_SCHMITT_GRF_OFFSET 0x1a0
+
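+/*
+ * e.g. bank 2, pin 13: reg = 0x1a0 + 2 * 16 + (13 / 8) * 4 = 0x1c4, bit = 5
+ */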
+static int rk3308_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_SCHMITT_GRF_OFFSET;
+
+ *reg += bank->bank_num * RK3308_SCHMITT_BANK_STRIDE;
+ *reg += ((pin_num / RK3308_SCHMITT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3308_SCHMITT_PINS_PER_REG;
+
+ return 0;
+}
+
#define RK2928_PULL_OFFSET 0x118
#define RK2928_PULL_PINS_PER_REG 16
#define RK2928_PULL_BANK_STRIDE 8
@@ -1646,6 +1948,40 @@ static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit *= RK3288_DRV_BITS_PER_PIN;
}
+#define RK3308_PULL_OFFSET 0xa0
+
+static void rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_PULL_OFFSET;
+ *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3188_PULL_PINS_PER_REG);
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+}
+
+#define RK3308_DRV_GRF_OFFSET 0x100
+
+static void rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_DRV_GRF_OFFSET;
+ *reg += bank->bank_num * RK3288_DRV_BANK_STRIDE;
+ *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3288_DRV_PINS_PER_REG);
+ *bit *= RK3288_DRV_BITS_PER_PIN;
+}
+
#define RK3368_PULL_GRF_OFFSET 0x100
#define RK3368_PULL_PMU_OFFSET 0x10
@@ -1986,6 +2322,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
pull_type = bank->pull_type[pin_num / 8];
@@ -2030,6 +2367,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
pull_type = bank->pull_type[pin_num / 8];
@@ -2293,6 +2631,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
@@ -3303,7 +3642,8 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
* 4bit iomux'es are spread over two registers.
*/
inc = (iom->type & (IOMUX_WIDTH_4BIT |
- IOMUX_WIDTH_3BIT)) ? 8 : 4;
+ IOMUX_WIDTH_3BIT |
+ IOMUX_WIDTH_2BIT)) ? 8 : 4;
if (iom->type & IOMUX_SOURCE_PMU)
pmu_offs += inc;
else
@@ -3709,6 +4049,44 @@ static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
.drv_calc_reg = rk3288_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3308_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+};
+
+static struct rockchip_pin_ctrl rk3308_pin_ctrl = {
+ .pin_banks = rk3308_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3308_pin_banks),
+ .label = "RK3308-GPIO",
+ .type = RK3308,
+ .grf_mux_offset = 0x0,
+ .iomux_recalced = rk3308_mux_recalced_data,
+ .niomux_recalced = ARRAY_SIZE(rk3308_mux_recalced_data),
+ .iomux_routes = rk3308_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3308_mux_route_data),
+ .pull_calc_reg = rk3308_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3308_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3308_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3328_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
@@ -3849,6 +4227,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3228_pin_ctrl },
{ .compatible = "rockchip,rk3288-pinctrl",
.data = &rk3288_pin_ctrl },
+ { .compatible = "rockchip,rk3308-pinctrl",
+ .data = &rk3308_pin_ctrl },
{ .compatible = "rockchip,rk3328-pinctrl",
.data = &rk3328_pin_ctrl },
{ .compatible = "rockchip,rk3368-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 017fc6b3e27e..215db220d795 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -617,12 +617,6 @@ static void rza1_pin_reset(struct rza1_port *port, unsigned int pin)
spin_unlock_irqrestore(&port->lock, irqflags);
}
-static inline int rza1_pin_get_direction(struct rza1_port *port,
- unsigned int pin)
-{
- return !!rza1_get_bit(port, RZA1_PM_REG, pin);
-}
-
/**
* rza1_pin_set_direction() - set I/O direction on a pin in port mode
*
@@ -783,7 +777,7 @@ static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
{
struct rza1_port *port = gpiochip_get_data(chip);
- return rza1_pin_get_direction(port, gpio);
+ return !!rza1_get_bit(port, RZA1_PM_REG, gpio);
}
static int rza1_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c
index 3be1d833bf25..a205964e839b 100644
--- a/drivers/pinctrl/pinctrl-rza2.c
+++ b/drivers/pinctrl/pinctrl-rza2.c
@@ -213,8 +213,8 @@ static const char * const rza2_gpio_names[] = {
"PC_0", "PC_1", "PC_2", "PC_3", "PC_4", "PC_5", "PC_6", "PC_7",
"PD_0", "PD_1", "PD_2", "PD_3", "PD_4", "PD_5", "PD_6", "PD_7",
"PE_0", "PE_1", "PE_2", "PE_3", "PE_4", "PE_5", "PE_6", "PE_7",
- "PF_0", "PF_1", "PF_2", "PF_3", "P0_4", "PF_5", "PF_6", "PF_7",
- "PG_0", "PG_1", "PG_2", "P0_3", "PG_4", "PG_5", "PG_6", "PG_7",
+ "PF_0", "PF_1", "PF_2", "PF_3", "PF_4", "PF_5", "PF_6", "PF_7",
+ "PG_0", "PG_1", "PG_2", "PG_3", "PG_4", "PG_5", "PG_6", "PG_7",
"PH_0", "PH_1", "PH_2", "PH_3", "PH_4", "PH_5", "PH_6", "PH_7",
/* port I does not exist */
"PJ_0", "PJ_1", "PJ_2", "PJ_3", "PJ_4", "PJ_5", "PJ_6", "PJ_7",
@@ -462,7 +462,6 @@ static const struct pinmux_ops rza2_pinmux_ops = {
static int rza2_pinctrl_probe(struct platform_device *pdev)
{
struct rza2_pinctrl_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -471,8 +470,7 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/pinctrl/pinctrl-rzn1.c b/drivers/pinctrl/pinctrl-rzn1.c
index 0f6f8a10a53a..39538d40dbf3 100644
--- a/drivers/pinctrl/pinctrl-rzn1.c
+++ b/drivers/pinctrl/pinctrl-rzn1.c
@@ -487,7 +487,7 @@ static int rzn1_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
{
struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- const u32 reg_drive[4] = { 4, 6, 8, 12 };
+ static const u32 reg_drive[4] = { 4, 6, 8, 12 };
u32 pull, drive, l1mux;
u32 l1, l2, arg = 0;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 00db8b9efb2c..4f39a7945d01 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1477,7 +1477,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
struct device *dev = info->dev;
int bank_num = of_alias_get_id(np, "gpio");
struct resource res, irq_res;
- int gpio_irq = 0, err;
+ int err;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
@@ -1500,12 +1500,6 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
range->pin_base = range->base = range->id * ST_GPIO_PINS_PER_BANK;
range->npins = bank->gpio_chip.ngpio;
range->gc = &bank->gpio_chip;
- err = gpiochip_add_data(&bank->gpio_chip, bank);
- if (err) {
- dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
- return err;
- }
- dev_info(dev, "%s bank added.\n", range->name);
/**
* GPIO bank can have one of the two possible types of
@@ -1527,23 +1521,40 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
*/
if (of_irq_to_resource(np, 0, &irq_res) > 0) {
- gpio_irq = irq_res.start;
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &st_gpio_irqchip,
- gpio_irq, st_gpio_irq_handler);
- }
+ struct gpio_irq_chip *girq;
+ int gpio_irq = irq_res.start;
- if (info->irqmux_base || gpio_irq > 0) {
- err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
- 0, handle_simple_irq,
- IRQ_TYPE_NONE);
- if (err) {
- gpiochip_remove(&bank->gpio_chip);
- dev_info(dev, "could not add irqchip\n");
- return err;
+ /* This is not a valid IRQ */
+ if (gpio_irq <= 0) {
+ dev_err(dev, "invalid IRQ for %pOF bank\n", np);
+ goto skip_irq;
}
- } else {
- dev_info(dev, "No IRQ support for %pOF bank\n", np);
+ /* We need to have a mux as well */
+ if (!info->irqmux_base) {
+ dev_err(dev, "no irqmux for %pOF bank\n", np);
+ goto skip_irq;
+ }
+
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &st_gpio_irqchip;
+ girq->parent_handler = st_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = gpio_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ }
+
+skip_irq:
+ err = gpiochip_add_data(&bank->gpio_chip, bank);
+ if (err) {
+ dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
+ return err;
}
+ dev_info(dev, "%s bank added.\n", range->name);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index ccdf0bb21414..16723797fa7c 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -505,6 +505,25 @@ static void stmfx_pinctrl_irq_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&pctl->lock);
}
+static int stmfx_gpio_irq_request_resources(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = stmfx_gpio_direction_input(gpio_chip, data->hwirq);
+ if (ret)
+ return ret;
+
+ return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+}
+
+static void stmfx_gpio_irq_release_resources(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+
+ return gpiochip_relres_irq(gpio_chip, data->hwirq);
+}
+
static void stmfx_pinctrl_irq_toggle_trigger(struct stmfx_pinctrl *pctl,
unsigned int offset)
{
@@ -664,6 +683,8 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->irq_chip.irq_set_type = stmfx_pinctrl_irq_set_type;
pctl->irq_chip.irq_bus_lock = stmfx_pinctrl_irq_bus_lock;
pctl->irq_chip.irq_bus_sync_unlock = stmfx_pinctrl_irq_bus_sync_unlock;
+ pctl->irq_chip.irq_request_resources = stmfx_gpio_irq_request_resources;
+ pctl->irq_chip.irq_release_resources = stmfx_gpio_irq_release_resources;
ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
0, handle_bad_irq, IRQ_TYPE_NONE);
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 1f64e2e7efd9..ab49bd708969 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -747,7 +747,6 @@ static struct pinctrl_desc tb10x_pindesc = {
static int tb10x_pinctrl_probe(struct platform_device *pdev)
{
int ret = -EINVAL;
- struct resource *mem;
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
struct device_node *child;
@@ -768,8 +767,7 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, state);
mutex_init(&state->mutex);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->base = devm_ioremap_resource(dev, mem);
+ state->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->base)) {
ret = PTR_ERR(state->base);
goto fail;
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 348423bb39dd..cc306448259e 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1055,7 +1055,6 @@ static struct pinctrl_desc u300_pmx_desc = {
static int u300_pmx_probe(struct platform_device *pdev)
{
struct u300_pmx *upmx;
- struct resource *res;
/* Create state holders etc for this driver */
upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
@@ -1064,8 +1063,7 @@ static int u300_pmx_probe(struct platform_device *pdev)
upmx->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- upmx->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ upmx->virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(upmx->virtbase))
return PTR_ERR(upmx->virtbase);
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 913d38f29b73..5e3f31b55eb7 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1705,12 +1705,10 @@ static int pinmux_xway_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct pinctrl_xway_soc *xway_soc;
- struct resource *res;
int ret, i;
/* get and remap our register range */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
+ xway_info.membase[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xway_info.membase[0]))
return PTR_ERR(xway_info.membase[0]);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa25x.c b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
index 8d1247078ae5..95640698422f 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa25x.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
@@ -216,25 +216,20 @@ static int pxa25x_pinctrl_probe(struct platform_device *pdev)
void __iomem *base_af[8];
void __iomem *base_dir[4];
void __iomem *base_sleep[4];
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_af[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_af[0]))
return PTR_ERR(base_af[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[0] = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base_dir[0]))
return PTR_ERR(base_dir[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[3] = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(base_dir[3]))
return PTR_ERR(base_dir[3]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_sleep[0] = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(base_sleep[0]))
return PTR_ERR(base_sleep[0]);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa27x.c b/drivers/pinctrl/pxa/pinctrl-pxa27x.c
index 64943e819af6..48ccfb50b23e 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa27x.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa27x.c
@@ -508,25 +508,20 @@ static int pxa27x_pinctrl_probe(struct platform_device *pdev)
void __iomem *base_af[8];
void __iomem *base_dir[4];
void __iomem *base_sleep[4];
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_af[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_af[0]))
return PTR_ERR(base_af[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[0] = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base_dir[0]))
return PTR_ERR(base_dir[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[3] = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(base_dir[3]))
return PTR_ERR(base_dir[3]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_sleep[0] = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(base_sleep[0]))
return PTR_ERR(base_sleep[0]);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 32fc2458b8eb..811af2f81c39 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -90,6 +90,16 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8976
+ tristate "Qualcomm 8976 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8976 platform.
+ The Qualcomm MSM8956, APQ8056, APQ8076 platforms are also
+ supported by this driver.
+
config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver"
depends on GPIOLIB && OF
@@ -132,32 +142,33 @@ config PINCTRL_QDF2XXX
Qualcomm Technologies QDF2xxx SOCs.
config PINCTRL_QCOM_SPMI_PMIC
- tristate "Qualcomm SPMI PMIC pin controller driver"
- depends on GPIOLIB && OF && SPMI
- select REGMAP_SPMI
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB_IRQCHIP
- select IRQ_DOMAIN_HIERARCHY
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
- which are using SPMI for communication with SoC. Example PMIC's
- devices are pm8841, pm8941 and pma8084.
+ tristate "Qualcomm SPMI PMIC pin controller driver"
+ depends on GPIOLIB && OF && SPMI
+ select REGMAP_SPMI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
+ which are using SPMI for communication with SoC. Example PMIC's
+ devices are pm8841, pm8941 and pma8084.
config PINCTRL_QCOM_SSBI_PMIC
- tristate "Qualcomm SSBI PMIC pin controller driver"
- depends on GPIOLIB && OF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select IRQ_DOMAIN_HIERARCHY
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
- which are using SSBI for communication with SoC. Example PMIC's
- devices are pm8058 and pm8921.
+ tristate "Qualcomm SSBI PMIC pin controller driver"
+ depends on GPIOLIB && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
+ which are using SSBI for communication with SoC. Example PMIC's
+ devices are pm8058 and pm8921.
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
@@ -169,30 +180,30 @@ config PINCTRL_SC7180
Technologies Inc SC7180 platform.
config PINCTRL_SDM660
- tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SDM660 platform.
+ tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM660 platform.
config PINCTRL_SDM845
- tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
- depends on GPIOLIB && (OF || ACPI)
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SDM845 platform.
+ tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
+ depends on GPIOLIB && (OF || ACPI)
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM845 platform.
config PINCTRL_SM8150
- tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SM8150 platform.
+ tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8150 platform.
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index f8bb0c265381..c2c2f9ad6827 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_MSM8998) += pinctrl-msm8998.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 763da0be10d6..5d6f9f61ce02 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -23,6 +23,8 @@
#include <linux/pm.h>
#include <linux/log2.h>
+#include <linux/soc/qcom/irq.h>
+
#include "../core.h"
#include "../pinconf.h"
#include "pinctrl-msm.h"
@@ -44,6 +46,7 @@
* @enabled_irqs: Bitmap of currently enabled irqs.
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
+ * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
* @soc; Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
*/
@@ -61,6 +64,7 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
@@ -707,6 +711,12 @@ static void msm_gpio_irq_mask(struct irq_data *d)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_mask_parent(d);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -751,6 +761,12 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_unmask_parent(d);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -778,10 +794,35 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
static void msm_gpio_irq_enable(struct irq_data *d)
{
+ /*
+ * Clear the interrupt that may be pending before we enable
+ * the line.
+ * This is especially a problem with the GPIOs routed to the
+ * PDC. These GPIOs are direct-connect interrupts to the GIC.
+ * Disabling the interrupt line at the PDC does not prevent
+ * the interrupt from being latched at the GIC. The state at
+ * GIC needs to be cleared before enabling.
+ */
+ if (d->parent_data) {
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_chip_enable_parent(d);
+ }
msm_gpio_irq_clear_unmask(d, true);
}
+static void msm_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data)
+ irq_chip_disable_parent(d);
+
+ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ msm_gpio_irq_mask(d);
+}
+
static void msm_gpio_irq_unmask(struct irq_data *d)
{
msm_gpio_irq_clear_unmask(d, false);
@@ -795,6 +836,9 @@ static void msm_gpio_irq_ack(struct irq_data *d)
unsigned long flags;
u32 val;
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -820,6 +864,12 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_set_type_parent(d, type);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return 0;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -912,6 +962,15 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
+ /*
+ * While they may not wake up when the TLMM is powered off,
+ * some GPIOs would like to wake up the system from suspend
+ * when TLMM is powered on. To allow that, enable the GPIO
+ * summary line to be wakeup capable at GIC.
+ */
+ if (d->parent_data)
+ irq_chip_set_wake_parent(d, on);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
irq_set_irq_wake(pctrl->irq, on);
@@ -990,6 +1049,30 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static int msm_gpio_wakeirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_gpio_wakeirq_map *map;
+ int i;
+
+ *parent = GPIO_NO_WAKE_IRQ;
+ *parent_type = IRQ_TYPE_EDGE_RISING;
+
+ for (i = 0; i < pctrl->soc->nwakeirq_map; i++) {
+ map = &pctrl->soc->wakeirq_map[i];
+ if (map->gpio == child) {
+ *parent = map->wakeirq;
+ break;
+ }
+ }
+
+ return 0;
+}
+
static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl)
{
if (pctrl->soc->reserved_gpios)
@@ -1002,8 +1085,10 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
{
struct gpio_chip *chip;
struct gpio_irq_chip *girq;
- int ret;
- unsigned ngpio = pctrl->soc->ngpios;
+ int i, ret;
+ unsigned gpio, ngpio = pctrl->soc->ngpios;
+ struct device_node *np;
+ bool skip;
if (WARN_ON(ngpio > MAX_NR_GPIO))
return -EINVAL;
@@ -1020,17 +1105,40 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.name = "msmgpio";
pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
+ pctrl->irq_chip.irq_disable = msm_gpio_irq_disable;
pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
+ pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
+ np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
+ if (np) {
+ chip->irq.parent_domain = irq_find_matching_host(np,
+ DOMAIN_BUS_WAKEUP);
+ of_node_put(np);
+ if (!chip->irq.parent_domain)
+ return -EPROBE_DEFER;
+ chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
+
+ /*
+		 * Skip handling a GPIO if the parent irqchip already
+		 * handles its direct-connect IRQ.
+ */
+ skip = irq_domain_qcom_handle_wakeup(chip->irq.parent_domain);
+ for (i = 0; skip && i < pctrl->soc->nwakeirq_map; i++) {
+ gpio = pctrl->soc->wakeirq_map[i].gpio;
+ set_bit(gpio, pctrl->skip_wake_irqs);
+ }
+ }
+
girq = &chip->irq;
girq->chip = &pctrl->irq_chip;
girq->parent_handler = msm_gpio_irq_handler;
+ girq->fwnode = pctrl->dev->fwnode;
girq->num_parents = 1;
girq->parents = devm_kcalloc(pctrl->dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
@@ -1150,8 +1258,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pctrl->regs[i]);
}
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->regs[0] = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->regs[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->regs[0]))
return PTR_ERR(pctrl->regs[0]);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 48569cda8471..9452da18a78b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -92,6 +92,16 @@ struct msm_pingroup {
};
/**
+ * struct msm_gpio_wakeirq_map - Map of GPIOs and their wakeup pins
+ * @gpio: The GPIO that is wakeup capable
+ * @wakeirq: The interrupt at the always-on interrupt controller
+ */
+struct msm_gpio_wakeirq_map {
+ unsigned int gpio;
+ unsigned int wakeirq;
+};
+
+/**
* struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration
* @pins: An array describing all pins the pin controller affects.
* @npins: The number of entries in @pins.
@@ -101,6 +111,8 @@ struct msm_pingroup {
* @ngroups: The number of entries in @groups.
* @ngpios: The number of pingroups the driver should expose as GPIOs.
* @pull_no_keeper: The SoC does not support keeper bias.
+ * @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM
+ * @nwakeirq_map: The number of entries in @wakeirq_map
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -114,6 +126,8 @@ struct msm_pinctrl_soc_data {
const char *const *tiles;
unsigned int ntiles;
const int *reserved_gpios;
+ const struct msm_gpio_wakeirq_map *wakeirq_map;
+ unsigned int nwakeirq_map;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
new file mode 100644
index 000000000000..e1259ce27396
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2016, AngeloGioacchino Del Regno <kholk11@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
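As an illustrative check of the macro arithmetic only (the offsets follow from REG_BASE and REG_SIZE above; these are not new register definitions), PINGROUP(5, ...) resolves to:

	/* .ctl_reg         = 0x5000  (REG_BASE + REG_SIZE * 5)  */
	/* .io_reg          = 0x5004                              */
	/* .intr_cfg_reg    = 0x5008                              */
	/* .intr_status_reg = 0x500c                              */
	/* .intr_target_reg = 0x5008  (shares the cfg register)   */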
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+static const struct pinctrl_pin_desc msm8976_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "SDC1_CLK"),
+ PINCTRL_PIN(146, "SDC1_CMD"),
+ PINCTRL_PIN(147, "SDC1_DATA"),
+ PINCTRL_PIN(148, "SDC1_RCLK"),
+ PINCTRL_PIN(149, "SDC2_CLK"),
+ PINCTRL_PIN(150, "SDC2_CMD"),
+ PINCTRL_PIN(151, "SDC2_DATA"),
+ PINCTRL_PIN(152, "QDSD_CLK"),
+ PINCTRL_PIN(153, "QDSD_CMD"),
+ PINCTRL_PIN(154, "QDSD_DATA0"),
+ PINCTRL_PIN(155, "QDSD_DATA1"),
+ PINCTRL_PIN(156, "QDSD_DATA2"),
+ PINCTRL_PIN(157, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+
+static const unsigned int sdc1_clk_pins[] = { 145 };
+static const unsigned int sdc1_cmd_pins[] = { 146 };
+static const unsigned int sdc1_data_pins[] = { 147 };
+static const unsigned int sdc1_rclk_pins[] = { 148 };
+static const unsigned int sdc2_clk_pins[] = { 149 };
+static const unsigned int sdc2_cmd_pins[] = { 150 };
+static const unsigned int sdc2_data_pins[] = { 151 };
+static const unsigned int qdsd_clk_pins[] = { 152 };
+static const unsigned int qdsd_cmd_pins[] = { 153 };
+static const unsigned int qdsd_data0_pins[] = { 154 };
+static const unsigned int qdsd_data1_pins[] = { 155 };
+static const unsigned int qdsd_data2_pins[] = { 156 };
+static const unsigned int qdsd_data3_pins[] = { 157 };
+
+enum msm8976_functions {
+ msm_mux_gpio,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_spi1,
+ msm_mux_smb_int,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_i2c2,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_blsp_spi3,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_blsp_i2c3,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_blsp_spi4,
+ msm_mux_cap_int,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_uart5,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_m_voc,
+ msm_mux_blsp_i2c5,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart6,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_blsp_i2c6,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_mdp_vsync,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_cam_mclk,
+ msm_mux_cci0_i2c,
+ msm_mux_cci1_i2c,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp3_spi,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_uim_batt,
+ msm_mux_sd_write,
+ msm_mux_uim1_data,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_reset,
+ msm_mux_uim1_present,
+ msm_mux_uim2_data,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_reset,
+ msm_mux_uim2_present,
+ msm_mux_ts_xvdd,
+ msm_mux_mipi_dsi0,
+ msm_mux_us_euro,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_pri_mi2s,
+ msm_mux_codec_reset,
+ msm_mux_cdc_pdm0,
+ msm_mux_us_emitter,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_mclk_c,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_wcss_bt,
+ msm_mux_sdc3,
+ msm_mux_wcss_wlan2,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_fm,
+ msm_mux_key_volp,
+ msm_mux_key_snapshot,
+ msm_mux_key_focus,
+ msm_mux_key_home,
+ msm_mux_pwr_down,
+ msm_mux_dmic0_clk,
+ msm_mux_hdmi_int,
+ msm_mux_dmic0_data,
+ msm_mux_wsa_vi,
+ msm_mux_wsa_en,
+ msm_mux_blsp_spi8,
+ msm_mux_wsa_irq,
+ msm_mux_blsp_i2c8,
+ msm_mux_pa_indicator,
+ msm_mux_modem_tsync,
+ msm_mux_ssbi_wtr1,
+ msm_mux_gsm1_tx,
+ msm_mux_gsm0_tx,
+ msm_mux_sdcard_det,
+ msm_mux_sec_mi2s,
+ msm_mux_ss_switch,
+ msm_mux_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const smb_int_groups[] = {
+ "gpio1",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio105",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29", "gpio30",
+ "gpio31", "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio38",
+ "gpio116", "gpio126", "gpio128", "gpio129",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio12",
+};
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const cap_int_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio134", "gpio135", "gpio136", "gpio137",
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio134", "gpio135", "gpio136", "gpio137",
+};
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio46",
+};
+static const char * const m_voc_groups[] = {
+ "gpio123", "gpio124",
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio136", "gpio137",
+};
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio45",
+};
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio47", "gpio48", "gpio62", "gpio69", "gpio120",
+ "gpio121", "gpio130", "gpio131",
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio5",
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio5",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio126",
+};
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio62",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28",
+};
+static const char * const cci0_i2c_groups[] = {
+ "gpio30", "gpio29",
+};
+static const char * const cci1_i2c_groups[] = {
+ "gpio104", "gpio103",
+};
+static const char * const blsp1_spi_groups[] = {
+ "gpio101",
+};
+static const char * const blsp3_spi_groups[] = {
+ "gpio106", "gpio107",
+};
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio49",
+};
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio50",
+};
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio51",
+};
+static const char * const uim_batt_groups[] = {
+ "gpio61",
+};
+static const char * const sd_write_groups[] = {
+ "gpio50",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio51",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio52",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio53",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio54",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio55",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio56",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio57",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio58",
+};
+static const char * const ts_xvdd_groups[] = {
+ "gpio60",
+};
+static const char * const mipi_dsi0_groups[] = {
+ "gpio61",
+};
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio122", "gpio123", "gpio124", "gpio125", "gpio127",
+};
+static const char * const codec_reset_groups[] = {
+ "gpio67",
+};
+static const char * const cdc_pdm0_groups[] = {
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121",
+};
+static const char * const us_emitter_groups[] = {
+ "gpio68",
+};
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio62",
+};
+static const char * const pri_mi2s_mclk_c_groups[] = {
+ "gpio116",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio117",
+};
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio118",
+};
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio119",
+};
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+static const char * const wcss_bt_groups[] = {
+ "gpio39", "gpio47", "gpio88",
+};
+static const char * const sdc3_groups[] = {
+ "gpio39", "gpio40", "gpio41",
+ "gpio42", "gpio43", "gpio44",
+};
+static const char * const wcss_wlan2_groups[] = {
+ "gpio40",
+};
+static const char * const wcss_wlan1_groups[] = {
+ "gpio41",
+};
+static const char * const wcss_wlan0_groups[] = {
+ "gpio42",
+};
+static const char * const wcss_wlan_groups[] = {
+ "gpio43", "gpio44",
+};
+static const char * const wcss_fm_groups[] = {
+ "gpio45", "gpio46",
+};
+static const char * const key_volp_groups[] = {
+ "gpio85",
+};
+static const char * const key_snapshot_groups[] = {
+ "gpio86",
+};
+static const char * const key_focus_groups[] = {
+ "gpio87",
+};
+static const char * const key_home_groups[] = {
+ "gpio88",
+};
+static const char * const pwr_down_groups[] = {
+ "gpio89",
+};
+static const char * const dmic0_clk_groups[] = {
+ "gpio66",
+};
+static const char * const hdmi_int_groups[] = {
+ "gpio90",
+};
+static const char * const dmic0_data_groups[] = {
+ "gpio67",
+};
+static const char * const wsa_vi_groups[] = {
+ "gpio108", "gpio109",
+};
+static const char * const wsa_en_groups[] = {
+ "gpio96",
+};
+static const char * const blsp_spi8_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const wsa_irq_groups[] = {
+ "gpio97",
+};
+static const char * const blsp_i2c8_groups[] = {
+ "gpio18", "gpio19",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio92",
+};
+static const char * const modem_tsync_groups[] = {
+ "gpio93",
+};
+static const char * const nav_tsync_groups[] = {
+ "gpio93",
+};
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio79", "gpio94",
+};
+static const char * const gsm1_tx_groups[] = {
+ "gpio95",
+};
+static const char * const gsm0_tx_groups[] = {
+ "gpio99",
+};
+static const char * const sdcard_det_groups[] = {
+ "gpio133",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio102", "gpio105", "gpio134", "gpio135",
+};
+
+static const char * const ss_switch_groups[] = {
+ "gpio139",
+};
+
+static const struct msm_function msm8976_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(blsp_spi1),
+ FUNCTION(smb_int),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(blsp_spi3),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(blsp_spi4),
+ FUNCTION(cap_int),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_uart5),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(m_voc),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart6),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(mdp_vsync),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(sec_mi2s_mclk_a),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci0_i2c),
+ FUNCTION(cci1_i2c),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp3_spi),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(uim_batt),
+ FUNCTION(sd_write),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim1_present),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim2_present),
+ FUNCTION(ts_xvdd),
+ FUNCTION(mipi_dsi0),
+ FUNCTION(us_euro),
+ FUNCTION(ts_resout),
+ FUNCTION(ts_sample),
+ FUNCTION(sec_mi2s_mclk_b),
+ FUNCTION(pri_mi2s),
+ FUNCTION(codec_reset),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(us_emitter),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_mclk_c),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(lpass_slimbus0),
+ FUNCTION(lpass_slimbus1),
+ FUNCTION(codec_int1),
+ FUNCTION(codec_int2),
+ FUNCTION(wcss_bt),
+ FUNCTION(sdc3),
+ FUNCTION(wcss_wlan2),
+ FUNCTION(wcss_wlan1),
+ FUNCTION(wcss_wlan0),
+ FUNCTION(wcss_wlan),
+ FUNCTION(wcss_fm),
+ FUNCTION(key_volp),
+ FUNCTION(key_snapshot),
+ FUNCTION(key_focus),
+ FUNCTION(key_home),
+ FUNCTION(pwr_down),
+ FUNCTION(dmic0_clk),
+ FUNCTION(hdmi_int),
+ FUNCTION(dmic0_data),
+ FUNCTION(wsa_vi),
+ FUNCTION(wsa_en),
+ FUNCTION(blsp_spi8),
+ FUNCTION(wsa_irq),
+ FUNCTION(blsp_i2c8),
+ FUNCTION(pa_indicator),
+ FUNCTION(modem_tsync),
+ FUNCTION(ssbi_wtr1),
+ FUNCTION(gsm1_tx),
+ FUNCTION(gsm0_tx),
+ FUNCTION(sdcard_det),
+ FUNCTION(sec_mi2s),
+ FUNCTION(ss_switch),
+};
+
+static const struct msm_pingroup msm8976_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, NA, NA, NA, qdss_tracectl_b, NA, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, NA, NA, NA, qdss_traceclk_b, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, blsp_spi3, NA, NA, NA, NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, NA, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(10, blsp_spi3, NA, blsp_i2c3, NA, NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(11, blsp_spi3, NA, blsp_i2c3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, blsp_spi4, NA, gcc_gp2_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, blsp_spi4, NA, gcc_gp3_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, blsp_spi4, NA, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, blsp_spi4, NA, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, blsp_spi8, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, blsp_spi8, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, blsp_spi8, NA, blsp_i2c8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, blsp_spi8, NA, blsp_i2c8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, cam_mclk, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(27, cam_mclk, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(28, cam_mclk, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(29, cci0_i2c, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(30, cci0_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(34, NA, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(35, NA, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(37, NA, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(39, wcss_bt, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(40, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(41, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(42, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(43, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(44, wcss_wlan, sdc3, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, wcss_fm, NA, qdss_tracectl_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(46, wcss_fm, NA, NA, qdss_traceclk_a, NA, NA, NA, NA, NA),
+ PINGROUP(47, wcss_bt, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, wcss_bt, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, NA, NA, gcc_gp1_clk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, sd_write, gcc_gp2_clk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, uim2_data, gcc_gp3_clk_a, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, sec_mi2s_mclk_a, pri_mi2s_mclk_b, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, dmic0_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, dmic0_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, pa_indicator, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, modem_tsync, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, gsm0_tx, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, blsp1_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, sec_mi2s, gcc_gp1_clk_b, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, blsp3_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, blsp3_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, wsa_vi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, wsa_vi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, pri_mi2s_mclk_c, cdc_pdm0, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(117, lpass_slimbus, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, lpass_slimbus0, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, lpass_slimbus1, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, cdc_pdm0, NA, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(121, cdc_pdm0, NA, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(122, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(123, pri_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, pri_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(125, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(126, pri_mi2s_mclk_a, sec_mi2s_mclk_b, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(127, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(128, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(129, qdss_tracedata_b, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(131, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(134, blsp_spi5, blsp_uart5, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(135, blsp_spi5, blsp_uart5, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(136, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(137, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(140, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(141, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_pinctrl_soc_data msm8976_pinctrl = {
+ .pins = msm8976_pins,
+ .npins = ARRAY_SIZE(msm8976_pins),
+ .functions = msm8976_functions,
+ .nfunctions = ARRAY_SIZE(msm8976_functions),
+ .groups = msm8976_groups,
+ .ngroups = ARRAY_SIZE(msm8976_groups),
+ .ngpios = 145,
+};
+
+static int msm8976_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8976_pinctrl);
+}
+
+static const struct of_device_id msm8976_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8976-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8976_pinctrl_driver = {
+ .driver = {
+ .name = "msm8976-pinctrl",
+ .of_match_table = msm8976_pinctrl_of_match,
+ },
+ .probe = msm8976_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8976_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8976_pinctrl_driver);
+}
+arch_initcall(msm8976_pinctrl_init);
+
+static void __exit msm8976_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8976_pinctrl_driver);
+}
+module_exit(msm8976_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm msm8976 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8976_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 6399c8a2bc22..d6cfad7417b1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -77,6 +77,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
@@ -102,6 +103,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = 3, \
.drv_bit = 0, \
@@ -1087,14 +1089,14 @@ static const struct msm_pingroup sc7180_groups[] = {
[116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
[117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
- [119] = UFS_RESET(ufs_reset, 0x97f000),
- [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x97a000, 15, 0),
- [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x97a000, 13, 6),
- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x97a000, 11, 3),
- [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x97a000, 9, 0),
- [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x97b000, 14, 6),
- [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x97b000, 11, 3),
- [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x97b000, 9, 0),
+ [119] = UFS_RESET(ufs_reset, 0x7f000),
+ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x7a000, 15, 0),
+ [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x7a000, 13, 6),
+ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x7a000, 11, 3),
+ [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x7a000, 9, 0),
+ [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x7b000, 14, 6),
+ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x7b000, 11, 3),
+ [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
};
static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index ce495970459d..2834d2c1338c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/acpi.h>
@@ -1282,6 +1282,24 @@ static const int sdm845_acpi_reserved_gpios[] = {
0, 1, 2, 3, 81, 82, 83, 84, -1
};
+static const struct msm_gpio_wakeirq_map sdm845_pdc_map[] = {
+ { 1, 30 }, { 3, 31 }, { 5, 32 }, { 10, 33 }, { 11, 34 },
+ { 20, 35 }, { 22, 36 }, { 24, 37 }, { 26, 38 }, { 30, 39 },
+ { 31, 117 }, { 32, 41 }, { 34, 42 }, { 36, 43 }, { 37, 44 },
+ { 38, 45 }, { 39, 46 }, { 40, 47 }, { 41, 115 }, { 43, 49 },
+ { 44, 50 }, { 46, 51 }, { 48, 52 }, { 49, 118 }, { 52, 54 },
+ { 53, 55 }, { 54, 56 }, { 56, 57 }, { 57, 58 }, { 58, 59 },
+ { 59, 60 }, { 60, 61 }, { 61, 62 }, { 62, 63 }, { 63, 64 },
+ { 64, 65 }, { 66, 66 }, { 68, 67 }, { 71, 68 }, { 73, 69 },
+ { 77, 70 }, { 78, 71 }, { 79, 72 }, { 80, 73 }, { 84, 74 },
+ { 85, 75 }, { 86, 76 }, { 88, 77 }, { 89, 116 }, { 91, 79 },
+ { 92, 80 }, { 95, 81 }, { 96, 82 }, { 97, 83 }, { 101, 84 },
+ { 103, 85 }, { 104, 86 }, { 115, 90 }, { 116, 91 }, { 117, 92 },
+ { 118, 93 }, { 119, 94 }, { 120, 95 }, { 121, 96 }, { 122, 97 },
+ { 123, 98 }, { 124, 99 }, { 125, 100 }, { 127, 102 }, { 128, 103 },
+ { 129, 104 }, { 130, 105 }, { 132, 106 }, { 133, 107 }, { 145, 108 },
+};
+
static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.pins = sdm845_pins,
.npins = ARRAY_SIZE(sdm845_pins),
@@ -1290,6 +1308,9 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.groups = sdm845_groups,
.ngroups = ARRAY_SIZE(sdm845_groups),
.ngpios = 151,
+ .wakeirq_map = sdm845_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
+
};
static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index f1fece5b9c06..653d1095bfea 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1108,6 +1108,9 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
+ /* pm8950 has 8 GPIOs with holes on 3 */
+ { .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
+ { .compatible = "qcom,pmi8950-gpio", .data = (void *) 2 },
{ .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
{ .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
@@ -1121,6 +1124,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8150b-gpio", .data = (void *) 12 },
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
{ },
};
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 91407b024cf3..48602dba4967 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -915,6 +915,8 @@ static const struct of_device_id pmic_mpp_of_match[] = {
{ .compatible = "qcom,pm8841-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */
+ { .compatible = "qcom,pm8950-mpp" }, /* 4 MPP's */
+ { .compatible = "qcom,pmi8950-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8994-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,spmi-mpp" }, /* Generic */
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index c1f7d0799ebe..dca86886b1f9 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -56,7 +56,6 @@
/**
* struct pm8xxx_pin_data - dynamic configuration for a pin
* @reg: address of the control register
- * @irq: IRQ from the PMIC interrupt controller
* @power_source: logical selected voltage source, mapping in static data
* is used translate to register values
* @mode: operating mode for the pin (input/output)
@@ -72,7 +71,6 @@
*/
struct pm8xxx_pin_data {
unsigned reg;
- int irq;
u8 power_source;
u8 mode;
bool open_drain;
@@ -93,9 +91,6 @@ struct pm8xxx_gpio {
struct pinctrl_desc desc;
unsigned npins;
-
- struct fwnode_handle *fwnode;
- struct irq_domain *domain;
};
static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = {
@@ -491,13 +486,16 @@ static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ int ret, irq;
bool state;
- int ret;
- if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) {
- ret = pin->output_value;
- } else if (pin->irq >= 0) {
- ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT)
+ return pin->output_value;
+
+ irq = chip->to_irq(chip, offset);
+ if (irq >= 0) {
+ ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL,
+ &state);
if (!ret)
ret = !!state;
} else
@@ -535,37 +533,6 @@ static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
}
-static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
- struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
- struct irq_fwspec fwspec;
- int ret;
-
- fwspec.fwnode = pctrl->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = offset + PM8XXX_GPIO_PHYSICAL_OFFSET;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
-
- ret = irq_create_fwspec_mapping(&fwspec);
-
- /*
- * Cache the IRQ since pm8xxx_gpio_get() needs this to get determine the
- * line level.
- */
- pin->irq = ret;
-
- return ret;
-}
-
-static void pm8xxx_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
- struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
- struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
-
- pin->irq = -1;
-}
-
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
@@ -624,13 +591,11 @@ static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#endif
static const struct gpio_chip pm8xxx_gpio_template = {
- .free = pm8xxx_gpio_free,
.direction_input = pm8xxx_gpio_direction_input,
.direction_output = pm8xxx_gpio_direction_output,
.get = pm8xxx_gpio_get,
.set = pm8xxx_gpio_set,
.of_xlate = pm8xxx_gpio_of_xlate,
- .to_irq = pm8xxx_gpio_to_irq,
.dbg_show = pm8xxx_gpio_dbg_show,
.owner = THIS_MODULE,
};
@@ -712,43 +677,24 @@ static int pm8xxx_domain_translate(struct irq_domain *domain,
return 0;
}
-static int pm8xxx_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *data)
+static unsigned int pm8xxx_child_offset_to_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
- struct pm8xxx_gpio *pctrl = container_of(domain->host_data,
- struct pm8xxx_gpio, chip);
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq;
- unsigned int type;
- int ret, i;
-
- ret = pm8xxx_domain_translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
- &pm8xxx_irq_chip, pctrl, handle_level_irq,
- NULL, NULL);
+ return offset + PM8XXX_GPIO_PHYSICAL_OFFSET;
+}
- parent_fwspec.fwnode = domain->parent->fwnode;
- parent_fwspec.param_count = 2;
- parent_fwspec.param[0] = hwirq + 0xc0;
- parent_fwspec.param[1] = fwspec->param[1];
+static int pm8xxx_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int child_hwirq,
+ unsigned int child_type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
+ *parent_hwirq = child_hwirq + 0xc0;
+ *parent_type = child_type;
- return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
- &parent_fwspec);
+ return 0;
}
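The child-to-parent translation is just a fixed offset into the PMIC interrupt controller's hwirq space; as a hedged example (hypothetical call with illustrative values, assuming a valid gpio_chip pointer):

	unsigned int parent_hwirq, parent_type;

	pm8xxx_child_to_parent_hwirq(chip, 2, IRQ_TYPE_EDGE_RISING,
				     &parent_hwirq, &parent_type);
	/* parent_hwirq == 0xc2, parent_type == IRQ_TYPE_EDGE_RISING */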
-static const struct irq_domain_ops pm8xxx_domain_ops = {
- .activate = gpiochip_irq_domain_activate,
- .alloc = pm8xxx_domain_alloc,
- .deactivate = gpiochip_irq_domain_deactivate,
- .free = irq_domain_free_irqs_common,
- .translate = pm8xxx_domain_translate,
-};
-
static const struct of_device_id pm8xxx_gpio_of_match[] = {
{ .compatible = "qcom,pm8018-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pm8038-gpio", .data = (void *) 12 },
@@ -765,6 +711,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
struct irq_domain *parent_domain;
struct device_node *parent_node;
struct pinctrl_pin_desc *pins;
+ struct gpio_irq_chip *girq;
struct pm8xxx_gpio *pctrl;
int ret, i;
@@ -800,7 +747,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
for (i = 0; i < pctrl->desc.npins; i++) {
pin_data[i].reg = SSBI_REG_ADDR_GPIO(i);
- pin_data[i].irq = -1;
ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
if (ret)
@@ -841,19 +787,21 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;
- pctrl->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
- pctrl->domain = irq_domain_create_hierarchy(parent_domain, 0,
- pctrl->chip.ngpio,
- pctrl->fwnode,
- &pm8xxx_domain_ops,
- &pctrl->chip);
- if (!pctrl->domain)
- return -ENODEV;
+ girq = &pctrl->chip.irq;
+ girq->chip = &pm8xxx_irq_chip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
+ girq->parent_domain = parent_domain;
+ girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
+ girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
+ girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(&pdev->dev, "failed register gpiochip\n");
- goto err_chip_add_data;
+ return ret;
}
/*
@@ -883,8 +831,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
unregister_gpiochip:
gpiochip_remove(&pctrl->chip);
-err_chip_add_data:
- irq_domain_remove(pctrl->domain);
return ret;
}
@@ -894,7 +840,6 @@ static int pm8xxx_gpio_remove(struct platform_device *pdev)
struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
- irq_domain_remove(pctrl->domain);
return 0;
}
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index ebc27b06718c..0599f5127b01 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -486,8 +486,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
if (match) {
irq_chip = kmemdup(match->data,
sizeof(*irq_chip), GFP_KERNEL);
- if (!irq_chip)
+ if (!irq_chip) {
+ of_node_put(np);
return -ENOMEM;
+ }
wkup_np = np;
break;
}
@@ -504,6 +506,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
bank->nr_pins, &exynos_eint_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "wkup irq domain add failed\n");
+ of_node_put(wkup_np);
return -ENXIO;
}
@@ -518,8 +521,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
weint_data = devm_kcalloc(dev,
bank->nr_pins, sizeof(*weint_data),
GFP_KERNEL);
- if (!weint_data)
+ if (!weint_data) {
+ of_node_put(wkup_np);
return -ENOMEM;
+ }
for (idx = 0; idx < bank->nr_pins; ++idx) {
irq = irq_of_parse_and_map(bank->of_node, idx);
@@ -536,10 +541,13 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
}
}
- if (!muxed_banks)
+ if (!muxed_banks) {
+ of_node_put(wkup_np);
return 0;
+ }
irq = irq_of_parse_and_map(wkup_np, 0);
+ of_node_put(wkup_np);
if (!irq) {
dev_err(dev, "irq number for muxed EINTs not found\n");
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 7e824e4d20f4..9bd0a3de101d 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -490,8 +490,10 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL);
- if (!eint_data)
+ if (!eint_data) {
+ of_node_put(eint_np);
return -ENOMEM;
+ }
eint_data->drvdata = d;
@@ -503,12 +505,14 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint_np);
return -ENXIO;
}
eint_data->parents[i] = irq;
irq_set_chained_handler_and_data(irq, handlers[i], eint_data);
}
+ of_node_put(eint_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index c399f0932af5..f97f8179f2b1 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -704,8 +704,10 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ of_node_put(eint0_np);
return -ENOMEM;
+ }
data->drvdata = d;
for (i = 0; i < NUM_EINT0_IRQ; ++i) {
@@ -714,6 +716,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint0_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint0_np);
return -ENXIO;
}
@@ -721,6 +724,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
s3c64xx_eint0_handlers[i],
data);
}
+ of_node_put(eint0_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index de0477bb469d..f26574ef234a 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -272,6 +272,7 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
&reserved_maps, num_maps);
if (ret < 0) {
samsung_dt_free_map(pctldev, *map, *num_maps);
+ of_node_put(np);
return ret;
}
}
@@ -785,8 +786,10 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
if (!of_get_child_count(cfg_np)) {
ret = samsung_pinctrl_create_function(dev, drvdata,
cfg_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
@@ -797,8 +800,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
for_each_child_of_node(cfg_np, func_np) {
ret = samsung_pinctrl_create_function(dev, drvdata,
func_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(func_np);
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 2dd716b016a3..28d66e7cb098 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -17,6 +17,7 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7745 if ARCH_R8A7745
select PINCTRL_PFC_R8A77470 if ARCH_R8A77470
select PINCTRL_PFC_R8A774A1 if ARCH_R8A774A1
+ select PINCTRL_PFC_R8A774B1 if ARCH_R8A774B1
select PINCTRL_PFC_R8A774C0 if ARCH_R8A774C0
select PINCTRL_PFC_R8A7778 if ARCH_R8A7778
select PINCTRL_PFC_R8A7779 if ARCH_R8A7779
@@ -26,7 +27,8 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
- select PINCTRL_PFC_R8A7796 if ARCH_R8A7796
+ select PINCTRL_PFC_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
select PINCTRL_PFC_R8A77980 if ARCH_R8A77980
@@ -86,6 +88,9 @@ config PINCTRL_PFC_R8A77470
config PINCTRL_PFC_R8A774A1
bool "RZ/G2M pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A774B1
+ bool "RZ/G2N pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A774C0
bool "RZ/G2E pin control support" if COMPILE_TEST
@@ -113,9 +118,12 @@ config PINCTRL_PFC_R8A7794
config PINCTRL_PFC_R8A7795
bool "R-Car H3 pin control support" if COMPILE_TEST
-config PINCTRL_PFC_R8A7796
+config PINCTRL_PFC_R8A77960
bool "R-Car M3-W pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77961
+ bool "R-Car M3-W+ pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A77965
bool "R-Car M3-N pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 8c95abcfcc00..3bc05666e1a6 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7744) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A77470) += pfc-r8a77470.o
obj-$(CONFIG_PINCTRL_PFC_R8A774A1) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A774B1) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A774C0) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
@@ -19,7 +20,8 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795-es1.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7796) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77960) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77961) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A77970) += pfc-r8a77970.o
obj-$(CONFIG_PINCTRL_PFC_R8A77980) += pfc-r8a77980.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index b8640ad41bef..65e52688f091 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -29,12 +29,12 @@
static int sh_pfc_map_resources(struct sh_pfc *pfc,
struct platform_device *pdev)
{
- unsigned int num_windows, num_irqs;
struct sh_pfc_window *windows;
unsigned int *irqs = NULL;
+ unsigned int num_windows;
struct resource *res;
unsigned int i;
- int irq;
+ int num_irqs;
/* Count the MEM and IRQ resources. */
for (num_windows = 0;; num_windows++) {
@@ -42,17 +42,13 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
if (!res)
break;
}
- for (num_irqs = 0;; num_irqs++) {
- irq = platform_get_irq(pdev, num_irqs);
- if (irq == -EPROBE_DEFER)
- return irq;
- if (irq < 0)
- break;
- }
-
if (num_windows == 0)
return -EINVAL;
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0)
+ return num_irqs;
+
/* Allocate memory windows and IRQs arrays. */
windows = devm_kcalloc(pfc->dev, num_windows, sizeof(*windows),
GFP_KERNEL);
@@ -518,6 +514,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a774a1_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A774B1
+ {
+ .compatible = "renesas,pfc-r8a774b1",
+ .data = &r8a774b1_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A774C0
{
.compatible = "renesas,pfc-r8a774c0",
@@ -579,10 +581,16 @@ static const struct of_device_id sh_pfc_of_table[] = {
},
#endif /* DEBUG */
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7796
+#ifdef CONFIG_PINCTRL_PFC_R8A77960
{
.compatible = "renesas,pfc-r8a7796",
- .data = &r8a7796_pinmux_info,
+ .data = &r8a77960_pinmux_info,
+ },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77961
+ {
+ .compatible = "renesas,pfc-r8a77961",
+ .data = &r8a77961_pinmux_info,
},
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77965
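
The sh_pfc_map_resources() hunk above replaces the open-coded platform_get_irq() counting loop, which had to special-case -EPROBE_DEFER, with a single platform_irq_count() call that already returns a negative errno (including -EPROBE_DEFER) when the IRQs cannot be resolved yet. A rough sketch of the idiom in a hypothetical probe helper:

#include <linux/platform_device.h>

/* Hypothetical probe helper: count IRQ resources up front instead of
 * probing indices until platform_get_irq() fails. */
static int example_count_irqs(struct platform_device *pdev)
{
	int num_irqs = platform_irq_count(pdev);

	if (num_irqs < 0)
		return num_irqs;	/* propagates -EPROBE_DEFER unchanged */

	/* allocate num_irqs slots, then fetch each one with platform_get_irq() */
	return num_irqs;
}
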
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 95f9aae3bfba..ad05da8f6516 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -718,7 +718,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_MSEL(IP1_27_24, A20, I2C_SEL_3_0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 7df010f757b1..d3145aa135d0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -726,7 +726,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 61db7c7a35ec..a2496baca85d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7796 processor support - PFC hardware block.
+ * R8A7796 (R-Car M3-W/W+) support - PFC hardware block.
*
* Copyright (C) 2016-2019 Renesas Electronics Corp.
*
@@ -729,7 +729,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
@@ -6210,8 +6210,8 @@ const struct sh_pfc_soc_info r8a774a1_pinmux_info = {
};
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7796
-const struct sh_pfc_soc_info r8a7796_pinmux_info = {
+#ifdef CONFIG_PINCTRL_PFC_R8A77960
+const struct sh_pfc_soc_info r8a77960_pinmux_info = {
.name = "r8a77960_pfc",
.ops = &r8a7796_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
@@ -6236,3 +6236,30 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77961
+const struct sh_pfc_soc_info r8a77961_pinmux_info = {
+ .name = "r8a77961_pfc",
+ .ops = &r8a7796_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 697c77a4ea95..8bdf33c807f6 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -732,7 +732,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
@@ -4378,355 +4378,362 @@ static const unsigned int vin5_clk_mux[] = {
VI5_CLK_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(audio_clk_a_a),
- SH_PFC_PIN_GROUP(audio_clk_a_b),
- SH_PFC_PIN_GROUP(audio_clk_a_c),
- SH_PFC_PIN_GROUP(audio_clk_b_a),
- SH_PFC_PIN_GROUP(audio_clk_b_b),
- SH_PFC_PIN_GROUP(audio_clk_c_a),
- SH_PFC_PIN_GROUP(audio_clk_c_b),
- SH_PFC_PIN_GROUP(audio_clkout_a),
- SH_PFC_PIN_GROUP(audio_clkout_b),
- SH_PFC_PIN_GROUP(audio_clkout_c),
- SH_PFC_PIN_GROUP(audio_clkout_d),
- SH_PFC_PIN_GROUP(audio_clkout1_a),
- SH_PFC_PIN_GROUP(audio_clkout1_b),
- SH_PFC_PIN_GROUP(audio_clkout2_a),
- SH_PFC_PIN_GROUP(audio_clkout2_b),
- SH_PFC_PIN_GROUP(audio_clkout3_a),
- SH_PFC_PIN_GROUP(audio_clkout3_b),
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
- SH_PFC_PIN_GROUP(avb_mdio),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
- SH_PFC_PIN_GROUP(avb_avtp_match_b),
- SH_PFC_PIN_GROUP(avb_avtp_capture_b),
- SH_PFC_PIN_GROUP(can0_data_a),
- SH_PFC_PIN_GROUP(can0_data_b),
- SH_PFC_PIN_GROUP(can1_data),
- SH_PFC_PIN_GROUP(can_clk),
- SH_PFC_PIN_GROUP(canfd0_data_a),
- SH_PFC_PIN_GROUP(canfd0_data_b),
- SH_PFC_PIN_GROUP(canfd1_data),
- SH_PFC_PIN_GROUP(drif0_ctrl_a),
- SH_PFC_PIN_GROUP(drif0_data0_a),
- SH_PFC_PIN_GROUP(drif0_data1_a),
- SH_PFC_PIN_GROUP(drif0_ctrl_b),
- SH_PFC_PIN_GROUP(drif0_data0_b),
- SH_PFC_PIN_GROUP(drif0_data1_b),
- SH_PFC_PIN_GROUP(drif0_ctrl_c),
- SH_PFC_PIN_GROUP(drif0_data0_c),
- SH_PFC_PIN_GROUP(drif0_data1_c),
- SH_PFC_PIN_GROUP(drif1_ctrl_a),
- SH_PFC_PIN_GROUP(drif1_data0_a),
- SH_PFC_PIN_GROUP(drif1_data1_a),
- SH_PFC_PIN_GROUP(drif1_ctrl_b),
- SH_PFC_PIN_GROUP(drif1_data0_b),
- SH_PFC_PIN_GROUP(drif1_data1_b),
- SH_PFC_PIN_GROUP(drif1_ctrl_c),
- SH_PFC_PIN_GROUP(drif1_data0_c),
- SH_PFC_PIN_GROUP(drif1_data1_c),
- SH_PFC_PIN_GROUP(drif2_ctrl_a),
- SH_PFC_PIN_GROUP(drif2_data0_a),
- SH_PFC_PIN_GROUP(drif2_data1_a),
- SH_PFC_PIN_GROUP(drif2_ctrl_b),
- SH_PFC_PIN_GROUP(drif2_data0_b),
- SH_PFC_PIN_GROUP(drif2_data1_b),
- SH_PFC_PIN_GROUP(drif3_ctrl_a),
- SH_PFC_PIN_GROUP(drif3_data0_a),
- SH_PFC_PIN_GROUP(drif3_data1_a),
- SH_PFC_PIN_GROUP(drif3_ctrl_b),
- SH_PFC_PIN_GROUP(drif3_data0_b),
- SH_PFC_PIN_GROUP(drif3_data1_b),
- SH_PFC_PIN_GROUP(du_rgb666),
- SH_PFC_PIN_GROUP(du_rgb888),
- SH_PFC_PIN_GROUP(du_clk_out_0),
- SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync),
- SH_PFC_PIN_GROUP(du_oddf),
- SH_PFC_PIN_GROUP(du_cde),
- SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(hscif0_data),
- SH_PFC_PIN_GROUP(hscif0_clk),
- SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data_a),
- SH_PFC_PIN_GROUP(hscif1_clk_a),
- SH_PFC_PIN_GROUP(hscif1_ctrl_a),
- SH_PFC_PIN_GROUP(hscif1_data_b),
- SH_PFC_PIN_GROUP(hscif1_clk_b),
- SH_PFC_PIN_GROUP(hscif1_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_a),
- SH_PFC_PIN_GROUP(hscif2_clk_a),
- SH_PFC_PIN_GROUP(hscif2_ctrl_a),
- SH_PFC_PIN_GROUP(hscif2_data_b),
- SH_PFC_PIN_GROUP(hscif2_clk_b),
- SH_PFC_PIN_GROUP(hscif2_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_c),
- SH_PFC_PIN_GROUP(hscif2_clk_c),
- SH_PFC_PIN_GROUP(hscif2_ctrl_c),
- SH_PFC_PIN_GROUP(hscif3_data_a),
- SH_PFC_PIN_GROUP(hscif3_clk),
- SH_PFC_PIN_GROUP(hscif3_ctrl),
- SH_PFC_PIN_GROUP(hscif3_data_b),
- SH_PFC_PIN_GROUP(hscif3_data_c),
- SH_PFC_PIN_GROUP(hscif3_data_d),
- SH_PFC_PIN_GROUP(hscif4_data_a),
- SH_PFC_PIN_GROUP(hscif4_clk),
- SH_PFC_PIN_GROUP(hscif4_ctrl),
- SH_PFC_PIN_GROUP(hscif4_data_b),
- SH_PFC_PIN_GROUP(i2c0),
- SH_PFC_PIN_GROUP(i2c1_a),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c2_a),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c3),
- SH_PFC_PIN_GROUP(i2c5),
- SH_PFC_PIN_GROUP(i2c6_a),
- SH_PFC_PIN_GROUP(i2c6_b),
- SH_PFC_PIN_GROUP(i2c6_c),
- SH_PFC_PIN_GROUP(intc_ex_irq0),
- SH_PFC_PIN_GROUP(intc_ex_irq1),
- SH_PFC_PIN_GROUP(intc_ex_irq2),
- SH_PFC_PIN_GROUP(intc_ex_irq3),
- SH_PFC_PIN_GROUP(intc_ex_irq4),
- SH_PFC_PIN_GROUP(intc_ex_irq5),
- SH_PFC_PIN_GROUP(msiof0_clk),
- SH_PFC_PIN_GROUP(msiof0_sync),
- SH_PFC_PIN_GROUP(msiof0_ss1),
- SH_PFC_PIN_GROUP(msiof0_ss2),
- SH_PFC_PIN_GROUP(msiof0_txd),
- SH_PFC_PIN_GROUP(msiof0_rxd),
- SH_PFC_PIN_GROUP(msiof1_clk_a),
- SH_PFC_PIN_GROUP(msiof1_sync_a),
- SH_PFC_PIN_GROUP(msiof1_ss1_a),
- SH_PFC_PIN_GROUP(msiof1_ss2_a),
- SH_PFC_PIN_GROUP(msiof1_txd_a),
- SH_PFC_PIN_GROUP(msiof1_rxd_a),
- SH_PFC_PIN_GROUP(msiof1_clk_b),
- SH_PFC_PIN_GROUP(msiof1_sync_b),
- SH_PFC_PIN_GROUP(msiof1_ss1_b),
- SH_PFC_PIN_GROUP(msiof1_ss2_b),
- SH_PFC_PIN_GROUP(msiof1_txd_b),
- SH_PFC_PIN_GROUP(msiof1_rxd_b),
- SH_PFC_PIN_GROUP(msiof1_clk_c),
- SH_PFC_PIN_GROUP(msiof1_sync_c),
- SH_PFC_PIN_GROUP(msiof1_ss1_c),
- SH_PFC_PIN_GROUP(msiof1_ss2_c),
- SH_PFC_PIN_GROUP(msiof1_txd_c),
- SH_PFC_PIN_GROUP(msiof1_rxd_c),
- SH_PFC_PIN_GROUP(msiof1_clk_d),
- SH_PFC_PIN_GROUP(msiof1_sync_d),
- SH_PFC_PIN_GROUP(msiof1_ss1_d),
- SH_PFC_PIN_GROUP(msiof1_ss2_d),
- SH_PFC_PIN_GROUP(msiof1_txd_d),
- SH_PFC_PIN_GROUP(msiof1_rxd_d),
- SH_PFC_PIN_GROUP(msiof1_clk_e),
- SH_PFC_PIN_GROUP(msiof1_sync_e),
- SH_PFC_PIN_GROUP(msiof1_ss1_e),
- SH_PFC_PIN_GROUP(msiof1_ss2_e),
- SH_PFC_PIN_GROUP(msiof1_txd_e),
- SH_PFC_PIN_GROUP(msiof1_rxd_e),
- SH_PFC_PIN_GROUP(msiof1_clk_f),
- SH_PFC_PIN_GROUP(msiof1_sync_f),
- SH_PFC_PIN_GROUP(msiof1_ss1_f),
- SH_PFC_PIN_GROUP(msiof1_ss2_f),
- SH_PFC_PIN_GROUP(msiof1_txd_f),
- SH_PFC_PIN_GROUP(msiof1_rxd_f),
- SH_PFC_PIN_GROUP(msiof1_clk_g),
- SH_PFC_PIN_GROUP(msiof1_sync_g),
- SH_PFC_PIN_GROUP(msiof1_ss1_g),
- SH_PFC_PIN_GROUP(msiof1_ss2_g),
- SH_PFC_PIN_GROUP(msiof1_txd_g),
- SH_PFC_PIN_GROUP(msiof1_rxd_g),
- SH_PFC_PIN_GROUP(msiof2_clk_a),
- SH_PFC_PIN_GROUP(msiof2_sync_a),
- SH_PFC_PIN_GROUP(msiof2_ss1_a),
- SH_PFC_PIN_GROUP(msiof2_ss2_a),
- SH_PFC_PIN_GROUP(msiof2_txd_a),
- SH_PFC_PIN_GROUP(msiof2_rxd_a),
- SH_PFC_PIN_GROUP(msiof2_clk_b),
- SH_PFC_PIN_GROUP(msiof2_sync_b),
- SH_PFC_PIN_GROUP(msiof2_ss1_b),
- SH_PFC_PIN_GROUP(msiof2_ss2_b),
- SH_PFC_PIN_GROUP(msiof2_txd_b),
- SH_PFC_PIN_GROUP(msiof2_rxd_b),
- SH_PFC_PIN_GROUP(msiof2_clk_c),
- SH_PFC_PIN_GROUP(msiof2_sync_c),
- SH_PFC_PIN_GROUP(msiof2_ss1_c),
- SH_PFC_PIN_GROUP(msiof2_ss2_c),
- SH_PFC_PIN_GROUP(msiof2_txd_c),
- SH_PFC_PIN_GROUP(msiof2_rxd_c),
- SH_PFC_PIN_GROUP(msiof2_clk_d),
- SH_PFC_PIN_GROUP(msiof2_sync_d),
- SH_PFC_PIN_GROUP(msiof2_ss1_d),
- SH_PFC_PIN_GROUP(msiof2_ss2_d),
- SH_PFC_PIN_GROUP(msiof2_txd_d),
- SH_PFC_PIN_GROUP(msiof2_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_a),
- SH_PFC_PIN_GROUP(msiof3_sync_a),
- SH_PFC_PIN_GROUP(msiof3_ss1_a),
- SH_PFC_PIN_GROUP(msiof3_ss2_a),
- SH_PFC_PIN_GROUP(msiof3_txd_a),
- SH_PFC_PIN_GROUP(msiof3_rxd_a),
- SH_PFC_PIN_GROUP(msiof3_clk_b),
- SH_PFC_PIN_GROUP(msiof3_sync_b),
- SH_PFC_PIN_GROUP(msiof3_ss1_b),
- SH_PFC_PIN_GROUP(msiof3_ss2_b),
- SH_PFC_PIN_GROUP(msiof3_txd_b),
- SH_PFC_PIN_GROUP(msiof3_rxd_b),
- SH_PFC_PIN_GROUP(msiof3_clk_c),
- SH_PFC_PIN_GROUP(msiof3_sync_c),
- SH_PFC_PIN_GROUP(msiof3_txd_c),
- SH_PFC_PIN_GROUP(msiof3_rxd_c),
- SH_PFC_PIN_GROUP(msiof3_clk_d),
- SH_PFC_PIN_GROUP(msiof3_sync_d),
- SH_PFC_PIN_GROUP(msiof3_ss1_d),
- SH_PFC_PIN_GROUP(msiof3_txd_d),
- SH_PFC_PIN_GROUP(msiof3_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_e),
- SH_PFC_PIN_GROUP(msiof3_sync_e),
- SH_PFC_PIN_GROUP(msiof3_ss1_e),
- SH_PFC_PIN_GROUP(msiof3_ss2_e),
- SH_PFC_PIN_GROUP(msiof3_txd_e),
- SH_PFC_PIN_GROUP(msiof3_rxd_e),
- SH_PFC_PIN_GROUP(pwm0),
- SH_PFC_PIN_GROUP(pwm1_a),
- SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2_a),
- SH_PFC_PIN_GROUP(pwm2_b),
- SH_PFC_PIN_GROUP(pwm3_a),
- SH_PFC_PIN_GROUP(pwm3_b),
- SH_PFC_PIN_GROUP(pwm4_a),
- SH_PFC_PIN_GROUP(pwm4_b),
- SH_PFC_PIN_GROUP(pwm5_a),
- SH_PFC_PIN_GROUP(pwm5_b),
- SH_PFC_PIN_GROUP(pwm6_a),
- SH_PFC_PIN_GROUP(pwm6_b),
- SH_PFC_PIN_GROUP(sata0_devslp_a),
- SH_PFC_PIN_GROUP(sata0_devslp_b),
- SH_PFC_PIN_GROUP(scif0_data),
- SH_PFC_PIN_GROUP(scif0_clk),
- SH_PFC_PIN_GROUP(scif0_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_a),
- SH_PFC_PIN_GROUP(scif1_clk),
- SH_PFC_PIN_GROUP(scif1_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_b),
- SH_PFC_PIN_GROUP(scif2_data_a),
- SH_PFC_PIN_GROUP(scif2_clk),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scif3_data_a),
- SH_PFC_PIN_GROUP(scif3_clk),
- SH_PFC_PIN_GROUP(scif3_ctrl),
- SH_PFC_PIN_GROUP(scif3_data_b),
- SH_PFC_PIN_GROUP(scif4_data_a),
- SH_PFC_PIN_GROUP(scif4_clk_a),
- SH_PFC_PIN_GROUP(scif4_ctrl_a),
- SH_PFC_PIN_GROUP(scif4_data_b),
- SH_PFC_PIN_GROUP(scif4_clk_b),
- SH_PFC_PIN_GROUP(scif4_ctrl_b),
- SH_PFC_PIN_GROUP(scif4_data_c),
- SH_PFC_PIN_GROUP(scif4_clk_c),
- SH_PFC_PIN_GROUP(scif4_ctrl_c),
- SH_PFC_PIN_GROUP(scif5_data_a),
- SH_PFC_PIN_GROUP(scif5_clk_a),
- SH_PFC_PIN_GROUP(scif5_data_b),
- SH_PFC_PIN_GROUP(scif5_clk_b),
- SH_PFC_PIN_GROUP(scif_clk_a),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi1_cd),
- SH_PFC_PIN_GROUP(sdhi1_wp),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_data8),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_cd_a),
- SH_PFC_PIN_GROUP(sdhi2_wp_a),
- SH_PFC_PIN_GROUP(sdhi2_cd_b),
- SH_PFC_PIN_GROUP(sdhi2_wp_b),
- SH_PFC_PIN_GROUP(sdhi2_ds),
- SH_PFC_PIN_GROUP(sdhi3_data1),
- SH_PFC_PIN_GROUP(sdhi3_data4),
- SH_PFC_PIN_GROUP(sdhi3_data8),
- SH_PFC_PIN_GROUP(sdhi3_ctrl),
- SH_PFC_PIN_GROUP(sdhi3_cd),
- SH_PFC_PIN_GROUP(sdhi3_wp),
- SH_PFC_PIN_GROUP(sdhi3_ds),
- SH_PFC_PIN_GROUP(ssi0_data),
- SH_PFC_PIN_GROUP(ssi01239_ctrl),
- SH_PFC_PIN_GROUP(ssi1_data_a),
- SH_PFC_PIN_GROUP(ssi1_data_b),
- SH_PFC_PIN_GROUP(ssi1_ctrl_a),
- SH_PFC_PIN_GROUP(ssi1_ctrl_b),
- SH_PFC_PIN_GROUP(ssi2_data_a),
- SH_PFC_PIN_GROUP(ssi2_data_b),
- SH_PFC_PIN_GROUP(ssi2_ctrl_a),
- SH_PFC_PIN_GROUP(ssi2_ctrl_b),
- SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi349_ctrl),
- SH_PFC_PIN_GROUP(ssi4_data),
- SH_PFC_PIN_GROUP(ssi4_ctrl),
- SH_PFC_PIN_GROUP(ssi5_data),
- SH_PFC_PIN_GROUP(ssi5_ctrl),
- SH_PFC_PIN_GROUP(ssi6_data),
- SH_PFC_PIN_GROUP(ssi6_ctrl),
- SH_PFC_PIN_GROUP(ssi7_data),
- SH_PFC_PIN_GROUP(ssi78_ctrl),
- SH_PFC_PIN_GROUP(ssi8_data),
- SH_PFC_PIN_GROUP(ssi9_data_a),
- SH_PFC_PIN_GROUP(ssi9_data_b),
- SH_PFC_PIN_GROUP(ssi9_ctrl_a),
- SH_PFC_PIN_GROUP(ssi9_ctrl_b),
- SH_PFC_PIN_GROUP(tmu_tclk1_a),
- SH_PFC_PIN_GROUP(tmu_tclk1_b),
- SH_PFC_PIN_GROUP(tmu_tclk2_a),
- SH_PFC_PIN_GROUP(tmu_tclk2_b),
- SH_PFC_PIN_GROUP(tpu_to0),
- SH_PFC_PIN_GROUP(tpu_to1),
- SH_PFC_PIN_GROUP(tpu_to2),
- SH_PFC_PIN_GROUP(tpu_to3),
- SH_PFC_PIN_GROUP(usb0),
- SH_PFC_PIN_GROUP(usb1),
- SH_PFC_PIN_GROUP(usb30),
- VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
- SH_PFC_PIN_GROUP(vin4_data18_a),
- VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
- SH_PFC_PIN_GROUP(vin4_data18_b),
- VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
- SH_PFC_PIN_GROUP(vin4_sync),
- SH_PFC_PIN_GROUP(vin4_field),
- SH_PFC_PIN_GROUP(vin4_clkenb),
- SH_PFC_PIN_GROUP(vin4_clk),
- VIN_DATA_PIN_GROUP(vin5_data, 8),
- VIN_DATA_PIN_GROUP(vin5_data, 10),
- VIN_DATA_PIN_GROUP(vin5_data, 12),
- VIN_DATA_PIN_GROUP(vin5_data, 16),
- SH_PFC_PIN_GROUP(vin5_sync),
- SH_PFC_PIN_GROUP(vin5_field),
- SH_PFC_PIN_GROUP(vin5_clkenb),
- SH_PFC_PIN_GROUP(vin5_clk),
+static const struct {
+ struct sh_pfc_pin_group common[318];
+ struct sh_pfc_pin_group automotive[30];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(can0_data_a),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(canfd0_data_a),
+ SH_PFC_PIN_GROUP(canfd0_data_b),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk_a),
+ SH_PFC_PIN_GROUP(msiof1_sync_a),
+ SH_PFC_PIN_GROUP(msiof1_ss1_a),
+ SH_PFC_PIN_GROUP(msiof1_ss2_a),
+ SH_PFC_PIN_GROUP(msiof1_txd_a),
+ SH_PFC_PIN_GROUP(msiof1_rxd_a),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_txd_b),
+ SH_PFC_PIN_GROUP(msiof1_rxd_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_ss1_c),
+ SH_PFC_PIN_GROUP(msiof1_ss2_c),
+ SH_PFC_PIN_GROUP(msiof1_txd_c),
+ SH_PFC_PIN_GROUP(msiof1_rxd_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_ss2_d),
+ SH_PFC_PIN_GROUP(msiof1_txd_d),
+ SH_PFC_PIN_GROUP(msiof1_rxd_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_ss1_e),
+ SH_PFC_PIN_GROUP(msiof1_ss2_e),
+ SH_PFC_PIN_GROUP(msiof1_txd_e),
+ SH_PFC_PIN_GROUP(msiof1_rxd_e),
+ SH_PFC_PIN_GROUP(msiof1_clk_f),
+ SH_PFC_PIN_GROUP(msiof1_sync_f),
+ SH_PFC_PIN_GROUP(msiof1_ss1_f),
+ SH_PFC_PIN_GROUP(msiof1_ss2_f),
+ SH_PFC_PIN_GROUP(msiof1_txd_f),
+ SH_PFC_PIN_GROUP(msiof1_rxd_f),
+ SH_PFC_PIN_GROUP(msiof1_clk_g),
+ SH_PFC_PIN_GROUP(msiof1_sync_g),
+ SH_PFC_PIN_GROUP(msiof1_ss1_g),
+ SH_PFC_PIN_GROUP(msiof1_ss2_g),
+ SH_PFC_PIN_GROUP(msiof1_txd_g),
+ SH_PFC_PIN_GROUP(msiof1_rxd_g),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_ss1_c),
+ SH_PFC_PIN_GROUP(msiof2_ss2_c),
+ SH_PFC_PIN_GROUP(msiof2_txd_c),
+ SH_PFC_PIN_GROUP(msiof2_rxd_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_txd_d),
+ SH_PFC_PIN_GROUP(msiof2_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_c),
+ SH_PFC_PIN_GROUP(msiof3_sync_c),
+ SH_PFC_PIN_GROUP(msiof3_txd_c),
+ SH_PFC_PIN_GROUP(msiof3_rxd_c),
+ SH_PFC_PIN_GROUP(msiof3_clk_d),
+ SH_PFC_PIN_GROUP(msiof3_sync_d),
+ SH_PFC_PIN_GROUP(msiof3_ss1_d),
+ SH_PFC_PIN_GROUP(msiof3_txd_d),
+ SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_e),
+ SH_PFC_PIN_GROUP(msiof3_sync_e),
+ SH_PFC_PIN_GROUP(msiof3_ss1_e),
+ SH_PFC_PIN_GROUP(msiof3_ss2_e),
+ SH_PFC_PIN_GROUP(msiof3_txd_e),
+ SH_PFC_PIN_GROUP(msiof3_rxd_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(sata0_devslp_a),
+ SH_PFC_PIN_GROUP(sata0_devslp_b),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_ctrl),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_clk_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_clk_b),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(tmu_tclk1_a),
+ SH_PFC_PIN_GROUP(tmu_tclk1_b),
+ SH_PFC_PIN_GROUP(tmu_tclk2_a),
+ SH_PFC_PIN_GROUP(tmu_tclk2_b),
+ SH_PFC_PIN_GROUP(tpu_to0),
+ SH_PFC_PIN_GROUP(tpu_to1),
+ SH_PFC_PIN_GROUP(tpu_to2),
+ SH_PFC_PIN_GROUP(tpu_to3),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb30),
+ VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
+ SH_PFC_PIN_GROUP(vin4_data18_a),
+ VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
+ SH_PFC_PIN_GROUP(vin4_data18_b),
+ VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_sync),
+ SH_PFC_PIN_GROUP(vin4_field),
+ SH_PFC_PIN_GROUP(vin4_clkenb),
+ SH_PFC_PIN_GROUP(vin4_clk),
+ VIN_DATA_PIN_GROUP(vin5_data, 8),
+ VIN_DATA_PIN_GROUP(vin5_data, 10),
+ VIN_DATA_PIN_GROUP(vin5_data, 12),
+ VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_sync),
+ SH_PFC_PIN_GROUP(vin5_field),
+ SH_PFC_PIN_GROUP(vin5_clkenb),
+ SH_PFC_PIN_GROUP(vin5_clk),
+ },
+ .automotive = {
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
+ }
};
static const char * const audio_clk_groups[] = {
@@ -5241,62 +5248,69 @@ static const char * const vin5_groups[] = {
"vin5_clk",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(audio_clk),
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(can0),
- SH_PFC_FUNCTION(can1),
- SH_PFC_FUNCTION(can_clk),
- SH_PFC_FUNCTION(canfd0),
- SH_PFC_FUNCTION(canfd1),
- SH_PFC_FUNCTION(drif0),
- SH_PFC_FUNCTION(drif1),
- SH_PFC_FUNCTION(drif2),
- SH_PFC_FUNCTION(drif3),
- SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(hscif0),
- SH_PFC_FUNCTION(hscif1),
- SH_PFC_FUNCTION(hscif2),
- SH_PFC_FUNCTION(hscif3),
- SH_PFC_FUNCTION(hscif4),
- SH_PFC_FUNCTION(i2c0),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c3),
- SH_PFC_FUNCTION(i2c5),
- SH_PFC_FUNCTION(i2c6),
- SH_PFC_FUNCTION(intc_ex),
- SH_PFC_FUNCTION(msiof0),
- SH_PFC_FUNCTION(msiof1),
- SH_PFC_FUNCTION(msiof2),
- SH_PFC_FUNCTION(msiof3),
- SH_PFC_FUNCTION(pwm0),
- SH_PFC_FUNCTION(pwm1),
- SH_PFC_FUNCTION(pwm2),
- SH_PFC_FUNCTION(pwm3),
- SH_PFC_FUNCTION(pwm4),
- SH_PFC_FUNCTION(pwm5),
- SH_PFC_FUNCTION(pwm6),
- SH_PFC_FUNCTION(sata0),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scif3),
- SH_PFC_FUNCTION(scif4),
- SH_PFC_FUNCTION(scif5),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(sdhi3),
- SH_PFC_FUNCTION(ssi),
- SH_PFC_FUNCTION(tmu),
- SH_PFC_FUNCTION(tpu),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
- SH_PFC_FUNCTION(usb30),
- SH_PFC_FUNCTION(vin4),
- SH_PFC_FUNCTION(vin5),
+static const struct {
+ struct sh_pfc_function common[51];
+ struct sh_pfc_function automotive[4];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(sata0),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tmu),
+ SH_PFC_FUNCTION(tpu),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb30),
+ SH_PFC_FUNCTION(vin4),
+ SH_PFC_FUNCTION(vin5),
+ },
+ .automotive = {
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -6425,6 +6439,32 @@ static const struct sh_pfc_soc_operations r8a77965_pinmux_ops = {
.set_bias = r8a77965_pinmux_set_bias,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A774B1
+const struct sh_pfc_soc_info r8a774b1_pinmux_info = {
+ .name = "r8a774b1_pfc",
+ .ops = &r8a77965_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.name = "r8a77965_pfc",
.ops = &r8a77965_pinmux_ops,
@@ -6434,10 +6474,12 @@ const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
@@ -6447,3 +6489,4 @@ const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
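
The rewrite of pinmux_groups and pinmux_functions above follows the split-table idiom used elsewhere in the R-Car/RZ pin controllers: the entries move into an anonymous struct with a fixed-size .common array and a fixed-size .automotive array, so the new RZ/G2N (r8a774b1) entry registers only the common part while R-Car M3-N (r8a77965) keeps both by summing the two ARRAY_SIZE()s. This leans on the two member arrays being laid out back to back, which holds here because they share the same element type and alignment. A stripped-down sketch of the idiom with illustrative types (not the PFC structures):

#include <linux/kernel.h>	/* ARRAY_SIZE() */

struct item { const char *name; };	/* stand-in for sh_pfc_pin_group */

static const struct {
	struct item common[2];
	struct item automotive[1];
} items = {
	.common     = { { "a" }, { "b" } },
	.automotive = { { "c" } },
};

/* Full SoC: point at .common and count both halves. */
static const struct item *full_list = items.common;
static const unsigned int nr_full = ARRAY_SIZE(items.common) +
				    ARRAY_SIZE(items.automotive);

/* Reduced SoC: same pointer, but only the common count. */
static const unsigned int nr_reduced = ARRAY_SIZE(items.common);
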
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index 2dfb8d9cfda1..c926a59dd21c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -232,8 +232,8 @@
#define IP2_11_8 FM(AVB_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_15_12 FM(BS_N) FM(PWM0_A) FM(AVB_MAGIC) FM(VI4_CLK) F_(0, 0) FM(TX3_C) F_(0, 0) FM(VI5_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_19_16 FM(RD_N) FM(PWM1_A) FM(AVB_LINK) FM(VI4_FIELD) F_(0, 0) FM(RX3_C) FM(FSCLKST2_N_A) FM(VI5_DATA0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH_A) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE_A) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_31_28 FM(A0) FM(IRQ0) FM(PWM2_A) FM(MSIOF3_SS1_B) FM(VI5_CLK_A) FM(DU_CDE) FM(HRX3_D) FM(IERX) FM(QSTB_QHE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_3_0 FM(A1) FM(IRQ1) FM(PWM3_A) FM(DU_DOTCLKIN1) FM(VI5_DATA0_A) FM(DU_DISP_CDE) FM(SDA6_B) FM(IETX) FM(QCPV_QDE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_7_4 FM(A2) FM(IRQ2) FM(AVB_AVTP_PPS) FM(VI4_CLKENB) FM(VI5_DATA1_A) FM(DU_DISP) FM(SCL6_B) F_(0, 0) FM(QSTVB_QVE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -448,6 +448,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
+#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
#define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
#define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
#define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
@@ -468,7 +470,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define PINMUX_MOD_SELS \
\
-MOD_SEL0_30_29 \
+ MOD_SEL1_31 \
+MOD_SEL0_30_29 MOD_SEL1_30 \
MOD_SEL1_29 \
MOD_SEL0_28 MOD_SEL1_28 \
MOD_SEL0_27_26 \
@@ -634,7 +637,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_23_20, RD_WR_N),
PINMUX_IPSR_MSEL(IP2_23_20, SCL7_A, SEL_I2C7_0),
- PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH_A),
+ PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH),
PINMUX_IPSR_GPSR(IP2_23_20, VI4_VSYNC_N),
PINMUX_IPSR_GPSR(IP2_23_20, TX5_B),
PINMUX_IPSR_MSEL(IP2_23_20, SCK3_C, SEL_SCIF3_2),
@@ -642,7 +645,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_27_24, EX_WAIT0),
PINMUX_IPSR_MSEL(IP2_27_24, SDA7_A, SEL_I2C7_0),
- PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE_A),
+ PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE),
PINMUX_IPSR_GPSR(IP2_27_24, VI4_HSYNC_N),
PINMUX_IPSR_MSEL(IP2_27_24, RX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP2_27_24, PWM6_A, SEL_PWM6_0),
@@ -1058,7 +1061,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_GPSR(IP10_27_24, SSI_SCK2_B),
+ PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1),
PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0),
PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP),
@@ -1067,7 +1070,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_GPSR(IP10_31_28, SSI_WS2_B),
+ PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1),
PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0),
/* IPSR11 */
@@ -1085,13 +1088,13 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0),
- PINMUX_IPSR_GPSR(IP11_11_8, SSI_SCK2_A),
+ PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0),
PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
- PINMUX_IPSR_GPSR(IP11_15_12, SSI_WS2_A),
+ PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1),
@@ -1196,7 +1199,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0),
PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1),
PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1),
- PINMUX_IPSR_GPSR(IP13_19_16, SIM0_D_A),
+ PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1),
@@ -1264,7 +1267,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2),
PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3),
PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1),
- PINMUX_IPSR_GPSR(IP15_15_12, SIM0_D_B),
+ PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1),
PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6),
PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
@@ -1524,22 +1527,22 @@ static const unsigned int avb_avtp_pps_mux[] = {
AVB_AVTP_PPS_MARK,
};
-static const unsigned int avb_avtp_match_a_pins[] = {
- /* AVB_AVTP_MATCH_A */
+static const unsigned int avb_avtp_match_pins[] = {
+ /* AVB_AVTP_MATCH */
RCAR_GP_PIN(2, 24),
};
-static const unsigned int avb_avtp_match_a_mux[] = {
- AVB_AVTP_MATCH_A_MARK,
+static const unsigned int avb_avtp_match_mux[] = {
+ AVB_AVTP_MATCH_MARK,
};
-static const unsigned int avb_avtp_capture_a_pins[] = {
- /* AVB_AVTP_CAPTURE_A */
+static const unsigned int avb_avtp_capture_pins[] = {
+ /* AVB_AVTP_CAPTURE */
RCAR_GP_PIN(2, 25),
};
-static const unsigned int avb_avtp_capture_a_mux[] = {
- AVB_AVTP_CAPTURE_A_MARK,
+static const unsigned int avb_avtp_capture_mux[] = {
+ AVB_AVTP_CAPTURE_MARK,
};
/* - CAN ------------------------------------------------------------------ */
@@ -3784,8 +3787,8 @@ static const struct {
SH_PFC_PIN_GROUP(avb_phy_int),
SH_PFC_PIN_GROUP(avb_mii),
SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match),
+ SH_PFC_PIN_GROUP(avb_avtp_capture),
SH_PFC_PIN_GROUP(can0_data),
SH_PFC_PIN_GROUP(can1_data),
SH_PFC_PIN_GROUP(can_clk),
@@ -4061,8 +4064,8 @@ static const char * const avb_groups[] = {
"avb_phy_int",
"avb_mii",
"avb_avtp_pps",
- "avb_avtp_match_a",
- "avb_avtp_capture_a",
+ "avb_avtp_match",
+ "avb_avtp_capture",
};
static const char * const can0_groups[] = {
@@ -4957,11 +4960,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- GROUP(2, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1,
- 2, 2, 2, 1, 1, 2, 1, 4),
+ GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
+ 1, 2, 2, 2, 1, 1, 2, 1, 4),
GROUP(
- /* RESERVED 31, 30 */
- 0, 0, 0, 0,
+ MOD_SEL1_31
+ MOD_SEL1_30
MOD_SEL1_29
MOD_SEL1_28
/* RESERVED 27 */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 5dfd991ffdaa..dbc36079c381 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1450,7 +1450,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(ET0_ETXD2_A),
GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
GPIO_FN(ET0_ETXD3_A),
- GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
+ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
@@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_20 [1] */
FN_EX_WAIT0, FN_TCLK1_B,
/* IP3_19_18 [2] */
- FN_RD_WR, FN_TCLK1_B, 0, 0,
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
/* IP3_17_15 [3] */
FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
FN_ET0_ETXD3_A, 0, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 835148fc0f28..640d2a4cb838 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -309,6 +309,7 @@ extern const struct sh_pfc_soc_info r8a7744_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
extern const struct sh_pfc_soc_info r8a77470_pinmux_info;
extern const struct sh_pfc_soc_info r8a774a1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a774b1_pinmux_info;
extern const struct sh_pfc_soc_info r8a774c0_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
@@ -319,7 +320,8 @@ extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795es1_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7796_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
extern const struct sh_pfc_soc_info r8a77970_pinmux_info;
extern const struct sh_pfc_soc_info r8a77980_pinmux_info;
@@ -422,12 +424,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
/*
* Describe a pinmux configuration in which a pin is physically multiplexed
* with other pins.
- * - ipsr: IPSR field (unused, for documentation purposes only)
+ * - ipsr: IPSR field
* - fn: Function name
* - psel: Physical multiplexing selector
*/
#define PINMUX_IPSR_PHYS(ipsr, fn, psel) \
- PINMUX_DATA(fn##_MARK, FN_##psel)
+ PINMUX_DATA(fn##_MARK, FN_##psel, FN_##ipsr)
/*
* Describe a pinmux configuration for a single-function pin with GPIO
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 924080362bf7..b1a9611f46b3 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5996,6 +5996,7 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
struct gpio_chip *chip;
u32 nbank;
int ret, idx;
+ struct gpio_irq_chip *girq;
ret = of_property_read_u32(np, "gpio-banks", &nbank);
if (ret) {
@@ -6048,24 +6049,15 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
chip->of_gpio_n_cells = 2;
chip->parent = &pdev->dev;
- /* Add gpio chip to system */
- ret = gpiochip_add_data(chip, a7gc);
- if (ret) {
- dev_err(&pdev->dev,
- "%pOF: error in probe function with status %d\n",
- np, ret);
- goto failed;
- }
-
- /* Add gpio chip to irq subsystem */
- ret = gpiochip_irqchip_add(chip, &atlas7_gpio_irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- goto failed;
- }
-
+ girq = &chip->irq;
+ girq->chip = &atlas7_gpio_irq_chip;
+ girq->parent_handler = atlas7_gpio_handle_irq;
+ girq->num_parents = nbank;
+ girq->parents = devm_kcalloc(&pdev->dev, nbank,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (idx = 0; idx < nbank; idx++) {
struct atlas7_gpio_bank *bank;
@@ -6084,9 +6076,18 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
goto failed;
}
bank->irq = ret;
+ girq->parents[idx] = ret;
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(chip, &atlas7_gpio_irq_chip,
- bank->irq, atlas7_gpio_handle_irq);
+ /* Add gpio chip to system */
+ ret = gpiochip_add_data(chip, a7gc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%pOF: error in probe function with status %d\n",
+ np, ret);
+ goto failed;
}
platform_set_drvdata(pdev, a7gc);
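
The atlas7 change above, and the sirf and plgpio conversions that follow, move the chained interrupt setup out of gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() calls made after registration and into the gpio_irq_chip embedded in the gpio_chip, filled in before gpiochip_add_data(); the gpiolib core then registers the GPIOs and the irqchip in one step. A condensed sketch of the pattern with placeholder names (example_* and 'priv' are hypothetical):

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

static int example_gpio_register(struct platform_device *pdev,
				 struct gpio_chip *chip, void *priv,
				 struct irq_chip *example_irq_chip,
				 irq_flow_handler_t example_parent_handler,
				 unsigned int parent_irq)
{
	struct gpio_irq_chip *girq = &chip->irq;

	girq->chip = example_irq_chip;
	girq->parent_handler = example_parent_handler;
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = parent_irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_level_irq;

	/* Registers the GPIO chip and its irqchip together. */
	return gpiochip_add_data(chip, priv);
}
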
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 780c31bb4009..1ebcb957c654 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -785,6 +785,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
struct sirfsoc_gpio_bank *bank;
void __iomem *regs;
struct platform_device *pdev;
+ struct gpio_irq_chip *girq;
u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS];
@@ -816,36 +817,33 @@ static int sirfsoc_gpio_probe(struct device_node *np)
sgpio->chip.gc.parent = &pdev->dev;
sgpio->chip.regs = regs;
- err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
- if (err) {
- dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
- np, err);
- goto out;
- }
-
- err = gpiochip_irqchip_add(&sgpio->chip.gc,
- &sirfsoc_irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (err) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- goto out_banks;
- }
-
+ girq = &sgpio->chip.gc.irq;
+ girq->chip = &sirfsoc_irq_chip;
+ girq->parent_handler = sirfsoc_gpio_handle_irq;
+ girq->num_parents = SIRFSOC_GPIO_NO_OF_BANKS;
+ girq->parents = devm_kcalloc(&pdev->dev, SIRFSOC_GPIO_NO_OF_BANKS,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
bank = &sgpio->sgpio_bank[i];
spin_lock_init(&bank->lock);
bank->parent_irq = platform_get_irq(pdev, i);
if (bank->parent_irq < 0) {
err = bank->parent_irq;
- goto out_banks;
+ goto out;
}
+ girq->parents[i] = bank->parent_irq;
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(&sgpio->chip.gc,
- &sirfsoc_irq_chip,
- bank->parent_irq,
- sirfsoc_gpio_handle_irq);
+ err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
+ if (err) {
+ dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
+ np, err);
+ goto out;
}
err = gpiochip_add_pin_range(&sgpio->chip.gc, dev_name(&pdev->dev),
@@ -867,7 +865,6 @@ static int sirfsoc_gpio_probe(struct device_node *np)
return 0;
out_no_range:
-out_banks:
gpiochip_remove(&sgpio->chip.gc);
out:
iounmap(regs);
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 9d906474f3e4..1ebbc49b16f1 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -515,15 +515,13 @@ end:
static int plgpio_probe(struct platform_device *pdev)
{
struct plgpio *plgpio;
- struct resource *res;
int ret, irq;
plgpio = devm_kzalloc(&pdev->dev, sizeof(*plgpio), GFP_KERNEL);
if (!plgpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- plgpio->base = devm_ioremap_resource(&pdev->dev, res);
+ plgpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(plgpio->base))
return PTR_ERR(plgpio->base);
@@ -569,40 +567,35 @@ static int plgpio_probe(struct platform_device *pdev)
}
}
- ret = gpiochip_add_data(&plgpio->chip, plgpio);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpio chip\n");
- goto unprepare_clk;
- }
-
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_info(&pdev->dev, "PLGPIO registered without IRQs\n");
- return 0;
+ if (irq > 0) {
+ struct gpio_irq_chip *girq;
+
+ girq = &plgpio->chip.irq;
+ girq->chip = &plgpio_irqchip;
+ girq->parent_handler = plgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ dev_info(&pdev->dev, "PLGPIO registering with IRQs\n");
+ } else {
+ dev_info(&pdev->dev, "PLGPIO registering without IRQs\n");
}
- ret = gpiochip_irqchip_add(&plgpio->chip,
- &plgpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_add_data(&plgpio->chip, plgpio);
if (ret) {
- dev_err(&pdev->dev, "failed to add irqchip to gpiochip\n");
- goto remove_gpiochip;
+ dev_err(&pdev->dev, "unable to add gpio chip\n");
+ goto unprepare_clk;
}
- gpiochip_set_chained_irqchip(&plgpio->chip,
- &plgpio_irqchip,
- irq,
- plgpio_irq_handler);
-
- dev_info(&pdev->dev, "PLGPIO registered with IRQs\n");
-
return 0;
-remove_gpiochip:
- dev_info(&pdev->dev, "Remove gpiochip\n");
- gpiochip_remove(&plgpio->chip);
unprepare_clk:
if (!IS_ERR(plgpio->clk))
clk_unprepare(plgpio->clk);
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 7ec19c73f870..948f56abb9ae 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -358,7 +358,6 @@ int spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct spear_pmx *pmx;
if (!machdata)
@@ -368,8 +367,7 @@ int spear_pinctrl_probe(struct platform_device *pdev,
if (!pmx)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmx->vbase = devm_ioremap_resource(&pdev->dev, res);
+ pmx->vbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmx->vbase))
return PTR_ERR(pmx->vbase);
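
This hunk, like the plgpio one above and the sunxi, Tegra and VT8500 hunks below, swaps the platform_get_resource() + devm_ioremap_resource() pair for devm_platform_ioremap_resource(), which looks up the MEM resource by index and maps it in one call. A minimal sketch (hypothetical helper, assuming the register block is the first MEM resource):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_map_regs(struct platform_device *pdev, void __iomem **base)
{
	/* Index 0 = first IORESOURCE_MEM entry of the platform device. */
	*base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(*base))
		return PTR_ERR(*base);

	return 0;
}
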
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 7b95bf5a82a9..157712ab05a8 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -41,7 +41,8 @@
#define PUBCP_SLEEP_MODE BIT(14)
#define TGLDSP_SLEEP_MODE BIT(15)
#define AGDSP_SLEEP_MODE BIT(16)
-#define SLEEP_MODE_MASK GENMASK(3, 0)
+#define CM4_SLEEP_MODE BIT(17)
+#define SLEEP_MODE_MASK GENMASK(5, 0)
#define SLEEP_MODE_SHIFT 13
#define SLEEP_INPUT BIT(1)
@@ -81,6 +82,7 @@ enum pin_sleep_mode {
PUBCP_SLEEP = BIT(1),
TGLDSP_SLEEP = BIT(2),
AGDSP_SLEEP = BIT(3),
+ CM4_SLEEP = BIT(4),
};
enum pin_func_sel {
@@ -484,6 +486,13 @@ static int sprd_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin_id,
SLEEP_PULL_UP_MASK) << 16;
arg |= (reg >> PULL_UP_SHIFT) & PULL_UP_MASK;
break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if ((reg & (SLEEP_PULL_DOWN | SLEEP_PULL_UP)) ||
+ (reg & (PULL_DOWN | PULL_UP_4_7K | PULL_UP_20K)))
+ return -EINVAL;
+
+ arg = 1;
+ break;
case PIN_CONFIG_SLEEP_HARDWARE_STATE:
arg = 0;
break;
@@ -609,6 +618,8 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
val |= TGLDSP_SLEEP_MODE;
if (arg & AGDSP_SLEEP)
val |= AGDSP_SLEEP_MODE;
+ if (arg & CM4_SLEEP)
+ val |= CM4_SLEEP_MODE;
mask = SLEEP_MODE_MASK;
shift = SLEEP_MODE_SHIFT;
@@ -674,6 +685,16 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
shift = PULL_UP_SHIFT;
}
break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (is_sleep_config == true) {
+ val = shift = 0;
+ mask = SLEEP_PULL_DOWN | SLEEP_PULL_UP;
+ } else {
+ val = shift = 0;
+ mask = PULL_DOWN | PULL_UP_20K |
+ PULL_UP_4_7K;
+ }
+ break;
case PIN_CONFIG_SLEEP_HARDWARE_STATE:
continue;
default:
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0cbca30b75dc..b35c3245ab3f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1385,7 +1385,6 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
struct pinmux_ops *pmxops;
- struct resource *res;
int i, ret, last_pin, pin_idx;
struct clk *clk;
@@ -1396,8 +1395,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
raw_spin_lock_init(&pctl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->membase = devm_ioremap_resource(&pdev->dev, res);
+ pctl->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->membase))
return PTR_ERR(pctl->membase);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 95002e3ecaff..6f7b3767f453 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -873,7 +873,6 @@ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
{
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
- struct resource *res;
struct phy *phy;
int err;
@@ -885,11 +884,16 @@ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
mutex_init(&padctl->lock);
padctl->dev = &pdev->dev;
+ /*
+ * Note that we can't replace this by of_device_get_match_data()
+ * because we need the separate matching table for this legacy code on
+ * Tegra124. of_device_get_match_data() would attempt to use the table
+ * from the updated driver and fail.
+ */
match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
padctl->soc = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- padctl->regs = devm_ioremap_resource(&pdev->dev, res);
+ padctl->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(padctl->regs))
return PTR_ERR(padctl->regs);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index e9a7cbb9aa33..692d8b3e2a20 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -781,8 +781,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
return -ENOMEM;
for (i = 0; i < pmx->nbanks; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- pmx->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ pmx->regs[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(pmx->regs[i]))
return PTR_ERR(pmx->regs[i]);
}
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index e5e7f1f22813..b522ca010332 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -496,7 +496,7 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
return -EINVAL;
rows = pinctrl_count_index_with_args(np, name);
- if (rows == -EINVAL)
+ if (rows < 0)
return rows;
*map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 4d5cd7d8c760..ea910a18b4d7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -553,10 +553,8 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
struct wmt_pinctrl_data *data)
{
int err;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 9512045420ec..786bf89487d6 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -387,7 +387,6 @@ int zx_pinctrl_init(struct platform_device *pdev,
struct pinctrl_desc *pctldesc;
struct zx_pinctrl *zpctl;
struct device_node *np;
- struct resource *res;
int ret;
zpctl = devm_kzalloc(&pdev->dev, sizeof(*zpctl), GFP_KERNEL);
@@ -396,8 +395,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
spin_lock_init(&zpctl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- zpctl->base = devm_ioremap_resource(&pdev->dev, res);
+ zpctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(zpctl->base))
return PTR_ERR(zpctl->base);
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index ee5f08ea57b6..5f57282a28da 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -132,9 +132,9 @@ config CROS_EC_LPC
module will be called cros_ec_lpcs.
config CROS_EC_PROTO
- bool
- help
- ChromeOS EC communication protocol helpers.
+ bool
+ help
+ ChromeOS EC communication protocol helpers.
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
@@ -190,6 +190,19 @@ config CROS_EC_DEBUGFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_debugfs.
+config CROS_EC_SENSORHUB
+ tristate "ChromeOS EC MEMS Sensor Hub"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+	  Allow loading IIO sensors. This driver is loaded by the MFD core
+	  and will in turn query the EC and register the sensors.
+	  It also dispatches the sensor data coming from the EC to the IIO
+	  sensor objects.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_sensorhub.
+
config CROS_EC_SYSFS
tristate "ChromeOS EC control and information through sysfs"
depends on MFD_CROS_EC_DEV && SYSFS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 477ec3d1d1c9..aacd5920d8a1 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_SENSORHUB) += cros_ec_sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index fd77e6fa74c2..6d6ce86a1408 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -31,13 +31,32 @@ static struct cros_ec_platform pd_p = {
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX),
};
-static irqreturn_t ec_irq_thread(int irq, void *data)
+static irqreturn_t ec_irq_handler(int irq, void *data)
{
struct cros_ec_device *ec_dev = data;
- bool wake_event = true;
+
+ ec_dev->last_event_time = cros_ec_get_time_ns();
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * cros_ec_handle_event() - process and forward pending events on EC
+ * @ec_dev: Device with events to process.
+ *
+ * Call this function in a loop when the kernel is notified that the EC has
+ * pending events.
+ *
+ * Return: true if more events are still pending and this function should be
+ * called again.
+ */
+bool cros_ec_handle_event(struct cros_ec_device *ec_dev)
+{
+ bool wake_event;
+ bool ec_has_more_events;
int ret;
- ret = cros_ec_get_next_event(ec_dev, &wake_event);
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &ec_has_more_events);
/*
* Signal only if wake host events or any interrupt if
@@ -50,6 +69,20 @@ static irqreturn_t ec_irq_thread(int irq, void *data)
if (ret > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
0, ec_dev);
+
+ return ec_has_more_events;
+}
+EXPORT_SYMBOL(cros_ec_handle_event);
+
+static irqreturn_t ec_irq_thread(int irq, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
+
return IRQ_HANDLED;
}
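
The change above splits the EC interrupt into a hard handler that only records a timestamp and a threaded handler that drains the event queue. In isolation the same split looks like this (a sketch; foo_device and foo_handle_one_event() stand in for the cros_ec equivalents):

#include <linux/interrupt.h>
#include <linux/ktime.h>

struct foo_device {
	s64 last_event_time;
};

static bool foo_handle_one_event(struct foo_device *fdev)
{
	/* Fetch and dispatch one event; return true if more are pending. */
	return false;
}

static irqreturn_t foo_hard_irq(int irq, void *data)
{
	struct foo_device *fdev = data;

	/* Hard-IRQ context: only take the timestamp, then defer. */
	fdev->last_event_time = ktime_get_boottime_ns();
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_irq(int irq, void *data)
{
	struct foo_device *fdev = data;

	/* Process context: safe to sleep while draining the queue. */
	while (foo_handle_one_event(fdev))
		;
	return IRQ_HANDLED;
}

Both handlers are then passed to devm_request_threaded_irq() with IRQF_ONESHOT, as the later hunk in this file shows.
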
@@ -104,6 +137,15 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
return ret;
}
+/**
+ * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
+ * @ec_dev: Device to register.
+ *
+ * Before calling this, allocate a pointer to a new device and then fill
+ * in all the fields up to the --private-- marker.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_register(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -131,10 +173,12 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
return err;
}
- if (ec_dev->irq) {
- err = devm_request_threaded_irq(dev, ec_dev->irq, NULL,
- ec_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "chromeos-ec", ec_dev);
+ if (ec_dev->irq > 0) {
+ err = devm_request_threaded_irq(dev, ec_dev->irq,
+ ec_irq_handler,
+ ec_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "chromeos-ec", ec_dev);
if (err) {
dev_err(dev, "Failed to request IRQ %d: %d",
ec_dev->irq, err);
@@ -198,6 +242,14 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
}
EXPORT_SYMBOL(cros_ec_register);
+/**
+ * cros_ec_unregister() - Remove a ChromeOS EC.
+ * @ec_dev: Device to unregister.
+ *
+ * Call this to deregister a ChromeOS EC, then clean up any private data.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_unregister(struct cros_ec_device *ec_dev)
{
if (ec_dev->pd)
@@ -209,6 +261,14 @@ int cros_ec_unregister(struct cros_ec_device *ec_dev)
EXPORT_SYMBOL(cros_ec_unregister);
#ifdef CONFIG_PM_SLEEP
+/**
+ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+ * @ec_dev: Device to suspend.
+ *
+ * This can be called by drivers to handle a suspend event.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_suspend(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -238,11 +298,19 @@ EXPORT_SYMBOL(cros_ec_suspend);
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
while (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL) > 0)
+ cros_ec_get_next_event(ec_dev, NULL, NULL) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
}
+/**
+ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+ * @ec_dev: Device to resume.
+ *
+ * This can be called by drivers to handle a resume event.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_resume(struct cros_ec_device *ec_dev)
{
int ret;
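
Client drivers receive the events drained above through the ec_dev->event_notifier blocking notifier chain. A minimal sketch of a consumer hooking into it (the callback body is illustrative only):

#include <linux/notifier.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>

static int foo_ec_notify(struct notifier_block *nb,
			 unsigned long queued_during_suspend, void *_ec)
{
	struct cros_ec_device *ec_dev = _ec;

	/* event_data was filled in by cros_ec_get_next_event(). */
	if (ec_dev->event_data.event_type == EC_MKBP_EVENT_SENSOR_FIFO) {
		/* hand the payload to the sensor handling code */
	}

	return NOTIFY_OK;
}

static struct notifier_block foo_ec_nb = {
	.notifier_call = foo_ec_notify,
};

/* In probe: blocking_notifier_chain_register(&ec_dev->event_notifier, &foo_ec_nb); */
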
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 25ca2c894b4d..e5996821d08b 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -136,11 +136,11 @@ static void ish_evt_handler(struct work_struct *work)
struct ishtp_cl_data *client_data =
container_of(work, struct ishtp_cl_data, work_ec_evt);
struct cros_ec_device *ec_dev = client_data->ec_dev;
+ bool ec_has_more_events;
- if (cros_ec_get_next_event(ec_dev, NULL) > 0) {
- blocking_notifier_call_chain(&ec_dev->event_notifier,
- 0, ec_dev);
- }
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
}
/**
@@ -200,13 +200,14 @@ static int ish_send(struct ishtp_cl_data *client_data,
* process_recv() - Received and parse incoming packet
* @cros_ish_cl: Client instance to get stats
* @rb_in_proc: Host interface message buffer
+ * @timestamp: Timestamp of when parent callback started
*
* Parse the incoming packet. If it is a response packet then it will
* update per instance flags and wake up the caller waiting to for the
* response. If it is an event packet then it will schedule event work.
*/
static void process_recv(struct ishtp_cl *cros_ish_cl,
- struct ishtp_cl_rb *rb_in_proc)
+ struct ishtp_cl_rb *rb_in_proc, ktime_t timestamp)
{
size_t data_len = rb_in_proc->buf_idx;
struct ishtp_cl_data *client_data =
@@ -295,6 +296,11 @@ error_wake_up:
break;
case CROS_MKBP_EVENT:
+ /*
+ * Set timestamp from beginning of function since we actually
+ * got an incoming MKBP event
+ */
+ client_data->ec_dev->last_event_time = timestamp;
/* The event system doesn't send any data in buffer */
schedule_work(&client_data->work_ec_evt);
@@ -322,10 +328,17 @@ static void ish_event_cb(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl_rb *rb_in_proc;
struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
+ ktime_t timestamp;
+
+ /*
+ * Take timestamp as close to hardware interrupt as possible for sensor
+ * timestamps.
+ */
+ timestamp = cros_ec_get_time_ns();
while ((rb_in_proc = ishtp_cl_rx_get_rb(cros_ish_cl)) != NULL) {
/* Decide what to do with received data */
- process_recv(cros_ish_cl, rb_in_proc);
+ process_recv(cros_ish_cl, rb_in_proc, timestamp);
}
}
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 7d10d909435f..dccf479c6625 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -312,11 +312,20 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
{
struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+ int ret;
- if (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL) > 0)
- blocking_notifier_call_chain(&ec_dev->event_notifier, 0,
- ec_dev);
+ ec_dev->last_event_time = cros_ec_get_time_ns();
+
+ if (ec_dev->mkbp_event_supported)
+ do {
+ ret = cros_ec_get_next_event(ec_dev, NULL,
+ &ec_has_more_events);
+ if (ret > 0)
+ blocking_notifier_call_chain(
+ &ec_dev->event_notifier, 0,
+ ec_dev);
+ } while (ec_has_more_events);
if (value == ACPI_NOTIFY_DEVICE_WAKE)
pm_system_wakeup();
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index f659f96bda12..da1b1c450433 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -117,6 +117,17 @@ static int send_command(struct cros_ec_device *ec_dev,
return ret;
}
+/**
+ * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
+ * @ec_dev: Device to prepare the outgoing message for.
+ * @msg: Message to write.
+ *
+ * This is intended to be used by all ChromeOS EC drivers, but at present
+ * only SPI uses it. Once LPC uses the same protocol it can start using it.
+ * I2C could use it now, with a refactor of the existing code.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -141,6 +152,16 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
}
EXPORT_SYMBOL(cros_ec_prepare_tx);
+/**
+ * cros_ec_check_result() - Check ec_msg->result.
+ * @ec_dev: EC device.
+ * @msg: Message to check.
+ *
+ * This is used by ChromeOS EC drivers to check the ec_msg->result for
+ * errors and to warn about them.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -326,6 +347,13 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
return ret;
}
+/**
+ * cros_ec_query_all() - Query the protocol version supported by the
+ * ChromeOS EC.
+ * @ec_dev: Device to query.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_query_all(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -428,7 +456,10 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
if (ret < 0 || ver_mask == 0)
ec_dev->mkbp_event_supported = 0;
else
- ec_dev->mkbp_event_supported = 1;
+ ec_dev->mkbp_event_supported = fls(ver_mask);
+
+ dev_dbg(ec_dev->dev, "MKBP support version %u\n",
+ ec_dev->mkbp_event_supported - 1);
/* Probe if host sleep v1 is supported for S0ix failure detection. */
ret = cros_ec_get_host_command_version_mask(ec_dev,
@@ -453,6 +484,16 @@ exit:
}
EXPORT_SYMBOL(cros_ec_query_all);
+/**
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used
+ * instead of calling the EC's cmd_xfer() callback directly.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -500,6 +541,18 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
}
EXPORT_SYMBOL(cros_ec_cmd_xfer);
+/**
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * This function is identical to cros_ec_cmd_xfer, except it returns success
+ * status only if both the command was transmitted successfully and the EC
+ * replied with success status. It's not necessary to check msg->result when
+ * using this function.
+ *
+ * Return: The number of bytes transferred on success or negative error code.
+ */
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -519,6 +572,7 @@ EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
static int get_next_event_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg,
+ struct ec_response_get_next_event_v1 *event,
int version, uint32_t size)
{
int ret;
@@ -531,7 +585,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret > 0) {
ec_dev->event_size = ret - 1;
- memcpy(&ec_dev->event_data, msg->data, ret);
+ ec_dev->event_data = *event;
}
return ret;
@@ -539,30 +593,26 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
static int get_next_event(struct cros_ec_device *ec_dev)
{
- u8 buffer[sizeof(struct cros_ec_command) + sizeof(ec_dev->event_data)];
- struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
- static int cmd_version = 1;
- int ret;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_get_next_event_v1 event;
+ } __packed buf;
+ struct cros_ec_command *msg = &buf.msg;
+ struct ec_response_get_next_event_v1 *event = &buf.event;
+ const int cmd_version = ec_dev->mkbp_event_supported - 1;
+ memset(msg, 0, sizeof(*msg));
if (ec_dev->suspended) {
dev_dbg(ec_dev->dev, "Device suspended.\n");
return -EHOSTDOWN;
}
- if (cmd_version == 1) {
- ret = get_next_event_xfer(ec_dev, msg, cmd_version,
- sizeof(struct ec_response_get_next_event_v1));
- if (ret < 0 || msg->result != EC_RES_INVALID_VERSION)
- return ret;
-
- /* Fallback to version 0 for future send attempts */
- cmd_version = 0;
- }
-
- ret = get_next_event_xfer(ec_dev, msg, cmd_version,
+ if (cmd_version == 0)
+ return get_next_event_xfer(ec_dev, msg, event, 0,
sizeof(struct ec_response_get_next_event));
- return ret;
+ return get_next_event_xfer(ec_dev, msg, event, cmd_version,
+ sizeof(struct ec_response_get_next_event_v1));
}
static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
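
The rewritten get_next_event() above swaps a raw byte array for an anonymous packed struct, so the command header and the response payload stay adjacent and correctly sized, and the flexible msg->data[] array aliases the payload directly. The same allocation pattern in isolation (the protocol version picked here is illustrative):

#include <linux/string.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>

static int foo_fetch_event(struct cros_ec_device *ec_dev)
{
	struct {
		struct cros_ec_command msg;
		struct ec_response_get_next_event_v1 event;
	} __packed buf;
	struct cros_ec_command *msg = &buf.msg;

	memset(&buf, 0, sizeof(buf));
	msg->version = 1;		/* illustrative; the driver derives it */
	msg->command = EC_CMD_GET_NEXT_EVENT;
	msg->insize = sizeof(buf.event);

	/* msg->data[] and buf.event occupy the same bytes: no extra copy. */
	return cros_ec_cmd_xfer(ec_dev, msg);
}
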
@@ -584,27 +634,60 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
return ec_dev->event_size;
}
-int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
+/**
+ * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ * @wake_event: Pointer to a bool set to true upon return if the event might be
+ * treated as a wake event. Ignored if null.
+ * @has_more_events: Pointer to bool set to true if more than one event is
+ * pending.
+ * Some EC will set this flag to indicate cros_ec_get_next_event()
+ * Some ECs set this flag to indicate that cros_ec_get_next_event()
+ * can be called multiple times in a row.
+ * It is an optimization that avoids issuing an EC command for
+ * nothing, or waiting for another interrupt from the EC, before
+ * processing the next message.
+ *
+ * Return: negative error code on errors; 0 for no data; or else number of
+ * bytes received (i.e., an event was retrieved successfully). Event types are
+ * written out to @ec_dev->event_data.event_type on success.
+ */
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ bool *wake_event,
+ bool *has_more_events)
{
u8 event_type;
u32 host_event;
int ret;
- if (!ec_dev->mkbp_event_supported) {
- ret = get_keyboard_state_event(ec_dev);
- if (ret <= 0)
- return ret;
+ /*
+ * Default value for wake_event.
+ * Wake up on keyboard event, wake up for spurious interrupt or link
+ * error to the EC.
+ */
+ if (wake_event)
+ *wake_event = true;
- if (wake_event)
- *wake_event = true;
+ /*
+ * Default value for has_more_events.
+ * EC will raise another interrupt if AP does not process all events
+ * anyway.
+ */
+ if (has_more_events)
+ *has_more_events = false;
- return ret;
- }
+ if (!ec_dev->mkbp_event_supported)
+ return get_keyboard_state_event(ec_dev);
ret = get_next_event(ec_dev);
if (ret <= 0)
return ret;
+ if (has_more_events)
+ *has_more_events = ec_dev->event_data.event_type &
+ EC_MKBP_HAS_MORE_EVENTS;
+ ec_dev->event_data.event_type &= EC_MKBP_EVENT_TYPE_MASK;
+
if (wake_event) {
event_type = ec_dev->event_data.event_type;
host_event = cros_ec_get_host_event(ec_dev);
@@ -619,15 +702,22 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
else if (host_event &&
!(host_event & ec_dev->host_event_wake_mask))
*wake_event = false;
- /* Consider all other events as wake events. */
- else
- *wake_event = true;
}
return ret;
}
EXPORT_SYMBOL(cros_ec_get_next_event);
+/**
+ * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ *
+ * When MKBP is supported and the EC raises an interrupt, we collect the
+ * raised events and call the functions registered on the EC notifier. This
+ * function is a helper to find out which events were raised.
+ *
+ * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
+ */
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev)
{
u32 host_event;
@@ -647,3 +737,120 @@ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev)
return host_event;
}
EXPORT_SYMBOL(cros_ec_get_host_event);
+
+/**
+ * cros_ec_check_features() - Test for the presence of EC features
+ *
+ * @ec: EC device, does not have to be connected directly to the AP,
+ * can be daisy chained through another device.
+ * @feature: One of ec_feature_code bit.
+ *
+ * Call this function to test whether the ChromeOS EC supports a feature.
+ *
+ * Return: 1 if supported, 0 if not
+ */
+int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ if (ec->features[0] == -1U && ec->features[1] == -1U) {
+ /* features bitmap not read yet */
+ msg = kzalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
+ msg->insize = sizeof(ec->features);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
+ ret, msg->result);
+ memset(ec->features, 0, sizeof(ec->features));
+ } else {
+ memcpy(ec->features, msg->data, sizeof(ec->features));
+ }
+
+ dev_dbg(ec->dev, "EC features %08x %08x\n",
+ ec->features[0], ec->features[1]);
+
+ kfree(msg);
+ }
+
+ return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
+}
+EXPORT_SYMBOL_GPL(cros_ec_check_features);
+
+/**
+ * cros_ec_get_sensor_count() - Return the number of MEMS sensors supported.
+ *
+ * @ec: EC device, does not have to be connected directly to the AP,
+ * can be daisy chained through another device.
+ * Return: < 0 in case of error.
+ */
+int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
+{
+ /*
+	 * Issue a command to get the number of sensors reported.
+ * If not supported, check for legacy mode.
+ */
+ int ret, sensor_count;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+ struct cros_ec_device *ec_dev = ec->ec_dev;
+ u8 status;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 1;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+
+ params = (struct ec_params_motion_sense *)msg->data;
+ params->cmd = MOTIONSENSE_CMD_DUMP;
+
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0) {
+ sensor_count = ret;
+ } else if (msg->result != EC_RES_SUCCESS) {
+ sensor_count = -EPROTO;
+ } else {
+ resp = (struct ec_response_motion_sense *)msg->data;
+ sensor_count = resp->dump.sensor_count;
+ }
+ kfree(msg);
+
+ /*
+	 * Check legacy mode: find out whether sensors are accessible
+	 * via the LPC interface.
+ */
+ if (sensor_count == -EPROTO &&
+ ec->cmd_offset == 0 &&
+ ec_dev->cmd_readmem) {
+ ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS,
+ 1, &status);
+ if (ret >= 0 &&
+ (status & EC_MEMMAP_ACC_STATUS_PRESENCE_BIT)) {
+ /*
+ * We have 2 sensors, one in the lid, one in the base.
+ */
+ sensor_count = 2;
+ } else {
+ /*
+			 * EC uses the LPC interface and no sensors are present.
+ */
+ sensor_count = 0;
+ }
+ } else if (sensor_count == -EPROTO) {
+ /* EC responded, but does not understand DUMP command. */
+ sensor_count = 0;
+ }
+ return sensor_count;
+}
+EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
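
The two helpers exported at the end of this file are meant to be called from sub-drivers that hold a struct cros_ec_dev, such as the new sensor hub below. A short sketch of the intended call sequence (the caller is hypothetical, and the header placement is assumed):

#include <linux/mfd/cros_ec.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>

static int foo_count_ec_sensors(struct cros_ec_dev *ec)
{
	/* The feature bitmap is cached in ec->features after the first call. */
	if (!cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
		return 0;

	/* Negative on transport errors, 0 when no sensors are present. */
	return cros_ec_get_sensor_count(ec);
}
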
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index 0c3738c3244d..bd068afe43b5 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -143,22 +143,11 @@ cros_ec_rpmsg_host_event_function(struct work_struct *host_event_work)
struct cros_ec_rpmsg,
host_event_work);
struct cros_ec_device *ec_dev = dev_get_drvdata(&ec_rpmsg->rpdev->dev);
- bool wake_event = true;
- int ret;
-
- ret = cros_ec_get_next_event(ec_dev, &wake_event);
-
- /*
- * Signal only if wake host events or any interrupt if
- * cros_ec_get_next_event() returned an error (default value for
- * wake_event is true)
- */
- if (wake_event && device_may_wakeup(ec_dev->dev))
- pm_wakeup_event(ec_dev->dev, 0);
+ bool ec_has_more_events;
- if (ret > 0)
- blocking_notifier_call_chain(&ec_dev->event_notifier,
- 0, ec_dev);
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
}
static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
new file mode 100644
index 000000000000..04d8879689e9
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sensor HUB driver that discovers sensors behind a ChromeOS Embedded
+ * Controller.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define DRV_NAME "cros-ec-sensorhub"
+
+static void cros_ec_sensorhub_free_sensor(void *arg)
+{
+ struct platform_device *pdev = arg;
+
+ platform_device_unregister(pdev);
+}
+
+static int cros_ec_sensorhub_allocate_sensor(struct device *parent,
+ char *sensor_name,
+ int sensor_num)
+{
+ struct cros_ec_sensor_platform sensor_platforms = {
+ .sensor_num = sensor_num,
+ };
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(parent, sensor_name,
+ PLATFORM_DEVID_AUTO,
+ &sensor_platforms,
+ sizeof(sensor_platforms));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return devm_add_action_or_reset(parent,
+ cros_ec_sensorhub_free_sensor,
+ pdev);
+}
+
+static int cros_ec_sensorhub_register(struct device *dev,
+ struct cros_ec_sensorhub *sensorhub)
+{
+ int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
+ struct cros_ec_dev *ec = sensorhub->ec;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+ int ret, i, sensor_num;
+ char *name;
+
+ sensor_num = cros_ec_get_sensor_count(ec);
+ if (sensor_num < 0) {
+ dev_err(dev,
+ "Unable to retrieve sensor information (err:%d)\n",
+ sensor_num);
+ return sensor_num;
+ }
+
+ if (sensor_num == 0) {
+ dev_err(dev, "Zero sensors reported.\n");
+ return -EINVAL;
+ }
+
+ /* Prepare a message to send INFO command to each sensor. */
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 1;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+ params = (struct ec_params_motion_sense *)msg->data;
+ resp = (struct ec_response_motion_sense *)msg->data;
+
+ for (i = 0; i < sensor_num; i++) {
+ params->cmd = MOTIONSENSE_CMD_INFO;
+ params->info.sensor_num = i;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ dev_warn(dev, "no info for EC sensor %d : %d/%d\n",
+ i, ret, msg->result);
+ continue;
+ }
+
+ switch (resp->info.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ name = "cros-ec-accel";
+ break;
+ case MOTIONSENSE_TYPE_BARO:
+ name = "cros-ec-baro";
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ name = "cros-ec-gyro";
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ name = "cros-ec-mag";
+ break;
+ case MOTIONSENSE_TYPE_PROX:
+ name = "cros-ec-prox";
+ break;
+ case MOTIONSENSE_TYPE_LIGHT:
+ name = "cros-ec-light";
+ break;
+ case MOTIONSENSE_TYPE_ACTIVITY:
+ name = "cros-ec-activity";
+ break;
+ default:
+ dev_warn(dev, "unknown type %d\n", resp->info.type);
+ continue;
+ }
+
+ ret = cros_ec_sensorhub_allocate_sensor(dev, name, i);
+ if (ret)
+ goto error;
+
+ sensor_type[resp->info.type]++;
+ }
+
+ if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
+ ec->has_kb_wake_angle = true;
+
+ if (cros_ec_check_features(ec,
+ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
+ ret = cros_ec_sensorhub_allocate_sensor(dev,
+ "cros-ec-lid-angle",
+ 0);
+ if (ret)
+ goto error;
+ }
+
+ kfree(msg);
+ return 0;
+
+error:
+ kfree(msg);
+ return ret;
+}
+
+static int cros_ec_sensorhub_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_sensorhub *data;
+ int ret;
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ec = dev_get_drvdata(dev->parent);
+ dev_set_drvdata(dev, data);
+
+ /* Check whether this EC is a sensor hub. */
+ if (cros_ec_check_features(data->ec, EC_FEATURE_MOTION_SENSE)) {
+ ret = cros_ec_sensorhub_register(dev, data);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If the device has sensors but does not claim to
+ * be a sensor hub, we are in legacy mode.
+ */
+ for (i = 0; i < 2; i++) {
+ ret = cros_ec_sensorhub_allocate_sensor(dev,
+ "cros-ec-accel-legacy", i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_sensorhub_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_sensorhub_probe,
+};
+
+module_platform_driver(cros_ec_sensorhub_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS EC MEMS Sensor Hub Driver");
+MODULE_LICENSE("GPL");
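
Each device registered by cros_ec_sensorhub_allocate_sensor() above carries its EC sensor index as platform data, which is how the matching IIO drivers know which sensor to query. A sketch of the consuming side (everything except cros_ec_sensor_platform is hypothetical, and the header placement is assumed):

#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>

static int foo_ec_sensor_probe(struct platform_device *pdev)
{
	struct cros_ec_sensor_platform *plat = dev_get_platdata(&pdev->dev);

	if (!plat)
		return -ENODEV;

	/* plat->sensor_num is the index passed to MOTIONSENSE_CMD_INFO. */
	dev_info(&pdev->dev, "handling EC sensor %d\n", plat->sensor_num);

	return 0;
}
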
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 6f80ff4532ae..5af1d66d9eca 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -98,7 +98,10 @@
TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
- TRACE_SYMBOL(EC_CMD_CODEC_I2S), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
TRACE_SYMBOL(EC_CMD_ACPI_READ), \
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index 2430e8b82810..374cdd1e868a 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -224,6 +224,7 @@ static int cros_usbpd_logger_remove(struct platform_device *pd)
struct logger_data *logger = platform_get_drvdata(pd);
cancel_delayed_work_sync(&logger->log_work);
+ destroy_workqueue(logger->log_workqueue);
return 0;
}
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
index 89007b0bc743..365f30e116ee 100644
--- a/drivers/platform/chrome/wilco_ec/Kconfig
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config WILCO_EC
tristate "ChromeOS Wilco Embedded Controller"
- depends on ACPI && X86 && CROS_EC_LPC
+ depends on ACPI && X86 && CROS_EC_LPC && LEDS_CLASS
help
If you say Y here, you get support for talking to the ChromeOS
Wilco EC over an eSPI bus. This uses a simple byte-level protocol
diff --git a/drivers/platform/chrome/wilco_ec/Makefile b/drivers/platform/chrome/wilco_ec/Makefile
index bc817164596e..ecb3145cab18 100644
--- a/drivers/platform/chrome/wilco_ec/Makefile
+++ b/drivers/platform/chrome/wilco_ec/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-wilco_ec-objs := core.o mailbox.o properties.o sysfs.o
+wilco_ec-objs := core.o keyboard_leds.o mailbox.o \
+ properties.o sysfs.o
obj-$(CONFIG_WILCO_EC) += wilco_ec.o
wilco_ec_debugfs-objs := debugfs.o
obj-$(CONFIG_WILCO_EC_DEBUGFS) += wilco_ec_debugfs.o
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
index 3724bf4b77c6..5210c357feef 100644
--- a/drivers/platform/chrome/wilco_ec/core.c
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -5,10 +5,6 @@
* Copyright 2018 Google LLC
*
* This is the entry point for the drivers that control the Wilco EC.
- * This driver is responsible for several tasks:
- * - Initialize the register interface that is used by wilco_ec_mailbox()
- * - Create a platform device which is picked up by the debugfs driver
- * - Create a platform device which is picked up by the RTC driver
*/
#include <linux/acpi.h>
@@ -87,12 +83,31 @@ static int wilco_ec_probe(struct platform_device *pdev)
goto unregister_debugfs;
}
+ /* Set up the keyboard backlight LEDs. */
+ ret = wilco_keyboard_leds_init(ec);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to initialize keyboard LEDs: %d\n",
+ ret);
+ goto unregister_rtc;
+ }
+
ret = wilco_ec_add_sysfs(ec);
if (ret < 0) {
dev_err(dev, "Failed to create sysfs entries: %d", ret);
goto unregister_rtc;
}
+ /* Register child device to be found by charger config driver. */
+ ec->charger_pdev = platform_device_register_data(dev, "wilco-charger",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ if (IS_ERR(ec->charger_pdev)) {
+ dev_err(dev, "Failed to create charger platform device\n");
+ ret = PTR_ERR(ec->charger_pdev);
+ goto remove_sysfs;
+ }
+
/* Register child device that will be found by the telemetry driver. */
ec->telem_pdev = platform_device_register_data(dev, "wilco_telem",
PLATFORM_DEVID_AUTO,
@@ -100,11 +115,13 @@ static int wilco_ec_probe(struct platform_device *pdev)
if (IS_ERR(ec->telem_pdev)) {
dev_err(dev, "Failed to create telemetry platform device\n");
ret = PTR_ERR(ec->telem_pdev);
- goto remove_sysfs;
+ goto unregister_charge_config;
}
return 0;
+unregister_charge_config:
+ platform_device_unregister(ec->charger_pdev);
remove_sysfs:
wilco_ec_remove_sysfs(ec);
unregister_rtc:
@@ -120,6 +137,7 @@ static int wilco_ec_remove(struct platform_device *pdev)
{
struct wilco_ec_device *ec = platform_get_drvdata(pdev);
+ platform_device_unregister(ec->charger_pdev);
wilco_ec_remove_sysfs(ec);
platform_device_unregister(ec->telem_pdev);
platform_device_unregister(ec->rtc_pdev);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index 8d65a1e2f1a3..df5a5f6c3ec6 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -160,29 +160,29 @@ static const struct file_operations fops_raw = {
#define CMD_KB_CHROME 0x88
#define SUB_CMD_H1_GPIO 0x0A
+#define SUB_CMD_TEST_EVENT 0x0B
-struct h1_gpio_status_request {
+struct ec_request {
u8 cmd; /* Always CMD_KB_CHROME */
u8 reserved;
- u8 sub_cmd; /* Always SUB_CMD_H1_GPIO */
+ u8 sub_cmd;
} __packed;
-struct hi_gpio_status_response {
+struct ec_response {
u8 status; /* 0 if allowed */
- u8 val; /* BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL */
+ u8 val;
} __packed;
-static int h1_gpio_get(void *arg, u64 *val)
+static int send_ec_cmd(struct wilco_ec_device *ec, u8 sub_cmd, u8 *out_val)
{
- struct wilco_ec_device *ec = arg;
- struct h1_gpio_status_request rq;
- struct hi_gpio_status_response rs;
+ struct ec_request rq;
+ struct ec_response rs;
struct wilco_ec_message msg;
int ret;
memset(&rq, 0, sizeof(rq));
rq.cmd = CMD_KB_CHROME;
- rq.sub_cmd = SUB_CMD_H1_GPIO;
+ rq.sub_cmd = sub_cmd;
memset(&msg, 0, sizeof(msg));
msg.type = WILCO_EC_MSG_LEGACY;
@@ -196,14 +196,39 @@ static int h1_gpio_get(void *arg, u64 *val)
if (rs.status)
return -EIO;
- *val = rs.val;
+ *out_val = rs.val;
return 0;
}
+/**
+ * h1_gpio_get() - Gets h1 gpio status.
+ * @arg: The wilco EC device.
+ * @val: BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL
+ */
+static int h1_gpio_get(void *arg, u64 *val)
+{
+ return send_ec_cmd(arg, SUB_CMD_H1_GPIO, (u8 *)val);
+}
+
DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
/**
+ * test_event_set() - Sends command to EC to cause an EC test event.
+ * @arg: The wilco EC device.
+ * @val: unused.
+ */
+static int test_event_set(void *arg, u64 val)
+{
+ u8 ret;
+
+ return send_ec_cmd(arg, SUB_CMD_TEST_EVENT, &ret);
+}
+
+/* Format is unused since it is only required for the get method, which is NULL. */
+DEFINE_DEBUGFS_ATTRIBUTE(fops_test_event, NULL, test_event_set, "%llu\n");
+
+/**
* wilco_ec_debugfs_probe() - Create the debugfs node
* @pdev: The platform device, probably created in core.c
*
@@ -226,6 +251,8 @@ static int wilco_ec_debugfs_probe(struct platform_device *pdev)
debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw);
debugfs_create_file("h1_gpio", 0444, debug_info->dir, ec,
&fops_h1_gpio);
+ debugfs_create_file("test_event", 0200, debug_info->dir, ec,
+ &fops_test_event);
return 0;
}
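
The new write-only test_event node gives userspace a way to make the EC emit a synthetic event, which is handy when exercising the event path end to end. A hedged userspace sketch (the debugfs mount point and directory name are assumptions, not taken from this patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: debugfs at /sys/kernel/debug, directory "wilco_ec". */
	int fd = open("/sys/kernel/debug/wilco_ec/test_event", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The written value is ignored; any write triggers the test event. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
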
diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
new file mode 100644
index 000000000000..bb0edf51dfda
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Keyboard backlight LED driver for the Wilco Embedded Controller
+ *
+ * Copyright 2019 Google LLC
+ *
+ * Since the EC will never change the backlight level of its own accord,
+ * we don't need to implement a brightness_get() method.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/slab.h>
+
+#define WILCO_EC_COMMAND_KBBL 0x75
+#define WILCO_KBBL_MODE_FLAG_PWM BIT(1) /* Set brightness by percent. */
+#define WILCO_KBBL_DEFAULT_BRIGHTNESS 0
+
+struct wilco_keyboard_leds {
+ struct wilco_ec_device *ec;
+ struct led_classdev keyboard;
+};
+
+enum wilco_kbbl_subcommand {
+ WILCO_KBBL_SUBCMD_GET_FEATURES = 0x00,
+ WILCO_KBBL_SUBCMD_GET_STATE = 0x01,
+ WILCO_KBBL_SUBCMD_SET_STATE = 0x02,
+};
+
+/**
+ * struct wilco_keyboard_leds_msg - Message to/from EC for keyboard LED control.
+ * @command: Always WILCO_EC_COMMAND_KBBL.
+ * @status: Set by EC to 0 on success, 0xFF on failure.
+ * @subcmd: One of enum wilco_kbbl_subcommand.
+ * @reserved3: Should be 0.
+ * @mode: Bit flags for used mode, we want to use WILCO_KBBL_MODE_FLAG_PWM.
+ * @reserved5to8: Should be 0.
+ * @percent: Brightness in 0-100. Only meaningful in PWM mode.
+ * @reserved10to15: Should be 0.
+ */
+struct wilco_keyboard_leds_msg {
+ u8 command;
+ u8 status;
+ u8 subcmd;
+ u8 reserved3;
+ u8 mode;
+ u8 reserved5to8[4];
+ u8 percent;
+ u8 reserved10to15[6];
+} __packed;
+
+/* Send a request, get a response, and check that the response is good. */
+static int send_kbbl_msg(struct wilco_ec_device *ec,
+ struct wilco_keyboard_leds_msg *request,
+ struct wilco_keyboard_leds_msg *response)
+{
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = request;
+ msg.request_size = sizeof(*request);
+ msg.response_data = response;
+ msg.response_size = sizeof(*response);
+
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0) {
+ dev_err(ec->dev,
+ "Failed sending keyboard LEDs command: %d", ret);
+ return ret;
+ }
+
+ if (response->status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response->status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_SET_STATE;
+ request.mode = WILCO_KBBL_MODE_FLAG_PWM;
+ request.percent = brightness;
+
+ return send_kbbl_msg(ec, &request, &response);
+}
+
+static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_GET_FEATURES;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ *exists = response.status != 0xFF;
+
+ return 0;
+}
+
+/**
+ * kbbl_init() - Initialize the state of the keyboard backlight.
+ * @ec: EC device to talk to.
+ *
+ * Gets the current brightness, ensuring that the BIOS already initialized the
+ * backlight to PWM mode. If not in PWM mode, then the current brightness is
+ * meaningless, so set the brightness to WILCO_KBBL_DEFAULT_BRIGHTNESS.
+ *
+ * Return: Final brightness of the keyboard, or negative error code on failure.
+ */
+static int kbbl_init(struct wilco_ec_device *ec)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_GET_STATE;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
+ return response.percent;
+
+ ret = set_kbbl(ec, WILCO_KBBL_DEFAULT_BRIGHTNESS);
+ if (ret < 0)
+ return ret;
+
+ return WILCO_KBBL_DEFAULT_BRIGHTNESS;
+}
+
+static int wilco_keyboard_leds_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct wilco_keyboard_leds *wkl =
+ container_of(cdev, struct wilco_keyboard_leds, keyboard);
+ return set_kbbl(wkl->ec, brightness);
+}
+
+int wilco_keyboard_leds_init(struct wilco_ec_device *ec)
+{
+ struct wilco_keyboard_leds *wkl;
+ bool leds_exist;
+ int ret;
+
+ ret = kbbl_exist(ec, &leds_exist);
+ if (ret < 0) {
+ dev_err(ec->dev,
+ "Failed checking keyboard LEDs support: %d", ret);
+ return ret;
+ }
+ if (!leds_exist)
+ return 0;
+
+ wkl = devm_kzalloc(ec->dev, sizeof(*wkl), GFP_KERNEL);
+ if (!wkl)
+ return -ENOMEM;
+
+ wkl->ec = ec;
+ wkl->keyboard.name = "platform::kbd_backlight";
+ wkl->keyboard.max_brightness = 100;
+ wkl->keyboard.flags = LED_CORE_SUSPENDRESUME;
+ wkl->keyboard.brightness_set_blocking = wilco_keyboard_leds_set;
+ ret = kbbl_init(ec);
+ if (ret < 0)
+ return ret;
+ wkl->keyboard.brightness = ret;
+
+ return devm_led_classdev_register(ec->dev, &wkl->keyboard);
+}
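
Because the backlight is registered as an ordinary LED class device named "platform::kbd_backlight", no new ABI is involved; brightness is driven through the standard sysfs node. A small userspace sketch (the sysfs path follows from the LED core naming convention):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/class/leds/platform::kbd_backlight/brightness";
	const char *level = "50";	/* percent, 0-100 per max_brightness */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, level, strlen(level)) < 0)
		perror("write");
	close(fd);
	return 0;
}
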
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index 3b86a21005d3..f0d174b6bb21 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -23,6 +23,26 @@ struct boot_on_ac_request {
u8 reserved7;
} __packed;
+#define CMD_USB_CHARGE 0x39
+
+enum usb_charge_op {
+ USB_CHARGE_GET = 0,
+ USB_CHARGE_SET = 1,
+};
+
+struct usb_charge_request {
+ u8 cmd; /* Always CMD_USB_CHARGE */
+ u8 reserved;
+ u8 op; /* One of enum usb_charge_op */
+ u8 val; /* When setting, either 0 or 1 */
+} __packed;
+
+struct usb_charge_response {
+ u8 reserved;
+ u8 status; /* Set by EC to 0 on success, other value on failure */
+ u8 val; /* When getting, set by EC to either 0 or 1 */
+} __packed;
+
#define CMD_EC_INFO 0x38
enum get_ec_info_op {
CMD_GET_EC_LABEL = 0,
@@ -131,12 +151,83 @@ static ssize_t model_number_show(struct device *dev,
static DEVICE_ATTR_RO(model_number);
+static int send_usb_charge(struct wilco_ec_device *ec,
+ struct usb_charge_request *rq,
+ struct usb_charge_response *rs)
+{
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = rq;
+ msg.request_size = sizeof(*rq);
+ msg.response_data = rs;
+ msg.response_size = sizeof(*rs);
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (rs->status)
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t usb_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct usb_charge_request rq;
+ struct usb_charge_response rs;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_USB_CHARGE;
+ rq.op = USB_CHARGE_GET;
+
+ ret = send_usb_charge(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", rs.val);
+}
+
+static ssize_t usb_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct usb_charge_request rq;
+ struct usb_charge_response rs;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_USB_CHARGE;
+ rq.op = USB_CHARGE_SET;
+ rq.val = val;
+
+ ret = send_usb_charge(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(usb_charge);
static struct attribute *wilco_dev_attrs[] = {
&dev_attr_boot_on_ac.attr,
&dev_attr_build_date.attr,
&dev_attr_build_revision.attr,
&dev_attr_model_number.attr,
+ &dev_attr_usb_charge.attr,
&dev_attr_version.attr,
NULL,
};
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index b9d03c33d8dc..1176d543191a 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -406,8 +406,8 @@ static int telem_device_remove(struct platform_device *pdev)
struct telem_device_data *dev_data = platform_get_drvdata(pdev);
cdev_device_del(&dev_data->cdev, &dev_data->dev);
- put_device(&dev_data->dev);
ida_simple_remove(&telem_ida, MINOR(dev_data->dev.devt));
+ put_device(&dev_data->dev);
return 0;
}
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 77b35df3a801..f3d09b1631e3 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
- depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 530fe7e31397..56e037623b29 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -41,7 +41,19 @@ config MLXBF_TMFIFO
depends on VIRTIO_CONSOLE && VIRTIO_NET
help
Say y here to enable TmFifo support. The TmFifo driver provides
- platform driver support for the TmFifo which supports console
- and networking based on the virtio framework.
+ platform driver support for the TmFifo which supports console
+ and networking based on the virtio framework.
+
+config MLXBF_BOOTCTL
+ tristate "Mellanox BlueField Firmware Boot Control driver"
+ depends on ARM64
+ depends on ACPI
+ help
+ The Mellanox BlueField firmware implements functionality to
+ request swapping the primary and alternate eMMC boot partition,
+ and to set up a watchdog that can undo that swap if the system
+ does not boot up correctly. This driver provides sysfs access
+	  to userspace tools, to be used in conjunction with the eMMC
+	  device driver to perform the necessary initial swap of the boot
+	  partition.
endif # MELLANOX_PLATFORM
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index a229bda18fd9..499623ccf2fe 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -3,6 +3,7 @@
# Makefile for linux/drivers/platform/mellanox
# Mellanox Platform-Specific Drivers
#
+obj-$(CONFIG_MLXBF_BOOTCTL) += mlxbf-bootctl.o
obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
new file mode 100644
index 000000000000..61753b648506
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox boot control driver
+ *
+ * This driver provides a sysfs interface for systems management
+ * software to manage reset-time actions.
+ *
+ * Copyright (C) 2019 Mellanox Technologies
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mlxbf-bootctl.h"
+
+#define MLXBF_BOOTCTL_SB_SECURE_MASK 0x03
+#define MLXBF_BOOTCTL_SB_TEST_MASK 0x0c
+
+#define MLXBF_SB_KEY_NUM 4
+
+/* UUID used to probe ATF service. */
+static const char *mlxbf_bootctl_svc_uuid_str =
+ "89c036b4-e7d7-11e6-8797-001aca00bfc4";
+
+struct mlxbf_bootctl_name {
+ u32 value;
+ const char *name;
+};
+
+static struct mlxbf_bootctl_name boot_names[] = {
+ { MLXBF_BOOTCTL_EXTERNAL, "external" },
+ { MLXBF_BOOTCTL_EMMC, "emmc" },
+ { MLNX_BOOTCTL_SWAP_EMMC, "swap_emmc" },
+ { MLXBF_BOOTCTL_EMMC_LEGACY, "emmc_legacy" },
+ { MLXBF_BOOTCTL_NONE, "none" },
+};
+
+static const char * const mlxbf_bootctl_lifecycle_states[] = {
+ [0] = "Production",
+ [1] = "GA Secured",
+ [2] = "GA Non-Secured",
+ [3] = "RMA",
+};
+
+/* ARM SMC call, which is atomic, so no locking is needed. */
+static int mlxbf_bootctl_smc(unsigned int smc_op, int smc_arg)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+/* Return the action in integer or an error code. */
+static int mlxbf_bootctl_reset_action_to_val(const char *action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(boot_names); i++)
+ if (sysfs_streq(boot_names[i].name, action))
+ return boot_names[i].value;
+
+ return -EINVAL;
+}
+
+/* Return the action in string. */
+static const char *mlxbf_bootctl_action_to_string(int action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(boot_names); i++)
+ if (boot_names[i].value == action)
+ return boot_names[i].name;
+
+ return "invalid action";
+}
+
+static ssize_t post_reset_wdog_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_POST_RESET_WDOG, 0);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t post_reset_wdog_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_SET_POST_RESET_WDOG, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t mlxbf_bootctl_show(int smc_op, char *buf)
+{
+ int action;
+
+ action = mlxbf_bootctl_smc(smc_op, 0);
+ if (action < 0)
+ return action;
+
+ return sprintf(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
+}
+
+static int mlxbf_bootctl_store(int smc_op, const char *buf, size_t count)
+{
+ int ret, action;
+
+ action = mlxbf_bootctl_reset_action_to_val(buf);
+ if (action < 0)
+ return action;
+
+ ret = mlxbf_bootctl_smc(smc_op, action);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t reset_action_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return mlxbf_bootctl_show(MLXBF_BOOTCTL_GET_RESET_ACTION, buf);
+}
+
+static ssize_t reset_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return mlxbf_bootctl_store(MLXBF_BOOTCTL_SET_RESET_ACTION, buf, count);
+}
+
+static ssize_t second_reset_action_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return mlxbf_bootctl_show(MLXBF_BOOTCTL_GET_SECOND_RESET_ACTION, buf);
+}
+
+static ssize_t second_reset_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return mlxbf_bootctl_store(MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION, buf,
+ count);
+}
+
+static ssize_t lifecycle_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int lc_state;
+
+ lc_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+ MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
+ if (lc_state < 0)
+ return lc_state;
+
+ lc_state &=
+ MLXBF_BOOTCTL_SB_TEST_MASK | MLXBF_BOOTCTL_SB_SECURE_MASK;
+
+ /*
+ * If the test bits are set, we specify that the current state may be
+ * due to using the test bits.
+ */
+ if (lc_state & MLXBF_BOOTCTL_SB_TEST_MASK) {
+ lc_state &= MLXBF_BOOTCTL_SB_SECURE_MASK;
+
+ return sprintf(buf, "%s(test)\n",
+ mlxbf_bootctl_lifecycle_states[lc_state]);
+ }
+
+ return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
+}
+
+static ssize_t secure_boot_fuse_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int burnt, valid, key, key_state, buf_len = 0, upper_key_used = 0;
+ const char *status;
+
+ key_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+ MLXBF_BOOTCTL_FUSE_STATUS_KEYS);
+ if (key_state < 0)
+ return key_state;
+
+ /*
+ * key_state contains the bits for 4 Key versions, loaded from eFuses
+ * after a hard reset. Lower 4 bits are a thermometer code indicating
+ * key programming has started for key n (0000 = none, 0001 = version 0,
+ * 0011 = version 1, 0111 = version 2, 1111 = version 3). Upper 4 bits
+ * are a thermometer code indicating key programming has completed for
+ * key n (same encodings as the start bits). This allows for detection
+ * of an interruption in the programming process which has left the key
+ * partially programmed (and thus invalid). The process is to burn the
+ * eFuse for the new key start bit, burn the key eFuses, then burn the
+ * eFuse for the new key complete bit.
+ *
+ * For example 0000_0000: no key valid, 0001_0001: key version 0 valid,
+ * 0011_0011: key 1 version valid, 0011_0111: key version 2 started
+ * programming but did not complete, etc. The most recent key for which
+ * both start and complete bit is set is loaded. On soft reset, this
+ * register is not modified.
+ */
+ for (key = MLXBF_SB_KEY_NUM - 1; key >= 0; key--) {
+ burnt = key_state & BIT(key);
+ valid = key_state & BIT(key + MLXBF_SB_KEY_NUM);
+
+ if (burnt && valid)
+ upper_key_used = 1;
+
+ if (upper_key_used) {
+ if (burnt)
+ status = valid ? "Used" : "Wasted";
+ else
+ status = valid ? "Invalid" : "Skipped";
+ } else {
+ if (burnt)
+ status = valid ? "InUse" : "Incomplete";
+ else
+ status = valid ? "Invalid" : "Free";
+ }
+ buf_len += sprintf(buf + buf_len, "%d:%s ", key, status);
+ }
+ buf_len += sprintf(buf + buf_len, "\n");
+
+ return buf_len;
+}
+
+static DEVICE_ATTR_RW(post_reset_wdog);
+static DEVICE_ATTR_RW(reset_action);
+static DEVICE_ATTR_RW(second_reset_action);
+static DEVICE_ATTR_RO(lifecycle_state);
+static DEVICE_ATTR_RO(secure_boot_fuse_state);
+
+static struct attribute *mlxbf_bootctl_attrs[] = {
+ &dev_attr_post_reset_wdog.attr,
+ &dev_attr_reset_action.attr,
+ &dev_attr_second_reset_action.attr,
+ &dev_attr_lifecycle_state.attr,
+ &dev_attr_secure_boot_fuse_state.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(mlxbf_bootctl);
+
+static const struct acpi_device_id mlxbf_bootctl_acpi_ids[] = {
+ {"MLNXBF04", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, mlxbf_bootctl_acpi_ids);
+
+static bool mlxbf_bootctl_guid_match(const guid_t *guid,
+ const struct arm_smccc_res *res)
+{
+ guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16,
+ res->a2, res->a2 >> 8, res->a2 >> 16,
+ res->a2 >> 24, res->a3, res->a3 >> 8,
+ res->a3 >> 16, res->a3 >> 24);
+
+ return guid_equal(guid, &id);
+}
+
+static int mlxbf_bootctl_probe(struct platform_device *pdev)
+{
+ struct arm_smccc_res res = { 0 };
+ guid_t guid;
+ int ret;
+
+ /* Ensure we have the UUID we expect for this service. */
+ arm_smccc_smc(MLXBF_BOOTCTL_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+ guid_parse(mlxbf_bootctl_svc_uuid_str, &guid);
+ if (!mlxbf_bootctl_guid_match(&guid, &res))
+ return -ENODEV;
+
+ /*
+	 * When the watchdog is used, it sets the boot mode to
+	 * MLXBF_BOOTCTL_SWAP_EMMC in case of boot failures. However, it does
+	 * not clear the state if there is no failure. Restore the default
+	 * boot mode here to avoid any
+ * unnecessary boot partition swapping.
+ */
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_SET_RESET_ACTION,
+ MLXBF_BOOTCTL_EMMC);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Unable to reset the EMMC boot mode\n");
+
+ return 0;
+}
+
+static struct platform_driver mlxbf_bootctl_driver = {
+ .probe = mlxbf_bootctl_probe,
+ .driver = {
+ .name = "mlxbf-bootctl",
+ .groups = mlxbf_bootctl_groups,
+ .acpi_match_table = mlxbf_bootctl_acpi_ids,
+ }
+};
+
+module_platform_driver(mlxbf_bootctl_driver);
+
+MODULE_DESCRIPTION("Mellanox boot control driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mellanox Technologies");
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.h b/drivers/platform/mellanox/mlxbf-bootctl.h
new file mode 100644
index 000000000000..148fdb43b435
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-bootctl.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLXBF_BOOTCTL_H__
+#define __MLXBF_BOOTCTL_H__
+
+/*
+ * Request that the on-chip watchdog be enabled, or disabled, after
+ * the next chip soft reset. This call does not affect the current
+ * status of the on-chip watchdog. If non-zero, the argument
+ * specifies the watchdog interval in seconds. If zero, the watchdog
+ * will not be enabled after the next soft reset. Non-zero errors are
+ * returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_POST_RESET_WDOG 0x82000000
+
+/*
+ * Query the status which has been requested for the on-chip watchdog
+ * after the next chip soft reset. Returns the interval as set by
+ * MLXBF_BOOTCTL_SET_POST_RESET_WDOG.
+ */
+#define MLXBF_BOOTCTL_GET_POST_RESET_WDOG 0x82000001
+
+/*
+ * Request that a specific boot action be taken at the next soft
+ * reset. By default, the boot action is set by external chip pins,
+ * which are sampled on hard reset. Note that the boot action
+ * requested by this call will persist on subsequent resets unless
+ * this service, or the MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION service, is
+ * invoked. See below for the available reset action parameter
+ * values. Non-zero errors are returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_RESET_ACTION 0x82000002
+
+/*
+ * Return the specific boot action which will be taken at the next
+ * soft reset. Returns the reset action (see below for the parameter
+ * values for MLXBF_BOOTCTL_SET_RESET_ACTION).
+ */
+#define MLXBF_BOOTCTL_GET_RESET_ACTION 0x82000003
+
+/*
+ * Request that a specific boot action be taken at the soft reset
+ * after the next soft reset. For a specified valid boot mode, the
+ * effect of this call is identical to that of invoking
+ * MLXBF_BOOTCTL_SET_RESET_ACTION after the next chip soft reset; in
+ * particular, after that reset, the action for the now next reset can
+ * be queried with MLXBF_BOOTCTL_GET_RESET_ACTION and modified with
+ * MLXBF_BOOTCTL_SET_RESET_ACTION. You may also specify the parameter as
+ * MLXBF_BOOTCTL_NONE, which is equivalent to specifying that no call to
+ * MLXBF_BOOTCTL_SET_RESET_ACTION be taken after the next chip soft reset.
+ * This call does not affect the action to be taken at the next soft
+ * reset. Non-zero errors are returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION 0x82000004
+
+/*
+ * Return the specific boot action which will be taken at the soft
+ * reset after the next soft reset; this will be one of the valid
+ * actions for MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION.
+ */
+#define MLXBF_BOOTCTL_GET_SECOND_RESET_ACTION 0x82000005
+
+/*
+ * Return the fuse status of the current chip. The second argument
+ * selects whether the state of the lifecycle fuses or the status of
+ * the secure boot fuse keys should be returned.
+ */
+#define MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS 0x82000006
+
+/* Reset eMMC by programming the RST_N register. */
+#define MLXBF_BOOTCTL_SET_EMMC_RST_N 0x82000007
+
+#define MLXBF_BOOTCTL_GET_DIMM_INFO 0x82000008
+
+/* SMC function IDs for SiP Service queries */
+#define MLXBF_BOOTCTL_SIP_SVC_CALL_COUNT 0x8200ff00
+#define MLXBF_BOOTCTL_SIP_SVC_UID 0x8200ff01
+#define MLXBF_BOOTCTL_SIP_SVC_VERSION 0x8200ff03
+
+/* ARM Standard Service Calls version numbers */
+#define MLXBF_BOOTCTL_SVC_VERSION_MAJOR 0x0
+#define MLXBF_BOOTCTL_SVC_VERSION_MINOR 0x2
+
+/* Number of svc calls defined. */
+#define MLXBF_BOOTCTL_NUM_SVC_CALLS 12
+
+/* Valid reset actions for MLXBF_BOOTCTL_SET_RESET_ACTION. */
+#define MLXBF_BOOTCTL_EXTERNAL 0 /* Not boot from eMMC */
+#define MLXBF_BOOTCTL_EMMC 1 /* From primary eMMC boot partition */
+#define MLNX_BOOTCTL_SWAP_EMMC 2 /* Swap eMMC boot partitions and reboot */
+#define MLXBF_BOOTCTL_EMMC_LEGACY 3 /* From primary eMMC in legacy mode */
+
+/* Valid arguments for requesting the fuse status. */
+#define MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE 0 /* Return lifecycle status. */
+#define MLXBF_BOOTCTL_FUSE_STATUS_KEYS 1 /* Return secure boot key status */
+
+/* Additional value to disable the MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION. */
+#define MLXBF_BOOTCTL_NONE 0x7fffffff /* Don't change next boot action */
+
+#endif /* __MLXBF_BOOTCTL_H__ */
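
A minimal sketch of how a kernel caller can drive one of these SMC IDs directly, mirroring the arm_smccc_smc() call in mlxbf_bootctl_probe(); the function name example_get_reset_action is hypothetical, and the driver itself wraps this pattern in its mlxbf_bootctl_smc() helper.

#include <linux/arm-smccc.h>

#include "mlxbf-bootctl.h"

/* Query the boot action that will be taken at the next soft reset. */
static int example_get_reset_action(void)
{
	struct arm_smccc_res res;

	arm_smccc_smc(MLXBF_BOOTCTL_GET_RESET_ACTION, 0, 0, 0, 0, 0, 0, 0,
		      &res);

	/* a0 carries either a negative error or one of the reset actions above */
	return res.a0;
}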
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ae21d08c65e8..27d5b40fb717 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -94,7 +94,6 @@ config ASUS_LAPTOP
depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_SPARSEKMAP
- select INPUT_POLLDEV
---help---
This is a driver for Asus laptops, Lenovo SL and the Pegatron
Lucid tablet. It may also support some MEDION, JVC or VICTOR
@@ -259,7 +258,7 @@ config DELL_RBU
DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
supporting application to communicate with the BIOS regarding the new
image for the image update to take effect.
- See <file:Documentation/driver-api/dell_rbu.rst> for more details on the driver.
+ See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
config FUJITSU_LAPTOP
@@ -623,7 +622,6 @@ config THINKPAD_ACPI_HOTKEY_POLL
config SENSORS_HDAPS
tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
depends on INPUT
- select INPUT_POLLDEV
help
This driver provides support for the IBM Hard Drive Active Protection
System (hdaps), which provides an accelerometer and other misc. data.
@@ -806,7 +804,6 @@ config PEAQ_WMI
tristate "PEAQ 2-in-1 WMI hotkey driver"
depends on ACPI_WMI
depends on INPUT
- select INPUT_POLLDEV
help
Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
@@ -834,7 +831,6 @@ config ACPI_TOSHIBA
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
depends on IIO
- select INPUT_POLLDEV
select INPUT_SPARSEKMAP
---help---
This driver adds support for access to certain system settings
@@ -931,14 +927,20 @@ config INTEL_CHT_INT33FE
 This driver adds support for the INT33FE ACPI device found on
some Intel Cherry Trail devices.
+ There are two kinds of INT33FE ACPI device: one for hardware with a
+ USB Type-C connector and one for hardware with a Micro-B connector.
+ This driver supports both.
+
The INT33FE ACPI device has a CRS table with I2cSerialBusV2
- resources for 3 devices: Maxim MAX17047 Fuel Gauge Controller,
+ resources for a Fuel Gauge Controller and (in the Type-C variant) the
FUSB302 USB Type-C Controller and PI3USB30532 USB switch.
This driver instantiates i2c-clients for these, so that standard
 i2c drivers for these chips can bind to them.
If you enable this driver it is advised to also select
- CONFIG_TYPEC_FUSB302=m and CONFIG_BATTERY_MAX17042=m.
+ CONFIG_BATTERY_BQ27XXX=m or CONFIG_BATTERY_BQ27XXX_I2C=m for the Micro-B
+ device, and CONFIG_TYPEC_FUSB302=m and CONFIG_BATTERY_MAX17042=m
+ for the Type-C device.
+
config INTEL_INT0002_VGPIO
tristate "Intel ACPI INT0002 Virtual GPIO driver"
@@ -1305,7 +1307,8 @@ config INTEL_ATOMISP2_PM
will be called intel_atomisp2_pm.
config HUAWEI_WMI
- tristate "Huawei WMI hotkeys driver"
+ tristate "Huawei WMI laptop extras driver"
+ depends on ACPI_BATTERY
depends on ACPI_WMI
depends on INPUT
select INPUT_SPARSEKMAP
@@ -1314,9 +1317,8 @@ config HUAWEI_WMI
select LEDS_TRIGGER_AUDIO
select NEW_LEDS
help
- This driver provides support for Huawei WMI hotkeys.
- It enables the missing keys and adds support to the micmute
- LED found on some of these laptops.
+ This driver provides support for Huawei WMI hotkeys, battery charge
+ control, fn-lock, mic-mute LED, and other extra features.
To compile this driver as a module, choose M here: the module
will be called huawei-wmi.
@@ -1337,6 +1339,19 @@ config PCENGINES_APU2
source "drivers/platform/x86/intel_speed_select_if/Kconfig"
+config SYSTEM76_ACPI
+ tristate "System76 ACPI Driver"
+ depends on ACPI
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ help
+ This is a driver for System76 laptops running open firmware. It adds
+ support for Fn-Fx key combinations, keyboard backlight, and airplane mode
+ LEDs.
+
+ If you have a System76 laptop running open firmware, say Y or M here.
+
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 415104033060..42d85a00be4e 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -61,6 +61,10 @@ obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
obj-$(CONFIG_TOSHIBA_HAPS) += toshiba_haps.o
obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
+intel_cht_int33fe-objs := intel_cht_int33fe_common.o \
+ intel_cht_int33fe_typec.o \
+ intel_cht_int33fe_microb.o
+
obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
@@ -100,3 +104,4 @@ obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
+obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 5ea8da5f0f70..8cc86f4e3ac1 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -4,7 +4,7 @@
* of the aspire one netbook, turns on/off the fan
* as soon as the upper/lower threshold is reached.
*
- * (C) 2009 - Peter Feuerer peter (a) piie.net
+ * (C) 2009 - Peter Kaestle peter (a) piie.net
* http://piie.net
* 2009 Borislav Petkov bp (a) alien8.de
*
@@ -224,6 +224,8 @@ static const struct bios_settings bios_tbl[] __initconst = {
{"Acer", "Aspire 5739G", "V1.3311", 0x55, 0x58, {0x20, 0x00}, 0},
/* Acer TravelMate 7730 */
{"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00}, 0},
+ /* Acer Aspire 7551 */
+ {"Acer", "Aspire 7551", "V1.18", 0x93, 0xa8, {0x14, 0x04}, 1},
/* Acer TravelMate TM8573T */
{"Acer", "TM8573T", "V1.13", 0x93, 0xa8, {0x14, 0x04}, 1},
/* Gateway */
@@ -801,7 +803,7 @@ static void __exit acerhdf_exit(void)
}
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Peter Feuerer");
+MODULE_AUTHOR("Peter Kaestle");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
@@ -815,6 +817,7 @@ MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5739G:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*One*753:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5315:");
MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*7551:");
MODULE_ALIAS("dmi:*:*Acer*:TM8573T:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index ca65e1039f92..a666fbc2e73b 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -34,7 +34,6 @@
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <linux/input-polldev.h>
#include <linux/rfkill.h>
#include <linux/slab.h>
#include <linux/dmi.h>
@@ -244,7 +243,7 @@ struct asus_laptop {
struct input_dev *inputdev;
struct key_entry *keymap;
- struct input_polled_dev *pega_accel_poll;
+ struct input_dev *pega_accel_poll;
struct asus_led wled;
struct asus_led bled;
@@ -446,9 +445,9 @@ static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method)
return clamp_val((short)val, -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP);
}
-static void pega_accel_poll(struct input_polled_dev *ipd)
+static void pega_accel_poll(struct input_dev *input)
{
- struct device *parent = ipd->input->dev.parent;
+ struct device *parent = input->dev.parent;
struct asus_laptop *asus = dev_get_drvdata(parent);
/* In some cases, the very first call to poll causes a
@@ -457,10 +456,10 @@ static void pega_accel_poll(struct input_polled_dev *ipd)
* device, and perhaps a firmware bug. Fake the first report. */
if (!asus->pega_acc_live) {
asus->pega_acc_live = true;
- input_report_abs(ipd->input, ABS_X, 0);
- input_report_abs(ipd->input, ABS_Y, 0);
- input_report_abs(ipd->input, ABS_Z, 0);
- input_sync(ipd->input);
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_Z, 0);
+ input_sync(input);
return;
}
@@ -471,25 +470,24 @@ static void pega_accel_poll(struct input_polled_dev *ipd)
/* Note transform, convert to "right/up/out" in the native
* landscape orientation (i.e. the vector is the direction of
* "real up" in the device's cartiesian coordinates). */
- input_report_abs(ipd->input, ABS_X, -asus->pega_acc_x);
- input_report_abs(ipd->input, ABS_Y, -asus->pega_acc_y);
- input_report_abs(ipd->input, ABS_Z, asus->pega_acc_z);
- input_sync(ipd->input);
+ input_report_abs(input, ABS_X, -asus->pega_acc_x);
+ input_report_abs(input, ABS_Y, -asus->pega_acc_y);
+ input_report_abs(input, ABS_Z, asus->pega_acc_z);
+ input_sync(input);
}
static void pega_accel_exit(struct asus_laptop *asus)
{
if (asus->pega_accel_poll) {
- input_unregister_polled_device(asus->pega_accel_poll);
- input_free_polled_device(asus->pega_accel_poll);
+ input_unregister_device(asus->pega_accel_poll);
+ asus->pega_accel_poll = NULL;
}
- asus->pega_accel_poll = NULL;
}
static int pega_accel_init(struct asus_laptop *asus)
{
int err;
- struct input_polled_dev *ipd;
+ struct input_dev *input;
if (!asus->is_pega_lucid)
return -ENODEV;
@@ -499,37 +497,39 @@ static int pega_accel_init(struct asus_laptop *asus)
acpi_check_handle(asus->handle, METHOD_XLRZ, NULL))
return -ENODEV;
- ipd = input_allocate_polled_device();
- if (!ipd)
+ input = input_allocate_device();
+ if (!input)
return -ENOMEM;
- ipd->poll = pega_accel_poll;
- ipd->poll_interval = 125;
- ipd->poll_interval_min = 50;
- ipd->poll_interval_max = 2000;
-
- ipd->input->name = PEGA_ACCEL_DESC;
- ipd->input->phys = PEGA_ACCEL_NAME "/input0";
- ipd->input->dev.parent = &asus->platform_device->dev;
- ipd->input->id.bustype = BUS_HOST;
+ input->name = PEGA_ACCEL_DESC;
+ input->phys = PEGA_ACCEL_NAME "/input0";
+ input->dev.parent = &asus->platform_device->dev;
+ input->id.bustype = BUS_HOST;
- set_bit(EV_ABS, ipd->input->evbit);
- input_set_abs_params(ipd->input, ABS_X,
+ input_set_abs_params(input, ABS_X,
-PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
- input_set_abs_params(ipd->input, ABS_Y,
+ input_set_abs_params(input, ABS_Y,
-PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
- input_set_abs_params(ipd->input, ABS_Z,
+ input_set_abs_params(input, ABS_Z,
-PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
- err = input_register_polled_device(ipd);
+ err = input_setup_polling(input, pega_accel_poll);
if (err)
goto exit;
- asus->pega_accel_poll = ipd;
+ input_set_poll_interval(input, 125);
+ input_set_min_poll_interval(input, 50);
+ input_set_max_poll_interval(input, 2000);
+
+ err = input_register_device(input);
+ if (err)
+ goto exit;
+
+ asus->pega_accel_poll = input;
return 0;
exit:
- input_free_polled_device(ipd);
+ input_free_device(input);
return err;
}
@@ -1550,8 +1550,7 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
/* Accelerometer "coarse orientation change" event */
if (asus->pega_accel_poll && event == 0xEA) {
- kobject_uevent(&asus->pega_accel_poll->input->dev.kobj,
- KOBJ_CHANGE);
+ kobject_uevent(&asus->pega_accel_poll->dev.kobj, KOBJ_CHANGE);
return ;
}
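
The conversion above (and the matching hdaps.c conversion further down) replaces the removed input-polldev helpers with polling support built into the core input API: the poll callback now receives the input_dev directly, and the polled device is registered with the ordinary input_register_device(). A minimal sketch of that pattern, with hypothetical device and function names:

#include <linux/input.h>

static void example_poll(struct input_dev *input)
{
	/* read the hardware and report through the regular input calls */
	input_report_abs(input, ABS_X, 0);
	input_sync(input);
}

static int example_register(struct device *parent)
{
	struct input_dev *input;
	int err;

	input = devm_input_allocate_device(parent);
	if (!input)
		return -ENOMEM;

	input->name = "example-accel";
	input_set_abs_params(input, ABS_X, -256, 256, 0, 0);

	err = input_setup_polling(input, example_poll);
	if (err)
		return err;

	input_set_poll_interval(input, 125);	/* default interval, ms */
	input_set_min_poll_interval(input, 50);
	input_set_max_poll_interval(input, 2000);

	return input_register_device(input);
}

Because the allocation here is device-managed, no explicit free is needed on the error path; the non-devm variant in hdaps.c pairs input_allocate_device() with input_free_device() instead.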
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index d27be2836bc2..74e988f839e8 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -33,6 +33,7 @@
struct quirk_entry {
bool touchpad_led;
+ bool kbd_led_not_present;
bool kbd_led_levels_off_1;
bool kbd_missing_ac_tag;
@@ -73,6 +74,10 @@ static struct quirk_entry quirk_dell_latitude_e6410 = {
.kbd_led_levels_off_1 = true,
};
+static struct quirk_entry quirk_dell_inspiron_1012 = {
+ .kbd_led_not_present = true,
+};
+
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
@@ -310,6 +315,24 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_latitude_e6410,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 1012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ .driver_data = &quirk_dell_inspiron_1012,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 1018",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1018"),
+ },
+ .driver_data = &quirk_dell_inspiron_1012,
+ },
{ }
};
@@ -1493,6 +1516,9 @@ static void kbd_init(void)
{
int ret;
+ if (quirks && quirks->kbd_led_not_present)
+ return;
+
ret = kbd_init_info();
kbd_init_tokens();
diff --git a/drivers/platform/x86/dell_rbu.c b/drivers/platform/x86/dell_rbu.c
index 3691391fea6b..7d5453326b43 100644
--- a/drivers/platform/x86/dell_rbu.c
+++ b/drivers/platform/x86/dell_rbu.c
@@ -24,7 +24,7 @@
* on every time the packet data is written. This driver requires an
* application to break the BIOS image in to fixed sized packet chunks.
*
- * See Documentation/driver-api/dell_rbu.rst for more info.
+ * See Documentation/admin-guide/dell_rbu.rst for more info.
*/
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 3adcb0de0193..04c4da6692d7 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -18,7 +18,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -59,7 +59,7 @@
#define HDAPS_BOTH_AXES (HDAPS_X_AXIS | HDAPS_Y_AXIS)
static struct platform_device *pdev;
-static struct input_polled_dev *hdaps_idev;
+static struct input_dev *hdaps_idev;
static unsigned int hdaps_invert;
static u8 km_activity;
static int rest_x;
@@ -318,9 +318,8 @@ static void hdaps_calibrate(void)
__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
}
-static void hdaps_mousedev_poll(struct input_polled_dev *dev)
+static void hdaps_mousedev_poll(struct input_dev *input_dev)
{
- struct input_dev *input_dev = dev->input;
int x, y;
mutex_lock(&hdaps_mtx);
@@ -531,7 +530,6 @@ static const struct dmi_system_id hdaps_whitelist[] __initconst = {
static int __init hdaps_init(void)
{
- struct input_dev *idev;
int ret;
if (!dmi_check_system(hdaps_whitelist)) {
@@ -559,31 +557,32 @@ static int __init hdaps_init(void)
if (ret)
goto out_device;
- hdaps_idev = input_allocate_polled_device();
+ hdaps_idev = input_allocate_device();
if (!hdaps_idev) {
ret = -ENOMEM;
goto out_group;
}
- hdaps_idev->poll = hdaps_mousedev_poll;
- hdaps_idev->poll_interval = HDAPS_POLL_INTERVAL;
-
/* initial calibrate for the input device */
hdaps_calibrate();
/* initialize the input class */
- idev = hdaps_idev->input;
- idev->name = "hdaps";
- idev->phys = "isa1600/input0";
- idev->id.bustype = BUS_ISA;
- idev->dev.parent = &pdev->dev;
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X,
+ hdaps_idev->name = "hdaps";
+ hdaps_idev->phys = "isa1600/input0";
+ hdaps_idev->id.bustype = BUS_ISA;
+ hdaps_idev->dev.parent = &pdev->dev;
+ input_set_abs_params(hdaps_idev, ABS_X,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
- input_set_abs_params(idev, ABS_Y,
+ input_set_abs_params(hdaps_idev, ABS_Y,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
- ret = input_register_polled_device(hdaps_idev);
+ ret = input_setup_polling(hdaps_idev, hdaps_mousedev_poll);
+ if (ret)
+ goto out_idev;
+
+ input_set_poll_interval(hdaps_idev, HDAPS_POLL_INTERVAL);
+
+ ret = input_register_device(hdaps_idev);
if (ret)
goto out_idev;
@@ -591,7 +590,7 @@ static int __init hdaps_init(void)
return 0;
out_idev:
- input_free_polled_device(hdaps_idev);
+ input_free_device(hdaps_idev);
out_group:
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
out_device:
@@ -607,8 +606,7 @@ out:
static void __exit hdaps_exit(void)
{
- input_unregister_polled_device(hdaps_idev);
- input_free_polled_device(hdaps_idev);
+ input_unregister_device(hdaps_idev);
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
platform_device_unregister(pdev);
platform_driver_unregister(&hdaps_driver);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 6bcbbb375401..9579a706fc08 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -65,7 +65,7 @@ struct bios_args {
u32 command;
u32 commandtype;
u32 datasize;
- u32 data;
+ u8 data[128];
};
enum hp_wmi_commandtype {
@@ -216,7 +216,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
.command = command,
.commandtype = query,
.datasize = insize,
- .data = 0,
+ .data = { 0 },
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -228,7 +228,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
if (WARN_ON(insize > sizeof(args.data)))
return -EINVAL;
- memcpy(&args.data, buffer, insize);
+ memcpy(&args.data[0], buffer, insize);
wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output);
@@ -380,7 +380,7 @@ static int hp_wmi_rfkill2_refresh(void)
int err, i;
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
- 0, sizeof(state));
+ sizeof(state), sizeof(state));
if (err)
return err;
@@ -778,7 +778,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
int err, i;
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
- 0, sizeof(state));
+ sizeof(state), sizeof(state));
if (err)
return err < 0 ? err : -EINVAL;
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index 195a7f3638cb..a2d846c4a7ee 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -1,32 +1,77 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Huawei WMI hotkeys
+ * Huawei WMI laptop extras driver
*
* Copyright (C) 2018 Ayman Bagabas <ayman.bagabas@gmail.com>
*/
#include <linux/acpi.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
#include <linux/wmi.h>
+#include <acpi/battery.h>
/*
* Huawei WMI GUIDs
*/
-#define WMI0_EVENT_GUID "59142400-C6A3-40fa-BADB-8A2652834100"
-#define AMW0_EVENT_GUID "ABBC0F5C-8EA1-11D1-A000-C90629100000"
+#define HWMI_METHOD_GUID "ABBC0F5B-8EA1-11D1-A000-C90629100000"
+#define HWMI_EVENT_GUID "ABBC0F5C-8EA1-11D1-A000-C90629100000"
+/* Legacy GUIDs */
#define WMI0_EXPENSIVE_GUID "39142400-C6A3-40fa-BADB-8A2652834100"
+#define WMI0_EVENT_GUID "59142400-C6A3-40fa-BADB-8A2652834100"
+
+/* HWMI commands */
+
+enum {
+ BATTERY_THRESH_GET = 0x00001103, /* \GBTT */
+ BATTERY_THRESH_SET = 0x00001003, /* \SBTT */
+ FN_LOCK_GET = 0x00000604, /* \GFRS */
+ FN_LOCK_SET = 0x00000704, /* \SFRS */
+ MICMUTE_LED_SET = 0x00000b04, /* \SMLS */
+};
+
+union hwmi_arg {
+ u64 cmd;
+ u8 args[8];
+};
+
+struct quirk_entry {
+ bool battery_reset;
+ bool ec_micmute;
+ bool report_brightness;
+};
+
+static struct quirk_entry *quirks;
-struct huawei_wmi_priv {
- struct input_dev *idev;
+struct huawei_wmi_debug {
+ struct dentry *root;
+ u64 arg;
+};
+
+struct huawei_wmi {
+ bool battery_available;
+ bool fn_lock_available;
+
+ struct huawei_wmi_debug debug;
+ struct input_dev *idev[2];
struct led_classdev cdev;
- acpi_handle handle;
- char *acpi_method;
+ struct device *dev;
+
+ struct mutex wmi_lock;
};
+static struct huawei_wmi *huawei_wmi;
+
static const struct key_entry huawei_wmi_keymap[] = {
{ KE_KEY, 0x281, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x282, { KEY_BRIGHTNESSUP } },
@@ -37,73 +82,614 @@ static const struct key_entry huawei_wmi_keymap[] = {
{ KE_KEY, 0x289, { KEY_WLAN } },
// Huawei |M| key
{ KE_KEY, 0x28a, { KEY_CONFIG } },
- // Keyboard backlight
+ // Keyboard backlit
{ KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } },
{ KE_IGNORE, 0x294, { KEY_KBDILLUMUP } },
{ KE_IGNORE, 0x295, { KEY_KBDILLUMUP } },
{ KE_END, 0 }
};
+static int battery_reset = -1;
+static int report_brightness = -1;
+
+module_param(battery_reset, bint, 0444);
+MODULE_PARM_DESC(battery_reset,
+ "Reset battery charge values to (0-0) before disabling it using (0-100)");
+module_param(report_brightness, bint, 0444);
+MODULE_PARM_DESC(report_brightness,
+ "Report brightness keys.");
+
+/* Quirks */
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+static struct quirk_entry quirk_unknown = {
+};
+
+static struct quirk_entry quirk_battery_reset = {
+ .battery_reset = true,
+};
+
+static struct quirk_entry quirk_matebook_x = {
+ .ec_micmute = true,
+ .report_brightness = true,
+};
+
+static const struct dmi_system_id huawei_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "Huawei MACH-WX9",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MACH-WX9"),
+ },
+ .driver_data = &quirk_battery_reset
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Huawei MateBook X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HUAWEI MateBook X")
+ },
+ .driver_data = &quirk_matebook_x
+ },
+ { }
+};
+
+/* Utils */
+
+static int huawei_wmi_call(struct huawei_wmi *huawei,
+ struct acpi_buffer *in, struct acpi_buffer *out)
+{
+ acpi_status status;
+
+ mutex_lock(&huawei->wmi_lock);
+ status = wmi_evaluate_method(HWMI_METHOD_GUID, 0, 1, in, out);
+ mutex_unlock(&huawei->wmi_lock);
+ if (ACPI_FAILURE(status)) {
+ dev_err(huawei->dev, "Failed to evaluate wmi method\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* HWMI takes a 64 bit input and returns either a package with 2 buffers, one of
+ * 4 bytes and the other of 256 bytes, or one buffer of size 0x104 (260) bytes.
+ * The first 4 bytes are always ignored: we drop the 4-byte buffer if we got a
+ * package, or skip the first 4 bytes of a 0x104 buffer. The first byte of
+ * the remaining 0x100 sized buffer has the return status of every call. In case
+ * the return status is non-zero, we return -ENODEV but still copy the returned
+ * buffer to the given buffer parameter (buf).
+ */
+static int huawei_wmi_cmd(u64 arg, u8 *buf, size_t buflen)
+{
+ struct huawei_wmi *huawei = huawei_wmi;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer in;
+ union acpi_object *obj;
+ size_t len;
+ int err, i;
+
+ in.length = sizeof(arg);
+ in.pointer = &arg;
+
+ /* Some models require calling HWMI twice to execute a command. We evaluate
+ * HWMI and if we get a non-zero return status we evaluate it again.
+ */
+ for (i = 0; i < 2; i++) {
+ err = huawei_wmi_call(huawei, &in, &out);
+ if (err)
+ goto fail_cmd;
+
+ obj = out.pointer;
+ if (!obj) {
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ switch (obj->type) {
+ /* Models that implement both "legacy" and HWMI tend to return a 0x104
+ * sized buffer instead of a package of 0x4 and 0x100 buffers.
+ */
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length == 0x104) {
+ // Skip the first 4 bytes.
+ obj->buffer.pointer += 4;
+ len = 0x100;
+ } else {
+ dev_err(huawei->dev, "Bad buffer length, got %d\n", obj->buffer.length);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ break;
+ /* HWMI returns a package with 2 buffer elements, one of 4 bytes and the
+ * other is 256 bytes.
+ */
+ case ACPI_TYPE_PACKAGE:
+ if (obj->package.count != 2) {
+ dev_err(huawei->dev, "Bad package count, got %d\n", obj->package.count);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ obj = &obj->package.elements[1];
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(huawei->dev, "Bad package element type, got %d\n", obj->type);
+ err = -EIO;
+ goto fail_cmd;
+ }
+ len = obj->buffer.length;
+
+ break;
+ /* Shouldn't get here! */
+ default:
+ dev_err(huawei->dev, "Unexpected obj type, got: %d\n", obj->type);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ if (!*obj->buffer.pointer)
+ break;
+ }
+
+ err = (*obj->buffer.pointer) ? -ENODEV : 0;
+
+ if (buf) {
+ len = min(buflen, len);
+ memcpy(buf, obj->buffer.pointer, len);
+ }
+
+fail_cmd:
+ kfree(out.pointer);
+ return err;
+}
+
+/* LEDs */
+
static int huawei_wmi_micmute_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct huawei_wmi_priv *priv = dev_get_drvdata(led_cdev->dev->parent);
- acpi_status status;
- union acpi_object args[3];
- struct acpi_object_list arg_list = {
- .pointer = args,
- .count = ARRAY_SIZE(args),
- };
-
- args[0].type = args[1].type = args[2].type = ACPI_TYPE_INTEGER;
- args[1].integer.value = 0x04;
-
- if (strcmp(priv->acpi_method, "SPIN") == 0) {
- args[0].integer.value = 0;
- args[2].integer.value = brightness ? 1 : 0;
- } else if (strcmp(priv->acpi_method, "WPIN") == 0) {
- args[0].integer.value = 1;
- args[2].integer.value = brightness ? 0 : 1;
+ /* This is a workaround until the "legacy" interface is implemented. */
+ if (quirks && quirks->ec_micmute) {
+ char *acpi_method;
+ acpi_handle handle;
+ acpi_status status;
+ union acpi_object args[3];
+ struct acpi_object_list arg_list = {
+ .pointer = args,
+ .count = ARRAY_SIZE(args),
+ };
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -ENODEV;
+
+ args[0].type = args[1].type = args[2].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = 0x04;
+
+ if (acpi_has_method(handle, "SPIN")) {
+ acpi_method = "SPIN";
+ args[0].integer.value = 0;
+ args[2].integer.value = brightness ? 1 : 0;
+ } else if (acpi_has_method(handle, "WPIN")) {
+ acpi_method = "WPIN";
+ args[0].integer.value = 1;
+ args[2].integer.value = brightness ? 0 : 1;
+ } else {
+ return -ENODEV;
+ }
+
+ status = acpi_evaluate_object(handle, acpi_method, &arg_list, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
} else {
+ union hwmi_arg arg;
+
+ arg.cmd = MICMUTE_LED_SET;
+ arg.args[2] = brightness;
+
+ return huawei_wmi_cmd(arg.cmd, NULL, 0);
+ }
+}
+
+static void huawei_wmi_leds_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->cdev.name = "platform::micmute";
+ huawei->cdev.max_brightness = 1;
+ huawei->cdev.brightness_set_blocking = &huawei_wmi_micmute_led_set;
+ huawei->cdev.default_trigger = "audio-micmute";
+ huawei->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ huawei->cdev.dev = dev;
+ huawei->cdev.flags = LED_CORE_SUSPENDRESUME;
+
+ devm_led_classdev_register(dev, &huawei->cdev);
+}
+
+/* Battery protection */
+
+static int huawei_wmi_battery_get(int *start, int *end)
+{
+ u8 ret[0x100];
+ int err, i;
+
+ err = huawei_wmi_cmd(BATTERY_THRESH_GET, ret, 0x100);
+ if (err)
+ return err;
+
+ /* Find the last two non-zero values. Return status is ignored. */
+ i = 0xff;
+ do {
+ if (start)
+ *start = ret[i-1];
+ if (end)
+ *end = ret[i];
+ } while (i > 2 && !ret[i--]);
+
+ return 0;
+}
+
+static int huawei_wmi_battery_set(int start, int end)
+{
+ union hwmi_arg arg;
+ int err;
+
+ if (start < 0 || end < 0 || start > 100 || end > 100)
return -EINVAL;
+
+ arg.cmd = BATTERY_THRESH_SET;
+ arg.args[2] = start;
+ arg.args[3] = end;
+
+ /* This is an edge case where some models turn battery protection
+ * off without changing their threshold values. We clear the
+ * values before turning off protection. Sometimes we need a sleep delay to
+ * make sure these values make their way to EC memory.
+ */
+ if (quirks && quirks->battery_reset && start == 0 && end == 100) {
+ err = huawei_wmi_battery_set(0, 0);
+ if (err)
+ return err;
+
+ msleep(1000);
}
- status = acpi_evaluate_object(priv->handle, priv->acpi_method, &arg_list, NULL);
- if (ACPI_FAILURE(status))
- return -ENXIO;
+ err = huawei_wmi_cmd(arg.cmd, NULL, 0);
+
+ return err;
+}
+
+static ssize_t charge_control_start_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, start;
+
+ err = huawei_wmi_battery_get(&start, NULL);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", start);
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, end;
+
+ err = huawei_wmi_battery_get(NULL, &end);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", end);
+}
+
+static ssize_t charge_control_thresholds_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(&start, &end);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d %d\n", start, end);
+}
+
+static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(NULL, &end);
+ if (err)
+ return err;
+
+ if (sscanf(buf, "%d", &start) != 1)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(&start, NULL);
+ if (err)
+ return err;
+
+ if (sscanf(buf, "%d", &end) != 1)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static ssize_t charge_control_thresholds_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ if (sscanf(buf, "%d %d", &start, &end) != 2)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+static DEVICE_ATTR_RW(charge_control_thresholds);
+
+static int huawei_wmi_battery_add(struct power_supply *battery)
+{
+ device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+ device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold);
return 0;
}
-static int huawei_wmi_leds_setup(struct wmi_device *wdev)
+static int huawei_wmi_battery_remove(struct power_supply *battery)
{
- struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+ device_remove_file(&battery->dev, &dev_attr_charge_control_end_threshold);
- priv->handle = ec_get_handle();
- if (!priv->handle)
- return 0;
+ return 0;
+}
- if (acpi_has_method(priv->handle, "SPIN"))
- priv->acpi_method = "SPIN";
- else if (acpi_has_method(priv->handle, "WPIN"))
- priv->acpi_method = "WPIN";
- else
- return 0;
+static struct acpi_battery_hook huawei_wmi_battery_hook = {
+ .add_battery = huawei_wmi_battery_add,
+ .remove_battery = huawei_wmi_battery_remove,
+ .name = "Huawei Battery Extension"
+};
+
+static void huawei_wmi_battery_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
- priv->cdev.name = "platform::micmute";
- priv->cdev.max_brightness = 1;
- priv->cdev.brightness_set_blocking = huawei_wmi_micmute_led_set;
- priv->cdev.default_trigger = "audio-micmute";
- priv->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
- priv->cdev.dev = &wdev->dev;
- priv->cdev.flags = LED_CORE_SUSPENDRESUME;
+ huawei->battery_available = true;
+ if (huawei_wmi_battery_get(NULL, NULL)) {
+ huawei->battery_available = false;
+ return;
+ }
- return devm_led_classdev_register(&wdev->dev, &priv->cdev);
+ battery_hook_register(&huawei_wmi_battery_hook);
+ device_create_file(dev, &dev_attr_charge_control_thresholds);
}
-static void huawei_wmi_process_key(struct wmi_device *wdev, int code)
+static void huawei_wmi_battery_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ if (huawei->battery_available) {
+ battery_hook_unregister(&huawei_wmi_battery_hook);
+ device_remove_file(dev, &dev_attr_charge_control_thresholds);
+ }
+}
+
+/* Fn lock */
+
+static int huawei_wmi_fn_lock_get(int *on)
+{
+ u8 ret[0x100] = { 0 };
+ int err, i;
+
+ err = huawei_wmi_cmd(FN_LOCK_GET, ret, 0x100);
+ if (err)
+ return err;
+
+ /* Find the first non-zero value. Return status is ignored. */
+ i = 1;
+ do {
+ if (on)
+ *on = ret[i] - 1; // -1 undefined, 0 off, 1 on.
+ } while (i < 0xff && !ret[i++]);
+
+ return 0;
+}
+
+static int huawei_wmi_fn_lock_set(int on)
+{
+ union hwmi_arg arg;
+
+ arg.cmd = FN_LOCK_SET;
+ arg.args[2] = on + 1; // 0 undefined, 1 off, 2 on.
+
+ return huawei_wmi_cmd(arg.cmd, NULL, 0);
+}
+
+static ssize_t fn_lock_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, on;
+
+ err = huawei_wmi_fn_lock_get(&on);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", on);
+}
+
+static ssize_t fn_lock_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int on, err;
+
+ if (kstrtoint(buf, 10, &on) ||
+ on < 0 || on > 1)
+ return -EINVAL;
+
+ err = huawei_wmi_fn_lock_set(on);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(fn_lock_state);
+
+static void huawei_wmi_fn_lock_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->fn_lock_available = true;
+ if (huawei_wmi_fn_lock_get(NULL)) {
+ huawei->fn_lock_available = false;
+ return;
+ }
+
+ device_create_file(dev, &dev_attr_fn_lock_state);
+}
+
+static void huawei_wmi_fn_lock_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ if (huawei->fn_lock_available)
+ device_remove_file(dev, &dev_attr_fn_lock_state);
+}
+
+/* debugfs */
+
+static void huawei_wmi_debugfs_call_dump(struct seq_file *m, void *data,
+ union acpi_object *obj)
+{
+ struct huawei_wmi *huawei = m->private;
+ int i;
+
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ seq_printf(m, "0x%llx", obj->integer.value);
+ break;
+ case ACPI_TYPE_STRING:
+ seq_printf(m, "\"%.*s\"", obj->string.length, obj->string.pointer);
+ break;
+ case ACPI_TYPE_BUFFER:
+ seq_puts(m, "{");
+ for (i = 0; i < obj->buffer.length; i++) {
+ seq_printf(m, "0x%02x", obj->buffer.pointer[i]);
+ if (i < obj->buffer.length - 1)
+ seq_puts(m, ",");
+ }
+ seq_puts(m, "}");
+ break;
+ case ACPI_TYPE_PACKAGE:
+ seq_puts(m, "[");
+ for (i = 0; i < obj->package.count; i++) {
+ huawei_wmi_debugfs_call_dump(m, huawei, &obj->package.elements[i]);
+ if (i < obj->package.count - 1)
+ seq_puts(m, ",");
+ }
+ seq_puts(m, "]");
+ break;
+ default:
+ dev_err(huawei->dev, "Unexpected obj type, got %d\n", obj->type);
+ return;
+ }
+}
+
+static int huawei_wmi_debugfs_call_show(struct seq_file *m, void *data)
+{
+ struct huawei_wmi *huawei = m->private;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer in;
+ union acpi_object *obj;
+ int err;
+
+ in.length = sizeof(u64);
+ in.pointer = &huawei->debug.arg;
+
+ err = huawei_wmi_call(huawei, &in, &out);
+ if (err)
+ return err;
+
+ obj = out.pointer;
+ if (!obj) {
+ err = -EIO;
+ goto fail_debugfs_call;
+ }
+
+ huawei_wmi_debugfs_call_dump(m, huawei, obj);
+
+fail_debugfs_call:
+ kfree(out.pointer);
+ return err;
+}
+
+DEFINE_SHOW_ATTRIBUTE(huawei_wmi_debugfs_call);
+
+static void huawei_wmi_debugfs_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->debug.root = debugfs_create_dir("huawei-wmi", NULL);
+
+ debugfs_create_x64("arg", 0644, huawei->debug.root,
+ &huawei->debug.arg);
+ debugfs_create_file("call", 0400,
+ huawei->debug.root, huawei, &huawei_wmi_debugfs_call_fops);
+}
+
+static void huawei_wmi_debugfs_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ debugfs_remove_recursive(huawei->debug.root);
+}
+
+/* Input */
+
+static void huawei_wmi_process_key(struct input_dev *idev, int code)
{
- struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
const struct key_entry *key;
/*
@@ -127,81 +713,187 @@ static void huawei_wmi_process_key(struct wmi_device *wdev, int code)
kfree(response.pointer);
}
- key = sparse_keymap_entry_from_scancode(priv->idev, code);
+ key = sparse_keymap_entry_from_scancode(idev, code);
if (!key) {
- dev_info(&wdev->dev, "Unknown key pressed, code: 0x%04x\n", code);
+ dev_info(&idev->dev, "Unknown key pressed, code: 0x%04x\n", code);
return;
}
- sparse_keymap_report_entry(priv->idev, key, 1, true);
+ if (quirks && !quirks->report_brightness &&
+ (key->sw.code == KEY_BRIGHTNESSDOWN ||
+ key->sw.code == KEY_BRIGHTNESSUP))
+ return;
+
+ sparse_keymap_report_entry(idev, key, 1, true);
}
-static void huawei_wmi_notify(struct wmi_device *wdev,
- union acpi_object *obj)
+static void huawei_wmi_input_notify(u32 value, void *context)
{
- if (obj->type == ACPI_TYPE_INTEGER)
- huawei_wmi_process_key(wdev, obj->integer.value);
+ struct input_dev *idev = (struct input_dev *)context;
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&idev->dev, "Unable to get event data\n");
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ huawei_wmi_process_key(idev, obj->integer.value);
else
- dev_info(&wdev->dev, "Bad response type %d\n", obj->type);
+ dev_err(&idev->dev, "Bad response type\n");
+
+ kfree(response.pointer);
}
-static int huawei_wmi_input_setup(struct wmi_device *wdev)
+static int huawei_wmi_input_setup(struct device *dev,
+ const char *guid,
+ struct input_dev **idev)
{
- struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
- int err;
-
- priv->idev = devm_input_allocate_device(&wdev->dev);
- if (!priv->idev)
+ *idev = devm_input_allocate_device(dev);
+ if (!*idev)
return -ENOMEM;
- priv->idev->name = "Huawei WMI hotkeys";
- priv->idev->phys = "wmi/input0";
- priv->idev->id.bustype = BUS_HOST;
- priv->idev->dev.parent = &wdev->dev;
+ (*idev)->name = "Huawei WMI hotkeys";
+ (*idev)->phys = "wmi/input0";
+ (*idev)->id.bustype = BUS_HOST;
+ (*idev)->dev.parent = dev;
- err = sparse_keymap_setup(priv->idev, huawei_wmi_keymap, NULL);
- if (err)
- return err;
+ return sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL) ||
+ input_register_device(*idev) ||
+ wmi_install_notify_handler(guid, huawei_wmi_input_notify,
+ *idev);
+}
- return input_register_device(priv->idev);
+static void huawei_wmi_input_exit(struct device *dev, const char *guid)
+{
+ wmi_remove_notify_handler(guid);
}
-static int huawei_wmi_probe(struct wmi_device *wdev, const void *context)
+/* Huawei driver */
+
+static const struct wmi_device_id huawei_wmi_events_id_table[] = {
+ { .guid_string = WMI0_EVENT_GUID },
+ { .guid_string = HWMI_EVENT_GUID },
+ { }
+};
+
+static int huawei_wmi_probe(struct platform_device *pdev)
{
- struct huawei_wmi_priv *priv;
+ const struct wmi_device_id *guid = huawei_wmi_events_id_table;
int err;
- priv = devm_kzalloc(&wdev->dev, sizeof(struct huawei_wmi_priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ platform_set_drvdata(pdev, huawei_wmi);
+ huawei_wmi->dev = &pdev->dev;
- dev_set_drvdata(&wdev->dev, priv);
+ while (*guid->guid_string) {
+ struct input_dev *idev = *huawei_wmi->idev;
- err = huawei_wmi_input_setup(wdev);
- if (err)
- return err;
+ if (wmi_has_guid(guid->guid_string)) {
+ err = huawei_wmi_input_setup(&pdev->dev, guid->guid_string, &idev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup input on %s\n", guid->guid_string);
+ return err;
+ }
+ }
+
+ idev++;
+ guid++;
+ }
+
+ if (wmi_has_guid(HWMI_METHOD_GUID)) {
+ mutex_init(&huawei_wmi->wmi_lock);
- return huawei_wmi_leds_setup(wdev);
+ huawei_wmi_leds_setup(&pdev->dev);
+ huawei_wmi_fn_lock_setup(&pdev->dev);
+ huawei_wmi_battery_setup(&pdev->dev);
+ huawei_wmi_debugfs_setup(&pdev->dev);
+ }
+
+ return 0;
}
-static const struct wmi_device_id huawei_wmi_id_table[] = {
- { .guid_string = WMI0_EVENT_GUID },
- { .guid_string = AMW0_EVENT_GUID },
- { }
-};
+static int huawei_wmi_remove(struct platform_device *pdev)
+{
+ const struct wmi_device_id *guid = huawei_wmi_events_id_table;
-static struct wmi_driver huawei_wmi_driver = {
+ while (*guid->guid_string) {
+ if (wmi_has_guid(guid->guid_string))
+ huawei_wmi_input_exit(&pdev->dev, guid->guid_string);
+
+ guid++;
+ }
+
+ if (wmi_has_guid(HWMI_METHOD_GUID)) {
+ huawei_wmi_debugfs_exit(&pdev->dev);
+ huawei_wmi_battery_exit(&pdev->dev);
+ huawei_wmi_fn_lock_exit(&pdev->dev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver huawei_wmi_driver = {
.driver = {
.name = "huawei-wmi",
},
- .id_table = huawei_wmi_id_table,
.probe = huawei_wmi_probe,
- .notify = huawei_wmi_notify,
+ .remove = huawei_wmi_remove,
};
-module_wmi_driver(huawei_wmi_driver);
+static __init int huawei_wmi_init(void)
+{
+ struct platform_device *pdev;
+ int err;
+
+ huawei_wmi = kzalloc(sizeof(struct huawei_wmi), GFP_KERNEL);
+ if (!huawei_wmi)
+ return -ENOMEM;
+
+ quirks = &quirk_unknown;
+ dmi_check_system(huawei_quirks);
+ if (battery_reset != -1)
+ quirks->battery_reset = battery_reset;
+ if (report_brightness != -1)
+ quirks->report_brightness = report_brightness;
+
+ err = platform_driver_register(&huawei_wmi_driver);
+ if (err)
+ goto pdrv_err;
+
+ pdev = platform_device_register_simple("huawei-wmi", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ err = PTR_ERR(pdev);
+ goto pdev_err;
+ }
+
+ return 0;
+
+pdev_err:
+ platform_driver_unregister(&huawei_wmi_driver);
+pdrv_err:
+ kfree(huawei_wmi);
+ return err;
+}
+
+static __exit void huawei_wmi_exit(void)
+{
+ struct platform_device *pdev = to_platform_device(huawei_wmi->dev);
+
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&huawei_wmi_driver);
+
+ kfree(huawei_wmi);
+}
+
+module_init(huawei_wmi_init);
+module_exit(huawei_wmi_exit);
-MODULE_DEVICE_TABLE(wmi, huawei_wmi_id_table);
+MODULE_ALIAS("wmi:"HWMI_METHOD_GUID);
+MODULE_DEVICE_TABLE(wmi, huawei_wmi_events_id_table);
MODULE_AUTHOR("Ayman Bagabas <ayman.bagabas@gmail.com>");
-MODULE_DESCRIPTION("Huawei WMI hotkeys");
+MODULE_DESCRIPTION("Huawei WMI laptop extras driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_cht_int33fe_common.c b/drivers/platform/x86/intel_cht_int33fe_common.c
new file mode 100644
index 000000000000..42dd11623f56
--- /dev/null
+++ b/drivers/platform/x86/intel_cht_int33fe_common.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common code for Intel Cherry Trail ACPI INT33FE pseudo device drivers
+ * (USB Micro-B and Type-C connector variants).
+ *
+ * Copyright (c) 2019 Yauhen Kharuzhy <jekhor@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "intel_cht_int33fe_common.h"
+
+#define EXPECTED_PTYPE 4
+
+static int cht_int33fe_i2c_res_filter(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_i2c_serialbus *sb;
+ int *count = data;
+
+ if (i2c_acpi_get_i2c_resource(ares, &sb))
+ (*count)++;
+
+ return 1;
+}
+
+static int cht_int33fe_count_i2c_clients(struct device *dev)
+{
+ struct acpi_device *adev;
+ LIST_HEAD(resource_list);
+ int count = 0;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -EINVAL;
+
+ acpi_dev_get_resources(adev, &resource_list,
+ cht_int33fe_i2c_res_filter, &count);
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ return count;
+}
+
+static int cht_int33fe_check_hw_type(struct device *dev)
+{
+ unsigned long long ptyp;
+ acpi_status status;
+ int ret;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Error getting PTYPE\n");
+ return -ENODEV;
+ }
+
+ /*
+ * The same ACPI HID is used for different configurations. Check PTYP
+ * to ensure that we are dealing with the expected config.
+ */
+ if (ptyp != EXPECTED_PTYPE)
+ return -ENODEV;
+
+ /* Check presence of INT34D3 (hardware-rev 3) expected for ptype == 4 */
+ if (!acpi_dev_present("INT34D3", "1", 3)) {
+ dev_err(dev, "Error PTYPE == %d, but no INT34D3 device\n",
+ EXPECTED_PTYPE);
+ return -ENODEV;
+ }
+
+ ret = cht_int33fe_count_i2c_clients(dev);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case 2:
+ return INT33FE_HW_MICROB;
+ case 4:
+ return INT33FE_HW_TYPEC;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int cht_int33fe_probe(struct platform_device *pdev)
+{
+ struct cht_int33fe_data *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = cht_int33fe_check_hw_type(dev);
+ if (ret < 0)
+ return ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+
+ switch (ret) {
+ case INT33FE_HW_MICROB:
+ data->probe = cht_int33fe_microb_probe;
+ data->remove = cht_int33fe_microb_remove;
+ break;
+
+ case INT33FE_HW_TYPEC:
+ data->probe = cht_int33fe_typec_probe;
+ data->remove = cht_int33fe_typec_remove;
+ break;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return data->probe(data);
+}
+
+static int cht_int33fe_remove(struct platform_device *pdev)
+{
+ struct cht_int33fe_data *data = platform_get_drvdata(pdev);
+
+ return data->remove(data);
+}
+
+static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
+ { "INT33FE", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cht_int33fe_acpi_ids);
+
+static struct platform_driver cht_int33fe_driver = {
+ .driver = {
+ .name = "Intel Cherry Trail ACPI INT33FE driver",
+ .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
+ },
+ .probe = cht_int33fe_probe,
+ .remove = cht_int33fe_remove,
+};
+
+module_platform_driver(cht_int33fe_driver);
+
+MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver");
+MODULE_AUTHOR("Yauhen Kharuzhy <jekhor@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_cht_int33fe_common.h b/drivers/platform/x86/intel_cht_int33fe_common.h
new file mode 100644
index 000000000000..03cd45f4e8cb
--- /dev/null
+++ b/drivers/platform/x86/intel_cht_int33fe_common.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common code for Intel Cherry Trail ACPI INT33FE pseudo device drivers
+ * (USB Micro-B and Type-C connector variants), header file
+ *
+ * Copyright (c) 2019 Yauhen Kharuzhy <jekhor@gmail.com>
+ */
+
+#ifndef _INTEL_CHT_INT33FE_COMMON_H
+#define _INTEL_CHT_INT33FE_COMMON_H
+
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/i2c.h>
+
+enum int33fe_hw_type {
+ INT33FE_HW_MICROB,
+ INT33FE_HW_TYPEC,
+};
+
+struct cht_int33fe_data {
+ struct device *dev;
+
+ int (*probe)(struct cht_int33fe_data *data);
+ int (*remove)(struct cht_int33fe_data *data);
+
+ struct i2c_client *battery_fg;
+
+ /* Type-C only */
+ struct i2c_client *fusb302;
+ struct i2c_client *pi3usb30532;
+
+ struct fwnode_handle *dp;
+};
+
+int cht_int33fe_microb_probe(struct cht_int33fe_data *data);
+int cht_int33fe_microb_remove(struct cht_int33fe_data *data);
+int cht_int33fe_typec_probe(struct cht_int33fe_data *data);
+int cht_int33fe_typec_remove(struct cht_int33fe_data *data);
+
+#endif /* _INTEL_CHT_INT33FE_COMMON_H */
diff --git a/drivers/platform/x86/intel_cht_int33fe_microb.c b/drivers/platform/x86/intel_cht_int33fe_microb.c
new file mode 100644
index 000000000000..20b11e0d9a75
--- /dev/null
+++ b/drivers/platform/x86/intel_cht_int33fe_microb.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Cherry Trail ACPI INT33FE pseudo device driver for devices with
+ * USB Micro-B connector (i.e. without the FUSB302 USB Type-C controller)
+ *
+ * Copyright (C) 2019 Yauhen Kharuzhy <jekhor@gmail.com>
+ *
+ * At least one Intel Cherry Trail based device which ships with Windows 10
+ * (the Lenovo YogaBook YB1-X91L/F tablet) has this weird INT33FE ACPI device
+ * with a CRS table with 2 I2cSerialBusV2 resources, for 2 different chips
+ * attached to various i2c busses:
+ * 1. The Whiskey Cove PMIC, which is also described by the INT34D3 ACPI device
+ * 2. TI BQ27542 Fuel Gauge Controller
+ *
+ * So this driver is a stub / pseudo driver whose only purpose is to
+ * instantiate an i2c-client for the battery fuel gauge, so that the standard
+ * i2c driver for this chip can bind to it.
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+
+#include "intel_cht_int33fe_common.h"
+
+static const char * const bq27xxx_suppliers[] = { "bq25890-charger" };
+
+static const struct property_entry bq27xxx_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq27xxx_suppliers),
+ { }
+};
+
+int cht_int33fe_microb_probe(struct cht_int33fe_data *data)
+{
+ struct device *dev = data->dev;
+ struct i2c_board_info board_info;
+
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, "bq27542", ARRAY_SIZE(board_info.type));
+ board_info.dev_name = "bq27542";
+ board_info.properties = bq27xxx_props;
+ data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
+
+ return PTR_ERR_OR_ZERO(data->battery_fg);
+}
+
+int cht_int33fe_microb_remove(struct cht_int33fe_data *data)
+{
+ i2c_unregister_device(data->battery_fg);
+
+ return 0;
+}
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe_typec.c
index 1d5d877b9582..2d097fc2dd46 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe_typec.c
@@ -17,17 +17,15 @@
 * for these chips can bind to them.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/pd.h>
-#define EXPECTED_PTYPE 4
+#include "intel_cht_int33fe_common.h"
enum {
INT33FE_NODE_FUSB302,
@@ -38,14 +36,6 @@ enum {
INT33FE_NODE_MAX,
};
-struct cht_int33fe_data {
- struct i2c_client *max17047;
- struct i2c_client *fusb302;
- struct i2c_client *pi3usb30532;
-
- struct fwnode_handle *dp;
-};
-
static const struct software_node nodes[];
static const struct software_node_ref_args pi3usb30532_ref = {
@@ -251,43 +241,20 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
board_info.dev_name = "max17047";
board_info.fwnode = fwnode;
- data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
+ data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
- return PTR_ERR_OR_ZERO(data->max17047);
+ return PTR_ERR_OR_ZERO(data->battery_fg);
}
-static int cht_int33fe_probe(struct platform_device *pdev)
+int cht_int33fe_typec_probe(struct cht_int33fe_data *data)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = data->dev;
struct i2c_board_info board_info;
- struct cht_int33fe_data *data;
struct fwnode_handle *fwnode;
struct regulator *regulator;
- unsigned long long ptyp;
- acpi_status status;
int fusb302_irq;
int ret;
- status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Error getting PTYPE\n");
- return -ENODEV;
- }
-
- /*
- * The same ACPI HID is used for different configurations check PTYP
- * to ensure that we are dealing with the expected config.
- */
- if (ptyp != EXPECTED_PTYPE)
- return -ENODEV;
-
- /* Check presence of INT34D3 (hardware-rev 3) expected for ptype == 4 */
- if (!acpi_dev_present("INT34D3", "1", 3)) {
- dev_err(dev, "Error PTYPE == %d, but no INT34D3 device\n",
- EXPECTED_PTYPE);
- return -ENODEV;
- }
-
/*
* We expect the WC PMIC to be paired with a TI bq24292i charger-IC.
* We check for the bq24292i vbus regulator here, this has 2 purposes:
@@ -317,10 +284,6 @@ static int cht_int33fe_probe(struct platform_device *pdev)
return fusb302_irq;
}
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
ret = cht_int33fe_add_nodes(data);
if (ret)
return ret;
@@ -365,15 +328,13 @@ static int cht_int33fe_probe(struct platform_device *pdev)
goto out_unregister_fusb302;
}
- platform_set_drvdata(pdev, data);
-
return 0;
out_unregister_fusb302:
i2c_unregister_device(data->fusb302);
out_unregister_max17047:
- i2c_unregister_device(data->max17047);
+ i2c_unregister_device(data->battery_fg);
out_remove_nodes:
cht_int33fe_remove_nodes(data);
@@ -381,36 +342,13 @@ out_remove_nodes:
return ret;
}
-static int cht_int33fe_remove(struct platform_device *pdev)
+int cht_int33fe_typec_remove(struct cht_int33fe_data *data)
{
- struct cht_int33fe_data *data = platform_get_drvdata(pdev);
-
i2c_unregister_device(data->pi3usb30532);
i2c_unregister_device(data->fusb302);
- i2c_unregister_device(data->max17047);
+ i2c_unregister_device(data->battery_fg);
cht_int33fe_remove_nodes(data);
return 0;
}
-
-static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
- { "INT33FE", },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, cht_int33fe_acpi_ids);
-
-static struct platform_driver cht_int33fe_driver = {
- .driver = {
- .name = "Intel Cherry Trail ACPI INT33FE driver",
- .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
- },
- .probe = cht_int33fe_probe,
- .remove = cht_int33fe_remove,
-};
-
-module_platform_driver(cht_int33fe_driver);
-
-MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index af233b7b77f2..f14e2c5f9da5 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -164,8 +164,8 @@ static int int0002_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct x86_cpu_id *cpu_id;
- struct irq_chip *irq_chip;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
int irq, ret;
/* Menlow has a different INT0002 device? <sigh> */
@@ -192,15 +192,11 @@ static int int0002_probe(struct platform_device *pdev)
chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
chip->irq.init_valid_mask = int0002_init_irq_valid_mask;
- ret = devm_gpiochip_add_data(&pdev->dev, chip, NULL);
- if (ret) {
- dev_err(dev, "Error adding gpio chip: %d\n", ret);
- return ret;
- }
-
/*
- * We manually request the irq here instead of passing a flow-handler
+ * We directly request the irq here instead of passing a flow-handler
* to gpiochip_set_chained_irqchip, because the irq is shared.
+ * FIXME: augment this if we manage to pull handling of shared
+ * IRQs into gpiolib.
*/
ret = devm_request_irq(dev, irq, int0002_irq,
IRQF_SHARED, "INT0002", chip);
@@ -209,17 +205,21 @@ static int int0002_probe(struct platform_device *pdev)
return ret;
}
- irq_chip = (struct irq_chip *)cpu_id->driver_data;
+ girq = &chip->irq;
+ girq->chip = (struct irq_chip *)cpu_id->driver_data;
+ /* This lets us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
- ret = gpiochip_irqchip_add(chip, irq_chip, 0, handle_edge_irq,
- IRQ_TYPE_NONE);
+ ret = devm_gpiochip_add_data(dev, chip, NULL);
if (ret) {
- dev_err(dev, "Error adding irqchip: %d\n", ret);
+ dev_err(dev, "Error adding gpio chip: %d\n", ret);
return ret;
}
- gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL);
-
device_init_wakeup(dev, true);
return 0;
}
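The reordering above follows the current gpiolib pattern: the struct gpio_irq_chip embedded in the gpio_chip must be filled in before the chip is registered, because devm_gpiochip_add_data() now sets up the irqchip as part of adding the chip. A condensed, hedged skeleton of that ordering (my_irq_chip is an assumed placeholder):

    struct gpio_irq_chip *girq = &chip->irq;

    girq->chip = &my_irq_chip;          /* assumed irq_chip for illustration */
    girq->parent_handler = NULL;        /* parent IRQ handled by the driver  */
    girq->num_parents = 0;
    girq->parents = NULL;
    girq->default_type = IRQ_TYPE_NONE;
    girq->handler = handle_edge_irq;

    ret = devm_gpiochip_add_data(dev, chip, NULL);   /* irqchip registered here */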
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 94a008efb09b..571b4754477c 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -158,8 +158,9 @@ static const struct pmc_reg_map spt_reg_map = {
.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};
-/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
+/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
static const struct pmc_bit_map cnp_pfear_map[] = {
+ /* Reserved for Cannon Lake but valid for Comet Lake */
{"PMC", BIT(0)},
{"OPI-DMI", BIT(1)},
{"SPI/eSPI", BIT(2)},
@@ -185,7 +186,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- /* Reserved for Cannonlake but valid for Icelake */
+ /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
{"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
@@ -229,12 +230,12 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
- /* Reserved for Cannonlake but valid for Icelake */
+ /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
{"PSF8", BIT(7)},
- /* Icelake generation onwards only */
+ /* Ice Lake generation onwards only */
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -324,7 +325,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
{"ISH", CNP_PMC_LTR_ISH},
{"UFSX2", CNP_PMC_LTR_UFSX2},
{"EMMC", CNP_PMC_LTR_EMMC},
- /* Reserved for Cannonlake but valid for Icelake */
+ /* Reserved for Cannon Lake but valid for Ice Lake */
{"WIGIG", ICL_PMC_LTR_WIGIG},
/* Below two cannot be used for LTR_IGNORE */
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
@@ -813,6 +814,8 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
+ INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
+ INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
{}
};
@@ -871,8 +874,8 @@ static int pmc_core_probe(struct platform_device *pdev)
pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
/*
- * Coffeelake has CPU ID of Kabylake and Cannonlake PCH. So here
- * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
+ * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
+ * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
* in this case.
*/
if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index fa97834fdb78..05cced59e251 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -224,7 +224,6 @@ static irqreturn_t intel_punit_ioc(int irq, void *dev_id)
static int intel_punit_get_bars(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *addr;
/*
@@ -232,14 +231,12 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - BIOS_IPC BASE_DATA
* - BIOS_IPC BASE_IFACE
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr))
return PTR_ERR(addr);
punit_ipcdev->base[BIOS_IPC][BASE_DATA] = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(addr))
return PTR_ERR(addr);
punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
@@ -251,33 +248,21 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - GTDRIVER_IPC BASE_DATA
* - GTDRIVER_IPC BASE_IFACE
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res) {
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (!IS_ERR(addr))
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
- }
+ addr = devm_platform_ioremap_resource(pdev, 2);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res) {
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (!IS_ERR(addr))
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
- }
+ addr = devm_platform_ioremap_resource(pdev, 3);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res) {
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (!IS_ERR(addr))
- punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
- }
+ addr = devm_platform_ioremap_resource(pdev, 4);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res) {
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (!IS_ERR(addr))
- punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
- }
+ addr = devm_platform_ioremap_resource(pdev, 5);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
return 0;
}
@@ -309,14 +294,13 @@ static int intel_punit_ipc_probe(struct platform_device *pdev)
ret = intel_punit_get_bars(pdev);
if (ret)
- goto out;
+ return ret;
punit_ipcdev->dev = &pdev->dev;
mutex_init(&punit_ipcdev->lock);
init_completion(&punit_ipcdev->cmd_complete);
-out:
- return ret;
+ return 0;
}
static int intel_punit_ipc_remove(struct platform_device *pdev)
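devm_platform_ioremap_resource(), as used above, is essentially shorthand for the two calls it replaces; a hedged sketch of the roughly equivalent open-coded form:

    /* roughly what devm_platform_ioremap_resource(pdev, index) expands to */
    struct resource *res;
    void __iomem *addr;

    res = platform_get_resource(pdev, IORESOURCE_MEM, index);
    addr = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(addr))
        return PTR_ERR(addr);

One behavioral nuance: for the optional resources 2-5 the old code skipped the ioremap entirely when platform_get_resource() returned NULL, whereas devm_ioremap_resource() logs an "invalid resource" error for a NULL resource before returning an error pointer, so missing optional BARs now produce a harmless error message.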
diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
index fdeb3624c529..cf9c44c20a82 100644
--- a/drivers/platform/x86/peaq-wmi.c
+++ b/drivers/platform/x86/peaq-wmi.c
@@ -6,7 +6,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -18,8 +18,7 @@
MODULE_ALIAS("wmi:"PEAQ_DOLBY_BUTTON_GUID);
-static unsigned int peaq_ignore_events_counter;
-static struct input_polled_dev *peaq_poll_dev;
+static struct input_dev *peaq_poll_dev;
/*
* The Dolby button (yes really a Dolby button) causes an ACPI variable to get
@@ -28,8 +27,10 @@ static struct input_polled_dev *peaq_poll_dev;
* (if polling after the release) or twice (polling between press and release).
* We ignore events for 0.5s after the first event to avoid reporting 2 presses.
*/
-static void peaq_wmi_poll(struct input_polled_dev *dev)
+static void peaq_wmi_poll(struct input_dev *input_dev)
{
+ static unsigned long last_event_time;
+ static bool had_events;
union acpi_object obj;
acpi_status status;
u32 dummy = 0;
@@ -44,22 +45,25 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
return;
if (obj.type != ACPI_TYPE_INTEGER) {
- dev_err(&peaq_poll_dev->input->dev,
+ dev_err(&input_dev->dev,
"Error WMBC did not return an integer\n");
return;
}
- if (peaq_ignore_events_counter && peaq_ignore_events_counter--)
+ if (!obj.integer.value)
return;
- if (obj.integer.value) {
- input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 1);
- input_sync(peaq_poll_dev->input);
- input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 0);
- input_sync(peaq_poll_dev->input);
- peaq_ignore_events_counter = max(1u,
- PEAQ_POLL_IGNORE_MS / peaq_poll_dev->poll_interval);
- }
+ if (had_events && time_before(jiffies, last_event_time +
+ msecs_to_jiffies(PEAQ_POLL_IGNORE_MS)))
+ return;
+
+ input_event(input_dev, EV_KEY, KEY_SOUND, 1);
+ input_sync(input_dev);
+ input_event(input_dev, EV_KEY, KEY_SOUND, 0);
+ input_sync(input_dev);
+
+ last_event_time = jiffies;
+ had_events = true;
}
/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
@@ -75,6 +79,8 @@ static const struct dmi_system_id peaq_dmi_table[] __initconst = {
static int __init peaq_wmi_init(void)
{
+ int err;
+
/* WMI GUID is not unique, also check for a DMI match */
if (!dmi_check_system(peaq_dmi_table))
return -ENODEV;
@@ -82,24 +88,36 @@ static int __init peaq_wmi_init(void)
if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
return -ENODEV;
- peaq_poll_dev = input_allocate_polled_device();
+ peaq_poll_dev = input_allocate_device();
if (!peaq_poll_dev)
return -ENOMEM;
- peaq_poll_dev->poll = peaq_wmi_poll;
- peaq_poll_dev->poll_interval = PEAQ_POLL_INTERVAL_MS;
- peaq_poll_dev->poll_interval_max = PEAQ_POLL_MAX_MS;
- peaq_poll_dev->input->name = "PEAQ WMI hotkeys";
- peaq_poll_dev->input->phys = "wmi/input0";
- peaq_poll_dev->input->id.bustype = BUS_HOST;
- input_set_capability(peaq_poll_dev->input, EV_KEY, KEY_SOUND);
+ peaq_poll_dev->name = "PEAQ WMI hotkeys";
+ peaq_poll_dev->phys = "wmi/input0";
+ peaq_poll_dev->id.bustype = BUS_HOST;
+ input_set_capability(peaq_poll_dev, EV_KEY, KEY_SOUND);
+
+ err = input_setup_polling(peaq_poll_dev, peaq_wmi_poll);
+ if (err)
+ goto err_out;
+
+ input_set_poll_interval(peaq_poll_dev, PEAQ_POLL_INTERVAL_MS);
+ input_set_max_poll_interval(peaq_poll_dev, PEAQ_POLL_MAX_MS);
+
+ err = input_register_device(peaq_poll_dev);
+ if (err)
+ goto err_out;
+
+ return 0;
- return input_register_polled_device(peaq_poll_dev);
+err_out:
+ input_free_device(peaq_poll_dev);
+ return err;
}
static void __exit peaq_wmi_exit(void)
{
- input_unregister_polled_device(peaq_poll_dev);
+ input_unregister_device(peaq_poll_dev);
}
module_init(peaq_wmi_init);
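The conversion above follows the standard sequence for the polled-input API that replaced input-polldev: allocate a regular input device, attach a poll handler with input_setup_polling(), set the interval, then register. A compact, hedged sketch of that sequence (my_poll_fn and POLL_MS are illustrative names):

    struct input_dev *idev;
    int err;

    idev = input_allocate_device();
    if (!idev)
        return -ENOMEM;

    input_set_capability(idev, EV_KEY, KEY_SOUND);

    err = input_setup_polling(idev, my_poll_fn);
    if (err)
        goto err_free;

    input_set_poll_interval(idev, POLL_MS);

    err = input_register_device(idev);
    if (err)
        goto err_free;
    return 0;

err_free:
    input_free_device(idev);
    return err;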
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
new file mode 100644
index 000000000000..4f6e4c342382
--- /dev/null
+++ b/drivers/platform/x86/system76_acpi.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * System76 ACPI Driver
+ *
+ * Copyright (C) 2019 System76
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/types.h>
+
+struct system76_data {
+ struct acpi_device *acpi_dev;
+ struct led_classdev ap_led;
+ struct led_classdev kb_led;
+ enum led_brightness kb_brightness;
+ enum led_brightness kb_toggle_brightness;
+ int kb_color;
+};
+
+static const struct acpi_device_id device_ids[] = {
+ {"17761776", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, device_ids);
+
+// Array of keyboard LED brightness levels
+static const enum led_brightness kb_levels[] = {
+ 48,
+ 72,
+ 96,
+ 144,
+ 192,
+ 255
+};
+
+// Array of keyboard LED colors in 24-bit RGB format
+static const int kb_colors[] = {
+ 0xFFFFFF,
+ 0x0000FF,
+ 0xFF0000,
+ 0xFF00FF,
+ 0x00FF00,
+ 0x00FFFF,
+ 0xFFFF00
+};
+
+// Get a System76 ACPI device value by name
+static int system76_get(struct system76_data *data, char *method)
+{
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long ret = 0;
+
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_integer(handle, method, NULL, &ret);
+ if (ACPI_SUCCESS(status))
+ return (int)ret;
+ else
+ return -1;
+}
+
+// Set a System76 ACPI device value by name
+static int system76_set(struct system76_data *data, char *method, int value)
+{
+ union acpi_object obj;
+ struct acpi_object_list obj_list;
+ acpi_handle handle;
+ acpi_status status;
+
+ obj.type = ACPI_TYPE_INTEGER;
+ obj.integer.value = value;
+ obj_list.count = 1;
+ obj_list.pointer = &obj;
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_object(handle, method, &obj_list, NULL);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ else
+ return -1;
+}
+
+// Get the airplane mode LED brightness
+static enum led_brightness ap_led_get(struct led_classdev *led)
+{
+ struct system76_data *data;
+ int value;
+
+ data = container_of(led, struct system76_data, ap_led);
+ value = system76_get(data, "GAPL");
+ if (value > 0)
+ return (enum led_brightness)value;
+ else
+ return LED_OFF;
+}
+
+// Set the airplane mode LED brightness
+static void ap_led_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, ap_led);
+ system76_set(data, "SAPL", value == LED_OFF ? 0 : 1);
+}
+
+// Get the last set keyboard LED brightness
+static enum led_brightness kb_led_get(struct led_classdev *led)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, kb_led);
+ return data->kb_brightness;
+}
+
+// Set the keyboard LED brightness
+static void kb_led_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, kb_led);
+ data->kb_brightness = value;
+ system76_set(data, "SKBL", (int)data->kb_brightness);
+}
+
+// Get the last set keyboard LED color
+static ssize_t kb_led_color_show(
+ struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct led_classdev *led;
+ struct system76_data *data;
+
+ led = (struct led_classdev *)dev->driver_data;
+ data = container_of(led, struct system76_data, kb_led);
+ return sprintf(buf, "%06X\n", data->kb_color);
+}
+
+// Set the keyboard LED color
+static ssize_t kb_led_color_store(
+ struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf,
+ size_t size)
+{
+ struct led_classdev *led;
+ struct system76_data *data;
+ unsigned int val;
+ int ret;
+
+ led = (struct led_classdev *)dev->driver_data;
+ data = container_of(led, struct system76_data, kb_led);
+ ret = kstrtouint(buf, 16, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFFFF)
+ return -EINVAL;
+ data->kb_color = (int)val;
+ system76_set(data, "SKBC", data->kb_color);
+
+ return size;
+}
+
+static const struct device_attribute kb_led_color_dev_attr = {
+ .attr = {
+ .name = "color",
+ .mode = 0644,
+ },
+ .show = kb_led_color_show,
+ .store = kb_led_color_store,
+};
+
+// Notify that the keyboard LED was changed by hardware
+static void kb_led_notify(struct system76_data *data)
+{
+ led_classdev_notify_brightness_hw_changed(
+ &data->kb_led,
+ data->kb_brightness
+ );
+}
+
+// Read keyboard LED brightness as set by hardware
+static void kb_led_hotkey_hardware(struct system76_data *data)
+{
+ int value;
+
+ value = system76_get(data, "GKBL");
+ if (value < 0)
+ return;
+ data->kb_brightness = value;
+ kb_led_notify(data);
+}
+
+// Toggle the keyboard LED
+static void kb_led_hotkey_toggle(struct system76_data *data)
+{
+ if (data->kb_brightness > 0) {
+ data->kb_toggle_brightness = data->kb_brightness;
+ kb_led_set(&data->kb_led, 0);
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Decrease the keyboard LED brightness
+static void kb_led_hotkey_down(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_brightness > 0) {
+ for (i = ARRAY_SIZE(kb_levels); i > 0; i--) {
+ if (kb_levels[i - 1] < data->kb_brightness) {
+ kb_led_set(&data->kb_led, kb_levels[i - 1]);
+ break;
+ }
+ }
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Increase the keyboard LED brightness
+static void kb_led_hotkey_up(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_brightness > 0) {
+ for (i = 0; i < ARRAY_SIZE(kb_levels); i++) {
+ if (kb_levels[i] > data->kb_brightness) {
+ kb_led_set(&data->kb_led, kb_levels[i]);
+ break;
+ }
+ }
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Cycle the keyboard LED color
+static void kb_led_hotkey_color(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_color < 0)
+ return;
+ if (data->kb_brightness > 0) {
+ for (i = 0; i < ARRAY_SIZE(kb_colors); i++) {
+ if (kb_colors[i] == data->kb_color)
+ break;
+ }
+ i += 1;
+ if (i >= ARRAY_SIZE(kb_colors))
+ i = 0;
+ data->kb_color = kb_colors[i];
+ system76_set(data, "SKBC", data->kb_color);
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Handle ACPI notification
+static void system76_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ struct system76_data *data;
+
+ data = acpi_driver_data(acpi_dev);
+ switch (event) {
+ case 0x80:
+ kb_led_hotkey_hardware(data);
+ break;
+ case 0x81:
+ kb_led_hotkey_toggle(data);
+ break;
+ case 0x82:
+ kb_led_hotkey_down(data);
+ break;
+ case 0x83:
+ kb_led_hotkey_up(data);
+ break;
+ case 0x84:
+ kb_led_hotkey_color(data);
+ break;
+ }
+}
+
+// Add a System76 ACPI device
+static int system76_add(struct acpi_device *acpi_dev)
+{
+ struct system76_data *data;
+ int err;
+
+ data = devm_kzalloc(&acpi_dev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ acpi_dev->driver_data = data;
+ data->acpi_dev = acpi_dev;
+
+ err = system76_get(data, "INIT");
+ if (err)
+ return err;
+ data->ap_led.name = "system76_acpi::airplane";
+ data->ap_led.flags = LED_CORE_SUSPENDRESUME;
+ data->ap_led.brightness_get = ap_led_get;
+ data->ap_led.brightness_set = ap_led_set;
+ data->ap_led.max_brightness = 1;
+ data->ap_led.default_trigger = "rfkill-none";
+ err = devm_led_classdev_register(&acpi_dev->dev, &data->ap_led);
+ if (err)
+ return err;
+
+ data->kb_led.name = "system76_acpi::kbd_backlight";
+ data->kb_led.flags = LED_BRIGHT_HW_CHANGED | LED_CORE_SUSPENDRESUME;
+ data->kb_led.brightness_get = kb_led_get;
+ data->kb_led.brightness_set = kb_led_set;
+ if (acpi_has_method(acpi_device_handle(data->acpi_dev), "SKBC")) {
+ data->kb_led.max_brightness = 255;
+ data->kb_toggle_brightness = 72;
+ data->kb_color = 0xffffff;
+ system76_set(data, "SKBC", data->kb_color);
+ } else {
+ data->kb_led.max_brightness = 5;
+ data->kb_color = -1;
+ }
+ err = devm_led_classdev_register(&acpi_dev->dev, &data->kb_led);
+ if (err)
+ return err;
+
+ if (data->kb_color >= 0) {
+ err = device_create_file(
+ data->kb_led.dev,
+ &kb_led_color_dev_attr
+ );
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+// Remove a System76 ACPI device
+static int system76_remove(struct acpi_device *acpi_dev)
+{
+ struct system76_data *data;
+
+ data = acpi_driver_data(acpi_dev);
+ if (data->kb_color >= 0)
+ device_remove_file(data->kb_led.dev, &kb_led_color_dev_attr);
+
+ devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
+
+ devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
+
+ system76_get(data, "FINI");
+
+ return 0;
+}
+
+static struct acpi_driver system76_driver = {
+ .name = "System76 ACPI Driver",
+ .class = "hotkey",
+ .ids = device_ids,
+ .ops = {
+ .add = system76_add,
+ .remove = system76_remove,
+ .notify = system76_notify,
+ },
+};
+module_acpi_driver(system76_driver);
+
+MODULE_DESCRIPTION("System76 ACPI Driver");
+MODULE_AUTHOR("Jeremy Soller <jeremy@system76.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 1c7d8324ff5c..72205771d03d 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -310,6 +310,22 @@ static const struct ts_dmi_data jumper_ezpad_6_pro_b_data = {
.properties = jumper_ezpad_6_pro_b_props,
};
+static const struct property_entry jumper_ezpad_6_m4_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 35),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1950),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1525),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_6_m4_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_6_m4_props,
+};
+
static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
@@ -498,6 +514,24 @@ static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = {
.properties = pov_mobii_wintab_p1006w_v10_props,
};
+static const struct property_entry schneider_sct101ctm_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-schneider-sct101ctm.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data schneider_sct101ctm_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = schneider_sct101ctm_props,
+};
+
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -789,6 +823,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Jumper EZpad 6 m4 */
+ .driver_data = (void *)&jumper_ezpad_6_m4_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ /* Jumper8.S106x.A00C.1066 with the version dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "Jumper8.S106x"),
+ },
+ },
+ {
/* Jumper EZpad mini3 */
.driver_data = (void *)&jumper_ezpad_mini3_data,
.matches = {
@@ -909,6 +953,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Schneider SCT101CTM */
+ .driver_data = (void *)&schneider_sct101ctm_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
+ },
+ },
+ {
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 59e9aa0f9643..dc2e966a5c25 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -911,7 +911,7 @@ static const struct file_operations wmi_fops = {
.read = wmi_char_read,
.open = wmi_char_open,
.unlocked_ioctl = wmi_ioctl,
- .compat_ioctl = wmi_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static int wmi_dev_probe(struct device *dev)
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index b5a217b828dc..089b6244b716 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -13,9 +13,9 @@ menuconfig POWER_AVS
Say Y here to enable Adaptive Voltage Scaling class support.
config ROCKCHIP_IODOMAIN
- tristate "Rockchip IO domain support"
- depends on POWER_AVS && ARCH_ROCKCHIP && OF
- help
- Say y here to enable support io domains on Rockchip SoCs. It is
- necessary for the io domain setting of the SoC to match the
- voltage supplied by the regulators.
+ tristate "Rockchip IO domain support"
+ depends on POWER_AVS && ARCH_ROCKCHIP && OF
+ help
+ Say y here to enable support io domains on Rockchip SoCs. It is
+ necessary for the io domain setting of the SoC to match the
+ voltage supplied by the regulators.
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index a564237278ff..c721939767eb 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -140,6 +140,16 @@ config POWER_RESET_LTC2952
This driver supports an external powerdown trigger and board power
down via the LTC2952. Bindings are made in the device tree.
+config POWER_RESET_MT6323
+ bool "MediaTek MT6323 power-off driver"
+ depends on MFD_MT6397
+ help
+ The power-off driver is responsible for externally shutting down
+ the power of the remote MediaTek SoC that the MT6323 is connected to,
+ by controlling a small BBPU circuit inside the MT6323 RTC.
+
+ Say Y if you have a board where MT6323 could be found.
+
config POWER_RESET_QNAP
bool "QNAP power-off driver"
depends on OF_GPIO && PLAT_ORION
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 85da3198e4e0..da37f8b851dc 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o
obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o
obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 44ca983a49a1..d94e3267c3b6 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -131,7 +131,7 @@ static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode,
static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
void *cmd)
{
- writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST),
+ writel(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
at91_rstc_base);
return NOTIFY_DONE;
@@ -140,9 +140,7 @@ static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
static int samx7_restart(struct notifier_block *this, unsigned long mode,
void *cmd)
{
- writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PROCRST),
- at91_rstc_base);
-
+ writel(AT91_RSTC_KEY | AT91_RSTC_PROCRST, at91_rstc_base);
return NOTIFY_DONE;
}
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index e341cc5c0ea6..1c18f465a245 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -269,6 +269,12 @@ static const struct of_device_id at91_shdwc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at91_shdwc_of_match);
+static const struct of_device_id at91_pmc_ids[] = {
+ { .compatible = "atmel,sama5d2-pmc" },
+ { .compatible = "microchip,sam9x60-pmc" },
+ { /* Sentinel. */ }
+};
+
static int __init at91_shdwc_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -313,7 +319,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
at91_shdwc_dt_configure(pdev);
- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-pmc");
+ np = of_find_matching_node(NULL, at91_pmc_ids);
if (!np) {
ret = -ENODEV;
goto clk_disable;
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
new file mode 100644
index 000000000000..1caf43d9e46d
--- /dev/null
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power off through MediaTek PMIC
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6397/rtc.h>
+
+struct mt6323_pwrc {
+ struct device *dev;
+ struct regmap *regmap;
+ u32 base;
+};
+
+static struct mt6323_pwrc *mt_pwrc;
+
+static void mt6323_do_pwroff(void)
+{
+ struct mt6323_pwrc *pwrc = mt_pwrc;
+ unsigned int val;
+ int ret;
+
+ regmap_write(pwrc->regmap, pwrc->base + RTC_BBPU, RTC_BBPU_KEY);
+ regmap_write(pwrc->regmap, pwrc->base + RTC_WRTGR, 1);
+
+ ret = regmap_read_poll_timeout(pwrc->regmap,
+ pwrc->base + RTC_BBPU, val,
+ !(val & RTC_BBPU_CBUSY),
+ MTK_RTC_POLL_DELAY_US,
+ MTK_RTC_POLL_TIMEOUT);
+ if (ret)
+ dev_err(pwrc->dev, "failed to write BBPU: %d\n", ret);
+
+ /* Wait some time for the system to go down, otherwise warn below */
+ mdelay(1000);
+
+ WARN_ONCE(1, "Unable to power off system\n");
+}
+
+static int mt6323_pwrc_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6397_chip = dev_get_drvdata(pdev->dev.parent);
+ struct mt6323_pwrc *pwrc;
+ struct resource *res;
+
+ pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
+ if (!pwrc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pwrc->base = res->start;
+ pwrc->regmap = mt6397_chip->regmap;
+ pwrc->dev = &pdev->dev;
+ mt_pwrc = pwrc;
+
+ pm_power_off = &mt6323_do_pwroff;
+
+ return 0;
+}
+
+static int mt6323_pwrc_remove(struct platform_device *pdev)
+{
+ if (pm_power_off == &mt6323_do_pwroff)
+ pm_power_off = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id mt6323_pwrc_dt_match[] = {
+ { .compatible = "mediatek,mt6323-pwrc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt6323_pwrc_dt_match);
+
+static struct platform_driver mt6323_pwrc_driver = {
+ .probe = mt6323_pwrc_probe,
+ .remove = mt6323_pwrc_remove,
+ .driver = {
+ .name = "mt6323-pwrc",
+ .of_match_table = mt6323_pwrc_dt_match,
+ },
+};
+
+module_platform_driver(mt6323_pwrc_driver);
+
+MODULE_DESCRIPTION("Poweroff driver for MT6323 PMIC");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
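For reference, the hook the new driver installs is the global pm_power_off function pointer, which the architecture's machine_power_off() path invokes at shutdown; that is why probe keeps a single static mt_pwrc pointer and why remove clears the hook only if it still points at this driver. The idiom, sketched (my_do_poweroff is an assumed name):

    /* probe */
    pm_power_off = &my_do_poweroff;

    /* remove: release the hook only if we still own it */
    if (pm_power_off == &my_do_poweroff)
        pm_power_off = NULL;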
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index c84a7b1caeb6..27164a1d3c7c 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -629,7 +629,7 @@ config BATTERY_GAUGE_LTC2941
config AB8500_BM
bool "AB8500 Battery Management Driver"
- depends on AB8500_CORE && AB8500_GPADC
+ depends on AB8500_CORE && AB8500_GPADC && (IIO = y)
help
Say Y to include support for AB8500 battery management.
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 8fe81259bfd9..909f0242bacb 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -26,7 +26,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/iio/consumer.h>
#define VTVOUT_V 1800
@@ -79,7 +79,8 @@ struct ab8500_btemp_ranges {
* @bat_temp: Dispatched battery temperature in degree Celsius
* @prev_bat_temp Last measured battery temperature in degree Celsius
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @btemp_ball: ADC channel for the battery ball temperature
+ * @bat_ctrl: ADC channel for the battery control
* @fg: Pointer to the struct fg
* @bm: Platform specific battery management information
* @btemp_psy: Structure for BTEMP specific battery properties
@@ -96,7 +97,8 @@ struct ab8500_btemp {
int bat_temp;
int prev_bat_temp;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *btemp_ball;
+ struct iio_channel *bat_ctrl;
struct ab8500_fg *fg;
struct abx500_bm_data *bm;
struct power_supply *btemp_psy;
@@ -177,13 +179,13 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
*/
static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
{
- int vbtemp;
+ int vbtemp, ret;
static int prev;
- vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
- if (vbtemp < 0) {
+ ret = iio_read_channel_processed(di->bat_ctrl, &vbtemp);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed, using previous value",
+ "%s ADC conversion failed, using previous value",
__func__);
return prev;
}
@@ -455,7 +457,7 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
*/
static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
{
- int temp;
+ int temp, ret;
static int prev;
int rbat, rntc, vntc;
u8 id;
@@ -480,10 +482,10 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
di->bm->bat_type[id].r_to_t_tbl,
di->bm->bat_type[id].n_temp_tbl_elements, rbat);
} else {
- vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
- if (vntc < 0) {
+ ret = iio_read_channel_processed(di->btemp_ball, &vntc);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed,"
+ "%s ADC conversion failed,"
" using previous value\n", __func__);
return prev;
}
@@ -1024,7 +1026,22 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* Get ADC channels */
+ di->btemp_ball = devm_iio_channel_get(&pdev->dev, "btemp_ball");
+ if (IS_ERR(di->btemp_ball)) {
+ if (PTR_ERR(di->btemp_ball) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get BTEMP BALL ADC channel\n");
+ return PTR_ERR(di->btemp_ball);
+ }
+ di->bat_ctrl = devm_iio_channel_get(&pdev->dev, "bat_ctrl");
+ if (IS_ERR(di->bat_ctrl)) {
+ if (PTR_ERR(di->bat_ctrl) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get BAT CTRL ADC channel\n");
+ return PTR_ERR(di->bat_ctrl);
+ }
di->initialized = false;
@@ -1082,6 +1099,11 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_btemp_irq[i].name, di);
@@ -1104,13 +1126,13 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
return ret;
free_irq:
- power_supply_unregister(di->btemp_psy);
-
/* We also have to free all successfully registered irqs */
for (i = i - 1; i >= 0; i--) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
free_irq(irq, di);
}
+
+ power_supply_unregister(di->btemp_psy);
free_btemp_wq:
destroy_workqueue(di->btemp_wq);
return ret;
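The ADC lookup added above (and repeated in the charger and fuel-gauge probes that follow) is the usual IIO consumer pattern: devm_iio_channel_get() may return -ENODEV before the GPADC provider has registered, which the probe treats as -EPROBE_DEFER so it is retried later, and readings then go through iio_read_channel_processed(). A compact, hedged sketch:

    struct iio_channel *chan;
    int val, ret;

    chan = devm_iio_channel_get(&pdev->dev, "btemp_ball");
    if (IS_ERR(chan))
        return PTR_ERR(chan) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(chan);

    ret = iio_read_channel_processed(chan, &val);  /* value in processed units */
    if (ret < 0)
        return ret;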
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index e51d0e72beea..8a0f9d769690 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -29,10 +29,10 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/mfd/abx500/ux500_chargalg.h>
#include <linux/usb/otg.h>
#include <linux/mutex.h>
+#include <linux/iio/consumer.h>
/* Charger constants */
#define NO_PW_CONN 0
@@ -233,7 +233,10 @@ struct ab8500_charger_max_usb_in_curr {
* @current_stepping_sessions:
* Counter for current stepping sessions
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @adc_main_charger_v: ADC channel for main charger voltage
+ * @adc_main_charger_c: ADC channel for main charger current
+ * @adc_vbus_v: ADC channel for USB charger voltage
+ * @adc_usb_charger_c: ADC channel for USB charger current
* @bm: Platform specific battery management information
* @flags: Structure for information about events triggered
* @usb_state: Structure for usb stack information
@@ -283,7 +286,10 @@ struct ab8500_charger {
int is_aca_rid;
atomic_t current_stepping_sessions;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *adc_main_charger_v;
+ struct iio_channel *adc_main_charger_c;
+ struct iio_channel *adc_vbus_v;
+ struct iio_channel *adc_usb_charger_c;
struct abx500_bm_data *bm;
struct ab8500_charger_event_flags flags;
struct ab8500_charger_usb_state usb_state;
@@ -459,13 +465,13 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
*/
static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
{
- int vch;
+ int vch, ret;
/* Only measure voltage if the charger is connected */
if (di->ac.charger_connected) {
- vch = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_V);
- if (vch < 0)
- dev_err(di->dev, "%s gpadc conv failed,\n", __func__);
+ ret = iio_read_channel_processed(di->adc_main_charger_v, &vch);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
vch = 0;
}
@@ -510,13 +516,13 @@ static int ab8500_charger_ac_cv(struct ab8500_charger *di)
*/
static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
{
- int vch;
+ int vch, ret;
/* Only measure voltage if the charger is connected */
if (di->usb.charger_connected) {
- vch = ab8500_gpadc_convert(di->gpadc, VBUS_V);
- if (vch < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_vbus_v, &vch);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
vch = 0;
}
@@ -532,13 +538,13 @@ static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
*/
static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
{
- int ich;
+ int ich, ret;
/* Only measure current if the charger is online */
if (di->usb.charger_online) {
- ich = ab8500_gpadc_convert(di->gpadc, USB_CHARGER_C);
- if (ich < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_usb_charger_c, &ich);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
ich = 0;
}
@@ -554,13 +560,13 @@ static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
*/
static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
{
- int ich;
+ int ich, ret;
/* Only measure current if the charger is online */
if (di->ac.charger_online) {
- ich = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_C);
- if (ich < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_main_charger_c, &ich);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
ich = 0;
}
@@ -3371,7 +3377,39 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* Get ADC channels */
+ di->adc_main_charger_v = devm_iio_channel_get(&pdev->dev,
+ "main_charger_v");
+ if (IS_ERR(di->adc_main_charger_v)) {
+ if (PTR_ERR(di->adc_main_charger_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC main charger voltage\n");
+ return PTR_ERR(di->adc_main_charger_v);
+ }
+ di->adc_main_charger_c = devm_iio_channel_get(&pdev->dev,
+ "main_charger_c");
+ if (IS_ERR(di->adc_main_charger_c)) {
+ if (PTR_ERR(di->adc_main_charger_c) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC main charger current\n");
+ return PTR_ERR(di->adc_main_charger_c);
+ }
+ di->adc_vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+ if (IS_ERR(di->adc_vbus_v)) {
+ if (PTR_ERR(di->adc_vbus_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC USB charger voltage\n");
+ return PTR_ERR(di->adc_vbus_v);
+ }
+ di->adc_usb_charger_c = devm_iio_channel_get(&pdev->dev,
+ "usb_charger_c");
+ if (IS_ERR(di->adc_usb_charger_c)) {
+ if (PTR_ERR(di->adc_usb_charger_c) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC USB charger current\n");
+ return PTR_ERR(di->adc_usb_charger_c);
+ }
/* initialize lock */
spin_lock_init(&di->usb_state.usb_lock);
@@ -3556,6 +3594,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_charger_irq[i].name, di);
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 6fc4bc30644c..c3912ee9eb99 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -32,7 +32,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/iio/consumer.h>
#include <linux/kernel.h>
#define MILLI_TO_MICRO 1000
@@ -182,7 +182,7 @@ struct inst_curr_result_list {
* @bat_cap: Structure for battery capacity specific parameters
* @avg_cap: Average capacity filter
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @main_bat_v: ADC channel for the main battery voltage
* @bm: Platform specific battery management information
* @fg_psy: Structure that holds the FG specific battery properties
* @fg_wq: Work queue for running the FG algorithm
@@ -224,7 +224,7 @@ struct ab8500_fg {
struct ab8500_fg_battery_capacity bat_cap;
struct ab8500_fg_avg_cap avg_cap;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *main_bat_v;
struct abx500_bm_data *bm;
struct power_supply *fg_psy;
struct workqueue_struct *fg_wq;
@@ -829,13 +829,13 @@ exit:
*/
static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
{
- int vbat;
+ int vbat, ret;
static int prev;
- vbat = ab8500_gpadc_convert(di->gpadc, MAIN_BAT_V);
- if (vbat < 0) {
+ ret = iio_read_channel_processed(di->main_bat_v, &vbat);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed, using previous value\n",
+ "%s ADC conversion failed, using previous value\n",
__func__);
return prev;
}
@@ -3066,7 +3066,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ di->main_bat_v = devm_iio_channel_get(&pdev->dev, "main_bat_v");
+ if (IS_ERR(di->main_bat_v)) {
+ if (PTR_ERR(di->main_bat_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get main battery ADC channel\n");
+ return PTR_ERR(di->main_bat_v);
+ }
psy_cfg.supplied_to = supply_interface;
psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
@@ -3151,6 +3158,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register primary interrupt handlers */
for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq_th;
+ }
+
ret = request_irq(irq, ab8500_fg_irq_th[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_fg_irq_th[i].name, di);
@@ -3158,7 +3170,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq_th[i].name, irq, ret);
- goto free_irq;
+ goto free_irq_th;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq_th[i].name, irq, ret);
@@ -3166,6 +3178,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register threaded interrupt handler */
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq_th;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_fg_irq_bh[0].isr,
IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_fg_irq_bh[0].name, di);
@@ -3173,7 +3190,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq_bh[0].name, irq, ret);
- goto free_irq;
+ goto free_irq_th;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq_bh[0].name, irq, ret);
@@ -3212,15 +3229,17 @@ static int ab8500_fg_probe(struct platform_device *pdev)
return ret;
free_irq:
- power_supply_unregister(di->fg_psy);
-
/* We also have to free all registered irqs */
- for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
+ free_irq(irq, di);
+free_irq_th:
+ while (--i >= 0) {
+ /* Last assignment of i from primary interrupt handlers */
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
free_irq(irq, di);
}
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
- free_irq(irq, di);
+
+ power_supply_unregister(di->fg_psy);
free_inst_curr_wq:
destroy_workqueue(di->fg_wq);
return ret;
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index 23757fb10479..e6e37d4f20e4 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -354,13 +354,13 @@ static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
if (di->chg_info.charger_type & USB_CHG) {
return di->usb_chg->ops.check_enable(di->usb_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
} else if ((di->chg_info.charger_type & AC_CHG) &&
!(di->ac_chg->external)) {
return di->ac_chg->ops.check_enable(di->ac_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
}
return 0;
}
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index dc4c316eff81..5f0a5722b19e 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -48,6 +48,8 @@
#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
+#define AXP813_BC_EN BIT(0)
+
/*
* Note do not raise the debounce time, we must report Vusb high within
* 100ms otherwise we get Vbus errors in musb.
@@ -495,6 +497,12 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (power->axp20x_id == AXP813_ID) {
+ /* Enable USB Battery Charging specification detection */
+ regmap_update_bits(axp20x->regmap, AXP288_BC_GLOBAL,
+ AXP813_BC_EN, AXP813_BC_EN);
+ }
+
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = power;
diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c
index 1bb32b7226d7..b8e1ec106627 100644
--- a/drivers/power/supply/bd70528-charger.c
+++ b/drivers/power/supply/bd70528-charger.c
@@ -741,3 +741,4 @@ module_platform_driver(bd70528_power);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 power-supply driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-power");
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 61d6447d1966..6e9392901b0a 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -33,8 +33,6 @@
#include <linux/iio/types.h>
#include <linux/mfd/motorola-cpcap.h>
-#include <asm/div64.h>
-
/*
* Register bit defines for CPCAP_REG_BPEOL. Some of these seem to
* map to MC13783UG.pdf "Table 5-19. Register 13, Power Control 0"
@@ -52,6 +50,26 @@
#define CPCAP_REG_BPEOL_BIT_BATTDETEN BIT(1) /* Enable battery detect */
#define CPCAP_REG_BPEOL_BIT_EOLSEL BIT(0) /* BPDET = 0, EOL = 1 */
+/*
+ * Register bit defines for CPCAP_REG_CCC1. These seem similar to the twl6030
+ * coulomb counter registers rather than the mc13892 registers. Both twl6030
+ * and mc13892 set bits 2 and 1 to reset and clear registers. But mc13892
+ * sets bit 0 to start the coulomb counter while twl6030 sets bit 0 to stop
+ * the coulomb counter like cpcap does. So for now, we use the twl6030 style
+ * naming for the registers.
+ */
+#define CPCAP_REG_CCC1_ACTIVE_MODE1 BIT(4) /* Update rate */
+#define CPCAP_REG_CCC1_ACTIVE_MODE0 BIT(3) /* Update rate */
+#define CPCAP_REG_CCC1_AUTOCLEAR BIT(2) /* Resets sample registers */
+#define CPCAP_REG_CCC1_CAL_EN BIT(1) /* Clears after write in 1s */
+#define CPCAP_REG_CCC1_PAUSE BIT(0) /* Stop counters, allow write */
+#define CPCAP_REG_CCC1_RESET_MASK (CPCAP_REG_CCC1_AUTOCLEAR | \
+ CPCAP_REG_CCC1_CAL_EN)
+
+#define CPCAP_REG_CCCC2_RATE1 BIT(5)
+#define CPCAP_REG_CCCC2_RATE0 BIT(4)
+#define CPCAP_REG_CCCC2_ENABLE BIT(3)
+
#define CPCAP_BATTERY_CC_SAMPLE_PERIOD_MS 250
enum {
@@ -64,6 +82,7 @@ enum {
enum cpcap_battery_irq_action {
CPCAP_BATTERY_IRQ_ACTION_NONE,
+ CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE,
CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW,
CPCAP_BATTERY_IRQ_ACTION_POWEROFF,
};
@@ -76,15 +95,16 @@ struct cpcap_interrupt_desc {
};
struct cpcap_battery_config {
- int ccm;
int cd_factor;
struct power_supply_info info;
+ struct power_supply_battery_info bat;
};
struct cpcap_coulomb_counter_data {
s32 sample; /* 24 or 32 bits */
s32 accumulator;
s16 offset; /* 9 bits */
+ s16 integrator; /* 13 or 16 bits */
};
enum cpcap_battery_state {
@@ -110,6 +130,7 @@ struct cpcap_battery_ddata {
struct power_supply *psy;
struct cpcap_battery_config config;
struct cpcap_battery_state_data state[CPCAP_BATTERY_STATE_NR];
+ u32 cc_lsb; /* μAms per LSB */
atomic_t active;
int status;
u16 vendor;
@@ -217,41 +238,17 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
s16 offset, u32 divider)
{
s64 acc;
- u64 tmp;
- int avg_current;
- u32 cc_lsb;
if (!divider)
return 0;
- switch (ddata->vendor) {
- case CPCAP_VENDOR_ST:
- cc_lsb = 95374; /* μAms per LSB */
- break;
- case CPCAP_VENDOR_TI:
- cc_lsb = 91501; /* μAms per LSB */
- break;
- default:
- return -EINVAL;
- }
-
acc = accumulator;
- acc = acc - ((s64)sample * offset);
- cc_lsb = (cc_lsb * ddata->config.cd_factor) / 1000;
+ acc -= (s64)sample * offset;
+ acc *= ddata->cc_lsb;
+ acc *= -1;
+ acc = div_s64(acc, divider);
- if (acc >= 0)
- tmp = acc;
- else
- tmp = acc * -1;
-
- tmp = tmp * cc_lsb;
- do_div(tmp, divider);
- avg_current = tmp;
-
- if (acc >= 0)
- return -avg_current;
- else
- return avg_current;
+ return acc;
}
/* 3600000μAms = 1μAh */
@@ -293,12 +290,13 @@ static int
cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
struct cpcap_coulomb_counter_data *ccd)
{
- u16 buf[7]; /* CPCAP_REG_CC1 to CCI */
+ u16 buf[7]; /* CPCAP_REG_CCS1 to CCI */
int error;
ccd->sample = 0;
ccd->accumulator = 0;
ccd->offset = 0;
+ ccd->integrator = 0;
/* Read coulomb counter register range */
error = regmap_bulk_read(ddata->reg, CPCAP_REG_CCS1,
@@ -323,6 +321,12 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
ccd->offset = buf[4];
ccd->offset = sign_extend32(ccd->offset, 9);
+ /* Integrator register CPCAP_REG_CCI */
+ if (ddata->vendor == CPCAP_VENDOR_TI)
+ ccd->integrator = sign_extend32(buf[6], 13);
+ else
+ ccd->integrator = (s16)buf[6];
+
return cpcap_battery_cc_to_uah(ddata,
ccd->sample,
ccd->accumulator,
@@ -336,31 +340,28 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
static int cpcap_battery_cc_get_avg_current(struct cpcap_battery_ddata *ddata)
{
int value, acc, error;
- s32 sample = 1;
+ s32 sample;
s16 offset;
- if (ddata->vendor == CPCAP_VENDOR_ST)
- sample = 4;
-
/* Coulomb counter integrator */
error = regmap_read(ddata->reg, CPCAP_REG_CCI, &value);
if (error)
return error;
- if ((ddata->vendor == CPCAP_VENDOR_TI) && (value > 0x2000))
- value = value | 0xc000;
-
- acc = (s16)value;
+ if (ddata->vendor == CPCAP_VENDOR_TI) {
+ acc = sign_extend32(value, 13);
+ sample = 1;
+ } else {
+ acc = (s16)value;
+ sample = 4;
+ }
- /* Coulomb counter sample time */
+ /* Coulomb counter calibration offset */
error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
if (error)
return error;
- if (value < 0x200)
- offset = value;
- else
- offset = value | 0xfc00;
+ offset = sign_extend32(value, 9);
return cpcap_battery_cc_to_ua(ddata, sample, acc, offset);
}
@@ -369,8 +370,8 @@ static bool cpcap_battery_full(struct cpcap_battery_ddata *ddata)
{
struct cpcap_battery_state_data *state = cpcap_battery_latest(ddata);
- /* Basically anything that measures above 4347000 is full */
- if (state->voltage >= (ddata->config.info.voltage_max_design - 4000))
+ if (state->voltage >=
+ (ddata->config.bat.constant_charge_voltage_max_uv - 18000))
return true;
return false;
@@ -417,6 +418,7 @@ static enum power_supply_property cpcap_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
@@ -475,6 +477,9 @@ static int cpcap_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
val->intval = ddata->config.info.voltage_min_design;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = ddata->config.bat.constant_charge_voltage_max_uv;
+ break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
sample = latest->cc.sample - previous->cc.sample;
if (!sample) {
@@ -540,6 +545,69 @@ static int cpcap_battery_get_property(struct power_supply *psy,
return 0;
}
+static int cpcap_battery_update_charger(struct cpcap_battery_ddata *ddata,
+ int const_charge_voltage)
+{
+ union power_supply_propval prop;
+ union power_supply_propval val;
+ struct power_supply *charger;
+ int error;
+
+ charger = power_supply_get_by_name("usb");
+ if (!charger)
+ return -ENODEV;
+
+ error = power_supply_get_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (error)
+ return error;
+
+ /* Allow charger const voltage lower than battery const voltage */
+ if (const_charge_voltage > prop.intval)
+ return 0;
+
+ val.intval = const_charge_voltage;
+
+ return power_supply_set_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &val);
+}
+
+static int cpcap_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct cpcap_battery_ddata *ddata = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ if (val->intval < ddata->config.info.voltage_min_design)
+ return -EINVAL;
+ if (val->intval > ddata->config.info.voltage_max_design)
+ return -EINVAL;
+
+ ddata->config.bat.constant_charge_voltage_max_uv = val->intval;
+
+ return cpcap_battery_update_charger(ddata, val->intval);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
{
struct cpcap_battery_ddata *ddata = data;
@@ -560,14 +628,19 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
latest = cpcap_battery_latest(ddata);
switch (d->action) {
+ case CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE:
+ dev_info(ddata->dev, "Coulomb counter calibration done\n");
+ break;
case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
if (latest->current_ua >= 0)
- dev_warn(ddata->dev, "Battery low at 3.3V!\n");
+ dev_warn(ddata->dev, "Battery low at %imV!\n",
+ latest->voltage / 1000);
break;
case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
- if (latest->current_ua >= 0) {
+ if (latest->current_ua >= 0 && latest->voltage <= 3200000) {
dev_emerg(ddata->dev,
- "Battery empty at 3.1V, powering off\n");
+ "Battery empty at %imV, powering off\n",
+ latest->voltage / 1000);
orderly_poweroff(true);
}
break;
@@ -609,7 +682,9 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
d->name = name;
d->irq = irq;
- if (!strncmp(name, "lowbph", 6))
+ if (!strncmp(name, "cccal", 5))
+ d->action = CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE;
+ else if (!strncmp(name, "lowbph", 6))
d->action = CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW;
else if (!strncmp(name, "lowbpl", 6))
d->action = CPCAP_BATTERY_IRQ_ACTION_POWEROFF;
@@ -635,6 +710,9 @@ static int cpcap_battery_init_interrupts(struct platform_device *pdev,
return error;
}
+ /* Enable calibration interrupt if already available in dts */
+ cpcap_battery_init_irq(pdev, ddata, "cccal");
+
/* Enable low battery interrupts for 3.3V high and 3.1V low */
error = regmap_update_bits(ddata->reg, CPCAP_REG_BPEOL,
0xffff,
@@ -676,6 +754,60 @@ out_err:
return error;
}
+/* Calibrate coulomb counter */
+static int cpcap_battery_calibrate(struct cpcap_battery_ddata *ddata)
+{
+ int error, ccc1, value;
+ unsigned long timeout;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_CCC1, &ccc1);
+ if (error)
+ return error;
+
+ timeout = jiffies + msecs_to_jiffies(6000);
+
+ /* Start calibration */
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CCC1,
+ 0xffff,
+ CPCAP_REG_CCC1_CAL_EN);
+ if (error)
+ goto restore;
+
+ while (time_before(jiffies, timeout)) {
+ error = regmap_read(ddata->reg, CPCAP_REG_CCC1, &value);
+ if (error)
+ goto restore;
+
+ if (!(value & CPCAP_REG_CCC1_CAL_EN))
+ break;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+ if (error)
+ goto restore;
+
+ msleep(300);
+ }
+
+ /* Read calibration offset from CCM */
+ error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+ if (error)
+ goto restore;
+
+ dev_info(ddata->dev, "calibration done: 0x%04x\n", value);
+
+restore:
+ if (error)
+ dev_err(ddata->dev, "%s: error %i\n", __func__, error);
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CCC1,
+ 0xffff, ccc1);
+ if (error)
+ dev_err(ddata->dev, "%s: restore error %i\n",
+ __func__, error);
+
+ return error;
+}
+
/*
 * Based on the values from the Motorola mapphone Linux kernel. In the
 * Motorola mapphone Linux kernel tree, the value for pm_cd_factor
@@ -687,12 +819,12 @@ out_err:
* at 3078000. The device will die around 2743000.
*/
static const struct cpcap_battery_config cpcap_battery_default_data = {
- .ccm = 0x3ff,
.cd_factor = 0x3cc,
.info.technology = POWER_SUPPLY_TECHNOLOGY_LION,
.info.voltage_max_design = 4351000,
.info.voltage_min_design = 3100000,
.info.charge_full_design = 1740000,
+ .bat.constant_charge_voltage_max_uv = 4200000,
};
#ifdef CONFIG_OF
@@ -741,12 +873,19 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (error)
return error;
- platform_set_drvdata(pdev, ddata);
+ switch (ddata->vendor) {
+ case CPCAP_VENDOR_ST:
+ ddata->cc_lsb = 95374; /* μAms per LSB */
+ break;
+ case CPCAP_VENDOR_TI:
+ ddata->cc_lsb = 91501; /* μAms per LSB */
+ break;
+ default:
+ return -EINVAL;
+ }
+ ddata->cc_lsb = (ddata->cc_lsb * ddata->config.cd_factor) / 1000;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_CCM,
- 0xffff, ddata->config.ccm);
- if (error)
- return error;
+ platform_set_drvdata(pdev, ddata);
error = cpcap_battery_init_interrupts(pdev, ddata);
if (error)
@@ -760,11 +899,13 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (!psy_desc)
return -ENOMEM;
- psy_desc->name = "battery",
- psy_desc->type = POWER_SUPPLY_TYPE_BATTERY,
- psy_desc->properties = cpcap_battery_props,
- psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props),
- psy_desc->get_property = cpcap_battery_get_property,
+ psy_desc->name = "battery";
+ psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
+ psy_desc->properties = cpcap_battery_props;
+ psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props);
+ psy_desc->get_property = cpcap_battery_get_property;
+ psy_desc->set_property = cpcap_battery_set_property;
+ psy_desc->property_is_writeable = cpcap_battery_property_is_writeable;
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = ddata;
@@ -779,6 +920,10 @@ static int cpcap_battery_probe(struct platform_device *pdev)
atomic_set(&ddata->active, 1);
+ error = cpcap_battery_calibrate(ddata);
+ if (error)
+ return error;
+
return 0;
}
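
The per-vendor cc_lsb constants above are scaled by cd_factor (0x3cc from the default config) before use; the same integer arithmetic, restated as a small user-space sketch for reference:

#include <stdio.h>

int main(void)
{
	unsigned int cd_factor = 0x3cc;		/* 972, default config above */
	unsigned int cc_lsb_st = 95374;		/* uAms per LSB, CPCAP_VENDOR_ST */
	unsigned int cc_lsb_ti = 91501;		/* uAms per LSB, CPCAP_VENDOR_TI */

	/* Same scaling as the probe path: cc_lsb = (cc_lsb * cd_factor) / 1000 */
	printf("ST: %u uAms per LSB\n", (cc_lsb_st * cd_factor) / 1000); /* 92703 */
	printf("TI: %u uAms per LSB\n", (cc_lsb_ti * cd_factor) / 1000); /* 88938 */

	return 0;
}
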
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 74258c7fe17d..c0d452e3dc8b 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -120,6 +120,13 @@ enum {
CPCAP_CHARGER_IIO_NR,
};
+enum {
+ CPCAP_CHARGER_DISCONNECTED,
+ CPCAP_CHARGER_DETECTING,
+ CPCAP_CHARGER_CHARGING,
+ CPCAP_CHARGER_DONE,
+};
+
struct cpcap_charger_ddata {
struct device *dev;
struct regmap *reg;
@@ -138,6 +145,8 @@ struct cpcap_charger_ddata {
atomic_t active;
int status;
+ int state;
+ int voltage;
};
struct cpcap_interrupt_desc {
@@ -153,6 +162,7 @@ struct cpcap_charger_ints_state {
bool chrg_se1b;
bool rvrs_mode;
+ bool chrgcurr2;
bool chrgcurr1;
bool vbusvld;
@@ -162,24 +172,26 @@ struct cpcap_charger_ints_state {
static enum power_supply_property cpcap_charger_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
};
+/* No battery always shows temperature of -40000 */
static bool cpcap_charger_battery_found(struct cpcap_charger_ddata *ddata)
{
struct iio_channel *channel;
- int error, value;
+ int error, temperature;
channel = ddata->channels[CPCAP_CHARGER_IIO_BATTDET];
- error = iio_read_channel_raw(channel, &value);
+ error = iio_read_channel_processed(channel, &temperature);
if (error < 0) {
dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
return false;
}
- return value == 1;
+ return temperature > -20000 && temperature < 60000;
}
static int cpcap_charger_get_charge_voltage(struct cpcap_charger_ddata *ddata)
@@ -224,6 +236,9 @@ static int cpcap_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
val->intval = ddata->status;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = ddata->voltage;
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (ddata->status == POWER_SUPPLY_STATUS_CHARGING)
val->intval = cpcap_charger_get_charge_voltage(ddata) *
@@ -248,6 +263,83 @@ static int cpcap_charger_get_property(struct power_supply *psy,
return 0;
}
+static int cpcap_charger_match_voltage(int voltage)
+{
+ switch (voltage) {
+ case 0 ... 4100000 - 1: return 3800000;
+ case 4100000 ... 4120000 - 1: return 4100000;
+ case 4120000 ... 4150000 - 1: return 4120000;
+ case 4150000 ... 4170000 - 1: return 4150000;
+ case 4170000 ... 4200000 - 1: return 4170000;
+ case 4200000 ... 4230000 - 1: return 4200000;
+ case 4230000 ... 4250000 - 1: return 4230000;
+ case 4250000 ... 4270000 - 1: return 4250000;
+ case 4270000 ... 4300000 - 1: return 4270000;
+ case 4300000 ... 4330000 - 1: return 4300000;
+ case 4330000 ... 4350000 - 1: return 4330000;
+ case 4350000 ... 4380000 - 1: return 4350000;
+ case 4380000 ... 4400000 - 1: return 4380000;
+ case 4400000 ... 4420000 - 1: return 4400000;
+ case 4420000 ... 4440000 - 1: return 4420000;
+ case 4440000: return 4440000;
+ default: return 0;
+ }
+}
+
+static int
+cpcap_charger_get_bat_const_charge_voltage(struct cpcap_charger_ddata *ddata)
+{
+ union power_supply_propval prop;
+ struct power_supply *battery;
+ int voltage = ddata->voltage;
+ int error;
+
+ battery = power_supply_get_by_name("battery");
+ if (battery) {
+ error = power_supply_get_property(battery,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (!error)
+ voltage = prop.intval;
+ }
+
+ return voltage;
+}
+
+static int cpcap_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct cpcap_charger_ddata *ddata = dev_get_drvdata(psy->dev.parent);
+ int voltage, batvolt;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ voltage = cpcap_charger_match_voltage(val->intval);
+ batvolt = cpcap_charger_get_bat_const_charge_voltage(ddata);
+ if (voltage > batvolt)
+ voltage = batvolt;
+ ddata->voltage = voltage;
+ schedule_delayed_work(&ddata->detect_work, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static void cpcap_charger_set_cable_path(struct cpcap_charger_ddata *ddata,
bool enabled)
{
@@ -422,6 +514,7 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
s->chrg_se1b = val & BIT(13);
s->rvrs_mode = val & BIT(6);
+ s->chrgcurr2 = val & BIT(5);
s->chrgcurr1 = val & BIT(4);
s->vbusvld = val & BIT(3);
@@ -434,6 +527,79 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
return 0;
}
+static void cpcap_charger_update_state(struct cpcap_charger_ddata *ddata,
+ int state)
+{
+ const char *status;
+
+ if (state > CPCAP_CHARGER_DONE) {
+ dev_warn(ddata->dev, "unknown state: %i\n", state);
+
+ return;
+ }
+
+ ddata->state = state;
+
+ switch (state) {
+ case CPCAP_CHARGER_DISCONNECTED:
+ status = "DISCONNECTED";
+ break;
+ case CPCAP_CHARGER_DETECTING:
+ status = "DETECTING";
+ break;
+ case CPCAP_CHARGER_CHARGING:
+ status = "CHARGING";
+ break;
+ case CPCAP_CHARGER_DONE:
+ status = "DONE";
+ break;
+ default:
+ return;
+ }
+
+ dev_dbg(ddata->dev, "state: %s\n", status);
+}
+
+static int cpcap_charger_voltage_to_regval(int voltage)
+{
+ int offset;
+
+ switch (voltage) {
+ case 0 ... 4100000 - 1:
+ return 0;
+ case 4100000 ... 4200000 - 1:
+ offset = 1;
+ break;
+ case 4200000 ... 4300000 - 1:
+ offset = 0;
+ break;
+ case 4300000 ... 4380000 - 1:
+ offset = -1;
+ break;
+ case 4380000 ... 4440000:
+ offset = -2;
+ break;
+ default:
+ return 0;
+ }
+
+ return ((voltage - 4100000) / 20000) + offset;
+}
+
+static void cpcap_charger_disconnect(struct cpcap_charger_ddata *ddata,
+ int state, unsigned long delay)
+{
+ int error;
+
+ error = cpcap_charger_set_state(ddata, 0, 0, 0);
+ if (error)
+ return;
+
+ cpcap_charger_update_state(ddata, state);
+ power_supply_changed(ddata->usb);
+ schedule_delayed_work(&ddata->detect_work, delay);
+}
+
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_charger_ddata *ddata;
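
Taken together, cpcap_charger_match_voltage() rounds a requested charge voltage down to a supported step (everything below 4.1 V maps to 3.8 V, anything above 4.44 V is rejected as 0), and cpcap_charger_voltage_to_regval() turns a supported step into a register value in the 0..15 range. A compact user-space restatement of the second mapping, useful for spot-checking the offsets (illustrative only, not part of the driver):

#include <stdio.h>

/* Same mapping as cpcap_charger_voltage_to_regval() above */
static int voltage_to_regval(int voltage)
{
	int offset;

	if (voltage < 4100000)
		return 0;
	else if (voltage < 4200000)
		offset = 1;
	else if (voltage < 4300000)
		offset = 0;
	else if (voltage < 4380000)
		offset = -1;
	else if (voltage <= 4440000)
		offset = -2;
	else
		return 0;

	return ((voltage - 4100000) / 20000) + offset;
}

int main(void)
{
	/* The 16 voltages produced by cpcap_charger_match_voltage() */
	static const int steps[] = {
		3800000, 4100000, 4120000, 4150000, 4170000, 4200000, 4230000,
		4250000, 4270000, 4300000, 4330000, 4350000, 4380000, 4400000,
		4420000, 4440000,
	};
	unsigned int i;

	/* Prints 0, 1, 2, ... 15: e.g. 4200000 uV -> 5 and 4440000 uV -> 15 */
	for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		printf("%d uV -> %d\n", steps[i], voltage_to_regval(steps[i]));

	return 0;
}
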
@@ -447,24 +613,67 @@ static void cpcap_usb_detect(struct work_struct *work)
if (error)
return;
+ /* Just init the state if a charger is connected with no chrg_det set */
+ if (!s.chrg_det && s.chrgcurr1 && s.vbusvld) {
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_DETECTING);
+
+ return;
+ }
+
+ /*
+ * If battery voltage is higher than charge voltage, it may have been
+ * charged to 4.35V by Android. Try again in 10 minutes.
+ */
+ if (cpcap_charger_get_charge_voltage(ddata) > ddata->voltage) {
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ HZ * 60 * 10);
+
+ return;
+ }
+
+ /* Throttle chrgcurr2 interrupt for charger done and retry */
+ switch (ddata->state) {
+ case CPCAP_CHARGER_CHARGING:
+ if (s.chrgcurr2)
+ break;
+ if (s.chrgcurr1 && s.vbusvld) {
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DONE,
+ HZ * 5);
+ return;
+ }
+ break;
+ case CPCAP_CHARGER_DONE:
+ if (!s.chrgcurr2)
+ break;
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ HZ * 5);
+ return;
+ default:
+ break;
+ }
+
if (!ddata->feeding_vbus && cpcap_charger_vbus_valid(ddata) &&
s.chrgcurr1) {
int max_current;
+ int vchrg;
if (cpcap_charger_battery_found(ddata))
max_current = CPCAP_REG_CRM_ICHRG_1A596;
else
max_current = CPCAP_REG_CRM_ICHRG_0A532;
+ vchrg = cpcap_charger_voltage_to_regval(ddata->voltage);
error = cpcap_charger_set_state(ddata,
- CPCAP_REG_CRM_VCHRG_4V35,
+ CPCAP_REG_CRM_VCHRG(vchrg),
max_current, 0);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_CHARGING);
} else {
error = cpcap_charger_set_state(ddata, 0, 0, 0);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_DISCONNECTED);
}
power_supply_changed(ddata->usb);
@@ -524,7 +733,7 @@ static const char * const cpcap_charger_irqs[] = {
"chrg_det", "rvrs_chrg",
/* REG_INT1 */
- "chrg_se1b", "se0conn", "rvrs_mode", "chrgcurr1", "vbusvld",
+ "chrg_se1b", "se0conn", "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
/* REG_INT_3 */
"battdetb",
@@ -596,6 +805,8 @@ static const struct power_supply_desc cpcap_charger_usb_desc = {
.properties = cpcap_charger_props,
.num_properties = ARRAY_SIZE(cpcap_charger_props),
.get_property = cpcap_charger_get_property,
+ .set_property = cpcap_charger_set_property,
+ .property_is_writeable = cpcap_charger_property_is_writeable,
};
#ifdef CONFIG_OF
@@ -625,6 +836,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
return -ENOMEM;
ddata->dev = &pdev->dev;
+ ddata->voltage = 4200000;
ddata->reg = dev_get_regmap(ddata->dev->parent, NULL);
if (!ddata->reg)
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index c3cad2b6daba..65c23ef6408d 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -33,6 +33,8 @@ static int battery_present = 1; /* true */
static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
static int battery_capacity = 50;
static int battery_voltage = 3300;
+static int battery_charge_counter = -1000;
+static int battery_current = 1600;
static bool module_initialized;
@@ -100,6 +102,9 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_NOW:
val->intval = battery_capacity;
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = battery_charge_counter;
+ break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_CHARGE_FULL:
val->intval = 100;
@@ -114,6 +119,10 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = battery_voltage;
break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = battery_current;
+ break;
default:
pr_info("%s: some properties deliberately report errors.\n",
__func__);
@@ -135,6 +144,7 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -144,6 +154,8 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
};
static char *test_power_ac_supplied_to[] = {
@@ -447,6 +459,36 @@ static int param_set_battery_voltage(const char *key,
#define param_get_battery_voltage param_get_int
+static int param_set_battery_charge_counter(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_charge_counter = tmp;
+ signal_power_supply_changed(test_power_supplies[TEST_BATTERY]);
+ return 0;
+}
+
+#define param_get_battery_charge_counter param_get_int
+
+static int param_set_battery_current(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_current = tmp;
+ signal_power_supply_changed(test_power_supplies[TEST_BATTERY]);
+ return 0;
+}
+
+#define param_get_battery_current param_get_int
+
static const struct kernel_param_ops param_ops_ac_online = {
.set = param_set_ac_online,
.get = param_get_ac_online,
@@ -487,6 +529,16 @@ static const struct kernel_param_ops param_ops_battery_voltage = {
.get = param_get_battery_voltage,
};
+static const struct kernel_param_ops param_ops_battery_charge_counter = {
+ .set = param_set_battery_charge_counter,
+ .get = param_get_battery_charge_counter,
+};
+
+static const struct kernel_param_ops param_ops_battery_current = {
+ .set = param_set_battery_current,
+ .get = param_get_battery_current,
+};
+
#define param_check_ac_online(name, p) __param_check(name, p, void);
#define param_check_usb_online(name, p) __param_check(name, p, void);
#define param_check_battery_status(name, p) __param_check(name, p, void);
@@ -495,6 +547,8 @@ static const struct kernel_param_ops param_ops_battery_voltage = {
#define param_check_battery_health(name, p) __param_check(name, p, void);
#define param_check_battery_capacity(name, p) __param_check(name, p, void);
#define param_check_battery_voltage(name, p) __param_check(name, p, void);
+#define param_check_battery_charge_counter(name, p) __param_check(name, p, void);
+#define param_check_battery_current(name, p) __param_check(name, p, void);
module_param(ac_online, ac_online, 0644);
@@ -525,6 +579,13 @@ MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
module_param(battery_voltage, battery_voltage, 0644);
MODULE_PARM_DESC(battery_voltage, "battery voltage (millivolts)");
+module_param(battery_charge_counter, battery_charge_counter, 0644);
+MODULE_PARM_DESC(battery_charge_counter,
+ "battery charge counter (microampere-hours)");
+
+module_param(battery_current, battery_current, 0644);
+MODULE_PARM_DESC(battery_current, "battery current (milliampere)");
+
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
MODULE_LICENSE("GPL");
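
The new battery_charge_counter and battery_current knobs follow the existing pattern: writable module parameters (mode 0644) that appear under /sys/module/test_power/parameters/ once the module is loaded. A small, hypothetical user-space helper that flips the simulated current to a discharge value:

#include <stdio.h>

int main(void)
{
	/* Assumes test_power is loaded; writable parameters are exposed at
	 * /sys/module/test_power/parameters/<name>.
	 */
	FILE *f = fopen("/sys/module/test_power/parameters/battery_current", "w");

	if (!f) {
		perror("battery_current");
		return 1;
	}

	fprintf(f, "%d\n", -200);	/* pretend the battery is discharging */
	fclose(f);

	return 0;
}
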
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index e3a2518503ed..bd21655c37a6 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -508,15 +508,6 @@ config PWM_TIEHRPWM
To compile this driver as a module, choose M here: the module
will be called pwm-tiehrpwm.
-config PWM_TIPWMSS
- bool
- default y if (ARCH_OMAP2PLUS) && (PWM_TIECAP || PWM_TIEHRPWM)
- help
- PWM Subsystem driver support for AM33xx SOC.
-
- PWM submodules require PWM config space access from submodule
- drivers and require common parent driver support.
-
config PWM_TWL
tristate "TWL4030/6030 PWM support"
depends on TWL4030_CORE
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 26326adf71d7..9a475073dafc 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
-obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o
obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 125a173bed45..4dd31dd9feea 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2755,7 +2755,7 @@ static int tsi721_probe(struct pci_dev *pdev,
{
int i;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
tsi_debug(INIT, &pdev->dev, "res%d %pR",
i, &pdev->resource[i]);
}
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index de919f2e8b94..471128a2e723 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -61,6 +61,7 @@
#define QDSP6SS_GFMUX_CTL_REG 0x020
#define QDSP6SS_PWR_CTL_REG 0x030
#define QDSP6SS_MEM_PWR_CTL 0x0B0
+#define QDSP6V6SS_MEM_PWR_CTL 0x034
#define QDSP6SS_STRAP_ACC 0x110
/* AXI Halt Register Offsets */
@@ -196,6 +197,7 @@ enum {
MSS_MSM8916,
MSS_MSM8974,
MSS_MSM8996,
+ MSS_MSM8998,
MSS_SDM845,
};
@@ -498,7 +500,10 @@ static int q6v5proc_reset(struct q6v5 *qproc)
}
goto pbl_wait;
- } else if (qproc->version == MSS_MSM8996) {
+ } else if (qproc->version == MSS_MSM8996 ||
+ qproc->version == MSS_MSM8998) {
+ int mem_pwr_ctl;
+
/* Override the ACC value if required */
writel(QDSP6SS_ACC_OVERRIDE_VAL,
qproc->reg_base + QDSP6SS_STRAP_ACC);
@@ -543,17 +548,24 @@ static int q6v5proc_reset(struct q6v5 *qproc)
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Turn on L1, L2, ETB and JU memories 1 at a time */
- val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
- for (i = 19; i >= 0; i--) {
+ if (qproc->version == MSS_MSM8996) {
+ mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
+ i = 19;
+ } else {
+ /* MSS_MSM8998 */
+ mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
+ i = 28;
+ }
+ val = readl(qproc->reg_base + mem_pwr_ctl);
+ for (; i >= 0; i--) {
val |= BIT(i);
- writel(val, qproc->reg_base +
- QDSP6SS_MEM_PWR_CTL);
+ writel(val, qproc->reg_base + mem_pwr_ctl);
/*
* Read back value to ensure the write is done then
* wait for 1us for both memory peripheral and data
* array to turn on.
*/
- val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
+ val |= readl(qproc->reg_base + mem_pwr_ctl);
udelay(1);
}
/* Remove word line clamp */
@@ -1571,6 +1583,33 @@ static const struct rproc_hexagon_res sdm845_mss = {
.version = MSS_SDM845,
};
+static const struct rproc_hexagon_res msm8998_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "qdss",
+ "mem",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ "gpll0_mss",
+ "mnoc_axi",
+ "snoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .version = MSS_MSM8998,
+};
+
static const struct rproc_hexagon_res msm8996_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
@@ -1677,6 +1716,7 @@ static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
+ { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
{ },
};
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 3c5fbbbfb0f1..307df98347ba 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -44,8 +44,6 @@
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
-typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
- struct resource_table *table, int len);
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
void *, int offset, int avail);
@@ -336,7 +334,8 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
return -ENOMEM;
} else {
/* Register carveout in list */
- mem = rproc_mem_entry_init(dev, 0, 0, size, rsc->vring[i].da,
+ mem = rproc_mem_entry_init(dev, NULL, 0,
+ size, rsc->vring[i].da,
rproc_alloc_carveout,
rproc_release_carveout,
"vdev%dvring%d",
@@ -400,7 +399,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
void rproc_free_vring(struct rproc_vring *rvring)
{
struct rproc *rproc = rvring->rvdev->rproc;
- int idx = rvring->rvdev->vring - rvring;
+ int idx = rvring - rvring->rvdev->vring;
struct fw_rsc_vdev *rsc;
idr_remove(&rproc->notifyids, rvring->notifyid);
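
The rproc_free_vring() fix above swaps the operands of the pointer subtraction: the index of an array element is the element pointer minus the array base. A tiny stand-alone illustration of why the old order produced a negative index:

#include <assert.h>

struct vring { int notifyid; };

int main(void)
{
	struct vring vrings[2];
	struct vring *rvring = &vrings[1];

	assert(rvring - vrings == 1);	/* new order: element - base = index */
	assert(vrings - rvring == -1);	/* old order: base - element is negative */

	return 0;
}
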
@@ -913,7 +912,7 @@ static int rproc_handle_carveout(struct rproc *rproc,
}
/* Register carveout in list */
- carveout = rproc_mem_entry_init(dev, 0, 0, rsc->len, rsc->da,
+ carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
rproc_alloc_carveout,
rproc_release_carveout, rsc->name);
if (!carveout) {
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 8cd4a0a3892b..dd93cf04e17f 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -333,9 +333,6 @@ struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
void rproc_delete_debug_dir(struct rproc *rproc)
{
- if (!rproc->dbg_dir)
- return;
-
debugfs_remove_recursive(rproc->dbg_dir);
}
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 2cf4b2992bfc..a18f88044111 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -15,9 +15,11 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
+#include <linux/workqueue.h>
#include "remoteproc_internal.h"
@@ -31,7 +33,9 @@
#define STM32_SMC_REG_WRITE 0x1
#define STM32_MBX_VQ0 "vq0"
+#define STM32_MBX_VQ0_ID 0
#define STM32_MBX_VQ1 "vq1"
+#define STM32_MBX_VQ1_ID 1
#define STM32_MBX_SHUTDOWN "shutdown"
struct stm32_syscon {
@@ -58,6 +62,7 @@ struct stm32_mbox {
const unsigned char name[10];
struct mbox_chan *chan;
struct mbox_client client;
+ struct work_struct vq_work;
int vq_id;
};
@@ -65,9 +70,11 @@ struct stm32_rproc {
struct reset_control *rst;
struct stm32_syscon hold_boot;
struct stm32_syscon pdds;
+ int wdg_irq;
u32 nb_rmems;
struct stm32_rproc_mem *rmems;
struct stm32_mbox mb[MBOX_NB_MBX];
+ struct workqueue_struct *workqueue;
bool secured_soc;
};
@@ -261,13 +268,22 @@ static irqreturn_t stm32_rproc_wdg(int irq, void *data)
return IRQ_HANDLED;
}
+static void stm32_rproc_mb_vq_work(struct work_struct *work)
+{
+ struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work);
+ struct rproc *rproc = dev_get_drvdata(mb->client.dev);
+
+ if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
+ dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
+}
+
static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data)
{
struct rproc *rproc = dev_get_drvdata(cl->dev);
struct stm32_mbox *mb = container_of(cl, struct stm32_mbox, client);
+ struct stm32_rproc *ddata = rproc->priv;
- if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
- dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
+ queue_work(ddata->workqueue, &mb->vq_work);
}
static void stm32_rproc_free_mbox(struct rproc *rproc)
@@ -285,7 +301,7 @@ static void stm32_rproc_free_mbox(struct rproc *rproc)
static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
{
.name = STM32_MBX_VQ0,
- .vq_id = 0,
+ .vq_id = STM32_MBX_VQ0_ID,
.client = {
.rx_callback = stm32_rproc_mb_callback,
.tx_block = false,
@@ -293,7 +309,7 @@ static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
},
{
.name = STM32_MBX_VQ1,
- .vq_id = 1,
+ .vq_id = STM32_MBX_VQ1_ID,
.client = {
.rx_callback = stm32_rproc_mb_callback,
.tx_block = false,
@@ -310,11 +326,12 @@ static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
}
};
-static void stm32_rproc_request_mbox(struct rproc *rproc)
+static int stm32_rproc_request_mbox(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
struct device *dev = &rproc->dev;
unsigned int i;
+ int j;
const unsigned char *name;
struct mbox_client *cl;
@@ -329,10 +346,24 @@ static void stm32_rproc_request_mbox(struct rproc *rproc)
ddata->mb[i].chan = mbox_request_channel_byname(cl, name);
if (IS_ERR(ddata->mb[i].chan)) {
+ if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER)
+ goto err_probe;
dev_warn(dev, "cannot get %s mbox\n", name);
ddata->mb[i].chan = NULL;
}
+ if (ddata->mb[i].vq_id >= 0) {
+ INIT_WORK(&ddata->mb[i].vq_work,
+ stm32_rproc_mb_vq_work);
+ }
}
+
+ return 0;
+
+err_probe:
+ for (j = i - 1; j >= 0; j--)
+ if (ddata->mb[j].chan)
+ mbox_free_channel(ddata->mb[j].chan);
+ return -EPROBE_DEFER;
}
static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold)
@@ -528,6 +559,13 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
return err;
}
+ ddata->wdg_irq = irq;
+
+ if (of_property_read_bool(np, "wakeup-source")) {
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ }
+
dev_info(dev, "wdg irq registered\n");
}
@@ -589,14 +627,22 @@ static int stm32_rproc_probe(struct platform_device *pdev)
rproc->has_iommu = false;
ddata = rproc->priv;
+ ddata->workqueue = create_workqueue(dev_name(dev));
+ if (!ddata->workqueue) {
+ dev_err(dev, "cannot create workqueue\n");
+ ret = -ENOMEM;
+ goto free_rproc;
+ }
platform_set_drvdata(pdev, rproc);
ret = stm32_rproc_parse_dt(pdev);
if (ret)
- goto free_rproc;
+ goto free_wkq;
- stm32_rproc_request_mbox(rproc);
+ ret = stm32_rproc_request_mbox(rproc);
+ if (ret)
+ goto free_rproc;
ret = rproc_add(rproc);
if (ret)
@@ -606,7 +652,13 @@ static int stm32_rproc_probe(struct platform_device *pdev)
free_mb:
stm32_rproc_free_mbox(rproc);
+free_wkq:
+ destroy_workqueue(ddata->workqueue);
free_rproc:
+ if (device_may_wakeup(dev)) {
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ }
rproc_free(rproc);
return ret;
}
@@ -614,22 +666,56 @@ free_rproc:
static int stm32_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = &pdev->dev;
if (atomic_read(&rproc->power) > 0)
rproc_shutdown(rproc);
rproc_del(rproc);
stm32_rproc_free_mbox(rproc);
+ destroy_workqueue(ddata->workqueue);
+
+ if (device_may_wakeup(dev)) {
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ }
rproc_free(rproc);
return 0;
}
+static int __maybe_unused stm32_rproc_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct stm32_rproc *ddata = rproc->priv;
+
+ if (device_may_wakeup(dev))
+ return enable_irq_wake(ddata->wdg_irq);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_rproc_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct stm32_rproc *ddata = rproc->priv;
+
+ if (device_may_wakeup(dev))
+ return disable_irq_wake(ddata->wdg_irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
+ stm32_rproc_suspend, stm32_rproc_resume);
+
static struct platform_driver stm32_rproc_driver = {
.probe = stm32_rproc_probe,
.remove = stm32_rproc_remove,
.driver = {
.name = "stm32-rproc",
+ .pm = &stm32_rproc_pm_ops,
.of_match_table = of_match_ptr(stm32_rproc_match),
},
};
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index d0322b41eca5..709276540ef1 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -21,7 +21,7 @@ config RPMSG_QCOM_GLINK_NATIVE
config RPMSG_QCOM_GLINK_RPM
tristate "Qualcomm RPM Glink driver"
- select RPMSG_QCOM_GLINK_NATIVE
+ select RPMSG_QCOM_GLINK_NATIVE
depends on HAS_IOMEM
depends on MAILBOX
help
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 621f1afd4d6b..1995f5b3ea67 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -241,10 +241,31 @@ static void qcom_glink_channel_release(struct kref *ref)
{
struct glink_channel *channel = container_of(ref, struct glink_channel,
refcount);
+ struct glink_core_rx_intent *intent;
+ struct glink_core_rx_intent *tmp;
unsigned long flags;
+ int iid;
+
+ /* cancel pending rx_done work */
+ cancel_work_sync(&channel->intent_work);
spin_lock_irqsave(&channel->intent_lock, flags);
+ /* Free all non-reuse intents pending rx_done work */
+ list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
+ if (!intent->reuse) {
+ kfree(intent->data);
+ kfree(intent);
+ }
+ }
+
+ idr_for_each_entry(&channel->liids, tmp, iid) {
+ kfree(tmp->data);
+ kfree(tmp);
+ }
idr_destroy(&channel->liids);
+
+ idr_for_each_entry(&channel->riids, tmp, iid)
+ kfree(tmp);
idr_destroy(&channel->riids);
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -1094,13 +1115,12 @@ static int qcom_glink_create_remote(struct qcom_glink *glink,
close_link:
/*
* Send a close request to "undo" our open-ack. The close-ack will
- * release the last reference.
+ * release the qcom_glink_send_open_req() reference, and the last reference
+ * will be released after receiving remote_close or when the transport is
+ * unregistered via qcom_glink_native_remove().
*/
qcom_glink_send_close_req(glink, channel);
- /* Release qcom_glink_send_open_req() reference */
- kref_put(&channel->refcount, qcom_glink_channel_release);
-
return ret;
}
@@ -1415,15 +1435,13 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
ret = rpmsg_register_device(rpdev);
if (ret)
- goto free_rpdev;
+ goto rcid_remove;
channel->rpdev = rpdev;
}
return 0;
-free_rpdev:
- kfree(rpdev);
rcid_remove:
spin_lock_irqsave(&glink->idr_lock, flags);
idr_remove(&glink->rcids, channel->rcid);
@@ -1544,6 +1562,18 @@ static void qcom_glink_work(struct work_struct *work)
}
}
+static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
+{
+ struct glink_defer_cmd *dcmd;
+ struct glink_defer_cmd *tmp;
+
+ /* cancel any pending deferred rx_work */
+ cancel_work_sync(&glink->rx_work);
+
+ list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
+ kfree(dcmd);
+}
+
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
unsigned long features,
struct qcom_glink_pipe *rx,
@@ -1619,23 +1649,24 @@ void qcom_glink_native_remove(struct qcom_glink *glink)
struct glink_channel *channel;
int cid;
int ret;
- unsigned long flags;
disable_irq(glink->irq);
- cancel_work_sync(&glink->rx_work);
+ qcom_glink_cancel_rx_work(glink);
ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
if (ret)
dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
- spin_lock_irqsave(&glink->idr_lock, flags);
/* Release any defunct local channels, waiting for close-ack */
idr_for_each_entry(&glink->lcids, channel, cid)
kref_put(&channel->refcount, qcom_glink_channel_release);
+ /* Release any defunct local channels, waiting for close-req */
+ idr_for_each_entry(&glink->rcids, channel, cid)
+ kref_put(&channel->refcount, qcom_glink_channel_release);
+
idr_destroy(&glink->lcids);
idr_destroy(&glink->rcids);
- spin_unlock_irqrestore(&glink->idr_lock, flags);
mbox_free_channel(glink->mbox_chan);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c
index 4238383d8685..579bc4443f6d 100644
--- a/drivers/rpmsg/qcom_glink_smem.c
+++ b/drivers/rpmsg/qcom_glink_smem.c
@@ -105,7 +105,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
tail = le32_to_cpu(*pipe->tail);
tail += count;
- if (tail > pipe->native.length)
+ if (tail >= pipe->native.length)
tail -= pipe->native.length;
*pipe->tail = cpu_to_le32(tail);
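
The '>' to '>=' change matters when the advanced tail lands exactly on the FIFO length: valid offsets are 0..length-1, so a tail equal to length has to wrap back to 0. A minimal sketch of the index arithmetic (the pipe length and counts are made up for illustration):

#include <assert.h>

static unsigned int advance(unsigned int tail, unsigned int count,
			    unsigned int length)
{
	tail += count;
	if (tail >= length)	/* with '>' a tail of exactly 'length' leaked through */
		tail -= length;

	return tail;
}

int main(void)
{
	assert(advance(1000, 24, 1024) == 0);	/* exact wrap */
	assert(advance(1000, 30, 1024) == 6);	/* past the end */
	assert(advance(10, 20, 1024) == 30);	/* no wrap */

	return 0;
}
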
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index eea5ebbb5119..4bbbacdbf3bb 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -146,7 +146,6 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
struct device *dev = &eptdev->dev;
- struct sk_buff *skb;
/* Close the endpoint, if it's not already destroyed by the parent */
mutex_lock(&eptdev->ept_lock);
@@ -157,10 +156,7 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
mutex_unlock(&eptdev->ept_lock);
/* Discard all SKBs */
- while (!skb_queue_empty(&eptdev->queue)) {
- skb = skb_dequeue(&eptdev->queue);
- kfree_skb(skb);
- }
+ skb_queue_purge(&eptdev->queue);
put_device(dev);
@@ -227,8 +223,10 @@ static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
if (!kbuf)
return -ENOMEM;
- if (!copy_from_iter_full(kbuf, len, from))
- return -EFAULT;
+ if (!copy_from_iter_full(kbuf, len, from)) {
+ ret = -EFAULT;
+ goto free_kbuf;
+ }
if (mutex_lock_interruptible(&eptdev->ept_lock)) {
ret = -ERESTARTSYS;
@@ -290,7 +288,7 @@ static const struct file_operations rpmsg_eptdev_fops = {
.write_iter = rpmsg_eptdev_write_iter,
.poll = rpmsg_eptdev_poll,
.unlocked_ioctl = rpmsg_eptdev_ioctl,
- .compat_ioctl = rpmsg_eptdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
@@ -451,7 +449,7 @@ static const struct file_operations rpmsg_ctrldev_fops = {
.open = rpmsg_ctrldev_open,
.release = rpmsg_ctrldev_release,
.unlocked_ioctl = rpmsg_ctrldev_ioctl,
- .compat_ioctl = rpmsg_ctrldev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static void rpmsg_ctrldev_release_device(struct device *dev)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 1adf9f815652..d77515d8382c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -373,17 +373,6 @@ config RTC_DRV_MAX77686
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
-config RTC_DRV_MESON_VRTC
- tristate "Amlogic Meson Virtual RTC"
- depends on ARCH_MESON || COMPILE_TEST
- default m if ARCH_MESON
- help
- If you say yes here you will get support for the
- Virtual RTC of Amlogic SoCs.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-meson-vrtc.
-
config RTC_DRV_RK808
tristate "Rockchip RK805/RK808/RK809/RK817/RK818 RTC"
depends on MFD_RK808
@@ -1337,8 +1326,6 @@ config RTC_DRV_IMXDI
config RTC_DRV_FSL_FTM_ALARM
tristate "Freescale FlexTimer alarm timer"
depends on ARCH_LAYERSCAPE || SOC_LS1021A
- select FSL_RCPM
- default y
help
For the FlexTimer in LS1012A, LS1021A, LS1028A, LS1043A, LS1046A,
LS1088A, LS208xA, we can use FTM as the wakeup source.
@@ -1360,6 +1347,17 @@ config RTC_DRV_MESON
This driver can also be built as a module, if so, the module
will be called "rtc-meson".
+config RTC_DRV_MESON_VRTC
+ tristate "Amlogic Meson Virtual RTC"
+ depends on ARCH_MESON || COMPILE_TEST
+ default m if ARCH_MESON
+ help
+ If you say yes here you will get support for the
+ Virtual RTC of Amlogic SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-meson-vrtc.
+
config RTC_DRV_OMAP
tristate "TI OMAP Real Time Clock"
depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
@@ -1459,6 +1457,7 @@ config RTC_DRV_PL031
config RTC_DRV_AT91RM9200
tristate "AT91RM9200 or some AT91SAM9 RTC"
depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
help
Driver for the internal RTC (Realtime Clock) module found on
Atmel AT91RM9200's and some AT91SAM9 chips. On AT91SAM9 chips
@@ -1510,9 +1509,9 @@ config RTC_DRV_PXA
depends on ARCH_PXA
select RTC_DRV_SA1100
help
- If you say Y here you will get access to the real time clock
- built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
- consisting of an SA1100 compatible RTC and the extended PXA RTC.
+ If you say Y here you will get access to the real time clock
+ built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
+ consisting of an SA1100 compatible RTC and the extended PXA RTC.
This RTC driver uses PXA RTC registers available since pxa27x
series (RDxR, RYxR) instead of legacy RCNR, RTAR.
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 84feb2565abd..5b8ebe86124a 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched/signal.h>
@@ -360,7 +361,6 @@ static long rtc_dev_ioctl(struct file *file,
case RTC_IRQP_SET:
err = rtc_irq_set_freq(rtc, arg);
break;
-
case RTC_IRQP_READ:
err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
break;
@@ -399,6 +399,34 @@ done:
return err;
}
+#ifdef CONFIG_COMPAT
+#define RTC_IRQP_SET32 _IOW('p', 0x0c, __u32)
+#define RTC_IRQP_READ32 _IOR('p', 0x0b, __u32)
+#define RTC_EPOCH_SET32 _IOW('p', 0x0e, __u32)
+
+static long rtc_dev_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct rtc_device *rtc = file->private_data;
+ void __user *uarg = compat_ptr(arg);
+
+ switch (cmd) {
+ case RTC_IRQP_READ32:
+ return put_user(rtc->irq_freq, (__u32 __user *)uarg);
+
+ case RTC_IRQP_SET32:
+ /* arg is a plain integer, not pointer */
+ return rtc_dev_ioctl(file, RTC_IRQP_SET, arg);
+
+ case RTC_EPOCH_SET32:
+ /* arg is a plain integer, not pointer */
+ return rtc_dev_ioctl(file, RTC_EPOCH_SET, arg);
+ }
+
+ return rtc_dev_ioctl(file, cmd, (unsigned long)uarg);
+}
+#endif
+
static int rtc_dev_fasync(int fd, struct file *file, int on)
{
struct rtc_device *rtc = file->private_data;
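
The *_32 command numbers exist because the native RTC_IRQP_READ, RTC_IRQP_SET and RTC_EPOCH_SET encode sizeof(unsigned long) into the ioctl word, so a 32-bit process computes a different value than a 64-bit kernel expects. A rough user-space sketch of the encoding difference (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	/* The native definition in <linux/rtc.h> uses unsigned long (8 bytes on
	 * a 64-bit build); a 32-bit process effectively encodes a 4-byte field,
	 * which is what the RTC_IRQP_READ32 definition above spells out.
	 */
	unsigned long native = _IOR('p', 0x0b, unsigned long);
	unsigned long compat = _IOR('p', 0x0b, uint32_t);

	printf("RTC_IRQP_READ   (native): 0x%lx\n", native);
	printf("RTC_IRQP_READ32 (compat): 0x%lx\n", compat);

	return 0;
}
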
@@ -434,6 +462,9 @@ static const struct file_operations rtc_dev_fops = {
.read = rtc_dev_read,
.poll = rtc_dev_poll,
.unlocked_ioctl = rtc_dev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = rtc_dev_compat_ioctl,
+#endif
.open = rtc_dev_open,
.release = rtc_dev_release,
.fasync = rtc_dev_fasync,
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index c93ef33b01d3..794a4f036b99 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -70,7 +70,7 @@ static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
time64_t time = rtc_tm_to_time64(tm);
time64_t range_min = rtc->set_start_time ? rtc->start_secs :
rtc->range_min;
- time64_t range_max = rtc->set_start_time ?
+ timeu64_t range_max = rtc->set_start_time ?
(rtc->start_secs + rtc->range_max - rtc->range_min) :
rtc->range_max;
@@ -125,7 +125,7 @@ EXPORT_SYMBOL_GPL(rtc_read_time);
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
- int err;
+ int err, uie;
err = rtc_valid_tm(tm);
if (err != 0)
@@ -137,6 +137,17 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
rtc_subtract_offset(rtc, tm);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active;
+#else
+ uie = rtc->uie_rtctimer.enabled;
+#endif
+ if (uie) {
+ err = rtc_update_irq_enable(rtc, 0);
+ if (err)
+ return err;
+ }
+
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
@@ -153,6 +164,12 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
/* A timer might have just expired */
schedule_work(&rtc->irqwork);
+ if (uie) {
+ err = rtc_update_irq_enable(rtc, 1);
+ if (err)
+ return err;
+ }
+
trace_rtc_set_time(rtc_tm_to_time64(tm), err);
return err;
}
@@ -528,7 +545,7 @@ EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
- int err;
+ int rc = 0, err;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
@@ -553,7 +570,9 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
struct rtc_time tm;
ktime_t now, onesec;
- __rtc_read_time(rtc, &tm);
+ rc = __rtc_read_time(rtc, &tm);
+ if (rc)
+ goto out;
onesec = ktime_set(1, 0);
now = rtc_tm_to_ktime(tm);
rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
@@ -565,6 +584,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
out:
mutex_unlock(&rtc->ops_lock);
+
+ /*
+ * __rtc_read_time() failed; this probably means that the RTC time has
+ * never been set or, less likely, that there is a transient error on the
+ * bus. In any case, avoid enabling emulation, as it would fail when
+ * reading the time too.
+ */
+ if (rc)
+ return rc;
+
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
* Enable emulation if the driver returned -EINVAL to signal that it has
@@ -581,6 +610,8 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
/**
* rtc_handle_legacy_irq - AIE, UIE and PIE event hook
* @rtc: pointer to the rtc device
+ * @num: number of occurrences of the event
+ * @mode: type of the event: RTC_AF, RTC_UF or RTC_PF
*
* This function is called when an AIE, UIE or PIE mode interrupt
* has occurred (or been emulated).
@@ -761,8 +792,8 @@ int rtc_irq_set_freq(struct rtc_device *rtc, int freq)
/**
* rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
- * @rtc rtc device
- * @timer timer being added.
+ * @rtc: rtc device
+ * @timer: timer being added.
*
* Enqueues a timer onto the rtc devices timerqueue and sets
* the next alarm event appropriately.
@@ -821,8 +852,8 @@ static void rtc_alarm_disable(struct rtc_device *rtc)
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
- * @rtc rtc device
- * @timer timer being removed.
+ * @rtc: rtc device
+ * @timer: timer being removed.
*
* Removes a timer onto the rtc devices timerqueue and sets
* the next alarm event appropriately.
@@ -859,8 +890,7 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
/**
* rtc_timer_do_work - Expires rtc timers
- * @rtc rtc device
- * @timer timer being removed.
+ * @work: work item
*
* Expires rtc timers. Reprograms next alarm event if needed.
* Called via worktask.
@@ -993,8 +1023,8 @@ void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
/**
* rtc_read_offset - Read the amount of rtc offset in parts per billion
- * @ rtc: rtc device to be used
- * @ offset: the offset in parts per billion
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
*
* see below for details.
*
@@ -1022,8 +1052,8 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
/**
* rtc_set_offset - Adjusts the duration of the average second
- * @ rtc: rtc device to be used
- * @ offset: the offset in parts per billion
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
*
* Some rtc's allow an adjustment to the average duration of a second
* to compensate for differences in the actual clock rate due to temperature,
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index cdad6f00debf..811fe2005488 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -900,16 +900,6 @@ err:
return ret;
}
-static int abb5zes3_remove(struct i2c_client *client)
-{
- struct abb5zes3_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
-
- if (rtc_data->irq > 0)
- device_init_wakeup(&client->dev, false);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int abb5zes3_rtc_suspend(struct device *dev)
{
@@ -956,7 +946,6 @@ static struct i2c_driver abb5zes3_driver = {
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
.probe = abb5zes3_probe,
- .remove = abb5zes3_remove,
.id_table = abb5zes3_id,
};
module_i2c_driver(abb5zes3_driver);
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 9351bd52477e..94d7c22fc4f3 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -74,7 +74,7 @@ struct armada38x_rtc {
int irq;
bool initialized;
struct value_to_freq *val_to_freq;
- struct armada38x_rtc_data *data;
+ const struct armada38x_rtc_data *data;
};
#define ALARM1 0
@@ -501,17 +501,14 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct armada38x_rtc *rtc;
- const struct of_device_id *match;
-
- match = of_match_device(armada38x_rtc_of_match_table, &pdev->dev);
- if (!match)
- return -ENODEV;
rtc = devm_kzalloc(&pdev->dev, sizeof(struct armada38x_rtc),
GFP_KERNEL);
if (!rtc)
return -ENOMEM;
+ rtc->data = of_device_get_match_data(&pdev->dev);
+
rtc->val_to_freq = devm_kcalloc(&pdev->dev, SAMPLE_NR,
sizeof(struct value_to_freq), GFP_KERNEL);
if (!rtc->val_to_freq)
@@ -553,7 +550,6 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
*/
rtc->rtc_dev->ops = &armada38x_rtc_ops_noirq;
}
- rtc->data = (struct armada38x_rtc_data *)match->data;
/* Update RTC-MBUS bridge timing parameters */
rtc->data->update_mbus_timing(rtc);
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 10413d803caa..10064bdabdff 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -245,7 +245,6 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
{
struct asm9260_rtc_priv *priv;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq_alarm, ret;
u32 ccr;
@@ -260,8 +259,7 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
if (irq_alarm < 0)
return irq_alarm;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->iobase = devm_ioremap_resource(dev, res);
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index e351d35b29a3..eacdd0637cce 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -85,14 +85,12 @@ static const struct rtc_class_ops aspeed_rtc_ops = {
static int aspeed_rtc_probe(struct platform_device *pdev)
{
struct aspeed_rtc *rtc;
- struct resource *res;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index d119c6e6353e..3b833e02a657 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -319,7 +319,6 @@ static const struct at91_rtc_config at91sam9x5_config = {
.use_shadow_imr = true,
};
-#ifdef CONFIG_OF
static const struct of_device_id at91_rtc_dt_ids[] = {
{
.compatible = "atmel,at91rm9200-rtc",
@@ -332,22 +331,6 @@ static const struct of_device_id at91_rtc_dt_ids[] = {
}
};
MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
-#endif
-
-static const struct at91_rtc_config *
-at91_rtc_get_config(struct platform_device *pdev)
-{
- const struct of_device_id *match;
-
- if (pdev->dev.of_node) {
- match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
- if (!match)
- return NULL;
- return (const struct at91_rtc_config *)match->data;
- }
-
- return &at91rm9200_config;
-}
static const struct rtc_class_ops at91_rtc_ops = {
.read_time = at91_rtc_readtime,
@@ -367,7 +350,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
struct resource *regs;
int ret = 0;
- at91_rtc_config = at91_rtc_get_config(pdev);
+ at91_rtc_config = of_device_get_match_data(&pdev->dev);
if (!at91_rtc_config)
return -ENODEV;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index bb3ba7bfe6a5..e39e89867d29 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -334,7 +334,6 @@ static const struct rtc_class_ops at91_rtc_ops = {
*/
static int at91_rtc_probe(struct platform_device *pdev)
{
- struct resource *r;
struct sam9_rtc *rtc;
int ret, irq;
u32 mr;
@@ -358,8 +357,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtt = devm_ioremap_resource(&pdev->dev, r);
+ rtc->rtt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtt))
return PTR_ERR(rtc->rtt);
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index 7744333b0f40..627037aa66a8 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -491,3 +491,4 @@ module_platform_driver(bd70528_rtc);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 RTC driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-rtc");
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index 3e9800f9878a..4fee57c51280 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -200,7 +200,6 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct brcmstb_waketmr *timer;
- struct resource *res;
int ret;
timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
@@ -210,8 +209,7 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, timer);
timer->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- timer->base = devm_ioremap_resource(dev, res);
+ timer->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->base))
return PTR_ERR(timer->base);
@@ -277,6 +275,7 @@ static int brcmstb_waketmr_remove(struct platform_device *pdev)
struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev);
unregister_reboot_notifier(&timer->reboot_notifier);
+ clk_disable_unprepare(timer->clk);
return 0;
}
diff --git a/drivers/rtc/rtc-cadence.c b/drivers/rtc/rtc-cadence.c
index 592aae23cbaf..595d5d252850 100644
--- a/drivers/rtc/rtc-cadence.c
+++ b/drivers/rtc/rtc-cadence.c
@@ -255,7 +255,6 @@ static const struct rtc_class_ops cdns_rtc_ops = {
static int cdns_rtc_probe(struct platform_device *pdev)
{
struct cdns_rtc *crtc;
- struct resource *res;
int ret;
unsigned long ref_clk_freq;
@@ -263,8 +262,7 @@ static int cdns_rtc_probe(struct platform_device *pdev)
if (!crtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- crtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ crtc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(crtc->regs))
return PTR_ERR(crtc->regs);
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 4ac850837153..da59917c9ee8 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -164,15 +164,13 @@ static int __init coh901331_probe(struct platform_device *pdev)
{
int ret;
struct coh901331_port *rtap;
- struct resource *res;
rtap = devm_kzalloc(&pdev->dev,
sizeof(struct coh901331_port), GFP_KERNEL);
if (!rtap)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtap->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ rtap->virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtap->virtbase))
return PTR_ERR(rtap->virtbase);
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 6909e01936d9..d043d30f05bc 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -107,11 +107,7 @@ static int cros_ec_rtc_set_time(struct device *dev, struct rtc_time *tm)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(dev);
struct cros_ec_device *cros_ec = cros_ec_rtc->cros_ec;
int ret;
- time64_t time;
-
- time = rtc_tm_to_time64(tm);
- if (time < 0 || time > U32_MAX)
- return -EINVAL;
+ time64_t time = rtc_tm_to_time64(tm);
ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_VALUE, (u32)time);
if (ret < 0) {
@@ -348,14 +344,16 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
return ret;
}
- cros_ec_rtc->rtc = devm_rtc_device_register(&pdev->dev, DRV_NAME,
- &cros_ec_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(cros_ec_rtc->rtc)) {
- ret = PTR_ERR(cros_ec_rtc->rtc);
- dev_err(&pdev->dev, "failed to register rtc device\n");
+ cros_ec_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(cros_ec_rtc->rtc))
+ return PTR_ERR(cros_ec_rtc->rtc);
+
+ cros_ec_rtc->rtc->ops = &cros_ec_rtc_ops;
+ cros_ec_rtc->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(cros_ec_rtc->rtc);
+ if (ret)
return ret;
- }
/* Get RTC events from the EC. */
cros_ec_rtc->notifier.notifier_call = cros_ec_rtc_event;
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 15908d51b1cb..046b1d4c3dae 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -483,6 +483,9 @@ static int da9063_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->uie_unsupported = 1;
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
+ if (irq_alarm < 0)
+ return irq_alarm;
+
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index d8e0db2e7fc6..390b7351e0fe 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -469,7 +469,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct davinci_rtc *davinci_rtc;
- struct resource *res;
int ret = 0;
davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL);
@@ -480,8 +479,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
if (davinci_rtc->irq < 0)
return davinci_rtc->irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- davinci_rtc->base = devm_ioremap_resource(dev, res);
+ davinci_rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(davinci_rtc->base))
return PTR_ERR(davinci_rtc->base);
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 0aecc3f8e721..200d85b01e8b 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -175,7 +175,6 @@ static irqreturn_t dc_rtc_irq(int irq, void *dev_id)
static int __init dc_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct dc_rtc *rtc;
int irq, ret;
@@ -183,8 +182,7 @@ static int __init dc_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->regs))
return PTR_ERR(rtc->regs);
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
index b225bcfef50b..7eeb3f359de8 100644
--- a/drivers/rtc/rtc-ds1216.c
+++ b/drivers/rtc/rtc-ds1216.c
@@ -137,7 +137,6 @@ static const struct rtc_class_ops ds1216_rtc_ops = {
static int __init ds1216_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct ds1216_priv *priv;
u8 dummy[8];
@@ -147,8 +146,7 @@ static int __init ds1216_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ priv->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->ioaddr))
return PTR_ERR(priv->ioaddr);
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index a06508b6c404..7acf849d4902 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -323,15 +323,13 @@ static const struct rtc_class_ops ds1286_ops = {
static int ds1286_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- struct resource *res;
struct ds1286_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(struct ds1286_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->rtcregs = devm_ioremap_resource(&pdev->dev, res);
+ priv->rtcregs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->rtcregs))
return PTR_ERR(priv->rtcregs);
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 4faa24c88af5..b3de6d2e680a 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -15,8 +15,6 @@
#include <linux/rtc.h>
#include <linux/spi/spi.h>
-#define DRV_NAME "rtc-ds1302"
-
#define RTC_CMD_READ 0x81 /* Read command */
#define RTC_CMD_WRITE 0x80 /* Write command */
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index fa6de31d5793..d21004a68ee0 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -78,42 +78,19 @@ struct ds1343_priv {
struct spi_device *spi;
struct rtc_device *rtc;
struct regmap *map;
- struct mutex mutex;
- unsigned int irqen;
int irq;
- int alarm_sec;
- int alarm_min;
- int alarm_hour;
- int alarm_mday;
};
-static int ds1343_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
-#ifdef RTC_SET_CHARGE
- case RTC_SET_CHARGE:
- {
- int val;
-
- if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
- return -EFAULT;
-
- return regmap_write(priv->map, DS1343_TRICKLE_REG, val);
- }
- break;
-#endif
- }
-
- return -ENOIOCTLCMD;
-}
-
static ssize_t ds1343_show_glitchfilter(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
int glitch_filt_status, data;
+ int res;
- regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ res = regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ if (res)
+ return res;
glitch_filt_status = !!(data & DS1343_EGFIL);
@@ -127,21 +104,19 @@ static ssize_t ds1343_store_glitchfilter(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int data;
-
- regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
+ int data = 0;
+ int res;
if (strncmp(buf, "enabled", 7) == 0)
- data |= DS1343_EGFIL;
-
- else if (strncmp(buf, "disabled", 8) == 0)
- data &= ~(DS1343_EGFIL);
-
- else
+ data = DS1343_EGFIL;
+ else if (strncmp(buf, "disabled", 8))
return -EINVAL;
- regmap_write(priv->map, DS1343_CONTROL_REG, data);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_EGFIL, data);
+ if (res)
+ return res;
return count;
}
@@ -168,11 +143,13 @@ static int ds1343_nvram_read(void *priv, unsigned int off, void *val,
static ssize_t ds1343_show_tricklecharger(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int data;
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
+ int res, data;
char *diodes = "disabled", *resistors = " ";
- regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
+ res = regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
+ if (res)
+ return res;
if ((data & 0xf0) == DS1343_TRICKLE_MAGIC) {
switch (data & 0x0c) {
@@ -209,28 +186,15 @@ static ssize_t ds1343_show_tricklecharger(struct device *dev,
static DEVICE_ATTR(trickle_charger, S_IRUGO, ds1343_show_tricklecharger, NULL);
-static int ds1343_sysfs_register(struct device *dev)
-{
- int err;
-
- err = device_create_file(dev, &dev_attr_glitch_filter);
- if (err)
- return err;
-
- err = device_create_file(dev, &dev_attr_trickle_charger);
- if (!err)
- return 0;
-
- device_remove_file(dev, &dev_attr_glitch_filter);
-
- return err;
-}
+static struct attribute *ds1343_attrs[] = {
+ &dev_attr_glitch_filter.attr,
+ &dev_attr_trickle_charger.attr,
+ NULL
+};
-static void ds1343_sysfs_unregister(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_glitch_filter);
- device_remove_file(dev, &dev_attr_trickle_charger);
-}
+static const struct attribute_group ds1343_attr_group = {
+ .attrs = ds1343_attrs,
+};
static int ds1343_read_time(struct device *dev, struct rtc_time *dt)
{
@@ -256,144 +220,78 @@ static int ds1343_read_time(struct device *dev, struct rtc_time *dt)
static int ds1343_set_time(struct device *dev, struct rtc_time *dt)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res;
-
- res = regmap_write(priv->map, DS1343_SECONDS_REG,
- bin2bcd(dt->tm_sec));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_MINUTES_REG,
- bin2bcd(dt->tm_min));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_HOURS_REG,
- bin2bcd(dt->tm_hour) & 0x3F);
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_DAY_REG,
- bin2bcd(dt->tm_wday + 1));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_DATE_REG,
- bin2bcd(dt->tm_mday));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_MONTH_REG,
- bin2bcd(dt->tm_mon + 1));
- if (res)
- return res;
-
- dt->tm_year %= 100;
-
- res = regmap_write(priv->map, DS1343_YEAR_REG,
- bin2bcd(dt->tm_year));
- if (res)
- return res;
-
- return 0;
+ u8 buf[7];
+
+ buf[0] = bin2bcd(dt->tm_sec);
+ buf[1] = bin2bcd(dt->tm_min);
+ buf[2] = bin2bcd(dt->tm_hour) & 0x3F;
+ buf[3] = bin2bcd(dt->tm_wday + 1);
+ buf[4] = bin2bcd(dt->tm_mday);
+ buf[5] = bin2bcd(dt->tm_mon + 1);
+ buf[6] = bin2bcd(dt->tm_year - 100);
+
+ return regmap_bulk_write(priv->map, DS1343_SECONDS_REG,
+ buf, sizeof(buf));
}
-static int ds1343_update_alarm(struct device *dev)
+static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- unsigned int control, stat;
unsigned char buf[4];
- int res = 0;
+ unsigned int val;
+ int res;
- res = regmap_read(priv->map, DS1343_CONTROL_REG, &control);
- if (res)
- return res;
+ if (priv->irq <= 0)
+ return -EINVAL;
- res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
+ res = regmap_read(priv->map, DS1343_STATUS_REG, &val);
if (res)
return res;
- control &= ~(DS1343_A0IE);
- stat &= ~(DS1343_IRQF0);
-
- res = regmap_write(priv->map, DS1343_CONTROL_REG, control);
- if (res)
- return res;
+ alarm->pending = !!(val & DS1343_IRQF0);
- res = regmap_write(priv->map, DS1343_STATUS_REG, stat);
+ res = regmap_read(priv->map, DS1343_CONTROL_REG, &val);
if (res)
return res;
+ alarm->enabled = !!(val & DS1343_A0IE);
- buf[0] = priv->alarm_sec < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_sec) & 0x7F;
- buf[1] = priv->alarm_min < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_min) & 0x7F;
- buf[2] = priv->alarm_hour < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_hour) & 0x3F;
- buf[3] = priv->alarm_mday < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_mday) & 0x7F;
-
- res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
+ res = regmap_bulk_read(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
if (res)
return res;
- if (priv->irqen) {
- control |= DS1343_A0IE;
- res = regmap_write(priv->map, DS1343_CONTROL_REG, control);
- }
+ alarm->time.tm_sec = bcd2bin(buf[0]) & 0x7f;
+ alarm->time.tm_min = bcd2bin(buf[1]) & 0x7f;
+ alarm->time.tm_hour = bcd2bin(buf[2]) & 0x3f;
+ alarm->time.tm_mday = bcd2bin(buf[3]) & 0x3f;
- return res;
+ return 0;
}
-static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
+ unsigned char buf[4];
int res = 0;
- unsigned int stat;
if (priv->irq <= 0)
return -EINVAL;
- mutex_lock(&priv->mutex);
-
- res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG, DS1343_A0IE, 0);
if (res)
- goto out;
-
- alarm->enabled = !!(priv->irqen & RTC_AF);
- alarm->pending = !!(stat & DS1343_IRQF0);
-
- alarm->time.tm_sec = priv->alarm_sec < 0 ? 0 : priv->alarm_sec;
- alarm->time.tm_min = priv->alarm_min < 0 ? 0 : priv->alarm_min;
- alarm->time.tm_hour = priv->alarm_hour < 0 ? 0 : priv->alarm_hour;
- alarm->time.tm_mday = priv->alarm_mday < 0 ? 0 : priv->alarm_mday;
-
-out:
- mutex_unlock(&priv->mutex);
- return res;
-}
-
-static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res = 0;
-
- if (priv->irq <= 0)
- return -EINVAL;
+ return res;
- mutex_lock(&priv->mutex);
+ buf[0] = bin2bcd(alarm->time.tm_sec);
+ buf[1] = bin2bcd(alarm->time.tm_min);
+ buf[2] = bin2bcd(alarm->time.tm_hour);
+ buf[3] = bin2bcd(alarm->time.tm_mday);
- priv->alarm_sec = alarm->time.tm_sec;
- priv->alarm_min = alarm->time.tm_min;
- priv->alarm_hour = alarm->time.tm_hour;
- priv->alarm_mday = alarm->time.tm_mday;
+ res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
+ if (res)
+ return res;
if (alarm->enabled)
- priv->irqen |= RTC_AF;
-
- res = ds1343_update_alarm(dev);
-
- mutex_unlock(&priv->mutex);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, DS1343_A0IE);
return res;
}
@@ -401,32 +299,21 @@ static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int ds1343_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res = 0;
if (priv->irq <= 0)
return -EINVAL;
- mutex_lock(&priv->mutex);
-
- if (enabled)
- priv->irqen |= RTC_AF;
- else
- priv->irqen &= ~RTC_AF;
-
- res = ds1343_update_alarm(dev);
-
- mutex_unlock(&priv->mutex);
-
- return res;
+ return regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, enabled ? DS1343_A0IE : 0);
}
static irqreturn_t ds1343_thread(int irq, void *dev_id)
{
struct ds1343_priv *priv = dev_id;
- unsigned int stat, control;
+ unsigned int stat;
int res = 0;
- mutex_lock(&priv->mutex);
+ rtc_lock(priv->rtc);
res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
if (res)
@@ -436,23 +323,18 @@ static irqreturn_t ds1343_thread(int irq, void *dev_id)
stat &= ~DS1343_IRQF0;
regmap_write(priv->map, DS1343_STATUS_REG, stat);
- res = regmap_read(priv->map, DS1343_CONTROL_REG, &control);
- if (res)
- goto out;
-
- control &= ~DS1343_A0IE;
- regmap_write(priv->map, DS1343_CONTROL_REG, control);
-
rtc_update_irq(priv->rtc, 1, RTC_AF | RTC_IRQF);
+
+ regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, 0);
}
out:
- mutex_unlock(&priv->mutex);
+ rtc_unlock(priv->rtc);
return IRQ_HANDLED;
}
static const struct rtc_class_ops ds1343_rtc_ops = {
- .ioctl = ds1343_ioctl,
.read_time = ds1343_read_time,
.set_time = ds1343_set_time,
.read_alarm = ds1343_read_alarm,
@@ -481,7 +363,6 @@ static int ds1343_probe(struct spi_device *spi)
return -ENOMEM;
priv->spi = spi;
- mutex_init(&priv->mutex);
/* RTC DS1347 works in spi mode 3 and
* its chip select is active high
@@ -520,6 +401,13 @@ static int ds1343_probe(struct spi_device *spi)
priv->rtc->nvram_old_abi = true;
priv->rtc->ops = &ds1343_rtc_ops;
+ priv->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ priv->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ res = rtc_add_group(priv->rtc, &ds1343_attr_group);
+ if (res)
+ dev_err(&spi->dev,
+ "unable to create sysfs entries for rtc ds1343\n");
res = rtc_register_device(priv->rtc);
if (res)
@@ -544,31 +432,12 @@ static int ds1343_probe(struct spi_device *spi)
}
}
- res = ds1343_sysfs_register(&spi->dev);
- if (res)
- dev_err(&spi->dev,
- "unable to create sysfs entries for rtc ds1343\n");
-
return 0;
}
static int ds1343_remove(struct spi_device *spi)
{
- struct ds1343_priv *priv = spi_get_drvdata(spi);
-
- if (spi->irq) {
- mutex_lock(&priv->mutex);
- priv->irqen &= ~RTC_AF;
- mutex_unlock(&priv->mutex);
-
- dev_pm_clear_wake_irq(&spi->dev);
- device_init_wakeup(&spi->dev, false);
- devm_free_irq(&spi->dev, spi->irq, priv);
- }
-
- spi_set_drvdata(spi, NULL);
-
- ds1343_sysfs_unregister(&spi->dev);
+ dev_pm_clear_wake_irq(&spi->dev);
return 0;
}
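Two things carry most of the ds1343 rework above: alarm state now lives only in the chip registers, read back on demand through regmap and serialized by rtc_lock()/rtc_unlock() instead of a private mutex, and the hand-rolled sysfs registration is replaced by an attribute group handed to rtc_add_group(). A condensed sketch of the sysfs side, using the two attributes from the hunk (the foo_ names are placeholders):

        static struct attribute *foo_attrs[] = {
                &dev_attr_glitch_filter.attr,
                &dev_attr_trickle_charger.attr,
                NULL
        };

        static const struct attribute_group foo_attr_group = {
                .attrs = foo_attrs,
        };

        /* in probe, before rtc_register_device() */
        res = rtc_add_group(priv->rtc, &foo_attr_group);
        if (res)
                dev_err(&spi->dev, "unable to create sysfs entries\n");

Because rtc_add_group() attaches the group to the rtc class device rather than the SPI device, the show/store callbacks now reach the driver data through dev_get_drvdata(dev->parent), as seen in the hunk.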
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index d392a7bfdd1c..7025cf3fb9f8 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -26,9 +26,15 @@
#define DS1347_DAY_REG 0x0B
#define DS1347_YEAR_REG 0x0D
#define DS1347_CONTROL_REG 0x0F
+#define DS1347_CENTURY_REG 0x13
#define DS1347_STATUS_REG 0x17
#define DS1347_CLOCK_BURST 0x3F
+#define DS1347_WP_BIT BIT(7)
+
+#define DS1347_NEOSC_BIT BIT(7)
+#define DS1347_OSF_BIT BIT(2)
+
static const struct regmap_range ds1347_ranges[] = {
{
.range_min = DS1347_SECONDS_REG,
@@ -43,35 +49,54 @@ static const struct regmap_access_table ds1347_access_table = {
static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
{
- struct spi_device *spi = to_spi_device(dev);
- struct regmap *map;
- int err;
+ struct regmap *map = dev_get_drvdata(dev);
+ unsigned int status, century, secs;
unsigned char buf[8];
+ int err;
- map = spi_get_drvdata(spi);
-
- err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
+ err = regmap_read(map, DS1347_STATUS_REG, &status);
if (err)
return err;
+ if (status & DS1347_OSF_BIT)
+ return -EINVAL;
+
+ do {
+ err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
+ if (err)
+ return err;
+
+ err = regmap_read(map, DS1347_CENTURY_REG, &century);
+ if (err)
+ return err;
+
+ err = regmap_read(map, DS1347_SECONDS_REG, &secs);
+ if (err)
+ return err;
+ } while (buf[0] != secs);
+
dt->tm_sec = bcd2bin(buf[0]);
- dt->tm_min = bcd2bin(buf[1]);
+ dt->tm_min = bcd2bin(buf[1] & 0x7f);
dt->tm_hour = bcd2bin(buf[2] & 0x3F);
dt->tm_mday = bcd2bin(buf[3]);
dt->tm_mon = bcd2bin(buf[4]) - 1;
dt->tm_wday = bcd2bin(buf[5]) - 1;
- dt->tm_year = bcd2bin(buf[6]) + 100;
+ dt->tm_year = (bcd2bin(century) * 100) + bcd2bin(buf[6]) - 1900;
return 0;
}
static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
{
- struct spi_device *spi = to_spi_device(dev);
- struct regmap *map;
+ struct regmap *map = dev_get_drvdata(dev);
+ unsigned int century;
unsigned char buf[8];
+ int err;
- map = spi_get_drvdata(spi);
+ err = regmap_update_bits(map, DS1347_STATUS_REG,
+ DS1347_NEOSC_BIT, DS1347_NEOSC_BIT);
+ if (err)
+ return err;
buf[0] = bin2bcd(dt->tm_sec);
buf[1] = bin2bcd(dt->tm_min);
@@ -79,16 +104,20 @@ static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
buf[3] = bin2bcd(dt->tm_mday);
buf[4] = bin2bcd(dt->tm_mon + 1);
buf[5] = bin2bcd(dt->tm_wday + 1);
+ buf[6] = bin2bcd(dt->tm_year % 100);
+ buf[7] = bin2bcd(0x00);
- /* year in linux is from 1900 i.e in range of 100
- in rtc it is from 00 to 99 */
- dt->tm_year = dt->tm_year % 100;
+ err = regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
+ if (err)
+ return err;
- buf[6] = bin2bcd(dt->tm_year);
- buf[7] = bin2bcd(0x00);
+ century = (dt->tm_year / 100) + 19;
+ err = regmap_write(map, DS1347_CENTURY_REG, century);
+ if (err)
+ return err;
- /* write the rtc settings */
- return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
+ return regmap_update_bits(map, DS1347_STATUS_REG,
+ DS1347_NEOSC_BIT | DS1347_OSF_BIT, 0);
}
static const struct rtc_class_ops ds1347_rtc_ops = {
@@ -101,8 +130,7 @@ static int ds1347_probe(struct spi_device *spi)
struct rtc_device *rtc;
struct regmap_config config;
struct regmap *map;
- unsigned int data;
- int res;
+ int err;
memset(&config, 0, sizeof(config));
config.reg_bits = 8;
@@ -125,36 +153,20 @@ static int ds1347_probe(struct spi_device *spi)
spi_set_drvdata(spi, map);
- /* RTC Settings */
- res = regmap_read(map, DS1347_SECONDS_REG, &data);
- if (res)
- return res;
-
/* Disable the write protect of rtc */
- regmap_read(map, DS1347_CONTROL_REG, &data);
- data = data & ~(1<<7);
- regmap_write(map, DS1347_CONTROL_REG, data);
-
- /* Enable the oscillator , disable the oscillator stop flag,
- and glitch filter to reduce current consumption */
- regmap_read(map, DS1347_STATUS_REG, &data);
- data = data & 0x1B;
- regmap_write(map, DS1347_STATUS_REG, data);
-
- /* display the settings */
- regmap_read(map, DS1347_CONTROL_REG, &data);
- dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
-
- regmap_read(map, DS1347_STATUS_REG, &data);
- dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
-
- rtc = devm_rtc_device_register(&spi->dev, "ds1347",
- &ds1347_rtc_ops, THIS_MODULE);
+ err = regmap_update_bits(map, DS1347_CONTROL_REG, DS1347_WP_BIT, 0);
+ if (err)
+ return err;
+ rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- return 0;
+ rtc->ops = &ds1347_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ rtc->range_max = RTC_TIMESTAMP_END_9999;
+
+ return rtc_register_device(rtc);
}
static struct spi_driver ds1347_driver = {
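The new ds1347_read_time() keeps re-reading until the clock is stable: after the burst read it fetches the seconds register again and retries whenever the value no longer matches the first byte of the burst, so the burst and the separately read century register cannot straddle a rollover. The guard pattern, sketched generically (register names are illustrative):

        do {
                err = regmap_bulk_read(map, CLOCK_BURST_REG, buf, sizeof(buf));
                if (err)
                        return err;

                err = regmap_read(map, SECONDS_REG, &secs);
                if (err)
                        return err;
        } while (buf[0] != secs);       /* retry if the time ticked mid-read */

Set-time takes the complementary precaution: DS1347_NEOSC_BIT stops the oscillator before the burst write and is cleared together with the oscillator-stop flag afterwards, so a half-written time is never left ticking.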
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 367497914c10..6e9ddcd03992 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -439,14 +439,13 @@ static void ds1374_wdt_ping(void)
static void ds1374_wdt_disable(void)
{
- int ret = -ENOIOCTLCMD;
int cr;
cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
/* Disable watchdog timer */
cr &= ~DS1374_REG_CR_WACE;
- ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
+ i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
}
/*
@@ -586,6 +585,7 @@ static const struct file_operations ds1374_wdt_fops = {
.owner = THIS_MODULE,
.read = ds1374_wdt_read,
.unlocked_ioctl = ds1374_wdt_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.write = ds1374_wdt_write,
.open = ds1374_wdt_open,
.release = ds1374_wdt_release,
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index b6a477519280..a63872c4c76d 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -414,7 +414,6 @@ static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
static int ds1511_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct rtc_plat_data *pdata;
int ret = 0;
struct nvmem_config ds1511_nvmem_cfg = {
@@ -431,8 +430,7 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ds1511_base = devm_ioremap_resource(&pdev->dev, res);
+ ds1511_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ds1511_base))
return PTR_ERR(ds1511_base);
pdata->ioaddr = ds1511_base;
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 219d6b520a69..cdf5e05b9489 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -249,7 +249,6 @@ static int ds1553_nvram_write(void *priv, unsigned int pos, void *val,
static int ds1553_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
unsigned int cen, sec;
struct rtc_plat_data *pdata;
void __iomem *ioaddr;
@@ -268,8 +267,7 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 184e4a3e2bef..56c670af2e50 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -31,7 +31,10 @@
/* ----------------------------------------------------------------------- */
-/* Standard read/write functions if platform does not provide overrides */
+/*
+ * Standard read/write functions;
+ * all registers are mapped in the CPU address space.
+ */
/**
* ds1685_read - read a value from an rtc register.
@@ -59,6 +62,35 @@ ds1685_write(struct ds1685_priv *rtc, int reg, u8 value)
}
/* ----------------------------------------------------------------------- */
+/*
+ * Indirect read/write functions;
+ * access happens via the address and data registers mapped in the CPU address space.
+ */
+
+/**
+ * ds1685_indirect_read - read a value from an rtc register.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @reg: the register address to read.
+ */
+static u8
+ds1685_indirect_read(struct ds1685_priv *rtc, int reg)
+{
+ writeb(reg, rtc->regs);
+ return readb(rtc->data);
+}
+
+/**
+ * ds1685_indirect_write - write a value to an rtc register.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @reg: the register address to write.
+ * @value: value to write to the register.
+ */
+static void
+ds1685_indirect_write(struct ds1685_priv *rtc, int reg, u8 value)
+{
+ writeb(reg, rtc->regs);
+ writeb(value, rtc->data);
+}
/* ----------------------------------------------------------------------- */
/* Inlined functions */
@@ -229,7 +261,7 @@ static int
ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
- u8 ctrlb, century;
+ u8 century;
u8 seconds, minutes, hours, wday, mday, month, years;
/* Fetch the time info from the RTC registers. */
@@ -242,7 +274,6 @@ ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
month = rtc->read(rtc, RTC_MONTH);
years = rtc->read(rtc, RTC_YEAR);
century = rtc->read(rtc, RTC_CENTURY);
- ctrlb = rtc->read(rtc, RTC_CTRL_B);
ds1685_rtc_end_data_access(rtc);
/* bcd2bin if needed, perform fixups, and store to rtc_time. */
@@ -723,7 +754,7 @@ static int
ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
- u8 ctrla, ctrlb, ctrlc, ctrld, ctrl4a, ctrl4b, ssn[8];
+ u8 ctrla, ctrlb, ctrld, ctrl4a, ctrl4b, ssn[8];
char *model;
/* Read all the relevant data from the control registers. */
@@ -731,7 +762,6 @@ ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
ds1685_rtc_get_ssn(rtc, ssn);
ctrla = rtc->read(rtc, RTC_CTRL_A);
ctrlb = rtc->read(rtc, RTC_CTRL_B);
- ctrlc = rtc->read(rtc, RTC_CTRL_C);
ctrld = rtc->read(rtc, RTC_CTRL_D);
ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
ctrl4b = rtc->read(rtc, RTC_EXT_CTRL_4B);
@@ -1009,7 +1039,7 @@ ds1685_rtc_sysfs_serial_show(struct device *dev,
}
static DEVICE_ATTR(serial, S_IRUGO, ds1685_rtc_sysfs_serial_show, NULL);
-/**
+/*
* struct ds1685_rtc_sysfs_misc_attrs - list for misc RTC features.
*/
static struct attribute*
@@ -1020,7 +1050,7 @@ ds1685_rtc_sysfs_misc_attrs[] = {
NULL,
};
-/**
+/*
* struct ds1685_rtc_sysfs_misc_grp - attr group for misc RTC features.
*/
static const struct attribute_group
@@ -1040,7 +1070,6 @@ static int
ds1685_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc_dev;
- struct resource *res;
struct ds1685_priv *rtc;
struct ds1685_rtc_platform_data *pdata;
u8 ctrla, ctrlb, hours;
@@ -1063,35 +1092,29 @@ ds1685_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- /*
- * Allocate/setup any IORESOURCE_MEM resources, if required. Not all
- * platforms put the RTC in an easy-access place. Like the SGI Octane,
- * which attaches the RTC to a "ByteBus", hooked to a SuperIO chip
- * that sits behind the IOC3 PCI metadevice.
- */
- if (pdata->alloc_io_resources) {
- /* Get the platform resources. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
- rtc->size = resource_size(res);
-
- /* Request a memory region. */
- /* XXX: mmio-only for now. */
- if (!devm_request_mem_region(&pdev->dev, res->start, rtc->size,
- pdev->name))
- return -EBUSY;
-
- /*
- * Set the base address for the rtc, and ioremap its
- * registers.
- */
- rtc->baseaddr = res->start;
- rtc->regs = devm_ioremap(&pdev->dev, res->start, rtc->size);
- if (!rtc->regs)
- return -ENOMEM;
+ /* Setup resources and access functions */
+ switch (pdata->access_type) {
+ case ds1685_reg_direct:
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+ rtc->read = ds1685_read;
+ rtc->write = ds1685_write;
+ break;
+ case ds1685_reg_indirect:
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+ rtc->data = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(rtc->data))
+ return PTR_ERR(rtc->data);
+ rtc->read = ds1685_indirect_read;
+ rtc->write = ds1685_indirect_write;
+ break;
}
- rtc->alloc_io_resources = pdata->alloc_io_resources;
+
+ if (!rtc->read || !rtc->write)
+ return -ENXIO;
/* Get the register step size. */
if (pdata->regstep > 0)
@@ -1099,24 +1122,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
else
rtc->regstep = 1;
- /* Platform read function, else default if mmio setup */
- if (pdata->plat_read)
- rtc->read = pdata->plat_read;
- else
- if (pdata->alloc_io_resources)
- rtc->read = ds1685_read;
- else
- return -ENXIO;
-
- /* Platform write function, else default if mmio setup */
- if (pdata->plat_write)
- rtc->write = pdata->plat_write;
- else
- if (pdata->alloc_io_resources)
- rtc->write = ds1685_write;
- else
- return -ENXIO;
-
/* Platform pre-shutdown function, if defined. */
if (pdata->plat_prepare_poweroff)
rtc->prepare_poweroff = pdata->plat_prepare_poweroff;
@@ -1271,7 +1276,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
/* See if the platform doesn't support UIE. */
if (pdata->uie_unsupported)
rtc_dev->uie_unsupported = 1;
- rtc->uie_unsupported = pdata->uie_unsupported;
rtc->dev = rtc_dev;
@@ -1351,7 +1355,7 @@ ds1685_rtc_remove(struct platform_device *pdev)
return 0;
}
-/**
+/*
* ds1685_rtc_driver - rtc driver properties.
*/
static struct platform_driver ds1685_rtc_driver = {
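ds1685_rtc_probe() now picks its accessors from pdata->access_type instead of taking raw function pointers through platform data: ds1685_reg_direct maps the whole register file and uses ds1685_read()/ds1685_write(), while ds1685_reg_indirect drives an address/data register pair through the two new indirect helpers. A board description selecting the indirect mode would look roughly like this (foo_rtc_pdata is illustrative; the fields shown are the ones used in the hunk):

        static struct ds1685_rtc_platform_data foo_rtc_pdata = {
                .access_type = ds1685_reg_indirect,
                .regstep     = 1,
        };

together with two IORESOURCE_MEM entries, resource 0 for the address register and resource 1 for the data register, matching the devm_platform_ioremap_resource(pdev, 0) and (pdev, 1) calls above.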
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index 77cca1392253..9f176bce48ba 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -71,7 +71,7 @@ static int em3027_get_time(struct device *dev, struct rtc_time *tm)
tm->tm_hour = bcd2bin(buf[2]);
tm->tm_mday = bcd2bin(buf[3]);
tm->tm_wday = bcd2bin(buf[4]);
- tm->tm_mon = bcd2bin(buf[5]);
+ tm->tm_mon = bcd2bin(buf[5]) - 1;
tm->tm_year = bcd2bin(buf[6]) + 100;
return 0;
@@ -94,7 +94,7 @@ static int em3027_set_time(struct device *dev, struct rtc_time *tm)
buf[3] = bin2bcd(tm->tm_hour);
buf[4] = bin2bcd(tm->tm_mday);
buf[5] = bin2bcd(tm->tm_wday);
- buf[6] = bin2bcd(tm->tm_mon);
+ buf[6] = bin2bcd(tm->tm_mon + 1);
buf[7] = bin2bcd(tm->tm_year % 100);
/* write time/date registers */
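The em3027 change fixes a month off-by-one: struct rtc_time keeps tm_mon in the range 0-11 while the chip stores BCD months 1-12, so the driver has to subtract one when reading and add one when writing. In isolation the two conversions are (month_reg stands for the month byte in the transfer buffer):

        tm->tm_mon = bcd2bin(month_reg) - 1;    /* chip 1-12 -> Linux 0-11 */
        month_reg  = bin2bcd(tm->tm_mon + 1);   /* Linux 0-11 -> chip 1-12 */

Without the offset a time read from the chip reports the month one too high, and a time written to it lands one month early.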
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 1766496385fe..8ec9ea1ca72e 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -122,15 +122,13 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
static int ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
- struct resource *res;
int err;
ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
if (!ep93xx_rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ep93xx_rtc->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ ep93xx_rtc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ep93xx_rtc->mmio_base))
return PTR_ERR(ep93xx_rtc->mmio_base);
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 8df2075af9a2..9e6e994cce99 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -180,10 +180,7 @@ static int ftm_rtc_alarm_irq_enable(struct device *dev,
*/
static int ftm_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct timespec64 ts64;
-
- ktime_get_real_ts64(&ts64);
- rtc_time_to_tm(ts64.tv_sec, tm);
+ rtc_time64_to_tm(ktime_get_real_seconds(), tm);
return 0;
}
@@ -206,16 +203,14 @@ static int ftm_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
*/
static int ftm_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
- struct rtc_time tm;
- unsigned long now, alm_time, cycle;
+ time64_t alm_time;
+ unsigned long long cycle;
struct ftm_rtc *rtc = dev_get_drvdata(dev);
- ftm_rtc_read_time(dev, &tm);
- rtc_tm_to_time(&tm, &now);
- rtc_tm_to_time(&alm->time, &alm_time);
+ alm_time = rtc_tm_to_time64(&alm->time);
ftm_clean_alarm(rtc);
- cycle = (alm_time - now) * rtc->alarm_freq;
+ cycle = (alm_time - ktime_get_real_seconds()) * rtc->alarm_freq;
if (cycle > MAX_COUNT_VAL) {
pr_err("Out of alarm range {0~262} seconds.\n");
return -ERANGE;
@@ -248,7 +243,6 @@ static const struct rtc_class_ops ftm_rtc_ops = {
static int ftm_rtc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
int irq;
int ret;
struct ftm_rtc *rtc;
@@ -265,13 +259,7 @@ static int ftm_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "cannot get resource for rtc\n");
- return -ENODEV;
- }
-
- rtc->base = devm_ioremap_resource(&pdev->dev, r);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base)) {
dev_err(&pdev->dev, "cannot ioremap resource for rtc\n");
return PTR_ERR(rtc->base);
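The fsl-ftm-alarm changes keep the arithmetic in 64-bit seconds: rtc_time64_to_tm()/rtc_tm_to_time64() plus ktime_get_real_seconds() replace the legacy rtc_time_to_tm()/rtc_tm_to_time() helpers, which work on unsigned long and overflow in 2038 on 32-bit systems. The alarm computation reduces to this (same logic as the hunk, names kept):

        time64_t alm_time = rtc_tm_to_time64(&alm->time);
        unsigned long long cycle;

        cycle = (alm_time - ktime_get_real_seconds()) * rtc->alarm_freq;
        if (cycle > MAX_COUNT_VAL)
                return -ERANGE;         /* FTM counter cannot count that far */

so the intermediate struct rtc_time round-trip for "now" disappears.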
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index 1a3420ee6a4d..cb6b0ad7ec3f 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -165,7 +165,6 @@ static const struct rtc_class_ops goldfish_rtc_ops = {
static int goldfish_rtc_probe(struct platform_device *pdev)
{
struct goldfish_rtc *rtcdrv;
- struct resource *r;
int err;
rtcdrv = devm_kzalloc(&pdev->dev, sizeof(*rtcdrv), GFP_KERNEL);
@@ -173,12 +172,7 @@ static int goldfish_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, rtcdrv);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- rtcdrv->base = devm_ioremap_resource(&pdev->dev, r);
+ rtcdrv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtcdrv->base))
return -ENODEV;
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 3089645e0ce8..18023e472cbc 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -307,7 +307,6 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_rtc *rtc;
- struct resource *mem;
const struct platform_device_id *id = platform_get_device_id(pdev);
const struct of_device_id *of_id = of_match_device(
jz4740_rtc_of_match, &pdev->dev);
@@ -326,8 +325,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (rtc->irq < 0)
return -ENOENT;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, mem);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
index a8bb15606ec8..00ef16ba9480 100644
--- a/drivers/rtc/rtc-lpc24xx.c
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -194,15 +194,13 @@ static const struct rtc_class_ops lpc24xx_rtc_ops = {
static int lpc24xx_rtc_probe(struct platform_device *pdev)
{
struct lpc24xx_rtc *rtc;
- struct resource *res;
int irq, ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->rtc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtc_base))
return PTR_ERR(rtc->rtc_base);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index ac393230e592..15d8abda81fe 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -185,7 +185,6 @@ static const struct rtc_class_ops lpc32xx_rtc_ops = {
static int lpc32xx_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct lpc32xx_rtc *rtc;
int err;
u32 tmp;
@@ -194,8 +193,7 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
if (unlikely(!rtc))
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->rtc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtc_base))
return PTR_ERR(rtc->rtc_base);
@@ -266,16 +264,6 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int lpc32xx_rtc_remove(struct platform_device *pdev)
-{
- struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
-
- if (rtc->irq >= 0)
- device_init_wakeup(&pdev->dev, 0);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int lpc32xx_rtc_suspend(struct device *dev)
{
@@ -357,7 +345,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
- .remove = lpc32xx_rtc_remove,
.driver = {
.name = "rtc-lpc32xx",
.pm = LPC32XX_RTC_PM_OPS,
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5f46f85f814b..9b70b371bd0c 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -235,9 +235,6 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned char buf[8];
int err, flags;
- if (tm->tm_year < 100 || tm->tm_year > 199)
- return -EINVAL;
-
buf[M41T80_REG_SSEC] = 0;
buf[M41T80_REG_SEC] = bin2bcd(tm->tm_sec);
buf[M41T80_REG_MIN] = bin2bcd(tm->tm_min);
@@ -705,7 +702,6 @@ static ssize_t wdt_read(struct file *file, char __user *buf,
/**
* wdt_ioctl:
- * @inode: inode of the device
* @file: file handle to the device
* @cmd: watchdog command
* @arg: argument pointer
@@ -840,6 +836,7 @@ static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.read = wdt_read,
.unlocked_ioctl = wdt_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.write = wdt_write,
.open = wdt_open,
.release = wdt_release,
@@ -925,6 +922,8 @@ static int m41t80_probe(struct i2c_client *client,
}
m41t80_data->rtc->ops = &m41t80_rtc_ops;
+ m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ m41t80_data->rtc->range_max = RTC_TIMESTAMP_END_2099;
if (client->irq <= 0) {
/* We cannot support UIE mode if we do not have an IRQ line */
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 59b54ed9b841..75a0e73071d8 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -218,7 +218,6 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
- struct resource *res;
unsigned char reg;
int err;
struct nvmem_config m48t86_nvmem_cfg = {
@@ -235,17 +234,11 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- info->index_reg = devm_ioremap_resource(&pdev->dev, res);
+ info->index_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->index_reg))
return PTR_ERR(info->index_reg);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -ENODEV;
- info->data_reg = devm_ioremap_resource(&pdev->dev, res);
+ info->data_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(info->data_reg))
return PTR_ERR(info->data_reg);
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 2ecd8752b088..df2829dd55ad 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -172,7 +172,20 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+#ifdef CONFIG_X86
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 0x17) ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
+ RTC_FREQ_SELECT);
+ save_freq_select &= ~RTC_DIV_RESET2;
+ } else
+ CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
+ RTC_FREQ_SELECT);
+#else
+ CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
+#endif
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index e08b981dfc21..47ebcf834cc2 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -131,7 +131,7 @@ static u32 meson_rtc_get_data(struct meson_rtc *rtc)
static int meson_rtc_get_bus(struct meson_rtc *rtc)
{
- int ret, retries = 3;
+ int ret, retries;
u32 val;
/* prepare bus for transfers, set all lines low */
@@ -292,7 +292,6 @@ static int meson_rtc_probe(struct platform_device *pdev)
};
struct device *dev = &pdev->dev;
struct meson_rtc *rtc;
- struct resource *res;
void __iomem *base;
int ret;
u32 tm;
@@ -312,8 +311,7 @@ static int meson_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &meson_rtc_ops;
rtc->rtc->range_max = U32_MAX;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index 1c2d3c4a4963..80e364baac53 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -88,28 +88,16 @@ static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
__raw_writel(val, &priv->regs[reg]);
}
-static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
- unsigned int reg)
-{
- msm6242_write(priv, msm6242_read(priv, reg) | val, reg);
-}
-
-static inline void msm6242_clear(struct msm6242_priv *priv, unsigned int val,
- unsigned int reg)
-{
- msm6242_write(priv, msm6242_read(priv, reg) & ~val, reg);
-}
-
static void msm6242_lock(struct msm6242_priv *priv)
{
int cnt = 5;
- msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_HOLD|MSM6242_CD_IRQ_FLAG, MSM6242_CD);
while ((msm6242_read(priv, MSM6242_CD) & MSM6242_CD_BUSY) && cnt) {
- msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_IRQ_FLAG, MSM6242_CD);
udelay(70);
- msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_HOLD|MSM6242_CD_IRQ_FLAG, MSM6242_CD);
cnt--;
}
@@ -120,7 +108,7 @@ static void msm6242_lock(struct msm6242_priv *priv)
static void msm6242_unlock(struct msm6242_priv *priv)
{
- msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_IRQ_FLAG, MSM6242_CD);
}
static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
@@ -133,7 +121,8 @@ static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
msm6242_read(priv, MSM6242_SECOND1);
tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 +
msm6242_read(priv, MSM6242_MINUTE1);
- tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10 & 3)) * 10 +
+ tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) &
+ MSM6242_HOUR10_HR_MASK) * 10 +
msm6242_read(priv, MSM6242_HOUR1);
tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 +
msm6242_read(priv, MSM6242_DAY1);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 704229eb0cac..5249fc99fd5f 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -4,69 +4,19 @@
* Author: Tianping.Fang <tianping.fang@mediatek.com>
*/
-#include <linux/delay.h>
-#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
-#include <linux/irqdomain.h>
-#include <linux/platform_device.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/mfd/mt6397/core.h>
-
-#define RTC_BBPU 0x0000
-#define RTC_BBPU_CBUSY BIT(6)
-
-#define RTC_WRTGR 0x003c
-
-#define RTC_IRQ_STA 0x0002
-#define RTC_IRQ_STA_AL BIT(0)
-#define RTC_IRQ_STA_LP BIT(3)
-
-#define RTC_IRQ_EN 0x0004
-#define RTC_IRQ_EN_AL BIT(0)
-#define RTC_IRQ_EN_ONESHOT BIT(2)
-#define RTC_IRQ_EN_LP BIT(3)
-#define RTC_IRQ_EN_ONESHOT_AL (RTC_IRQ_EN_ONESHOT | RTC_IRQ_EN_AL)
-
-#define RTC_AL_MASK 0x0008
-#define RTC_AL_MASK_DOW BIT(4)
-
-#define RTC_TC_SEC 0x000a
-/* Min, Hour, Dom... register offset to RTC_TC_SEC */
-#define RTC_OFFSET_SEC 0
-#define RTC_OFFSET_MIN 1
-#define RTC_OFFSET_HOUR 2
-#define RTC_OFFSET_DOM 3
-#define RTC_OFFSET_DOW 4
-#define RTC_OFFSET_MTH 5
-#define RTC_OFFSET_YEAR 6
-#define RTC_OFFSET_COUNT 7
-
-#define RTC_AL_SEC 0x0018
-
-#define RTC_PDN2 0x002e
-#define RTC_PDN2_PWRON_ALARM BIT(4)
-
-#define RTC_MIN_YEAR 1968
-#define RTC_BASE_YEAR 1900
-#define RTC_NUM_YEARS 128
-#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
-
-struct mt6397_rtc {
- struct device *dev;
- struct rtc_device *rtc_dev;
- struct mutex lock;
- struct regmap *regmap;
- int irq;
- u32 addr_base;
-};
+#include <linux/mfd/mt6397/rtc.h>
+#include <linux/mod_devicetable.h>
static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
{
- unsigned long timeout = jiffies + HZ;
int ret;
u32 data;
@@ -74,19 +24,13 @@ static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
if (ret < 0)
return ret;
- while (1) {
- ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_BBPU,
- &data);
- if (ret < 0)
- break;
- if (!(data & RTC_BBPU_CBUSY))
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- break;
- }
- cpu_relax();
- }
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->addr_base + RTC_BBPU, data,
+ !(data & RTC_BBPU_CBUSY),
+ MTK_RTC_POLL_DELAY_US,
+ MTK_RTC_POLL_TIMEOUT);
+ if (ret < 0)
+ dev_err(rtc->dev, "failed to write WRTGR: %d\n", ret);
return ret;
}
@@ -319,19 +263,19 @@ static int mtk_rtc_probe(struct platform_device *pdev)
return rtc->irq;
rtc->regmap = mt6397_chip->regmap;
- rtc->dev = &pdev->dev;
mutex_init(&rtc->lock);
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = devm_rtc_allocate_device(rtc->dev);
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
- ret = request_threaded_irq(rtc->irq, NULL,
- mtk_rtc_irq_handler_thread,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
- "mt6397-rtc", rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ mtk_rtc_irq_handler_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ "mt6397-rtc", rtc);
+
if (ret) {
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
rtc->irq, ret);
@@ -353,15 +297,6 @@ out_free_irq:
return ret;
}
-static int mtk_rtc_remove(struct platform_device *pdev)
-{
- struct mt6397_rtc *rtc = platform_get_drvdata(pdev);
-
- free_irq(rtc->irq, rtc);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int mt6397_rtc_suspend(struct device *dev)
{
@@ -388,6 +323,7 @@ static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_rtc_suspend,
mt6397_rtc_resume);
static const struct of_device_id mt6397_rtc_of_match[] = {
+ { .compatible = "mediatek,mt6323-rtc", },
{ .compatible = "mediatek,mt6397-rtc", },
{ }
};
@@ -400,7 +336,6 @@ static struct platform_driver mtk_rtc_driver = {
.pm = &mt6397_pm_ops,
},
.probe = mtk_rtc_probe,
- .remove = mtk_rtc_remove,
};
module_platform_driver(mtk_rtc_driver);
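The busy-wait on RTC_BBPU_CBUSY is replaced by regmap_read_poll_timeout(), which repeatedly reads a regmap register until a condition on the read value holds or a timeout expires, returning -ETIMEDOUT in the latter case. The call in the hunk, annotated:

        /* poll BBPU until the CBUSY bit clears, sleeping between reads */
        ret = regmap_read_poll_timeout(rtc->regmap,
                                       rtc->addr_base + RTC_BBPU, data,
                                       !(data & RTC_BBPU_CBUSY),
                                       MTK_RTC_POLL_DELAY_US,
                                       MTK_RTC_POLL_TIMEOUT);

Since the macro handles the deadline and the relaxation between reads itself, the jiffies bookkeeping and the cpu_relax() loop can go; the delay and timeout constants are expected to come from the new mt6397/rtc.h header pulled in at the top of the file.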
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
index 16bd26b5aa6f..f1e356394814 100644
--- a/drivers/rtc/rtc-mt7622.c
+++ b/drivers/rtc/rtc-mt7622.c
@@ -303,7 +303,6 @@ MODULE_DEVICE_TABLE(of, mtk_rtc_match);
static int mtk_rtc_probe(struct platform_device *pdev)
{
struct mtk_rtc *hw;
- struct resource *res;
int ret;
hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
@@ -312,8 +311,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->base = devm_ioremap_resource(&pdev->dev, res);
+ hw->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->base))
return PTR_ERR(hw->base);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index ab9db57a6834..d5f190e578e4 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -212,7 +212,6 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
static int __init mv_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct rtc_plat_data *pdata;
u32 rtc_time;
int ret = 0;
@@ -221,8 +220,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index a2941c875a06..988a4dfcfaf8 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -727,7 +727,6 @@ static struct nvmem_config omap_rtc_nvmem_config = {
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
- struct resource *res;
u8 reg, mask, new_ctrl;
const struct platform_device_id *id_entry;
const struct of_device_id *of_id;
@@ -764,8 +763,7 @@ static int omap_rtc_probe(struct platform_device *pdev)
if (!IS_ERR(rtc->clk))
clk_prepare_enable(rtc->clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base)) {
clk_disable_unprepare(rtc->clk);
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 02b069caffd5..ba5baaca47be 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -417,6 +417,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
const char *name, bool has_nvmem)
{
struct pcf2127 *pcf2127;
+ u32 wdd_timeout;
int ret = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -459,7 +460,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
/*
* Watchdog timer enabled and reset pin /RST activated when timed out.
* Select 1Hz clock source for watchdog timer.
- * Timer is not started until WD_VAL is loaded with a valid value.
* Note: Countdown timer disabled and not available.
*/
ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_WD_CTL,
@@ -475,6 +475,14 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return ret;
}
+ /* Test if watchdog timer is started by bootloader */
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
+ if (ret)
+ return ret;
+
+ if (wdd_timeout)
+ set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+
#ifdef CONFIG_WATCHDOG
ret = devm_watchdog_register_device(dev, &pcf2127->wdd);
if (ret)
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 2f435e533b10..b24c908f5f06 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -35,10 +35,6 @@
#define REG_OFFSET 0x0e
#define REG_OFFSET_MODE BIT(7)
-struct pcf8523 {
- struct rtc_device *rtc;
-};
-
static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep)
{
struct i2c_msg msgs[2];
@@ -345,16 +341,12 @@ static const struct rtc_class_ops pcf8523_rtc_ops = {
static int pcf8523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pcf8523 *pcf;
+ struct rtc_device *rtc;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
- if (!pcf)
- return -ENOMEM;
-
err = pcf8523_load_capacitance(client);
if (err < 0)
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
@@ -364,12 +356,10 @@ static int pcf8523_probe(struct i2c_client *client,
if (err < 0)
return err;
- pcf->rtc = devm_rtc_device_register(&client->dev, DRIVER_NAME,
+ rtc = devm_rtc_device_register(&client->dev, DRIVER_NAME,
&pcf8523_rtc_ops, THIS_MODULE);
- if (IS_ERR(pcf->rtc))
- return PTR_ERR(pcf->rtc);
-
- i2c_set_clientdata(client, pcf);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 24baa4767b11..3c322f3079b0 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -390,7 +390,7 @@ static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
#define clkout_hw_to_pcf8563(_hw) container_of(_hw, struct pcf8563, clkout_hw)
-static int clkout_rates[] = {
+static const int clkout_rates[] = {
32768,
1024,
32,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 17653ed52ebb..2b6946744654 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -298,7 +298,6 @@ static int pic32_rtc_remove(struct platform_device *pdev)
static int pic32_rtc_probe(struct platform_device *pdev)
{
struct pic32_rtc_dev *pdata;
- struct resource *res;
int ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
@@ -311,8 +310,7 @@ static int pic32_rtc_probe(struct platform_device *pdev)
if (pdata->alarm_irq < 0)
return pdata->alarm_irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base))
return PTR_ERR(pdata->reg_base);
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index f5a30e0f16c2..07ea1be3abb9 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -49,7 +49,7 @@ struct pm8xxx_rtc_regs {
* @regmap: regmap used to access RTC registers
* @allow_set_time: indicates whether writing to the RTC is allowed
* @rtc_alarm_irq: rtc alarm irq number.
- * @ctrl_reg: rtc control register.
+ * @regs: rtc registers description.
* @rtc_dev: device structure.
* @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
*/
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
index 2498278853af..aaf1b95e3990 100644
--- a/drivers/rtc/rtc-r7301.c
+++ b/drivers/rtc/rtc-r7301.c
@@ -354,21 +354,16 @@ static void rtc7301_init(struct rtc7301_priv *priv)
static int __init rtc7301_rtc_probe(struct platform_device *dev)
{
- struct resource *res;
void __iomem *regs;
struct rtc7301_priv *priv;
struct rtc_device *rtc;
int ret;
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- regs = devm_ioremap_resource(&dev->dev, res);
+ regs = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/rtc/rtc-rtd119x.c b/drivers/rtc/rtc-rtd119x.c
index b233559d950b..bb98f2d574a5 100644
--- a/drivers/rtc/rtc-rtd119x.c
+++ b/drivers/rtc/rtc-rtd119x.c
@@ -167,7 +167,6 @@ static const struct of_device_id rtd119x_rtc_dt_ids[] = {
static int rtd119x_rtc_probe(struct platform_device *pdev)
{
struct rtd119x_rtc *data;
- struct resource *res;
u32 val;
int ret;
@@ -178,8 +177,7 @@ static int rtd119x_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
data->base_year = 2014;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 2b316661a578..6b7b3a69601a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/clk-provider.h>
#include <linux/bcd.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
@@ -52,6 +53,11 @@
#define RV3028_STATUS_CLKF BIT(6)
#define RV3028_STATUS_EEBUSY BIT(7)
+#define RV3028_CLKOUT_FD_MASK GENMASK(2, 0)
+#define RV3028_CLKOUT_PORIE BIT(3)
+#define RV3028_CLKOUT_CLKSY BIT(6)
+#define RV3028_CLKOUT_CLKOE BIT(7)
+
#define RV3028_CTRL1_EERD BIT(3)
#define RV3028_CTRL1_WADA BIT(5)
@@ -84,6 +90,9 @@ struct rv3028_data {
struct regmap *regmap;
struct rtc_device *rtc;
enum rv3028_type type;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
};
static u16 rv3028_trickle_resistors[] = {1000, 3000, 6000, 11000};
@@ -581,6 +590,140 @@ restore_eerd:
return ret;
}
+#ifdef CONFIG_COMMON_CLK
+#define clkout_hw_to_rv3028(hw) container_of(hw, struct rv3028_data, clkout_hw)
+
+static int clkout_rates[] = {
+ 32768,
+ 8192,
+ 1024,
+ 64,
+ 32,
+ 1,
+};
+
+static unsigned long rv3028_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ int clkout, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_read(rv3028->regmap, RV3028_CLKOUT, &clkout);
+ if (ret < 0)
+ return 0;
+
+ clkout &= RV3028_CLKOUT_FD_MASK;
+ return clkout_rates[clkout];
+}
+
+static long rv3028_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int rv3028_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_write(rv3028->regmap, RV3028_CLKOUT, 0x0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) {
+ if (clkout_rates[i] == rate) {
+ ret = regmap_update_bits(rv3028->regmap,
+ RV3028_CLKOUT,
+ RV3028_CLKOUT_FD_MASK, i);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(rv3028->regmap, RV3028_CLKOUT,
+ RV3028_CLKOUT_CLKSY | RV3028_CLKOUT_CLKOE);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int rv3028_clkout_prepare(struct clk_hw *hw)
+{
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ return regmap_write(rv3028->regmap, RV3028_CLKOUT,
+ RV3028_CLKOUT_CLKSY | RV3028_CLKOUT_CLKOE);
+}
+
+static void rv3028_clkout_unprepare(struct clk_hw *hw)
+{
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ regmap_write(rv3028->regmap, RV3028_CLKOUT, 0x0);
+ regmap_update_bits(rv3028->regmap, RV3028_STATUS,
+ RV3028_STATUS_CLKF, 0);
+}
+
+static int rv3028_clkout_is_prepared(struct clk_hw *hw)
+{
+ int clkout, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_read(rv3028->regmap, RV3028_CLKOUT, &clkout);
+ if (ret < 0)
+ return ret;
+
+ return !!(clkout & RV3028_CLKOUT_CLKOE);
+}
+
+static const struct clk_ops rv3028_clkout_ops = {
+ .prepare = rv3028_clkout_prepare,
+ .unprepare = rv3028_clkout_unprepare,
+ .is_prepared = rv3028_clkout_is_prepared,
+ .recalc_rate = rv3028_clkout_recalc_rate,
+ .round_rate = rv3028_clkout_round_rate,
+ .set_rate = rv3028_clkout_set_rate,
+};
+
+static int rv3028_clkout_register_clk(struct rv3028_data *rv3028,
+ struct i2c_client *client)
+{
+ int ret;
+ struct clk *clk;
+ struct clk_init_data init;
+ struct device_node *node = client->dev.of_node;
+
+ ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS,
+ RV3028_STATUS_CLKF, 0);
+ if (ret < 0)
+ return ret;
+
+ init.name = "rv3028-clkout";
+ init.ops = &rv3028_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ rv3028->clkout_hw.init = &init;
+
+ /* optional override of the clockname */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&client->dev, &rv3028->clkout_hw);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return 0;
+}
+#endif
+
static struct rtc_class_ops rv3028_rtc_ops = {
.read_time = rv3028_get_time,
.set_time = rv3028_set_time,
@@ -708,6 +851,9 @@ static int rv3028_probe(struct i2c_client *client)
rv3028->rtc->max_user_freq = 1;
+#ifdef CONFIG_COMMON_CLK
+ rv3028_clkout_register_clk(rv3028, client);
+#endif
return 0;
}
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 71e20a6bd387..3a9eb7043f01 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the Epson RTC module RX-6110 SA
*
* Copyright(C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright(C) SEIKO EPSON CORPORATION 2013. All rights reserved.
- *
- * This driver software is distributed as is, without any warranty of any kind,
- * either express or implied as further specified in the GNU Public License.
- * This software may be used and distributed according to the terms of the GNU
- * Public License, version 2 as published by the Free Software Foundation.
- * See the file COPYING in the main directory of this archive for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bcd.h>
@@ -370,11 +362,6 @@ static int rx6110_probe(struct spi_device *spi)
return 0;
}
-static int rx6110_remove(struct spi_device *spi)
-{
- return 0;
-}
-
static const struct spi_device_id rx6110_id[] = {
{ "rx6110", 0 },
{ }
@@ -393,7 +380,6 @@ static struct spi_driver rx6110_driver = {
.of_match_table = of_match_ptr(rx6110_spi_of_match),
},
.probe = rx6110_probe,
- .remove = rx6110_remove,
.id_table = rx6110_id,
};
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index da34cfd70f95..03672a246356 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -423,8 +423,6 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
.ioctl = s35390a_rtc_ioctl,
};
-static struct i2c_driver s35390a_driver;
-
static int s35390a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -456,6 +454,10 @@ static int s35390a_probe(struct i2c_client *client,
}
}
+ s35390a->rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(s35390a->rtc))
+ return PTR_ERR(s35390a->rtc);
+
err_read = s35390a_read_status(s35390a, &status1);
if (err_read < 0) {
dev_err(dev, "error resetting chip\n");
@@ -485,11 +487,9 @@ static int s35390a_probe(struct i2c_client *client,
device_set_wakeup_capable(dev, 1);
- s35390a->rtc = devm_rtc_device_register(dev, s35390a_driver.driver.name,
- &s35390a_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(s35390a->rtc))
- return PTR_ERR(s35390a->rtc);
+ s35390a->rtc->ops = &s35390a_rtc_ops;
+ s35390a->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ s35390a->rtc->range_max = RTC_TIMESTAMP_END_2099;
/* supports per-minute alarms only, therefore set uie_unsupported */
s35390a->rtc->uie_unsupported = 1;
@@ -497,7 +497,7 @@ static int s35390a_probe(struct i2c_client *client,
if (status1 & S35390A_FLAG_INT2)
rtc_update_irq(s35390a->rtc, 1, RTC_AF);
- return 0;
+ return rtc_register_device(s35390a->rtc);
}
static struct i2c_driver s35390a_driver = {
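For reference, a minimal sketch of the two-step registration pattern these hunks switch to (the driver, ops table, and function names are illustrative, not from the patch):

	static int example_rtc_probe(struct i2c_client *client)
	{
		struct rtc_device *rtc;

		/* allocate early so probe can still bail out before touching hardware */
		rtc = devm_rtc_allocate_device(&client->dev);
		if (IS_ERR(rtc))
			return PTR_ERR(rtc);

		/* describe the hardware: callbacks plus the supported time range */
		rtc->ops = &example_rtc_ops;
		rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
		rtc->range_max = RTC_TIMESTAMP_END_2099;

		/* expose the device only once it is fully set up */
		return rtc_register_device(rtc);
	}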
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7801249c254b..e1b50e682fc4 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -444,7 +444,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
{
struct s3c_rtc *info = NULL;
struct rtc_time rtc_tm;
- struct resource *res;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -475,8 +474,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
info->irq_tick, info->irq_alarm);
/* get the memory region */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->base = devm_ioremap_resource(&pdev->dev, res);
+ info->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->base))
return PTR_ERR(info->base);
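The same mechanical conversion repeats in most of the probe functions below. Roughly, and as a sketch of the helper's shape rather than a copy of its source, devm_platform_ioremap_resource() folds the two calls it replaces into one:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		/* look up the MEM resource and map it with device-managed lifetime */
		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}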
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 86fa723b3b76..d37893f6eaee 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -252,7 +252,6 @@ EXPORT_SYMBOL_GPL(sa1100_rtc_init);
static int sa1100_rtc_probe(struct platform_device *pdev)
{
struct sa1100_rtc *info;
- struct resource *iores;
void __iomem *base;
int irq_1hz, irq_alarm;
int ret;
@@ -281,8 +280,7 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
return ret;
}
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, iores);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index b95676899750..36810dd40cd3 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -661,12 +661,6 @@ static int sprd_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int sprd_rtc_remove(struct platform_device *pdev)
-{
- device_init_wakeup(&pdev->dev, 0);
- return 0;
-}
-
static const struct of_device_id sprd_rtc_of_match[] = {
{ .compatible = "sprd,sc2731-rtc", },
{ },
@@ -679,7 +673,6 @@ static struct platform_driver sprd_rtc_driver = {
.of_match_table = sprd_rtc_of_match,
},
.probe = sprd_rtc_probe,
- .remove = sprd_rtc_remove,
};
module_platform_driver(sprd_rtc_driver);
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index c759c55359a1..a2c9c55667cd 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -365,13 +365,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int sirfsoc_rtc_remove(struct platform_device *pdev)
-{
- device_init_wakeup(&pdev->dev, 0);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int sirfsoc_rtc_suspend(struct device *dev)
{
@@ -450,7 +443,6 @@ static struct platform_driver sirfsoc_rtc_driver = {
.of_match_table = sirfsoc_rtc_of_match,
},
.probe = sirfsoc_rtc_probe,
- .remove = sirfsoc_rtc_remove,
};
module_platform_driver(sirfsoc_rtc_driver);
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 9f23b24f466c..833daeb7b60e 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -347,7 +347,6 @@ static const struct rtc_class_ops spear_rtc_ops = {
static int spear_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct spear_rtc_config *config;
int status = 0;
int irq;
@@ -369,8 +368,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
return status;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- config->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ config->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(config->ioaddr))
return PTR_ERR(config->ioaddr);
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 49474a31c66d..51041dc08af4 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -41,7 +41,6 @@
struct st_rtc {
struct rtc_device *rtc_dev;
struct rtc_wkalrm alarm;
- struct resource *res;
struct clk *clk;
unsigned long clkrate;
void __iomem *ioaddr;
@@ -186,7 +185,6 @@ static int st_rtc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct st_rtc *rtc;
- struct resource *res;
uint32_t mode;
int ret = 0;
@@ -210,8 +208,7 @@ static int st_rtc_probe(struct platform_device *pdev)
spin_lock_init(&rtc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ rtc->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->ioaddr))
return PTR_ERR(rtc->ioaddr);
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index a833ebc4ecb9..01a45044f468 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -256,7 +256,6 @@ static int stk17ta8_nvram_write(void *priv, unsigned int pos, void *val,
static int stk17ta8_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
unsigned int cal;
unsigned int flags;
struct rtc_plat_data *pdata;
@@ -275,8 +274,7 @@ static int stk17ta8_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 2999e33a7e37..781cabb2afca 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -693,15 +693,13 @@ static int stm32_rtc_probe(struct platform_device *pdev)
{
struct stm32_rtc *rtc;
const struct stm32_rtc_registers *regs;
- struct resource *res;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 5e2bd9f1d01e..8dcd20b34dde 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -136,7 +136,6 @@ struct sun6i_rtc_clk_data {
struct sun6i_rtc_dev {
struct rtc_device *rtc;
- struct device *dev;
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
@@ -669,7 +668,6 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
return -ENODEV;
platform_set_drvdata(pdev, chip);
- chip->dev = &pdev->dev;
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index 9b6f2483c1c6..f5d7f44550ce 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -422,7 +422,6 @@ MODULE_DEVICE_TABLE(of, sunxi_rtc_dt_ids);
static int sunxi_rtc_probe(struct platform_device *pdev)
{
struct sunxi_rtc_dev *chip;
- struct resource *res;
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
@@ -436,8 +435,7 @@ static int sunxi_rtc_probe(struct platform_device *pdev)
if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(&pdev->dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 69d695bf9500..7fbb1741692f 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -103,7 +103,7 @@ static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct tegra_rtc_info *info = dev_get_drvdata(dev);
unsigned long flags;
- u32 sec, msec;
+ u32 sec;
/*
* RTC hardware copies seconds to shadow seconds when a read of
@@ -111,7 +111,7 @@ static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
spin_lock_irqsave(&info->lock, flags);
- msec = readl(info->base + TEGRA_RTC_REG_MILLI_SECONDS);
+ readl(info->base + TEGRA_RTC_REG_MILLI_SECONDS);
sec = readl(info->base + TEGRA_RTC_REG_SHADOW_SECONDS);
spin_unlock_irqrestore(&info->lock, flags);
@@ -277,15 +277,13 @@ MODULE_DEVICE_TABLE(of, tegra_rtc_dt_match);
static int tegra_rtc_probe(struct platform_device *pdev)
{
struct tegra_rtc_info *info;
- struct resource *res;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->base = devm_ioremap_resource(&pdev->dev, res);
+ info->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->base))
return PTR_ERR(info->base);
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 2c0467a9e717..e3840386f430 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -361,6 +361,13 @@ static const struct rtc_class_ops tps65910_rtc_ops = {
.set_offset = tps65910_set_offset,
};
+static const struct rtc_class_ops tps65910_rtc_ops_noirq = {
+ .read_time = tps65910_rtc_read_time,
+ .set_time = tps65910_rtc_set_time,
+ .read_offset = tps65910_read_offset,
+ .set_offset = tps65910_set_offset,
+};
+
static int tps65910_rtc_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = NULL;
@@ -414,14 +421,16 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
dev_name(&pdev->dev), &pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "IRQ is not free.\n");
- return ret;
- }
+ if (ret < 0)
+ irq = -1;
+
tps_rtc->irq = irq;
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (irq != -1) {
+ device_set_wakeup_capable(&pdev->dev, 1);
+ tps_rtc->rtc->ops = &tps65910_rtc_ops;
+ } else
+ tps_rtc->rtc->ops = &tps65910_rtc_ops_noirq;
- tps_rtc->rtc->ops = &tps65910_rtc_ops;
tps_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
tps_rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 5a29915a06ec..715b82981279 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -236,7 +236,6 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct tx4939rtc_plat_data *pdata;
- struct resource *res;
int irq, ret;
struct nvmem_config nvmem_cfg = {
.name = "tx4939_nvram",
@@ -253,8 +252,7 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->rtcreg = devm_ioremap_resource(&pdev->dev, res);
+ pdata->rtcreg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->rtcreg))
return PTR_ERR(pdata->rtcreg);
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 63ffba21397b..d2da92187d56 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -284,7 +284,6 @@ static int rtc_probe(struct platform_device *pdev)
struct v3020 *chip;
int retval = -EBUSY;
int i;
- int temp;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -302,7 +301,7 @@ static int rtc_probe(struct platform_device *pdev)
/* Make sure the v3020 expects a communication cycle
* by reading 8 times */
for (i = 0; i < 8; i++)
- temp = chip->ops->read_bit(chip);
+ chip->ops->read_bit(chip);
/* Test chip by doing a write/read sequence
* to the chip ram */
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c75230562c0d..c3671043ace7 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*/
+#include <linux/compat.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -66,6 +67,9 @@ static void __iomem *rtc2_base;
#define rtc2_read(offset) readw(rtc2_base + (offset))
#define rtc2_write(offset, value) writew((value), rtc2_base + (offset))
+/* 32-bit compat for ioctls that nobody else uses */
+#define RTC_EPOCH_READ32 _IOR('p', 0x0d, __u32)
+
static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
static DEFINE_SPINLOCK(rtc_lock);
@@ -179,6 +183,10 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
switch (cmd) {
case RTC_EPOCH_READ:
return put_user(epoch, (unsigned long __user *)arg);
+#ifdef CONFIG_64BIT
+ case RTC_EPOCH_READ32:
+ return put_user(epoch, (unsigned int __user *)arg);
+#endif
case RTC_EPOCH_SET:
/* Doesn't support before 1900 */
if (arg < 1900)
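The separate command number is needed because _IOR() encodes the size of its argument type; a brief illustration (the native definition is quoted from the RTC UAPI header, the rest is commentary):

	/* A 64-bit kernel computes RTC_EPOCH_READ with an 8-byte unsigned long,
	 * while a 32-bit process computes it with a 4-byte one, so the two sides
	 * end up with different command values for the same request. Accepting
	 * the 32-bit spelling and answering it with a __u32-sized put_user()
	 * keeps old 32-bit binaries working on a 64-bit kernel.
	 */
	#define RTC_EPOCH_READ		_IOR('p', 0x0d, unsigned long)	/* native */
	#define RTC_EPOCH_READ32	_IOR('p', 0x0d, __u32)		/* compat */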
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index d5d14cf86e0d..e2588625025f 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -122,12 +122,6 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
- if (tm->tm_year < 100) {
- dev_warn(dev, "Only years 2000-2199 are supported by the "
- "hardware!\n");
- return -EINVAL;
- }
-
writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)
| (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
| (bin2bcd(tm->tm_mday))
@@ -200,7 +194,6 @@ static const struct rtc_class_ops vt8500_rtc_ops = {
static int vt8500_rtc_probe(struct platform_device *pdev)
{
struct vt8500_rtc *vt8500_rtc;
- struct resource *res;
int ret;
vt8500_rtc = devm_kzalloc(&pdev->dev,
@@ -215,8 +208,7 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
if (vt8500_rtc->irq_alarm < 0)
return vt8500_rtc->irq_alarm;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vt8500_rtc->regbase = devm_ioremap_resource(&pdev->dev, res);
+ vt8500_rtc->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vt8500_rtc->regbase))
return PTR_ERR(vt8500_rtc->regbase);
@@ -224,27 +216,23 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
writel(VT8500_RTC_CR_ENABLE,
vt8500_rtc->regbase + VT8500_RTC_CR);
- vt8500_rtc->rtc = devm_rtc_device_register(&pdev->dev, "vt8500-rtc",
- &vt8500_rtc_ops, THIS_MODULE);
- if (IS_ERR(vt8500_rtc->rtc)) {
- ret = PTR_ERR(vt8500_rtc->rtc);
- dev_err(&pdev->dev,
- "Failed to register RTC device -> %d\n", ret);
- goto err_return;
- }
+ vt8500_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(vt8500_rtc->rtc))
+ return PTR_ERR(vt8500_rtc->rtc);
+
+ vt8500_rtc->rtc->ops = &vt8500_rtc_ops;
+ vt8500_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ vt8500_rtc->rtc->range_max = RTC_TIMESTAMP_END_2199;
ret = devm_request_irq(&pdev->dev, vt8500_rtc->irq_alarm,
vt8500_rtc_irq, 0, "rtc alarm", vt8500_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
vt8500_rtc->irq_alarm, ret);
- goto err_return;
+ return ret;
}
- return 0;
-
-err_return:
- return ret;
+ return rtc_register_device(vt8500_rtc->rtc);
}
static int vt8500_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c
index 8ad4c4e6d557..ff46066a68a4 100644
--- a/drivers/rtc/rtc-wilco-ec.c
+++ b/drivers/rtc/rtc-wilco-ec.c
@@ -110,10 +110,12 @@ static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
tm->tm_mday = rtc.day;
tm->tm_mon = rtc.month - 1;
tm->tm_year = rtc.year + (rtc.century * 100) - 1900;
- tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+ /* Ignore other tm fields, man rtc says userspace shouldn't use them. */
- /* Don't compute day of week, we don't need it. */
- tm->tm_wday = -1;
+ if (rtc_valid_tm(tm)) {
+ dev_err(dev, "Time from RTC is invalid: %ptRr\n", tm);
+ return -EIO;
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 9683fbf7c78d..96db441f92b3 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -34,7 +34,6 @@
struct xgene_rtc_dev {
struct rtc_device *rtc;
- struct device *dev;
void __iomem *csr_base;
struct clk *clk;
unsigned int irq_wake;
@@ -137,7 +136,6 @@ static irqreturn_t xgene_rtc_interrupt(int irq, void *id)
static int xgene_rtc_probe(struct platform_device *pdev)
{
struct xgene_rtc_dev *pdata;
- struct resource *res;
int ret;
int irq;
@@ -145,10 +143,8 @@ static int xgene_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
- pdata->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->csr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->csr_base))
return PTR_ERR(pdata->csr_base);
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 2c762757fb54..539690568298 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -44,7 +44,7 @@ struct xlnx_rtc_dev {
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- int calibval;
+ unsigned int calibval;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -195,7 +195,6 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
static int xlnx_rtc_probe(struct platform_device *pdev)
{
struct xlnx_rtc_dev *xrtcdev;
- struct resource *res;
int ret;
xrtcdev = devm_kzalloc(&pdev->dev, sizeof(*xrtcdev), GFP_KERNEL);
@@ -211,9 +210,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
xrtcdev->rtc->ops = &xlnx_rtc_ops;
xrtcdev->rtc->range_max = U32_MAX;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- xrtcdev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ xrtcdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xrtcdev->reg_base))
return PTR_ERR(xrtcdev->reg_base);
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index be3531e7f868..b7ca7d79fb28 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -103,8 +103,11 @@ static DEVICE_ATTR_RW(max_user_freq);
/**
* rtc_sysfs_show_hctosys - indicate if the given RTC set the system time
+ * @dev: The device that the attribute belongs to.
+ * @attr: The attribute being read.
+ * @buf: The result buffer.
*
- * Returns 1 if the system clock was set by this RTC at the last
+ * buf is "1" if the system clock was set by this RTC at the last
* boot or resume event.
*/
static ssize_t
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index ea4253939555..8abb42923307 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -341,14 +341,14 @@ tapechar_release(struct inode *inode, struct file *filp)
*/
static int
__tapechar_ioctl(struct tape_device *device,
- unsigned int no, unsigned long data)
+ unsigned int no, void __user *data)
{
int rc;
if (no == MTIOCTOP) {
struct mtop op;
- if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
+ if (copy_from_user(&op, data, sizeof(op)) != 0)
return -EFAULT;
if (op.mt_count < 0)
return -EINVAL;
@@ -392,9 +392,7 @@ __tapechar_ioctl(struct tape_device *device,
if (rc < 0)
return rc;
pos.mt_blkno = rc;
- if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
- return -EFAULT;
- return 0;
+ return put_user_mtpos(data, &pos);
}
if (no == MTIOCGET) {
/* MTIOCGET: query the tape drive status. */
@@ -424,15 +422,12 @@ __tapechar_ioctl(struct tape_device *device,
get.mt_blkno = rc;
}
- if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
- return -EFAULT;
-
- return 0;
+ return put_user_mtget(data, &get);
}
/* Try the discipline ioctl function. */
if (device->discipline->ioctl_fn == NULL)
return -EINVAL;
- return device->discipline->ioctl_fn(device, no, data);
+ return device->discipline->ioctl_fn(device, no, (unsigned long)data);
}
static long
@@ -445,7 +440,7 @@ tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
device = (struct tape_device *) filp->private_data;
mutex_lock(&device->mutex);
- rc = __tapechar_ioctl(device, no, data);
+ rc = __tapechar_ioctl(device, no, (void __user *)data);
mutex_unlock(&device->mutex);
return rc;
}
@@ -455,23 +450,17 @@ static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
- int rval = -ENOIOCTLCMD;
- unsigned long argp;
+ long rc;
- /* The 'arg' argument of any ioctl function may only be used for
- * pointers because of the compat pointer conversion.
- * Consider this when adding new ioctls.
- */
- argp = (unsigned long) compat_ptr(data);
- if (device->discipline->ioctl_fn) {
- mutex_lock(&device->mutex);
- rval = device->discipline->ioctl_fn(device, no, argp);
- mutex_unlock(&device->mutex);
- if (rval == -EINVAL)
- rval = -ENOIOCTLCMD;
- }
+ if (no == MTIOCPOS32)
+ no = MTIOCPOS;
+ else if (no == MTIOCGET32)
+ no = MTIOCGET;
- return rval;
+ mutex_lock(&device->mutex);
+ rc = __tapechar_ioctl(device, no, compat_ptr(data));
+ mutex_unlock(&device->mutex);
+ return rc;
}
#endif /* CONFIG_COMPAT */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index f34ee41cbed8..4f4dd9d727c9 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -61,6 +61,7 @@ struct error_hdr {
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
@@ -91,6 +92,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
case REP82_ERROR_INVALID_SPECIAL_CMD:
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index 9dda431ec8f3..352056eb0dd1 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -5,6 +5,6 @@
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
- zfcp_unit.o
+ zfcp_unit.o zfcp_diag.o
obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e390f8c6d5f3..09ec846fe01d 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
/*
@@ -25,6 +25,7 @@
* Martin Petermann
* Sven Schuetz
* Steffen Maier
+ * Benjamin Block
*/
#define KMSG_COMPONENT "zfcp"
@@ -36,6 +37,7 @@
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
#define ZFCP_BUS_ID_SIZE 20
@@ -356,6 +358,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->erp_action.adapter = adapter;
+ if (zfcp_diag_adapter_setup(adapter))
+ goto failed;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -402,6 +407,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto failed;
+ if (zfcp_diag_sysfs_setup(adapter))
+ goto failed;
+
/* report size limit per scatter-gather segment */
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
@@ -426,6 +434,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_scsi_adapter_unregister(adapter);
+ zfcp_diag_sysfs_destroy(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
@@ -449,6 +458,7 @@ void zfcp_adapter_release(struct kref *ref)
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
zfcp_fc_gs_destroy(adapter);
zfcp_free_low_mem_buffers(adapter);
+ zfcp_diag_adapter_free(adapter);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index dccdb41bed8c..1234294700c4 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -95,11 +95,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
- if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
- rec->pl_len = q_head->log_length;
- zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
- rec->pl_len, "fsf_res", req->req_id);
- }
+ rec->pl_len = q_head->log_length;
+ zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+ rec->pl_len, "fsf_res", req->req_id);
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 87d2f47a6990..8cc0eefe4ccc 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef ZFCP_DEF_H
@@ -86,6 +86,7 @@
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
+#define ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE 0x00020000
/************************* STRUCTURE DEFINITIONS *****************************/
@@ -197,6 +198,7 @@ struct zfcp_adapter {
struct device_dma_parameters dma_parms;
struct zfcp_fc_events events;
unsigned long next_port_scan;
+ struct zfcp_diag_adapter *diagnostics;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_diag.c b/drivers/s390/scsi/zfcp_diag.c
new file mode 100644
index 000000000000..67a8f4e57db1
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Functions to handle diagnostics.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/kernfs.h>
+#include <linux/sysfs.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "zfcp_diag.h"
+#include "zfcp_ext.h"
+#include "zfcp_def.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(__zfcp_diag_publish_wait);
+
+/**
+ * zfcp_diag_adapter_setup() - Setup storage for adapter diagnostics.
+ * @adapter: the adapter to setup diagnostics for.
+ *
+ * Creates the data-structures to store the diagnostics for an adapter. This
+ * overwrites whatever was stored before at &zfcp_adapter->diagnostics!
+ *
+ * Return:
+ * * 0 - Everything is OK
+ * * -ENOMEM - Could not allocate all/parts of the data-structures;
+ * &zfcp_adapter->diagnostics remains unchanged
+ */
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter)
+{
+ struct zfcp_diag_adapter *diag;
+ struct zfcp_diag_header *hdr;
+
+ diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+ if (diag == NULL)
+ return -ENOMEM;
+
+ diag->max_age = (5 * 1000); /* default value: 5 s */
+
+ /* setup header for port_data */
+ hdr = &diag->port_data.header;
+
+ spin_lock_init(&hdr->access_lock);
+ hdr->buffer = &diag->port_data.data;
+ hdr->buffer_size = sizeof(diag->port_data.data);
+ /* set the timestamp so that the first test on age will always fail */
+ hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+ /* setup header for config_data */
+ hdr = &diag->config_data.header;
+
+ spin_lock_init(&hdr->access_lock);
+ hdr->buffer = &diag->config_data.data;
+ hdr->buffer_size = sizeof(diag->config_data.data);
+ /* set the timestamp so that the first test on age will always fail */
+ hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+ adapter->diagnostics = diag;
+ return 0;
+}
+
+/**
+ * zfcp_diag_adapter_free() - Frees all adapter diagnostics allocations.
+ * @adapter: the adapter whose diagnostic structures should be freed.
+ *
+ * Frees all data-structures in the given adapter that store diagnostics
+ * information. Can safely be called with partially set up diagnostics.
+ */
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
+{
+ kfree(adapter->diagnostics);
+ adapter->diagnostics = NULL;
+}
+
+/**
+ * zfcp_diag_sysfs_setup() - Setup the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter to which the group should be added.
+ *
+ * Return: 0 on success; Something else otherwise (see sysfs_create_group()).
+ */
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
+{
+ int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
+ &zfcp_sysfs_diag_attr_group);
+ if (rc == 0)
+ adapter->diagnostics->sysfs_established = 1;
+
+ return rc;
+}
+
+/**
+ * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter from which the group should be removed.
+ */
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
+{
+ if (adapter->diagnostics == NULL ||
+ !adapter->diagnostics->sysfs_established)
+ return;
+
+ /*
+ * We need this state-handling so we can prevent warnings being printed
+ * on the kernel-console in case we have to abort a halfway done
+ * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
+ * established. sysfs_remove_group() does this checking as well, but
+ * still prints a warning in case we try to remove a group that has not
+ * been established before
+ */
+ adapter->diagnostics->sysfs_established = 0;
+ sysfs_remove_group(&adapter->ccw_device->dev.kobj,
+ &zfcp_sysfs_diag_attr_group);
+}
+
+
+/**
+ * zfcp_diag_update_xdata() - Update a diagnostics buffer.
+ * @hdr: the meta data to update.
+ * @data: data to use for the update.
+ * @incomplete: flag stating whether the data in @data is incomplete.
+ */
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+ const void *const data, const bool incomplete)
+{
+ const unsigned long capture_timestamp = jiffies;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdr->access_lock, flags);
+
+ /* make sure we never go into the past with an update */
+ if (!time_after_eq(capture_timestamp, hdr->timestamp))
+ goto out;
+
+ hdr->timestamp = capture_timestamp;
+ hdr->incomplete = incomplete;
+ memcpy(hdr->buffer, data, hdr->buffer_size);
+out:
+ spin_unlock_irqrestore(&hdr->access_lock, flags);
+}
+
+/**
+ * zfcp_diag_update_port_data_buffer() - Implementation of
+ * &typedef zfcp_diag_update_buffer_func
+ * to collect and update Port Data.
+ * @adapter: Adapter to collect Port Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new Diagnostics and Updated the buffer;
+ * this also includes cases where data was retrieved, but
+ * incomplete; you'll have to check the flag ``incomplete``
+ * of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_port_data_sync() for possible error-codes (
+ * excluding -EAGAIN)
+ */
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter)
+{
+ int rc;
+
+ rc = zfcp_fsf_exchange_port_data_sync(adapter->qdio, NULL);
+ if (rc == -EAGAIN)
+ rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+ /* buffer-data was updated in zfcp_fsf_exchange_port_data_handler() */
+
+ return rc;
+}
+
+/**
+ * zfcp_diag_update_config_data_buffer() - Implementation of
+ * &typedef zfcp_diag_update_buffer_func
+ * to collect and update Config Data.
+ * @adapter: Adapter to collect Config Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new Diagnostics and Updated the buffer;
+ * this also includes cases where data was retrieved, but
+ * incomplete; you'll have to check the flag ``incomplete``
+ * of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_config_data_sync() for possible error-codes (
+ * excluding -EAGAIN)
+ */
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter)
+{
+ int rc;
+
+ rc = zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
+ if (rc == -EAGAIN)
+ rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+ /* buffer-data was updated in zfcp_fsf_exchange_config_data_handler() */
+
+ return rc;
+}
+
+static int __zfcp_diag_update_buffer(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update,
+ unsigned long *const flags)
+ __must_hold(hdr->access_lock)
+{
+ int rc;
+
+ if (hdr->updating == 1) {
+ rc = wait_event_interruptible_lock_irq(__zfcp_diag_publish_wait,
+ hdr->updating == 0,
+ hdr->access_lock);
+ rc = (rc == 0 ? -EAGAIN : -EINTR);
+ } else {
+ hdr->updating = 1;
+ spin_unlock_irqrestore(&hdr->access_lock, *flags);
+
+ /* unlocked, because update function sleeps */
+ rc = buffer_update(adapter);
+
+ spin_lock_irqsave(&hdr->access_lock, *flags);
+ hdr->updating = 0;
+
+ /*
+ * every thread waiting here went via an interruptible wait,
+ * so it's fine to only wake those
+ */
+ wake_up_interruptible_all(&__zfcp_diag_publish_wait);
+ }
+
+ return rc;
+}
+
+static bool
+__zfcp_diag_test_buffer_age_isfresh(const struct zfcp_diag_adapter *const diag,
+ const struct zfcp_diag_header *const hdr)
+ __must_hold(hdr->access_lock)
+{
+ const unsigned long now = jiffies;
+
+ /*
+ * Should not happen (data is from the future); if it does, still
+ * signal that it needs refresh
+ */
+ if (!time_after_eq(now, hdr->timestamp))
+ return false;
+
+ if (jiffies_to_msecs(now - hdr->timestamp) >= diag->max_age)
+ return false;
+
+ return true;
+}
+
+/**
+ * zfcp_diag_update_buffer_limited() - Collect diagnostics and update a
+ * diagnostics buffer rate limited.
+ * @adapter: Adapter to collect the diagnostics from.
+ * @hdr: buffer-header for which to update with the collected diagnostics.
+ * @buffer_update: Specific implementation for collecting and updating.
+ *
+ * This function will cause an update of the given @hdr by calling the also
+ * given @buffer_update function. If called by multiple sources at the same
+ * time, it will synchronize the update by only allowing one source to call
+ * @buffer_update and the others to wait for that source to complete instead
+ * (the wait is interruptible).
+ *
+ * Additionally this version is rate-limited and will only exit if either the
+ * buffer is fresh enough (within the limit) - it will do nothing if the buffer
+ * is fresh enough to begin with - or if the source/thread that started this
+ * update is the one that made the update (to prevent endless loops).
+ *
+ * Return:
+ * * 0 - If the update was successfully published and/or the buffer is
+ * fresh enough
+ * * -EINTR - If the thread went into the wait-state and was interrupted
+ * * whatever @buffer_update returns
+ */
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&hdr->access_lock, flags);
+
+ for (rc = 0;
+ !__zfcp_diag_test_buffer_age_isfresh(adapter->diagnostics, hdr);
+ rc = 0) {
+ rc = __zfcp_diag_update_buffer(adapter, hdr, buffer_update,
+ &flags);
+ if (rc != -EAGAIN)
+ break;
+ }
+
+ spin_unlock_irqrestore(&hdr->access_lock, flags);
+
+ return rc;
+}
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
new file mode 100644
index 000000000000..b9c93d15f67c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Definitions for handling diagnostics in the zfcp device driver.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef ZFCP_DIAG_H
+#define ZFCP_DIAG_H
+
+#include <linux/spinlock.h>
+
+#include "zfcp_fsf.h"
+#include "zfcp_def.h"
+
+/**
+ * struct zfcp_diag_header - general part of a diagnostic buffer.
+ * @access_lock: lock protecting all the data in this buffer.
+ * @updating: flag showing that an update for this buffer is currently running.
+ * @incomplete: flag showing that the data in @buffer is incomplete.
+ * @timestamp: time in jiffies when the data of this buffer was last captured.
+ * @buffer: implementation-dependent data of this buffer
+ * @buffer_size: size of @buffer
+ */
+struct zfcp_diag_header {
+ spinlock_t access_lock;
+
+ /* Flags */
+ u64 updating :1;
+ u64 incomplete :1;
+
+ unsigned long timestamp;
+
+ void *buffer;
+ size_t buffer_size;
+};
+
+/**
+ * struct zfcp_diag_adapter - central storage for all diagnostics concerning an
+ * adapter.
+ * @sysfs_established: flag showing that the associated sysfs-group was created
+ * during run of zfcp_adapter_enqueue().
+ * @max_age: maximum age of data in diagnostic buffers before they need to be
+ * refreshed (in ms).
+ * @port_data: data retrieved using exchange port data.
+ * @port_data.header: header with metadata for the cache in @port_data.data.
+ * @port_data.data: cached QTCB Bottom of command exchange port data.
+ * @config_data: data retrieved using exchange config data.
+ * @config_data.header: header with metadata for the cache in @config_data.data.
+ * @config_data.data: cached QTCB Bottom of command exchange config data.
+ */
+struct zfcp_diag_adapter {
+ u64 sysfs_established :1;
+
+ unsigned long max_age;
+
+ struct {
+ struct zfcp_diag_header header;
+ struct fsf_qtcb_bottom_port data;
+ } port_data;
+ struct {
+ struct zfcp_diag_header header;
+ struct fsf_qtcb_bottom_config data;
+ } config_data;
+};
+
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);
+
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+ const void *const data, const bool incomplete);
+
+/*
+ * Function-Type used in zfcp_diag_update_buffer_limited() for the function
+ * that does the buffer-implementation dependent work.
+ */
+typedef int (*zfcp_diag_update_buffer_func)(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update);
+
+/**
+ * zfcp_diag_support_sfp() - Return %true if the @adapter supports reporting
+ * SFP Data.
+ * @adapter: adapter to test the availability of SFP Data reporting for.
+ */
+static inline bool
+zfcp_diag_support_sfp(const struct zfcp_adapter *const adapter)
+{
+ return !!(adapter->adapter_features & FSF_FEATURE_REPORT_SFP_DATA);
+}
+
+#endif /* ZFCP_DIAG_H */
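A minimal usage sketch of this interface (a hypothetical caller; the actual readers are the sysfs attributes added later in this patch): refresh the cached buffer only if it is older than max_age, then read the cached data under the lock that protects updates.

	static int example_read_sfp_temperature(struct zfcp_adapter *adapter,
						u16 *temp)
	{
		struct zfcp_diag_header *hdr =
			&adapter->diagnostics->port_data.header;
		unsigned long flags;
		int rc;

		/* triggers Exchange Port Data only when the cache is stale */
		rc = zfcp_diag_update_buffer_limited(
			adapter, hdr, zfcp_diag_update_port_data_buffer);
		if (rc != 0)
			return rc;

		spin_lock_irqsave(&hdr->access_lock, flags);
		*temp = adapter->diagnostics->port_data.data.temperature;
		spin_unlock_irqrestore(&hdr->access_lock, flags);

		return 0;
	}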
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 96f0d34e9459..93655b85b73f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -174,7 +174,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
- p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+ p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
@@ -190,7 +190,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
a_status = atomic_read(&adapter->status);
if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
- a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+ a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (p_status & ZFCP_STATUS_COMMON_NOESC)
return need;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 31e8a7240fd7..c8556787cfdc 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+extern const struct attribute_group zfcp_sysfs_diag_attr_group;
bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index cf63916814cc..223a805f0b0b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
@@ -19,6 +20,7 @@
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
@@ -554,6 +556,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_diag_header *const diag_hdr =
+ &adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
struct Scsi_Host *shost = adapter->scsi_host;
@@ -570,6 +574,12 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
+ /*
+ * usually we wait with an update till the cache is too old,
+ * but because we have the data available, update it anyway
+ */
+ zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
if (zfcp_fsf_exchange_config_evaluate(req))
return;
@@ -585,6 +595,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->status);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ zfcp_diag_update_xdata(diag_hdr, bottom, true);
+ req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
fc_host_node_name(shost) = 0;
fc_host_port_name(shost) = 0;
fc_host_port_id(shost) = 0;
@@ -653,16 +666,28 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
+ struct zfcp_diag_header *const diag_hdr =
+ &req->adapter->diagnostics->port_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
+ struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
+ /*
+ * usually we wait with an update till the cache is too old,
+ * but because we have the data available, update it anyway
+ */
+ zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ zfcp_diag_update_xdata(diag_hdr, bottom, true);
+ req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
zfcp_fsf_exchange_port_evaluate(req);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
@@ -1261,7 +1286,8 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
- FSF_FEATURE_UPDATE_ALERT;
+ FSF_FEATURE_UPDATE_ALERT |
+ FSF_FEATURE_REQUEST_SFP_DATA;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
erp_action->fsf_req_id = req->req_id;
@@ -1278,6 +1304,19 @@ out:
return retval;
}
+
+/**
+ * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ * might be %NULL.
+ *
+ * Returns:
+ * * 0 - Exchange Config Data was successful, @data is complete
+ * * -EIO - Exchange Config Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
@@ -1301,7 +1340,8 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
- FSF_FEATURE_UPDATE_ALERT;
+ FSF_FEATURE_UPDATE_ALERT |
+ FSF_FEATURE_REQUEST_SFP_DATA;
if (data)
req->data = data;
@@ -1309,9 +1349,16 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
+
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+
+ if (req->status &
+ (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+ retval = -EIO;
+ else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+ retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
@@ -1369,10 +1416,17 @@ out:
}
/**
- * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @qdio: pointer to struct zfcp_qdio
- * @data: pointer to struct fsf_qtcb_bottom_port
- * Returns: 0 on success, error otherwise
+ * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ * might be %NULL.
+ *
+ * Returns:
+ * * 0 - Exchange Port Data was successful, @data is complete
+ * * -EIO - Exchange Port Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ * * -EOPNOTSUPP - This operation is not supported
*/
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
@@ -1408,10 +1462,15 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+
+ if (req->status &
+ (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+ retval = -EIO;
+ else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+ retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
-
return retval;
out_unlock:
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 2c658b66318c..2b1e4da1944f 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -163,6 +163,8 @@
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
+#define FSF_FEATURE_REQUEST_SFP_DATA 0x00000200
+#define FSF_FEATURE_REPORT_SFP_DATA 0x00000800
#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
@@ -407,7 +409,24 @@ struct fsf_qtcb_bottom_port {
u8 cp_util;
u8 cb_util;
u8 a_util;
- u8 res2[253];
+ u8 res2;
+ u16 temperature;
+ u16 vcc;
+ u16 tx_bias;
+ u16 tx_power;
+ u16 rx_power;
+ union {
+ u16 raw;
+ struct {
+ u16 fec_active :1;
+ u16:7;
+ u16 connector_type :2;
+ u16 sfp_invalid :1;
+ u16 optical_port :1;
+ u16 port_tx_type :4;
+ };
+ } sfp_flags;
+ u8 res3[240];
} __attribute__ ((packed));
union fsf_qtcb_bottom {
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e9ded2befa0d..3910d529c15a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -605,7 +605,7 @@ zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
return NULL;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
- if (ret) {
+ if (ret != 0 && ret != -EAGAIN) {
kfree(data);
return NULL;
}
@@ -634,7 +634,7 @@ static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
return;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
- if (ret)
+ if (ret != 0 && ret != -EAGAIN)
kfree(data);
else {
adapter->stats_reset = jiffies/HZ;
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index af197e2b3e69..494b9fe9cc94 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
+#include "zfcp_diag.h"
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -325,6 +326,50 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
zfcp_sysfs_port_remove_store);
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ ssize_t rc;
+
+ if (!adapter)
+ return -ENODEV;
+
+ /* ceil(log(2^64 - 1) / log(10)) = 20 */
+ rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
+
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ unsigned long max_age;
+ ssize_t rc;
+
+ if (!adapter)
+ return -ENODEV;
+
+ rc = kstrtoul(buf, 10, &max_age);
+ if (rc != 0)
+ goto out;
+
+ adapter->diagnostics->max_age = max_age;
+
+ rc = count;
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
+ zfcp_sysfs_adapter_diag_max_age_show,
+ zfcp_sysfs_adapter_diag_max_age_store);
+
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
@@ -337,6 +382,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_lic_version.attr,
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
+ &dev_attr_adapter_diag_max_age.attr,
NULL
};
@@ -577,7 +623,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
return -ENOMEM;
retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
- if (!retval)
+ if (retval == 0 || retval == -EAGAIN)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
kfree(qtcb_port);
@@ -603,7 +649,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev,
return -ENOMEM;
retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
- if (!retval)
+ if (retval == 0 || retval == -EAGAIN)
*stat_inf = qtcb_config->stat_info;
kfree(qtcb_config);
@@ -664,3 +710,123 @@ struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_queue_full,
NULL
};
+
+static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
+ struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ struct zfcp_diag_header *diag_hdr;
+ struct fc_els_flogi *nsp;
+ ssize_t rc = -ENOLINK;
+ unsigned long flags;
+ unsigned int status;
+
+ if (!adapter)
+ return -ENODEV;
+
+ status = atomic_read(&adapter->status);
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))
+ goto out;
+
+ diag_hdr = &adapter->diagnostics->config_data.header;
+
+ rc = zfcp_diag_update_buffer_limited(
+ adapter, diag_hdr, zfcp_diag_update_config_data_buffer);
+ if (rc != 0)
+ goto out;
+
+ spin_lock_irqsave(&diag_hdr->access_lock, flags);
+ /* nport_serv_param doesn't contain the ELS_Command code */
+ nsp = (struct fc_els_flogi *)((unsigned long)
+ adapter->diagnostics->config_data
+ .data.nport_serv_param -
+ sizeof(u32));
+
+ rc = scnprintf(buf, 5 + 2, "%hu\n",
+ be16_to_cpu(nsp->fl_csp.sp_bb_cred));
+ spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
+
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
+ zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
+
+#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt) \
+ static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show( \
+ struct device *dev, struct device_attribute *attr, char *buf) \
+ { \
+ struct zfcp_adapter *const adapter = \
+ zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); \
+ struct zfcp_diag_header *diag_hdr; \
+ ssize_t rc = -ENOLINK; \
+ unsigned long flags; \
+ unsigned int status; \
+ \
+ if (!adapter) \
+ return -ENODEV; \
+ \
+ status = atomic_read(&adapter->status); \
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || \
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || \
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED)) \
+ goto out; \
+ \
+ if (!zfcp_diag_support_sfp(adapter)) { \
+ rc = -EOPNOTSUPP; \
+ goto out; \
+ } \
+ \
+ diag_hdr = &adapter->diagnostics->port_data.header; \
+ \
+ rc = zfcp_diag_update_buffer_limited( \
+ adapter, diag_hdr, zfcp_diag_update_port_data_buffer); \
+ if (rc != 0) \
+ goto out; \
+ \
+ spin_lock_irqsave(&diag_hdr->access_lock, flags); \
+ rc = scnprintf( \
+ buf, (_prtsize) + 2, _prtfmt "\n", \
+ adapter->diagnostics->port_data.data._qtcb_member); \
+ spin_unlock_irqrestore(&diag_hdr->access_lock, flags); \
+ \
+ out: \
+ zfcp_ccw_adapter_put(adapter); \
+ return rc; \
+ } \
+ static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
+ zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
+
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
+
+static struct attribute *zfcp_sysfs_diag_attrs[] = {
+ &dev_attr_adapter_diag_sfp_temperature.attr,
+ &dev_attr_adapter_diag_sfp_vcc.attr,
+ &dev_attr_adapter_diag_sfp_tx_bias.attr,
+ &dev_attr_adapter_diag_sfp_tx_power.attr,
+ &dev_attr_adapter_diag_sfp_rx_power.attr,
+ &dev_attr_adapter_diag_sfp_port_tx_type.attr,
+ &dev_attr_adapter_diag_sfp_optical_port.attr,
+ &dev_attr_adapter_diag_sfp_sfp_invalid.attr,
+ &dev_attr_adapter_diag_sfp_connector_type.attr,
+ &dev_attr_adapter_diag_sfp_fec_active.attr,
+ &dev_attr_adapter_diag_b2b_credit.attr,
+ NULL,
+};
+
+const struct attribute_group zfcp_sysfs_diag_attr_group = {
+ .name = "diagnostics",
+ .attrs = zfcp_sysfs_diag_attrs,
+};
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 971fe074d7c9..fad936eb845f 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -156,7 +156,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations d7s_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = d7s_ioctl,
- .compat_ioctl = d7s_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = d7s_open,
.release = d7s_release,
.llseek = noop_llseek,
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index a63d5e402ff2..12d66aa61ede 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -715,9 +715,7 @@ static const struct file_operations envctrl_fops = {
.owner = THIS_MODULE,
.read = envctrl_read,
.unlocked_ioctl = envctrl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = envctrl_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = envctrl_open,
.release = envctrl_release,
.llseek = noop_llseek,
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2b1e0d503020..fb6444d0409c 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1049,9 +1049,7 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
static const struct file_operations tw_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tw_chrdev_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = tw_chrdev_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = tw_chrdev_open,
.release = NULL,
.llseek = noop_llseek,
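These conversions lean on the then-new compat_ptr_ioctl() helper; roughly (a from-memory sketch, not the authoritative source) it only performs the pointer conversion, which is all these drivers' handlers need:

	long compat_ptr_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
	{
		if (!file->f_op->unlocked_ioctl)
			return -ENOIOCTLCMD;

		/* same handler as the native ioctl, with the 32-bit pointer widened */
		return file->f_op->unlocked_ioctl(file, cmd,
						  (unsigned long)compat_ptr(arg));
	}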
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 536426f25e86..f2f7e6e76c07 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -129,6 +129,9 @@
#define NCR5380_release_dma_irq(x)
#endif
+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -172,6 +175,19 @@ static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
}
}
+static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
+{
+ int resid = cmd->SCp.this_residual;
+ struct scatterlist *s = cmd->SCp.buffer;
+
+ if (s)
+ while (!sg_is_last(s)) {
+ s = sg_next(s);
+ resid += s->length;
+ }
+ scsi_set_resid(cmd, resid);
+}
+
/**
* NCR5380_poll_politely2 - wait for two chip register values
* @hostdata: host private data
@@ -954,7 +970,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
int err;
bool ret = true;
bool can_disconnect = instance->irq != NO_IRQ &&
- cmd->cmnd[0] != REQUEST_SENSE;
+ cmd->cmnd[0] != REQUEST_SENSE &&
+ (disconnect_mask & BIT(scmd_id(cmd)));
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -1379,7 +1396,7 @@ static void do_reset(struct Scsi_Host *instance)
* MESSAGE OUT phase and sending an ABORT message.
* @instance: relevant scsi host instance
*
- * Returns 0 on success, -1 on failure.
+ * Returns 0 on success, negative error code on failure.
*/
static int do_abort(struct Scsi_Host *instance)
@@ -1404,7 +1421,7 @@ static int do_abort(struct Scsi_Host *instance)
rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
if (rc < 0)
- goto timeout;
+ goto out;
tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;
@@ -1415,7 +1432,7 @@ static int do_abort(struct Scsi_Host *instance)
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
if (rc < 0)
- goto timeout;
+ goto out;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
}
@@ -1424,17 +1441,17 @@ static int do_abort(struct Scsi_Host *instance)
len = 1;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+ if (len)
+ rc = -ENXIO;
/*
* If we got here, and the command completed successfully,
* we're about to go into bus free state.
*/
- return len ? -1 : 0;
-
-timeout:
+out:
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- return -1;
+ return rc;
}
/*
@@ -1803,6 +1820,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result |= cmd->SCp.Status;
cmd->result |= cmd->SCp.Message << 8;
+ set_resid_from_SCp(cmd);
+
if (cmd->cmnd[0] == REQUEST_SENSE)
complete_cmd(instance, cmd);
else {
@@ -2264,7 +2283,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
hostdata->dma_len = 0;
- if (do_abort(instance)) {
+ if (do_abort(instance) < 0) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
result = FAILED;
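The new disconnect_mask parameter gates disconnect privilege per target ID via the BIT(scmd_id(cmd)) test added to NCR5380_select(). As an illustration with a hypothetical value, loading the driver with disconnect_mask=0xfe keeps target 0 from disconnecting while leaving targets 1-7 unaffected:

	/* Illustrative only; 0xfe is a made-up mask value. */
	unsigned int disconnect_mask = 0xfe;			/* 1111 1110 */
	bool target3_may_disconnect = disconnect_mask & BIT(3);	/* 0x08 -> true  */
	bool target0_may_disconnect = disconnect_mask & BIT(0);	/* 0x01 -> false */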
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 0ed3f806ace5..e36608ce937a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1477,6 +1477,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
struct aac_srb * srbcmd;
u32 flag;
u32 timeout;
+ struct aac_dev *dev = fib->dev;
aac_fib_init(fib);
switch(cmd->sc_data_direction){
@@ -1503,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->flags = cpu_to_le32(flag);
timeout = cmd->request->timeout/HZ;
if (timeout == 0)
- timeout = 1;
+ timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
srbcmd->retry_limit = 0; /* Obsolete parameter */
srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
@@ -2467,13 +2468,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
@@ -2559,13 +2560,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3fa03230f6ba..e3e4ecbea726 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -85,7 +85,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50877
+# define AAC_DRIVER_BUILD 50983
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -108,6 +108,8 @@ enum {
#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
+#define AAC_SA_TIMEOUT 180
+#define AAC_ARC_TIMEOUT 60
#define get_bus_number(x) (x/AAC_MAX_TARGETS)
#define get_target_number(x) (x%AAC_MAX_TARGETS)
@@ -1328,7 +1330,7 @@ struct fib {
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
+#define AAC_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
@@ -1601,6 +1603,7 @@ struct aac_dev
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
struct delayed_work safw_rescan_work;
+ struct delayed_work src_reinit_aif_worker;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1673,6 +1676,7 @@ struct aac_dev
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
+ u8 soft_reset_support;
};
#define aac_adapter_interrupt(dev) \
@@ -2644,7 +2648,12 @@ int aac_scan_host(struct aac_dev *dev);
static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
{
- schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_RESCAN_DELAY);
+}
+
+static inline void aac_schedule_src_reinit_aif_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->src_reinit_aif_worker, AAC_RESCAN_DELAY);
}
static inline void aac_safw_rescan_worker(struct work_struct *work)
@@ -2658,10 +2667,10 @@ static inline void aac_safw_rescan_worker(struct work_struct *work)
aac_scan_host(dev);
}
-static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
{
- if (dev->sa_firmware)
- cancel_delayed_work_sync(&dev->safw_rescan_work);
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+ cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
}
/* SCp.phase values */
@@ -2671,6 +2680,7 @@ static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
#define AAC_OWNER_FIRMWARE 0x106
void aac_safw_rescan_worker(struct work_struct *work);
+void aac_src_reinit_aif_worker(struct work_struct *work);
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
int aac_setup_safw_adapter(struct aac_dev *dev);
@@ -2728,6 +2738,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
int _aac_rx_init(struct aac_dev *dev);
int aac_rx_select_comm(struct aac_dev *dev, int comm);
int aac_rx_deliver_producer(struct fib * fib);
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index);
static inline int aac_is_src(struct aac_dev *dev)
{
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index d4fcfa1e54e0..f75878d773cf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -571,6 +571,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
else
dev->sa_firmware = 0;
+ if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET))
+ dev->soft_reset_support = 1;
+ else
+ dev->soft_reset_support = 0;
+
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
(status[2] > dev->base_size)) {
aac_adapter_ioremap(dev, 0);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 2142a649e865..5a8a999606ea 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -232,6 +232,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
fibptr->type = FSAFS_NTC_FIB_CONTEXT;
fibptr->callback_data = NULL;
fibptr->callback = NULL;
+ fibptr->flags = 0;
return fibptr;
}
@@ -1463,6 +1464,14 @@ retry_next:
}
}
+static void aac_schedule_bus_scan(struct aac_dev *aac)
+{
+ if (aac->sa_firmware)
+ aac_schedule_safw_scan_worker(aac);
+ else
+ aac_schedule_src_reinit_aif_worker(aac);
+}
+
static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
int index, quirks;
@@ -1638,7 +1647,7 @@ out:
*/
if (!retval && !is_kdump_kernel()) {
dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
- aac_schedule_safw_scan_worker(aac);
+ aac_schedule_bus_scan(aac);
}
if (jafo) {
@@ -1959,6 +1968,16 @@ int aac_scan_host(struct aac_dev *dev)
return rcode;
}
+void aac_src_reinit_aif_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, src_reinit_aif_worker);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+ aac_reinit_aif(dev, dev->cardtype);
+}
+
/**
* aac_handle_sa_aif Handle a message from the firmware
* @dev: Which adapter this fib is from
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4a858789e6c5..ee6bc2f9b80a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -391,6 +391,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
int chn, tid;
unsigned int depth = 0;
unsigned int set_timeout = 0;
+ int timeout = 0;
bool set_qd_dev_type = false;
u8 devtype = 0;
@@ -483,10 +484,13 @@ common_config:
/*
* Firmware has an individual device recovery time typically
- * of 35 seconds, give us a margin.
+ * of 35 seconds, give us a margin. Thor devices can take longer in
+ * error recovery, hence different value.
*/
- if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
- blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+ if (set_timeout) {
+ timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
+ }
if (depth > 256)
depth = 256;
@@ -608,9 +612,13 @@ static struct device_attribute *aac_dev_attrs[] = {
static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg)
{
+ int retval;
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ retval = aac_adapter_check_health(dev);
+ if (retval)
+ return -EBUSY;
return aac_do_ioctl(dev, cmd, arg);
}
@@ -1585,6 +1593,19 @@ static void aac_init_char(void)
}
}
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index)
+{
+ /*
+ * Firmware may send AIF messages very early, and the driver may have
+ * ignored them as it was not fully ready to process the messages. Send
+ * an AIF to the firmware so that any unprocessed events can be
+ * processed now.
+ */
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
+
+}
+
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned index = id->driver_data;
@@ -1682,6 +1703,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&aac->scan_mutex);
INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
+ INIT_DELAYED_WORK(&aac->src_reinit_aif_worker,
+ aac_src_reinit_aif_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1872,7 +1895,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1931,7 +1954,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1989,7 +2012,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
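With AAC_SA_TIMEOUT (180) and AAC_ARC_TIMEOUT (60) added in aacraid.h, the per-device request timeout in aac_slave_configure() becomes a single firmware-type check rather than the fixed 45-second floor. A sketch of the effective computation, using HZ = 250 purely as an example figure:

	/* Sketch of the request-timeout choice after this change. */
	unsigned int secs = aac->sa_firmware ? AAC_SA_TIMEOUT	/* Thor: 180 s */
					     : AAC_ARC_TIMEOUT;	/* ARC:   60 s */
	blk_queue_rq_timeout(sdev->request_queue, secs * HZ);
	/* e.g. 180 * 250 = 45000 jiffies on a HZ=250 kernel */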
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b66e06726c8..787ec9baebb0 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -733,10 +733,20 @@ static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
return ctrl_up;
}
+static void aac_src_drop_io(struct aac_dev *dev)
+{
+ if (!dev->soft_reset_support)
+ return;
+
+ aac_adapter_sync_cmd(dev, DROP_IO,
+ 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
NULL, NULL, NULL, NULL);
+ aac_src_drop_io(dev);
}
static void aac_send_iop_reset(struct aac_dev *dev)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 88053b15c363..db687ef8a99e 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1400,7 +1400,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
, pCCB->acb
, pCCB->startdone
, atomic_read(&acb->ccboutstandingcount));
- return;
+ return;
}
arcmsr_report_ccb_state(acb, pCCB, error);
}
@@ -3476,8 +3476,8 @@ polling_hbc_ccb_retry:
, pCCB->pcmd->device->id
, (u32)pCCB->pcmd->device->lun
, pCCB);
- pCCB->pcmd->result = DID_ABORT << 16;
- arcmsr_ccb_complete(pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
continue;
}
printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index d12dd89538df..ddb52e7ba622 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1067,7 +1067,7 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
* Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct
* Params : host - host to finish
* Notes : This is called when a command is:
- * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONECT
+ * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONNECT
* : This must not return until all transfers are completed.
*/
static
@@ -1816,7 +1816,7 @@ int acornscsi_reconnect(AS_Host *host)
}
/*
- * Function: int acornscsi_reconect_finish(AS_Host *host)
+ * Function: int acornscsi_reconnect_finish(AS_Host *host)
* Purpose : finish reconnecting a command
* Params : host - host to complete
* Returns : 0 if failed
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index e809493d0d06..a82b63a66635 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
- atari_scsi_template.sg_tablesize = SG_NONE;
+ atari_scsi_template.sg_tablesize = 1;
}
if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- /* Leave sg_tablesize at 0 on a Falcon! */
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ /* Don't increase sg_tablesize on Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0) {
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e41f0bbdc9fd..c6a752309dda 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1680,7 +1680,7 @@ static struct scsi_host_template atp870u_template = {
.bios_param = atp870u_biosparam /* biosparm */,
.can_queue = qcnt /* can_queue */,
.this_id = 7 /* SCSI ID */,
- .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
+ .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
.max_sectors = ATP870U_MAX_SECTORS,
};
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2f9213b257a4..eb0c76338295 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1487,8 +1487,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return ret;
}
-int
-restart_bfa(struct bfad_s *bfad)
+static int restart_bfa(struct bfad_s *bfad)
{
unsigned long flags;
struct pci_dev *pdev = bfad->pcidev;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 29ab81df75c0..fbfce02e5b93 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -275,8 +275,10 @@ bfad_im_get_stats(struct Scsi_Host *shost)
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
fcstats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- if (rc != BFA_STATUS_OK)
+ if (rc != BFA_STATUS_OK) {
+ kfree(fcstats);
return NULL;
+ }
wait_for_completion(&fcomp.comp);
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index e4469df9c469..698f5ebaa0c2 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -813,7 +813,7 @@ struct fcoe_confqe {
/*
- * FCoE conection data base
+ * FCoE connection data base
*/
struct fcoe_conn_db {
#if defined(__BIG_ENDIAN)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 401743e2b429..4c8122a82322 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1242,7 +1242,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
time_left = wait_for_completion_timeout(&io_req->abts_done,
- (2 * rp->r_a_tov + 1) * HZ);
+ msecs_to_jiffies(2 * rp->r_a_tov + 1));
if (time_left)
BNX2FC_IO_DBG(io_req,
"Timed out in eh_abort waiting for abts_done");
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c5fa5f3b00e9..0b28d44d3573 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
- pci_dev_put(hba->pcidev);
if (hba->regview) {
pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
+ pci_dev_put(hba->pcidev);
bnx2i_free_mp_bdt(hba);
bnx2i_release_free_cid_que(hba);
iscsi_host_free(shost);
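Moving pci_dev_put() below the regview teardown ensures hba->pcidev is not used after the driver's reference has been dropped. The ordering rule, sketched with comments:

	/* Sketch: finish every use of the pci_dev before dropping the reference. */
	if (hba->regview) {
		pci_iounmap(hba->pcidev, hba->regview);	/* reference still held here */
		hba->regview = NULL;
	}
	pci_dev_put(hba->pcidev);			/* last touch of hba->pcidev */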
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index e51923886475..950f9cdf0577 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -793,10 +793,10 @@ csio_hw_get_flash_params(struct csio_hw *hw)
goto found;
}
- /* Decode Flash part size. The code below looks repetative with
+ /* Decode Flash part size. The code below looks repetitive with
* common encodings, but that's not guaranteed in the JEDEC
- * specification for the Read JADEC ID command. The only thing that
- * we're guaranteed by the JADEC specification is where the
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
* Manufacturer ID is in the returned result. After that each
* Manufacturer ~could~ encode things completely differently.
* Note, all Flash parts must have 64KB sectors.
@@ -983,8 +983,8 @@ retry:
waiting -= 50;
/*
- * If neither Error nor Initialialized are indicated
- * by the firmware keep waiting till we exaust our
+ * If neither Error nor Initialized are indicated
+ * by the firmware keep waiting till we exhaust our
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
@@ -1738,7 +1738,7 @@ static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
* Convert Common Code Forward Error Control settings into the
* Firmware's API. If the current Requested FEC has "Automatic"
* (IEEE 802.3) specified, then we use whatever the Firmware
- * sent us as part of it's IEEE 802.3-based interpratation of
+ * sent us as part of it's IEEE 802.3-based interpretation of
* the Transceiver Module EPROM FEC parameters. Otherwise we
* use whatever is in the current Requested FEC settings.
*/
@@ -2834,7 +2834,7 @@ csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
}
/*
- * csio_hws_initializing - Initialiazing state
+ * csio_hws_initializing - Initializing state
* @hw - HW module
* @evt - Event
*
@@ -3049,7 +3049,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
if (!csio_is_hw_master(hw))
break;
/*
- * The BYE should have alerady been issued, so we cant
+ * The BYE should have already been issued, so we can't
* use the mailbox interface. Hence we use the PL_RST
* register directly.
*/
@@ -3104,7 +3104,7 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
*
* A table driven interrupt handler that applies a set of masks to an
* interrupt status word and performs the corresponding actions if the
- * interrupts described by the mask have occured. The actions include
+ * interrupts described by the mask have occurred. The actions include
* optionally emitting a warning or alert message. The table is terminated
* by an entry specifying mask 0. Returns the number of fatal interrupt
* conditions.
@@ -4219,7 +4219,7 @@ csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
* @hw: Pointer to HW module.
*
* It is assumed that the initialization is a synchronous operation.
- * So when we return afer posting the event, the HW SM should be in
+ * So when we return after posting the event, the HW SM should be in
* the ready state, if there were no errors during init.
*/
int
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index a6dd704d7f2d..2e8a3ac575cb 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -154,13 +154,10 @@ csio_dfs_create(struct csio_hw *hw)
/*
* csio_dfs_destroy - Destroys per-hw debugfs.
*/
-static int
+static void
csio_dfs_destroy(struct csio_hw *hw)
{
- if (hw->debugfs_root)
- debugfs_remove_recursive(hw->debugfs_root);
-
- return 0;
+ debugfs_remove_recursive(hw->debugfs_root);
}
/*
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 66e58f0a75dc..74ff8adc41f7 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);
/* Submit FDMI RHBA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);
/* Submit FDMI request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/**
@@ -1989,7 +1992,7 @@ static int
csio_ln_init(struct csio_lnode *ln)
{
int rv = -EINVAL;
- struct csio_lnode *rln, *pln;
+ struct csio_lnode *pln;
struct csio_hw *hw = csio_lnode_to_hw(ln);
csio_init_state(&ln->sm, csio_lns_uninit);
@@ -2019,7 +2022,6 @@ csio_ln_init(struct csio_lnode *ln)
* THe rest is common for non-root physical and NPIV lnodes.
* Just get references to all other modules
*/
- rln = csio_root_lnode(ln);
if (csio_is_npiv_ln(ln)) {
/* NPIV */
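The three FDMI completion callbacks above move from spin_lock_irq() to spin_lock_irqsave(). The irqsave form is the safe choice when the interrupt state of the caller is not known, because spin_unlock_irq() would unconditionally re-enable interrupts even if they were already disabled before the lock was taken. A minimal sketch of the pattern:

	/* Minimal sketch: preserve and restore the caller's interrupt state. */
	unsigned long flags;

	spin_lock_irqsave(&hw->lock, flags);	  /* disable IRQs, remember prior state */
	/* ... submit the FDMI management request under the lock ... */
	spin_unlock_irqrestore(&hw->lock, flags); /* restore whatever the caller had */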
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 6f13673d6aa0..94810b19e747 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1210,7 +1210,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
!csio_is_hw_intr_enabled(hw)) {
csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
*((uint8_t *)mbp->mb));
- goto error_out;
+ goto error_out;
}
if (mbm->mcurrent != NULL) {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index da50e87921bc..bc1086ae6835 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -2073,7 +2073,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct net_device *ndev = cdev->ports[0];
struct cxgbi_tag_format tformat;
- unsigned int ppmax;
int i, err;
if (!lldi->vr->iscsi.size) {
@@ -2082,7 +2081,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
}
cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
- ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3e17af8aedeb..0d044c165960 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2284,34 +2284,6 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
-{
- int len;
-
- cxgbi_sock_get(csk);
- len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
- cxgbi_sock_put(csk);
-
- return len;
-}
-
-static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
-{
- int len;
-
- cxgbi_sock_get(csk);
- if (csk->csk_family == AF_INET)
- len = sprintf(buf, "%pI4",
- &csk->daddr.sin_addr.s_addr);
- else
- len = sprintf(buf, "%pI6",
- &csk->daddr6.sin6_addr);
-
- cxgbi_sock_put(csk);
-
- return len;
-}
-
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
char *buf)
{
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 93ef97af22df..fbd2ae40dab4 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -44,14 +44,12 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
struct afu *afu = cmd->parent;
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb *ioarcb;
struct sisl_ioasa *ioasa;
u32 resid;
if (unlikely(!cmd))
return;
- ioarcb = &(cmd->rcb);
ioasa = &(cmd->sa);
if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
@@ -3593,7 +3591,7 @@ static const struct file_operations cxlflash_chr_fops = {
.owner = THIS_MODULE,
.open = cxlflash_chr_open,
.unlocked_ioctl = cxlflash_chr_ioctl,
- .compat_ioctl = cxlflash_chr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/**
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 7bd376d95ed5..b02ac389e6c6 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram))) {
esas2r_hdebug("NVRAM read failed, using defaults");
+ up(&a->nvram_semaphore);
return false;
}
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index fdbda5c05aa0..80c5a235d193 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -613,7 +613,7 @@ static int __init esas2r_init(void)
/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
static const struct file_operations esas2r_proc_fops = {
- .compat_ioctl = esas2r_proc_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.unlocked_ioctl = esas2r_proc_ioctl,
};
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 80608b53897b..8ef150dfb6f7 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1024,7 +1024,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
atomic64_inc(&fnic_stats->io_stats.io_completions);
- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
+ io_duration_time = jiffies_to_msecs(jiffies) -
+ jiffies_to_msecs(start_time);
if(io_duration_time <= 10)
atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 78af9cc2009b..1f55b9e4e74a 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -259,7 +259,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
u32 status;
- int dev_cmd_err[] = {
+ static const int dev_cmd_err[] = {
/* convert from fw's version of error.h to host's version */
0, /* ERR_SUCCESS */
EINVAL, /* ERR_EINVAL */
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 720c4d6be939..233c73e01246 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/timer.h>
#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
@@ -84,6 +85,7 @@
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
+#define CLEAR_ITCT_TIMEOUT 20
struct hisi_hba;
@@ -167,6 +169,7 @@ struct hisi_sas_phy {
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
int enable;
+ atomic_t down_cnt;
};
struct hisi_sas_port {
@@ -296,8 +299,8 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*clear_itct)(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *dev);
+ int (*clear_itct)(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *dev);
void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
@@ -321,6 +324,44 @@ struct hisi_sas_hw {
const struct hisi_sas_debugfs_reg *debugfs_reg_port;
};
+#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
+
+struct hisi_sas_debugfs_cq {
+ struct hisi_sas_cq *cq;
+ void *complete_hdr;
+};
+
+struct hisi_sas_debugfs_dq {
+ struct hisi_sas_dq *dq;
+ struct hisi_sas_cmd_hdr *hdr;
+};
+
+struct hisi_sas_debugfs_regs {
+ struct hisi_hba *hisi_hba;
+ u32 *data;
+};
+
+struct hisi_sas_debugfs_port {
+ struct hisi_sas_phy *phy;
+ u32 *data;
+};
+
+struct hisi_sas_debugfs_iost {
+ struct hisi_sas_iost *iost;
+};
+
+struct hisi_sas_debugfs_itct {
+ struct hisi_sas_itct *itct;
+};
+
+struct hisi_sas_debugfs_iost_cache {
+ struct hisi_sas_iost_itct_cache *cache;
+};
+
+struct hisi_sas_debugfs_itct_cache {
+ struct hisi_sas_iost_itct_cache *cache;
+};
+
struct hisi_hba {
/* This must be the first element, used by SHOST_TO_SAS_HA */
struct sas_ha_struct *p;
@@ -402,19 +443,20 @@ struct hisi_hba {
/* debugfs memories */
/* Put Global AXI and RAS Register into register array */
- u32 *debugfs_regs[DEBUGFS_REGS_NUM];
- u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
- void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
- struct hisi_sas_iost *debugfs_iost;
- struct hisi_sas_itct *debugfs_itct;
- u64 *debugfs_iost_cache;
- u64 *debugfs_itct_cache;
-
+ struct hisi_sas_debugfs_regs debugfs_regs[HISI_SAS_MAX_DEBUGFS_DUMP][DEBUGFS_REGS_NUM];
+ struct hisi_sas_debugfs_port debugfs_port_reg[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_PHYS];
+ struct hisi_sas_debugfs_cq debugfs_cq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_debugfs_dq debugfs_dq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_debugfs_iost debugfs_iost[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_itct debugfs_itct[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_iost_cache debugfs_iost_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_itct_cache debugfs_itct_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+
+ u64 debugfs_timestamp[HISI_SAS_MAX_DEBUGFS_DUMP];
+ int debugfs_dump_index;
struct dentry *debugfs_dir;
struct dentry *debugfs_dump_dentry;
struct dentry *debugfs_bist_dentry;
- bool debugfs_snapshot;
};
/* Generic HW DMA host memory structures */
@@ -556,6 +598,7 @@ struct hisi_sas_slot_dif_buf_table {
extern struct scsi_transport_template *hisi_sas_stt;
extern bool hisi_sas_debugfs_enable;
+extern u32 hisi_sas_debugfs_dump_count;
extern struct dentry *hisi_sas_debugfs_dir;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
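The debugfs snapshot buffers change from single instances to per-dump arrays, indexed by debugfs_dump_index and bounded by the hisi_sas_debugfs_dump_count module parameter declared above (up to HISI_SAS_MAX_DEBUGFS_DUMP slots). Illustrative addressing of one dump slot after the rework:

	/* Illustrative only: how one snapshot slot is reached after this change. */
	int idx = hisi_hba->debugfs_dump_index;			/* next free slot */
	u32 *global_regs = hisi_hba->debugfs_regs[idx][DEBUGFS_GLOBAL].data;
	void *cq0_hdrs   = hisi_hba->debugfs_cq[idx][0].complete_hdr;
	u64  stamp_ms    = hisi_hba->debugfs_timestamp[idx];	/* set by the work handler */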
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 0847e682797b..03588ec3c394 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -587,7 +587,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
dev = hisi_hba->dev;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- if (in_softirq())
+ /*
+ * For IOs from the upper layer, preemption may already be
+ * disabled in the IO path. Sleeping in down() with preemption
+ * disabled would make schedule() report a scheduling bug, so
+ * check preemptible() before calling down().
+ */
+ if (!preemptible())
return -EINVAL;
down(&hisi_hba->sem);
@@ -968,12 +974,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ struct hisi_sas_port *port;
unsigned long flags;
if (!sas_port)
return;
+ port = to_hisi_sas_port(sas_port);
spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
@@ -1045,6 +1052,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
+ int ret = 0;
dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
@@ -1056,13 +1064,16 @@ static void hisi_sas_dev_gone(struct domain_device *device)
hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
device->lldd_dev = NULL;
}
if (hisi_hba->hw->free_device)
hisi_hba->hw->free_device(sas_dev);
- sas_dev->dev_type = SAS_PHY_UNUSED;
+
+ /* Don't mark it as SAS_PHY_UNUSED if clearing the ITCT failed */
+ if (!ret)
+ sas_dev->dev_type = SAS_PHY_UNUSED;
sas_dev->sas_device = NULL;
up(&hisi_hba->sem);
}
@@ -1402,7 +1413,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- bool do_port_check = !!(_sas_port != sas_port);
+ bool do_port_check = _sas_port != sas_port;
if (!sas_phy->phy->enabled)
continue;
@@ -1563,7 +1574,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
struct Scsi_Host *shost = hisi_hba->shost;
int rc;
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!hisi_hba->hw->soft_reset)
@@ -2055,7 +2066,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
/* Internal abort timed out */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -2676,6 +2687,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
return rc;
@@ -2687,10 +2699,11 @@ struct dentry *hisi_sas_debugfs_dir;
static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+ int dump_index = hisi_hba->debugfs_dump_index;
int i;
for (i = 0; i < hisi_hba->queue_count; i++)
- memcpy(hisi_hba->debugfs_complete_hdr[i],
+ memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
hisi_hba->complete_hdr[i],
HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}
@@ -2698,13 +2711,14 @@ static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+ int dump_index = hisi_hba->debugfs_dump_index;
int i;
for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
+ struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
int j;
- debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
+ debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
cmd_hdr = hisi_hba->cmd_hdr[i];
for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
@@ -2715,6 +2729,7 @@ static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
+ int dump_index = hisi_hba->debugfs_dump_index;
const struct hisi_sas_debugfs_reg *port =
hisi_hba->hw->debugfs_reg_port;
int i, phy_cnt;
@@ -2722,7 +2737,7 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
u32 *databuf;
for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
- databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
+ databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
for (i = 0; i < port->count; i++, databuf++) {
offset = port->base_off + 4 * i;
*databuf = port->read_port_reg(hisi_hba, phy_cnt,
@@ -2733,7 +2748,8 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *global =
hw->debugfs_reg_array[DEBUGFS_GLOBAL];
@@ -2745,7 +2761,8 @@ static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *axi =
hw->debugfs_reg_array[DEBUGFS_AXI];
@@ -2758,7 +2775,8 @@ static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *ras =
hw->debugfs_reg_array[DEBUGFS_RAS];
@@ -2771,8 +2789,9 @@ static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
- void *cachebuf = hisi_hba->debugfs_itct_cache;
- void *databuf = hisi_hba->debugfs_itct;
+ int dump_index = hisi_hba->debugfs_dump_index;
+ void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
struct hisi_sas_itct *itct;
int i;
@@ -2789,9 +2808,10 @@ static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
+ int dump_index = hisi_hba->debugfs_dump_index;
int max_command_entries = HISI_SAS_MAX_COMMANDS;
- void *cachebuf = hisi_hba->debugfs_iost_cache;
- void *databuf = hisi_hba->debugfs_iost;
+ void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
struct hisi_sas_iost *iost;
int i;
@@ -2842,11 +2862,12 @@ static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *global = s->private;
+ struct hisi_hba *hisi_hba = global->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
+ hisi_sas_debugfs_print_reg(global->data,
reg_global, s);
return 0;
@@ -2868,11 +2889,12 @@ static const struct file_operations hisi_sas_debugfs_global_fops = {
static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *axi = s->private;
+ struct hisi_hba *hisi_hba = axi->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
+ hisi_sas_debugfs_print_reg(axi->data,
reg_axi, s);
return 0;
@@ -2894,11 +2916,12 @@ static const struct file_operations hisi_sas_debugfs_axi_fops = {
static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *ras = s->private;
+ struct hisi_hba *hisi_hba = ras->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
+ hisi_sas_debugfs_print_reg(ras->data,
reg_ras, s);
return 0;
@@ -2920,13 +2943,13 @@ static const struct file_operations hisi_sas_debugfs_ras_fops = {
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
- struct hisi_sas_phy *phy = s->private;
+ struct hisi_sas_debugfs_port *port = s->private;
+ struct hisi_sas_phy *phy = port->phy;
struct hisi_hba *hisi_hba = phy->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
- u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];
- hisi_sas_debugfs_print_reg(databuf, reg_port, s);
+ hisi_sas_debugfs_print_reg(port->data, reg_port, s);
return 0;
}
@@ -2975,13 +2998,13 @@ static void hisi_sas_show_row_32(struct seq_file *s, int index,
seq_puts(s, "\n");
}
-static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
+static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
+ struct hisi_sas_debugfs_cq *debugfs_cq)
{
- struct hisi_sas_cq *cq = cq_ptr;
+ struct hisi_sas_cq *cq = debugfs_cq->cq;
struct hisi_hba *hisi_hba = cq->hisi_hba;
- void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
- __le32 *complete_hdr = complete_queue +
- (hisi_hba->hw->complete_hdr_size * slot);
+ __le32 *complete_hdr = debugfs_cq->complete_hdr +
+ (hisi_hba->hw->complete_hdr_size * slot);
hisi_sas_show_row_32(s, slot,
hisi_hba->hw->complete_hdr_size,
@@ -2990,11 +3013,11 @@ static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
- struct hisi_sas_cq *cq = s->private;
+ struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
int slot;
for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- hisi_sas_cq_show_slot(s, slot, cq);
+ hisi_sas_cq_show_slot(s, slot, debugfs_cq);
}
return 0;
}
@@ -3014,9 +3037,8 @@ static const struct file_operations hisi_sas_debugfs_cq_fops = {
static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
- struct hisi_sas_dq *dq = dq_ptr;
- struct hisi_hba *hisi_hba = dq->hisi_hba;
- void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
+ struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
+ void *cmd_queue = debugfs_dq->hdr;
__le32 *cmd_hdr = cmd_queue +
sizeof(struct hisi_sas_cmd_hdr) * slot;
@@ -3048,14 +3070,14 @@ static const struct file_operations hisi_sas_debugfs_dq_fops = {
static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
+ struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
+ struct hisi_sas_iost *iost = debugfs_iost->iost;
int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
- for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
- __le64 *iost = &debugfs_iost->qw0;
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ __le64 *data = &iost->qw0;
- hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
+ hisi_sas_show_row_64(s, i, sizeof(*iost), data);
}
return 0;
@@ -3076,9 +3098,8 @@ static const struct file_operations hisi_sas_debugfs_iost_fops = {
static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost_itct_cache *iost_cache =
- (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
+ struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
+ struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
int i, tab_idx;
__le64 *iost;
@@ -3117,13 +3138,13 @@ static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
int i;
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
+ struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+ struct hisi_sas_itct *itct = debugfs_itct->itct;
- for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
- __le64 *itct = &debugfs_itct->qw0;
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ __le64 *data = &itct->qw0;
- hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
+ hisi_sas_show_row_64(s, i, sizeof(*itct), data);
}
return 0;
@@ -3144,9 +3165,8 @@ static const struct file_operations hisi_sas_debugfs_itct_fops = {
static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost_itct_cache *itct_cache =
- (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
+ struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
+ struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
int i, tab_idx;
__le64 *itct;
@@ -3184,6 +3204,8 @@ static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
+ u64 *debugfs_timestamp;
+ int dump_index = hisi_hba->debugfs_dump_index;
struct dentry *dump_dentry;
struct dentry *dentry;
char name[256];
@@ -3191,19 +3213,26 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
int c;
int d;
- /* Create dump dir inside device dir */
- dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
- hisi_hba->debugfs_dump_dentry = dump_dentry;
+ snprintf(name, 256, "%d", dump_index);
+
+ dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
- debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
- &hisi_sas_debugfs_global_fops);
+ debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
+
+ debugfs_create_u64("timestamp", 0400, dump_dentry,
+ debugfs_timestamp);
+
+ debugfs_create_file("global", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
+ &hisi_sas_debugfs_global_fops);
/* Create port dir and files */
dentry = debugfs_create_dir("port", dump_dentry);
for (p = 0; p < hisi_hba->n_phy; p++) {
snprintf(name, 256, "%d", p);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_port_reg[dump_index][p],
&hisi_sas_debugfs_port_fops);
}
@@ -3212,7 +3241,8 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
for (c = 0; c < hisi_hba->queue_count; c++) {
snprintf(name, 256, "%d", c);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_cq[dump_index][c],
&hisi_sas_debugfs_cq_fops);
}
@@ -3221,26 +3251,33 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
for (d = 0; d < hisi_hba->queue_count; d++) {
snprintf(name, 256, "%d", d);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_dq[dump_index][d],
&hisi_sas_debugfs_dq_fops);
}
- debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("iost", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost[dump_index],
&hisi_sas_debugfs_iost_fops);
- debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("iost_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost_cache[dump_index],
&hisi_sas_debugfs_iost_cache_fops);
- debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("itct", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct[dump_index],
&hisi_sas_debugfs_itct_fops);
- debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("itct_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct_cache[dump_index],
&hisi_sas_debugfs_itct_cache_fops);
- debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("axi", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
&hisi_sas_debugfs_axi_fops);
- debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("ras", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
&hisi_sas_debugfs_ras_fops);
return;
@@ -3271,8 +3308,7 @@ static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
struct hisi_hba *hisi_hba = file->f_inode->i_private;
char buf[8];
- /* A bit racy, but don't care too much since it's only debugfs */
- if (hisi_hba->debugfs_snapshot)
+ if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
return -EFAULT;
if (count > 8)
@@ -3539,7 +3575,7 @@ static const struct {
int value;
char *name;
} hisi_sas_debugfs_loop_modes[] = {
- { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digial" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
};
@@ -3670,132 +3706,201 @@ static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
.owner = THIS_MODULE,
};
+static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct hisi_sas_phy *phy = s->private;
+ unsigned int set_val;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, &set_val);
+ if (res)
+ return res;
+
+ if (set_val > 0)
+ return -EINVAL;
+
+ atomic_set(&phy->down_cnt, 0);
+
+ return count;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+
+ seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = {
+ .open = hisi_sas_debugfs_phy_down_cnt_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_phy_down_cnt_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, debugfs_work);
+ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+ struct device *dev = hisi_hba->dev;
+ u64 timestamp = local_clock();
- if (hisi_hba->debugfs_snapshot)
+ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+ dev_warn(dev, "dump count exceeded!\n");
return;
- hisi_hba->debugfs_snapshot = true;
+ }
+
+ do_div(timestamp, NSEC_PER_MSEC);
+ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
hisi_sas_debugfs_snapshot_regs(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
-static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index)
{
struct device *dev = hisi_hba->dev;
int i;
- devm_kfree(dev, hisi_hba->debugfs_iost_cache);
- devm_kfree(dev, hisi_hba->debugfs_itct_cache);
- devm_kfree(dev, hisi_hba->debugfs_iost);
+ devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
+ devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
+ devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
+ devm_kfree(dev,
+ hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
for (i = 0; i < DEBUGFS_REGS_NUM; i++)
- devm_kfree(dev, hisi_hba->debugfs_regs[i]);
+ devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
for (i = 0; i < hisi_hba->n_phy; i++)
- devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
}
-static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index)
{
const struct hisi_sas_hw *hw = hisi_hba->hw;
struct device *dev = hisi_hba->dev;
- int p, c, d;
+ int p, c, d, r, i;
size_t sz;
- hisi_hba->debugfs_dump_dentry =
- debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+ for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
+ struct hisi_sas_debugfs_regs *regs =
+ &hisi_hba->debugfs_regs[dump_index][r];
- sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
- goto fail;
+ sz = hw->debugfs_reg_array[r]->count * 4;
+ regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!regs->data)
+ goto fail;
+ regs->hisi_hba = hisi_hba;
+ }
sz = hw->debugfs_reg_port->count * 4;
for (p = 0; p < hisi_hba->n_phy; p++) {
- hisi_hba->debugfs_port_reg[p] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_port *port =
+ &hisi_hba->debugfs_port_reg[dump_index][p];
- if (!hisi_hba->debugfs_port_reg[p])
+ port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!port->data)
goto fail;
+ port->phy = &hisi_hba->phy[p];
}
- sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_AXI] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
- goto fail;
-
- sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_RAS] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
- goto fail;
-
sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
for (c = 0; c < hisi_hba->queue_count; c++) {
- hisi_hba->debugfs_complete_hdr[c] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_cq *cq =
+ &hisi_hba->debugfs_cq[dump_index][c];
- if (!hisi_hba->debugfs_complete_hdr[c])
+ cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!cq->complete_hdr)
goto fail;
+ cq->cq = &hisi_hba->cq[c];
}
sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
for (d = 0; d < hisi_hba->queue_count; d++) {
- hisi_hba->debugfs_cmd_hdr[d] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_dq *dq =
+ &hisi_hba->debugfs_dq[dump_index][d];
- if (!hisi_hba->debugfs_cmd_hdr[d])
+ dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!dq->hdr)
goto fail;
+ dq->dq = &hisi_hba->dq[d];
}
sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
- hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost)
+ hisi_hba->debugfs_iost[dump_index].iost =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost[dump_index].iost)
goto fail;
sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
sizeof(struct hisi_sas_iost_itct_cache);
- hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost_cache)
+ hisi_hba->debugfs_iost_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
goto fail;
sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
sizeof(struct hisi_sas_iost_itct_cache);
- hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct_cache)
+ hisi_hba->debugfs_itct_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
goto fail;
 /* New memory allocation must be located before itct */
sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct)
+ hisi_hba->debugfs_itct[dump_index].itct =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct[dump_index].itct)
goto fail;
return 0;
fail:
- hisi_sas_debugfs_release(hisi_hba);
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+ hisi_sas_debugfs_release(hisi_hba, i);
return -ENOMEM;
}
+static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
+{
+ struct dentry *dir = debugfs_create_dir("phy_down_cnt",
+ hisi_hba->debugfs_dir);
+ char name[16];
+ int phy_no;
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ snprintf(name, 16, "%d", phy_no);
+ debugfs_create_file(name, 0600, dir,
+ &hisi_hba->phy[phy_no],
+ &hisi_sas_debugfs_phy_down_cnt_ops);
+ }
+}
+
static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
{
hisi_hba->debugfs_bist_dentry =
@@ -3827,6 +3932,7 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ int i;
hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
hisi_sas_debugfs_dir);
@@ -3838,9 +3944,17 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
/* create bist structures */
hisi_sas_debugfs_bist_init(hisi_hba);
- if (hisi_sas_debugfs_alloc(hisi_hba)) {
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
- dev_dbg(dev, "failed to init debugfs!\n");
+ hisi_hba->debugfs_dump_dentry =
+ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+
+ hisi_sas_debugfs_phy_down_cnt_init(hisi_hba);
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (hisi_sas_debugfs_alloc(hisi_hba, i)) {
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
}
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
@@ -3874,14 +3988,24 @@ EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
+u32 hisi_sas_debugfs_dump_count = 1;
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
+module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
+MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
+
static __init int hisi_sas_init(void)
{
hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
if (!hisi_sas_stt)
return -ENOMEM;
- if (hisi_sas_debugfs_enable)
+ if (hisi_sas_debugfs_enable) {
hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
+ if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
+ pr_info("hisi_sas: Limiting debugfs dump count\n");
+ hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
+ }
+ }
return 0;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index b861a0f14c9d..3af53cc42bd6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -531,8 +531,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
@@ -551,6 +551,8 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
qw0 = le64_to_cpu(itct->qw0);
qw0 &= ~ITCT_HDR_VALID_MSK;
itct->qw0 = cpu_to_le64(qw0);
+
+ return 0;
}
static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 8e96a257e439..61b1e2693b08 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -974,13 +974,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ struct device *dev = hisi_hba->dev;
int i;
sas_dev->completion = &completion;
@@ -990,13 +991,19 @@ static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
+ /* need to set register twice to clear ITCT for v2 hw */
for (i = 0; i < 2; i++) {
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- wait_for_completion(sas_dev->completion);
+ if (!wait_for_completion_timeout(sas_dev->completion,
+ CLEAR_ITCT_TIMEOUT * HZ)) {
+ dev_warn(dev, "failed to clear ITCT\n");
+ return -ETIMEDOUT;
+ }
memset(itct, 0, sizeof(struct hisi_sas_itct));
}
+ return 0;
}
static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index cb8d087762db..bf5d5f138437 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -795,13 +795,14 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ struct device *dev = hisi_hba->dev;
sas_dev->completion = &completion;
@@ -814,8 +815,14 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- wait_for_completion(sas_dev->completion);
+ if (!wait_for_completion_timeout(sas_dev->completion,
+ CLEAR_ITCT_TIMEOUT * HZ)) {
+ dev_warn(dev, "failed to clear ITCT\n");
+ return -ETIMEDOUT;
+ }
+
memset(itct, 0, sizeof(struct hisi_sas_itct));
+ return 0;
}
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
@@ -1542,6 +1549,8 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct device *dev = hisi_hba->dev;
+ atomic_inc(&phy->down_cnt);
+
del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
@@ -3022,11 +3031,6 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
hisi_sas_phy_write32(hisi_hba, phy_id,
SAS_PHY_BIST_CTRL, reg_val);
- mdelay(100);
- reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
-
/* set the bist init value */
hisi_sas_phy_write32(hisi_hba, phy_id,
SAS_PHY_BIST_CODE,
@@ -3035,6 +3039,11 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
SAS_PHY_BIST_CODE1,
SAS_PHY_BIST_CODE1_INIT);
+ mdelay(100);
+ reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CTRL, reg_val);
+
/* clear error bit */
mdelay(100);
hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
@@ -3259,6 +3268,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
err_out_regions:
pci_release_regions(pdev);
@@ -3292,8 +3302,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- hisi_sas_debugfs_exit(hisi_hba);
-
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
@@ -3305,6 +3313,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
}
@@ -3422,6 +3431,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
if (rc) {
scsi_remove_host(shost);
pci_disable_device(pdev);
+ return rc;
}
hisi_hba->hw->phys_init(hisi_hba);
sas_resume_ha(sha);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 55522b7162d3..1d669e47b692 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -38,6 +38,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -554,13 +555,29 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
+static bool scsi_host_check_in_flight(struct request *rq, void *data,
+ bool reserved)
+{
+ int *count = data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+ (*count)++;
+
+ return true;
+}
+
/**
* scsi_host_busy - Return the host busy counter
* @shost: Pointer to Scsi_Host to inc.
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
- return atomic_read(&shost->host_busy);
+ int cnt = 0;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ scsi_host_check_in_flight, &cnt);
+ return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e8bc8d328bab..f25672982c5f 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -498,7 +498,7 @@ ips_setup(char *ips_str)
int i;
char *key;
char *value;
- IPS_OPTION options[] = {
+ static const IPS_OPTION options[] = {
{"noi2o", &ips_force_i2o, 0},
{"nommap", &ips_force_memio, 0},
{"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 9e8de1462593..b1c197505579 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -147,7 +147,7 @@ static struct isci_port *sci_port_configuration_agent_find_port(
/**
*
* @controller: This is the controller object that contains the port agent
- * @port_agent: This is the port configruation agent for the controller.
+ * @port_agent: This is the port configuration agent for the controller.
*
* This routine will validate the port configuration is correct for the SCU
* hardware. The SCU hardware allows for port configurations as follows. LP0
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 49aa4e657c44..cd1e4b4d95bb 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1504,7 +1504,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
* This function builds the isci_remote_device when a libsas dev_found message
* is received.
* @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port conected to this device.
+ * @port: This parameter specifies the isci_port connected to this device.
*
* pointer to new isci_remote_device.
*/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7bedbe877704..0bc63a7ab41c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
unsigned int noreclaim_flag;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int rc = 0;
+ if (!tcp_sw_conn->sock) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Transport not bound to socket!\n");
+ return -EINVAL;
+ }
+
noreclaim_flag = memalloc_noreclaim_save();
while (iscsi_sw_tcp_xmit_qlen(conn)) {
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 691acbdcc46d..935f98804198 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -605,6 +605,12 @@ struct lpfc_epd_pool {
spinlock_t lock; /* lock for expedite pool */
};
+enum ras_state {
+ INACTIVE,
+ REG_INPROGRESS,
+ ACTIVE
+};
+
struct lpfc_ras_fwlog {
uint8_t *fwlog_buff;
uint32_t fw_buffcount; /* Buffer size posted to FW */
@@ -621,7 +627,7 @@ struct lpfc_ras_fwlog {
bool ras_enabled; /* Ras Enabled for the function */
#define LPFC_RAS_DISABLE_LOGGING 0x00
#define LPFC_RAS_ENABLE_LOGGING 0x01
- bool ras_active; /* RAS logging running state */
+ enum ras_state state; /* RAS logging running state */
};
struct lpfc_hba {
@@ -725,6 +731,7 @@ struct lpfc_hba {
#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
@@ -830,6 +837,7 @@ struct lpfc_hba {
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
+ uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -872,7 +880,6 @@ struct lpfc_hba {
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
- uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
uint32_t cfg_delay_discovery;
@@ -990,7 +997,6 @@ struct lpfc_hba {
struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
- struct dma_pool *txrdy_payload_pool;
struct dma_pool *lpfc_cmd_rsp_buf_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1055,6 +1061,7 @@ struct lpfc_hba {
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
+ struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@@ -1209,6 +1216,13 @@ struct lpfc_hba {
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
+
+ struct hlist_node cpuhp; /* used for cpuhp per hba callback */
+ struct timer_list cpuhp_poll_timer;
+ struct list_head poll_list; /* slowpath eq polling list */
+#define LPFC_POLL_HB 1 /* slowpath heartbeat */
+#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
+#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
};
static inline struct Scsi_Host *
@@ -1299,6 +1313,26 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
+ * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
+ * @numa_mask: Pointer to phba's numa_mask member.
+ * @start: starting cpu index
+ *
+ * Note: If no valid cpu found, then nr_cpu_ids is returned.
+ *
+ **/
+static inline unsigned int
+lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+{
+ unsigned int cpu_it;
+
+ for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ if (cpu_online(cpu_it))
+ break;
+ }
+
+ return cpu_it;
+}
+/**
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
* @phba: Pointer to HBA context object.
* @q: The Event Queue to update.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 25aa7a53d255..4ff82b36a37a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
int i;
int len = 0;
char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
- unsigned long iflags = 0;
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- rcu_read_lock();
scnprintf(tmp, sizeof(tmp),
"XRI Dist lpfc%d Total %d IO %d ELS %d\n",
phba->brd_no,
@@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
phba->sli4_hba.io_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
+
+ spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock_irqsave(&vport->phba->hbalock, iflags);
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
+ spin_unlock(&vport->phba->hbalock);
if (!nrport)
continue;
@@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
/* Tab in to show lport ownership. */
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
if (phba->brd_no >= 10) {
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
nrport->port_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
nrport->node_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "DID x%06x ",
nrport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
@@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
nrport->port_role);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "%s\n", statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
- rcu_read_unlock();
+ spin_unlock_irq(shost->host_lock);
if (!lport)
goto buffer_done;
@@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_fcp_err));
strlcat(buf, tmp, PAGE_SIZE);
- /* RCU is already unlocked. */
+ /* host_lock is already unlocked. */
goto buffer_done;
- rcu_unlock_buf_done:
- rcu_read_unlock();
+ unlock_buf_done:
+ spin_unlock_irq(shost->host_lock);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
@@ -1475,8 +1475,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
int i;
msleep(100);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ return -EIO;
/* verify if privileged for the request operation */
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
@@ -1486,8 +1487,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
/* wait for the SLI port firmware ready after firmware reset */
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
msleep(10);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ continue;
if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
continue;
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
@@ -1642,7 +1644,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
{
LPFC_MBOXQ_t *mbox = NULL;
unsigned long val = 0;
- char *pval = 0;
+ char *pval = NULL;
int rc = 0;
if (!strncmp("enable", buff_out,
@@ -3533,6 +3535,31 @@ LPFC_ATTR_R(enable_rrq, 2, 0, 2,
LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
LPFC_DELAY_INIT_LINK_INDEFINITELY,
"Suppress Link Up at initialization");
+
+static ssize_t
+lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.pls);
+}
+static DEVICE_ATTR(pls, 0444,
+ lpfc_pls_show, NULL);
+
+static ssize_t
+lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+}
+static DEVICE_ATTR(pt, 0444,
+ lpfc_pt_show, NULL);
+
/*
# lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
# 1 - (1024)
@@ -3580,9 +3607,6 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
- "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -4096,7 +4120,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
- if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ /*
+ * The 'topology' is not a configurable parameter if:
+ * - persistent topology enabled
+ * - G7 adapters
+ * - G6 with no private loop support
+ */
+
+ if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+ (!phba->sli4_hba.pc_sli4_params.pls &&
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
@@ -5298,7 +5331,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %02d not present\n",
phba->sli4_hba.curr_disp_cpu);
- else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+ else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5311,10 +5344,10 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "CPU %02d EQ %04d hdwq %04d "
+ "CPU %02d EQ None hdwq %04d "
"physid %d coreid %d ht %d ua %d\n",
phba->sli4_hba.curr_disp_cpu,
- cpup->eq, cpup->hdwq, cpup->phys_id,
+ cpup->hdwq, cpup->phys_id,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
@@ -5329,7 +5362,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5340,7 +5373,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
}
phba->sli4_hba.curr_disp_cpu++;
@@ -5711,7 +5744,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
* the driver will advertise it supports to the SCSI layer.
*
* 0 = Set nr_hw_queues by the number of CPUs or HW queues.
- * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ * 1,256 = Manually specify nr_hw_queue value to be advertised,
*
* Value range is [0,256]. Default value is 8.
*/
@@ -5729,30 +5762,130 @@ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
* A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
*
* 0 = Configure the number of hdw queues to the number of active CPUs.
- * 1,128 = Manually specify how many hdw queues to use.
+ * 1,256 = Manually specify how many hdw queues to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
*/
LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_DEF,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
+static inline void
+lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+{
+#if IS_ENABLED(CONFIG_X86)
+ /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ phba->cfg_irq_numa = 1;
+ else
+ phba->cfg_irq_numa = 0;
+#else
+ phba->cfg_irq_numa = 0;
+#endif
+}
+
/*
* lpfc_irq_chann: Set the number of IRQ vectors that are available
* for Hardware Queues to utilize. This also will map to the number
* of EQ / MSI-X vectors the driver will create. This should never be
* more than the number of Hardware Queues
*
- * 0 = Configure number of IRQ Channels to the number of active CPUs.
- * 1,128 = Manually specify how many IRQ Channels to use.
+ * 0 = Configure number of IRQ Channels to:
+ * if AMD architecture, number of CPUs on HBA's NUMA node
+ * otherwise, number of active CPUs.
+ * [1,256] = Manually specify how many IRQ Channels to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is [0].
*/
-LPFC_ATTR_R(irq_chann,
- LPFC_HBA_HDWQ_DEF,
- LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
- "Set the number of I/O IRQ Channels");
+static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
+module_param(lpfc_irq_chann, uint, 0444);
+MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
+
+/* lpfc_irq_chann_init - Set the hba irq_chann initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
+{
+ const struct cpumask *numa_mask;
+
+ if (phba->cfg_use_msi != 2) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8532 use_msi = %u ignoring cfg_irq_numa\n",
+ phba->cfg_use_msi);
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return 0;
+ }
+
+ /* Check if default setting was passed */
+ if (val == LPFC_IRQ_CHANN_DEF)
+ lpfc_assign_default_irq_numa(phba);
+
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (cpumask_empty(numa_mask)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8533 Could not identify NUMA node, "
+ "ignoring cfg_irq_numa\n");
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ } else {
+ phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8543 lpfc_irq_chann set to %u "
+ "(numa)\n", phba->cfg_irq_chann);
+ }
+ } else {
+ if (val > LPFC_IRQ_CHANN_MAX) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8545 lpfc_irq_chann attribute cannot "
+ "be set to %u, allowed range is "
+ "[%u,%u]\n",
+ val,
+ LPFC_IRQ_CHANN_MIN,
+ LPFC_IRQ_CHANN_MAX);
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return -EINVAL;
+ }
+ phba->cfg_irq_chann = val;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_irq_chann_show - Display value of irq_chann
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the irq_chann value
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
+}
+
+static DEVICE_ATTR_RO(lpfc_irq_chann);
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -5933,7 +6066,53 @@ LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
* [1-4] = Multiple of 1/4th Mb of host memory for FW logging
* Value range [0..4]. Default value is 0
*/
-LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+lpfc_param_show(ras_fwlog_buffsize);
+
+static ssize_t
+lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
+{
+ int ret = 0;
+ enum ras_state state;
+
+ if (!lpfc_rangecheck(val, 0, 4))
+ return -EINVAL;
+
+ if (phba->cfg_ras_fwlog_buffsize == val)
+ return 0;
+
+ if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
+ return -EINVAL;
+
+ spin_lock_irq(&phba->hbalock);
+ state = phba->ras_fwlog.state;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (state == REG_INPROGRESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+ "registration is in progress\n");
+ return -EBUSY;
+ }
+
+ /* For disable logging: stop the logs and free the DMA.
+ * For a ras_fwlog_buffsize change we still need to free and
+ * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
+ */
+ phba->cfg_ras_fwlog_buffsize = val;
+ if (state == ACTIVE) {
+ lpfc_ras_stop_fwlog(phba);
+ lpfc_sli4_ras_dma_free(phba);
+ }
+
+ lpfc_sli4_ras_init(phba);
+ if (phba->ras_fwlog.ras_enabled)
+ ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ return ret;
+}
+
+lpfc_param_store(ras_fwlog_buffsize);
+static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
/*
* lpfc_ras_fwlog_level: Firmware logging verbosity level
@@ -6071,8 +6250,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
- &dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
+ &dev_attr_pls,
+ &dev_attr_pt,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
@@ -7085,11 +7265,22 @@ struct fc_function_template lpfc_vport_transport_functions = {
static void
lpfc_get_hba_function_mode(struct lpfc_hba *phba)
{
- /* If it's a SkyHawk FCoE adapter */
- if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+ /* If the adapter supports FCoE mode */
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_SKYHAWK:
+ case PCI_DEVICE_ID_SKYHAWK_VF:
+ case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
+ case PCI_DEVICE_ID_ZEPHYR_DCSP:
+ case PCI_DEVICE_ID_HORNET:
+ case PCI_DEVICE_ID_TIGERSHARK:
+ case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
- else
+ break;
+ default:
+ /* for others, clear the flag */
phba->hba_flag &= ~HBA_FCOE_MODE;
+ }
}
/**
@@ -7099,6 +7290,7 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
+ lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
lpfc_ns_query_init(phba, lpfc_ns_query);
lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
@@ -7205,12 +7397,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
- lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
- lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
@@ -7256,11 +7446,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
}
if (!phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
- if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 39a736b887b1..d4e1b120cc9e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5435,10 +5435,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Current logging state */
- if (ras_fwlog->ras_active == true)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE)
ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
else
ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+ spin_unlock_irq(&phba->hbalock);
ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
@@ -5495,10 +5497,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
if (action == LPFC_RASACTION_STOP_LOGGING) {
/* Check if already disabled */
- if (ras_fwlog->ras_active == false) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -ESRCH;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
/* Disable logging */
lpfc_ras_stop_fwlog(phba);
@@ -5509,8 +5514,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
* FW-logging with new log-level. Return status
* "Logging already Running" to caller.
**/
- if (ras_fwlog->ras_active)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != INACTIVE)
action_status = -EINPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
/* Enable logging */
rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
@@ -5626,10 +5633,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
goto ras_job_error;
/* Logging to be stopped before reading */
- if (ras_fwlog->ras_active == true) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -EINPROGRESS;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
if (job->request_len <
sizeof(struct fc_bsg_request) +
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b2ad8c750486..ee353c84a097 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
+int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
+void lpfc_sli4_poll_hbtimer(struct timer_list *t);
+void lpfc_sli4_start_polling(struct lpfc_queue *q);
+void lpfc_sli4_stop_polling(struct lpfc_queue *q);
+
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -586,6 +592,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
+void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 25e86706e207..99c9bb249758 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -763,9 +763,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0208 NameServer Rsp Data: x%x x%x "
- "sz x%x\n",
+ "x%x x%x sz x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
@@ -961,9 +963,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4105 NameServer Rsp Data: x%x x%x\n",
+ "4105 NameServer Rsp Data: x%x x%x "
+ "x%x x%x sz x%x\n",
vport->fc_flag,
- CTreq->un.gid.Fc4Type);
+ CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
+ irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
outp,
@@ -1025,6 +1031,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
vport->gidft_inp--;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6450 GID_PT cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
/* Link up / RSCN discovery */
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
@@ -1159,6 +1170,11 @@ out:
/* Link up / RSCN discovery */
if (vport->num_disc_nodes)
vport->num_disc_nodes--;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6451 GFF_ID cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
if (vport->num_disc_nodes == 0) {
/*
* The driver has cycled through all Nports in the RSCN payload.
@@ -1868,6 +1884,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
+ case IOERR_SLI_DOWN:
+ /* Driver aborted this IO. No retry as error
+ * is likely Offline->Online or some adapter
+ * error. Recovery will try again.
+ */
+ break;
case IOERR_ABORT_IN_PROGRESS:
case IOERR_SEQUENCE_TIMEOUT:
case IOERR_ILLEGAL_FRAME:
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8d34be60d379..2e6a68d9ea4f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -2078,6 +2079,96 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
}
#endif
+static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
+ char *buffer, int size)
+{
+ int copied = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(dmabuf, next,
+ &phba->ras_fwlog.fwlog_buff_list, list) {
+ memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
+ copied += LPFC_RAS_MAX_ENTRY_SIZE;
+ if (size > copied)
+ break;
+ }
+ return copied;
+}
+
+static int
+lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer
+ * @inode: The inode pointer that contains an hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int size;
+ int rc = -ENOMEM;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ rc = -EINVAL;
+ goto out;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize;
+ debug->buffer = vmalloc(size);
+ if (!debug->buffer)
+ goto free_debug;
+
+ debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size);
+ if (debug->len < 0) {
+ rc = -EINVAL;
+ goto free_buffer;
+ }
+ file->private_data = debug;
+
+ return 0;
+
+free_buffer:
+ vfree(debug->buffer);
+free_debug:
+ kfree(debug);
+out:
+ return rc;
+}
+
/**
* lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
@@ -5286,6 +5377,16 @@ static const struct file_operations lpfc_debugfs_op_lockstat = {
};
#endif
+#undef lpfc_debugfs_ras_log
+static const struct file_operations lpfc_debugfs_ras_log = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_ras_log_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_ras_log_release,
+};
+#endif
+
#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
.owner = THIS_MODULE,
@@ -5457,7 +5558,6 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.release = lpfc_idiag_cmd_release,
};
-#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
* @phba: Pointer to HBA context object.
@@ -5707,6 +5807,19 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ /* RAS log */
+ snprintf(name, sizeof(name), "ras_log");
+ phba->debug_ras_log =
+ debugfs_create_file(name, 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_ras_log);
+ if (!phba->debug_ras_log) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6148 Cannot create debugfs"
+ " ras_log\n");
+ goto debug_failed;
+ }
+
/* Setup hbqinfo */
snprintf(name, sizeof(name), "hbqinfo");
phba->debug_hbqinfo =
@@ -6117,6 +6230,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
+ debugfs_remove(phba->debug_ras_log);
+ phba->debug_ras_log = NULL;
+
#ifdef LPFC_HDWQ_LOCK_STAT
debugfs_remove(phba->debug_lockstat); /* lockstat */
phba->debug_lockstat = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d5303994bfd6..42a2bf38eaea 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2236,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
+ char *mode;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2273,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ /* If we don't send GFT_ID to Fabric, a PRLI error
+ * could be expected.
+ */
+ if ((vport->fc_flag & FC_FABRIC) ||
+ (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
+ mode = KERN_ERR;
+ else
+ mode = KERN_INFO;
+
/* PRLI failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
@@ -4291,6 +4301,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "3177 ELS response failed\n");
+ goto out;
+ }
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -4430,7 +4445,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
@@ -5260,6 +5275,11 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
}
}
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6452 Discover PLOGI %d flag x%x\n",
+ sentplogi, vport->fc_flag);
+
if (sentplogi) {
lpfc_set_disctmo(vport);
}
@@ -6455,7 +6475,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t payload_len, length, nportid, *cmd;
int rscn_cnt;
int rscn_id = 0, hba_id = 0;
- int i;
+ int i, tmo;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -6561,6 +6581,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DEFERRED;
+
+ /* Restart disctmo if it's already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies + msecs_to_jiffies(1000 * tmo));
+ }
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
@@ -6663,9 +6690,10 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* RSCN processed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
vport->fc_flag, 0, vport->fc_rscn_id_cnt,
- vport->port_state);
+ vport->port_state, vport->num_disc_nodes,
+ vport->gidft_inp);
/* To process RSCN, first compare RSCN data with NameServer */
vport->fc_ns_retry = 0;
@@ -7986,20 +8014,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
+ unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
+
/*
* For SLI3, only the hbalock is required. But SLI4 needs to coordinate
* with the ring insert operation. Because lpfc_sli_issue_abort_iotag
* ultimately grabs the ring_lock, the driver must splice the list into
* a working list and release the locks before calling the abort.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring = lpfc_phba_elsring(phba);
/* Bail out if we've no ELS wq, like in PCI error recovery case. */
if (unlikely(!pring)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -8014,6 +8044,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->vport != vport)
continue;
+ if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
/* On the ELS ring we can have ELS_REQUESTs or
* GEN_REQUESTs waiting for a response.
*/
@@ -8037,21 +8070,21 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"3387 abort list for txq not empty\n");
INIT_LIST_HEAD(&abort_list);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
@@ -8091,7 +8124,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 749286acdc17..85ada3deb47d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -700,7 +700,10 @@ lpfc_work_done(struct lpfc_hba *phba)
if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
- if (phba->link_state >= LPFC_LINK_UP ||
+ /* Driver could have abort request completed in queue
+ * when link goes down. Allow for this transition.
+ */
+ if (phba->link_state >= LPFC_LINK_DOWN ||
phba->link_flag & LS_MDS_LOOPBACK) {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -1135,7 +1138,6 @@ void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- uint8_t bbscn = 0;
if (pmb->u.mb.mbxStatus)
goto out;
@@ -1162,17 +1164,11 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI) {
- if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
- bbscn = bf_get(lpfc_bbscn_def,
- &phba->sli4_hba.bbscn_params);
- vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
- vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
- }
+ if (vport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(vport);
- } else if (vport->fc_flag & FC_PT2PT) {
+ else if (vport->fc_flag & FC_PT2PT)
lpfc_disc_start(vport);
- }
+
return;
out:
@@ -3456,8 +3452,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pport->port_state, vport->fc_flag);
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1313 Link Down UNEXP WWPN Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "1313 Link Down Unexpected FA WWPN Event x%x "
+ "received Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_mm, la),
@@ -4046,7 +4042,7 @@ out:
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
@@ -4575,8 +4571,10 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp;
free_rpi:
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_free_rpi(vport->phba, rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
return NULL;
}
@@ -4835,12 +4833,51 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
/*
+ * Sets the mailbox completion handler to be used for the
+ * unreg_rpi command. The handler varies based on the state of
+ * the port and what will be happening to the rpi next.
+ */
+static void
+lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
+{
+ unsigned long iflags;
+
+ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+
+ } else if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (!(vport->load_flag & FC_UNLOADING)) &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (kref_read(&ndlp->kref) > 0)) {
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else {
+ if (vport->load_flag & FC_UNLOADING) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ lpfc_nlp_get(ndlp);
+ }
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+}
+
+/*
* Free rpi associated with LPFC_NODELIST entry.
* This routine is called from lpfc_freenode(), when we are removing
* a LPFC_NODELIST entry. It is also called if the driver initiates a
@@ -4860,7 +4897,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"3366 RPI x%x needs to be "
"unregistered nlp_flag x%x "
"did x%x\n",
@@ -4871,7 +4909,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* no need to queue up another one.
*/
if (ndlp->nlp_flag & NLP_UNREG_INP) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1436 unreg_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
"Data: x%px\n",
@@ -4890,39 +4929,19 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
- if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
- } else {
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- (!(vport->load_flag & FC_UNLOADING)) &&
- (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) >=
- LPFC_SLI_INTF_IF_TYPE_2) &&
- (kref_read(&ndlp->kref) > 0)) {
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- mbox->mbox_cmpl =
- lpfc_sli4_unreg_rpi_cmpl_clr;
- /*
- * accept PLOGIs after unreg_rpi_cmpl
- */
- acc_plogi = 0;
- } else if (vport->load_flag & FC_UNLOADING) {
- mbox->ctx_ndlp = NULL;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- } else {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- }
- }
+ lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
+ /*
+ * accept PLOGIs after unreg_rpi_cmpl
+ */
+ acc_plogi = 0;
if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
Fabric_DID_MASK) &&
(!(vport->fc_flag & FC_OFFLINE_MODE)))
ndlp->nlp_flag |= NLP_UNREG_INP;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1433 unreg_rpi UNREG x%x on "
"NPort x%x deferred flg x%x "
"Data:x%px\n",
@@ -5057,6 +5076,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
+ unsigned long iflags;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5138,8 +5158,20 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cleanup_vports_rrqs(vport, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- lpfc_unreg_rpi(vport, ndlp);
-
+ if (!lpfc_unreg_rpi(vport, ndlp)) {
+ /* Clean up unregistered and non freed rpis */
+ if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
+ !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
+ lpfc_sli4_free_rpi(vport->phba,
+ ndlp->nlp_rpi);
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ }
return 0;
}
@@ -5165,8 +5197,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
+ "ref %d map:x%x ndlp x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5203,8 +5237,9 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0940 removed node x%px DID x%x "
- " rport not null x%px\n",
- ndlp, ndlp->nlp_DID, ndlp->rport);
+ "rpi %d rport not null x%px\n",
+ ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->rport);
rport = ndlp->rport;
rdata = rport->dd_data;
rdata->pnode = NULL;
@@ -5362,6 +5397,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (!ndlp)
return NULL;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6453 Setup New Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5375,6 +5417,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
"0014 Could not enable ndlp\n");
return NULL;
}
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6454 Setup Enabled Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5394,6 +5442,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6455 Setup RSCN Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* NVME Target mode waits until rport is known to be
* impacted by the RSCN before it transitions. No
* active management - just go to NPR provided the
@@ -5405,15 +5459,32 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+ !(ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6456 Skip Setup RSCN Node x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
ndlp = NULL;
+ }
} else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6457 Setup Active Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* If the initiator received a PLOGI from this NPort or if the
* initiator is already in the process of discovery on it,
* there's no need to try to discover it again.
@@ -5565,10 +5636,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Start Discovery state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0202 Start Discovery hba state x%x "
- "Data: x%x x%x x%x\n",
+ "0202 Start Discovery port state x%x "
+ "flg x%x Data: x%x x%x x%x\n",
vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ vport->fc_adisc_cnt, vport->fc_npr_cnt);
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@@ -5996,7 +6067,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
@@ -6185,12 +6256,12 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0007 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0007 Init New ndlp x%px, rpi:x%x DID:%x "
+ "flg:x%x refcnt:%d map:x%x\n",
+ ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_usg_map);
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
@@ -6419,7 +6490,8 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
goto out;
} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
ret = 1;
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2624 RPI %x DID %x flag %x "
"still logged in\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bd533475c86a..25cdcbc2b02f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -210,7 +210,6 @@ struct lpfc_sli_intf {
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 0
-#define LPFC_IMAX_THRESHOLD 1000
#define LPFC_MAX_AUTO_EQ_DELAY 120
#define LPFC_EQ_DELAY_STEP 15
#define LPFC_EQD_ISR_TRIGGER 20000
@@ -2320,6 +2319,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B
+#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58
struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -2809,6 +2809,15 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_trunk_SHIFT 12
#define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F
#define lpfc_mbx_rd_conf_trunk_WORD word2
+#define lpfc_mbx_rd_conf_pt_SHIFT 20
+#define lpfc_mbx_rd_conf_pt_MASK 0x00000003
+#define lpfc_mbx_rd_conf_pt_WORD word2
+#define lpfc_mbx_rd_conf_tf_SHIFT 22
+#define lpfc_mbx_rd_conf_tf_MASK 0x00000001
+#define lpfc_mbx_rd_conf_tf_WORD word2
+#define lpfc_mbx_rd_conf_ptv_SHIFT 23
+#define lpfc_mbx_rd_conf_ptv_MASK 0x00000001
+#define lpfc_mbx_rd_conf_ptv_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
@@ -3479,6 +3488,9 @@ struct lpfc_sli4_parameters {
#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
+#define cfg_pvl_SHIFT 13
+#define cfg_pvl_MASK 0x00000001
+#define cfg_pvl_WORD word19
#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
@@ -3518,6 +3530,7 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x11
+#define LPFC_SET_DUAL_DUMP 0x1e
struct lpfc_mbx_set_feature {
struct mbox_header header;
uint32_t feature;
@@ -3532,6 +3545,15 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
+#define lpfc_mbx_set_feature_dd_SHIFT 0
+#define lpfc_mbx_set_feature_dd_MASK 0x00000001
+#define lpfc_mbx_set_feature_dd_WORD word6
+#define lpfc_mbx_set_feature_ddquery_SHIFT 1
+#define lpfc_mbx_set_feature_ddquery_MASK 0x00000001
+#define lpfc_mbx_set_feature_ddquery_WORD word6
+#define LPFC_DISABLE_DUAL_DUMP 0
+#define LPFC_ENABLE_DUAL_DUMP 1
+#define LPFC_QUERY_OP_DUAL_DUMP 2
uint32_t word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
@@ -4261,6 +4283,8 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
+#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
+#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
};
/*
@@ -4659,6 +4683,7 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
+#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
@@ -4807,8 +4832,8 @@ union lpfc_wqe128 {
struct send_frame_wqe send_frame;
};
-#define MAGIC_NUMER_G6 0xFEAA0003
-#define MAGIC_NUMER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G6 0xFEAA0003
+#define MAGIC_NUMBER_G7 0xFEAA0005
struct lpfc_grp_hdr {
uint32_t size;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e8813d26e594..dc6f7c4b54c6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -40,6 +40,8 @@
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -66,9 +68,13 @@
#include "lpfc_version.h"
#include "lpfc_ids.h"
+static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -1235,10 +1241,9 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
struct lpfc_hba, eq_delay_work);
struct lpfc_eq_intr_info *eqi, *eqi_new;
struct lpfc_queue *eq, *eq_next;
- unsigned char *eqcnt = NULL;
+ unsigned char *ena_delay = NULL;
uint32_t usdelay;
int i;
- bool update = false;
if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
return;
@@ -1247,44 +1252,36 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
phba->pport->fc_flag & FC_OFFLINE_MODE)
goto requeue;
- eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
- GFP_KERNEL);
- if (!eqcnt)
+ ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
+ GFP_KERNEL);
+ if (!ena_delay)
goto requeue;
- if (phba->cfg_irq_chann > 1) {
- /* Loop thru all IRQ vectors */
- for (i = 0; i < phba->cfg_irq_chann; i++) {
- /* Get the EQ corresponding to the IRQ vector */
- eq = phba->sli4_hba.hba_eq_hdl[i].eq;
- if (!eq)
- continue;
- if (eq->q_mode) {
- update = true;
- break;
- }
- if (eqcnt[eq->last_cpu] < 2)
- eqcnt[eq->last_cpu]++;
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+ if (!eq)
+ continue;
+ if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
+ eq->q_flag &= ~HBA_EQ_DELAY_CHK;
+ ena_delay[eq->last_cpu] = 1;
}
- } else
- update = true;
+ }
for_each_present_cpu(i) {
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
- if (!update && eqcnt[i] < 2) {
- eqi->icnt = 0;
- continue;
+ if (ena_delay[i]) {
+ usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
+ if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+ usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+ } else {
+ usdelay = 0;
}
- usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
- LPFC_EQ_DELAY_STEP;
- if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
- usdelay = LPFC_MAX_AUTO_EQ_DELAY;
-
eqi->icnt = 0;
list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
- if (eq->last_cpu != i) {
+ if (unlikely(eq->last_cpu != i)) {
eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
eq->last_cpu);
list_move_tail(&eq->cpu_list, &eqi_new->list);
@@ -1296,7 +1293,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
}
}
- kfree(eqcnt);
+ kfree(ena_delay);
requeue:
queue_delayed_work(phba->wq, &phba->eq_delay_work,
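As a quick worked example of the reworked delay computation above (illustrative counts only, using LPFC_EQ_DELAY_STEP = 15 and LPFC_MAX_AUTO_EQ_DELAY = 120 from the lpfc_hw4.h hunk above):

	/* For an EQ whose cpu is flagged in ena_delay[], the delay value
	 * (usdelay) derived from the interrupt count accumulated since the
	 * last pass works out to:
	 *
	 *   icnt = 2048:   (2048 >> 10) * 15 =  2 * 15 =  30
	 *   icnt = 6144:   (6144 >> 10) * 15 =  6 * 15 =  90
	 *   icnt = 16384: (16384 >> 10) * 15 = 16 * 15 = 240 -> capped at 120
	 *
	 * EQs whose cpu is not flagged in ena_delay[] simply get usdelay = 0.
	 */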
@@ -3053,11 +3050,12 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
continue;
}
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0009 rpi:%x DID:%x "
- "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0009 Assign RPI x%x to ndlp x%px "
+ "DID:x%06x flg:x%x map:x%x\n",
+ ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_usg_map);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3387,6 +3385,8 @@ lpfc_online(struct lpfc_hba *phba)
if (phba->cfg_xri_rebalancing)
lpfc_create_multixri_pools(phba);
+ lpfc_cpuhp_add(phba);
+
lpfc_unblock_mgmt_io(phba);
return 0;
}
@@ -3453,10 +3453,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ if ((!NLP_CHK_NODE_ACT(ndlp)) ||
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* Driver must assume RPI is invalid for
+ * any unused or inactive node.
+ */
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
continue;
+ }
+
if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_disc_state_machine(vports[i], ndlp,
NULL, NLP_EVT_DEVICE_RECOVERY);
@@ -3472,16 +3477,16 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport,
- KERN_INFO, LOG_NODE,
- "0011 lpfc_offline: "
- "ndlp:x%px did %x "
- "usgmap:x%x rpi:%x\n",
- ndlp, ndlp->nlp_DID,
- ndlp->nlp_usg_map,
- ndlp->nlp_rpi);
-
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0011 Free RPI x%x on "
+ "ndlp:x%px did x%x "
+ "usgmap:x%x\n",
+ ndlp->nlp_rpi, ndlp,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -3545,6 +3550,7 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
+ __lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
lpfc_destroy_multixri_pools(phba);
@@ -5283,10 +5289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2901 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
+ "2901 Async SLI event - Type:%d, Event Data: x%08x "
+ "x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- evt_type);
+ acqe_sli->reserved, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -5433,11 +5439,26 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
+ /* Misconfigured WWN. Reports that the SLI Port is configured
+	 * to use FA-WWN, but the attached device doesn't support it.
+ * No driver action is required.
+ * Event Data1 - N.A, Event Data2 - N.A
+ */
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
+ "2699 Misconfigured FA-WWN - Attached device does "
+ "not support FA-WWN\n");
+ break;
+ case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
+ /* EEPROM failure. No driver action is required */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2518 EEPROM failure - "
+ "Event Data1: x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3193 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
- acqe_sli->event_data1, acqe_sli->event_data2,
+ "3193 Unrecognized SLI event, type: 0x%x",
evt_type);
break;
}
@@ -5976,6 +5997,29 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
+ * lpfc_cpumask_of_node_init - initializes cpumask of phba's NUMA node
+ * @phba: Pointer to HBA context object.
+ *
+ **/
+static void
+lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, numa_node;
+ struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
+
+ cpumask_clear(numa_mask);
+
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE)
+ return;
+
+ for_each_possible_cpu(cpu)
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, numa_mask);
+}
+
+/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6418,6 +6462,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
phba->sli4_hba.num_possible_cpu = num_possible_cpus();
phba->sli4_hba.curr_disp_cpu = 0;
+ lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -6953,6 +6998,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
+ cpumask_clear(&phba->sli4_hba.numa_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -7126,7 +7172,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
- __func__, i, LPFC_IOCB_LIST_CNT);
+ __func__, i, iocb_count);
goto out_free_iocbq;
}
@@ -7545,18 +7591,10 @@ lpfc_create_shost(struct lpfc_hba *phba)
if (phba->nvmet_support) {
/* Only 1 vport (pport) will support NVME target */
- if (phba->txrdy_payload_pool == NULL) {
- phba->txrdy_payload_pool = dma_pool_create(
- "txrdy_pool", &phba->pcidev->dev,
- TXRDY_PAYLOAD_LEN, 16, 0);
- if (phba->txrdy_payload_pool) {
- phba->targetport = NULL;
- phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
- lpfc_printf_log(phba, KERN_INFO,
- LOG_INIT | LOG_NVME_DISC,
- "6076 NVME Target Found\n");
- }
- }
+ phba->targetport = NULL;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
+ "6076 NVME Target Found\n");
}
lpfc_debugfs_initialize(vport);
@@ -8235,6 +8273,94 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
+static const char * const lpfc_topo_to_str[] = {
+ "Loop then P2P",
+ "Loopback",
+ "P2P Only",
+ "Unsupported",
+ "Loop Only",
+ "Unsupported",
+ "P2P then Loop",
+};
+
+/**
+ * lpfc_map_topology - Map the topology read from READ_CONFIG
+ * @phba: pointer to lpfc hba data structure.
+ * @rdconf: pointer to read config data
+ *
+ * This routine is invoked to map the topology values as read
+ * from the read config mailbox command. If the persistent
+ * topology feature is supported, the firmware will provide the
+ * saved topology information to be used in INIT_LINK
+ *
+ **/
+#define LINK_FLAGS_DEF 0x0
+#define LINK_FLAGS_P2P 0x1
+#define LINK_FLAGS_LOOP 0x2
+static void
+lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
+{
+ u8 ptv, tf, pt;
+
+ ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
+ tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
+ pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
+ ptv, tf, pt);
+ if (!ptv) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2019 FW does not support persistent topology "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ return;
+ }
+ /* FW supports persistent topology - override module parameter value */
+ phba->hba_flag |= HBA_PERSISTENT_TOPO;
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ if (tf || (pt == LINK_FLAGS_LOOP)) {
+ /* Invalid values from FW - use driver params */
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ } else {
+ /* Prism only supports PT2PT topology */
+ phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
+ }
+ break;
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ if (!tf) {
+ phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
+ ? FLAGS_TOPOLOGY_MODE_LOOP
+ : FLAGS_TOPOLOGY_MODE_PT_PT);
+ } else {
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ }
+ break;
+ default: /* G5 */
+ if (tf) {
+ /* If topology failover set - pt is '0' or '1' */
+ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
+ FLAGS_TOPOLOGY_MODE_LOOP_PT);
+ } else {
+ phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
+ ? FLAGS_TOPOLOGY_MODE_PT_PT
+ : FLAGS_TOPOLOGY_MODE_LOOP);
+ }
+ break;
+ }
+ if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2020 Using persistent topology value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2021 Invalid topology values from FW "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ }
+}
+
/**
* lpfc_sli4_read_config - Get the config parameters.
* @phba: pointer to lpfc hba data structure.
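Reading the decode in lpfc_map_topology() above with two quick illustrative cases (values assumed for the example, not taken from a live adapter):

	/*
	 * G6 adapter, ptv = 1, tf = 0, pt = LINK_FLAGS_P2P:
	 *   HBA_PERSISTENT_TOPO stays set and cfg_topology becomes
	 *   FLAGS_TOPOLOGY_MODE_PT_PT, so the "2020" message reports the
	 *   persistent value.
	 *
	 * Any adapter with ptv = 0:
	 *   the firmware reports no persistent topology, the routine
	 *   returns early with message "2019", and the module-parameter
	 *   value of cfg_topology is left untouched.
	 */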
@@ -8346,6 +8472,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
+ lpfc_map_topology(phba, rd_config);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2003 cfg params Extents? %d "
"XRI(B:%d M:%d), "
@@ -8619,8 +8746,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
*/
if (phba->nvmet_support) {
- if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
}
@@ -9160,6 +9287,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
}
spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_cleanup_poll_list(phba);
+
/* Release HBA eqs */
if (phba->sli4_hba.hdwq)
lpfc_sli4_release_hdwq(phba);
@@ -10581,7 +10710,6 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
*/
if ((match == LPFC_FIND_BY_EQ) &&
(cpup->flag & LPFC_CPU_FIRST_IRQ) &&
- (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
(cpup->eq == id))
return cpu;
@@ -10619,6 +10747,75 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
}
#endif
+/*
+ * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
+ * @phba: pointer to lpfc hba data structure.
+ * @eqidx: index for eq and irq vector
+ * @flag: flags to set for vector_map structure
+ * @cpu: cpu used to index vector_map structure
+ *
+ * The routine assigns eq info into vector_map structure
+ */
+static inline void
+lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
+ unsigned int cpu)
+{
+ struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
+ struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
+
+ cpup->eq = eqidx;
+ cpup->flag |= flag;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
+ cpu, eqhdl->irq, cpup->eq, cpup->flag);
+}
+
+/**
+ * lpfc_cpu_map_array_init - Initialize cpu_map structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the cpu_map array structure
+ */
+static void
+lpfc_cpu_map_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_eq_intr_info *eqi;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->flag = 0;
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
+ INIT_LIST_HEAD(&eqi->list);
+ eqi->icnt = 0;
+ }
+}
+
+/**
+ * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the hba_eq_hdl array structure
+ */
+static void
+lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_hba_eq_hdl *eqhdl;
+ int i;
+
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ eqhdl = lpfc_get_eq_hdl(i);
+ eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->phba = phba;
+ }
+}
+
/**
* lpfc_cpu_affinity_check - Check vector CPU affinity mappings
* @phba: pointer to lpfc hba data structure.
@@ -10637,22 +10834,10 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *new_cpup;
- const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
- /* Init cpu_map array */
- for_each_possible_cpu(cpu) {
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
- cpup->eq = LPFC_VECTOR_MAP_EMPTY;
- cpup->irq = LPFC_VECTOR_MAP_EMPTY;
- cpup->flag = 0;
- }
-
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
max_core_id = 0;
@@ -10688,65 +10873,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
min_core_id = cpup->core_id;
}
- for_each_possible_cpu(i) {
- struct lpfc_eq_intr_info *eqi =
- per_cpu_ptr(phba->sli4_hba.eq_info, i);
-
- INIT_LIST_HEAD(&eqi->list);
- eqi->icnt = 0;
- }
-
- /* This loop sets up all CPUs that are affinitized with a
- * irq vector assigned to the driver. All affinitized CPUs
- * will get a link to that vectors IRQ and EQ.
- *
- * NULL affinity mask handling:
- * If irq count is greater than one, log an error message.
- * If the null mask is received for the first irq, find the
- * first present cpu, and assign the eq index to ensure at
- * least one EQ is assigned.
- */
- for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- /* Get a CPU mask for all CPUs affinitized to this vector */
- maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp) {
- if (phba->cfg_irq_chann > 1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3329 No affinity mask found "
- "for vector %d (%d)\n",
- idx, phba->cfg_irq_chann);
- if (!idx) {
- cpu = cpumask_first(cpu_present_mask);
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- }
- break;
- }
-
- i = 0;
- /* Loop through all CPUs associated with vector idx */
- for_each_cpu_and(cpu, maskp, cpu_present_mask) {
- /* Set the EQ index and IRQ for that vector */
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
-
- /* If this is the first CPU thats assigned to this
- * vector, set LPFC_CPU_FIRST_IRQ.
- */
- if (!i)
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- i++;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3336 Set Affinity: CPU %d "
- "irq %d eq %d flag x%x\n",
- cpu, cpup->irq, cpup->eq, cpup->flag);
- }
- }
-
/* After looking at each irq vector assigned to this pcidev, its
* possible to see that not ALL CPUs have been accounted for.
* Next we will set any unassigned (unaffinitized) cpu map
@@ -10772,7 +10898,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
new_cpu = cpumask_next(
@@ -10785,7 +10911,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
found_same:
/* We found a matching phys_id, so copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
			/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10797,9 +10922,10 @@ found_same:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
- "irq %d from id %d same "
+ "eq %d from peer cpu %d same "
"phys_id (%d)\n",
- cpu, cpup->irq, new_cpu, cpup->phys_id);
+ cpu, cpup->eq, new_cpu,
+ cpup->phys_id);
}
}
@@ -10823,7 +10949,7 @@ found_same:
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
@@ -10833,13 +10959,12 @@ found_same:
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3339 Set Affinity: CPU %d "
- "irq %d UNASSIGNED\n",
- cpup->hdwq, cpup->irq);
+ "eq %d UNASSIGNED\n",
+ cpup->hdwq, cpup->eq);
continue;
found_any:
/* We found an available entry, copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
			/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10851,8 +10976,8 @@ found_any:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
- "irq %d from id %d (%d/%d)\n",
- cpu, cpup->irq, new_cpu,
+ "eq %d from peer cpu %d (%d/%d)\n",
+ cpu, cpup->eq, new_cpu,
new_cpup->phys_id, new_cpup->core_id);
}
}
@@ -10873,11 +10998,11 @@ found_any:
idx++;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3333 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
}
- /* Finally we need to associate a hdwq with each cpu_map entry
+ /* Associate a hdwq with each cpu_map entry
* This will be 1 to 1 - hdwq to cpu, unless there are less
* hardware queues then CPUs. For that case we will just round-robin
* the available hardware queues as they get assigned to CPUs.
@@ -10951,9 +11076,26 @@ found_any:
logit:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
+ }
+
+ /*
+ * Initialize the cpu_map slots for not-present cpus in case
+ * a cpu is hot-added. Perform a simple hdwq round robin assignment.
+ */
+ idx = 0;
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
+ continue;
+
+ cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3340 Set Affinity: not present "
+ "CPU %d hdwq %d\n",
+ cpu, cpup->hdwq);
}
/* The cpu_map array will be used later during initialization
@@ -10963,11 +11105,280 @@ found_any:
}
/**
+ * lpfc_cpuhp_get_eq
+ *
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: cpu going offline
+ * @eqlist: list to hold the eqs that must be polled once @cpu goes offline
+ */
+static void
+lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
+ struct list_head *eqlist)
+{
+ const struct cpumask *maskp;
+ struct lpfc_queue *eq;
+ cpumask_t tmp;
+ u16 idx;
+
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ maskp = pci_irq_get_affinity(phba->pcidev, idx);
+ if (!maskp)
+ continue;
+ /*
+		 * If the irq is not affinitized to the cpu going
+		 * offline, then we don't need to poll the eq
+		 * attached to it.
+ */
+ if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ continue;
+		/* Get the cpus that are online and are affinitized to
+		 * this irq vector. If the count is more than 1 then
+		 * cpuhp is not going to shut down this vector. Since
+		 * this cpu has not gone offline yet, we need >1.
+ */
+ cpumask_and(&tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(&tmp) > 1)
+ continue;
+
+		/* Now that we have an irq to shut down, get the eq
+		 * mapped to this irq. Note: multiple hdwq's in
+		 * the software can share an eq, but eventually
+		 * only one eq will be mapped to this vector.
+ */
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+ list_add(&eq->_poll_list, eqlist);
+ }
+}
+
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+ /*
+ * unregistering the instance doesn't stop the polling
+ * timer. Wait for the poll timer to retire.
+ */
+ synchronize_rcu();
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ return;
+
+ __lpfc_cpuhp_remove(phba);
+}
+
+static void lpfc_cpuhp_add(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ rcu_read_lock();
+
+ if (!list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ rcu_read_unlock();
+
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+}
+
+static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
+{
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ *retval = -EAGAIN;
+ return true;
+ }
+
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ *retval = 0;
+ return true;
+ }
+
+ /* proceed with the hotplug */
+ return false;
+}
+
+/**
+ * lpfc_irq_set_aff - set IRQ affinity
+ * @eqhdl: EQ handle
+ * @cpu: cpu to set affinity
+ *
+ **/
+static inline void
+lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ cpumask_set_cpu(cpu, &eqhdl->aff_mask);
+ irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_clear_aff - clear IRQ affinity
+ * @eqhdl: EQ handle
+ *
+ **/
+static inline void
+lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
+ * @phba: pointer to HBA context object.
+ * @cpu: cpu going offline/online
+ * @offline: true, cpu is going offline. false, cpu is coming online.
+ *
+ * If cpu is going offline, we'll try our best effort to find the next
+ * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ *
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
+ *
+ * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
+ *
+ **/
+static void
+lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct cpumask *aff_mask;
+ unsigned int cpu_select, cpu_next, idx;
+ const struct cpumask *numa_mask;
+
+ if (!phba->cfg_irq_numa)
+ return;
+
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (!cpumask_test_cpu(cpu, numa_mask))
+ return;
+
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ return;
+
+ if (offline) {
+ /* Find next online CPU on NUMA node */
+ cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+
+ /* Found a valid CPU */
+ if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
+ /* Go through each eqhdl and ensure offlining
+ * cpu aff_mask is migrated
+ */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ aff_mask = lpfc_get_aff_mask(idx);
+
+ /* Migrate affinity */
+ if (cpumask_test_cpu(cpu, aff_mask))
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
+ cpu_select);
+ }
+ } else {
+ /* Rely on irqbalance if no online CPUs left on NUMA */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++)
+ lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
+ }
+ } else {
+ /* Migrate affinity back to this CPU */
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
+ }
+}
+
+static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ LIST_HEAD(eqlist);
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, true);
+
+ lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+
+ /* start polling on these eq's */
+ list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
+ list_del_init(&eq->_poll_list);
+ lpfc_sli4_start_polling(eq);
+ }
+
+ return 0;
+}
+
+static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ unsigned int n;
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, false);
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
+ n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
+ if (n == cpu)
+ lpfc_sli4_stop_polling(eq);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec.
+ * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
+ * to cpus on the system.
+ *
+ * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
+ * the number of cpus on the same numa node as this adapter. The vectors are
+ * allocated without requesting OS affinity mapping. A vector will be
+ * allocated and assigned to each online and offline cpu. If the cpu is
+ * online, then affinity will be set to that cpu. If the cpu is offline, then
+ * affinity will be set to the nearest peer cpu within the numa node that is
+ * online. If there are no online cpus within the numa node, affinity is not
+ * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
+ * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
+ * configured.
+ *
+ * If numa mode is not enabled and there is more than 1 vector allocated, then
+ * the driver relies on the managed irq interface where the OS assigns vector to
+ * cpu affinity. The driver will then use that affinity mapping to setup its
+ * cpu mapping table.
*
* Return codes
* 0 - successful
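The NUMA-aware path described above leans on lpfc_next_online_numa_cpu(), which is referenced in the hunks but not defined in this excerpt. A plausible sketch of what such a helper does — an assumption for illustration, not the driver's actual definition — consistent with callers that treat a return >= nr_cpu_ids as "no online cpu found":

	static unsigned int
	lpfc_next_online_numa_cpu(const struct cpumask *numa_mask,
				  unsigned int start)
	{
		unsigned int cpu;

		/* Walk the adapter's NUMA mask starting at 'start', wrapping
		 * around once, and return the first cpu that is currently
		 * online; nr_cpu_ids means no online cpu was found.
		 */
		for_each_cpu_wrap(cpu, numa_mask, start)
			if (cpu_online(cpu))
				return cpu;

		return nr_cpu_ids;
	}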
@@ -10978,13 +11389,31 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
+ const struct cpumask *numa_mask = NULL;
+ unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
+ struct lpfc_hba_eq_hdl *eqhdl;
+ const struct cpumask *maskp;
+ bool first;
+ unsigned int flags = PCI_IRQ_MSIX;
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- rc = pci_alloc_irq_vectors(phba->pcidev,
- 1,
- vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+ cpu_cnt = cpumask_weight(numa_mask);
+ vectors = min(phba->cfg_irq_chann, cpu_cnt);
+
+ /* cpu: iterates over numa_mask including offline or online
+ * cpu_select: iterates over online numa_mask to set affinity
+ */
+ cpu = cpumask_first(numa_mask);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else {
+ flags |= PCI_IRQ_AFFINITY;
+ }
+
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
@@ -10994,23 +11423,61 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
- name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+ eqhdl = lpfc_get_eq_hdl(index);
+ name = eqhdl->handler_name;
memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl->idx = index;
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
- name,
- &phba->sli4_hba.hba_eq_hdl[index]);
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
"request_irq failed (%d)\n", index, rc);
goto cfg_fail_out;
}
+
+ eqhdl->irq = pci_irq_vector(phba->pcidev, index);
+
+ if (phba->cfg_irq_numa) {
+ /* If found a neighboring online cpu, set affinity */
+ if (cpu_select < nr_cpu_ids)
+ lpfc_irq_set_aff(eqhdl, cpu_select);
+
+ /* Assign EQ to cpu_map */
+ lpfc_assign_eq_map_info(phba, index,
+ LPFC_CPU_FIRST_IRQ,
+ cpu);
+
+ /* Iterate to next offline or online cpu in numa_mask */
+ cpu = cpumask_next(cpu, numa_mask);
+
+ /* Find next online cpu in numa_mask to set affinity */
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else if (vectors == 1) {
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
+ cpu);
+ } else {
+ maskp = pci_irq_get_affinity(phba->pcidev, index);
+
+ first = true;
+ /* Loop through all CPUs associated with vector index */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+			/* If this is the first CPU that's assigned to
+ * this vector, set LPFC_CPU_FIRST_IRQ.
+ */
+ lpfc_assign_eq_map_info(phba, index,
+ first ?
+ LPFC_CPU_FIRST_IRQ : 0,
+ cpu);
+ if (first)
+ first = false;
+ }
+ }
}
if (vectors != phba->cfg_irq_chann) {
@@ -11020,17 +11487,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->cfg_irq_chann, vectors);
if (phba->cfg_irq_chann > vectors)
phba->cfg_irq_chann = vectors;
- if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
- phba->cfg_nvmet_mrq = vectors;
}
return rc;
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--)
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ for (--index; index >= 0; index--) {
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
+ }
/* Unconfigure MSI-X capability structure */
pci_free_irq_vectors(phba->pcidev);
@@ -11057,6 +11525,8 @@ static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
int rc, index;
+ unsigned int cpu;
+ struct lpfc_hba_eq_hdl *eqhdl;
rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
@@ -11078,9 +11548,15 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
return rc;
}
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
+
for (index = 0; index < phba->cfg_irq_chann; index++) {
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl = lpfc_get_eq_hdl(index);
+ eqhdl->idx = index;
}
return 0;
@@ -11138,15 +11614,21 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
struct lpfc_hba_eq_hdl *eqhdl;
+ unsigned int cpu;
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
+ cpu);
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+ eqhdl = lpfc_get_eq_hdl(idx);
eqhdl->idx = idx;
- eqhdl->phba = phba;
}
}
}
@@ -11168,14 +11650,14 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
/* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX) {
int index;
+ struct lpfc_hba_eq_hdl *eqhdl;
/* Free up MSI-X multi-message vectors */
for (index = 0; index < phba->cfg_irq_chann; index++) {
- irq_set_affinity_hint(
- pci_irq_vector(phba->pcidev, index),
- NULL);
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
}
} else {
free_irq(phba->pcidev->irq, phba);
@@ -11367,6 +11849,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Wait for completion of device XRI exchange busy */
lpfc_sli4_xri_exchange_busy_wait(phba);
+ /* per-phba callback de-registration for hotplug event */
+ lpfc_cpuhp_remove(phba);
+
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
@@ -11538,6 +12023,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
+ sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -11589,13 +12075,10 @@ fcponly:
}
/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
- * accommodate 512K and 1M IOs in a single nvme buf and supply
- * enough NVME LS iocb buffers for larger connectivity counts.
+ * accommodate 512K and 1M IOs in a single nvme buf.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- phba->cfg_iocb_cnt = 5;
- }
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
@@ -12312,35 +12795,57 @@ lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
}
-static void
+static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
const struct firmware *fw)
{
- if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
+ int rc;
+
+ /* Three cases: (1) FW was not supported on the detected adapter.
+ * (2) FW update has been locked out administratively.
+ * (3) Some other error during FW update.
+ * In each case, an unmaskable message is written to the console
+ * for admin diagnosis.
+ */
+ if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
- magic_number != MAGIC_NUMER_G6) ||
+ magic_number != MAGIC_NUMBER_G6) ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMER_G7))
+ magic_number != MAGIC_NUMBER_G7)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3030 This firmware version is not supported on "
- "this HBA model. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
- else
+ "3030 This firmware version is not supported on"
+ " this HBA model. Device:%x Magic:%x Type:%x "
+ "ID:%x Size %d %zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EINVAL;
+ } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3022 FW Download failed. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
+ "3021 Firmware downloads have been prohibited "
+ "by a system configuration setting on "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EACCES;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 FW Download failed. Add Status x%x "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ offset, phba->pcidev->device, magic_number,
+ ftype, fid, fsize, fw->size);
+ rc = -EIO;
+ }
+ return rc;
}
-
/**
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
- * @phba: pointer to lpfc hba data structure.
+ * @context: pointer to the lpfc hba data structure, passed as an opaque cookie.
+ * @ret: return value this routine provides to the caller.
*
**/
static void
@@ -12409,8 +12914,12 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
if (rc) {
- lpfc_log_write_firmware_error(phba, offset,
- magic_number, ftype, fid, fsize, fw);
+ rc = lpfc_log_write_firmware_error(phba, offset,
+ magic_number,
+ ftype,
+ fid,
+ fsize,
+ fw);
goto release_out;
}
}
@@ -12430,9 +12939,12 @@ release_out:
}
release_firmware(fw);
out:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.\n", rc);
- return;
+ if (rc < 0)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3062 Firmware update error, status %d.\n", rc);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update success: size %d.\n", rc);
}
/**
@@ -12551,6 +13063,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->pport = NULL;
lpfc_stop_port(phba);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
+
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -12632,6 +13150,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
+ INIT_LIST_HEAD(&phba->poll_list);
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
+
return 0;
out_free_sysfs_attr:
@@ -13344,8 +13865,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
phba->cfg_fof = 1;
} else {
phba->cfg_fof = 0;
- if (phba->device_data_mem_pool)
- mempool_destroy(phba->device_data_mem_pool);
+ mempool_destroy(phba->device_data_mem_pool);
phba->device_data_mem_pool = NULL;
}
@@ -13450,11 +13970,24 @@ lpfc_init(void)
/* Initialize in case vector mapping is needed */
lpfc_present_cpu = num_present_cpus();
+ error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "lpfc/sli4:online",
+ lpfc_cpu_online, lpfc_cpu_offline);
+ if (error < 0)
+ goto cpuhp_failure;
+ lpfc_cpuhp_state = error;
+
error = pci_register_driver(&lpfc_driver);
- if (error) {
- fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
- }
+ if (error)
+ goto unwind;
+
+ return error;
+
+unwind:
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
+cpuhp_failure:
+ fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
return error;
}
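Pulling the hotplug pieces from the hunks above together, the registration and teardown ordering this patch establishes is roughly the following (a condensed summary of the code already shown, not new driver logic):

	/*
	 * lpfc_init()              cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, ...)
	 * lpfc_pci_probe_one_s4()  cpuhp_state_add_instance_nocalls(state, &phba->cpuhp)
	 * lpfc_online()            lpfc_cpuhp_add(phba)
	 * cpu goes offline         lpfc_cpu_offline(): rebalance IRQ affinity, poll EQs
	 * cpu comes back online    lpfc_cpu_online(): rebalance affinity, stop polling
	 * lpfc_offline()           __lpfc_cpuhp_remove(phba)
	 * lpfc_sli4_hba_unset()    lpfc_cpuhp_remove(phba)
	 * lpfc_exit()              cpuhp_remove_multi_state(lpfc_cpuhp_state)
	 */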
@@ -13471,6 +14004,7 @@ lpfc_exit(void)
{
misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
idr_destroy(&lpfc_hba_index);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index ea10f03437f5..148d02a27b58 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -46,6 +46,23 @@
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
+/* generate message by verbose log setting or severity */
+#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \
+{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); }
+
+#define lpfc_log_msg(phba, level, mask, fmt, arg...) \
+do { \
+ { uint32_t log_verbose = (phba)->pport ? \
+ (phba)->pport->cfg_log_verbose : \
+ (phba)->cfg_log_verbose; \
+ if (((mask) & log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+ fmt, phba->brd_no, ##arg); \
+ } \
+} while (0)
+
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
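A note on the two new macros: compared with the existing lpfc_printf_vlog (whose mask test appears in the context line above, using '3'), they bypass the verbose-mask check for anything at KERN_WARNING ("4") or more severe, so warnings always reach the console even when the log class is not enabled. The FA-WWN handler added earlier in this diff is the intended usage pattern; this is simply that call site repeated for reference, not additional driver code:

	/* Printed even if LOG_SLI is not set in the verbose mask,
	 * because KERN_WARNING ("4") satisfies level[1] <= '4'.
	 */
	lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
		     "2699 Misconfigured FA-WWN - Attached device does "
		     "not support FA-WWN\n");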
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8abe933bad09..d1773c01d2b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -515,6 +515,7 @@ lpfc_init_link(struct lpfc_hba * phba,
if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ !(phba->sli4_hba.pc_sli4_params.pls) &&
mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ae09bb863497..7082279e4c01 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -230,9 +230,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
- dma_pool_destroy(phba->txrdy_payload_pool);
- phba->txrdy_payload_pool = NULL;
-
dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index fc6e4546d738..ae4359013846 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -279,6 +279,55 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
+/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+ * @phba: pointer to lpfc hba data structure.
+ * @link_mbox: pointer to CONFIG_LINK mailbox object
+ *
+ * This routine is only called if we are SLI3, direct connect pt2pt
+ * mode and the remote NPort issues the PLOGI after link up.
+ */
+static void
+lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+{
+ LPFC_MBOXQ_t *login_mbox;
+ MAILBOX_t *mb = &link_mbox->u.mb;
+ struct lpfc_iocbq *save_iocb;
+ struct lpfc_nodelist *ndlp;
+ int rc;
+
+ ndlp = link_mbox->ctx_ndlp;
+ login_mbox = link_mbox->context3;
+ save_iocb = login_mbox->context3;
+ link_mbox->context3 = NULL;
+ login_mbox->context3 = NULL;
+
+ /* Check for CONFIG_LINK error */
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
+ mb->mbxStatus);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ lpfc_sli_release_iocbq(phba, save_iocb);
+ return;
+ }
+
+ /* Now that CONFIG_LINK completed, and our SID is configured,
+ * we can now proceed with sending the PLOGI ACC.
+ */
+ rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, login_mbox);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4576 PLOGI ACC fails pt2pt discovery: %x\n",
+ rc);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ }
+
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ lpfc_sli_release_iocbq(phba, save_iocb);
+}
+
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -291,10 +340,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
- LPFC_MBOXQ_t *mbox;
+ LPFC_MBOXQ_t *link_mbox;
+ LPFC_MBOXQ_t *login_mbox;
+ struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
- int rc;
+ int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt));
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -343,6 +394,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
ndlp->nlp_fcp_info |= CLASS3;
+ defer_acc = 0;
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -354,7 +406,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
-
/* if already logged in, do implicit logout */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
@@ -396,6 +447,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ login_mbox = NULL;
+ link_mbox = NULL;
+ save_iocb = NULL;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -423,17 +478,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
else {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
+ defer_acc = 1;
+ link_mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!link_mbox)
goto out;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_config_link(phba, link_mbox);
+ link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+ link_mbox->vport = vport;
+ link_mbox->ctx_ndlp = ndlp;
+
+ save_iocb = lpfc_sli_get_iocbq(phba);
+ if (!save_iocb)
goto out;
- }
+ /* Save info from cmd IOCB used in rsp */
+ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
}
lpfc_can_disctmo(vport);
@@ -448,8 +508,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
}
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!login_mbox)
goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
@@ -457,21 +517,19 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
- (uint8_t *) sp, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
+ if (rc)
goto out;
- }
/* ACC PLOGI rsp command needs to execute first,
- * queue this mbox command to be processed later.
+ * queue this login_mbox command to be processed later.
*/
- mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
/*
- * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
+ * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
- mbox->vport = vport;
+ login_mbox->vport = vport;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
spin_unlock_irq(shost->host_lock);
@@ -484,8 +542,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -504,16 +564,47 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, mbox);
+ ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ if (defer_acc) {
+ /* So the order here should be:
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * Issue PLOGI ACC
+ * PLOGI ACC cmpl
+ * Issue REG_LOGIN mbox
+ */
+
+ /* Save the REG_LOGIN mbox and rcv IOCB copy for later use */
+ link_mbox->context3 = login_mbox;
+ login_mbox->context3 = save_iocb;
+
+ /* Start the ball rolling by issuing CONFIG_LINK here */
+ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out;
return 1;
}
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
return 1;
out:
+ if (defer_acc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4577 pt2pt discovery failure: %p %p %p\n",
+ save_iocb, link_mbox, login_mbox);
+ if (save_iocb)
+ lpfc_sli_release_iocbq(phba, save_iocb);
+ if (link_mbox)
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ if (login_mbox)
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
@@ -2030,7 +2121,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;
- if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+ if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
+ bf_get_be32(prli_conf, nvpr))
ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
else
ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index a227e36cbdc2..db4a04a207ec 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -196,6 +196,46 @@ lpfc_nvme_cmd_template(void)
}
/**
+ * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniquely identifies the local exchange resource.
+ * @opt: Option bits -
+ * bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+void
+lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (opt & INHIBIT_ABORT)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ /* Abort specified xri tag, with the mask deliberately zeroed */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+ /* Abort the IO associated with this outstanding exchange ID. */
+ wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+ /* iotag for the wqe completion. */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
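
A short usage sketch for the new helper, restating the converted call sites later in this patch (lpfc_nvme_fcp_abort and lpfc_nvmet_sol_fcp_issue_abort):

	/* Sketch: prepare and queue an abort for an outstanding exchange. */
	opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
	lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

	/* ABTS WQE must go to the same WQ as the WQE being aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
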
+
+/**
* lpfc_nvme_create_queue -
* @lpfc_pnvme: Pointer to the driver's nvme instance data
* @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
@@ -1791,7 +1831,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
- union lpfc_wqe128 *abts_wqe;
unsigned long flags;
int ret_val;
@@ -1912,37 +1951,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Ready - mark outstanding as aborted by driver. */
nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* Complete prepping the abort wqe and issue to the FW. */
- abts_wqe = &abts_buf->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(*abts_wqe));
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
- nvmereq_wqe->iocb.ulpClass);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_buf->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_buf->iocb_flag |= LPFC_IO_NVME;
@@ -2084,7 +2093,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
qp = lpfc_ncmd->hdwq;
- if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
+ if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
"ox_id x%x on reqtag x%x\n",
@@ -2139,12 +2148,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
- /* Advertise how many hw queues we support based on fcp_io_sched */
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
- else
- lpfc_nvme_template.max_hw_queues =
- phba->sli4_hba.num_present_cpu;
+ /* Advertise how many hw queues we support based on cfg_hdw_queue,
+ * which will not exceed cpu count.
+ */
+ lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 9884228800a5..9dc9afe1c255 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -378,13 +378,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
int cpu;
unsigned long iflag;
- if (ctxp->txrdy) {
- dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
- ctxp->txrdy_phys);
- ctxp->txrdy = NULL;
- ctxp->txrdy_phys = 0;
- }
-
if (ctxp->state == LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6411 NVMET free, already free IO x%x: %d %d\n",
@@ -430,7 +423,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -1958,12 +1950,10 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t *payload;
uint32_t size, oxid, sid, rc;
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
- if (!phba->targetport) {
+ if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO x%x\n", oxid);
+ "6154 LS Drop IO\n");
oxid = 0;
size = 0;
sid = 0;
@@ -1971,6 +1961,9 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto dropit;
}
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
@@ -2326,7 +2319,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -2401,6 +2393,11 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
d_buf = piocb->context2;
nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3015 LS Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
return;
@@ -2429,6 +2426,11 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint64_t isr_timestamp,
uint8_t cqflag)
{
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3167 NVMET FCP Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
@@ -2595,7 +2597,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
struct scatterlist *sgel;
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
- uint32_t *txrdy;
dma_addr_t physaddr;
int i, cnt;
int do_pbde;
@@ -2757,23 +2758,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
&lpfc_treceive_cmd_template.words[3],
sizeof(uint32_t) * 9);
- /* Words 0 - 2 : The first sg segment */
- txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
- GFP_KERNEL, &physaddr);
- if (!txrdy) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6041 Bad txrdy buffer: oxid x%x\n",
- ctxp->oxid);
- return NULL;
- }
- ctxp->txrdy = txrdy;
- ctxp->txrdy_phys = physaddr;
- wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
- wqe->fcp_treceive.bde.addrLow =
- cpu_to_le32(putPaddrLow(physaddr));
- wqe->fcp_treceive.bde.addrHigh =
- cpu_to_le32(putPaddrHigh(physaddr));
+ /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
+ wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
+ wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
+ wqe->fcp_treceive.bde.addrLow = 0;
+ wqe->fcp_treceive.bde.addrHigh = 0;
/* Word 4 */
wqe->fcp_treceive.relative_offset = ctxp->offset;
@@ -2808,17 +2797,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Word 12 */
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
- /* Setup 1 TXRDY and 1 SKIP SGE */
- txrdy[0] = 0;
- txrdy[1] = cpu_to_be32(rsp->transfer_length);
- txrdy[2] = 0;
-
- sgl->addr_hi = putPaddrHigh(physaddr);
- sgl->addr_lo = putPaddrLow(physaddr);
+ /* Setup 2 SKIP SGEs */
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
sgl->word2 = 0;
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
+ sgl->sge_len = 0;
sgl++;
sgl->addr_hi = 0;
sgl->addr_lo = 0;
@@ -3239,9 +3224,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
{
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_iocbq *abts_wqeq;
- union lpfc_wqe128 *abts_wqe;
struct lpfc_nodelist *ndlp;
unsigned long flags;
+ u8 opt;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3280,8 +3265,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
- abts_wqe = &abts_wqeq->wqe;
ctxp->state = LPFC_NVMET_STE_ABORT;
+ opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3327,40 +3312,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(*abts_wqe));
-
- /* word 3 */
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_wqeq->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
@@ -3495,7 +3452,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 8ff67deac10a..b80b1639b9a7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -112,9 +112,7 @@ struct lpfc_nvmet_rcv_ctx {
struct lpfc_hba *phba;
struct lpfc_iocbq *wqeq;
struct lpfc_iocbq *abort_wqeq;
- dma_addr_t txrdy_phys;
spinlock_t ctxlock; /* protect flag access */
- uint32_t *txrdy;
uint32_t sid;
uint32_t offset;
uint16_t oxid;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6822cd9ff8f1..b138d9fee675 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -134,21 +134,21 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
/**
* lpfc_update_stats - Update statistical data for the command completion
- * @phba: Pointer to HBA object.
+ * @vport: The virtual port on which this call is executing.
* @lpfc_cmd: lpfc scsi command object pointer.
*
* This function is called when there is a command completion and this
* function updates the statistical data for the command completion.
**/
static void
-lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
- struct Scsi_Host *shost = cmd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long latency;
int i;
@@ -526,7 +526,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
&qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del_init(&psb->list);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
@@ -566,7 +566,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
if (iocbq->sli4_xritag != xri)
continue;
psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
@@ -786,7 +786,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
psb->prot_seg_cnt = 0;
qp = psb->hdwq;
- if (psb->exch_busy) {
+ if (psb->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
@@ -3812,7 +3812,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
- if (!cmd) {
+ if (!cmd || !phba) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"2621 IO completion: Not an active IO\n");
spin_unlock(&lpfc_cmd->buf_lock);
@@ -3824,7 +3824,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
@@ -3835,7 +3835,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exchange busy status from HBA */
- lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -3869,7 +3872,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
#endif
- if (lpfc_cmd->status) {
+ if (unlikely(lpfc_cmd->status)) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
(lpfc_cmd->result & IOERR_DRVR_MASK))
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
@@ -4002,7 +4005,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(phba, lpfc_cmd);
+ lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4610,17 +4613,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
- if (err == 2) {
- cmnd->result = DID_ERROR << 16;
- goto out_fail_command_release_buf;
- } else if (err) {
+ if (unlikely(err)) {
+ if (err == 2) {
+ cmnd->result = DID_ERROR << 16;
+ goto out_fail_command_release_buf;
+ }
goto out_host_busy_free_buf;
}
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
@@ -4843,20 +4847,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
abtsiocb, 0);
}
- /* no longer need the lock after this point */
- spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val == IOCB_ERROR) {
/* Indicate the IO is not being aborted by the driver. */
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
+ /* no longer need the lock after this point */
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 614f78dddafe..c82b5792da98 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -87,6 +87,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
+static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
+ struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -467,25 +471,52 @@ __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
}
static void
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
- struct lpfc_eqe *eqe;
- uint32_t count = 0;
+ struct lpfc_eqe *eqe = NULL;
+ u32 eq_count = 0, cq_count = 0;
+ struct lpfc_cqe *cqe = NULL;
+ struct lpfc_queue *cq = NULL, *childq = NULL;
+ int cqid = 0;
/* walk all the EQ entries and drop on the floor */
eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+ cq = NULL;
+
+ list_for_each_entry(childq, &eq->child_list, list) {
+ if (childq->queue_id == cqid) {
+ cq = childq;
+ break;
+ }
+ }
+ /* If CQ is valid, iterate through it and drop all the CQEs */
+ if (cq) {
+ cqe = lpfc_sli4_cq_get(cq);
+ while (cqe) {
+ __lpfc_sli4_consume_cqe(phba, cq, cqe);
+ cq_count++;
+ cqe = lpfc_sli4_cq_get(cq);
+ }
+ /* Clear and re-arm the CQ */
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
+ LPFC_QUEUE_REARM);
+ cq_count = 0;
+ }
__lpfc_sli4_consume_eqe(phba, eq, eqe);
- count++;
+ eq_count++;
eqe = lpfc_sli4_eq_get(eq);
}
/* Clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
static int
-lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ uint8_t rearm)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -519,8 +550,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
eq->queue_claimed = 0;
rearm_and_exit:
- /* Always clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+ /* Always clear the EQ. */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
return count;
}
@@ -2526,6 +2557,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
+ if (vport->load_flag & FC_UNLOADING)
+ lpfc_nlp_put(ndlp);
pmb->ctx_ndlp = NULL;
}
}
@@ -2672,7 +2705,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
"x%x (x%x/x%x) Cmpl\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2693,7 +2727,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
"(%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
"(x%x/x%x) x%x x%x x%x\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2701,7 +2736,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmb),
pmbox->mbxStatus,
pmbox->un.varWords[0],
- pmb->vport->port_state);
+ pmb->vport ? pmb->vport->port_state :
+ LPFC_VPORT_UNKNOWN);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -6167,6 +6203,14 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
mbox->u.mqe.un.set_feature.param_len = 8;
break;
+ case LPFC_SET_DUAL_DUMP:
+ bf_set(lpfc_mbx_set_feature_dd,
+ &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
+ bf_set(lpfc_mbx_set_feature_ddquery,
+ &mbox->u.mqe.un.set_feature, 0);
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ break;
}
return;
@@ -6184,11 +6228,16 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
/* Disable FW logging to host memory */
writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* Wait 10ms for firmware to stop using DMA buffer */
+ usleep_range(10 * 1000, 20 * 1000);
}
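
Taken together with the hunks below, ras_fwlog->state now follows a small lifecycle, updated under phba->hbalock in every hunk shown here; in outline:

	/* Sketch of the state transitions introduced in this patch:
	 *   lpfc_sli4_ras_fwlog_init():  INACTIVE, then REG_INPROGRESS once
	 *                                the enable mailbox is issued;
	 *   lpfc_sli4_ras_mbox_cmpl():   ACTIVE on success;
	 *   lpfc_ras_stop_fwlog() and
	 *   lpfc_sli4_ras_dma_free():    back to INACTIVE.
	 */
	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = REG_INPROGRESS;
	spin_unlock_irq(&phba->hbalock);
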
/**
@@ -6224,7 +6273,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
ras_fwlog->lwpd.virt = NULL;
}
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
}
/**
@@ -6326,7 +6377,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto disable_ras;
}
- ras_fwlog->ras_active = true;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
mempool_free(pmb, phba->mbox_mem_pool);
return;
@@ -6358,6 +6411,10 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
int rc = 0;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
phba->cfg_ras_fwlog_buffsize);
fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
@@ -6417,6 +6474,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = REG_INPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
@@ -7148,7 +7208,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc, i, cnt, len;
+ int rc, i, cnt, len, dd;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
@@ -7399,6 +7459,23 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /* Always try to enable dual dump feature if we can */
+ lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
+ if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
+ "6448 Dual Dump is enabled\n");
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
+ "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
+ "rc:x%x dd:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(
+ phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(
+ phba, mboxq),
+ rc, dd);
/*
* Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
* calls depends on these resources to complete port setup.
@@ -7523,9 +7600,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.nvmet_xri_cnt = rc;
- cnt = phba->cfg_iocb_cnt * 1024;
- /* We need 1 iocbq for every SGL, for IO processing */
- cnt += phba->sli4_hba.nvmet_xri_cnt;
+ /* We allocate an iocbq for every receive context SGL.
+ * The additional allocation is for abort and ls handling.
+ */
+ cnt = phba->sli4_hba.nvmet_xri_cnt +
+ phba->sli4_hba.max_cfg_param.max_xri;
} else {
/* update host common xri-sgl sizes and mappings */
rc = lpfc_sli4_io_sgl_update(phba);
@@ -7547,14 +7626,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_destroy_queue;
}
- cnt = phba->cfg_iocb_cnt * 1024;
+ /* Each lpfc_io_buf job structure has an iocbq element.
+ * This cnt provides for abort, els, ct and ls requests.
+ */
+ cnt = phba->sli4_hba.max_cfg_param.max_xri;
}
if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d total %d\n",
- phba->cfg_iocb_cnt, cnt);
+ "2821 initialize iocb list with %d entries\n",
+ cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7892,7 +7974,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -8964,7 +9046,8 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to address of newly added command iocb.
*
- * This function is called with hbalock held to add a command
+ * This function is called with hbalock held for SLI3 ports or
+ * the ring lock held for SLI4 ports to add a command
* iocb to the txq when SLI layer cannot submit the command iocb
* to the ring.
**/
@@ -8972,7 +9055,10 @@ void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- lockdep_assert_held(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lockdep_assert_held(&pring->ring_lock);
+ else
+ lockdep_assert_held(&phba->hbalock);
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
}
@@ -9863,7 +9949,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
* an iocb command to an HBA with SLI-4 interface spec.
*
- * This function is called with hbalock held. The function will return success
+ * This function is called with ringlock held. The function will return success
* after it successfully submit the iocb to firmware or after adding to the
* txq.
**/
@@ -10053,10 +10139,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sli_ring *pring;
+ struct lpfc_queue *eq;
unsigned long iflags;
int rc;
if (phba->sli_rev == LPFC_SLI_REV4) {
+ eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
+
pring = lpfc_sli4_calc_ring(phba, piocb);
if (unlikely(pring == NULL))
return IOCB_ERROR;
@@ -10064,6 +10153,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -10678,14 +10769,14 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
set_bit(LPFC_DATA_READY, &phba->data_flags);
}
prev_pring_flag = pring->flag;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txq, list) {
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
}
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txcmplq, list) {
if (iocb->vport != vport)
@@ -11050,9 +11141,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4]);
spin_unlock_irq(&phba->hbalock);
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
- lpfc_sli_release_iocbq(phba, abort_iocb);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -11736,7 +11824,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq);
- lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
}
pdone_q = cmdiocbq->context_un.wait_queue;
@@ -13158,13 +13249,19 @@ send_current_mbox:
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}
/**
@@ -13217,7 +13314,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_sli_ring *pring = cq->pring;
int txq_cnt = 0;
int txcmplq_cnt = 0;
- int fcp_txcmplq_cnt = 0;
/* Check for response status */
if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
@@ -13239,9 +13335,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
- "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+ "els_txcmplq_cnt=%d\n",
txq_cnt, phba->iocb_cnt,
- fcp_txcmplq_cnt,
txcmplq_cnt);
return false;
}
@@ -13592,6 +13687,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
LPFC_QUEUE_NOARM);
consumed = 0;
+ cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
}
if (count == LPFC_NVMET_CQ_NOTIFY)
@@ -14220,7 +14316,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
/* Flush, clear interrupt, and rearm the EQ */
- lpfc_sli4_eq_flush(phba, fpeq);
+ lpfc_sli4_eqcq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
@@ -14230,14 +14326,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
fpeq->last_cpu = raw_smp_processor_id();
if (icnt > LPFC_EQD_ISR_TRIGGER &&
- phba->cfg_irq_chann == 1 &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
/* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq);
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
@@ -14297,6 +14393,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
+void lpfc_sli4_poll_hbtimer(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+ struct lpfc_queue *eq;
+ int i = 0;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
+ i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ if (!list_empty(&phba->poll_list))
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+
+ rcu_read_unlock();
+}
+
+inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
+{
+ struct lpfc_hba *phba = eq->phba;
+ int i = 0;
+
+ /*
+ * Unlocking an irq is one of the entry points to check
+ * for re-schedule, but we are fine on the io submission
+ * path as the midlayer does a get_cpu to glue us in. Flush
+ * out the invalidation queue so we can see the updated
+ * value for the flag.
+ */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+ /* We will likely not get the completion for the caller
+ * during this iteration, but that's fine.
+ * Future io's coming on this eq should be able to
+ * pick it up. Single io's will be handled through a
+ * schedule from the polling timer function, which is
+ * currently triggered every 1msec.
+ */
+ i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+
+ return i;
+}
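
For context, a condensed sketch of how this fastpath hook is driven from the SLI4 IO submission path (restating the lpfc_sli_issue_iocb and lpfc_sli4_issue_wqe hunks below):

	/* Sketch: SLI4 submission path. */
	eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* No-op unless eq->mode == LPFC_EQ_POLL; then consume EQEs inline. */
	lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
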
+
+static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ if (list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ /* kickstart slowpath processing for this eq */
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ list_add_rcu(&eq->_poll_list, &phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ /* Disable slowpath processing for this eq. Kick start the eq
+ * by RE-ARMING the eq ASAP.
+ */
+ list_del_rcu(&eq->_poll_list);
+ synchronize_rcu();
+
+ if (list_empty(&phba->poll_list))
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *eq, *next;
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
+ list_del(&eq->_poll_list);
+
+ INIT_LIST_HEAD(&phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void
+__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
+{
+ if (mode == eq->mode)
+ return;
+ /*
+ * Currently this function is only called during a hotplug
+ * event and the cpu on which this function is executing
+ * is going offline. By now the hotplug has instructed
+ * the scheduler to remove this cpu from the cpu active mask.
+ * So we don't need to worry about being put aside by the
+ * scheduler for a high priority process. Yes, interrupts
+ * could still come in, but they are known to retire ASAP.
+ */
+
+ /* Disable polling in the fastpath */
+ WRITE_ONCE(eq->mode, mode);
+ /* flush out the store buffer */
+ smp_wmb();
+
+ /*
+ * Add this eq to the polling list and start polling. For
+ * a grace period both the interrupt handler and the poller
+ * will try to process the eq _but_ that's fine. We have a
+ * synchronization mechanism in place (queue_claimed) to
+ * deal with it. This is just a draining phase for the
+ * interrupt handler (not the eq's), as we have guaranteed
+ * through the barrier that all the CPUs have seen the new
+ * CQ_POLLED state, which will effectively disable the
+ * REARMING of the EQ. The whole idea is that the eq's die
+ * off eventually as we are not rearming EQ's anymore.
+ */
+ mode ? lpfc_sli4_add_to_poll_list(eq) :
+ lpfc_sli4_remove_from_poll_list(eq);
+}
+
+void lpfc_sli4_start_polling(struct lpfc_queue *eq)
+{
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
+}
+
+void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
+
+ /* Kick start the pending io's in h/w.
+ * Once we switch back to interrupt processing on an eq,
+ * the io path completion will only arm the eq when it
+ * receives a completion. But since the eq is in a disarmed
+ * state, it doesn't receive a completion. This creates a
+ * deadlock scenario.
+ */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
+}
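
A rough sketch of the intended CPU hotplug usage of these two entry points; the callback names here are illustrative only, the real callers are outside this hunk:

/* Sketch, assumed hotplug callbacks (names hypothetical). */
static int lpfc_eq_cpu_offline_sketch(struct lpfc_queue *eq)
{
	/* This eq's interrupt CPU is going away; switch to timer polling. */
	lpfc_sli4_start_polling(eq);
	return 0;
}

static int lpfc_eq_cpu_online_sketch(struct lpfc_queue *eq)
{
	/* Back to interrupt mode; stop_polling also re-arms the eq. */
	lpfc_sli4_stop_polling(eq);
	return 0;
}
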
+
/**
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
@@ -14371,6 +14608,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
return NULL;
INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->_poll_list);
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->wqfull_list);
INIT_LIST_HEAD(&queue->page_list);
@@ -18124,8 +18362,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0001 rpi:%x max:%x lim:%x\n",
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
(int) rpi, max_rpi, rpi_limit);
/*
@@ -18181,11 +18420,19 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * If the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
} else {
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2016 rpi %x not inuse\n",
rpi);
}
@@ -19683,6 +19930,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19703,6 +19952,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19731,6 +19982,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
return WQE_ERROR;
@@ -20093,6 +20346,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+ if (phba->cfg_xpsgl && !phba->nvmet_support &&
+ !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+ if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
+
if (phba->cfg_xri_rebalancing) {
if (lpfc_ncmd->expedite) {
/* Return to expedite pool */
@@ -20157,13 +20417,6 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
iflag);
}
-
- if (phba->cfg_xpsgl && !phba->nvmet_support &&
- !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
- lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
-
- if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
- lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
}
/**
@@ -20399,8 +20652,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl *allocated_sgl = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the sgl_list */
@@ -20412,9 +20666,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
}
} else {
/* allocate more */
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
- cpu_to_node(smp_processor_id()));
+ cpu_to_node(hdwq->io_wq->chann));
if (!tmp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"8353 error kmalloc memory for HDWQ "
@@ -20434,7 +20688,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
return NULL;
}
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
}
@@ -20442,7 +20696,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl,
list_node);
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return allocated_sgl;
}
@@ -20466,8 +20720,9 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl *tmp = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
list_for_each_entry_safe(list_entry, tmp,
@@ -20480,7 +20735,7 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
rc = -EINVAL;
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return rc;
}
@@ -20501,8 +20756,9 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
struct list_head *buf_list = &hdwq->sgl_list;
struct sli4_hybrid_sgl *list_entry = NULL;
struct sli4_hybrid_sgl *tmp = NULL;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
/* Free sgl pool */
list_for_each_entry_safe(list_entry, tmp,
@@ -20514,7 +20770,7 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
kfree(list_entry);
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
/**
@@ -20538,8 +20794,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf *allocated_buf = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the list */
@@ -20552,9 +20809,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
}
} else {
/* allocate more */
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
- cpu_to_node(smp_processor_id()));
+ cpu_to_node(hdwq->io_wq->chann));
if (!tmp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"8355 error kmalloc memory for HDWQ "
@@ -20579,7 +20836,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
sizeof(struct fcp_cmnd));
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
}
@@ -20587,7 +20844,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf,
list_node);
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return allocated_buf;
}
@@ -20612,8 +20869,9 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf *tmp = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
list_for_each_entry_safe(list_entry, tmp,
@@ -20626,7 +20884,7 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
rc = -EINVAL;
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return rc;
}
@@ -20647,8 +20905,9 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
struct fcp_cmd_rsp_buf *list_entry = NULL;
struct fcp_cmd_rsp_buf *tmp = NULL;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
/* Free cmd_rsp buf pool */
list_for_each_entry_safe(list_entry, tmp,
@@ -20661,5 +20920,5 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
kfree(list_entry);
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 37fbcb46387e..7bcf922a8be2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -384,14 +384,13 @@ struct lpfc_io_buf {
struct lpfc_nodelist *ndlp;
uint32_t timeout;
- uint16_t flags; /* TBD convert exch_busy to flags */
+ uint16_t flags;
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 0d4882a9e634..d963ca871383 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -41,8 +41,13 @@
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_HDWQ_MIN 0
-#define LPFC_HBA_HDWQ_MAX 128
-#define LPFC_HBA_HDWQ_DEF 0
+#define LPFC_HBA_HDWQ_MAX 256
+#define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN
+
+/* irq_chann range, values */
+#define LPFC_IRQ_CHANN_MIN 0
+#define LPFC_IRQ_CHANN_MAX 256
+#define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN
/* FCP MQ queue count limiting */
#define LPFC_FCP_MQ_THRESHOLD_MIN 0
@@ -133,6 +138,23 @@ struct lpfc_rqb {
struct lpfc_queue {
struct list_head list;
struct list_head wq_list;
+
+ /*
+ * If interrupts are in effect on _all_ the eq's, the footprint
+ * of the polling code is zero (except for mode). This memory is
+ * checked for every io to see if the io needs to be polled, and
+ * on completion to check if the eq needs to be rearmed.
+ * Keep it in the same cacheline as the queue ptr to avoid cpu
+ * fetch stalls. Using 1B of memory will leave us with a 7B hole.
+ * Fill it with other frequently used members.
+ */
+ uint16_t last_cpu; /* most recent cpu */
+ uint16_t hdwq;
+ uint8_t qe_valid;
+ uint8_t mode; /* interrupt or polling */
+#define LPFC_EQ_INTERRUPT 0
+#define LPFC_EQ_POLL 1
+
struct list_head wqfull_list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
@@ -199,6 +221,7 @@ struct lpfc_queue {
uint8_t q_flag;
#define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */
#define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */
#define LPFC_NVMET_CQ_NOTIFY 4
void __iomem *db_regaddr;
uint16_t dpp_enable;
@@ -239,10 +262,8 @@ struct lpfc_queue {
struct delayed_work sched_spwork;
uint64_t isr_timestamp;
- uint16_t hdwq;
- uint16_t last_cpu; /* most recent cpu */
- uint8_t qe_valid;
struct lpfc_queue *assoc_qp;
+ struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
};
@@ -451,11 +472,17 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
+ uint16_t irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
+ struct cpumask aff_mask;
};
+#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
+#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
+#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)
+
/*BB Credit recovery value*/
struct lpfc_bbscn_params {
uint32_t word0;
@@ -513,6 +540,7 @@ struct lpfc_pc_sli4_params {
uint8_t cqav;
uint8_t wqsize;
uint8_t bv1s;
+ uint8_t pls;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
@@ -544,11 +572,10 @@ struct lpfc_sli4_lnk_info {
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
-/* Used for IRQ vector to CPU mapping */
+/* Used for tracking CPU mapping attributes */
struct lpfc_vector_map_info {
uint16_t phys_id;
uint16_t core_id;
- uint16_t irq;
uint16_t eq;
uint16_t hdwq;
uint16_t flag;
@@ -891,6 +918,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
+ struct cpumask numa_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
uint32_t conf_trunk;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b8aae31ffda3..9e5ff58edaca 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.4.0.0"
+#define LPFC_DRIVER_VERSION "12.6.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 9c5566217ef6..b5dde9d0d054 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 59cca898f088..e83163c66884 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -41,10 +41,6 @@ static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
-#ifdef CONFIG_COMPAT
-static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
-#endif
-
MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
@@ -68,9 +64,7 @@ static wait_queue_head_t wait_q;
static const struct file_operations lsi_fops = {
.open = mraid_mm_open,
.unlocked_ioctl = mraid_mm_unlocked_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mraid_mm_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
@@ -224,7 +218,6 @@ mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
{
int err;
- /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
mutex_lock(&mraid_mm_mutex);
err = mraid_mm_ioctl(filep, cmd, arg);
mutex_unlock(&mraid_mm_mutex);
@@ -1228,25 +1221,6 @@ mraid_mm_init(void)
}
-#ifdef CONFIG_COMPAT
-/**
- * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
- * @filep : file operations pointer (ignored)
- * @cmd : ioctl command
- * @arg : user ioctl packet
- */
-static long
-mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
- unsigned long arg)
-{
- int err;
-
- err = mraid_mm_ioctl(filep, cmd, arg);
-
- return err;
-}
-#endif
-
/**
* mraid_mm_exit - Module exit point
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a6e788c02ff4..bd8184072bed 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -24,6 +24,8 @@
#define MEGASAS_VERSION "07.710.50.00-rc1"
#define MEGASAS_RELDATE "June 28, 2019"
+#define MEGASAS_MSIX_NAME_LEN 32
+
/*
* Device IDs
*/
@@ -2203,6 +2205,7 @@ struct megasas_aen_event {
};
struct megasas_irq_context {
+ char name[MEGASAS_MSIX_NAME_LEN];
struct megasas_instance *instance;
u32 MSIxIndex;
u32 os_irq;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 42cf38c1ea99..c40fbea06cc5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5546,9 +5546,11 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
pdev = instance->pdev;
instance->irq_context[0].instance = instance;
instance->irq_context[0].MSIxIndex = 0;
+ snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
+ "megasas", instance->host->host_no);
if (request_irq(pci_irq_vector(pdev, 0),
instance->instancet->service_isr, IRQF_SHARED,
- "megasas", &instance->irq_context[0])) {
+ instance->irq_context->name, &instance->irq_context[0])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ from %s %d\n",
__func__, __LINE__);
@@ -5580,8 +5582,10 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
+ snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
+ "megasas", instance->host->host_no, i);
if (request_irq(pci_irq_vector(pdev, i),
- instance->instancet->service_isr, 0, "megasas",
+ instance->instancet->service_isr, 0, instance->irq_context[i].name,
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 50b8c1b12767..89c3685f5163 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -386,9 +386,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) {
- u64 blk, debugBlk;
+ u64 blk;
blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
- debugBlk = blk;
blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
*span_blk = blk;
@@ -699,9 +698,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
__le16 *pDevHandle = &io_info->devHandle;
u8 *pPdInterface = &io_info->pd_interface;
u32 logArm, rowMod, armQ, arm;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
/*Get row and span from io_info for Uneven Span IO.*/
@@ -801,9 +798,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u64 *pdBlock = &io_info->pdBlock;
__le16 *pDevHandle = &io_info->devHandle;
u8 *pPdInterface = &io_info->pd_interface;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
row = mega_div64_32(stripRow, raid->rowDataSize);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index fea3cb6a090b..848fbec7bda6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3044,11 +3044,11 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
descp = NULL;
ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
- ioc->msix_vector_count);
+ ioc->reply_queue_count);
i = pci_alloc_irq_vectors_affinity(ioc->pdev,
ioc->high_iops_queues,
- ioc->msix_vector_count, irq_flags, descp);
+ ioc->reply_queue_count, irq_flags, descp);
return i;
}
@@ -4242,10 +4242,12 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
- Mpi2FWImageHeader_t *FWImgHdr;
+ Mpi2FWImageHeader_t *fw_img_hdr;
+ Mpi26ComponentImageHeader_t *cmp_img_hdr;
Mpi25FWUploadRequest_t *mpi_request;
Mpi2FWUploadReply_t mpi_reply;
int r = 0;
+ u32 package_version = 0;
void *fwpkg_data = NULL;
dma_addr_t fwpkg_data_dma;
u16 smid, ioc_status;
@@ -4302,14 +4304,26 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
- FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
- if (FWImgHdr->PackageVersion.Word) {
- ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
- FWImgHdr->PackageVersion.Struct.Major,
- FWImgHdr->PackageVersion.Struct.Minor,
- FWImgHdr->PackageVersion.Struct.Unit,
- FWImgHdr->PackageVersion.Struct.Dev);
- }
+ fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
+ if (le32_to_cpu(fw_img_hdr->Signature) ==
+ MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
+ cmp_img_hdr =
+ (Mpi26ComponentImageHeader_t *)
+ (fwpkg_data);
+ package_version =
+ le32_to_cpu(
+ cmp_img_hdr->ApplicationSpecific);
+ } else
+ package_version =
+ le32_to_cpu(
+ fw_img_hdr->PackageVersion.Word);
+ if (package_version)
+ ioc_info(ioc,
+ "FW Package Ver(%02d.%02d.%02d.%02d)\n",
+ ((package_version) & 0xFF000000) >> 24,
+ ((package_version) & 0x00FF0000) >> 16,
+ ((package_version) & 0x0000FF00) >> 8,
+ (package_version) & 0x000000FF);
} else {
_debug_dump_mf(&mpi_reply,
sizeof(Mpi2FWUploadReply_t)/4);
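The package version is now handled as one packed 32-bit value -- read from ApplicationSpecific when the image signature identifies an MPI 2.6 component header, otherwise from PackageVersion.Word -- and decoded byte by byte instead of through the bit-field struct. A stand-alone illustration of that byte extraction with a hypothetical value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t package_version = 0x0E110503;	/* hypothetical example */

	/* highest byte prints first, lowest byte last */
	printf("FW Package Ver(%02u.%02u.%02u.%02u)\n",
	       (package_version & 0xFF000000) >> 24,	/* 0x0E -> 14 */
	       (package_version & 0x00FF0000) >> 16,	/* 0x11 -> 17 */
	       (package_version & 0x0000FF00) >> 8,	/* 0x05 -> 05 */
	       package_version & 0x000000FF);		/* 0x03 -> 03 */
	return 0;
}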
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index faca0a5e71f8..4ebf81ea4d2f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "31.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 31
+#define MPT3SAS_DRIVER_VERSION "32.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 32
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -303,6 +303,8 @@ struct mpt3sas_nvme_cmd {
#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08)
+#define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10)
/*
* HP HBA branding
@@ -391,9 +393,12 @@ struct Mpi2ManufacturingPage11_t {
u8 Reserved6; /* 2Fh */
__le32 Reserved7[7]; /* 30h - 4Bh */
u8 NVMeAbortTO; /* 4Ch */
- u8 Reserved8; /* 4Dh */
- u16 Reserved9; /* 4Eh */
- __le32 Reserved10[4]; /* 50h - 60h */
+ u8 NumPerDevEvents; /* 4Dh */
+ u8 HostTraceBufferDecrementSizeKB; /* 4Eh */
+ u8 HostTraceBufferFlags; /* 4Fh */
+ u16 HostTraceBufferMaxSizeKB; /* 50h */
+ u16 HostTraceBufferMinSizeKB; /* 52h */
+ __le32 Reserved10[2]; /* 54h - 5Bh */
};
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 7d696952b376..6874cf017739 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -466,6 +466,13 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
if ((ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_RELEASED))
continue;
+
+ /*
+ * add a log message to indicate the release
+ */
+ ioc_info(ioc,
+ "%s: Releasing the trace buffer due to adapter reset.",
+ __func__);
mpt3sas_send_diag_release(ioc, i, &issue_reset);
}
}
@@ -778,6 +785,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
+ if (!ioc->pcie_sg_lookup) {
+ dtmprintk(ioc, ioc_info(ioc,
+ "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
+ ));
+
+ if (ioc->logging_level & MPT_DEBUG_TM)
+ _debug_dump_mf(nvme_encap_request,
+ ioc->request_sz/4);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
/*
* Get the Physical Address of the sense buffer.
* Use Error Response buffer address field to hold the sense
@@ -1484,6 +1503,26 @@ _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
return rc;
}
+/**
+ * _ctl_diag_get_bufftype - return diag buffer type
+ * either TRACE, SNAPSHOT, or EXTENDED
+ * @ioc: per adapter object
+ * @unique_id: specifies the unique_id for the buffer
+ *
+ * returns MPT3_DIAG_UID_NOT_FOUND if the id is not found
+ */
+static u8
+_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
+{
+ u8 index;
+
+ for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
+ if (ioc->unique_id[index] == unique_id)
+ return index;
+ }
+
+ return MPT3_DIAG_UID_NOT_FOUND;
+}
/**
* _ctl_diag_register_2 - wrapper for registering diag buffer support
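This helper replaces the old "unique_id & 0x000000ff" convention used by the unregister, release and read_buffer paths below: with application-chosen IDs and the new driver defaults, the buffer type can no longer be derived from the low byte of the unique ID, so it is looked up by matching the per-type unique_id table. A stand-alone sketch of that lookup (array size and values are illustrative):

#include <stdio.h>
#include <stdint.h>

#define BUF_TYPE_COUNT	3	/* trace, snapshot, extended (illustrative) */
#define UID_NOT_FOUND	0xFF	/* mirrors MPT3_DIAG_UID_NOT_FOUND */

static uint8_t get_bufftype(const uint32_t *unique_id, uint32_t uid)
{
	uint8_t i;

	for (i = 0; i < BUF_TYPE_COUNT; i++)
		if (unique_id[i] == uid)
			return i;
	return UID_NOT_FOUND;
}

int main(void)
{
	uint32_t ids[BUF_TYPE_COUNT] = { 0x4252434D, 0, 0 };

	printf("buffer type %d\n", get_bufftype(ids, 0x4252434D));	/* 0 */
	return 0;
}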
@@ -1531,11 +1570,88 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
return -EPERM;
}
+ if (diag_register->unique_id == 0) {
+ ioc_err(ioc,
+ "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
+ diag_register->unique_id, buffer_type);
+ return -EINVAL;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
+ !(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ ioc_err(ioc,
+ "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
+ __func__, buffer_type, ioc->unique_id[buffer_type]);
+ return -EINVAL;
+ }
+
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) {
- ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
- __func__, buffer_type);
- return -EINVAL;
+ /*
+ * If the driver posted the buffer initially and an application then
+ * wants to register (own) that buffer without releasing it first,
+ * the application's register command must carry the same buffer
+ * type and size (as obtained from the query command); otherwise
+ * the register command fails. If the application has released the
+ * buffer but wants to re-register it, that is allowed as long as
+ * the unique ID and size match.
+ */
+
+ if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
+ ioc->diag_buffer_sz[buffer_type] ==
+ diag_register->requested_buffer_size) {
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ dctlprintk(ioc, ioc_info(ioc,
+ "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
+ __func__, buffer_type,
+ ioc->unique_id[buffer_type],
+ diag_register->unique_id));
+
+ /*
+ * Application wants to own the buffer with
+ * the same size.
+ */
+ ioc->unique_id[buffer_type] =
+ diag_register->unique_id;
+ rc = 0; /* success */
+ goto out;
+ }
+ } else if (ioc->unique_id[buffer_type] !=
+ MPT3DIAGBUFFUNIQUEID) {
+ if (ioc->unique_id[buffer_type] !=
+ diag_register->unique_id ||
+ ioc->diag_buffer_sz[buffer_type] !=
+ diag_register->requested_buffer_size ||
+ !(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ ioc_err(ioc,
+ "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ } else {
+ ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ } else if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+
+ if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
+ ioc->diag_buffer_sz[buffer_type] !=
+ diag_register->requested_buffer_size) {
+
+ ioc_err(ioc,
+ "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n",
+ __func__, buffer_type,
+ ioc->diag_buffer_sz[buffer_type]);
+ return -EINVAL;
+ }
}
if (diag_register->requested_buffer_size % 4) {
@@ -1560,7 +1676,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
request_data = ioc->diag_buffer[buffer_type];
request_data_sz = diag_register->requested_buffer_size;
ioc->unique_id[buffer_type] = diag_register->unique_id;
- ioc->diag_buffer_status[buffer_type] = 0;
+ ioc->diag_buffer_status[buffer_type] &=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
memcpy(ioc->product_specific[buffer_type],
diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
@@ -1584,7 +1701,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
__func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
@@ -1649,9 +1767,12 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
out:
- if (rc && request_data)
+ if (rc && request_data) {
dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
return rc;
@@ -1669,6 +1790,10 @@ void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
{
struct mpt3_diag_register diag_register;
+ u32 ret_val;
+ u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
+ u32 min_trace_buff_size = 0;
+ u32 decr_trace_buff_size = 0;
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
@@ -1677,10 +1802,68 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
- /* register for 2MB buffers */
- diag_register.requested_buffer_size = 2 * (1024 * 1024);
- diag_register.unique_id = 0x7075900;
- _ctl_diag_register_2(ioc, &diag_register);
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
+
+ if (trace_buff_size != 0) {
+ diag_register.requested_buffer_size = trace_buff_size;
+ min_trace_buff_size =
+ ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
+ decr_trace_buff_size =
+ ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
+
+ if (min_trace_buff_size > trace_buff_size) {
+ /* The buff size is not set correctly */
+ ioc_err(ioc,
+ "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
+ min_trace_buff_size>>10,
+ trace_buff_size>>10);
+ ioc_err(ioc,
+ "Using zero Min Trace Buff Size\n");
+ min_trace_buff_size = 0;
+ }
+
+ if (decr_trace_buff_size == 0) {
+ /*
+ * retry the min size if decrement
+ * is not available.
+ */
+ decr_trace_buff_size =
+ trace_buff_size - min_trace_buff_size;
+ }
+ } else {
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ }
+
+ do {
+ ret_val = _ctl_diag_register_2(ioc, &diag_register);
+
+ if (ret_val == -ENOMEM && min_trace_buff_size &&
+ (trace_buff_size - decr_trace_buff_size) >=
+ min_trace_buff_size) {
+ /* adjust the buffer size */
+ trace_buff_size -= decr_trace_buff_size;
+ diag_register.requested_buffer_size =
+ trace_buff_size;
+ } else
+ break;
+ } while (true);
+
+ if (ret_val == -ENOMEM)
+ ioc_err(ioc,
+ "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
+ diag_register.requested_buffer_size>>10);
+ else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
+ & MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
+ diag_register.requested_buffer_size>>10);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+ ioc->diag_buffer_status[
+ MPI2_DIAG_BUF_TYPE_TRACE] |=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
}
if (bits_to_register & 2) {
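Instead of always registering a fixed 2 MB trace buffer, the code above now sizes it from Manufacturing Page 11: it starts at HostTraceBufferMaxSizeKB (the KB fields are shifted left by 10 to get bytes) and, on -ENOMEM, retries with the size reduced by HostTraceBufferDecrementSizeKB, never dropping below HostTraceBufferMinSizeKB; the 2 MB default is only used when the page reports no maximum. A minimal stand-alone sketch of that negotiation, with hypothetical sizes and a fake allocator standing in for _ctl_diag_register_2():

#include <stdio.h>
#include <stdbool.h>

/* pretend only allocations of 2 MB or less succeed, the way
 * _ctl_diag_register_2() would otherwise return -ENOMEM */
static bool try_register(unsigned int kb)
{
	return kb <= 2048;
}

int main(void)
{
	unsigned int max_kb = 4096, min_kb = 1024, decr_kb = 512;
	unsigned int size_kb = max_kb;

	/* 4096 -> 3584 -> 3072 -> 2560 -> 2048 (success) */
	while (!try_register(size_kb) && (size_kb - decr_kb) >= min_kb)
		size_kb -= decr_kb;

	/* the real code reports -ENOMEM if even the smallest size fails */
	printf("trace buffer registered with %u KB\n", size_kb);
	return 0;
}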
@@ -1723,6 +1906,12 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
}
rc = _ctl_diag_register_2(ioc, &karg);
+
+ if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ ioc->diag_buffer_status[karg.buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_APP_OWNED;
+
return rc;
}
@@ -1752,7 +1941,13 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -1785,12 +1980,21 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -ENOMEM;
}
- request_data_sz = ioc->diag_buffer_sz[buffer_type];
- request_data_dma = ioc->diag_buffer_dma[buffer_type];
- dma_free_coherent(&ioc->pdev->dev, request_data_sz,
- request_data, request_data_dma);
- ioc->diag_buffer[buffer_type] = NULL;
- ioc->diag_buffer_status[buffer_type] = 0;
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+ ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_REGISTERED;
+ } else {
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ }
return 0;
}
@@ -1829,14 +2033,17 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EPERM;
}
- if ((ioc->diag_buffer_status[buffer_type] &
- MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
- __func__, buffer_type);
- return -EINVAL;
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
}
- if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id) {
if (karg.unique_id != ioc->unique_id[buffer_type]) {
ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
__func__, karg.unique_id);
@@ -1851,13 +2058,21 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -ENOMEM;
}
- if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
- karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
- MPT3_APP_FLAGS_BUFFER_VALID);
- else
- karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
- MPT3_APP_FLAGS_BUFFER_VALID |
- MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
+ karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED))
+ karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;
for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
karg.product_specific[i] =
@@ -2002,7 +2217,13 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -2026,7 +2247,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_DIAG_BUFFER_IS_RELEASED) {
ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
__func__, buffer_type);
- return 0;
+ return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
@@ -2086,7 +2307,13 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -2210,6 +2437,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_RELEASED;
dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
@@ -3130,10 +3359,49 @@ host_trace_buffer_enable_store(struct device *cdev,
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
ioc_info(ioc, "posting host trace buffers\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
- diag_register.requested_buffer_size = (1024 * 1024);
- diag_register.unique_id = 0x7075900;
+
+ if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
+ /* post the same buffer allocated previously */
+ diag_register.requested_buffer_size =
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
+ } else {
+ /*
+ * Free the diag buffer memory which was previously
+ * allocated by an application.
+ */
+ if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
+ &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[
+ MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer_dma[
+ MPI2_DIAG_BUF_TYPE_TRACE]);
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
+ NULL;
+ }
+
+ diag_register.requested_buffer_size = (1024 * 1024);
+ }
+
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
_ctl_diag_register_2(ioc, &diag_register);
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ ioc_info(ioc,
+ "Trace buffer %d KB allocated through sysfs\n",
+ diag_register.requested_buffer_size>>10);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+ ioc->diag_buffer_status[
+ MPI2_DIAG_BUF_TYPE_TRACE] |=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
} else if (!strcmp(str, "release")) {
/* exit out if host buffers are already released */
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
@@ -3702,12 +3970,6 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!ioc->diag_buffer[i])
continue;
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
dma_free_coherent(&ioc->pdev->dev,
ioc->diag_buffer_sz[i],
ioc->diag_buffer[i],
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 18b46faef6f1..0f7aa4ddade0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -95,6 +95,14 @@
#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
struct mpt3_diag_read_buffer)
+/* Trace Buffer default UniqueId */
+#define MPT2DIAGBUFFUNIQUEID (0x07075900)
+#define MPT3DIAGBUFFUNIQUEID (0x4252434D)
+
+/* UID not found */
+#define MPT3_DIAG_UID_NOT_FOUND (0xFF)
+
+
/**
* struct mpt3_ioctl_header - main header structure
* @ioc_number - IOC unit number
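A note on the defaults added above: MPT2DIAGBUFFUNIQUEID keeps the 0x07075900 value the driver used before, while the new MPT3DIAGBUFFUNIQUEID (0x4252434D) is simply the ASCII string "BRCM" packed into 32 bits:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t uid = 0x4252434D;	/* MPT3DIAGBUFFUNIQUEID */

	printf("%c%c%c%c\n",
	       (int)(uid >> 24), (int)((uid >> 16) & 0xff),
	       (int)((uid >> 8) & 0xff), (int)(uid & 0xff));	/* BRCM */
	return 0;
}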
@@ -310,6 +318,7 @@ struct mpt3_ioctl_btdh_mapping {
#define MPT3_APP_FLAGS_APP_OWNED (0x0001)
#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002)
#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008)
/* flags for mpt3_diag_read_buffer */
#define MPT3_FLAGS_REREGISTER (0x0001)
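With the new MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC bit, the query path in mpt3sas_ctl.c composes application_flags bit by bit rather than picking one of two fixed masks. A worked example following that logic: a driver-allocated trace buffer that is registered, not yet released and not owned by an application reports 0x0002 | 0x0004 = 0x0006.

#include <stdio.h>

#define MPT3_APP_FLAGS_APP_OWNED		(0x0001)
#define MPT3_APP_FLAGS_BUFFER_VALID		(0x0002)
#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS		(0x0004)
#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC	(0x0008)

int main(void)
{
	/* driver-allocated, registered, not released, not app-owned */
	int registered = 1, released = 0, driver_allocated = 1, app_owned = 0;
	unsigned int flags = 0;

	if (registered)
		flags |= MPT3_APP_FLAGS_BUFFER_VALID;
	if (!released)
		flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
	if (!driver_allocated)
		flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
	if (app_owned)
		flags |= MPT3_APP_FLAGS_APP_OWNED;

	printf("application_flags = 0x%04x\n", flags);	/* 0x0006 */
	return 0;
}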
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c8e512ba6d39..a038be8a0e90 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5161,7 +5161,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -10193,6 +10193,8 @@ scsih_scan_start(struct Scsi_Host *shost)
int rc;
if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+ else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
+ mpt3sas_enable_diag_buffer(ioc, 1);
if (disable_discovery > 0)
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index 6ac453fd5937..8ec9bab20ec4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -113,15 +113,21 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
u8 issue_reset = 0;
+ u32 *trig_data = (u32 *)&event_data->u.master;
dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
/* release the diag buffer trace */
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dTriggerDiagPrintk(ioc,
- ioc_info(ioc, "%s: release trace diag buffer\n",
- __func__));
+ /*
+ * add a log message so that the user knows which event caused
+ * the release
+ */
+ ioc_info(ioc,
+ "%s: Releasing the trace buffer. Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n",
+ __func__, event_data->trigger_type,
+ trig_data[0], trig_data[1]);
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 3e0b8ebe257f..a920eced92ec 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1541,7 +1541,7 @@ out:
int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
- int rc = TMF_RESP_FUNC_FAILED;
+ int rc;
struct mvs_tmf_task tmf_task;
tmf_task.tmf = TMF_ABORT_TASK_SET;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index e0b427fdf818..11a2cb844ecb 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1722,7 +1722,7 @@ struct ncb {
** Miscellaneous configuration and status parameters.
**----------------------------------------------------------------
*/
- u_char disc; /* Diconnection allowed */
+ u_char disc; /* Disconnection allowed */
u_char scsi_mode; /* Current SCSI BUS mode */
u_char order; /* Tag order to use */
u_char verbose; /* Verbosity for this controller*/
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 70db79254155..b6e04d14292d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -1542,7 +1542,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
* with ACK reply when below condition is matched:
* MsgIn 00: Command Complete.
* MsgIn 02: Save Data Pointer.
- * MsgIn 04: Diconnect.
+ * MsgIn 04: Disconnect.
* In other case, unexpected BUSFREE is detected.
*/
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 2368f34efba3..dc9b74c9348a 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -32,7 +32,7 @@ config PCMCIA_FDOMAIN
config PCMCIA_NINJA_SCSI
tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
- depends on !64BIT
+ depends on !64BIT || COMPILE_TEST
help
If you intend to attach this type of PCMCIA SCSI host adapter to
your computer, say Y here and read
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 97416e1dcc5b..93616f9fd6d7 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -56,9 +56,7 @@
MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>");
MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module");
MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
#include "nsp_io.h"
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 6b85016b4db3..7c6be2ec110d 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -70,6 +70,25 @@ static
DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
/**
+ * controller_fatal_error_show - check if the controller is in a fatal error state
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read only' shost attribute.
+ */
+static ssize_t controller_fatal_error_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->controller_fatal_error);
+}
+static DEVICE_ATTR_RO(controller_fatal_error);
+
+/**
* pm8001_ctl_fw_version_show - firmware version
* @cdev: pointer to embedded class device
* @buf: the buffer returned
@@ -804,6 +823,7 @@ static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_interface_rev,
+ &dev_attr_controller_fatal_error,
&dev_attr_fw_version,
&dev_attr_update_fw,
&dev_attr_aap_log,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 68a8217032d0..2328ff1349ac 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1186,7 +1186,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
{
s8 bar, logical = 0;
- for (bar = 0; bar < 6; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
/*
** logical BARs for SPC:
** bar 0 and 1 - logical BAR0
@@ -1336,10 +1336,13 @@ int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
* @circularQ: the inbound queue we want to transfer to HBA.
* @opCode: the operation code represents commands which LLDD and fw recognized.
* @payload: the command payload of each operation command.
+ * @nb: size in bytes of the command payload
+ * @responseQueue: queue to interrupt on w/ command response (if any)
*/
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, u32 responseQueue)
+ u32 opCode, void *payload, size_t nb,
+ u32 responseQueue)
{
u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
void *pMessage;
@@ -1350,10 +1353,13 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
pm8001_printk("No free mpi buffer\n"));
return -ENOMEM;
}
- BUG_ON(!payload);
- /*Copy to the payload*/
- memcpy(pMessage, payload, (pm8001_ha->iomb_size -
- sizeof(struct mpi_msg_hdr)));
+
+ if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
+ nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr);
+ memcpy(pMessage, payload, nb);
+ if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size)
+ memset(pMessage + nb, 0, pm8001_ha->iomb_size -
+ (nb + sizeof(struct mpi_msg_hdr)));
/*Build the header*/
Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
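Before this change pm8001_mpi_build_cmd() always copied iomb_size minus the header out of the caller's payload, even when the payload structure was smaller, which could read past the end of on-stack command structures. Every caller now passes the payload size, the copy is clamped to what fits in the IOMB, and the remainder is zeroed so no stale data reaches the firmware. A stand-alone sketch of the pattern (names hypothetical):

#include <string.h>

static void fill_iomb(void *msg, size_t iomb_size, size_t hdr_size,
		      const void *payload, size_t nb)
{
	size_t room = iomb_size - hdr_size;

	if (nb > room)
		nb = room;				/* clamp to the IOMB */
	memcpy(msg, payload, nb);
	if (nb < room)
		memset((char *)msg + nb, 0, room - nb);	/* zero the tail */
}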
@@ -1364,7 +1370,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
/*Update the PI to the firmware*/
pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
circularQ->pi_offset, circularQ->producer_idx);
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
responseQueue, opCode, circularQ->producer_idx,
circularQ->consumer_index));
@@ -1436,6 +1442,10 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
/* read header */
header_tmp = pm8001_read_32(msgHeader);
msgHeader_tmp = cpu_to_le32(header_tmp);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "outbound opcode msgheader:%x ci=%d pi=%d\n",
+ msgHeader_tmp, circularQ->consumer_idx,
+ circularQ->producer_index));
if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
if (OPC_OUB_SKIP_ENTRY !=
(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
@@ -1604,7 +1614,8 @@ void pm8001_work_fn(struct work_struct *work)
break;
default:
- pm8001_printk("...query task failed!!!\n");
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "...query task failed!!!\n"));
break;
});
@@ -1758,7 +1769,8 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
if (ret)
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1831,7 +1843,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1890,6 +1903,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("SAS Address of IO Failure Drive:"
"%016llx", SAS_ADDR(t->dev->sas_addr)));
+ if (status)
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2072,7 +2090,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2125,7 +2143,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("port_id = %x,device_id = %x\n",
port_id, dev_id));
switch (event) {
@@ -2263,7 +2281,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
return;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2352,6 +2370,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+
+ if (status)
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
@@ -2652,7 +2676,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2723,7 +2747,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, dev_id, tag, event));
switch (event) {
@@ -2872,7 +2896,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_OPEN_TO;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2917,9 +2941,13 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t = ccb->task;
ts = &t->task_status;
pm8001_dev = ccb->device;
- if (status)
+ if (status) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("smp IO status 0x%x\n", status));
+ PM8001_IOERR_DBG(pm8001_ha,
+ pm8001_printk("status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+ }
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
@@ -3070,7 +3098,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -3355,7 +3383,8 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3416,7 +3445,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("unknown device type(%x)\n", deviceType));
break;
}
@@ -3463,7 +3492,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
" phy id = %d\n", port_id, phy_id));
port->port_state = portstate;
@@ -3541,7 +3570,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk(" phy Down and(default) = %x\n",
portstate));
break;
@@ -3689,7 +3718,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("No matched status = %d\n", status));
break;
}
@@ -3805,8 +3834,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("outbound queue HW event & event type : "));
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
+ port_id, phy_id, eventType, status));
switch (eventType) {
case HW_EVENT_PHY_START_STATUS:
PM8001_MSG_DBG(pm8001_ha,
@@ -3990,7 +4020,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown event type = %x\n", eventType));
break;
}
@@ -4161,7 +4191,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
opc));
break;
@@ -4284,7 +4314,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- (u32 *)&smp_cmd, 0);
+ &smp_cmd, sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
@@ -4352,7 +4382,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
+ sizeof(ssp_cmd), 0);
return ret;
}
@@ -4461,7 +4492,8 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
return ret;
}
@@ -4496,7 +4528,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
memcpy(payload.sas_identify.sas_addr,
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4518,7 +4551,8 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4577,7 +4611,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return rc;
}
@@ -4598,7 +4633,8 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
payload.device_id = cpu_to_le32(device_id);
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("unregister device device_id = %d\n", device_id));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4621,7 +4657,8 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4649,6 +4686,9 @@ static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm8001_chip_interrupt_disable(pm8001_ha, vec);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
process_oq(pm8001_ha, vec);
pm8001_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4672,7 +4712,8 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
task_abort.device_id = cpu_to_le32(dev_id);
task_abort.tag = cpu_to_le32(cmd_tag);
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
return ret;
}
@@ -4729,7 +4770,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
if (pm8001_ha->chip_id != chip_8001)
sspTMCmd.ds_ads_m = 0x08;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
+ sizeof(sspTMCmd), 0);
return ret;
}
@@ -4819,7 +4861,8 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
pm8001_tag_free(pm8001_ha, tag);
@@ -4903,7 +4946,8 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
pm8001_tag_free(pm8001_ha, tag);
@@ -4938,7 +4982,8 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
payload.sgl_addr_hi =
cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4960,6 +5005,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "dma fw_control context input length :%x\n", fw_control->len));
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -5083,7 +5130,8 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.device_id = cpu_to_le32(pm8001_dev->device_id);
payload.nds = cpu_to_le32(state);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return rc;
}
@@ -5108,7 +5156,8 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
payload.SSAHOLT = cpu_to_le32(0xd << 25);
payload.sata_hol_tmo = cpu_to_le32(80);
payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3374f553c617..ff618ad80ebd 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -41,6 +41,19 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_chips.h"
+#include "pm80xx_hwi.h"
+
+static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING;
+module_param(logging_level, ulong, 0644);
+MODULE_PARM_DESC(logging_level, " bits for enabling logging info.");
+
+static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120;
+module_param(link_rate, ulong, 0644);
+MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
+ " 1: Link rate 1.5G\n"
+ " 2: Link rate 3.0G\n"
+ " 4: Link rate 6.0G\n"
+ " 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
@@ -401,7 +414,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
pdev = pm8001_ha->pdev;
/* map pci mem (PMC pci base 0-3)*/
- for (bar = 0; bar < 6; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
/*
** logical BARs for SPC:
** bar 0 and 1 - logical BAR0
@@ -432,7 +445,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
pm8001_ha->io_mem[logicalBar].memsize = 0;
- pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
+ pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL;
}
logicalBar++;
}
@@ -466,7 +479,15 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
- pm8001_ha->logging_level = 0x01;
+ pm8001_ha->logging_level = logging_level;
+ if (link_rate >= 1 && link_rate <= 15)
+ pm8001_ha->link_rate = (link_rate << 8);
+ else {
+ pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 |
+ LINKRATE_60 | LINKRATE_120;
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "Setting link rate to default value\n"));
+ }
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
/* IOMB size is 128 for 8088/89 controllers */
if (pm8001_ha->chip_id != chip_8001)
@@ -873,7 +894,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
u32 number_of_intr;
int flag = 0;
int rc;
- static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
@@ -894,14 +914,16 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
rc, pm8001_ha->number_of_intr));
for (i = 0; i < number_of_intr; i++) {
- snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
- DRV_NAME"%d", i);
+ snprintf(pm8001_ha->intr_drvname[i],
+ sizeof(pm8001_ha->intr_drvname[0]),
+ "%s-%d", pm8001_ha->name, i);
pm8001_ha->irq_vector[i].irq_id = i;
pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
pm8001_interrupt_handler_msix, flag,
- intr_drvname[i], &(pm8001_ha->irq_vector[i]));
+ pm8001_ha->intr_drvname[i],
+ &(pm8001_ha->irq_vector[i]));
if (rc) {
for (j = 0; j < i; j++) {
free_irq(pci_irq_vector(pm8001_ha->pdev, i),
@@ -942,7 +964,7 @@ intx:
pm8001_ha->irq_vector[0].irq_id = 0;
pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
- DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
+ pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
return rc;
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 7e48154e11c3..b7cbc312843e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -119,7 +119,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
&mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
- pm8001_printk("memory allocation error\n");
+ pr_err("pm80xx: memory allocation error\n");
return -1;
}
*pphys_addr = mem_dma_handle;
@@ -249,6 +249,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return 0;
default:
+ PM8001_DEVIO_DBG(pm8001_ha,
+ pm8001_printk("func 0x%x\n", func));
rc = -EOPNOTSUPP;
}
msleep(300);
@@ -384,8 +386,9 @@ static int pm8001_task_exec(struct sas_task *task,
struct pm8001_port *port = NULL;
struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
- u32 tag = 0xdeadbeef, rc, n_elem = 0;
+ u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
unsigned long flags = 0;
+ enum sas_protocol task_proto = t->task_proto;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
@@ -410,7 +413,7 @@ static int pm8001_task_exec(struct sas_task *task,
pm8001_dev = dev->lldd_dev;
port = &pm8001_ha->port[sas_find_local_port_id(dev)];
if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
- if (sas_protocol_ata(t->task_proto)) {
+ if (sas_protocol_ata(task_proto)) {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
@@ -432,7 +435,7 @@ static int pm8001_task_exec(struct sas_task *task,
goto err_out;
ccb = &pm8001_ha->ccb_info[tag];
- if (!sas_protocol_ata(t->task_proto)) {
+ if (!sas_protocol_ata(task_proto)) {
if (t->num_scatter) {
n_elem = dma_map_sg(pm8001_ha->dev,
t->scatter,
@@ -452,7 +455,7 @@ static int pm8001_task_exec(struct sas_task *task,
ccb->ccb_tag = tag;
ccb->task = t;
ccb->device = pm8001_dev;
- switch (t->task_proto) {
+ switch (task_proto) {
case SAS_PROTOCOL_SMP:
rc = pm8001_task_prep_smp(pm8001_ha, ccb);
break;
@@ -469,8 +472,7 @@ static int pm8001_task_exec(struct sas_task *task,
break;
default:
dev_printk(KERN_ERR, pm8001_ha->dev,
- "unknown sas_task proto: 0x%x\n",
- t->task_proto);
+ "unknown sas_task proto: 0x%x\n", task_proto);
rc = -EINVAL;
break;
}
@@ -493,7 +495,7 @@ err_out_tag:
pm8001_tag_free(pm8001_ha, tag);
err_out:
dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
- if (!sas_protocol_ata(t->task_proto))
+ if (!sas_protocol_ata(task_proto))
if (n_elem)
dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t->data_dir);
@@ -1179,7 +1181,7 @@ int pm8001_query_task(struct sas_task *task)
break;
}
}
- pm8001_printk(":rc= %d\n", rc);
+ pr_err("pm80xx: rc= %d\n", rc);
return rc;
}
@@ -1202,8 +1204,8 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
phy_id = pm8001_dev->attached_phy;
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
+ ret = pm8001_find_tag(task, &tag);
+ if (ret == 0) {
pm8001_printk("no tag for task:%p\n", task);
return TMF_RESP_FUNC_FAILED;
}
@@ -1241,26 +1243,50 @@ int pm8001_abort_task(struct sas_task *task)
/* 2. Send Phy Control Hard Reset */
reinit_completion(&completion);
+ phy->port_reset_status = PORT_RESET_TMO;
phy->reset_success = false;
phy->enable_completion = &completion;
phy->reset_completion = &completion_reset;
ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_HARD_RESET);
- if (ret)
- goto out;
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for local phy ctl\n"));
- wait_for_completion(&completion);
- if (!phy->reset_success)
+ if (ret) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
goto out;
+ }
- /* 3. Wait for Port Reset complete / Port reset TMO */
+ /* In the case of the reset timeout/fail we still
+ * abort the command at the firmware. The assumption
+ * here is that the drive is off doing something so
+ * that it's not processing requests, and we want to
+ * avoid getting a completion for this and either
+ * leaking the task in libsas or losing the race and
+ * getting a double free.
+ */
PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for local phy ctl\n"));
+ ret = wait_for_completion_timeout(&completion,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret || !phy->reset_success) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
+ } else {
+ /* 3. Wait for Port Reset complete or
+ * Port reset TMO
+ */
+ PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
- wait_for_completion(&completion_reset);
- if (phy->port_reset_status) {
- pm8001_dev_gone_notify(dev);
- goto out;
+ ret = wait_for_completion_timeout(
+ &completion_reset,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret)
+ phy->reset_completion = NULL;
+ WARN_ON(phy->port_reset_status ==
+ PORT_RESET_TMO);
+ if (phy->port_reset_status == PORT_RESET_TMO) {
+ pm8001_dev_gone_notify(dev);
+ goto out;
+ }
}
/*
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index ff17c6aff63d..93438c8f67da 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -66,8 +66,11 @@
#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
-#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \
- format, __func__, __LINE__, ## arg)
+#define PM8001_DEV_LOGGING 0x80 /* development message logging */
+#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
+#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
+#define pm8001_printk(format, arg...) pr_info("%s:: %s %d:" \
+ format, pm8001_ha->name, __func__, __LINE__, ## arg)
#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
do { \
if (unlikely(HBA->logging_level & LEVEL)) \
@@ -97,6 +100,14 @@ do { \
#define PM8001_MSG_DBG(HBA, CMD) \
PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
+#define PM8001_DEV_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DEV_LOGGING, CMD)
+
+#define PM8001_DEVIO_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DEVIO_LOGGING, CMD)
+
+#define PM8001_IOERR_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_IOERR_LOGGING, CMD)
#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
@@ -141,6 +152,8 @@ struct pm8001_ioctl_payload {
#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */
#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */
#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_TABLE_TOTAL_LEN 0x18 /* TOTALLEN */
+#define MPI_FATAL_EDUMP_TABLE_SIGNATURE 0x1C /* SIGNATURE */
#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1
#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0
#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0
@@ -496,6 +509,7 @@ struct pm8001_hba_info {
u32 forensic_last_offset;
u32 fatal_forensic_shift_offset;
u32 forensic_fatal_step;
+ u32 forensic_preserved_accumulated_transfer;
u32 evtlog_ib_offset;
u32 evtlog_ob_offset;
void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
@@ -530,11 +544,14 @@ struct pm8001_hba_info {
struct pm8001_ccb_info *ccb_info;
#ifdef PM8001_USE_MSIX
int number_of_intr;/*will be used in remove()*/
+ char intr_drvname[PM8001_MAX_MSIX_VEC]
+ [PM8001_NAME_LENGTH+1+3+1];
#endif
#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
#endif
u32 logging_level;
+ u32 link_rate;
u32 fw_status;
u32 smp_exp_mode;
bool controller_fatal_error;
@@ -663,7 +680,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, u32 responseQueue);
+ u32 opCode, void *payload, size_t nb,
+ u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
u16 messageSize, void **messagePtr);
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 73261902d75d..19601138e889 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -37,6 +37,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*
*/
+ #include <linux/version.h>
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_hwi.h"
@@ -75,7 +76,7 @@ void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
destination1 = (u32 *)destination;
for (index = 0; index < dw_count; index += 4, destination1++) {
- offset = (soffset + index / 4);
+ offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
*destination1 = cpu_to_le32(value);
@@ -92,9 +93,12 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
u32 accum_len , reg_val, index, *temp;
+ u32 status = 1;
unsigned long start;
u8 *direct_data;
char *fatal_error_data = buf;
+ u32 length_to_read;
+ u32 offset;
pm8001_ha->forensic_info.data_buf.direct_data = buf;
if (pm8001_ha->chip_id == chip_8001) {
@@ -104,16 +108,35 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
+ /* initialize variables for very first call from host application */
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
+ pm8001_ha->forensic_preserved_accumulated_transfer = 0;
- pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ /* Write signature to fatal dump table */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);
+ pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: status1 %d\n", status));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset));
+ }
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
/* Program the MEMBASE II Shifting Register with 0x00.*/
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,30 +149,66 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
	/* Read until accum_len is retrieved */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
- accum_len));
+ /* Determine length of data between previously stored transfer length
+ * and current accumulated transfer length
+ */
+ length_to_read =
+ accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: accum_len 0x%x\n", accum_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: length_to_read 0x%x\n",
+ length_to_read));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: last_offset 0x%x\n",
+ pm8001_ha->forensic_last_offset));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset));
+
+ /* If accumulated length failed to read correctly fail the attempt.*/
if (accum_len == 0xFFFFFFFF) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Possible PCI issue 0x%x not expected\n",
- accum_len));
- return -EIO;
+ accum_len));
+ return status;
}
- if (accum_len == 0 || accum_len >= 0x100000) {
+ /* If accumulated length is zero fail the attempt */
+ if (accum_len == 0) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
- "%08x ", 0xFFFFFFFF);
+ "%08x ", 0xFFFFFFFF);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
+ /* Accumulated length is good so start capturing the first data */
temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
if (pm8001_ha->forensic_fatal_step == 0) {
moreData:
+		/* If the data remaining to read is less than SYSFS_OFFSET,
+		 * trim direct_len down to just the leftover bytes
+		 */
+ if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET
+ > length_to_read) {
+ pm8001_ha->forensic_info.data_buf.direct_len =
+ length_to_read -
+ pm8001_ha->forensic_last_offset;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_len =
+ SYSFS_OFFSET;
+ }
if (pm8001_ha->forensic_info.data_buf.direct_data) {
/* Data is in bar, copy to host memory */
- pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
- pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
- pm8001_ha->forensic_info.data_buf.direct_len ,
- 1);
+ pm80xx_pci_mem_copy(pm8001_ha,
+ pm8001_ha->fatal_bar_loc,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+ pm8001_ha->forensic_info.data_buf.direct_len, 1);
}
pm8001_ha->fatal_bar_loc +=
pm8001_ha->forensic_info.data_buf.direct_len;
@@ -160,21 +219,29 @@ moreData:
pm8001_ha->forensic_info.data_buf.read_len =
pm8001_ha->forensic_info.data_buf.direct_len;
- if (pm8001_ha->forensic_last_offset >= accum_len) {
+ if (pm8001_ha->forensic_last_offset >= length_to_read) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 3);
- for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4); index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", *(temp + index));
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
}
pm8001_ha->fatal_bar_loc = 0;
pm8001_ha->forensic_fatal_step = 1;
pm8001_ha->fatal_forensic_shift_offset = 0;
pm8001_ha->forensic_last_offset = 0;
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:return1 0x%x\n", offset));
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -184,12 +251,20 @@ moreData:
sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", 2);
- for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data
+ += sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:return2 0x%x\n", offset));
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -199,63 +274,122 @@ moreData:
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 2);
- for (index = 0; index < 256; index++) {
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4) ; index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", *(temp + index));
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
}
pm8001_ha->fatal_forensic_shift_offset += 0x100;
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
pm8001_ha->fatal_bar_loc = 0;
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: return3 0x%x\n", offset));
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
if (pm8001_ha->forensic_fatal_step == 1) {
- pm8001_ha->fatal_forensic_shift_offset = 0;
- /* Read 64K of the debug data. */
- pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
- pm8001_ha->fatal_forensic_shift_offset);
- pm8001_mw32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+ /* store previous accumulated length before triggering next
+ * accumulated length update
+ */
+ pm8001_ha->forensic_preserved_accumulated_transfer =
+ pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+
+ /* continue capturing the fatal log until Dump status is 0x3 */
+ if (pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS) <
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+
+ /* reset fddstat bit by writing to zero*/
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS, 0x0);
+
+ /* set dump control value to '1' so that new data will
+ * be transferred to shared memory
+ */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
MPI_FATAL_EDUMP_HANDSHAKE_RDY);
- /* Poll FDDHSHK until clear */
- start = jiffies + (2 * HZ); /* 2 sec */
+ /*Poll FDDHSHK until clear */
+ start = jiffies + (2 * HZ); /* 2 sec */
- do {
- reg_val = pm8001_mr32(fatal_table_address,
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
- } while ((reg_val) && time_before(jiffies, start));
+ } while ((reg_val) && time_before(jiffies, start));
- if (reg_val != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
- " = 0x%x\n", reg_val));
- return -EIO;
- }
-
- /* Read the next 64K of the debug data. */
- pm8001_ha->forensic_fatal_step = 0;
- if (pm8001_mr32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_STATUS) !=
- MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
- pm8001_mw32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
- goto moreData;
- } else {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", 4);
- pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
- pm8001_ha->forensic_info.data_buf.direct_len = 0;
- pm8001_ha->forensic_info.data_buf.direct_offset = 0;
- pm8001_ha->forensic_info.data_buf.read_len = 0;
+ if (reg_val != 0) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
+ reg_val));
+ /* Fail the dump if a timeout occurs */
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ return((char *)
+ pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ }
+ /* Poll status register until set to 2 or
+ * 3 for up to 2 seconds
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS);
+			} while (((reg_val != 2) && (reg_val != 3)) &&
+ time_before(jiffies, start));
+
+ if (reg_val < 2) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
+ reg_val));
+ /* Fail the dump if a timeout occurs */
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0,
+ MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ }
+ /* Read the next block of the debug data.*/
+ length_to_read = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+ if (length_to_read != 0x0) {
+ pm8001_ha->forensic_fatal_step = 0;
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 4);
+ pm8001_ha->forensic_info.data_buf.read_len
+ = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len
+ = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset
+ = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+ }
}
}
-
+ offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: return4 0x%x\n", offset));
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
@@ -317,6 +451,25 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
}
/**
@@ -521,6 +674,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(addressib, (offsetib + 0x18));
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
+ pm8001_ha->inbnd_q_tbl[i].pi_offset));
}
for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
@@ -549,6 +707,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(addressob, (offsetob + 0x18));
pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
+ pm8001_ha->outbnd_q_tbl[i].ci_offset));
}
}
@@ -582,6 +745,10 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
((pm8001_ha->number_of_intr - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Updated Fatal error interrupt vector 0x%x\n",
+ pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)));
+
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
@@ -591,6 +758,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Programming DW 0x21 in main cfg table with 0x%x\n",
+ pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)));
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
@@ -629,6 +799,21 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].lower_base_addr));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "CI upper base addr 0x%x CI lower base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr));
}
/**
@@ -652,6 +837,21 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->outbnd_q_tbl[number].element_size_cnt));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].lower_base_addr));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "PI upper base addr 0x%x PI lower base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr));
}
/**
@@ -669,9 +869,9 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
/* wait until Inbound DoorBell Clear Register toggled */
if (IS_SPCV_12G(pm8001_ha->pdev)) {
- max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
} else {
- max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
udelay(1);
@@ -797,7 +997,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
- PM8001_INIT_DBG(pm8001_ha,
+ PM8001_DEV_DBG(pm8001_ha,
pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
offset, value));
pcilogic = (value & 0xFC000000) >> 26;
@@ -885,7 +1085,12 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
(THERMAL_ENABLE << 8) | page_code;
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
+ payload.cfg_pg[0], payload.cfg_pg[1]));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
@@ -967,7 +1172,8 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1090,7 +1296,12 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
KEK_MGMT_SUBOP_KEYCARDUPDATE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Saving Encryption info to flash. payload 0x%x\n",
+ payload.new_curidx_ksop));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1241,7 +1452,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
pm8001_printk("reset register before write : 0x%x\n", regval));
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
- mdelay(500);
+ msleep(500);
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
PM8001_INIT_DBG(pm8001_ha,
@@ -1443,7 +1654,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Executing abort task end\n"));
if (ret) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1519,7 +1733,9 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing read log end\n"));
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1570,6 +1786,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
@@ -1772,7 +1992,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -1826,7 +2046,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event));
switch (event) {
@@ -1963,7 +2183,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
@@ -1974,7 +2194,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
return;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2062,6 +2282,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+
+ if (unlikely(status))
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
@@ -2365,7 +2591,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2382,6 +2608,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -2435,7 +2663,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event));
switch (event) {
@@ -2655,6 +2883,9 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
+ PM8001_DEV_DBG(pm8001_ha,
+ pm8001_printk("tag::0x%x status::0x%x\n", tag, status));
+
switch (status) {
case IO_SUCCESS:
@@ -2822,7 +3053,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -2873,7 +3104,8 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
((phyId & 0xFF) << 24) | (port_id & 0xFF));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -2964,7 +3196,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("unknown device type(%x)\n", deviceType));
break;
}
@@ -2984,7 +3216,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
if (pm8001_ha->flags == PM8001F_RUN_TIME)
- mdelay(200);/*delay a moment to wait disk to spinup*/
+ msleep(200);/*delay a moment to wait disk to spinup*/
pm8001_bytes_dmaed(pm8001_ha, phy_id);
}
@@ -3013,7 +3245,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"port id %d, phy id %d link_rate %d portstate 0x%x\n",
port_id, phy_id, link_rate, portstate));
@@ -3101,7 +3333,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk(" Phy Down and(default) = 0x%x\n",
portstate));
break;
@@ -3130,8 +3362,10 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (status == 0) {
phy->phy_state = PHY_LINK_DOWN;
if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL)
+ phy->enable_completion != NULL) {
complete(phy->enable_completion);
+ phy->enable_completion = NULL;
+ }
}
return 0;
@@ -3191,7 +3425,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEV_DBG(pm8001_ha,
pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
port_id, phy_id, eventType, status));
@@ -3376,7 +3610,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown event type 0x%x\n", eventType));
break;
}
@@ -3758,7 +3992,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
ssp_coalesced_comp_resp(pm8001_ha, piomb);
break;
default:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
break;
}
@@ -3991,8 +4225,8 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- (u32 *)&smp_cmd, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
+ sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
return 0;
@@ -4200,7 +4434,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
}
q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &ssp_cmd, q_index);
+ &ssp_cmd, sizeof(ssp_cmd), q_index);
return ret;
}
@@ -4441,7 +4675,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, q_index);
+ &sata_cmd, sizeof(sata_cmd), q_index);
return ret;
}
@@ -4465,23 +4699,9 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
- /*
- ** [0:7] PHY Identifier
- ** [8:11] link rate 1.5G, 3G, 6G
- ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
- ** [14] 0b disable spin up hold; 1b enable spin up hold
- ** [15] ob no change in current PHY analig setup 1b enable using SPAST
- */
- if (!IS_SPCV_12G(pm8001_ha->pdev))
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | phy_id);
- else
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
- phy_id);
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
/* SSC Disable and SAS Analog ST configuration */
/**
payload.ase_sh_lm_slr_phyid =
@@ -4494,9 +4714,10 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4518,7 +4739,8 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4584,7 +4806,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -4614,7 +4837,8 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
@@ -4641,6 +4865,9 @@ static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
process_oq(pm8001_ha, vec);
pm80xx_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4669,7 +4896,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
}
@@ -4711,7 +4939,8 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
for (i = 0; i < length; i++)
payload.reserved[i] = cpu_to_le32(*(buf + i));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
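Every pm8001_mpi_build_cmd() caller in this file now passes the payload size, matching the size_t nb parameter added to the prototype in pm8001_sas.h. A representative call shape, modelled on the thermal-config path above (the payload struct name is assumed from that path, not shown in the hunks):

	struct set_ctrl_cfg_req payload;

	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(tag);
	/* nb == sizeof(payload): the MPI layer now knows how many bytes of
	 * the command are valid instead of assuming a fixed size.
	 */
	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
			sizeof(payload), 0);
	if (rc)
		pm8001_tag_free(pm8001_ha, tag);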
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index dc9ab7689060..701951a0f715 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -220,6 +220,9 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
+#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* 30 sec */
+#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 1000 * 1000) /* 15 sec */
+
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
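SPCV_DOORBELL_CLEAR_TIMEOUT and SPC_DOORBELL_CLEAR_TIMEOUT are iteration counts for the udelay(1) polling loop in mpi_init_check(), so they correspond to roughly 30 and 15 seconds of polling. A condensed paraphrase of how the loop consumes them (not the verbatim driver code):

	u32 max_wait_count, value;

	if (IS_SPCV_12G(pm8001_ha->pdev))
		max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;	/* ~30 s */
	else
		max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;	/* ~15 s */
	do {
		udelay(1);			/* ~1 us per iteration */
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPCv_MSGU_CFG_TABLE_UPDATE;
	} while (value && --max_wait_count);

	if (!max_wait_count)
		return -EBUSY;			/* doorbell never cleared */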
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 398d2af60832..7eb88fe1eb0b 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3973,9 +3973,7 @@ static const struct file_operations pmcraid_fops = {
.open = pmcraid_chr_open,
.fasync = pmcraid_chr_fasync,
.unlocked_ioctl = pmcraid_chr_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pmcraid_chr_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
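compat_ptr_ioctl() is the generic helper for drivers whose ioctl arguments only need the compat_ptr() pointer conversion; <linux/fs.h> defines it to NULL when CONFIG_COMPAT is off, which is why the #ifdef around .compat_ioctl can simply be dropped. A minimal sketch of the pattern (handler and struct names are illustrative):

#include <linux/fs.h>
#include <linux/module.h>

static long example_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	/* 'arg' arrives as a proper user pointer for both native and
	 * 32-bit compat callers when routed via compat_ptr_ioctl().
	 */
	return 0;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,	/* NULL if !CONFIG_COMPAT */
	.llseek		= noop_llseek,
};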
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index d979f095aeda..2386bfb73c46 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -42,7 +42,7 @@ extern uint qedf_debug;
#define QEDF_LOG_LPORT 0x4000 /* lport logs */
#define QEDF_LOG_ELS 0x8000 /* ELS logs */
#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */
-#define QEDF_LOG_SESS 0x20000 /* Conection setup, cleanup */
+#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDF_LOG_TID 0x80000 /*
* FW TID context acquire
* free
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 59ca98f12afd..604856e72cfb 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1926,6 +1926,13 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost)
return 0;
}
+static void qedf_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_host_port_id(shost) = lport->port_id;
+}
+
static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
*shost)
{
@@ -1996,6 +2003,7 @@ static struct fc_function_template qedf_fc_transport_fn = {
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
+ .get_host_port_id = qedf_get_host_port_id,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
.get_host_speed = fc_get_host_speed,
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index 243acc8b520a..37d084086fd4 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -44,7 +44,7 @@ extern uint qedi_dbg_log;
#define QEDI_LOG_LPORT 0x4000 /* lport logs */
#define QEDI_LOG_ELS 0x8000 /* ELS logs */
#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
-#define QEDI_LOG_SESS 0x20000 /* Conection setup, cleanup */
+#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
* free
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7259bce85e0e..ae97e2f310a3 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -102,8 +102,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla8044_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla8044_idc_unlock(ha);
- } else
+ } else {
+ ha->fw_dump_mpi = 1;
qla2x00_system_error(vha);
+ }
break;
case 4:
if (IS_P3P_TYPE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6ffa9877c28b..460f443f6471 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -591,19 +591,23 @@ typedef struct srb {
*/
uint8_t cmd_type;
uint8_t pad[3];
- atomic_t ref_count;
struct kref cmd_kref; /* need to migrate ref_count over to this */
void *priv;
wait_queue_head_t nvme_ls_waitq;
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
+ unsigned int abort:1;
+ unsigned int aborted:1;
+ unsigned int completed:1;
+
uint32_t handle;
uint16_t flags;
uint16_t type;
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct srb *cmd_sp;
struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
@@ -2277,7 +2281,7 @@ typedef struct {
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
uint8_t fc4_type;
- uint8_t fc4f_nvme; /* nvme fc4 feature bits */
+ uint8_t fc4_features;
} sw_info_t;
/* FCP-4 types */
@@ -2445,7 +2449,7 @@ typedef struct fc_port {
u32 supported_classes;
uint8_t fc4_type;
- uint8_t fc4f_nvme;
+ uint8_t fc4_features;
uint8_t scan_state;
unsigned long last_queue_full;
@@ -2476,6 +2480,11 @@ typedef struct fc_port {
u16 n2n_chip_reset;
} fc_port_t;
+enum {
+ FC4_PRIORITY_NVME = 1,
+ FC4_PRIORITY_FCP = 2,
+};
+
#define QLA_FCPORT_SCAN 1
#define QLA_FCPORT_FOUND 2
@@ -4291,6 +4300,8 @@ struct qla_hw_data {
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
+ uint8_t fc4_type_priority;
+
atomic_t zio_threshold;
uint16_t last_zio_threshold;
@@ -4816,6 +4827,23 @@ struct sff_8247_a0 {
ha->current_topology == ISP_CFG_N || \
!ha->current_topology)
+#define NVME_TYPE(fcport) \
+ (fcport->fc4_type & FS_FC4TYPE_NVME) \
+
+#define FCP_TYPE(fcport) \
+ (fcport->fc4_type & FS_FC4TYPE_FCP) \
+
+#define NVME_ONLY_TARGET(fcport) \
+ (NVME_TYPE(fcport) && !FCP_TYPE(fcport)) \
+
+#define NVME_FCP_TARGET(fcport) \
+ (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \
+
+#define NVME_TARGET(ha, fcport) \
+ ((NVME_FCP_TARGET(fcport) && \
+ (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
+ NVME_ONLY_TARGET(fcport)) \
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 732bb871c433..59f6903e5abe 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -2101,4 +2101,6 @@ struct qla_fcp_prio_cfg {
#define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4)
#define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4)
+#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d11416dcee4e..5b163ad85c34 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -917,4 +917,5 @@ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 5298ed10059f..67230688b05e 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -248,7 +248,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
WWN_SIZE);
fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
- FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+ FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
@@ -2887,7 +2887,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
- uint8_t fcp_scsi_features = 0;
+ uint8_t fcp_scsi_features = 0, nvme_features = 0;
struct ct_arg arg;
for (i = 0; i < ha->max_fibre_devices; i++) {
@@ -2933,14 +2933,19 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fcp_scsi_features &= 0x0f;
- if (fcp_scsi_features)
- list[i].fc4_type = FC4_TYPE_FCP_SCSI;
- else
- list[i].fc4_type = FC4_TYPE_OTHER;
+ if (fcp_scsi_features) {
+ list[i].fc4_type = FS_FC4TYPE_FCP;
+ list[i].fc4_features = fcp_scsi_features;
+ }
- list[i].fc4f_nvme =
+ nvme_features =
ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- list[i].fc4f_nvme &= 0xf;
+ nvme_features &= 0xf;
+
+ if (nvme_features) {
+ list[i].fc4_type |= FS_FC4TYPE_NVME;
+ list[i].fc4_features = nvme_features;
+ }
}
/* Last device exit. */
@@ -3005,7 +3010,7 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == QLA_FUNCTION_TIMEOUT)
- return;
+ goto done;
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3435,6 +3440,8 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
struct event_arg ea;
+ uint8_t fc4_scsi_feat;
+ uint8_t fc4_nvme_feat;
ql_dbg(ql_dbg_disc, vha, 0x2133,
"Async done-%s res %x ID %x. %8phC\n",
@@ -3442,24 +3449,25 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
fcport->flags &= ~FCF_ASYNC_SENT;
ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+ fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
* The format of the FC-4 Features object, as defined by the FC-4,
* Shall be an array of 4-bit values, one for each type code value
*/
if (!res) {
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+ if (fc4_scsi_feat & 0xf) {
/* w1 b00:03 */
- fcport->fc4_type =
- ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
- fcport->fc4_type &= 0xf;
- }
+ fcport->fc4_type = FS_FC4TYPE_FCP;
+ fcport->fc4_features = fc4_scsi_feat & 0xf;
+ }
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+ if (fc4_nvme_feat & 0xf) {
/* w5 [00:03]/28h */
- fcport->fc4f_nvme =
- ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- fcport->fc4f_nvme &= 0xf;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ fcport->fc4_features = fc4_nvme_feat & 0xf;
}
}
@@ -3563,7 +3571,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
u8 recheck = 0;
u16 dup = 0, dup_cnt = 0;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
@@ -3580,8 +3588,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Fabric scan failed on all retries.\n");
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: Fabric scan failed for %d retries.\n",
+ __func__, vha->scan.scan_retry);
}
goto out;
}
@@ -4047,7 +4056,7 @@ done_free_sp:
void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
qla24xx_async_gnnft(vha, sp, sp->gen2);
}
@@ -4061,7 +4070,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
u32 rspsz;
unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
if (!vha->flags.online)
@@ -4070,14 +4079,15 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags & SF_SCANNING) {
spin_unlock_irqrestore(&vha->work_lock, flags);
- ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: scan active\n", __func__);
return rval;
}
vha->scan.scan_flags |= SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
if (fc4_type == FC4_TYPE_FCP_SCSI) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s: Performing FCP Scan\n", __func__);
if (sp)
@@ -4132,7 +4142,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
}
sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s scan list size %d\n", __func__, vha->scan.size);
memset(vha->scan.l, 0, vha->scan.size);
@@ -4197,8 +4207,8 @@ done_free_sp:
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
if (vha->scan.scan_flags == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s: schedule\n", __func__);
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: Scan scheduled.\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
schedule_delayed_work(&vha->scan.scan_work, 5);
}
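The GFF_ID response carries one 4-bit feature field per FC-4 type; the rework records a type bit for every non-zero field instead of treating FCP and NVMe as mutually exclusive, so a dual port keeps both bits and is later resolved through NVME_TARGET(). A sketch of the classification (feature values are illustrative):

	u8 fcp_feat  = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf;
	u8 nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf;

	if (fcp_feat)
		fcport->fc4_type |= FS_FC4TYPE_FCP;	/* w1 bits 0:3 */
	if (nvme_feat)
		fcport->fc4_type |= FS_FC4TYPE_NVME;	/* w5 bits 0:3, type 28h */
	/* Both bits set -> dual port, resolved by ha->fc4_type_priority. */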
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1d041313ec52..1dbee8800218 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,7 +17,6 @@
#include <asm/prom.h>
#endif
-#include <target/target_core_base.h>
#include "qla_target.h"
/*
@@ -101,8 +100,22 @@ static void qla24xx_abort_iocb_timeout(void *data)
u32 handle;
unsigned long flags;
+ if (sp->cmd_sp)
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+ "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
+ sp->cmd_sp->handle, sp->cmd_sp->type,
+ sp->handle, sp->type);
+ else
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+ "Abort timeout 2 - hdl=%x, type=%x\n",
+ sp->handle, sp->type);
+
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
+ if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
+ sp->cmd_sp))
+ qpair->req->outstanding_cmds[handle] = NULL;
+
/* removing the abort */
if (qpair->req->outstanding_cmds[handle] == sp) {
qpair->req->outstanding_cmds[handle] = NULL;
@@ -111,6 +124,9 @@ static void qla24xx_abort_iocb_timeout(void *data)
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ if (sp->cmd_sp)
+ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+
abt->u.abt.comp_status = CS_TIMEOUT;
sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
@@ -142,6 +158,7 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
sp->type = SRB_ABT_CMD;
sp->name = "abort";
sp->qpair = cmd_sp->qpair;
+ sp->cmd_sp = cmd_sp;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
@@ -328,7 +345,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
else
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x2072,
@@ -726,19 +743,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
current_login_state = e->current_login_state >> 4;
else
current_login_state = e->current_login_state & 0xf;
-
ql_dbg(ql_dbg_disc, vha, 0x20e2,
- "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
__func__, fcport->port_name,
e->current_login_state, fcport->fw_login_state,
- fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+ fcport->fc4_type, id.b24, fcport->d_id.b24,
+ loop_id, fcport->loop_id);
switch (fcport->disc_state) {
case DSC_DELETE_PEND:
@@ -1135,19 +1150,18 @@ static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- if (res == QLA_FUNCTION_TIMEOUT) {
- dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
- sp->u.iocb_cmd.u.mbx.in_dma);
- return;
- }
-
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
+ if (res == QLA_FUNCTION_TIMEOUT)
+ goto done;
+
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
ea.sp = sp;
qla24xx_handle_gpdb_event(vha, &ea);
+done:
dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
sp->u.iocb_cmd.u.mbx.in_dma);
@@ -1225,13 +1239,13 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x211b,
"Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
- fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
+ fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc");
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -1382,14 +1396,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
- fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
- ea->rc);
+ "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
+ fcport->port_name, fcport->disc_state, pd->current_login_state,
+ fcport->fc4_type, ea->rc);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
ls = pd->current_login_state >> 4;
else
ls = pd->current_login_state & 0xf;
@@ -1578,7 +1592,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post %s PRLI\n",
__func__, __LINE__, fcport->port_name,
- fcport->fc4f_nvme ? "NVME" : "FC");
+ NVME_TARGET(vha->hw, fcport) ? "NVME" :
+ "FC");
qla24xx_post_prli_work(vha, fcport);
}
break;
@@ -1701,6 +1716,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
}
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post PRLI\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+}
+
/*
* RSCN(s) came in for this fcport, but the RSCN(s) was not able
* to be consumed by the fcport
@@ -1860,38 +1884,26 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
break;
}
- if (ea->fcport->fc4f_nvme) {
+ /*
+ * Retry PRLI with other FC-4 type if failure occurred on dual
+ * FCP/NVMe port
+ */
+ if (NVME_FCP_TARGET(ea->fcport)) {
ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post fc4 prli\n",
- __func__, __LINE__, ea->fcport->port_name);
- ea->fcport->fc4f_nvme = 0;
- qla24xx_post_prli_work(vha, ea->fcport);
- return;
+ "%s %d %8phC post %s prli\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ?
+ "NVMe" : "FCP");
+ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+ else
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
}
- /* at this point both PRLI NVME & PRLI FCP failed */
- if (N2N_TOPO(vha->hw)) {
- if (ea->fcport->n2n_link_reset_cnt < 3) {
- ea->fcport->n2n_link_reset_cnt++;
- /*
- * remote port is not sending Plogi. Reset
- * link to kick start his state machine
- */
- set_bit(N2N_LINK_RESET, &vha->dpc_flags);
- } else {
- ql_log(ql_log_warn, vha, 0x2119,
- "%s %d %8phC Unable to reconnect\n",
- __func__, __LINE__, ea->fcport->port_name);
- }
- } else {
- /*
- * switch connect. login failed. Take connection
- * down and allow relogin to retrigger
- */
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- ea->fcport->keep_nport_handle = 0;
- qlt_schedule_sess_for_deletion(ea->fcport);
- }
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->keep_nport_handle = 0;
+ ea->fcport->logout_on_delete = 1;
+ qlt_schedule_sess_for_deletion(ea->fcport);
break;
}
}
@@ -1952,7 +1964,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- if (ea->fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, ea->fcport)) {
ql_dbg(ql_dbg_disc, vha, 0x2117,
"%s %d %8phC post prli\n",
__func__, __LINE__, ea->fcport->port_name);
@@ -2206,8 +2218,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x0061,
"Configure NVRAM parameters...\n");
+ /* Let priority default to FCP, can be overridden by nvram_config */
+ ha->fc4_type_priority = FC4_PRIORITY_FCP;
+
ha->isp_ops->nvram_config(vha);
+ if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
+ ha->fc4_type_priority != FC4_PRIORITY_NVME)
+ ha->fc4_type_priority = FC4_PRIORITY_FCP;
+
+ ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
+ ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
+
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
ql_log(ql_log_info, vha, 0x0077,
@@ -5382,7 +5404,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_iidma_fcport(vha, fcport);
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
fcport->disc_state = DSC_LOGIN_COMPLETE;
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
@@ -5710,11 +5732,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
new_fcport->fc4_type = swl[swl_idx].fc4_type;
new_fcport->nvme_flag = 0;
- new_fcport->fc4f_nvme = 0;
if (vha->flags.nvme_enabled &&
- swl[swl_idx].fc4f_nvme) {
- new_fcport->fc4f_nvme =
- swl[swl_idx].fc4f_nvme;
+ swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
ql_log(ql_log_info, vha, 0x2131,
"FOUND: NVME port %8phC as FC Type 28h\n",
new_fcport->port_name);
@@ -5770,7 +5789,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
if (ql2xgffidenable &&
- (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+ (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
continue;
@@ -5839,7 +5858,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
fcport->disc_state = DSC_GNL;
vha->fcport_count--;
@@ -8514,6 +8533,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* N2N: driver will initiate Login instead of FW */
icb->firmware_options_3 |= BIT_8;
+ /* Determine NVMe/FCP priority for target ports */
+ ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -9003,8 +9025,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
struct qla_hw_data *ha = qpair->hw;
qpair->delete_in_progress = 1;
- while (atomic_read(&qpair->ref_count))
- msleep(500);
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0c3d907af769..352aba4127f7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -307,3 +307,15 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
WRT_REG_DWORD(req->req_q_in, req->ring_index);
}
+
+static inline int
+qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
+{
+ uint32_t data;
+
+ data =
+ ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];
+
+
+ return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
+}
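qla2xxx_get_fc4_priority() reads the single NVRAM byte at offset 0x196 (NVRAM_DUAL_FCP_NVME_FLAG_OFFSET) and tests only bit 6: set means FCP is the preferred FC-4 type on dual ports, clear means NVMe. Worked through (byte values are illustrative):

	/* nvram[0x196] = 0x40: bit 6 set   -> (0x40 >> 6) & BIT_0 == 1 -> FC4_PRIORITY_FCP  */
	/* nvram[0x196] = 0x00: bit 6 clear -> (0x00 >> 6) & BIT_0 == 0 -> FC4_PRIORITY_NVME */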
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 518eb954cf42..b25f87ff8cde 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2740,6 +2740,10 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct scsi_qla_host *vha = sp->vha;
struct event_arg ea;
struct qla_work_evt *e;
+ struct fc_port *conflict_fcport;
+ port_id_t cid; /* conflict Nport id */
+ u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
"%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
@@ -2751,14 +2755,101 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- if (res) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
+ switch (fw_status[0]) {
+ case CS_DATA_UNDERRUN:
+ case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
- ea.data[0] = MBS_COMMAND_COMPLETE;
- ea.sp = sp;
- qla24xx_handle_plogi_done_event(vha, &ea);
+ ea.rc = res;
+ qla_handle_els_plogi_done(vha, &ea);
+ break;
+
+ case CS_IOCB_ERROR:
+ switch (fw_status[1]) {
+ case LSC_SCODE_PORTID_USED:
+ lid = fw_status[2] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(fcport->port_name),
+ fcport->d_id, lid, &conflict_fcport);
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same
+ * loop_id & nport id; conflict
+ * fcport needs to finish cleanup
+ * before this fcport can proceed
+ * to login.
+ */
+ conflict_fcport->conflict = fcport;
+ fcport->login_pause = 1;
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ fcport->loop_id = lid;
+ fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ break;
+
+ case LSC_SCODE_NPORT_USED:
+ cid.b.domain = (fw_status[2] >> 16) & 0xff;
+ cid.b.area = (fw_status[2] >> 8) & 0xff;
+ cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.rsvd_1 = 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->loop_id, cid.b24);
+ set_bit(fcport->loop_id,
+ vha->hw->loop_id_map);
+ fcport->loop_id = FC_NO_LOOP_ID;
+ qla24xx_post_gnl_work(vha, fcport);
+ break;
+
+ case LSC_SCODE_NOXCB:
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xd046,
+ "Exchange starvation. Resetting RISC\n");
+ vha->hw->exch_starvation = 0;
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ /* fall through */
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_LOGIN_FAILED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->disc_state = DSC_LOGIN_FAILED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
}
e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
@@ -2792,11 +2883,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
return -ENOMEM;
}
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_LOGIN_PEND;
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 009fd5a33fcd..1b8f297449cf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1227,11 +1227,32 @@ global_port_update:
break;
case MBA_IDC_AEN:
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
- mb[5] = RD_REG_WORD(&reg24->mailbox5);
- mb[6] = RD_REG_WORD(&reg24->mailbox6);
- mb[7] = RD_REG_WORD(&reg24->mailbox7);
- qla83xx_handle_8200_aen(vha, mb);
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->flags.fw_init_done = 0;
+ ql_log(ql_log_warn, vha, 0xffff,
+ "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+ mb[0], mb[1], mb[2], mb[3]);
+
+ if ((mb[1] & BIT_8) ||
+ (mb[2] & BIT_8)) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "MPI Heartbeat stop. FW dump needed\n");
+ ha->fw_dump_mpi = 1;
+ ha->isp_ops->fw_dump(vha, 1);
+ }
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (IS_QLA83XX(ha)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[5] = RD_REG_WORD(&reg24->mailbox5);
+ mb[6] = RD_REG_WORD(&reg24->mailbox6);
+ mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ qla83xx_handle_8200_aen(vha, mb);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
+ mb[0], mb[1], mb[2], mb[3]);
+ }
break;
case MBA_DPORT_DIAGNOSTICS:
@@ -2466,6 +2487,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ if (sp->abort)
+ sp->aborted = 1;
+ else
+ sp->completed = 1;
+
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
ql_dbg(ql_dbg_io, vha, 0x3015,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 4a1f21c11758..0cf94f05f008 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1932,7 +1932,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(ha, fcport)) {
current_login_state = pd24->current_login_state >> 4;
last_login_state = pd24->last_login_state >> 4;
} else {
@@ -3899,8 +3899,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
+ fcport->fc4_type = FS_FC4TYPE_FCP;
if (vha->flags.nvme_enabled)
- fcport->fc4f_nvme = 1;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
switch (fcport->disc_state) {
case DSC_DELETED:
@@ -6287,17 +6288,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
case QLA_SUCCESS:
ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
__func__, sp->name);
- sp->free(sp);
break;
default:
ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
- sp->free(sp);
break;
}
- return rval;
-
done_free_sp:
sp->free(sp);
done:
@@ -6362,7 +6359,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
uint64_t zero = 0;
u8 current_login_state, last_login_state;
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
current_login_state = pd->current_login_state >> 4;
last_login_state = pd->last_login_state >> 4;
} else {
@@ -6397,8 +6394,8 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
- if (fcport->fc4f_nvme) {
- fcport->port_type = 0;
+ if (NVME_TARGET(vha->hw, fcport)) {
+ fcport->port_type = FCT_NVME;
if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
fcport->port_type |= FCT_NVME_INITIATOR;
if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 238240984bc1..eabc5127174e 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -946,7 +946,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
if (!sp)
- goto done;
+ return rval;
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
@@ -962,7 +962,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
ql_dbg(ql_dbg_async, vha, 0xffff,
"%s: %s Failed submission. %x.\n",
__func__, sp->name, rval);
- goto done_free_sp;
+ goto done;
}
ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
@@ -980,16 +980,13 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
case QLA_SUCCESS:
ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
__func__, sp->name);
- goto done_free_sp;
+ break;
default:
ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
- goto done_free_sp;
+ break;
}
done:
- return rval;
-
-done_free_sp:
sp->free(sp);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 6cc19e060afc..941aa53363f5 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -224,8 +224,8 @@ static void qla_nvme_abort_work(struct work_struct *work)
if (ha->flags.host_shutting_down) {
ql_log(ql_log_info, sp->fcport->vha, 0xffff,
- "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
- __func__, sp, sp->type, atomic_read(&sp->ref_count));
+ "%s Calling done on sp: %p, type: 0x%x\n",
+ __func__, sp, sp->type);
sp->done(sp, 0);
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 726ad4cbf4a6..8b84bc4a6ac8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -698,11 +698,6 @@ void qla2x00_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
- atomic_dec(&sp->ref_count);
-
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
@@ -794,11 +789,6 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
- atomic_dec(&sp->ref_count);
-
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
@@ -903,7 +893,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- atomic_set(&sp->ref_count, 1);
+
CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
@@ -985,18 +975,16 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- atomic_set(&sp->ref_count, 1);
CMD_SP(cmd) = (void *)sp;
sp->free = qla2xxx_qpair_sp_free_dma;
sp->done = qla2xxx_qpair_sp_compl;
- sp->qpair = qpair;
rval = ha->isp_ops->start_scsi_mq(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
if (rval == QLA_INTERFACE_ERROR)
- goto qc24_fail_command;
+ goto qc24_free_sp_fail_command;
goto qc24_host_busy_free_sp;
}
@@ -1008,6 +996,11 @@ qc24_host_busy_free_sp:
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
+qc24_free_sp_fail_command:
+ sp->free(sp);
+ CMD_SP(cmd) = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+
qc24_fail_command:
cmd->scsi_done(cmd);
@@ -1184,16 +1177,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-static int
-sp_get(struct srb *sp)
-{
- if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
- /* kref get fail */
- return ENXIO;
- else
- return 0;
-}
-
#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
* qla2x00_isp_reg_stat
@@ -1249,6 +1232,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
uint64_t lun;
int rval;
struct qla_hw_data *ha = vha->hw;
+ uint32_t ratov_j;
+ struct qla_qpair *qpair;
+ unsigned long flags;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
@@ -1261,13 +1247,26 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return ret;
sp = scsi_cmd_priv(cmd);
+ qpair = sp->qpair;
- if (sp->fcport && sp->fcport->deleted)
+ if ((sp->fcport && sp->fcport->deleted) || !qpair)
return SUCCESS;
- /* Return if the command has already finished. */
- if (sp_get(sp))
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (sp->completed) {
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return SUCCESS;
+ }
+
+ if (sp->abort || sp->aborted) {
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return FAILED;
+ }
+
+ sp->abort = 1;
+ sp->comp = &comp;
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -1276,47 +1275,37 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
+ /*
+ * Abort will release the original Command/sp from FW. Let the
+	 * original command call scsi_done. In return, it will wake up
+ * this sleeping thread.
+ */
rval = ha->isp_ops->abort_command(sp);
+
ql_dbg(ql_dbg_taskm, vha, 0x8003,
"Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
+ /* Wait for the command completion. */
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
- /*
- * The command has been aborted. That means that the firmware
- * won't report a completion.
- */
- sp->done(sp, DID_ABORT << 16);
- ret = SUCCESS;
- break;
- case QLA_FUNCTION_PARAMETER_ERROR: {
- /* Wait for the command completion. */
- uint32_t ratov = ha->r_a_tov/10;
- uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000);
-
- WARN_ON_ONCE(sp->comp);
- sp->comp = &comp;
if (!wait_for_completion_timeout(&comp, ratov_j)) {
ql_dbg(ql_dbg_taskm, vha, 0xffff,
"%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
- __func__, ha->r_a_tov);
+ __func__, ha->r_a_tov/10);
ret = FAILED;
} else {
ret = SUCCESS;
}
break;
- }
default:
- /*
- * Either abort failed or abort and completion raced. Let
- * the SCSI core retry the abort in the former case.
- */
ret = FAILED;
break;
}
sp->comp = NULL;
- atomic_dec(&sp->ref_count);
+
ql_log(ql_log_info, vha, 0x801c,
"Abort command issued nexus=%ld:%d:%llu -- %x.\n",
vha->host_no, id, lun, ret);
@@ -1708,32 +1697,53 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
int rval;
+ bool ret_cmd;
+ uint32_t ratov_j;
- if (sp_get(sp))
+ if (qla2x00_chip_is_down(vha)) {
+ sp->done(sp, res);
return;
+ }
if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
(sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!qla2x00_isp_reg_stat(ha))) {
+ if (sp->comp) {
+ sp->done(sp, res);
+ return;
+ }
+
sp->comp = &comp;
+ sp->abort = 1;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
- rval = ha->isp_ops->abort_command(sp);
+ rval = ha->isp_ops->abort_command(sp);
+ /* Wait for command completion. */
+ ret_cmd = false;
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
- sp->done(sp, res);
+			if (!wait_for_completion_timeout(&comp, ratov_j)) {
+ ql_dbg(ql_dbg_taskm, vha, 0xffff,
+ "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
+ __func__, ha->r_a_tov/10);
+ ret_cmd = true;
+ }
+			/* else FW returned SP to driver */
break;
- case QLA_FUNCTION_PARAMETER_ERROR:
- wait_for_completion(&comp);
+ default:
+ ret_cmd = true;
break;
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- sp->comp = NULL;
+ if (ret_cmd && (!sp->completed || !sp->aborted))
+ sp->done(sp, res);
+ } else {
+ sp->done(sp, res);
}
-
- atomic_dec(&sp->ref_count);
}
static void
@@ -1755,7 +1765,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- req->outstanding_cmds[cnt] = NULL;
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1777,6 +1786,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
default:
break;
}
+ req->outstanding_cmds[cnt] = NULL;
}
}
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
@@ -3492,6 +3502,29 @@ disable_device:
return ret;
}
+static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
+{
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ struct qla_hw_data *ha;
+
+ if (!base_vha)
+ return;
+
+ ha = base_vha->hw;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list)
+ set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
+
+ /*
+ * Indicate device removal to prevent future board_disable
+ * and wait until any pending board_disable has completed.
+ */
+ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
static void
qla2x00_shutdown(struct pci_dev *pdev)
{
@@ -3508,7 +3541,7 @@ qla2x00_shutdown(struct pci_dev *pdev)
* Prevent future board_disable and wait
* until any pending board_disable has completed.
*/
- set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags);
+ __qla_set_remove_flag(vha);
cancel_work_sync(&ha->board_disable);
if (!atomic_read(&pdev->enable_cnt))
@@ -3668,10 +3701,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha = base_vha->hw;
ql_log(ql_log_info, base_vha, 0xb079,
"Removing driver\n");
-
- /* Indicate device removal to prevent future board_disable and wait
- * until any pending board_disable has completed. */
- set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ __qla_set_remove_flag(base_vha);
cancel_work_sync(&ha->board_disable);
/*
@@ -4666,7 +4696,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->sfp_data = NULL;
if (ha->flt)
- dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ dma_free_coherent(&ha->pdev->dev,
+ sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
ha->flt, ha->flt_dma);
ha->flt = NULL;
ha->flt_dma = 0;
@@ -5042,19 +5073,17 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
- fcport->fc4_type = FC4_TYPE_FCP_SCSI;
-
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
- fcport->fc4_type = FC4_TYPE_OTHER;
- fcport->fc4f_nvme = FC4_TYPE_NVME;
- }
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
- if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
+ fcport->fc4_type = e->u.new_sess.fc4_type;
+ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
+ fcport->fc4_type = FS_FC4TYPE_FCP;
fcport->n2n_flag = 1;
+ if (vha->flags.nvme_enabled)
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ }
} else {
ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -5158,7 +5187,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->flags &= ~FCF_FABRIC_DEVICE;
fcport->keep_nport_handle = 1;
if (vha->flags.nvme_enabled) {
- fcport->fc4f_nvme = 1;
+ fcport->fc4_type =
+ (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
fcport->n2n_flag = 1;
}
fcport->fw_login_state = 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a06e56224a55..51b275a575a5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -463,7 +463,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
case IMMED_NOTIFY_TYPE:
{
- struct scsi_qla_host *host = vha;
+ struct scsi_qla_host *host;
struct imm_ntfy_from_isp *entry =
(struct imm_ntfy_from_isp *)pkt;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 294d77c02cdf..5b0c057def2b 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -10,6 +10,7 @@
#define ISPREG(vha) (&(vha)->hw->iobase->isp24)
#define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha) IOBAR(ISPREG(vha))
+#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
@@ -261,6 +262,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
ulong start = le32_to_cpu(ent->t262.start_addr);
ulong end = le32_to_cpu(ent->t262.end_addr);
ulong dwords;
+ int rc;
ql_dbg(ql_dbg_misc, vha, 0xd206,
"%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
@@ -308,7 +310,13 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
dwords = end - start + 1;
if (buf) {
buf += *len;
- qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
+ __func__, area, start, end);
+ return INVALID_ENTRY;
+ }
}
*len += dwords * sizeof(uint32_t);
done:
@@ -838,6 +846,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ent = qla27xx_find_entry(type)(vha, ent, buf, len);
if (!ent)
break;
+
+ if (ent == INVALID_ENTRY) {
+ *len = 0;
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Unable to capture FW dump");
+ goto bailout;
+ }
}
if (tmp->count)
@@ -847,6 +862,9 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
if (ent)
ql_dbg(ql_dbg_misc, vha, 0xd019,
"%s: missing end entry\n", __func__);
+
+bailout:
+ cpu_to_le32s(&tmp->count); /* endianize residual count */
}
static void
@@ -999,8 +1017,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
uint j;
ulong len;
void *buf = vha->hw->fw_dump;
+ uint count = vha->hw->fw_dump_mpi ? 2 : 1;
- for (j = 0; j < 2; j++, fwdt++, buf += len) {
+ for (j = 0; j < count; j++, fwdt++, buf += len) {
ql_log(ql_log_warn, vha, 0xd011,
"-> fwdt%u running...\n", j);
if (!fwdt->template) {
@@ -1010,7 +1029,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
}
len = qla27xx_execute_fwdt_template(vha,
fwdt->template, buf);
- if (len != fwdt->dump_size) {
+ if (len == 0) {
+ goto bailout;
+ } else if (len != fwdt->dump_size) {
ql_log(ql_log_warn, vha, 0xd013,
"-> fwdt%u fwdump residual=%+ld\n",
j, fwdt->dump_size - len);
@@ -1025,6 +1046,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
+bailout:
+ vha->hw->fw_dump_mpi = 0;
#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index a8f2a953ceff..03bd3b712b77 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.19-k"
+#define QLA2XXX_VERSION "10.01.00.21-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index dac9a7013208..02636b4785c5 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -640,9 +640,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
- dma_free_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
goto exit_init_fw_cb;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7a1b6c76f263..930e4803d888 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -186,7 +186,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
struct scsi_driver *drv;
unsigned int good_bytes;
- scsi_device_unbusy(sdev);
+ scsi_device_unbusy(sdev, cmd);
/*
* Clear the flags that say that the device/target/host is no longer
@@ -465,10 +465,14 @@ void scsi_attach_vpd(struct scsi_device *sdev)
return;
for (i = 4; i < vpd_buf->len; i++) {
+ if (vpd_buf->data[i] == 0x0)
+ scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
if (vpd_buf->data[i] == 0x80)
scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
if (vpd_buf->data[i] == 0x83)
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
+ if (vpd_buf->data[i] == 0x89)
+ scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
}
kfree(vpd_buf);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d323523f5f9d..44cb054d5e66 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1025,7 +1025,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
int arr_len, unsigned int off_dst)
{
- int act_len, n;
+ unsigned int act_len, n;
struct scsi_data_buffer *sdb = &scp->sdb;
off_t skip = off_dst;
@@ -1039,7 +1039,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
- n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
+ n = scsi_bufflen(scp) - (off_dst + act_len);
scsi_set_resid(scp, min(scsi_get_resid(scp), n));
return 0;
}
@@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
+ if (sdebug_num_tgts < 0) {
+ pr_err("num_tgts must be >= 0\n");
+ return -EINVAL;
+ }
+
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 91c007d26c1e..3e7a45d0daca 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -189,7 +189,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* active on the host/device.
*/
if (unbusy)
- scsi_device_unbusy(device);
+ scsi_device_unbusy(device, cmd);
/*
* Requeue this command. It will go before all other commands
@@ -321,20 +321,20 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
}
/*
- * Decrement the host_busy counter and wake up the error handler if necessary.
- * Avoid as follows that the error handler is not woken up if shost->host_busy
- * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
- * with an RCU read lock in this function to ensure that this function in its
- * entirety either finishes before scsi_eh_scmd_add() increases the
+ * Wake up the error handler if necessary. Avoid as follows that the error
+ * handler is not woken up if host in-flight requests number ==
+ * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
+ * with an RCU read lock in this function to ensure that this function in
+ * its entirety either finishes before scsi_eh_scmd_add() increases the
* host_failed counter or that it notices the shost state change made by
* scsi_eh_scmd_add().
*/
-static void scsi_dec_host_busy(struct Scsi_Host *shost)
+static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
unsigned long flags;
rcu_read_lock();
- atomic_dec(&shost->host_busy);
+ __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
@@ -344,12 +344,12 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
rcu_read_unlock();
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
struct Scsi_Host *shost = sdev->host;
struct scsi_target *starget = scsi_target(sdev);
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
@@ -430,9 +430,6 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- if (shost->can_queue > 0 &&
- atomic_read(&shost->host_busy) >= shost->can_queue)
- return true;
if (atomic_read(&shost->host_blocked) > 0)
return true;
if (shost->host_self_blocked)
@@ -1139,6 +1136,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
int retries;
+ bool in_flight;
if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
flags |= SCMD_INITIALIZED;
@@ -1147,6 +1145,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
jiffies_at_alloc = cmd->jiffies_at_alloc;
retries = cmd->retries;
+ in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
/* zero out the cmd, except for the embedded scsi_request */
memset((char *)cmd + sizeof(cmd->req), 0,
sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
@@ -1158,6 +1157,8 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies_at_alloc;
cmd->retries = retries;
+ if (in_flight)
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
scsi_add_cmd_to_list(cmd);
}
@@ -1367,16 +1368,14 @@ out_dec:
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ struct scsi_cmnd *cmd)
{
- unsigned int busy;
-
if (scsi_host_in_recovery(shost))
return 0;
- busy = atomic_inc_return(&shost->host_busy) - 1;
if (atomic_read(&shost->host_blocked) > 0) {
- if (busy)
+ if (scsi_host_busy(shost) > 0)
goto starved;
/*
@@ -1390,8 +1389,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
"unblocking host at zero depth\n"));
}
- if (shost->can_queue > 0 && busy >= shost->can_queue)
- goto starved;
if (shost->host_self_blocked)
goto starved;
@@ -1403,6 +1400,8 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
spin_unlock_irq(shost->host_lock);
}
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+
return 1;
starved:
@@ -1411,7 +1410,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
return 0;
}
@@ -1665,7 +1664,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_RESOURCE;
if (!scsi_target_queue_ready(shost, sdev))
goto out_put_budget;
- if (!scsi_host_queue_ready(q, shost, sdev))
+ if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
if (!(req->rq_flags & RQF_DONTPREP)) {
@@ -1697,7 +1696,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index c6ed0b12e807..c91fa3feb930 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -390,6 +390,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
const char *mlret_string = scsi_mlreturn_string(disposition);
const char *hb_string = scsi_hostbyte_string(cmd->result);
const char *db_string = scsi_driverbyte_string(cmd->result);
+ unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
@@ -431,10 +432,15 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (db_string)
off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=%s", db_string);
+ "driverbyte=%s ", db_string);
else
off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=0x%02x", driver_byte(cmd->result));
+ "driverbyte=0x%02x ",
+ driver_byte(cmd->result));
+
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "cmd_age=%lus", cmd_age);
+
out_printk:
dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
scsi_log_release_buffer(logbuf);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index cc2859d76d81..3bff9f7aa684 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -87,7 +87,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd);
extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
-extern void scsi_device_unbusy(struct scsi_device *sdev);
+extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index cc51f4756077..677b5c5403d2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -437,6 +437,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
+ struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
unsigned long flags;
sdev = container_of(work, struct scsi_device, ew.work);
@@ -466,16 +467,24 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
sdev->request_queue = NULL;
mutex_lock(&sdev->inquiry_mutex);
+ vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
+ lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80,
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
+ if (vpd_pg0)
+ kfree_rcu(vpd_pg0, rcu);
if (vpd_pg83)
kfree_rcu(vpd_pg83, rcu);
if (vpd_pg80)
kfree_rcu(vpd_pg80, rcu);
+ if (vpd_pg89)
+ kfree_rcu(vpd_pg89, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -868,6 +877,8 @@ static struct bin_attribute dev_attr_vpd_##_page = { \
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
+sdev_vpd_pg_attr(pg89);
+sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -1200,12 +1211,18 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
struct scsi_device *sdev = to_scsi_device(dev);
+ if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0)
+ return 0;
+
if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
return 0;
if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
return 0;
+ if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
+ return 0;
+
return S_IRUGO;
}
@@ -1248,8 +1265,10 @@ static struct attribute *scsi_sdev_attrs[] = {
};
static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+ &dev_attr_vpd_pg0,
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
+ &dev_attr_vpd_pg89,
&dev_attr_inquiry,
NULL
};
@@ -1309,7 +1328,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
device_enable_async_suspend(&sdev->sdev_gendev);
scsi_autopm_get_target(starget);
pm_runtime_set_active(&sdev->sdev_gendev);
- pm_runtime_forbid(&sdev->sdev_gendev);
+ if (!sdev->rpm_autosuspend)
+ pm_runtime_forbid(&sdev->sdev_gendev);
pm_runtime_enable(&sdev->sdev_gendev);
scsi_autopm_put_target(starget);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0f17e7dac1b0..ac35c301c792 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -9,7 +9,7 @@
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
-#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8]))
static const char *
scsi_trace_misc(struct trace_seq *, unsigned char *, int);
@@ -18,15 +18,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba = 0, txlen;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
- txlen = cdb[4];
+ /*
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
+ */
+ txlen = cdb[4] ? cdb[4] : 256;
- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);
return ret;
@@ -36,17 +39,12 @@ static const char *
scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba, txlen;
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[7] << 8);
- txlen |= cdb[8];
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be16(&cdb[7]);
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME)
@@ -61,19 +59,12 @@ static const char *
scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[6] << 24);
- txlen |= (cdb[7] << 16);
- txlen |= (cdb[8] << 8);
- txlen |= cdb[9];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u32 lba, txlen;
+
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[6]);
+
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
trace_seq_putc(p, 0);
@@ -84,23 +75,13 @@ static const char *
scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- txlen |= (cdb[10] << 24);
- txlen |= (cdb[11] << 16);
- txlen |= (cdb[12] << 8);
- txlen |= cdb[13];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u64 lba;
+ u32 txlen;
+
+ lba = get_unaligned_be64(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME_16)
@@ -115,8 +96,8 @@ static const char *
scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0, txlen = 0;
- u32 ei_lbrt = 0;
+ u64 lba;
+ u32 ei_lbrt, txlen;
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
@@ -136,26 +117,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[12] << 56);
- lba |= ((u64)cdb[13] << 48);
- lba |= ((u64)cdb[14] << 40);
- lba |= ((u64)cdb[15] << 32);
- lba |= (cdb[16] << 24);
- lba |= (cdb[17] << 16);
- lba |= (cdb[18] << 8);
- lba |= cdb[19];
- ei_lbrt |= (cdb[20] << 24);
- ei_lbrt |= (cdb[21] << 16);
- ei_lbrt |= (cdb[22] << 8);
- ei_lbrt |= cdb[23];
- txlen |= (cdb[28] << 24);
- txlen |= (cdb[29] << 16);
- txlen |= (cdb[30] << 8);
- txlen |= cdb[31];
-
- trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
- cmd, (unsigned long long)lba,
- (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+ lba = get_unaligned_be64(&cdb[12]);
+ ei_lbrt = get_unaligned_be32(&cdb[20]);
+ txlen = get_unaligned_be32(&cdb[28]);
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u",
+ cmd, lba, txlen, cdb[10] >> 5, ei_lbrt);
if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
@@ -170,7 +137,7 @@ static const char *
scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- unsigned int regions = cdb[7] << 8 | cdb[8];
+ unsigned int regions = get_unaligned_be16(&cdb[7]);
trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
trace_seq_putc(p, 0);
@@ -182,8 +149,8 @@ static const char *
scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0;
- u32 alloc_len = 0;
+ u64 lba;
+ u32 alloc_len;
switch (SERVICE_ACTION16(cdb)) {
case SAI_READ_CAPACITY_16:
@@ -197,21 +164,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- alloc_len |= (cdb[10] << 24);
- alloc_len |= (cdb[11] << 16);
- alloc_len |= (cdb[12] << 8);
- alloc_len |= cdb[13];
-
- trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
- (unsigned long long)lba, alloc_len);
+ lba = get_unaligned_be64(&cdb[2]);
+ alloc_len = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len);
out:
trace_seq_putc(p, 0);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 470ee6dc3f7e..7dc17821f873 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1702,20 +1702,30 @@ static void sd_rescan(struct device *dev)
static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ struct gendisk *disk = bdev->bd_disk;
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdev = sdkp->device;
+ void __user *p = compat_ptr(arg);
int error;
+ error = scsi_verify_blk_ioctl(bdev, cmd);
+ if (error < 0)
+ return error;
+
error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
(mode & FMODE_NDELAY) != 0);
if (error)
return error;
+
+ if (is_sed_ioctl(cmd))
+ return sed_ioctl(sdkp->opal_dev, cmd, p);
/*
* Let the static ioctl translation table take care of it.
*/
if (!sdev->host->hostt->compat_ioctl)
return -ENOIOCTLCMD;
- return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+ return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
}
#endif
@@ -3380,6 +3390,10 @@ static int sd_probe(struct device *dev)
}
blk_pm_runtime_init(sdp->request_queue, dev);
+ if (sdp->rpm_autosuspend) {
+ pm_runtime_set_autosuspend_delay(dev,
+ sdp->host->hostt->rpm_autosuspend_delay);
+ }
device_add_disk(dev, gd, NULL);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cce757506383..160748ad9c0f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -429,26 +429,33 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
- if (!access_ok(buf, count))
- return -EFAULT;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
- old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
- if (!old_hdr)
- return -ENOMEM;
- if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
- retval = -EFAULT;
- goto free_old_hdr;
- }
+ old_hdr = memdup_user(buf, SZ_SG_HEADER);
+ if (IS_ERR(old_hdr))
+ return PTR_ERR(old_hdr);
if (old_hdr->reply_len < 0) {
if (count >= SZ_SG_IO_HDR) {
+ /*
+ * This is stupid.
+ *
+ * We're copying the whole sg_io_hdr_t from user
+ * space just to get the 'pack_id' field. But the
+ * field is at different offsets for the compat
+ * case, so we'll use "get_sg_io_hdr()" to copy
+ * the whole thing and convert it.
+ *
+ * We could do something like just calculating the
+ * offset based of 'in_compat_syscall()', but the
+ * 'compat_sg_io_hdr' definition is in the wrong
+ * place for that.
+ */
sg_io_hdr_t *new_hdr;
new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
if (!new_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
- retval =__copy_from_user
- (new_hdr, buf, SZ_SG_IO_HDR);
+ retval = get_sg_io_hdr(new_hdr, buf);
req_pack_id = new_hdr->pack_id;
kfree(new_hdr);
if (retval) {
@@ -538,7 +545,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
/* Now copy the result back to the user buffer. */
if (count >= SZ_SG_HEADER) {
- if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
+ if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
@@ -589,10 +596,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
}
if (hp->masked_status || hp->host_status || hp->driver_status)
hp->info |= SG_INFO_CHECK;
- if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
- err = -EFAULT;
- goto err_out;
- }
+ err = put_sg_io_hdr(hp, buf);
err_out:
err2 = sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
@@ -627,11 +631,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
- if (!access_ok(buf, count))
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
- if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
+ if (copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
@@ -640,13 +642,15 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
+ buf += SZ_SG_HEADER;
+ if (get_user(opcode, buf))
+ return -EFAULT;
+
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
- buf += SZ_SG_HEADER;
- __get_user(opcode, buf);
mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
@@ -689,7 +693,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
- if (__copy_from_user(cmnd, buf, cmd_size))
+ if (copy_from_user(cmnd, buf, cmd_size))
return -EFAULT;
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
@@ -724,8 +728,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
if (count < SZ_SG_IO_HDR)
return -EINVAL;
- if (!access_ok(buf, count))
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
@@ -735,7 +737,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
}
srp->sg_io_owned = sg_io_owned;
hp = &srp->header;
- if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
+ if (get_sg_io_hdr(hp, buf)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
@@ -763,11 +765,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EMSGSIZE;
}
- if (!access_ok(hp->cmdp, hp->cmd_len)) {
- sg_remove_request(sfp, srp);
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
- }
- if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
+ if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
@@ -893,6 +891,33 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
}
}
+#ifdef CONFIG_COMPAT
+struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
+ char req_state;
+ char orphan;
+ char sg_io_owned;
+ char problem;
+ int pack_id;
+ compat_uptr_t usr_ptr;
+ unsigned int duration;
+ int unused;
+};
+
+static int put_compat_request_table(struct compat_sg_req_info __user *o,
+ struct sg_req_info *rinfo)
+{
+ int i;
+ for (i = 0; i < SG_MAX_QUEUE; i++) {
+ if (copy_to_user(o + i, rinfo + i, offsetof(sg_req_info_t, usr_ptr)) ||
+ put_user((uintptr_t)rinfo[i].usr_ptr, &o[i].usr_ptr) ||
+ put_user(rinfo[i].duration, &o[i].duration) ||
+ put_user(rinfo[i].unused, &o[i].unused))
+ return -EFAULT;
+ }
+ return 0;
+}
+#endif
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -917,8 +942,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
- if (!access_ok(p, SZ_SG_IO_HDR))
- return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
@@ -963,26 +986,21 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
case SG_GET_LOW_DMA:
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
case SG_GET_SCSI_ID:
- if (!access_ok(p, sizeof (sg_scsi_id_t)))
- return -EFAULT;
- else {
- sg_scsi_id_t __user *sg_idp = p;
+ {
+ sg_scsi_id_t v;
if (atomic_read(&sdp->detaching))
return -ENODEV;
- __put_user((int) sdp->device->host->host_no,
- &sg_idp->host_no);
- __put_user((int) sdp->device->channel,
- &sg_idp->channel);
- __put_user((int) sdp->device->id, &sg_idp->scsi_id);
- __put_user((int) sdp->device->lun, &sg_idp->lun);
- __put_user((int) sdp->device->type, &sg_idp->scsi_type);
- __put_user((short) sdp->device->host->cmd_per_lun,
- &sg_idp->h_cmd_per_lun);
- __put_user((short) sdp->device->queue_depth,
- &sg_idp->d_queue_depth);
- __put_user(0, &sg_idp->unused[0]);
- __put_user(0, &sg_idp->unused[1]);
+ memset(&v, 0, sizeof(v));
+ v.host_no = sdp->device->host->host_no;
+ v.channel = sdp->device->channel;
+ v.scsi_id = sdp->device->id;
+ v.lun = sdp->device->lun;
+ v.scsi_type = sdp->device->type;
+ v.h_cmd_per_lun = sdp->device->host->cmd_per_lun;
+ v.d_queue_depth = sdp->device->queue_depth;
+ if (copy_to_user(p, &v, sizeof(sg_scsi_id_t)))
+ return -EFAULT;
return 0;
}
case SG_SET_FORCE_PACK_ID:
@@ -992,20 +1010,16 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
- if (!access_ok(ip, sizeof (int)))
- return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
- __put_user(srp->header.pack_id, ip);
- return 0;
+ return put_user(srp->header.pack_id, ip);
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- __put_user(-1, ip);
- return 0;
+ return put_user(-1, ip);
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
@@ -1073,9 +1087,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
val = (sdp->device ? 1 : 0);
return put_user(val, ip);
case SG_GET_REQUEST_TABLE:
- if (!access_ok(p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
- return -EFAULT;
- else {
+ {
sg_req_info_t *rinfo;
rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO,
@@ -1085,8 +1097,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
read_lock_irqsave(&sfp->rq_list_lock, iflags);
sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- result = __copy_to_user(p, rinfo,
- SZ_SG_REQ_INFO * SG_MAX_QUEUE);
+ #ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ result = put_compat_request_table(p, rinfo);
+ else
+ #endif
+ result = copy_to_user(p, rinfo,
+ SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
return result;
@@ -1797,7 +1814,14 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
struct iovec *iov = NULL;
struct iov_iter i;
- res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ res = compat_import_iovec(rw, hp->dxferp, iov_count,
+ 0, &iov, &i);
+ else
+#endif
+ res = import_iovec(rw, hp->dxferp, iov_count,
+ 0, &iov, &i);
if (res < 0)
return res;
@@ -1984,12 +2008,12 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
num = 1 << (PAGE_SHIFT + schp->page_order);
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 79d2af36f655..1129fe7a27ed 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -276,7 +276,9 @@ struct pqi_raid_path_request {
u8 reserved4 : 2;
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
- u8 cdb[32];
+ u8 cdb[16];
+ u8 reserved6[12];
+ __le32 timeout;
struct pqi_sg_descriptor
sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
@@ -385,7 +387,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[4];
+ u8 reserved[2];
+ __le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
__le16 outbound_queue_id_to_manage;
@@ -445,7 +448,7 @@ struct pqi_vendor_general_response {
struct pqi_ofa_memory {
__le64 signature; /* "OFA_QRM" */
- __le16 version; /* version of this struct(1 = 1st version) */
+ __le16 version; /* version of this struct (1 = 1st version) */
u8 reserved[62];
__le32 bytes_allocated; /* total allocated memory in bytes */
__le16 num_memory_descriptors;
@@ -761,6 +764,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_OFA 0
#define PQI_FIRMWARE_FEATURE_SMP 1
#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
+#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -826,10 +831,17 @@ union pqi_reset_register {
struct report_lun_header {
__be32 list_length;
- u8 extended_response;
+ u8 flags;
u8 reserved[3];
};
+/* for flags field of struct report_lun_header */
+#define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID (1 << 0)
+#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5)
+#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6)
+
+#define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1)
+
struct report_log_lun_extended_entry {
u8 lunid[8];
u8 volume_id[16];
@@ -851,7 +863,7 @@ struct report_phys_lun_extended_entry {
};
/* for device_flags field of struct report_phys_lun_extended_entry */
-#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8
+#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8
struct report_phys_lun_extended {
struct report_lun_header header;
@@ -864,7 +876,7 @@ struct raid_map_disk_data {
u8 reserved[2];
};
-/* constants for flags field of RAID map */
+/* for flags field of RAID map */
#define RAID_MAP_ENCRYPTION_ENABLED 0x1
struct raid_map {
@@ -907,7 +919,6 @@ struct pqi_scsi_dev {
u8 scsi3addr[8];
__be64 wwid;
u8 volume_id[16];
- u8 unique_id[16];
u8 is_physical_device : 1;
u8 is_external_raid_device : 1;
u8 is_expander_smp_device : 1;
@@ -954,13 +965,9 @@ struct pqi_scsi_dev {
};
/* VPD inquiry pages */
-#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
-#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
-#define SCSI_VPD_HEADER_SZ 4
-#define SCSI_VPD_DEVICE_ID_IDX 8 /* Index of page id in page */
#define VPD_PAGE (1 << 8)
@@ -1130,13 +1137,16 @@ struct pqi_ctrl_info {
struct mutex ofa_mutex; /* serialize ofa */
bool controller_online;
bool block_requests;
- bool in_shutdown;
+ bool block_device_reset;
bool in_ofa;
+ bool in_shutdown;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
u8 pqi_reset_quiesce_supported : 1;
u8 soft_reset_handshake_supported : 1;
+ u8 raid_iu_timeout_supported: 1;
+ u8 tmf_iu_timeout_supported: 1;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -1170,9 +1180,10 @@ struct pqi_ctrl_info {
spinlock_t raid_bypass_retry_list_lock;
struct work_struct raid_bypass_retry_work;
- struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
- dma_addr_t pqi_ofa_mem_dma_handle;
- void **pqi_ofa_chunk_virt_addr;
+ struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
+ dma_addr_t pqi_ofa_mem_dma_handle;
+ void **pqi_ofa_chunk_virt_addr;
+ atomic_t sync_cmds_outstanding;
};
enum pqi_ctrl_mode {
@@ -1191,10 +1202,6 @@ enum pqi_ctrl_mode {
#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
#define CISS_GET_RAID_MAP 0xc8
-/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
-#define CISS_REPORT_LOG_EXTENDED 0x1
-#define CISS_REPORT_PHYS_EXTENDED 0x2
-
/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
@@ -1208,7 +1215,7 @@ enum pqi_ctrl_mode {
#define BMIC_SET_DIAG_OPTIONS 0xf4
#define BMIC_SENSE_DIAG_OPTIONS 0xf5
-#define CSMI_CC_SAS_SMP_PASSTHRU 0X17
+#define CSMI_CC_SAS_SMP_PASSTHRU 0x17
#define SA_FLUSH_CACHE 0x1
@@ -1244,10 +1251,12 @@ struct bmic_sense_subsystem_info {
u8 ctrl_serial_number[16];
};
-#define SA_EXPANDER_SMP_DEVICE 0x05
-#define SA_CONTROLLER_DEVICE 0x07
-/*SCSI Invalid Device Type for SAS devices*/
-#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff
+/* constants for device_type field */
+#define SA_DEVICE_TYPE_SATA 0x1
+#define SA_DEVICE_TYPE_SAS 0x2
+#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5
+#define SA_DEVICE_TYPE_CONTROLLER 0x7
+#define SA_DEVICE_TYPE_NVME 0x9
struct bmic_identify_physical_device {
u8 scsi_bus; /* SCSI Bus number on controller */
@@ -1273,7 +1282,7 @@ struct bmic_identify_physical_device {
__le32 rpm; /* drive rotational speed in RPM */
u8 device_type; /* type of drive */
u8 sata_version; /* only valid when device_type = */
- /* BMIC_DEVICE_TYPE_SATA */
+ /* SA_DEVICE_TYPE_SATA */
__le64 big_total_block_count;
__le64 ris_starting_lba;
__le32 ris_size;
@@ -1396,18 +1405,6 @@ struct bmic_diag_options {
#pragma pack()
-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
-{
- void *hostdata = shost_priv(shost);
-
- return *((struct pqi_ctrl_info **)hostdata);
-}
-
-static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
- return !ctrl_info->controller_online;
-}
-
static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
atomic_inc(&ctrl_info->num_busy_threads);
@@ -1418,9 +1415,11 @@ static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
atomic_dec(&ctrl_info->num_busy_threads);
}
-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
- return ctrl_info->block_requests;
+ void *hostdata = shost_priv(shost);
+
+ return *((struct pqi_ctrl_info **)hostdata);
}
void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ea5409bebf57..7b7ef3acb504 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.8-026"
+#define DRIVER_VERSION "1.2.10-025"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 8
-#define DRIVER_REVISION 26
+#define DRIVER_RELEASE 10
+#define DRIVER_REVISION 25
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -211,6 +211,11 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
return scsi3addr[2] != 0;
}
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ return !ctrl_info->controller_online;
+}
+
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
if (ctrl_info->controller_online)
@@ -235,6 +240,21 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
sis_write_driver_scratch(ctrl_info, mode);
}
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_device_reset = true;
+}
+
+static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_device_reset;
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_requests;
+}
+
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = true;
@@ -331,6 +351,16 @@ static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
return device->in_remove && !ctrl_info->in_shutdown;
}
+static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->in_shutdown = true;
+}
+
+static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->in_shutdown;
+}
+
static inline void pqi_schedule_rescan_worker_with_delay(
struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
@@ -360,6 +390,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
+static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_work_sync(&ctrl_info->event_work);
+}
+
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
if (!ctrl_info->heartbeat_counter)
@@ -377,7 +412,7 @@ static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
}
static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
- u8 clear)
+ u8 clear)
{
u8 status;
@@ -462,9 +497,9 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
request->data_direction = SOP_READ_FLAG;
cdb[0] = cmd;
if (cmd == CISS_REPORT_PHYS)
- cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+ cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
else
- cdb[1] = CISS_REPORT_LOG_EXTENDED;
+ cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
@@ -567,13 +602,12 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
}
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
- struct pqi_raid_error_info *error_info,
- unsigned long timeout_msecs)
+ u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+ struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
int rc;
- enum dma_data_direction dir;
struct pqi_raid_path_request request;
+ enum dma_data_direction dir;
rc = pqi_build_raid_path_request(ctrl_info, &request,
cmd, scsi3addr, buffer,
@@ -581,44 +615,44 @@ static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
if (rc)
return rc;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, error_info, timeout_msecs);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ error_info, timeout_msecs);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
-/* Helper functions for pqi_send_scsi_raid_request */
+/* helper functions for pqi_send_scsi_raid_request */
static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length)
+ u8 cmd, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+ buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}
static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length,
- struct pqi_raid_error_info *error_info)
+ u8 cmd, void *buffer, size_t buffer_length,
+ struct pqi_raid_error_info *error_info)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+ buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}
-
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
- struct bmic_identify_controller *buffer)
+ struct bmic_identify_controller *buffer)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
- buffer, sizeof(*buffer));
+ buffer, sizeof(*buffer));
}
static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
- struct bmic_sense_subsystem_info *sense_info)
+ struct bmic_sense_subsystem_info *sense_info)
{
return pqi_send_ctrl_raid_request(ctrl_info,
- BMIC_SENSE_SUBSYSTEM_INFORMATION,
- sense_info, sizeof(*sense_info));
+ BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
+ sizeof(*sense_info));
}
static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
@@ -628,83 +662,9 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}
-static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u16 vpd_page)
-{
- int rc;
- int i;
- int pages;
- unsigned char *buf, bufsize;
-
- buf = kzalloc(256, GFP_KERNEL);
- if (!buf)
- return false;
-
- /* Get the size of the page list first */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, SCSI_VPD_HEADER_SZ);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
- bufsize = pages + SCSI_VPD_HEADER_SZ;
- else
- bufsize = 255;
-
- /* Get the whole VPD page list */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, bufsize);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- for (i = 1; i <= pages; i++)
- if (buf[3 + i] == vpd_page)
- goto exit_supported;
-
-exit_unsupported:
- kfree(buf);
- return false;
-
-exit_supported:
- kfree(buf);
- return true;
-}
-
-static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u8 *device_id, int buflen)
-{
- int rc;
- unsigned char *buf;
-
- if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
- return 1; /* function not supported */
-
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_DEVICE_ID,
- buf, 64);
- if (rc == 0) {
- if (buflen > 16)
- buflen = 16;
- memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
- }
-
- kfree(buf);
-
- return rc;
-}
-
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
- struct bmic_identify_physical_device *buffer,
- size_t buffer_length)
+ struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
int rc;
enum dma_data_direction dir;
@@ -725,6 +685,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
0, NULL, NO_TIMEOUT);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
@@ -763,7 +724,7 @@ int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, error_info);
}
-#define PQI_FETCH_PTRAID_DATA (1UL<<31)
+#define PQI_FETCH_PTRAID_DATA (1 << 31)
static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
@@ -775,14 +736,15 @@ static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
return -ENOMEM;
rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ diag, sizeof(*diag));
if (rc)
goto out;
diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
- rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
+ sizeof(*diag));
+
out:
kfree(diag);
@@ -793,7 +755,7 @@ static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
- buffer, buffer_length);
+ buffer, buffer_length);
}
#pragma pack(1)
@@ -946,7 +908,7 @@ static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
- buffer_length);
+ buffer_length);
}
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1280,9 +1242,9 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
-#define RAID_BYPASS_STATUS 4
-#define RAID_BYPASS_CONFIGURED 0x1
-#define RAID_BYPASS_ENABLED 0x2
+#define RAID_BYPASS_STATUS 4
+#define RAID_BYPASS_CONFIGURED 0x1
+#define RAID_BYPASS_ENABLED 0x2
bypass_status = buffer[RAID_BYPASS_STATUS];
device->raid_bypass_configured =
@@ -1385,14 +1347,6 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
}
}
- if (pqi_get_device_id(ctrl_info, device->scsi3addr,
- device->unique_id, sizeof(device->unique_id)) < 0)
- dev_warn(&ctrl_info->pci_dev->dev,
- "Can't get device id for scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target,
- device->lun);
-
out:
kfree(buffer);
@@ -1413,6 +1367,7 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
return;
}
+
device->box_index = id_phys->box_index;
device->phys_box_on_bus = id_phys->phys_box_on_bus;
device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
@@ -1828,7 +1783,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device = new_device_list[i];
find_result = pqi_scsi_find_entry(ctrl_info, device,
- &matching_device);
+ &matching_device);
switch (find_result) {
case DEVICE_SAME:
@@ -2057,9 +2012,8 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
rc = -ENOMEM;
goto out;
}
- if (pqi_hide_vsep) {
- int i;
+ if (pqi_hide_vsep) {
for (i = num_physicals - 1; i >= 0; i--) {
phys_lun_ext_entry =
&physdev_list->lun_entries[i];
@@ -2132,7 +2086,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device->is_physical_device = is_physical_device;
if (is_physical_device) {
if (phys_lun_ext_entry->device_type ==
- SA_EXPANDER_SMP_DEVICE)
+ SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
device->is_external_raid_device =
@@ -2169,16 +2123,13 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (device->is_physical_device) {
device->wwid = phys_lun_ext_entry->wwid;
if ((phys_lun_ext_entry->device_flags &
- REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
+ CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
phys_lun_ext_entry->aio_handle) {
device->aio_enabled = true;
- device->aio_handle =
- phys_lun_ext_entry->aio_handle;
+ device->aio_handle =
+ phys_lun_ext_entry->aio_handle;
}
-
- pqi_get_physical_disk_info(ctrl_info,
- device, id_phys);
-
+ pqi_get_physical_disk_info(ctrl_info, device, id_phys);
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
sizeof(device->volume_id));
@@ -3158,7 +3109,7 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
}
static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
- enum pqi_soft_reset_status reset_status)
+ enum pqi_soft_reset_status reset_status)
{
int rc;
@@ -3202,8 +3153,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
if (event_id == PQI_EVENT_OFA_QUIESCE) {
dev_info(&ctrl_info->pci_dev->dev,
- "Received Online Firmware Activation quiesce event for controller %u\n",
- ctrl_info->ctrl_id);
+ "Received Online Firmware Activation quiesce event for controller %u\n",
+ ctrl_info->ctrl_id);
pqi_ofa_ctrl_quiesce(ctrl_info);
pqi_acknowledge_event(ctrl_info, event);
if (ctrl_info->soft_reset_handshake_supported) {
@@ -3223,8 +3174,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
pqi_ofa_free_host_buffer(ctrl_info);
pqi_acknowledge_event(ctrl_info, event);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation(%u) cancel reason : %u\n",
- ctrl_info->ctrl_id, event->ofa_cancel_reason);
+ "Online Firmware Activation(%u) cancel reason : %u\n",
+ ctrl_info->ctrl_id, event->ofa_cancel_reason);
}
mutex_unlock(&ctrl_info->ofa_mutex);
@@ -3403,7 +3354,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
#define PQI_LEGACY_INTX_MASK 0x1
static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
- bool enable_intx)
+ bool enable_intx)
{
u32 intx_mask;
struct pqi_device_registers __iomem *pqi_registers;
@@ -3841,7 +3792,7 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
&pqi_registers->admin_oq_pi_addr);
reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
- (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+ (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
(admin_queues->int_msg_num << 16);
writel(reg, &pqi_registers->admin_iq_num_elements);
writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
@@ -4048,8 +3999,8 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
- *error_info)
+static int pqi_process_raid_io_error_synchronous(
+ struct pqi_raid_error_info *error_info)
{
int rc = -EIO;
@@ -4122,6 +4073,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
goto out;
}
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
+
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -4168,6 +4121,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_free_io_request(io_request);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
up(&ctrl_info->sync_request_sem);
@@ -4665,11 +4619,11 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
- ctrl_info->error_buffer_length,
- &ctrl_info->error_buffer_dma_handle,
- GFP_KERNEL);
+ ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->error_buffer)
return -ENOMEM;
@@ -5402,7 +5356,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
pqi_ctrl_busy(ctrl_info);
if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
- pqi_ctrl_in_ofa(ctrl_info)) {
+ pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5419,7 +5373,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->raid_bypass_enabled &&
- !blk_rq_is_passthrough(scmd->request)) {
+ !blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
@@ -5650,6 +5604,18 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
+{
+ while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5658,7 +5624,8 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-#define PQI_LUN_RESET_TIMEOUT_SECS 10
+#define PQI_LUN_RESET_TIMEOUT_SECS 30
+#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct completion *wait)
@@ -5667,7 +5634,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
rc = 0;
break;
}
@@ -5704,6 +5671,9 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+ if (ctrl_info->tmf_iu_timeout_supported)
+ put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
+ &request->timeout);
pqi_start_io(ctrl_info,
&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
@@ -5733,7 +5703,7 @@ static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
for (retries = 0;;) {
rc = pqi_lun_reset(ctrl_info, device);
- if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
+ if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -5787,17 +5757,17 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
shost->host_no, device->bus, device->target, device->lun);
pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "controller %u offlined - cannot send device reset\n",
- ctrl_info->ctrl_id);
+ if (pqi_ctrl_offline(ctrl_info) ||
+ pqi_device_reset_blocked(ctrl_info)) {
rc = FAILED;
goto out;
}
pqi_wait_until_ofa_finished(ctrl_info);
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
rc = pqi_device_reset(ctrl_info, device);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
dev_err(&ctrl_info->pci_dev->dev,
@@ -6066,6 +6036,9 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
put_unaligned_le16(iu_length, &request.header.iu_length);
+ if (ctrl_info->raid_iu_timeout_supported)
+ put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
+
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
@@ -6119,7 +6092,7 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
ctrl_info = shost_to_hba(sdev->host);
- if (pqi_ctrl_in_ofa(ctrl_info))
+ if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
return -EBUSY;
switch (cmd) {
@@ -6160,14 +6133,8 @@ static ssize_t pqi_firmware_version_show(struct device *dev,
static ssize_t pqi_driver_version_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- struct Scsi_Host *shost;
- struct pqi_ctrl_info *ctrl_info;
-
- shost = class_to_shost(dev);
- ctrl_info = shost_to_hba(shost);
-
- return snprintf(buffer, PAGE_SIZE,
- "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ DRIVER_VERSION BUILD_TIMESTAMP);
}
static ssize_t pqi_serial_number_show(struct device *dev,
@@ -6283,7 +6250,7 @@ static ssize_t pqi_unique_id_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned char uid[16];
+ u8 unique_id[16];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6296,16 +6263,22 @@ static ssize_t pqi_unique_id_show(struct device *dev,
flags);
return -ENODEV;
}
- memcpy(uid, device->unique_id, sizeof(uid));
+
+ if (device->is_physical_device) {
+ memset(unique_id, 0, 8);
+ memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
+ } else {
+ memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
+ }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return snprintf(buffer, PAGE_SIZE,
"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
- uid[0], uid[1], uid[2], uid[3],
- uid[4], uid[5], uid[6], uid[7],
- uid[8], uid[9], uid[10], uid[11],
- uid[12], uid[13], uid[14], uid[15]);
+ unique_id[0], unique_id[1], unique_id[2], unique_id[3],
+ unique_id[4], unique_id[5], unique_id[6], unique_id[7],
+ unique_id[8], unique_id[9], unique_id[10], unique_id[11],
+ unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
@@ -6328,6 +6301,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
flags);
return -ENODEV;
}
+
memcpy(lunid, device->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6335,7 +6309,8 @@ static ssize_t pqi_lunid_show(struct device *dev,
return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
-#define MAX_PATHS 8
+#define MAX_PATHS 8
+
static ssize_t pqi_path_info_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -6347,9 +6322,9 @@ static ssize_t pqi_path_info_show(struct device *dev,
int output_len = 0;
u8 box;
u8 bay;
- u8 path_map_index = 0;
+ u8 path_map_index;
char *active;
- unsigned char phys_connector[2];
+ u8 phys_connector[2];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6365,7 +6340,7 @@ static ssize_t pqi_path_info_show(struct device *dev,
bay = device->bay;
for (i = 0; i < MAX_PATHS; i++) {
- path_map_index = 1<<i;
+ path_map_index = 1 << i;
if (i == device->active_path_index)
active = "Active";
else if (device->path_map & path_map_index)
@@ -6416,10 +6391,10 @@ end_buffer:
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
return output_len;
}
-
static ssize_t pqi_sas_address_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -6440,6 +6415,7 @@ static ssize_t pqi_sas_address_show(struct device *dev,
flags);
return -ENODEV;
}
+
sas_address = device->sas_address;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6844,6 +6820,27 @@ static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
firmware_feature->feature_name);
}
+static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ switch (firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
+ ctrl_info->soft_reset_handshake_supported =
+ firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
+ ctrl_info->raid_iu_timeout_supported =
+ firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
+ ctrl_info->tmf_iu_timeout_supported =
+ firmware_feature->enabled;
+ break;
+ }
+
+ pqi_firmware_feature_status(ctrl_info, firmware_feature);
+}
+
static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
struct pqi_firmware_feature *firmware_feature)
{
@@ -6867,7 +6864,17 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
{
.feature_name = "New Soft Reset Handshake",
.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
- .feature_status = pqi_firmware_feature_status,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "TMF IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
},
};
@@ -6921,7 +6928,6 @@ static void pqi_process_firmware_features(
return;
}
- ctrl_info->soft_reset_handshake_supported = false;
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (!pqi_firmware_features[i].supported)
continue;
@@ -6929,10 +6935,6 @@ static void pqi_process_firmware_features(
firmware_features_iomem_addr,
pqi_firmware_features[i].feature_bit)) {
pqi_firmware_features[i].enabled = true;
- if (pqi_firmware_features[i].feature_bit ==
- PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
- ctrl_info->soft_reset_handshake_supported =
- true;
}
pqi_firmware_feature_update(ctrl_info,
&pqi_firmware_features[i]);
@@ -7074,13 +7076,20 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
- rc = pqi_force_sis_mode(ctrl_info);
- if (rc)
- return rc;
+ if (reset_devices) {
+ sis_soft_reset(ctrl_info);
+ msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ } else {
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
+ }
/*
* Wait until the controller is ready to start accepting SIS
@@ -7386,7 +7395,7 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error obtaining product detail\n");
+ "error obtaining product details\n");
return rc;
}
@@ -7514,6 +7523,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
atomic_set(&ctrl_info->num_interrupts, 0);
+ atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7721,6 +7731,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
dev_err(dev, "Failed to allocate host buffer of size = %u",
bytes_requested);
}
+
+ return;
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
@@ -7787,8 +7799,6 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
0, NULL, NO_TIMEOUT);
}
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
-
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
@@ -7956,28 +7966,73 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
pqi_remove_ctrl(ctrl_info);
}
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_io_request *io_request;
+ struct scsi_cmnd *scmd;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_read(&io_request->refcount) == 0)
+ continue;
+ scmd = io_request->scmd;
+ WARN_ON(scmd != NULL); /* IO command from SML */
+ WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
+ }
+}
+
static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
ctrl_info = pci_get_drvdata(pci_dev);
- if (!ctrl_info)
- goto error;
+ if (!ctrl_info) {
+ dev_err(&pci_dev->dev,
+ "cache could not be flushed\n");
+ return;
+ }
+
+ pqi_disable_events(ctrl_info);
+ pqi_wait_until_ofa_finished(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_event_worker(ctrl_info);
+
+ pqi_ctrl_shutdown_start(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending I/O failed\n");
+ return;
+ }
+
+ pqi_ctrl_block_device_reset(ctrl_info);
+ pqi_wait_until_lun_reset_finished(ctrl_info);
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
- pqi_free_interrupts(ctrl_info);
- pqi_reset(ctrl_info);
- if (rc == 0)
+ if (rc)
+ dev_err(&pci_dev->dev,
+ "unable to flush controller cache\n");
+
+ pqi_ctrl_block_requests(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending sync cmds failed\n");
return;
+ }
-error:
- dev_warn(&pci_dev->dev,
- "unable to flush controller cache\n");
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
@@ -8686,6 +8741,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
cdb) != 32);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ timeout) != 60);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
sg_descriptors) != 64);
BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
@@ -8840,6 +8897,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
nexus_id) != 10);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ timeout) != 14);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
lun_number) != 16);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
protocol_specific) != 24);
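As an editorial aside on the smartpqi changes above: the sync_cmds_outstanding counter (initialized in pqi_alloc_ctrl_info, incremented/decremented around pqi_submit_raid_request_synchronous and the device-reset handler, and drained in pqi_ctrl_wait_for_pending_sync_cmds) follows a simple bracket-and-drain pattern. A minimal standalone sketch of that pattern, with placeholder helpers rather than the driver's real functions:

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* placeholders, not smartpqi functions */
static int do_hw_command(void);
static bool controller_is_dead(void);

static atomic_t sync_cmds_outstanding = ATOMIC_INIT(0);

static int issue_sync_cmd(void)
{
	int rc;

	atomic_inc(&sync_cmds_outstanding);	/* command in flight */
	rc = do_hw_command();
	atomic_dec(&sync_cmds_outstanding);	/* command retired */

	return rc;
}

static int drain_sync_cmds(void)
{
	while (atomic_read(&sync_cmds_outstanding)) {
		if (controller_is_dead())
			return -ENXIO;		/* give up instead of spinning forever */
		usleep_range(1000, 2000);	/* poll every 1-2 ms */
	}

	return 0;
}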
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 6776dfc1d317..b7289112455c 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -45,9 +45,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
struct sas_phy *phy = pqi_sas_phy->phy;
sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (pqi_sas_phy->added_to_port)
list_del(&pqi_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(pqi_sas_phy);
}
@@ -312,7 +312,6 @@ static int pqi_sas_get_linkerrors(struct sas_phy *phy)
static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
u64 *identifier)
{
-
int rc;
unsigned long flags;
struct Scsi_Host *shost;
@@ -361,7 +360,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
}
}
- if (found_device->phy_connected_dev_type != SA_CONTROLLER_DEVICE) {
+ if (found_device->phy_connected_dev_type != SA_DEVICE_TYPE_CONTROLLER) {
rc = -EINVAL;
goto out;
}
@@ -382,12 +381,10 @@ out:
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return rc;
-
}
static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
{
-
int rc;
unsigned long flags;
struct pqi_ctrl_info *ctrl_info;
@@ -482,7 +479,6 @@ pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy,
req_size -= SMP_CRC_FIELD_LENGTH;
put_unaligned_le32(req_size, &parameters->request_length);
-
put_unaligned_le32(resp_size, &parameters->response_length);
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -512,12 +508,12 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
int rc;
- struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ struct pqi_ctrl_info *ctrl_info;
struct bmic_csmi_smp_passthru_buffer *smp_buf;
struct pqi_raid_error_info error_info;
unsigned int reslen = 0;
- pqi_ctrl_busy(ctrl_info);
+ ctrl_info = shost_to_hba(shost);
if (job->reply_payload.payload_len == 0) {
rc = -ENOMEM;
@@ -539,16 +535,6 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto out;
}
- if (pqi_ctrl_offline(ctrl_info)) {
- rc = -ENXIO;
- goto out;
- }
-
- if (pqi_ctrl_blocked(ctrl_info)) {
- rc = -EBUSY;
- goto out;
- }
-
smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job);
if (!smp_buf) {
rc = -ENOMEM;
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index e3b0ce25162b..17a56c87d383 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -61,6 +61,7 @@
#define VENDOR_NEC 2
#define VENDOR_TOSHIBA 3
#define VENDOR_WRITER 4 /* pre-scsi3 writers */
+#define VENDOR_CYGNAL_85ED 5 /* CD-on-a-chip */
#define VENDOR_TIMEOUT 30*HZ
@@ -99,6 +100,23 @@ void sr_vendor_init(Scsi_CD *cd)
} else if (!strncmp(vendor, "TOSHIBA", 7)) {
cd->vendor = VENDOR_TOSHIBA;
+ } else if (!strncmp(vendor, "Beurer", 6) &&
+ !strncmp(model, "Gluco Memory", 12)) {
+ /* The Beurer GL50 evo uses a Cygnal-manufactured CD-on-a-chip
+ that only accepts a subset of SCSI commands. Most of the
+ not-implemented commands are fine to fail, but a few,
+ particularly around the MMC or Audio commands, will put the
+ device into an unrecoverable state, so they need to be
+ avoided at all costs.
+ */
+ cd->vendor = VENDOR_CYGNAL_85ED;
+ cd->cdi.mask |= (
+ CDC_MULTI_SESSION |
+ CDC_CLOSE_TRAY | CDC_OPEN_TRAY |
+ CDC_LOCK |
+ CDC_GENERIC_PACKET |
+ CDC_PLAY_AUDIO
+ );
}
#endif
}
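For context on how the extra cdi.mask bits above take effect: the cdrom core gates optional features on the per-device mask before anything reaches the hardware, so the masked audio and packet commands fail cleanly instead of being sent to the CD-on-a-chip. A rough sketch of that check, assuming the usual capability-minus-mask pattern used in drivers/cdrom/cdrom.c (example_can and example_play_audio are illustrative names, not the core's real helpers):

#include <linux/cdrom.h>
#include <linux/errno.h>

/* usable only if the driver advertises it and the mask does not veto it */
static bool example_can(const struct cdrom_device_info *cdi, int type)
{
	return (cdi->ops->capability & ~cdi->mask & type) != 0;
}

static int example_play_audio(struct cdrom_device_info *cdi)
{
	if (!example_can(cdi, CDC_PLAY_AUDIO))
		return -ENOSYS;	/* masked: never reaches the device */
	/* ... issue the audio command ... */
	return 0;
}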
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e3266a64a477..9e3fff2de83e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -22,6 +22,7 @@ static const char *verstr = "20160209";
#include <linux/module.h>
+#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
@@ -3800,14 +3801,11 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
if (STp->cleaning_req)
mt_status.mt_gstat |= GMT_CLN(0xffffffff);
- i = copy_to_user(p, &mt_status, sizeof(struct mtget));
- if (i) {
- retval = (-EFAULT);
+ retval = put_user_mtget(p, &mt_status);
+ if (retval)
goto out;
- }
STp->recover_reg = 0; /* Clear after read */
- retval = 0;
goto out;
} /* End of MTIOCGET */
if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) {
@@ -3821,9 +3819,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
goto out;
}
mt_pos.mt_blkno = blk;
- i = copy_to_user(p, &mt_pos, sizeof(struct mtpos));
- if (i)
- retval = (-EFAULT);
+ retval = put_user_mtpos(p, &mt_pos);
goto out;
}
mutex_unlock(&STp->lock);
@@ -3857,14 +3853,26 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
}
#ifdef CONFIG_COMPAT
-static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
+ void __user *p = compat_ptr(arg);
struct scsi_tape *STp = file->private_data;
struct scsi_device *sdev = STp->device;
int ret = -ENOIOCTLCMD;
+
+ /* argument conversion is handled using put_user_mtpos/put_user_mtget */
+ switch (cmd_in) {
+ case MTIOCTOP:
+ return st_ioctl(file, MTIOCTOP, (unsigned long)p);
+ case MTIOCPOS32:
+ return st_ioctl(file, MTIOCPOS, (unsigned long)p);
+ case MTIOCGET32:
+ return st_ioctl(file, MTIOCGET, (unsigned long)p);
+ }
+
if (sdev->host->hostt->compat_ioctl) {
- ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+ ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
}
return ret;
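The compat path above forwards the 32-bit ioctl numbers to the native handler after converting the user pointer with compat_ptr(); the struct layout differences are then dealt with inside put_user_mtget()/put_user_mtpos(). A minimal sketch of that forwarding idiom, using placeholder names (FOO_GET, foo_ioctl) rather than st.c's:

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/ioctl.h>

#define FOO_GET _IOR('f', 1, int)	/* placeholder ioctl number */

/* native handler, placeholder */
static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

#ifdef CONFIG_COMPAT
static long foo_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	/* compat_ptr() turns a 32-bit user pointer into a void __user * */
	void __user *p = compat_ptr(arg);

	switch (cmd) {
	case FOO_GET:	/* same layout on 32-bit and 64-bit, reuse native path */
		return foo_ioctl(file, FOO_GET, (unsigned long)p);
	}

	return -ENOIOCTLCMD;	/* let the caller fall back */
}
#endif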
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 542d2bac2922..f8faf8b3d965 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1727,6 +1727,13 @@ static const struct hv_vmbus_device_id id_table[] = {
MODULE_DEVICE_TABLE(vmbus, id_table);
+static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID };
+
+static bool hv_dev_is_fc(struct hv_device *hv_dev)
+{
+ return guid_equal(&fc_guid.guid, &hv_dev->dev_type);
+}
+
static int storvsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1934,11 +1941,45 @@ static int storvsc_remove(struct hv_device *dev)
return 0;
}
+static int storvsc_suspend(struct hv_device *hv_dev)
+{
+ struct storvsc_device *stor_device = hv_get_drvdata(hv_dev);
+ struct Scsi_Host *host = stor_device->host;
+ struct hv_host_device *host_dev = shost_priv(host);
+
+ storvsc_wait_to_drain(stor_device);
+
+ drain_workqueue(host_dev->handle_error_wq);
+
+ vmbus_close(hv_dev->channel);
+
+ memset(stor_device->stor_chns, 0,
+ num_possible_cpus() * sizeof(void *));
+
+ kfree(stor_device->stor_chns);
+ stor_device->stor_chns = NULL;
+
+ cpumask_clear(&stor_device->alloced_cpus);
+
+ return 0;
+}
+
+static int storvsc_resume(struct hv_device *hv_dev)
+{
+ int ret;
+
+ ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
+ hv_dev_is_fc(hv_dev));
+ return ret;
+}
+
static struct hv_driver storvsc_drv = {
.name = KBUILD_MODNAME,
.id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
+ .suspend = storvsc_suspend,
+ .resume = storvsc_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 955e4c938d49..701b842296f0 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -501,7 +501,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_host_reset_handler = sun3scsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
@@ -523,7 +523,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 0b845ab7c3bf..d14c2243e02a 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -132,6 +132,16 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+config SCSI_UFS_TI_J721E
+ tristate "TI glue layer for Cadence UFS Controller"
+ depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
+ help
+ This selects the TI glue layer driver for the Cadence UFS Host
+ Controller IP.
+
+ Select this if you have a TI platform with a UFS controller.
+ If unsure, say N.
+
config SCSI_UFS_BSG
bool "Universal Flash Storage BSG device node"
depends on SCSI_UFSHCD
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2a9097939bcb..94c6c5d7334b 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
new file mode 100644
index 000000000000..5216d228cdd9
--- /dev/null
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+//
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define TI_UFS_SS_CTRL 0x4
+#define TI_UFS_SS_RST_N_PCS BIT(0)
+#define TI_UFS_SS_CLK_26MHZ BIT(4)
+
+static int ti_j721e_ufs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long clk_rate;
+ void __iomem *regbase;
+ struct clk *clk;
+ u32 reg = 0;
+ int ret;
+
+ regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regbase))
+ return PTR_ERR(regbase);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ /* Select MPHY refclk frequency */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Cannot claim MPHY clock.\n");
+ return PTR_ERR(clk);
+ }
+ clk_rate = clk_get_rate(clk);
+ if (clk_rate == 26000000)
+ reg |= TI_UFS_SS_CLK_26MHZ;
+ devm_clk_put(dev, clk);
+
+ /* Take UFS slave device out of reset */
+ reg |= TI_UFS_SS_RST_N_PCS;
+ writel(reg, regbase + TI_UFS_SS_CTRL);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
+ dev);
+ if (ret) {
+ dev_err(dev, "failed to populate child nodes %d\n", ret);
+ pm_runtime_put_sync(dev);
+ }
+
+ return ret;
+}
+
+static int ti_j721e_ufs_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_j721e_ufs_of_match[] = {
+ {
+ .compatible = "ti,j721e-ufs",
+ },
+ { },
+};
+
+static struct platform_driver ti_j721e_ufs_driver = {
+ .probe = ti_j721e_ufs_probe,
+ .remove = ti_j721e_ufs_remove,
+ .driver = {
+ .name = "ti-j721e-ufs",
+ .of_match_table = ti_j721e_ufs_of_match,
+ },
+};
+module_platform_driver(ti_j721e_ufs_driver);
+
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_DESCRIPTION("TI UFS host controller glue driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 6bbb1679bb91..5d6487350a6c 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -452,10 +452,7 @@ static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
/* get resource of ufs sys ctrl */
host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(host->ufs_sys_ctrl))
- return PTR_ERR(host->ufs_sys_ctrl);
-
- return 0;
+ return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
}
static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 0f6ff33ce52e..83e28edc3ac5 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -147,6 +147,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
if (err)
goto out_variant_clear;
+ /* Enable runtime autosuspend */
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index a5b71487a206..c69c29a1ceb9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -246,6 +246,44 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
mb();
}
+/**
+ * ufs_qcom_host_reset - reset host controller and PHY
+ */
+static int ufs_qcom_host_reset(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (!host->core_reset) {
+ dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+ goto out;
+ }
+
+ ret = reset_control_assert(host->core_reset);
+ if (ret) {
+ dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /*
+ * The hardware requirement for delay between assert/deassert
+ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+ * ~125us (4/32768). To be on the safe side add 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(host->core_reset);
+ if (ret)
+ dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+ __func__, ret);
+
+ usleep_range(1000, 1100);
+
+out:
+ return ret;
+}
+
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -254,6 +292,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
+ /* Reset UFS Host Controller and PHY */
+ ret = ufs_qcom_host_reset(hba);
+ if (ret)
+ dev_warn(hba->dev, "%s: host reset returned %d\n",
+ __func__, ret);
+
if (is_rate_B)
phy_set_mode(phy, PHY_MODE_UFS_HS_B);
@@ -1101,6 +1145,15 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
+ /* Setup the reset control of HCI */
+ host->core_reset = devm_reset_control_get(hba->dev, "rst");
+ if (IS_ERR(host->core_reset)) {
+ err = PTR_ERR(host->core_reset);
+ dev_warn(dev, "Failed to get reset control %d\n", err);
+ host->core_reset = NULL;
+ err = 0;
+ }
+
/* Fire up the reset controller. Failure here is non-fatal. */
host->rcdev.of_node = dev->of_node;
host->rcdev.ops = &ufs_qcom_reset_ops;
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index d401f174bb70..2d95e7cc7187 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -6,6 +6,7 @@
#define UFS_QCOM_H_
#include <linux/reset-controller.h>
+#include <linux/reset.h>
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_U32 (~(u32)0)
@@ -233,6 +234,8 @@ struct ufs_qcom_host {
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
+ /* Reset control of HCI */
+ struct reset_control *core_reset;
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 969a36b15897..ad2abc96c0f1 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -126,13 +126,16 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
return;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit == ahit)
- goto out_unlock;
- hba->ahit = ahit;
- if (!pm_runtime_suspended(hba->dev))
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-out_unlock:
+ if (hba->ahit != ahit)
+ hba->ahit = ahit;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!pm_runtime_suspended(hba->dev)) {
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ ufshcd_auto_hibern8_enable(hba);
+ ufshcd_release(hba);
+ pm_runtime_put(hba->dev);
+ }
}
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index dc2f6d2b46ed..baeecee35d1e 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -162,6 +162,7 @@ out:
/**
* ufs_bsg_remove - detach and remove the added ufs-bsg node
+ * @hba: per adapter object
*
* Should be called when unloading the driver.
*/
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
index fb9e2ff4f8d2..6a901da2d15a 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -80,7 +80,7 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
*/
static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
{
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8d40dc918f4e..76f9be71c31b 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -402,7 +402,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "IRQ resource not available\n");
err = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 11a87f51c442..b5966faf3e98 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -88,6 +88,9 @@
/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02
+/* default delay of autosuspend: 2000 ms */
+#define RPM_AUTOSUSPEND_DELAY_MS 2000
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -114,7 +117,7 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
return -EINVAL;
- regs = kzalloc(len, GFP_KERNEL);
+ regs = kzalloc(len, GFP_ATOMIC);
if (!regs)
return -ENOMEM;
@@ -237,7 +240,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
END_FIX
};
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -1607,7 +1610,7 @@ static void ufshcd_gate_work(struct work_struct *work)
* state to CLKS_ON.
*/
if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state == REQ_CLKS_ON)) {
+ (hba->clk_gating.state != REQ_CLKS_OFF)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
@@ -1935,8 +1938,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
} else {
dev_warn(hba->dev,
- "%s: Response size is bigger than buffer",
- __func__);
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, buf_len);
return -EINVAL;
}
}
@@ -2986,10 +2989,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
@@ -3856,6 +3859,9 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Reset the attached device */
+ ufshcd_vops_device_reset(hba);
+
ret = ufshcd_host_reset_and_restore(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3885,15 +3891,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
+ int err;
+
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
/*
- * If link recovery fails then return error so that caller
- * don't retry the hibern8 enter again.
+ * If link recovery fails then return error code returned from
+ * ufshcd_link_recovery().
+ * If link recovery succeeds then return -EAGAIN to attempt
+ * hibern8 enter retry again.
*/
- if (ufshcd_link_recovery(hba))
- ret = -ENOLINK;
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
+ ret = err;
+ } else {
+ ret = -EAGAIN;
+ }
} else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
@@ -3907,7 +3922,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret || ret == -ENOLINK)
+ if (!ret)
goto out;
}
out:
@@ -3941,7 +3956,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
return ret;
}
-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
unsigned long flags;
@@ -4631,9 +4646,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
*/
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
+ struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+
+ if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ sdev->rpm_autosuspend = 1;
+
return 0;
}
@@ -4788,19 +4808,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
+ retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
complete(hba->uic_async_done);
+ retval = IRQ_HANDLED;
+ }
+ return retval;
}
/**
@@ -4856,8 +4886,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs;
u32 tr_doorbell;
@@ -4876,7 +4910,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ if (completed_reqs) {
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
@@ -5395,61 +5434,77 @@ out:
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ irqreturn_t retval = IRQ_NONE;
/* PHY layer lane error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
/* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+ retval |= IRQ_HANDLED;
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg)
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- else if (hba->dev_quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+ retval |= IRQ_HANDLED;
}
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg) {
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg) {
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg) {
+ if ((reg & UIC_DME_ERROR) &&
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ retval |= IRQ_HANDLED;
}
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
+ return retval;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
@@ -5472,10 +5527,15 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
@@ -5484,7 +5544,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
- ufshcd_update_uic_error(hba);
+ retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
@@ -5532,6 +5592,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
}
schedule_work(&hba->eh_work);
}
+ retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
@@ -5539,44 +5600,62 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
u32 tm_doorbell;
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up(&hba->tm_wq);
+ if (hba->tm_condition) {
+ wake_up(&hba->tm_wq);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
if (hba->errors)
- ufshcd_check_errors(hba);
+ retval |= ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
- ufshcd_uic_cmd_compl(hba, intr_status);
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
- ufshcd_tmc_handler(hba);
+ retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_transfer_req_compl(hba);
+
+ return retval;
}
/**
@@ -5584,8 +5663,9 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
@@ -5608,14 +5688,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (intr_status)
ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
- }
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
} while (intr_status && --retries);
+ if (retval == IRQ_NONE) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+ __func__, intr_status);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
+ }
+
spin_unlock(hba->host->host_lock);
return retval;
}
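The conversion above follows a common interrupt-handling idiom: each sub-handler reports whether it consumed anything, the top-level ISR ORs the results together, and a final IRQ_NONE lets the IRQ core detect spurious or stuck interrupts. A minimal sketch of that idiom, with placeholder names that are not part of the UFS driver:

#include <linux/interrupt.h>

static irqreturn_t demo_handle_errors(u32 status)
{
	/* claim the interrupt only when a bit we understand is set */
	return (status & 0x1) ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t demo_handle_completions(u32 status)
{
	return (status & 0x2) ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	u32 status = 0x2;		/* would be read from hardware */
	irqreturn_t retval = IRQ_NONE;

	retval |= demo_handle_errors(status);
	retval |= demo_handle_completions(status);

	/* returning IRQ_NONE tells the core the interrupt was spurious */
	return retval;
}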
@@ -5760,9 +5844,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
* @hba: per-adapter instance
* @req_upiu: upiu request
* @rsp_upiu: upiu reply
- * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
* @desc_buff: pointer to descriptor buffer, NULL if NA
* @buff_len: descriptor size, 0 if NA
+ * @cmd_type: specifies the type (NOP, Query...)
* @desc_op: descriptor operation
*
 * These types of requests use a UTP Transfer Request Descriptor - utrd.
@@ -5776,7 +5860,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
u8 *desc_buff, int *buff_len,
- int cmd_type,
+ enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
struct ufshcd_lrb *lrbp;
@@ -5856,7 +5940,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memcpy(desc_buff, descp, resp_len);
*buff_len = resp_len;
} else {
- dev_warn(hba->dev, "rsp size is bigger than buffer");
+ dev_warn(hba->dev,
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, *buff_len);
*buff_len = 0;
err = -EINVAL;
}
@@ -5891,7 +5977,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op)
{
int err;
- int cmd_type = DEV_CMD_TYPE_QUERY;
+ enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
struct utp_task_req_desc treq = { { 0 }, };
int ocs_value;
u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
@@ -6770,23 +6856,13 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
&hba->desc_size.geom_desc);
if (err)
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+
err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
&hba->desc_size.hlth_desc);
if (err)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
-static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
-{
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
-}
-
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6881,9 +6957,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
@@ -6934,6 +7007,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -7069,6 +7145,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
.dma_boundary = PAGE_SIZE - 1,
+ .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -7950,12 +8027,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+
goto out;
set_old_link_state:
@@ -8274,9 +8351,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- /* Set descriptor lengths to specification defaults */
- ufshcd_def_desc_sizes(hba);
-
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index c94cfda52829..2740f6941ec6 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -716,6 +716,12 @@ struct ufs_hba {
* the performance of ongoing read/write operations.
*/
#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
+ /*
+ * This capability allows host controller driver to automatically
+ * enable runtime power management by itself instead of waiting
+ * for userspace to control the power management.
+ */
+#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -749,6 +755,10 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
+static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
+}
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
@@ -916,6 +926,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index dbb75cd28dc8..c2961d37cc1c 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -195,7 +195,7 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
-#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index ca8e3abeb2c7..a23a8e5794f5 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp)
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
u32 dma_len)
{
- return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+ return dma_len > (1U << 16) ? (1U << 16) : dma_len;
+}
+
+static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
+ u32 dma_len)
+{
+ /* The old driver used 0xfffc as limit, so do that here too */
+ return dma_len > 0xfffc ? 0xfffc : dma_len;
}
static void zorro_esp_reset_dma(struct esp *esp)
@@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
.irq_pending = fastlane_esp_irq_pending,
- .dma_length_limit = zorro_esp_dma_length_limit,
+ .dma_length_limit = fastlane_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
.dma_drain = zorro_esp_dma_drain,
.dma_invalidate = fastlane_esp_dma_invalidate,
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index c8c80df090d1..c725d0a8b288 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -24,7 +24,7 @@ config SOUNDWIRE_CADENCE
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
- depends on X86 && ACPI && SND_SOC
+ depends on ACPI && SND_SOC
help
SoundWire Intel Master driver.
If you have an Intel platform which has a SoundWire Master then
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index fc53dbe57f85..be5d437058ed 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -422,10 +422,11 @@ static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
- if (slave->id.unique_id != id.unique_id ||
- slave->id.mfg_id != id.mfg_id ||
+ if (slave->id.mfg_id != id.mfg_id ||
slave->id.part_id != id.part_id ||
- slave->id.class_id != id.class_id)
+ slave->id.class_id != id.class_id ||
+ (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
+ slave->id.unique_id != id.unique_id))
return -ENODEV;
return 0;
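The reordered comparison works because SDW_IGNORED_UNIQUE_ID acts as a wildcard: when the registered Slave carries the sentinel, the unique-id field simply drops out of the match. A stand-alone sketch of that sentinel-match pattern, using made-up types and values:

#include <stdbool.h>

#define DEMO_IGNORED_UNIQUE_ID 0xFF	/* sentinel: field not populated */

struct demo_id {
	unsigned int mfg_id;
	unsigned int part_id;
	unsigned int unique_id;
};

static bool demo_id_match(const struct demo_id *a, const struct demo_id *b)
{
	if (a->mfg_id != b->mfg_id || a->part_id != b->part_id)
		return false;

	/* unique_id participates only when the registered device has one */
	return a->unique_id == DEMO_IGNORED_UNIQUE_ID ||
	       a->unique_id == b->unique_id;
}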
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 502ed4ec8f07..fed21e2b2277 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -183,9 +183,6 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_DEFAULT_SSP_INTERVAL 0x18
#define CDNS_TX_TIMEOUT 2000
-#define CDNS_PCM_PDI_OFFSET 0x2
-#define CDNS_PDM_PDI_OFFSET 0x6
-
#define CDNS_SCP_RX_FIFOLEVEL 0x2
/*
@@ -232,6 +229,22 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
}
/*
+ * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL
+ * need to be confirmed with a write to MCP_CONFIG_UPDATE
+ */
+static int cdns_update_config(struct sdw_cdns *cdns)
+{
+ int ret;
+
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+ if (ret < 0)
+ dev_err(cdns->dev, "Config update timed out\n");
+
+ return ret;
+}
+
+/*
* debugfs
*/
#ifdef CONFIG_DEBUG_FS
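cdns_update_config() above captures the usual "commit via self-clearing bit" sequence: program the shadowed registers, set the update bit, then wait for hardware to clear it. A hedged sketch of that sequence against imaginary registers (the DEMO_* offsets and the 10 us/1 ms poll bounds are illustrative, not the Cadence layout):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_CONFIG_UPDATE	0x04
#define DEMO_UPDATE_BIT		BIT(0)

static int demo_commit_config(void __iomem *base)
{
	u32 val;

	/* kick the self-clearing update bit; hardware drops it once the
	 * shadowed configuration registers have been applied */
	writel(DEMO_UPDATE_BIT, base + DEMO_CONFIG_UPDATE);

	return readl_poll_timeout(base + DEMO_CONFIG_UPDATE, val,
				  !(val & DEMO_UPDATE_BIT), 10, 1000);
}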
@@ -279,11 +292,7 @@ static int cdns_reg_show(struct seq_file *s, void *data)
ret += scnprintf(buf + ret, RD_BUF - ret,
"\nDPn B0 Registers\n");
- /*
- * in sdw_cdns_pdi_init() we filter out the Bulk PDIs,
- * so the indices need to be corrected again
- */
- num_ports = cdns->num_ports + CDNS_PCM_PDI_OFFSET;
+ num_ports = cdns->num_ports;
for (i = 0; i < num_ports; i++) {
ret += scnprintf(buf + ret, RD_BUF - ret,
@@ -324,6 +333,26 @@ static int cdns_reg_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(cdns_reg);
+static int cdns_hw_reset(void *data, u64 value)
+{
+ struct sdw_cdns *cdns = data;
+ int ret;
+
+ if (value != 1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ ret = sdw_cdns_exit_reset(cdns);
+
+ dev_dbg(cdns->dev, "link hw_reset done: %d\n", ret);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cdns_hw_reset_fops, NULL, cdns_hw_reset, "%llu\n");
+
/**
* sdw_cdns_debugfs_init() - Cadence debugfs init
* @cdns: Cadence instance
@@ -332,6 +361,9 @@ DEFINE_SHOW_ATTRIBUTE(cdns_reg);
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root)
{
debugfs_create_file("cdns-registers", 0400, root, cdns, &cdns_reg_fops);
+
+ debugfs_create_file("cdns-hw-reset", 0200, root, cdns,
+ &cdns_hw_reset_fops);
}
EXPORT_SYMBOL_GPL(sdw_cdns_debugfs_init);
@@ -752,14 +784,48 @@ EXPORT_SYMBOL(sdw_cdns_thread);
/*
* init routines
*/
-static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
+
+/**
+ * sdw_cdns_exit_reset() - Program reset parameters and start bus operations
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
{
- u32 mask;
+ /* program maximum length reset to be safe */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_RST_DELAY,
+ CDNS_MCP_CONTROL_RST_DELAY);
+
+ /* use hardware generated reset */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_HW_RST,
+ CDNS_MCP_CONTROL_HW_RST);
+
+ /* enable bus operations with clock and data */
+ cdns_updatel(cdns, CDNS_MCP_CONFIG,
+ CDNS_MCP_CONFIG_OP,
+ CDNS_MCP_CONFIG_OP_NORMAL);
+
+ /* commit changes */
+ return cdns_update_config(cdns);
+}
+EXPORT_SYMBOL(sdw_cdns_exit_reset);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0,
- CDNS_MCP_SLAVE_INTMASK0_MASK);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1,
- CDNS_MCP_SLAVE_INTMASK1_MASK);
+/**
+ * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
+{
+ u32 slave_intmask0 = 0;
+ u32 slave_intmask1 = 0;
+ u32 mask = 0;
+
+ if (!state)
+ goto update_masks;
+
+ slave_intmask0 = CDNS_MCP_SLAVE_INTMASK0_MASK;
+ slave_intmask1 = CDNS_MCP_SLAVE_INTMASK1_MASK;
/* enable detection of all slave state changes */
mask = CDNS_MCP_INT_SLAVE_MASK;
@@ -782,26 +848,13 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
if (interrupt_mask) /* parameter override */
mask = interrupt_mask;
+update_masks:
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
- return 0;
-}
-
-/**
- * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
- * @cdns: Cadence instance
- */
-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns)
-{
- int ret;
-
- _cdns_enable_interrupt(cdns);
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
- CDNS_MCP_CONFIG_UPDATE_BIT);
- if (ret < 0)
- dev_err(cdns->dev, "Config update timedout\n");
-
- return ret;
+ /* commit changes */
+ return cdns_update_config(cdns);
}
EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
@@ -821,7 +874,6 @@ static int cdns_allocate_pdi(struct sdw_cdns *cdns,
for (i = 0; i < num; i++) {
pdi[i].num = i + pdi_offset;
- pdi[i].assigned = false;
}
*stream = pdi;
@@ -838,7 +890,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config)
{
struct sdw_cdns_streams *stream;
- int offset, i, ret;
+ int offset;
+ int ret;
cdns->pcm.num_bd = config.pcm_bd;
cdns->pcm.num_in = config.pcm_in;
@@ -850,11 +903,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
/* Allocate PDIs for PCMs */
stream = &cdns->pcm;
- /* First two PDIs are reserved for bulk transfers */
- if (stream->num_bd < CDNS_PCM_PDI_OFFSET)
- return -EINVAL;
- stream->num_bd -= CDNS_PCM_PDI_OFFSET;
- offset = CDNS_PCM_PDI_OFFSET;
+ /* we allocate PDI0 and PDI1 which are used for Bulk */
+ offset = 0;
ret = cdns_allocate_pdi(cdns, &stream->bd,
stream->num_bd, offset);
@@ -881,7 +931,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
/* Allocate PDIs for PDMs */
stream = &cdns->pdm;
- offset = CDNS_PDM_PDI_OFFSET;
ret = cdns_allocate_pdi(cdns, &stream->bd,
stream->num_bd, offset);
if (ret)
@@ -898,6 +947,9 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
ret = cdns_allocate_pdi(cdns, &stream->out,
stream->num_out, offset);
+
+ offset += stream->num_out;
+
if (ret)
return ret;
@@ -905,18 +957,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
cdns->num_ports += stream->num_pdi;
- cdns->ports = devm_kcalloc(cdns->dev, cdns->num_ports,
- sizeof(*cdns->ports), GFP_KERNEL);
- if (!cdns->ports) {
- ret = -ENOMEM;
- return ret;
- }
-
- for (i = 0; i < cdns->num_ports; i++) {
- cdns->ports[i].assigned = false;
- cdns->ports[i].num = i + 1; /* Port 0 reserved for bulk */
- }
-
return 0;
}
EXPORT_SYMBOL(sdw_cdns_pdi_init);
@@ -939,7 +979,7 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
* sdw_cdns_init() - Cadence initialization
* @cdns: Cadence instance
*/
-int sdw_cdns_init(struct sdw_cdns *cdns)
+int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
{
struct sdw_bus *bus = &cdns->bus;
struct sdw_master_prop *prop = &bus->prop;
@@ -947,12 +987,13 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
int divider;
int ret;
- /* Exit clock stop */
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CLK_STOP_CLR);
- if (ret < 0) {
- dev_err(cdns->dev, "Couldn't exit from clock stop\n");
- return ret;
+ if (clock_stop_exit) {
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CLK_STOP_CLR);
+ if (ret < 0) {
+ dev_err(cdns->dev, "Couldn't exit from clock stop\n");
+ return ret;
+ }
}
/* Set clock divider */
@@ -975,6 +1016,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL);
cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+ /* flush command FIFOs */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST,
+ CDNS_MCP_CONTROL_CMD_RST);
+
/* Set cmd accept mode */
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
CDNS_MCP_CONTROL_CMD_ACCEPT);
@@ -997,13 +1042,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
/* Set cmd mode for Tx and Rx cmds */
val &= ~CDNS_MCP_CONFIG_CMD;
- /* Set operation to normal */
- val &= ~CDNS_MCP_CONFIG_OP;
- val |= CDNS_MCP_CONFIG_OP_NORMAL;
-
cdns_writel(cdns, CDNS_MCP_CONFIG, val);
- return 0;
+ /* commit changes */
+ return cdns_update_config(cdns);
}
EXPORT_SYMBOL(sdw_cdns_init);
@@ -1185,20 +1227,20 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
* @num: Number of PDIs
* @pdi: PDI instances
*
- * Find and return a free PDI for a given PDI array
+ * Find a PDI in a given PDI array. The PDI num and dai_id are
+ * expected to match; NULL is returned otherwise.
*/
static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
+ unsigned int offset,
unsigned int num,
- struct sdw_cdns_pdi *pdi)
+ struct sdw_cdns_pdi *pdi,
+ int dai_id)
{
int i;
- for (i = 0; i < num; i++) {
- if (pdi[i].assigned)
- continue;
- pdi[i].assigned = true;
- return &pdi[i];
- }
+ for (i = offset; i < offset + num; i++)
+ if (pdi[i].num == dai_id)
+ return &pdi[i];
return NULL;
}
@@ -1207,13 +1249,11 @@ static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
* sdw_cdns_config_stream: Configure a stream
*
* @cdns: Cadence instance
- * @port: Cadence data port
* @ch: Channel count
* @dir: Data direction
* @pdi: PDI to be used
*/
void sdw_cdns_config_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_port *port,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
{
u32 offset, val = 0;
@@ -1221,113 +1261,51 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
if (dir == SDW_DATA_DIR_RX)
val = CDNS_PORTCTRL_DIRN;
- offset = CDNS_PORTCTRL + port->num * CDNS_PORT_OFFSET;
+ offset = CDNS_PORTCTRL + pdi->num * CDNS_PORT_OFFSET;
cdns_updatel(cdns, offset, CDNS_PORTCTRL_DIRN, val);
- val = port->num;
+ val = pdi->num;
val |= ((1 << ch) - 1) << SDW_REG_SHIFT(CDNS_PDI_CONFIG_CHANNEL);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
EXPORT_SYMBOL(sdw_cdns_config_stream);
/**
- * cdns_get_num_pdi() - Get number of PDIs required
- *
- * @cdns: Cadence instance
- * @pdi: PDI to be used
- * @num: Number of PDIs
- * @ch_count: Channel count
- */
-static int cdns_get_num_pdi(struct sdw_cdns *cdns,
- struct sdw_cdns_pdi *pdi,
- unsigned int num, u32 ch_count)
-{
- int i, pdis = 0;
-
- for (i = 0; i < num; i++) {
- if (pdi[i].assigned)
- continue;
-
- if (pdi[i].ch_count < ch_count)
- ch_count -= pdi[i].ch_count;
- else
- ch_count = 0;
-
- pdis++;
-
- if (!ch_count)
- break;
- }
-
- if (ch_count)
- return 0;
-
- return pdis;
-}
-
-/**
- * sdw_cdns_get_stream() - Get stream information
- *
- * @cdns: Cadence instance
- * @stream: Stream to be allocated
- * @ch: Channel count
- * @dir: Data direction
- */
-int sdw_cdns_get_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- u32 ch, u32 dir)
-{
- int pdis = 0;
-
- if (dir == SDW_DATA_DIR_RX)
- pdis = cdns_get_num_pdi(cdns, stream->in, stream->num_in, ch);
- else
- pdis = cdns_get_num_pdi(cdns, stream->out, stream->num_out, ch);
-
- /* check if we found PDI, else find in bi-directional */
- if (!pdis)
- pdis = cdns_get_num_pdi(cdns, stream->bd, stream->num_bd, ch);
-
- return pdis;
-}
-EXPORT_SYMBOL(sdw_cdns_get_stream);
-
-/**
- * sdw_cdns_alloc_stream() - Allocate a stream
+ * sdw_cdns_alloc_pdi() - Allocate a PDI
*
* @cdns: Cadence instance
* @stream: Stream to be allocated
- * @port: Cadence data port
* @ch: Channel count
* @dir: Data direction
*/
-int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir)
+struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ struct sdw_cdns_streams *stream,
+ u32 ch, u32 dir, int dai_id)
{
struct sdw_cdns_pdi *pdi = NULL;
if (dir == SDW_DATA_DIR_RX)
- pdi = cdns_find_pdi(cdns, stream->num_in, stream->in);
+ pdi = cdns_find_pdi(cdns, 0, stream->num_in, stream->in,
+ dai_id);
else
- pdi = cdns_find_pdi(cdns, stream->num_out, stream->out);
+ pdi = cdns_find_pdi(cdns, 0, stream->num_out, stream->out,
+ dai_id);
/* check if we found a PDI, else find in bi-directional */
if (!pdi)
- pdi = cdns_find_pdi(cdns, stream->num_bd, stream->bd);
-
- if (!pdi)
- return -EIO;
-
- port->pdi = pdi;
- pdi->l_ch_num = 0;
- pdi->h_ch_num = ch - 1;
- pdi->dir = dir;
- pdi->ch_count = ch;
+ pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
+ dai_id);
+
+ if (pdi) {
+ pdi->l_ch_num = 0;
+ pdi->h_ch_num = ch - 1;
+ pdi->dir = dir;
+ pdi->ch_count = ch;
+ }
- return 0;
+ return pdi;
}
-EXPORT_SYMBOL(sdw_cdns_alloc_stream);
+EXPORT_SYMBOL(sdw_cdns_alloc_pdi);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cadence Soundwire Library");
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 0b72b7094735..001457cbe5ad 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -8,7 +8,6 @@
/**
* struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
*
- * @assigned: pdi assigned
* @num: pdi number
* @intel_alh_id: link identifier
* @l_ch_num: low channel for PDI
@@ -18,7 +17,6 @@
* @type: stream type, PDM or PCM
*/
struct sdw_cdns_pdi {
- bool assigned;
int num;
int intel_alh_id;
int l_ch_num;
@@ -29,23 +27,6 @@ struct sdw_cdns_pdi {
};
/**
- * struct sdw_cdns_port: Cadence port structure
- *
- * @num: port number
- * @assigned: port assigned
- * @ch: channel count
- * @direction: data port direction
- * @pdi: pdi for this port
- */
-struct sdw_cdns_port {
- unsigned int num;
- bool assigned;
- unsigned int ch;
- enum sdw_data_direction direction;
- struct sdw_cdns_pdi *pdi;
-};
-
-/**
* struct sdw_cdns_streams: Cadence stream data structure
*
* @num_bd: number of bidirectional streams
@@ -95,8 +76,8 @@ struct sdw_cdns_stream_config {
* struct sdw_cdns_dma_data: Cadence DMA data
*
* @name: SoundWire stream name
- * @nr_ports: Number of ports
- * @port: Ports
+ * @stream: stream runtime
+ * @pdi: PDI used for this dai
* @bus: Bus handle
* @stream_type: Stream type
* @link_id: Master link id
@@ -104,8 +85,7 @@ struct sdw_cdns_stream_config {
struct sdw_cdns_dma_data {
char *name;
struct sdw_stream_runtime *stream;
- int nr_ports;
- struct sdw_cdns_port **port;
+ struct sdw_cdns_pdi *pdi;
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
int link_id;
@@ -158,10 +138,11 @@ extern struct sdw_master_ops sdw_cdns_master_ops;
irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
-int sdw_cdns_init(struct sdw_cdns *cdns);
+int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit);
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config);
-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
+int sdw_cdns_exit_reset(struct sdw_cdns *cdns);
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state);
#ifdef CONFIG_DEBUG_FS
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
@@ -170,10 +151,10 @@ void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
int sdw_cdns_get_stream(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
u32 ch, u32 dir);
-int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir);
-void sdw_cdns_config_stream(struct sdw_cdns *cdns, struct sdw_cdns_port *port,
+struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ struct sdw_cdns_streams *stream,
+ u32 ch, u32 dir, int dai_id);
+void sdw_cdns_config_stream(struct sdw_cdns *cdns,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai,
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 13c54eac0cc3..99dc61021211 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -479,7 +480,10 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
unsigned int link_id = sdw->instance;
int pdi_conf = 0;
- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
+ /* the Bulk and PCM streams are not contiguous */
+ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
+ if (pdi->num >= 2)
+ pdi->intel_alh_id += 2;
/*
* Program stream parameters to stream SHIM register
@@ -508,7 +512,10 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
unsigned int link_id = sdw->instance;
unsigned int conf;
- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
+ /* the Bulk and PCM streams are not contiguous */
+ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
+ if (pdi->num >= 2)
+ pdi->intel_alh_id += 2;
/* Program Stream config ALH register */
conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
@@ -603,66 +610,6 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
* DAI routines
*/
-static struct sdw_cdns_port *intel_alloc_port(struct sdw_intel *sdw,
- u32 ch, u32 dir, bool pcm)
-{
- struct sdw_cdns *cdns = &sdw->cdns;
- struct sdw_cdns_port *port = NULL;
- int i, ret = 0;
-
- for (i = 0; i < cdns->num_ports; i++) {
- if (cdns->ports[i].assigned)
- continue;
-
- port = &cdns->ports[i];
- port->assigned = true;
- port->direction = dir;
- port->ch = ch;
- break;
- }
-
- if (!port) {
- dev_err(cdns->dev, "Unable to find a free port\n");
- return NULL;
- }
-
- if (pcm) {
- ret = sdw_cdns_alloc_stream(cdns, &cdns->pcm, port, ch, dir);
- if (ret)
- goto out;
-
- intel_pdi_shim_configure(sdw, port->pdi);
- sdw_cdns_config_stream(cdns, port, ch, dir, port->pdi);
-
- intel_pdi_alh_configure(sdw, port->pdi);
-
- } else {
- ret = sdw_cdns_alloc_stream(cdns, &cdns->pdm, port, ch, dir);
- }
-
-out:
- if (ret) {
- port->assigned = false;
- port = NULL;
- }
-
- return port;
-}
-
-static void intel_port_cleanup(struct sdw_cdns_dma_data *dma)
-{
- int i;
-
- for (i = 0; i < dma->nr_ports; i++) {
- if (dma->port[i]) {
- dma->port[i]->pdi->assigned = false;
- dma->port[i]->pdi = NULL;
- dma->port[i]->assigned = false;
- dma->port[i] = NULL;
- }
- }
-}
-
static int intel_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -670,9 +617,11 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_pdi *pdi;
struct sdw_stream_config sconfig;
struct sdw_port_config *pconfig;
- int ret, i, ch, dir;
+ int ch, dir;
+ int ret;
bool pcm = true;
dma = snd_soc_dai_get_dma_data(dai, substream);
@@ -685,38 +634,30 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
else
dir = SDW_DATA_DIR_TX;
- if (dma->stream_type == SDW_STREAM_PDM) {
- /* TODO: Check whether PDM decimator is already in use */
- dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pdm, ch, dir);
+ if (dma->stream_type == SDW_STREAM_PDM)
pcm = false;
- } else {
- dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pcm, ch, dir);
- }
- if (!dma->nr_ports) {
- dev_err(dai->dev, "ports/resources not available\n");
- return -EINVAL;
+ if (pcm)
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
+ else
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
+
+ if (!pdi) {
+ ret = -EINVAL;
+ goto error;
}
- dma->port = kcalloc(dma->nr_ports, sizeof(*dma->port), GFP_KERNEL);
- if (!dma->port)
- return -ENOMEM;
+ /* do run-time configurations for SHIM, ALH and PDI/PORT */
+ intel_pdi_shim_configure(sdw, pdi);
+ intel_pdi_alh_configure(sdw, pdi);
+ sdw_cdns_config_stream(cdns, ch, dir, pdi);
- for (i = 0; i < dma->nr_ports; i++) {
- dma->port[i] = intel_alloc_port(sdw, ch, dir, pcm);
- if (!dma->port[i]) {
- ret = -EINVAL;
- goto port_error;
- }
- }
/* Inform DSP about PDI stream number */
- for (i = 0; i < dma->nr_ports; i++) {
- ret = intel_config_stream(sdw, substream, dai, params,
- dma->port[i]->pdi->intel_alh_id);
- if (ret)
- goto port_error;
- }
+ ret = intel_config_stream(sdw, substream, dai, params,
+ pdi->intel_alh_id);
+ if (ret)
+ goto error;
sconfig.direction = dir;
sconfig.ch_count = ch;
@@ -731,32 +672,22 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
}
/* Port configuration */
- pconfig = kcalloc(dma->nr_ports, sizeof(*pconfig), GFP_KERNEL);
+ pconfig = kcalloc(1, sizeof(*pconfig), GFP_KERNEL);
if (!pconfig) {
ret = -ENOMEM;
- goto port_error;
+ goto error;
}
- for (i = 0; i < dma->nr_ports; i++) {
- pconfig[i].num = dma->port[i]->num;
- pconfig[i].ch_mask = (1 << ch) - 1;
- }
+ pconfig->num = pdi->num;
+ pconfig->ch_mask = (1 << ch) - 1;
ret = sdw_stream_add_master(&cdns->bus, &sconfig,
- pconfig, dma->nr_ports, dma->stream);
- if (ret) {
+ pconfig, 1, dma->stream);
+ if (ret)
dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
- goto stream_error;
- }
kfree(pconfig);
- return ret;
-
-stream_error:
- kfree(pconfig);
-port_error:
- intel_port_cleanup(dma);
- kfree(dma->port);
+error:
return ret;
}
@@ -776,8 +707,6 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
dev_err(dai->dev, "remove master from stream %s failed: %d\n",
dma->stream->name, ret);
- intel_port_cleanup(dma);
- kfree(dma->port);
return ret;
}
@@ -842,14 +771,6 @@ static int intel_create_dai(struct sdw_cdns *cdns,
return -ENOMEM;
if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
- dais[i].playback.stream_name =
- kasprintf(GFP_KERNEL, "SDW%d Tx%d",
- cdns->instance, i);
- if (!dais[i].playback.stream_name) {
- kfree(dais[i].name);
- return -ENOMEM;
- }
-
dais[i].playback.channels_min = 1;
dais[i].playback.channels_max = max_ch;
dais[i].playback.rates = SNDRV_PCM_RATE_48000;
@@ -857,23 +778,12 @@ static int intel_create_dai(struct sdw_cdns *cdns,
}
if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
- dais[i].capture.stream_name =
- kasprintf(GFP_KERNEL, "SDW%d Rx%d",
- cdns->instance, i);
- if (!dais[i].capture.stream_name) {
- kfree(dais[i].name);
- kfree(dais[i].playback.stream_name);
- return -ENOMEM;
- }
-
dais[i].capture.channels_min = 1;
dais[i].capture.channels_max = max_ch;
dais[i].capture.rates = SNDRV_PCM_RATE_48000;
dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
- dais[i].id = SDW_DAI_ID_RANGE_START + i;
-
if (pcm)
dais[i].ops = &intel_pcm_dai_ops;
else
@@ -993,6 +903,15 @@ static struct sdw_master_ops sdw_intel_ops = {
.post_bank_switch = intel_post_bank_switch,
};
+static int intel_init(struct sdw_intel *sdw)
+{
+ /* Initialize shim and controller */
+ intel_link_power_up(sdw);
+ intel_shim_init(sdw);
+
+ return sdw_cdns_init(&sdw->cdns, false);
+}
+
/*
* probe and init
*/
@@ -1026,7 +945,7 @@ static int intel_probe(struct platform_device *pdev)
ret = sdw_add_bus_master(&sdw->cdns.bus);
if (ret) {
dev_err(&pdev->dev, "sdw_add_bus_master fail: %d\n", ret);
- goto err_master_reg;
+ return ret;
}
if (sdw->cdns.bus.prop.hw_disabled) {
@@ -1035,16 +954,11 @@ static int intel_probe(struct platform_device *pdev)
return 0;
}
- /* Initialize shim and controller */
- intel_link_power_up(sdw);
- intel_shim_init(sdw);
-
- ret = sdw_cdns_init(&sdw->cdns);
+ /* Initialize shim, controller and Cadence IP */
+ ret = intel_init(sdw);
if (ret)
goto err_init;
- ret = sdw_cdns_enable_interrupt(&sdw->cdns);
-
/* Read the PDI config and initialize cadence PDI */
intel_pdi_init(sdw, &config);
ret = sdw_cdns_pdi_init(&sdw->cdns, config);
@@ -1062,23 +976,35 @@ static int intel_probe(struct platform_device *pdev)
goto err_init;
}
+ ret = sdw_cdns_enable_interrupt(&sdw->cdns, true);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "cannot enable interrupts\n");
+ goto err_init;
+ }
+
+ ret = sdw_cdns_exit_reset(&sdw->cdns);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n");
+ goto err_interrupt;
+ }
+
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
dev_err(sdw->cdns.dev, "DAI registration failed: %d\n", ret);
snd_soc_unregister_component(sdw->cdns.dev);
- goto err_dai;
+ goto err_interrupt;
}
intel_debugfs_init(sdw);
return 0;
-err_dai:
+err_interrupt:
+ sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->res->irq, sdw);
err_init:
sdw_delete_bus_master(&sdw->cdns.bus);
-err_master_reg:
return ret;
}
@@ -1090,6 +1016,7 @@ static int intel_remove(struct platform_device *pdev)
if (!sdw->cdns.bus.prop.hw_disabled) {
intel_debugfs_exit(sdw);
+ sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->res->irq, sdw);
snd_soc_unregister_component(sdw->cdns.dev);
}
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index b74c2f144962..2a2b4d8df462 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soundwire/sdw_intel.h>
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 6473fa602f82..19919975bb6d 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -29,10 +29,17 @@ static int sdw_slave_add(struct sdw_bus *bus,
slave->dev.parent = bus->dev;
slave->dev.fwnode = fwnode;
- /* name shall be sdw:link:mfg:part:class:unique */
- dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
- bus->link_id, id->mfg_id, id->part_id,
- id->class_id, id->unique_id);
+ if (id->unique_id == SDW_IGNORED_UNIQUE_ID) {
+ /* name shall be sdw:link:mfg:part:class */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id);
+ } else {
+ /* name shall be sdw:link:mfg:part:class:unique */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id, id->unique_id);
+ }
slave->dev.release = sdw_slave_release;
slave->dev.bus = &sdw_bus_type;
@@ -64,6 +71,36 @@ static int sdw_slave_add(struct sdw_bus *bus,
}
#if IS_ENABLED(CONFIG_ACPI)
+
+static bool find_slave(struct sdw_bus *bus,
+ struct acpi_device *adev,
+ struct sdw_slave_id *id)
+{
+ unsigned long long addr;
+ unsigned int link_id;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle,
+ METHOD_NAME__ADR, NULL, &addr);
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(bus->dev, "_ADR resolution failed: %x\n",
+ status);
+ return false;
+ }
+
+ /* Extract link id from ADR, Bit 51 to 48 (included) */
+ link_id = (addr >> 48) & GENMASK(3, 0);
+
+ /* Check for link_id match */
+ if (link_id != bus->link_id)
+ return false;
+
+ sdw_extract_slave_id(bus, addr, id);
+
+ return true;
+}
+
/*
* sdw_acpi_find_slaves() - Find Slave devices in Master ACPI node
* @bus: SDW bus instance
@@ -73,6 +110,7 @@ static int sdw_slave_add(struct sdw_bus *bus,
int sdw_acpi_find_slaves(struct sdw_bus *bus)
{
struct acpi_device *adev, *parent;
+ struct acpi_device *adev2, *parent2;
parent = ACPI_COMPANION(bus->dev);
if (!parent) {
@@ -81,28 +119,46 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus)
}
list_for_each_entry(adev, &parent->children, node) {
- unsigned long long addr;
struct sdw_slave_id id;
- unsigned int link_id;
- acpi_status status;
+ struct sdw_slave_id id2;
+ bool ignore_unique_id = true;
- status = acpi_evaluate_integer(adev->handle,
- METHOD_NAME__ADR, NULL, &addr);
+ if (!find_slave(bus, adev, &id))
+ continue;
- if (ACPI_FAILURE(status)) {
- dev_err(bus->dev, "_ADR resolution failed: %x\n",
- status);
- return status;
+ /* brute-force O(N^2) search for duplicates */
+ parent2 = parent;
+ list_for_each_entry(adev2, &parent2->children, node) {
+
+ if (adev == adev2)
+ continue;
+
+ if (!find_slave(bus, adev2, &id2))
+ continue;
+
+ if (id.sdw_version != id2.sdw_version ||
+ id.mfg_id != id2.mfg_id ||
+ id.part_id != id2.part_id ||
+ id.class_id != id2.class_id)
+ continue;
+
+ if (id.unique_id != id2.unique_id) {
+ dev_dbg(bus->dev,
+ "Valid unique IDs %x %x for Slave mfg %x part %d\n",
+ id.unique_id, id2.unique_id,
+ id.mfg_id, id.part_id);
+ ignore_unique_id = false;
+ } else {
+ dev_err(bus->dev,
+ "Invalid unique IDs %x %x for Slave mfg %x part %d\n",
+ id.unique_id, id2.unique_id,
+ id.mfg_id, id.part_id);
+ return -ENODEV;
+ }
}
- /* Extract link id from ADR, Bit 51 to 48 (included) */
- link_id = (addr >> 48) & GENMASK(3, 0);
-
- /* Check for link_id match */
- if (link_id != bus->link_id)
- continue;
-
- sdw_extract_slave_id(bus, addr, &id);
+ if (ignore_unique_id)
+ id.unique_id = SDW_IGNORED_UNIQUE_ID;
/*
* don't error check for sdw_slave_add as we want to continue
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 333308fe807e..eaf753b70ec5 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -127,4 +127,6 @@ source "drivers/staging/qlge/Kconfig"
source "drivers/staging/hp/Kconfig"
+source "drivers/staging/wfx/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index e4943cd63e98..0a4396c9067b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NET_VENDOR_HP) += hp/
+obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index e6b1ca141b93..c394686a8e7d 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -533,9 +533,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static const struct file_operations ion_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = ion_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ion_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
static int debug_shrink_set(void *data, u64 val)
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 805437fa249a..39e6c59df1e9 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -125,7 +125,6 @@ MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out;
struct axis_fifo {
int irq; /* interrupt */
- struct resource *mem; /* physical memory */
void __iomem *base_addr; /* kernel space memory */
unsigned int rx_fifo_depth; /* max words in the receive fifo */
@@ -701,6 +700,68 @@ static int get_dts_property(struct axis_fifo *fifo,
return 0;
}
+static int axis_fifo_parse_dt(struct axis_fifo *fifo)
+{
+ int ret;
+ unsigned int value;
+
+ ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
+ goto end;
+ } else if (value != 32) {
+ dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
+ goto end;
+ } else if (value != 32) {
+ dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
+ &fifo->rx_fifo_depth);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
+ &fifo->tx_fifo_depth);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ /* IP sets TDFV to fifo depth - 4 so we will do the same */
+ fifo->tx_fifo_depth -= 4;
+
+ ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+end:
+ return ret;
+}
+
static int axis_fifo_probe(struct platform_device *pdev)
{
struct resource *r_irq; /* interrupt resources */
@@ -712,34 +773,6 @@ static int axis_fifo_probe(struct platform_device *pdev)
int rc = 0; /* error return value */
- /* IP properties from device tree */
- unsigned int rxd_tdata_width;
- unsigned int txc_tdata_width;
- unsigned int txd_tdata_width;
- unsigned int tdest_width;
- unsigned int tid_width;
- unsigned int tuser_width;
- unsigned int data_interface_type;
- unsigned int has_tdest;
- unsigned int has_tid;
- unsigned int has_tkeep;
- unsigned int has_tstrb;
- unsigned int has_tuser;
- unsigned int rx_fifo_depth;
- unsigned int rx_programmable_empty_threshold;
- unsigned int rx_programmable_full_threshold;
- unsigned int axi_id_width;
- unsigned int axi4_data_width;
- unsigned int select_xpm;
- unsigned int tx_fifo_depth;
- unsigned int tx_programmable_empty_threshold;
- unsigned int tx_programmable_full_threshold;
- unsigned int use_rx_cut_through;
- unsigned int use_rx_data;
- unsigned int use_tx_control;
- unsigned int use_tx_cut_through;
- unsigned int use_tx_data;
-
/* ----------------------------
* init wrapper device
* ----------------------------
@@ -772,32 +805,19 @@ static int axis_fifo_probe(struct platform_device *pdev)
goto err_initial;
}
- fifo->mem = r_mem;
-
/* request physical memory */
- if (!request_mem_region(fifo->mem->start, resource_size(fifo->mem),
- DRIVER_NAME)) {
- dev_err(fifo->dt_device,
- "couldn't lock memory region at 0x%pa\n",
- &fifo->mem->start);
- rc = -EBUSY;
+ fifo->base_addr = devm_ioremap_resource(fifo->dt_device, r_mem);
+ if (IS_ERR(fifo->base_addr)) {
+ rc = PTR_ERR(fifo->base_addr);
+ dev_err(fifo->dt_device, "can't remap IO resource (%d)\n", rc);
goto err_initial;
}
- dev_dbg(fifo->dt_device, "got memory location [0x%pa - 0x%pa]\n",
- &fifo->mem->start, &fifo->mem->end);
-
- /* map physical memory to kernel virtual address space */
- fifo->base_addr = ioremap(fifo->mem->start, resource_size(fifo->mem));
- if (!fifo->base_addr) {
- dev_err(fifo->dt_device, "couldn't map physical memory\n");
- rc = -ENOMEM;
- goto err_mem;
- }
+
dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);
/* create unique device name */
snprintf(device_name, sizeof(device_name), "%s_%pa",
- DRIVER_NAME, &fifo->mem->start);
+ DRIVER_NAME, &r_mem->start);
dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);
@@ -806,164 +826,9 @@ static int axis_fifo_probe(struct platform_device *pdev)
* ----------------------------
*/
- /* retrieve device tree properties */
- rc = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width",
- &rxd_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axi-str-txc-tdata-width",
- &txc_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width",
- &txd_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tdest-width", &tdest_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tid-width", &tid_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tuser-width", &tuser_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,data-interface-type",
- &data_interface_type);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tdest", &has_tdest);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tid", &has_tid);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tkeep", &has_tkeep);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tstrb", &has_tstrb);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tuser", &has_tuser);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-depth", &rx_fifo_depth);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-pe-threshold",
- &rx_programmable_empty_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-pf-threshold",
- &rx_programmable_full_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,s-axi-id-width", &axi_id_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,s-axi4-data-width", &axi4_data_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,select-xpm", &select_xpm);
+ rc = axis_fifo_parse_dt(fifo);
if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-depth", &tx_fifo_depth);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-pe-threshold",
- &tx_programmable_empty_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-pf-threshold",
- &tx_programmable_full_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-rx-cut-through",
- &use_rx_cut_through);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-rx-data", &use_rx_data);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-ctrl", &use_tx_control);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-cut-through",
- &use_tx_cut_through);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-data", &use_tx_data);
- if (rc)
- goto err_unmap;
-
- /* check validity of device tree properties */
- if (rxd_tdata_width != 32) {
- dev_err(fifo->dt_device,
- "rxd_tdata_width width [%u] unsupported\n",
- rxd_tdata_width);
- rc = -EIO;
- goto err_unmap;
- }
- if (txd_tdata_width != 32) {
- dev_err(fifo->dt_device,
- "txd_tdata_width width [%u] unsupported\n",
- txd_tdata_width);
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tdest) {
- dev_err(fifo->dt_device, "tdest not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tid) {
- dev_err(fifo->dt_device, "tid not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tkeep) {
- dev_err(fifo->dt_device, "tkeep not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tstrb) {
- dev_err(fifo->dt_device, "tstrb not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tuser) {
- dev_err(fifo->dt_device, "tuser not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_rx_cut_through) {
- dev_err(fifo->dt_device, "rx cut-through not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_tx_cut_through) {
- dev_err(fifo->dt_device, "tx cut-through not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_tx_control) {
- dev_err(fifo->dt_device, "tx control not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
-
- /* TODO
- * these exist in the device tree but it's unclear what they do
- * - select-xpm
- * - data-interface-type
- */
-
- /* set device wrapper properties based on IP config */
- fifo->rx_fifo_depth = rx_fifo_depth;
- /* IP sets TDFV to fifo depth - 4 so we will do the same */
- fifo->tx_fifo_depth = tx_fifo_depth - 4;
- fifo->has_rx_fifo = use_rx_data;
- fifo->has_tx_fifo = use_tx_data;
+ goto err_initial;
reset_ip_core(fifo);
@@ -976,18 +841,19 @@ static int axis_fifo_probe(struct platform_device *pdev)
r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!r_irq) {
dev_err(fifo->dt_device, "no IRQ found for 0x%pa\n",
- &fifo->mem->start);
+ &r_mem->start);
rc = -EIO;
- goto err_unmap;
+ goto err_initial;
}
/* request IRQ */
fifo->irq = r_irq->start;
- rc = request_irq(fifo->irq, &axis_fifo_irq, 0, DRIVER_NAME, fifo);
+ rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
+ DRIVER_NAME, fifo);
if (rc) {
dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
fifo->irq);
- goto err_unmap;
+ goto err_initial;
}
/* ----------------------------
@@ -998,7 +864,7 @@ static int axis_fifo_probe(struct platform_device *pdev)
/* allocate device number */
rc = alloc_chrdev_region(&fifo->devt, 0, 1, DRIVER_NAME);
if (rc < 0)
- goto err_irq;
+ goto err_initial;
dev_dbg(fifo->dt_device, "allocated device number major %i minor %i\n",
MAJOR(fifo->devt), MINOR(fifo->devt));
@@ -1022,14 +888,14 @@ static int axis_fifo_probe(struct platform_device *pdev)
}
/* create sysfs entries */
- rc = sysfs_create_group(&fifo->device->kobj, &axis_fifo_attrs_group);
+ rc = devm_device_add_group(fifo->device, &axis_fifo_attrs_group);
if (rc < 0) {
dev_err(fifo->dt_device, "couldn't register sysfs group\n");
goto err_cdev;
}
dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i, major=%i, minor=%i\n",
- &fifo->mem->start, &fifo->base_addr, fifo->irq,
+ &r_mem->start, &fifo->base_addr, fifo->irq,
MAJOR(fifo->devt), MINOR(fifo->devt));
return 0;
@@ -1040,12 +906,6 @@ err_dev:
device_destroy(axis_fifo_driver_class, fifo->devt);
err_chrdev_region:
unregister_chrdev_region(fifo->devt, 1);
-err_irq:
- free_irq(fifo->irq, fifo);
-err_unmap:
- iounmap(fifo->base_addr);
-err_mem:
- release_mem_region(fifo->mem->start, resource_size(fifo->mem));
err_initial:
dev_set_drvdata(dev, NULL);
return rc;
@@ -1056,15 +916,12 @@ static int axis_fifo_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct axis_fifo *fifo = dev_get_drvdata(dev);
- sysfs_remove_group(&fifo->device->kobj, &axis_fifo_attrs_group);
cdev_del(&fifo->char_device);
dev_set_drvdata(fifo->device, NULL);
device_destroy(axis_fifo_driver_class, fifo->devt);
unregister_chrdev_region(fifo->devt, 1);
- free_irq(fifo->irq, fifo);
- iounmap(fifo->base_addr);
- release_mem_region(fifo->mem->start, resource_size(fifo->mem));
dev_set_drvdata(dev, NULL);
+
return 0;
}
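The axis-fifo rework above leans on devm-managed resources, which is why the err_irq/err_unmap/err_mem labels and the matching teardown in remove() disappear. A hedged sketch of a probe written that way; the demo_* names are placeholders, not the axis-fifo driver:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int irq, ret;

	/* mapping and IRQ are released automatically on failure or detach */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, demo_irq, 0, "demo", NULL);
	if (ret)
		return ret;

	return 0;
}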
diff --git a/drivers/staging/axis-fifo/axis-fifo.txt b/drivers/staging/axis-fifo/axis-fifo.txt
index 85d88c010e72..5828e1b8e822 100644
--- a/drivers/staging/axis-fifo/axis-fifo.txt
+++ b/drivers/staging/axis-fifo/axis-fifo.txt
@@ -25,10 +25,10 @@ Required properties:
- xlnx,axi-str-txc-tdata-width: Should be <0x20>
- xlnx,axi-str-txd-protocol: Should be "XIL_AXI_STREAM_ETH_DATA"
- xlnx,axi-str-txd-tdata-width: Should be <0x20>
-- xlnx,axis-tdest-width: AXI-Stream TDEST width
-- xlnx,axis-tid-width: AXI-Stream TID width
-- xlnx,axis-tuser-width: AXI-Stream TUSER width
-- xlnx,data-interface-type: Should be <0x0>
+- xlnx,axis-tdest-width: AXI-Stream TDEST width (ignored by the driver)
+- xlnx,axis-tid-width: AXI-Stream TID width (ignored by the driver)
+- xlnx,axis-tuser-width: AXI-Stream TUSER width (ignored by the driver)
+- xlnx,data-interface-type: Should be <0x0> (ignored by the driver)
- xlnx,has-axis-tdest: Should be <0x0> (this feature isn't supported)
- xlnx,has-axis-tid: Should be <0x0> (this feature isn't supported)
- xlnx,has-axis-tkeep: Should be <0x0> (this feature isn't supported)
@@ -36,13 +36,17 @@ Required properties:
- xlnx,has-axis-tuser: Should be <0x0> (this feature isn't supported)
- xlnx,rx-fifo-depth: Depth of RX FIFO in words
- xlnx,rx-fifo-pe-threshold: RX programmable empty interrupt threshold
+ (ignored by the driver)
- xlnx,rx-fifo-pf-threshold: RX programmable full interrupt threshold
-- xlnx,s-axi-id-width: Should be <0x4>
-- xlnx,s-axi4-data-width: Should be <0x20>
-- xlnx,select-xpm: Should be <0x0>
+ (ignored by the driver)
+- xlnx,s-axi-id-width: Should be <0x4> (ignored by the driver)
+- xlnx,s-axi4-data-width: Should be <0x20> (ignored by the driver)
+- xlnx,select-xpm: Should be <0x0> (ignored by the driver)
- xlnx,tx-fifo-depth: Depth of TX FIFO in words
- xlnx,tx-fifo-pe-threshold: TX programmable empty interrupt threshold
+ (ignored by the driver)
- xlnx,tx-fifo-pf-threshold: TX programmable full interrupt threshold
+ (ignored by the driver)
- xlnx,use-rx-cut-through: Should be <0x0> (this feature isn't supported)
- xlnx,use-rx-data: <0x1> if RX FIFO is enabled, <0x0> otherwise
- xlnx,use-tx-ctrl: Should be <0x0> (this feature isn't supported)
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 962cc0c79988..0225234dd7aa 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -50,16 +50,8 @@ static struct sh_mobile_lcdc_info lcdc0_info = {
};
static struct resource lcdc0_resources[] = {
- [0] = {
- .name = "LCD0",
- .start = 0xfe940000,
- .end = 0xfe943fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 177 + 32,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_MEM_NAMED(0xfe940000, 0x4000, "LCD0"),
+ DEFINE_RES_IRQ(177 + 32),
};
static struct platform_device lcdc0_device = {
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 15b7a82f4b1e..e52a64be93f3 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -135,7 +135,6 @@ static int clk_wzrd_probe(struct platform_device *pdev)
unsigned long rate;
const char *clk_name;
struct clk_wzrd *clk_wzrd;
- struct resource *mem;
struct device_node *np = pdev->dev.of_node;
clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
@@ -143,8 +142,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, clk_wzrd);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
+ clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(clk_wzrd->base))
return PTR_ERR(clk_wzrd->base);
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index caf4d4df4bd3..f7c365b70106 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -508,12 +508,11 @@ static int dt3k_ai_insn_read(struct comedi_device *dev,
unsigned int *data)
{
int i;
- unsigned int chan, gain, aref;
+ unsigned int chan, gain;
chan = CR_CHAN(insn->chanspec);
gain = CR_RANGE(insn->chanspec);
/* XXX docs don't explain how to select aref */
- aref = CR_AREF(insn->chanspec);
for (i = 0; i < insn->n; i++)
data[i] = dt3k_readsingle(dev, DPR_SUBSYS_AI, chan, gain);
diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c
index eb61494dc2bd..673d732dcb8f 100644
--- a/drivers/staging/comedi/drivers/ni_routes.c
+++ b/drivers/staging/comedi/drivers/ni_routes.c
@@ -49,8 +49,6 @@
/* Helper for accessing data. */
#define RVi(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)])
-static const size_t route_table_size = NI_NUM_NAMES * NI_NUM_NAMES;
-
/*
* Find the proper route_values and ni_device_routes tables for this particular
* device.
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 04bc488385e6..4af012968cb6 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
+ * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk
*/
/*
@@ -8,7 +8,7 @@
* Description: University of Stirling USB DAQ & INCITE Technology Limited
* Devices: [ITL] USB-DUX-FAST (usbduxfast)
* Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 10 Oct 2014
+ * Updated: 16 Nov 2019
* Status: stable
*/
@@ -22,6 +22,7 @@
*
*
* Revision history:
+ * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
* 0.9: Dropping the first data packet which seems to be from the last transfer.
* Buffer overflows in the FX2 are handed over to comedi.
* 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
@@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
struct comedi_cmd *cmd)
{
int err = 0;
+ int err2 = 0;
unsigned int steps;
unsigned int arg;
@@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
*/
steps = (cmd->convert_arg * 30) / 1000;
if (cmd->chanlist_len != 1)
- err |= comedi_check_trigger_arg_min(&steps,
- MIN_SAMPLING_PERIOD);
- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
- arg = (steps * 1000) / 30;
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ err2 |= comedi_check_trigger_arg_min(&steps,
+ MIN_SAMPLING_PERIOD);
+ else
+ err2 |= comedi_check_trigger_arg_min(&steps, 1);
+ err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
+ if (err2) {
+ err |= err2;
+ arg = (steps * 1000) / 30;
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ }
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
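To see the rounding error that the new 1.0 history entry refers to, walk the arithmetic in the hunk above (a rough illustration; MIN_SAMPLING_PERIOD/MAX_SAMPLING_PERIOD are not shown in this diff, so assume 28 steps is within range):

	cmd->convert_arg = 950 ns  ->  steps = 950 * 30 / 1000 = 28   (integer division)
	arg = 28 * 1000 / 30 = 933 ns

The old code recomputed arg and applied comedi_check_trigger_arg_is() unconditionally, so any period that is not an exact multiple of the ~33 ns timebase was rewritten to the truncated value and flagged as an error even though the step count itself was fine. After the change, convert_arg is only rewritten when one of the min/max checks gathered in err2 actually failed, and a single-channel command now gets a lower bound of one step instead of no lower bound at all.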
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 147481bf680c..03929b9d3a8b 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -1072,9 +1072,8 @@ static int _nbu2ss_epn_in_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
if (i_word_length > 0) {
for (i = 0; i < i_word_length; i++) {
_nbu2ss_writel(
- &preg->EP_REGS[ep->epnum - 1].EP_WRITE
- , p_buf_32->dw
- );
+ &preg->EP_REGS[ep->epnum - 1].EP_WRITE,
+ p_buf_32->dw);
p_buf_32++;
}
@@ -2661,20 +2660,18 @@ static int nbu2ss_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
- if (&req->req == _req)
- break;
- }
- if (&req->req != _req) {
- spin_unlock_irqrestore(&udc->lock, flags);
- pr_debug("%s no queue(EINVAL)\n", __func__);
- return -EINVAL;
+ if (&req->req == _req) {
+ _nbu2ss_ep_done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+ }
}
- _nbu2ss_ep_done(ep, req, -ECONNRESET);
-
spin_unlock_irqrestore(&udc->lock, flags);
- return 0;
+ pr_debug("%s no queue(EINVAL)\n", __func__);
+
+ return -EINVAL;
}
/*-------------------------------------------------------------------------*/
@@ -3078,7 +3075,6 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
{
int status = -ENODEV;
struct nbu2ss_udc *udc;
- struct resource *r;
int irq;
void __iomem *mmio_base;
@@ -3088,8 +3084,7 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, udc);
/* require I/O memory and IRQ to be provided as resources */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio_base))
return PTR_ERR(mmio_base);
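The nbu2ss_ep_dequeue() rework above moves to the more common shape of handling the match inside the list walk instead of inspecting the iterator after list_for_each_entry() returns. A minimal sketch of that shape, with hypothetical example_* names standing in for the driver's own types, locking and completion call:

static int example_dequeue(struct example_ep *ep, struct usb_request *_req)
{
	struct example_req *req;

	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req) {
			/* found it: complete while req is known to be valid */
			example_done(ep, req, -ECONNRESET);
			return 0;
		}
	}

	/* walked the whole list without a match */
	return -EINVAL;
}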
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
index ce32dfe33bec..0130019cbec2 100644
--- a/drivers/staging/exfat/Kconfig
+++ b/drivers/staging/exfat/Kconfig
@@ -6,15 +6,6 @@ config EXFAT_FS
help
This adds support for the exFAT file system.
-config EXFAT_DONT_MOUNT_VFAT
- bool "Prohibit mounting of fat/vfat filesystems by exFAT"
- depends on EXFAT_FS
- default y
- help
- By default, the exFAT driver will only mount exFAT filesystems, and refuse
- to mount fat/vfat filesystems. Set this to 'n' to allow the exFAT driver
- to mount these filesystems.
-
config EXFAT_DISCARD
bool "enable discard support"
depends on EXFAT_FS
diff --git a/drivers/staging/exfat/TODO b/drivers/staging/exfat/TODO
index a3eb282f9efc..a283ce534cf4 100644
--- a/drivers/staging/exfat/TODO
+++ b/drivers/staging/exfat/TODO
@@ -1,8 +1,22 @@
+A laundry list of things that need looking at, most of which will
+require more work than the average checkpatch cleanup...
+
+Note that some of these entries may not be bugs - they're things
+that need to be looked at, and *possibly* fixed.
+
+Clean up the ffsCamelCase function names.
+
+Fix (thing)->flags to not use magic numbers - multiple offenders
+
+Sort out all the s32/u32/u8 nonsense - most of these should be plain int.
+
exfat_core.c - ffsReadFile - the goto err_out seem to leak a brelse().
same for ffsWriteFile.
-exfat_core.c - fs_sync(sb,0) all over the place looks fishy as hell.
-There's only one place that calls it with a non-zero argument.
+All the calls to fs_sync() need to be looked at, particularly in the
+context of EXFAT_DELAYED_SYNC. Currently, if that's defined, we only
+flush to disk when sync() gets called. We should be doing at least
+metadata flushes at appropriate times.
ffsTruncateFile - if (old_size <= new_size) {
That doesn't look right. How did it ever work? Are they relying on lazy
@@ -10,3 +24,46 @@ block allocation when actual writes happen? If nothing else, it never
does the 'fid->size = new_size' and do the inode update....
ffsSetAttr() is just dangling in the breeze, not wired up at all...
+
+Convert global mutexes to a per-superblock mutex.
+
+Right now, we load exactly one UTF-8 table. Check to see
+if that plays nice with different codepage and iocharset values
+for simultaneous mounts of different devices.
+
+exfat_rmdir() checks for -EBUSY but ffsRemoveDir() doesn't return it.
+In fact, there's a complete lack of -EBUSY testing anywhere.
+
+There are probably a few missing checks for -EEXIST
+
+check return codes of sync_dirty_buffer()
+
+Why is remove_file doing a num_entries++??
+
+Double check a lot of can't-happen parameter checks (for null pointers for
+things that have only one call site and can't pass a null, etc).
+
+All the DEBUG stuff can probably be tossed, including the ioctl(). Either
+that, or convert to a proper fault-injection system.
+
+exfat_remount does exactly one thing. Fix to actually deal with remount
+options, particularly handling R/O correctly. For that matter, allow
+R/O mounts in the first place.
+
+Figure out why the VFAT code used multi_sector_(read|write) but the
+exfat code doesn't use it. The difference matters on SSDs with wear leveling.
+
+exfat_fat_sync(), exfat_buf_sync(), and sync_alloc_bitmap()
+aren't called anyplace....
+
+Create helper function for exfat_set_entry_time() and exfat_set_entry_type()
+because it's sort of ugly to have some code calling the same function directly
+and other code calling through the fs_func struct pointers...
+
+clean up the remaining vol_type checks, which are of two types:
+some are ?: operators with magic numbers, and the rest are places
+where we're doing stuff with '.' and '..'.
+
+Patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ Valdis Kletnieks <valdis.kletnieks@vt.edu>
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 3abab33e932c..2aac1e000977 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -30,6 +30,8 @@
#undef DEBUG
#endif
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
#define DENTRY_SIZE 32 /* dir entry size */
#define DENTRY_SIZE_BITS 5
@@ -206,28 +208,6 @@ static inline u16 get_row_index(u16 i)
#define FM_REGULAR 0x00
#define FM_SYMLINK 0x40
-/* return values */
-#define FFS_SUCCESS 0
-#define FFS_MEDIAERR 1
-#define FFS_FORMATERR 2
-#define FFS_MOUNTED 3
-#define FFS_NOTMOUNTED 4
-#define FFS_ALIGNMENTERR 5
-#define FFS_SEMAPHOREERR 6
-#define FFS_INVALIDPATH 7
-#define FFS_INVALIDFID 8
-#define FFS_NOTFOUND 9
-#define FFS_FILEEXIST 10
-#define FFS_PERMISSIONERR 11
-#define FFS_NOTOPENED 12
-#define FFS_MAXOPENED 13
-#define FFS_FULL 14
-#define FFS_EOF 15
-#define FFS_DIRBUSY 16
-#define FFS_MEMORYERR 17
-#define FFS_NAMETOOLONG 18
-#define FFS_ERROR 19
-
#define NUM_UPCASE 2918
#define DOS_CUR_DIR_NAME ". "
@@ -618,7 +598,7 @@ struct fs_info_t {
u32 dev_ejected; /* block device operation error flag */
struct fs_func *fs_func;
- struct semaphore v_sem;
+ struct mutex v_mutex;
/* FAT cache */
struct buf_cache_t FAT_cache_array[FAT_CACHE_SIZE];
@@ -749,14 +729,7 @@ static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
/* NLS management function */
u16 nls_upper(struct super_block *sb, u16 a);
-int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b);
int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b);
-void nls_uniname_to_dosname(struct super_block *sb,
- struct dos_name_t *p_dosname,
- struct uni_name_t *p_uniname, bool *p_lossy);
-void nls_dosname_to_uniname(struct super_block *sb,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
struct uni_name_t *p_uniname);
void nls_cstring_to_uniname(struct super_block *sb,
@@ -764,48 +737,33 @@ void nls_cstring_to_uniname(struct super_block *sb,
bool *p_lossy);
/* buffer cache management */
-void buf_init(struct super_block *sb);
-void buf_shutdown(struct super_block *sb);
-int FAT_read(struct super_block *sb, u32 loc, u32 *content);
-s32 FAT_write(struct super_block *sb, u32 loc, u32 content);
-u8 *FAT_getblk(struct super_block *sb, sector_t sec);
-void FAT_modify(struct super_block *sb, sector_t sec);
-void FAT_release_all(struct super_block *sb);
-void FAT_sync(struct super_block *sb);
-u8 *buf_getblk(struct super_block *sb, sector_t sec);
-void buf_modify(struct super_block *sb, sector_t sec);
-void buf_lock(struct super_block *sb, sector_t sec);
-void buf_unlock(struct super_block *sb, sector_t sec);
-void buf_release(struct super_block *sb, sector_t sec);
-void buf_release_all(struct super_block *sb);
-void buf_sync(struct super_block *sb);
+void exfat_buf_init(struct super_block *sb);
+void exfat_buf_shutdown(struct super_block *sb);
+int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content);
+s32 exfat_fat_write(struct super_block *sb, u32 loc, u32 content);
+u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec);
+void exfat_fat_modify(struct super_block *sb, sector_t sec);
+void exfat_fat_release_all(struct super_block *sb);
+void exfat_fat_sync(struct super_block *sb);
+u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec);
+void exfat_buf_modify(struct super_block *sb, sector_t sec);
+void exfat_buf_lock(struct super_block *sb, sector_t sec);
+void exfat_buf_unlock(struct super_block *sb, sector_t sec);
+void exfat_buf_release(struct super_block *sb, sector_t sec);
+void exfat_buf_release_all(struct super_block *sb);
+void exfat_buf_sync(struct super_block *sb);
/* fs management functions */
void fs_set_vol_flags(struct super_block *sb, u32 new_flag);
void fs_error(struct super_block *sb);
/* cluster management functions */
-s32 clear_cluster(struct super_block *sb, u32 clu);
-s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
-s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
-void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
-void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
-u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain);
s32 count_num_clusters(struct super_block *sb, struct chain_t *dir);
-s32 fat_count_used_clusters(struct super_block *sb);
-s32 exfat_count_used_clusters(struct super_block *sb);
void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len);
/* allocation bitmap management functions */
s32 load_alloc_bitmap(struct super_block *sb);
void free_alloc_bitmap(struct super_block *sb);
-s32 set_alloc_bitmap(struct super_block *sb, u32 clu);
-s32 clr_alloc_bitmap(struct super_block *sb, u32 clu);
-u32 test_alloc_bitmap(struct super_block *sb, u32 clu);
void sync_alloc_bitmap(struct super_block *sb);
/* upcase table management functions */
@@ -813,63 +771,8 @@ s32 load_upcase_table(struct super_block *sb);
void free_upcase_table(struct super_block *sb);
/* dir entry management functions */
-u32 fat_get_entry_type(struct dentry_t *p_entry);
-u32 exfat_get_entry_type(struct dentry_t *p_entry);
-void fat_set_entry_type(struct dentry_t *p_entry, u32 type);
-void exfat_set_entry_type(struct dentry_t *p_entry, u32 type);
-u32 fat_get_entry_attr(struct dentry_t *p_entry);
-u32 exfat_get_entry_attr(struct dentry_t *p_entry);
-void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
-void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
-u8 fat_get_entry_flag(struct dentry_t *p_entry);
-u8 exfat_get_entry_flag(struct dentry_t *p_entry);
-void fat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
-void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
-u32 fat_get_entry_clu0(struct dentry_t *p_entry);
-u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
-void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
-void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
-u64 fat_get_entry_size(struct dentry_t *p_entry);
-u64 exfat_get_entry_size(struct dentry_t *p_entry);
-void fat_set_entry_size(struct dentry_t *p_entry, u64 size);
-void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
struct timestamp_t *tm_current(struct timestamp_t *tm);
-void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- u32 type, u32 start_clu, u64 size);
-s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, u32 type, u32 start_clu, u64 size);
-s32 fat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
-s32 exfat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
-void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu);
-void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum,
- u16 *uniname);
-void init_file_entry(struct file_dentry_t *ep, u32 type);
-void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu,
- u64 size);
-void init_name_entry(struct name_dentry_t *ep, u16 *uniname);
-void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries);
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries);
-
-s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- sector_t *sector, s32 *offset);
-struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
- s32 offset);
+
struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
s32 entry, sector_t *sector);
struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
@@ -877,24 +780,6 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
u32 type,
struct dentry_t **file_ep);
void release_entry_set(struct entry_set_cache_t *es);
-s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es);
-s32 write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es,
- struct dentry_t *ep, u32 count);
-s32 search_deleted_or_unused_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 num_entries);
-s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir,
- s32 num_entries);
-s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
-s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry);
-s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry);
s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
u32 type);
void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
@@ -907,36 +792,13 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir);
s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 *entries,
struct dos_name_t *p_dosname);
-void get_uni_name_from_dos_entry(struct super_block *sb,
- struct dos_dentry_t *ep,
- struct uni_name_t *p_uniname, u8 mode);
-void fat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
-void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
-s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep,
- u16 *uniname, s32 order);
-s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep,
- u16 *uniname, s32 order);
-s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
- struct dos_name_t *p_dosname);
-void fat_attach_count_to_dos_name(u8 *dosname, s32 count);
-s32 fat_calc_num_entries(struct uni_name_t *p_uniname);
-s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
-u8 calc_checksum_1byte(void *data, s32 len, u8 chksum);
u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type);
-u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type);
/* name resolution functions */
s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
struct uni_name_t *p_uniname);
-s32 resolve_name(u8 *name, u8 **arg);
/* file operation functions */
-s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
-s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
s32 create_dir(struct inode *inode, struct chain_t *p_dir,
struct uni_name_t *p_uniname, struct file_id_t *fid);
@@ -959,13 +821,13 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
int multi_sector_write(struct super_block *sb, sector_t sec,
struct buffer_head *bh, s32 num_secs, bool sync);
-void bdev_open(struct super_block *sb);
-void bdev_close(struct super_block *sb);
-int bdev_read(struct super_block *sb, sector_t secno,
+void exfat_bdev_open(struct super_block *sb);
+void exfat_bdev_close(struct super_block *sb);
+int exfat_bdev_read(struct super_block *sb, sector_t secno,
struct buffer_head **bh, u32 num_secs, bool read);
-int bdev_write(struct super_block *sb, sector_t secno,
+int exfat_bdev_write(struct super_block *sb, sector_t secno,
struct buffer_head *bh, u32 num_secs, bool sync);
-int bdev_sync(struct super_block *sb);
+int exfat_bdev_sync(struct super_block *sb);
extern const u8 uni_upcase[];
#endif /* _EXFAT_H */
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
index 81d20e6241c6..7bcd98b13109 100644
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -8,7 +8,7 @@
#include <linux/fs.h>
#include "exfat.h"
-void bdev_open(struct super_block *sb)
+void exfat_bdev_open(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -23,14 +23,14 @@ void bdev_open(struct super_block *sb)
p_bd->opened = true;
}
-void bdev_close(struct super_block *sb)
+void exfat_bdev_close(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
p_bd->opened = false;
}
-int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
+int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
u32 num_secs, bool read)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -40,11 +40,11 @@ int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
if (*bh)
__brelse(*bh);
@@ -62,10 +62,10 @@ int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
WARN(!p_fs->dev_ejected,
"[EXFAT] No bh, device seems wrong or to be ejected.\n");
- return FFS_MEDIAERR;
+ return -EIO;
}
-int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
+int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
u32 num_secs, bool sync)
{
s32 count;
@@ -77,11 +77,11 @@ int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
if (secno == bh->b_blocknr) {
lock_buffer(bh);
@@ -89,7 +89,7 @@ int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
mark_buffer_dirty(bh);
unlock_buffer(bh);
if (sync && (sync_dirty_buffer(bh) != 0))
- return FFS_MEDIAERR;
+ return -EIO;
} else {
count = num_secs << p_bd->sector_size_bits;
@@ -115,10 +115,10 @@ no_bh:
WARN(!p_fs->dev_ejected,
"[EXFAT] No bh, device seems wrong or to be ejected.\n");
- return FFS_MEDIAERR;
+ return -EIO;
}
-int bdev_sync(struct super_block *sb)
+int exfat_bdev_sync(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
#ifdef CONFIG_EXFAT_KERNEL_DEBUG
@@ -126,11 +126,11 @@ int bdev_sync(struct super_block *sb)
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
return sync_blockdev(sb->s_bdev);
}
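The exfat_blkdev.c hunks above are part of replacing the driver-private FFS_* status codes with standard negative errno values throughout the staging exfat code. A minimal sketch of a call site once that is done (the helper is the renamed one from this patch; the surrounding code is hypothetical):

	int err;

	err = exfat_bdev_read(sb, secno, &bh, 1, true);
	if (err)		/* -EIO, -ENODEV, ... straight from the helper */
		return err;	/* no FFS_* -> errno translation needed */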
diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c
index e1b001718709..3fd5604058a9 100644
--- a/drivers/staging/exfat/exfat_cache.c
+++ b/drivers/staging/exfat/exfat_cache.c
@@ -12,8 +12,8 @@
#define DIRTYBIT 0x02
/* Local variables */
-static DEFINE_SEMAPHORE(f_sem);
-static DEFINE_SEMAPHORE(b_sem);
+static DEFINE_MUTEX(f_mutex);
+static DEFINE_MUTEX(b_mutex);
static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec)
{
@@ -128,7 +128,7 @@ static void buf_cache_remove_hash(struct buf_cache_t *bp)
(bp->hash_next)->hash_prev = bp->hash_prev;
}
-void buf_init(struct super_block *sb)
+void exfat_buf_init(struct super_block *sb)
{
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -189,11 +189,11 @@ void buf_init(struct super_block *sb)
buf_cache_insert_hash(sb, &p_fs->buf_cache_array[i]);
}
-void buf_shutdown(struct super_block *sb)
+void exfat_buf_shutdown(struct super_block *sb)
{
}
-static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
+static int __exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
{
s32 off;
u32 _content;
@@ -202,107 +202,22 @@ static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_fs->vol_type == FAT12) {
- sec = p_fs->FAT1_start_sector +
- ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
- off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
- if (off == (p_bd->sector_size - 1)) {
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
+ fat_sector = exfat_fat_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
- _content = (u32)fat_sector[off];
+ fat_entry = &fat_sector[off];
+ _content = GET32_A(fat_entry);
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
-
- _content |= (u32)fat_sector[0] << 8;
- } else {
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
- _content = GET16(fat_entry);
- }
-
- if (loc & 1)
- _content >>= 4;
-
- _content &= 0x00000FFF;
-
- if (_content >= CLUSTER_16(0x0FF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == FAT16) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 1));
- off = (loc << 1) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- _content = GET16_A(fat_entry);
-
- _content &= 0x0000FFFF;
-
- if (_content >= CLUSTER_16(0xFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == FAT32) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- _content = GET32_A(fat_entry);
-
- _content &= 0x0FFFFFFF;
-
- if (_content >= CLUSTER_32(0x0FFFFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == EXFAT) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
- _content = GET32_A(fat_entry);
-
- if (_content >= CLUSTER_32(0xFFFFFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
+ if (_content >= CLUSTER_32(0xFFFFFFF8)) {
+ *content = CLUSTER_32(~0);
return 0;
}
-
- /* Unknown volume type, throw in the towel and go home */
- *content = CLUSTER_32(~0);
+ *content = CLUSTER_32(_content);
return 0;
}
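With the FAT12/16/32 branches gone, every FAT entry is a 32-bit value and the sec/off arithmetic above is all that is left. A worked example, assuming 512-byte sectors (sector_size_bits = 9, so 128 four-byte entries per FAT sector):

	loc = 300 (cluster index)
	sec = FAT1_start_sector + (300 >> (9 - 2)) = FAT1_start_sector + 2
	off = (300 << 2) & 511 = 1200 & 511 = 176	/* byte offset within that sector */

i.e. entry 300 lives 176 bytes into the third FAT sector, exactly where 300 * 4 = 1200 bytes from the start of the FAT would put it.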
@@ -311,18 +226,18 @@ static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
* returns 0 on success
* -1 on error
*/
-int FAT_read(struct super_block *sb, u32 loc, u32 *content)
+int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
{
s32 ret;
- down(&f_sem);
- ret = __FAT_read(sb, loc, content);
- up(&f_sem);
+ mutex_lock(&f_mutex);
+ ret = __exfat_fat_read(sb, loc, content);
+ mutex_unlock(&f_mutex);
return ret;
}
-static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content)
+static s32 __exfat_fat_write(struct super_block *sb, u32 loc, u32 content)
{
s32 off;
sector_t sec;
@@ -330,118 +245,34 @@ static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content)
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_fs->vol_type == FAT12) {
- content &= 0x00000FFF;
-
- sec = p_fs->FAT1_start_sector +
- ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
- off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- if (loc & 1) { /* odd */
- content <<= 4;
-
- if (off == (p_bd->sector_size - 1)) {
- fat_sector[off] = (u8)(content |
- (fat_sector[off] &
- 0x0F));
- FAT_modify(sb, sec);
-
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
-
- fat_sector[0] = (u8)(content >> 8);
- } else {
- fat_entry = &fat_sector[off];
- content |= GET16(fat_entry) & 0x000F;
-
- SET16(fat_entry, content);
- }
- } else { /* even */
- fat_sector[off] = (u8)(content);
-
- if (off == (p_bd->sector_size - 1)) {
- fat_sector[off] = (u8)(content);
- FAT_modify(sb, sec);
-
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
- fat_sector[0] = (u8)((fat_sector[0] & 0xF0) |
- (content >> 8));
- } else {
- fat_entry = &fat_sector[off];
- content |= GET16(fat_entry) & 0xF000;
-
- SET16(fat_entry, content);
- }
- }
- }
-
- else if (p_fs->vol_type == FAT16) {
- content &= 0x0000FFFF;
-
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 1));
- off = (loc << 1) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- SET16_A(fat_entry, content);
- } else if (p_fs->vol_type == FAT32) {
- content &= 0x0FFFFFFF;
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
+ fat_sector = exfat_fat_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
+ fat_entry = &fat_sector[off];
- fat_entry = &fat_sector[off];
-
- content |= GET32_A(fat_entry) & 0xF0000000;
-
- SET32_A(fat_entry, content);
- } else { /* p_fs->vol_type == EXFAT */
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- SET32_A(fat_entry, content);
- }
+ SET32_A(fat_entry, content);
- FAT_modify(sb, sec);
+ exfat_fat_modify(sb, sec);
return 0;
}
-int FAT_write(struct super_block *sb, u32 loc, u32 content)
+int exfat_fat_write(struct super_block *sb, u32 loc, u32 content)
{
s32 ret;
- down(&f_sem);
- ret = __FAT_write(sb, loc, content);
- up(&f_sem);
+ mutex_lock(&f_mutex);
+ ret = __exfat_fat_write(sb, loc, content);
+ mutex_unlock(&f_mutex);
return ret;
}
-u8 *FAT_getblk(struct super_block *sb, sector_t sec)
+u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -462,7 +293,7 @@ u8 *FAT_getblk(struct super_block *sb, sector_t sec)
FAT_cache_insert_hash(sb, bp);
- if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) {
FAT_cache_remove_hash(bp);
bp->drv = -1;
bp->sec = ~0;
@@ -476,7 +307,7 @@ u8 *FAT_getblk(struct super_block *sb, sector_t sec)
return bp->buf_bh->b_data;
}
-void FAT_modify(struct super_block *sb, sector_t sec)
+void exfat_fat_modify(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
@@ -485,12 +316,12 @@ void FAT_modify(struct super_block *sb, sector_t sec)
sector_write(sb, sec, bp->buf_bh, 0);
}
-void FAT_release_all(struct super_block *sb)
+void exfat_fat_release_all(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&f_sem);
+ mutex_lock(&f_mutex);
bp = p_fs->FAT_cache_lru_list.next;
while (bp != &p_fs->FAT_cache_lru_list) {
@@ -507,15 +338,15 @@ void FAT_release_all(struct super_block *sb)
bp = bp->next;
}
- up(&f_sem);
+ mutex_unlock(&f_mutex);
}
-void FAT_sync(struct super_block *sb)
+void exfat_fat_sync(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&f_sem);
+ mutex_lock(&f_mutex);
bp = p_fs->FAT_cache_lru_list.next;
while (bp != &p_fs->FAT_cache_lru_list) {
@@ -526,7 +357,7 @@ void FAT_sync(struct super_block *sb)
bp = bp->next;
}
- up(&f_sem);
+ mutex_unlock(&f_mutex);
}
static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec)
@@ -561,7 +392,7 @@ static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec)
return bp;
}
-static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
+static u8 *__exfat_buf_getblk(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -582,7 +413,7 @@ static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
buf_cache_insert_hash(sb, bp);
- if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) {
buf_cache_remove_hash(bp);
bp->drv = -1;
bp->sec = ~0;
@@ -596,22 +427,22 @@ static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
return bp->buf_bh->b_data;
}
-u8 *buf_getblk(struct super_block *sb, sector_t sec)
+u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec)
{
u8 *buf;
- down(&b_sem);
- buf = __buf_getblk(sb, sec);
- up(&b_sem);
+ mutex_lock(&b_mutex);
+ buf = __exfat_buf_getblk(sb, sec);
+ mutex_unlock(&b_mutex);
return buf;
}
-void buf_modify(struct super_block *sb, sector_t sec)
+void exfat_buf_modify(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -620,14 +451,14 @@ void buf_modify(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_lock(struct super_block *sb, sector_t sec)
+void exfat_buf_lock(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -636,14 +467,14 @@ void buf_lock(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_unlock(struct super_block *sb, sector_t sec)
+void exfat_buf_unlock(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -652,15 +483,15 @@ void buf_unlock(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_release(struct super_block *sb, sector_t sec)
+void exfat_buf_release(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp)) {
@@ -676,15 +507,15 @@ void buf_release(struct super_block *sb, sector_t sec)
move_to_lru(bp, &p_fs->buf_cache_lru_list);
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_release_all(struct super_block *sb)
+void exfat_buf_release_all(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = p_fs->buf_cache_lru_list.next;
while (bp != &p_fs->buf_cache_lru_list) {
@@ -701,15 +532,15 @@ void buf_release_all(struct super_block *sb)
bp = bp->next;
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_sync(struct super_block *sb)
+void exfat_buf_sync(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = p_fs->buf_cache_lru_list.next;
while (bp != &p_fs->buf_cache_lru_list) {
@@ -720,5 +551,5 @@ void buf_sync(struct super_block *sb)
bp = bp->next;
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
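The f_sem/b_sem changes above (and v_sem in exfat.h) are a straight conversion of binary semaphores that were only ever used for mutual exclusion into mutexes, which is the preferred primitive and gets lockdep coverage. A minimal sketch of the pattern, with a hypothetical helper under the lock:

static DEFINE_MUTEX(example_mutex);	/* was: static DEFINE_SEMAPHORE(example_sem); */

int locked_op(struct super_block *sb)
{
	int ret;

	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	ret = do_locked_work(sb);	/* hypothetical helper under the lock */
	mutex_unlock(&example_mutex);	/* was: up(&example_sem); */

	return ret;
}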
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index 79174e5c4145..d2d3447083c7 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -20,15 +20,6 @@ static void __set_sb_dirty(struct super_block *sb)
static u8 name_buf[MAX_PATH_LENGTH * MAX_CHARSET_SIZE];
-static char *reserved_names[] = {
- "AUX ", "CON ", "NUL ", "PRN ",
- "COM1 ", "COM2 ", "COM3 ", "COM4 ",
- "COM5 ", "COM6 ", "COM7 ", "COM8 ", "COM9 ",
- "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ",
- "LPT5 ", "LPT6 ", "LPT7 ", "LPT8 ", "LPT9 ",
- NULL
-};
-
static u8 free_bit[] = {
0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */
0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */
@@ -99,25 +90,23 @@ void fs_set_vol_flags(struct super_block *sb, u32 new_flag)
p_fs->vol_flag = new_flag;
- if (p_fs->vol_type == EXFAT) {
- if (!p_fs->pbr_bh) {
- if (sector_read(sb, p_fs->PBR_sector,
- &p_fs->pbr_bh, 1) != FFS_SUCCESS)
- return;
- }
+ if (!p_fs->pbr_bh) {
+ if (sector_read(sb, p_fs->PBR_sector,
+ &p_fs->pbr_bh, 1) != 0)
+ return;
+ }
- p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
- p_bpb = (struct bpbex_t *)p_pbr->bpb;
- SET16(p_bpb->vol_flags, (u16)new_flag);
+ p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
+ p_bpb = (struct bpbex_t *)p_pbr->bpb;
+ SET16(p_bpb->vol_flags, (u16)new_flag);
- /* XXX duyoung
- * what can we do here? (cuz fs_set_vol_flags() is void)
- */
- if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
- sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
- else
- sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
- }
+ /* XXX duyoung
+ * what can we do here? (cuz fs_set_vol_flags() is void)
+ */
+ if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
+ else
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
}
void fs_error(struct super_block *sb)
@@ -136,10 +125,10 @@ void fs_error(struct super_block *sb)
* Cluster Management Functions
*/
-s32 clear_cluster(struct super_block *sb, u32 clu)
+static s32 clear_cluster(struct super_block *sb, u32 clu)
{
sector_t s, n;
- s32 ret = FFS_SUCCESS;
+ s32 ret = 0;
struct buffer_head *tmp_bh = NULL;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -154,12 +143,12 @@ s32 clear_cluster(struct super_block *sb, u32 clu)
for (; s < n; s++) {
ret = sector_read(sb, s, &tmp_bh, 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
memset((char *)tmp_bh->b_data, 0x0, p_bd->sector_size);
ret = sector_write(sb, s, tmp_bh, 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
break;
}
@@ -167,61 +156,98 @@ s32 clear_cluster(struct super_block *sb, u32 clu)
return ret;
}
-s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain)
+static s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
{
- int i, num_clusters = 0;
- u32 new_clu, last_clu = CLUSTER_32(~0), read_clu;
+ int i, b;
+ sector_t sector;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- new_clu = p_chain->dir;
- if (new_clu == CLUSTER_32(~0))
- new_clu = p_fs->clu_srch_ptr;
- else if (new_clu >= p_fs->num_clusters)
- new_clu = 2;
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
- __set_sb_dirty(sb);
+ sector = START_SECTOR(p_fs->map_clu) + i;
- p_chain->dir = CLUSTER_32(~0);
+ exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b);
- for (i = 2; i < p_fs->num_clusters; i++) {
- if (FAT_read(sb, new_clu, &read_clu) != 0)
- return -1;
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+}
- if (read_clu == CLUSTER_32(0)) {
- if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
- return -1;
- num_clusters++;
+static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, b;
+ sector_t sector;
+#ifdef CONFIG_EXFAT_DISCARD
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+ int ret;
+#endif /* CONFIG_EXFAT_DISCARD */
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_chain->dir == CLUSTER_32(~0)) {
- p_chain->dir = new_clu;
- } else {
- if (FAT_write(sb, last_clu, new_clu) < 0)
- return -1;
- }
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
- last_clu = new_clu;
+ sector = START_SECTOR(p_fs->map_clu) + i;
- if ((--num_alloc) == 0) {
- p_fs->clu_srch_ptr = new_clu;
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters += num_clusters;
+ exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
- return num_clusters;
- }
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+
+#ifdef CONFIG_EXFAT_DISCARD
+ if (opts->discard) {
+ ret = sb_issue_discard(sb, START_SECTOR(clu),
+ (1 << p_fs->sectors_per_clu_bits),
+ GFP_NOFS, 0);
+ if (ret == -EOPNOTSUPP) {
+ pr_warn("discard not supported by device, disabling");
+ opts->discard = 0;
}
- if ((++new_clu) >= p_fs->num_clusters)
- new_clu = 2;
}
+#endif /* CONFIG_EXFAT_DISCARD */
+}
- p_fs->clu_srch_ptr = new_clu;
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters += num_clusters;
+static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, map_i, map_b;
+ u32 clu_base, clu_free;
+ u8 k, clu_mask;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- return num_clusters;
+ clu_base = (clu & ~(0x7)) + 2;
+ clu_mask = (1 << (clu - clu_base + 2)) - 1;
+
+ map_i = clu >> (p_bd->sector_size_bits + 3);
+ map_b = (clu >> 3) & p_bd->sector_size_mask;
+
+ for (i = 2; i < p_fs->num_clusters; i += 8) {
+ k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
+ if (clu_mask > 0) {
+ k |= clu_mask;
+ clu_mask = 0;
+ }
+ if (k < 0xFF) {
+ clu_free = clu_base + free_bit[k];
+ if (clu_free < p_fs->num_clusters)
+ return clu_free;
+ }
+ clu_base += 8;
+
+ if (((++map_b) >= p_bd->sector_size) ||
+ (clu_base >= p_fs->num_clusters)) {
+ if ((++map_i) >= p_fs->map_sectors) {
+ clu_base = 2;
+ map_i = 0;
+ }
+ map_b = 0;
+ }
+ }
+
+ return CLUSTER_32(~0);
}
-s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
struct chain_t *p_chain)
{
s32 num_clusters = 0;
@@ -251,22 +277,22 @@ s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
}
}
- if (set_alloc_bitmap(sb, new_clu - 2) != FFS_SUCCESS)
- return -1;
+ if (set_alloc_bitmap(sb, new_clu - 2) != 0)
+ return -EIO;
num_clusters++;
if (p_chain->flags == 0x01) {
- if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
- return -1;
+ if (exfat_fat_write(sb, new_clu, CLUSTER_32(~0)) < 0)
+ return -EIO;
}
if (p_chain->dir == CLUSTER_32(~0)) {
p_chain->dir = new_clu;
} else {
if (p_chain->flags == 0x01) {
- if (FAT_write(sb, last_clu, new_clu) < 0)
- return -1;
+ if (exfat_fat_write(sb, last_clu, new_clu) < 0)
+ return -EIO;
}
}
last_clu = new_clu;
@@ -300,48 +326,7 @@ s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
return num_clusters;
}
-void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse)
-{
- s32 num_clusters = 0;
- u32 clu, prev;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int i;
- sector_t sector;
-
- if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
- return;
- __set_sb_dirty(sb);
- clu = p_chain->dir;
-
- if (p_chain->size <= 0)
- return;
-
- do {
- if (p_fs->dev_ejected)
- break;
-
- if (do_relse) {
- sector = START_SECTOR(clu);
- for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
- }
-
- prev = clu;
- if (FAT_read(sb, clu, &clu) == -1)
- break;
-
- if (FAT_write(sb, prev, CLUSTER_32(0)) < 0)
- break;
- num_clusters++;
-
- } while (clu != CLUSTER_32(~0));
-
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters -= num_clusters;
-}
-
-void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+static void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
s32 do_relse)
{
s32 num_clusters = 0;
@@ -367,10 +352,10 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
if (do_relse) {
sector = START_SECTOR(clu);
for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
+ exfat_buf_release(sb, sector + i);
}
- if (clr_alloc_bitmap(sb, clu - 2) != FFS_SUCCESS)
+ if (clr_alloc_bitmap(sb, clu - 2) != 0)
break;
clu++;
@@ -384,13 +369,13 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
if (do_relse) {
sector = START_SECTOR(clu);
for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
+ exfat_buf_release(sb, sector + i);
}
- if (clr_alloc_bitmap(sb, clu - 2) != FFS_SUCCESS)
+ if (clr_alloc_bitmap(sb, clu - 2) != 0)
break;
- if (FAT_read(sb, clu, &clu) == -1)
+ if (exfat_fat_read(sb, clu, &clu) == -1)
break;
num_clusters++;
} while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0)));
@@ -400,7 +385,7 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
p_fs->used_clusters -= num_clusters;
}
-u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
+static u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
{
u32 clu, next;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -410,7 +395,7 @@ u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
if (p_chain->flags == 0x03) {
clu += p_chain->size - 1;
} else {
- while ((FAT_read(sb, clu, &next) == 0) &&
+ while ((exfat_fat_read(sb, clu, &next) == 0) &&
(next != CLUSTER_32(~0))) {
if (p_fs->dev_ejected)
break;
@@ -437,7 +422,7 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
} else {
for (i = 2; i < p_fs->num_clusters; i++) {
count++;
- if (FAT_read(sb, clu, &clu) != 0)
+ if (exfat_fat_read(sb, clu, &clu) != 0)
return 0;
if (clu == CLUSTER_32(~0))
break;
@@ -447,30 +432,15 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
return count;
}
-s32 fat_count_used_clusters(struct super_block *sb)
-{
- int i, count = 0;
- u32 clu;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (i = 2; i < p_fs->num_clusters; i++) {
- if (FAT_read(sb, i, &clu) != 0)
- break;
- if (clu != CLUSTER_32(0))
- count++;
- }
-
- return count;
-}
-
-s32 exfat_count_used_clusters(struct super_block *sb)
+static s32 exfat_count_used_clusters(struct super_block *sb)
{
int i, map_i, map_b, count = 0;
u8 k;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- map_i = map_b = 0;
+ map_i = 0;
+ map_b = 0;
for (i = 2; i < p_fs->num_clusters; i += 8) {
k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
@@ -491,12 +461,12 @@ void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len)
return;
while (len > 1) {
- if (FAT_write(sb, chain, chain + 1) < 0)
+ if (exfat_fat_write(sb, chain, chain + 1) < 0)
break;
chain++;
len--;
}
- FAT_write(sb, chain, CLUSTER_32(~0));
+ exfat_fat_write(sb, chain, CLUSTER_32(~0));
}
/*
@@ -525,7 +495,7 @@ s32 load_alloc_bitmap(struct super_block *sb)
ep = (struct bmap_dentry_t *)get_entry_in_dir(sb, &clu,
i, NULL);
if (!ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
@@ -544,14 +514,14 @@ s32 load_alloc_bitmap(struct super_block *sb)
sizeof(struct buffer_head *),
GFP_KERNEL);
if (!p_fs->vol_amap)
- return FFS_MEMORYERR;
+ return -ENOMEM;
sector = START_SECTOR(p_fs->map_clu);
for (j = 0; j < p_fs->map_sectors; j++) {
p_fs->vol_amap[j] = NULL;
- ret = sector_read(sb, sector + j, &(p_fs->vol_amap[j]), 1);
- if (ret != FFS_SUCCESS) {
+ ret = sector_read(sb, sector + j, &p_fs->vol_amap[j], 1);
+ if (ret != 0) {
/* release all buffers and free vol_amap */
i = 0;
while (i < j)
@@ -564,15 +534,15 @@ s32 load_alloc_bitmap(struct super_block *sb)
}
p_fs->pbr_bh = NULL;
- return FFS_SUCCESS;
+ return 0;
}
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
- return FFS_FORMATERR;
+ return -EFSCORRUPTED;
}
void free_alloc_bitmap(struct super_block *sb)
@@ -589,97 +559,6 @@ void free_alloc_bitmap(struct super_block *sb)
p_fs->vol_amap = NULL;
}
-s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, b;
- sector_t sector;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- i = clu >> (p_bd->sector_size_bits + 3);
- b = clu & ((p_bd->sector_size << 3) - 1);
-
- sector = START_SECTOR(p_fs->map_clu) + i;
-
- exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b);
-
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-}
-
-s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, b;
- sector_t sector;
-#ifdef CONFIG_EXFAT_DISCARD
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_mount_options *opts = &sbi->options;
- int ret;
-#endif /* CONFIG_EXFAT_DISCARD */
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- i = clu >> (p_bd->sector_size_bits + 3);
- b = clu & ((p_bd->sector_size << 3) - 1);
-
- sector = START_SECTOR(p_fs->map_clu) + i;
-
- exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
-
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-
-#ifdef CONFIG_EXFAT_DISCARD
- if (opts->discard) {
- ret = sb_issue_discard(sb, START_SECTOR(clu),
- (1 << p_fs->sectors_per_clu_bits),
- GFP_NOFS, 0);
- if (ret == -EOPNOTSUPP) {
- pr_warn("discard not supported by device, disabling");
- opts->discard = 0;
- }
- }
-#endif /* CONFIG_EXFAT_DISCARD */
-}
-
-u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, map_i, map_b;
- u32 clu_base, clu_free;
- u8 k, clu_mask;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- clu_base = (clu & ~(0x7)) + 2;
- clu_mask = (1 << (clu - clu_base + 2)) - 1;
-
- map_i = clu >> (p_bd->sector_size_bits + 3);
- map_b = (clu >> 3) & p_bd->sector_size_mask;
-
- for (i = 2; i < p_fs->num_clusters; i += 8) {
- k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
- if (clu_mask > 0) {
- k |= clu_mask;
- clu_mask = 0;
- }
- if (k < 0xFF) {
- clu_free = clu_base + free_bit[k];
- if (clu_free < p_fs->num_clusters)
- return clu_free;
- }
- clu_base += 8;
-
- if (((++map_b) >= p_bd->sector_size) ||
- (clu_base >= p_fs->num_clusters)) {
- if ((++map_i) >= p_fs->map_sectors) {
- clu_base = 2;
- map_i = 0;
- }
- map_b = 0;
- }
- }
-
- return CLUSTER_32(~0);
-}
-
void sync_alloc_bitmap(struct super_block *sb)
{
int i;
@@ -698,7 +577,7 @@ void sync_alloc_bitmap(struct super_block *sb)
static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
u32 num_sectors, u32 utbl_checksum)
{
- int i, ret = FFS_ERROR;
+ int i, ret = -EINVAL;
u32 j;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -712,15 +591,15 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
u32 checksum = 0;
- upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
- GFP_KERNEL);
+ upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL);
+ p_fs->vol_utbl = upcase_table;
if (!upcase_table)
- return FFS_MEMORYERR;
+ return -ENOMEM;
memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
while (sector < end_sector) {
ret = sector_read(sb, sector, &tmp_bh, 1);
- if (ret != FFS_SUCCESS) {
+ if (ret != 0) {
pr_debug("sector read (0x%llX)fail\n",
(unsigned long long)sector);
goto error;
@@ -755,7 +634,7 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
sizeof(u16), GFP_KERNEL);
if (!upcase_table[col_index]) {
- ret = FFS_MEMORYERR;
+ ret = -ENOMEM;
goto error;
}
@@ -771,9 +650,9 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
if (index >= 0xFFFF && utbl_checksum == checksum) {
if (tmp_bh)
brelse(tmp_bh);
- return FFS_SUCCESS;
+ return 0;
}
- ret = FFS_ERROR;
+ ret = -EINVAL;
error:
if (tmp_bh)
brelse(tmp_bh);
@@ -783,7 +662,7 @@ error:
static s32 __load_default_upcase_table(struct super_block *sb)
{
- int i, ret = FFS_ERROR;
+ int i, ret = -EINVAL;
u32 j;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -792,10 +671,10 @@ static s32 __load_default_upcase_table(struct super_block *sb)
u16 uni = 0;
u16 **upcase_table;
- upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
- GFP_KERNEL);
+ upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL);
+ p_fs->vol_utbl = upcase_table;
if (!upcase_table)
- return FFS_MEMORYERR;
+ return -ENOMEM;
memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
for (i = 0; index <= 0xFFFF && i < NUM_UPCASE * 2; i += 2) {
@@ -818,7 +697,7 @@ static s32 __load_default_upcase_table(struct super_block *sb)
sizeof(u16),
GFP_KERNEL);
if (!upcase_table[col_index]) {
- ret = FFS_MEMORYERR;
+ ret = -ENOMEM;
goto error;
}
@@ -832,7 +711,7 @@ static s32 __load_default_upcase_table(struct super_block *sb)
}
if (index >= 0xFFFF)
- return FFS_SUCCESS;
+ return 0;
error:
/* FATAL error: default upcase table has error */
@@ -855,14 +734,14 @@ s32 load_upcase_table(struct super_block *sb)
clu.flags = 0x01;
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
+ return -EIO;
while (clu.dir != CLUSTER_32(~0)) {
for (i = 0; i < p_fs->dentries_per_clu; i++) {
ep = (struct case_dentry_t *)get_entry_in_dir(sb, &clu,
i, NULL);
if (!ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
@@ -877,12 +756,12 @@ s32 load_upcase_table(struct super_block *sb)
sector = START_SECTOR(tbl_clu);
num_sectors = ((tbl_size - 1) >> p_bd->sector_size_bits) + 1;
if (__load_upcase_table(sb, sector, num_sectors,
- GET32_A(ep->checksum)) != FFS_SUCCESS)
+ GET32_A(ep->checksum)) != 0)
break;
- return FFS_SUCCESS;
+ return 0;
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
/* load default upcase table */
return __load_default_upcase_table(sb);
@@ -906,29 +785,7 @@ void free_upcase_table(struct super_block *sb)
* Directory Entry Management Functions
*/
-u32 fat_get_entry_type(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- if (*(ep->name) == 0x0)
- return TYPE_UNUSED;
-
- else if (*(ep->name) == 0xE5)
- return TYPE_DELETED;
-
- else if (ep->attr == ATTR_EXTEND)
- return TYPE_EXTEND;
-
- else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_VOLUME)
- return TYPE_VOLUME;
-
- else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_SUBDIR)
- return TYPE_DIR;
-
- return TYPE_FILE;
-}
-
-u32 exfat_get_entry_type(struct dentry_t *p_entry)
+static u32 exfat_get_entry_type(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
@@ -973,30 +830,7 @@ u32 exfat_get_entry_type(struct dentry_t *p_entry)
return TYPE_BENIGN_SEC;
}
-void fat_set_entry_type(struct dentry_t *p_entry, u32 type)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- if (type == TYPE_UNUSED)
- *(ep->name) = 0x0;
-
- else if (type == TYPE_DELETED)
- *(ep->name) = 0xE5;
-
- else if (type == TYPE_EXTEND)
- ep->attr = ATTR_EXTEND;
-
- else if (type == TYPE_DIR)
- ep->attr = ATTR_SUBDIR;
-
- else if (type == TYPE_FILE)
- ep->attr = ATTR_ARCHIVE;
-
- else if (type == TYPE_SYMLINK)
- ep->attr = ATTR_ARCHIVE | ATTR_SYMLINK;
-}
-
-void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
+static void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
@@ -1026,109 +860,56 @@ void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
}
}
-u32 fat_get_entry_attr(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return (u32)ep->attr;
-}
-
-u32 exfat_get_entry_attr(struct dentry_t *p_entry)
+static u32 exfat_get_entry_attr(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
return (u32)GET16_A(ep->attr);
}
-void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- ep->attr = (u8)attr;
-}
-
-void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+static void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
SET16_A(ep->attr, (u16)attr);
}
-u8 fat_get_entry_flag(struct dentry_t *p_entry)
-{
- return 0x01;
-}
-
-u8 exfat_get_entry_flag(struct dentry_t *p_entry)
+static u8 exfat_get_entry_flag(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return ep->flags;
}
-void fat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
-{
-}
-
-void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+static void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
ep->flags = flags;
}
-u32 fat_get_entry_clu0(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return ((u32)GET16_A(ep->start_clu_hi) << 16) |
- GET16_A(ep->start_clu_lo);
-}
-
-u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
+static u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET32_A(ep->start_clu);
}
-void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
- SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
-}
-
-void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+static void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
SET32_A(ep->start_clu, start_clu);
}
-u64 fat_get_entry_size(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return (u64)GET32_A(ep->size);
-}
-
-u64 exfat_get_entry_size(struct dentry_t *p_entry)
+static u64 exfat_get_entry_size(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET64_A(ep->valid_size);
}
-void fat_set_entry_size(struct dentry_t *p_entry, u64 size)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- SET32_A(ep->size, (u32)size);
-}
-
-void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
+static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
@@ -1136,32 +917,7 @@ void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
SET64_A(ep->size, size);
}
-void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode)
-{
- u16 t = 0x00, d = 0x21;
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- switch (mode) {
- case TM_CREATE:
- t = GET16_A(ep->create_time);
- d = GET16_A(ep->create_date);
- break;
- case TM_MODIFY:
- t = GET16_A(ep->modify_time);
- d = GET16_A(ep->modify_date);
- break;
- }
-
- tp->sec = (t & 0x001F) << 1;
- tp->min = (t >> 5) & 0x003F;
- tp->hour = (t >> 11);
- tp->day = (d & 0x001F);
- tp->mon = (d >> 5) & 0x000F;
- tp->year = (d >> 9);
-}
-
-void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t = 0x00, d = 0x21;
@@ -1190,28 +946,7 @@ void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
tp->year = (d >> 9);
}
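For reference, both the removed fat_*_entry_time helpers and the exfat helpers above pack timestamps into two 16-bit fields using the classic FAT layout. A minimal standalone sketch of that packing (illustrative names only; the bit layout is taken from the code in this hunk):

#include <stdint.h>

struct ts { unsigned int sec, min, hour, day, mon, year; };

/* hhhhhmmm mmmsssss: hours 0-23, minutes 0-59, seconds stored in 2-second units */
static uint16_t pack_time(const struct ts *tp)
{
	return (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
}

/* yyyyyyym mmmddddd: year counted from the FAT epoch, month 1-12, day 1-31 */
static uint16_t pack_date(const struct ts *tp)
{
	return (tp->year << 9) | (tp->mon << 5) | tp->day;
}

static void unpack(uint16_t t, uint16_t d, struct ts *tp)
{
	tp->sec  = (t & 0x001F) << 1;
	tp->min  = (t >> 5) & 0x003F;
	tp->hour = t >> 11;
	tp->day  = d & 0x001F;
	tp->mon  = (d >> 5) & 0x000F;
	tp->year = d >> 9;
}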
-void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode)
-{
- u16 t, d;
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
- d = (tp->year << 9) | (tp->mon << 5) | tp->day;
-
- switch (mode) {
- case TM_CREATE:
- SET16_A(ep->create_time, t);
- SET16_A(ep->create_date, d);
- break;
- case TM_MODIFY:
- SET16_A(ep->modify_time, t);
- SET16_A(ep->modify_date, d);
- break;
- }
-}
-
-void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+static void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t, d;
@@ -1236,24 +971,46 @@ void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
}
}
-s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- u32 type, u32 start_clu, u64 size)
+static void init_file_entry(struct file_dentry_t *ep, u32 type)
{
- sector_t sector;
- struct dos_dentry_t *dos_ep;
+ struct timestamp_t tm, *tp;
- dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!dos_ep)
- return FFS_MEDIAERR;
+ exfat_set_entry_type((struct dentry_t *)ep, type);
- init_dos_entry(dos_ep, type, start_clu);
- buf_modify(sb, sector);
+ tp = tm_current(&tm);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
+ ep->create_time_ms = 0;
+ ep->modify_time_ms = 0;
+ ep->access_time_ms = 0;
+}
- return FFS_SUCCESS;
+static void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
+{
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
+ ep->flags = flags;
+ SET32_A(ep->start_clu, start_clu);
+ SET64_A(ep->valid_size, size);
+ SET64_A(ep->size, size);
}
-s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+static void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
+{
+ int i;
+
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
+ ep->flags = 0x0;
+
+ for (i = 0; i < 30; i++, i++) {
+ SET16_A(ep->unicode_0_14 + i, *uniname);
+ if (*uniname == 0x0)
+ break;
+ uniname++;
+ }
+}
+
+static s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
s32 entry, u32 type, u32 start_clu, u64 size)
{
sector_t sector;
@@ -1267,71 +1024,20 @@ s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
&sector);
if (!file_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
&sector);
if (!strm_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
init_file_entry(file_ep, type);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
init_strm_entry(strm_ep, flags, start_clu, size);
- buf_modify(sb, sector);
-
- return FFS_SUCCESS;
-}
-
-static s32 fat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname)
-{
- int i;
- sector_t sector;
- u8 chksum;
- u16 *uniname = p_uniname->name;
- struct dos_dentry_t *dos_ep;
- struct ext_dentry_t *ext_ep;
-
- dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!dos_ep)
- return FFS_MEDIAERR;
-
- dos_ep->lcase = p_dosname->name_case;
- memcpy(dos_ep->name, p_dosname->name, DOS_NAME_LENGTH);
- buf_modify(sb, sector);
-
- if ((--num_entries) > 0) {
- chksum = calc_checksum_1byte((void *)dos_ep->name,
- DOS_NAME_LENGTH, 0);
-
- for (i = 1; i < num_entries; i++) {
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb,
- p_dir,
- entry - i,
- &sector);
- if (!ext_ep)
- return FFS_MEDIAERR;
-
- init_ext_entry(ext_ep, i, chksum, uniname);
- buf_modify(sb, sector);
- uniname += 13;
- }
-
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
- entry - i,
- &sector);
- if (!ext_ep)
- return FFS_MEDIAERR;
+ exfat_buf_modify(sb, sector);
- init_ext_entry(ext_ep, i + 0x40, chksum, uniname);
- buf_modify(sb, sector);
- }
-
- return FFS_SUCCESS;
+ return 0;
}
static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
@@ -1349,160 +1055,39 @@ static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
&sector);
if (!file_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
file_ep->num_ext = (u8)(num_entries - 1);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
&sector);
if (!strm_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
strm_ep->name_len = p_uniname->name_len;
SET16_A(strm_ep->name_hash, p_uniname->name_hash);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
for (i = 2; i < num_entries; i++) {
name_ep = (struct name_dentry_t *)get_entry_in_dir(sb, p_dir,
entry + i,
&sector);
if (!name_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
init_name_entry(name_ep, uniname);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
uniname += 15;
}
update_dir_checksum(sb, p_dir, entry);
- return FFS_SUCCESS;
-}
-
-void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu)
-{
- struct timestamp_t tm, *tp;
-
- fat_set_entry_type((struct dentry_t *)ep, type);
- SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
- SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
- SET32_A(ep->size, 0);
-
- tp = tm_current(&tm);
- fat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
- fat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
- SET16_A(ep->access_date, 0);
- ep->create_time_ms = 0;
-}
-
-void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum, u16 *uniname)
-{
- int i;
- bool end = false;
-
- fat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
- ep->order = (u8)order;
- ep->sysid = 0;
- ep->checksum = chksum;
- SET16_A(ep->start_clu, 0);
-
- for (i = 0; i < 10; i += 2) {
- if (!end) {
- SET16(ep->unicode_0_4 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16(ep->unicode_0_4 + i, 0xFFFF);
- }
- }
-
- for (i = 0; i < 12; i += 2) {
- if (!end) {
- SET16_A(ep->unicode_5_10 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16_A(ep->unicode_5_10 + i, 0xFFFF);
- }
- }
-
- for (i = 0; i < 4; i += 2) {
- if (!end) {
- SET16_A(ep->unicode_11_12 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16_A(ep->unicode_11_12 + i, 0xFFFF);
- }
- }
-}
-
-void init_file_entry(struct file_dentry_t *ep, u32 type)
-{
- struct timestamp_t tm, *tp;
-
- exfat_set_entry_type((struct dentry_t *)ep, type);
-
- tp = tm_current(&tm);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
- ep->create_time_ms = 0;
- ep->modify_time_ms = 0;
- ep->access_time_ms = 0;
-}
-
-void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
-{
- exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
- ep->flags = flags;
- SET32_A(ep->start_clu, start_clu);
- SET64_A(ep->valid_size, size);
- SET64_A(ep->size, size);
-}
-
-void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
-{
- int i;
-
- exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
- ep->flags = 0x0;
-
- for (i = 0; i < 30; i++, i++) {
- SET16_A(ep->unicode_0_14 + i, *uniname);
- if (*uniname == 0x0)
- break;
- uniname++;
- }
-}
-
-void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries)
-{
- int i;
- sector_t sector;
- struct dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (i = num_entries - 1; i >= order; i--) {
- ep = get_entry_in_dir(sb, p_dir, entry - i, &sector);
- if (!ep)
- return;
-
- p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
- buf_modify(sb, sector);
- }
+ return 0;
}
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries)
+static void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries)
{
int i;
sector_t sector;
@@ -1515,7 +1100,7 @@ void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
return;
p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
}
}
@@ -1533,7 +1118,7 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
if (!file_ep)
return;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
num_entries = (s32)file_ep->num_ext + 1;
chksum = calc_checksum_2byte((void *)file_ep, DENTRY_SIZE, 0,
@@ -1542,7 +1127,7 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
for (i = 1; i < num_entries; i++) {
ep = get_entry_in_dir(sb, p_dir, entry + i, NULL);
if (!ep) {
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
return;
}
@@ -1551,8 +1136,75 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
}
SET16_A(file_ep->checksum, chksum);
- buf_modify(sb, sector);
- buf_unlock(sb, sector);
+ exfat_buf_modify(sb, sector);
+ exfat_buf_unlock(sb, sector);
+}
+
+static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es,
+ sector_t sec, s32 off, u32 count)
+{
+ s32 num_entries, buf_off = (off - es->offset);
+ u32 remaining_byte_in_sector, copy_entries;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ u32 clu;
+ u8 *buf, *esbuf = (u8 *)&es->__buf;
+
+ pr_debug("%s entered es %p sec %llu off %d count %d\n",
+ __func__, es, (unsigned long long)sec, off, count);
+ num_entries = count;
+
+ while (num_entries) {
+ /* write per sector base */
+ remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
+ copy_entries = min_t(s32,
+ remaining_byte_in_sector >> DENTRY_SIZE_BITS,
+ num_entries);
+ buf = exfat_buf_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+ pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
+ pr_debug("copying %d entries from %p to sector %llu\n",
+ copy_entries, (esbuf + buf_off),
+ (unsigned long long)sec);
+ memcpy(buf + off, esbuf + buf_off,
+ copy_entries << DENTRY_SIZE_BITS);
+ exfat_buf_modify(sb, sec);
+ num_entries -= copy_entries;
+
+ if (num_entries) {
+ /* get next sector */
+ if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
+ clu = GET_CLUSTER_FROM_SECTOR(sec);
+ if (es->alloc_flag == 0x03) {
+ clu++;
+ } else {
+ if (exfat_fat_read(sb, clu, &clu) == -1)
+ goto err_out;
+ }
+ sec = START_SECTOR(clu);
+ } else {
+ sec++;
+ }
+ off = 0;
+ buf_off += copy_entries << DENTRY_SIZE_BITS;
+ }
+ }
+
+ pr_debug("%s exited successfully\n", __func__);
+ return 0;
+err_out:
+ pr_debug("%s failed\n", __func__);
+ return -EINVAL;
+}
+
+/* write back all entries in entry set */
+static s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
+{
+ return __write_partial_entries_in_entry_set(sb, es, es->sector,
+ es->offset,
+ es->num_entries);
}
void update_dir_checksum_with_entry_set(struct super_block *sb,
@@ -1562,7 +1214,7 @@ void update_dir_checksum_with_entry_set(struct super_block *sb,
u16 chksum = 0;
s32 chksum_type = CS_DIR_ENTRY, i;
- ep = (struct dentry_t *)&(es->__buf);
+ ep = (struct dentry_t *)&es->__buf;
for (i = 0; i < es->num_entries; i++) {
pr_debug("%s ep %p\n", __func__, ep);
chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
@@ -1571,7 +1223,7 @@ void update_dir_checksum_with_entry_set(struct super_block *sb,
chksum_type = CS_DEFAULT;
}
- ep = (struct dentry_t *)&(es->__buf);
+ ep = (struct dentry_t *)&es->__buf;
SET16_A(((struct file_dentry_t *)ep)->checksum, chksum);
write_whole_entry_set(sb, es);
}
@@ -1590,18 +1242,18 @@ static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
cur_clu += clu_offset;
} else {
while (clu_offset > 0) {
- if (FAT_read(sb, cur_clu, &cur_clu) == -1)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, cur_clu, &cur_clu) == -1)
+ return -EIO;
clu_offset--;
}
}
if (clu)
*clu = cur_clu;
- return FFS_SUCCESS;
+ return 0;
}
-s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+static s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
sector_t *sector, s32 *offset)
{
s32 off, ret;
@@ -1617,7 +1269,7 @@ s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
*sector += p_fs->root_start_sector;
} else {
ret = _walk_fat_chain(sb, p_dir, off, &clu);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
/* byte offset in cluster */
@@ -1630,20 +1282,7 @@ s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
*sector = off >> p_bd->sector_size_bits;
*sector += START_SECTOR(clu);
}
- return FFS_SUCCESS;
-}
-
-struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
- s32 offset)
-{
- u8 *buf;
-
- buf = buf_getblk(sb, sector);
-
- if (!buf)
- return NULL;
-
- return (struct dentry_t *)(buf + offset);
+ return 0;
}
struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
@@ -1653,10 +1292,10 @@ struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
sector_t sec;
u8 *buf;
- if (find_location(sb, p_dir, entry, &sec, &off) != FFS_SUCCESS)
+ if (find_location(sb, p_dir, entry, &sec, &off) != 0)
return NULL;
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
return NULL;
@@ -1703,11 +1342,11 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
size_t bufsize;
pr_debug("%s entered p_dir dir %u flags %x size %d\n",
- __func__, p_dir->dir, p_dir->flags, p_dir->size);
+ __func__, p_dir->dir, p_dir->flags, p_dir->size);
byte_offset = entry << DENTRY_SIZE_BITS;
ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return NULL;
/* byte offset in cluster */
@@ -1720,15 +1359,14 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
sec = byte_offset >> p_bd->sector_size_bits;
sec += START_SECTOR(clu);
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
goto err_out;
ep = (struct dentry_t *)(buf + off);
entry_type = p_fs->fs_func->get_entry_type(ep);
- if ((entry_type != TYPE_FILE)
- && (entry_type != TYPE_DIR))
+ if ((entry_type != TYPE_FILE) && (entry_type != TYPE_DIR))
goto err_out;
if (type == ES_ALL_ENTRIES)
@@ -1812,14 +1450,14 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
if (es->alloc_flag == 0x03) {
clu++;
} else {
- if (FAT_read(sb, clu, &clu) == -1)
+ if (exfat_fat_read(sb, clu, &clu) == -1)
goto err_out;
}
sec = START_SECTOR(clu);
} else {
sec++;
}
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
goto err_out;
off = 0;
@@ -1832,11 +1470,11 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
}
if (file_ep)
- *file_ep = (struct dentry_t *)&(es->__buf);
+ *file_ep = (struct dentry_t *)&es->__buf;
pr_debug("%s exiting es %p sec %llu offset %d flags %d, num_entries %u buf ptr %p\n",
- __func__, es, (unsigned long long)es->sector, es->offset,
- es->alloc_flag, es->num_entries, &es->__buf);
+ __func__, es, (unsigned long long)es->sector, es->offset,
+ es->alloc_flag, es->num_entries, &es->__buf);
return es;
err_out:
pr_debug("%s exited NULL (es %p)\n", __func__, es);
@@ -1850,114 +1488,8 @@ void release_entry_set(struct entry_set_cache_t *es)
kfree(es);
}
-static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es,
- sector_t sec, s32 off, u32 count)
-{
- s32 num_entries, buf_off = (off - es->offset);
- u32 remaining_byte_in_sector, copy_entries;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- u32 clu;
- u8 *buf, *esbuf = (u8 *)&(es->__buf);
-
- pr_debug("%s entered es %p sec %llu off %d count %d\n",
- __func__, es, (unsigned long long)sec, off, count);
- num_entries = count;
-
- while (num_entries) {
- /* white per sector base */
- remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
- copy_entries = min_t(s32,
- remaining_byte_in_sector >> DENTRY_SIZE_BITS,
- num_entries);
- buf = buf_getblk(sb, sec);
- if (!buf)
- goto err_out;
- pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
- pr_debug("copying %d entries from %p to sector %llu\n",
- copy_entries, (esbuf + buf_off),
- (unsigned long long)sec);
- memcpy(buf + off, esbuf + buf_off,
- copy_entries << DENTRY_SIZE_BITS);
- buf_modify(sb, sec);
- num_entries -= copy_entries;
-
- if (num_entries) {
- /* get next sector */
- if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
- clu = GET_CLUSTER_FROM_SECTOR(sec);
- if (es->alloc_flag == 0x03) {
- clu++;
- } else {
- if (FAT_read(sb, clu, &clu) == -1)
- goto err_out;
- }
- sec = START_SECTOR(clu);
- } else {
- sec++;
- }
- off = 0;
- buf_off += copy_entries << DENTRY_SIZE_BITS;
- }
- }
-
- pr_debug("%s exited successfully\n", __func__);
- return FFS_SUCCESS;
-err_out:
- pr_debug("%s failed\n", __func__);
- return FFS_ERROR;
-}
-
-/* write back all entries in entry set */
-s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
-{
- return __write_partial_entries_in_entry_set(sb, es, es->sector,
- es->offset,
- es->num_entries);
-}
-
-/* write back some entries in entry set */
-s32 write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es, struct dentry_t *ep, u32 count)
-{
- s32 ret, byte_offset, off;
- u32 clu = 0;
- sector_t sec;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- struct chain_t dir;
-
- /* validity check */
- if (ep + count > ((struct dentry_t *)&(es->__buf)) + es->num_entries)
- return FFS_ERROR;
-
- dir.dir = GET_CLUSTER_FROM_SECTOR(es->sector);
- dir.flags = es->alloc_flag;
- dir.size = 0xffffffff; /* XXX */
-
- byte_offset = (es->sector - START_SECTOR(dir.dir)) <<
- p_bd->sector_size_bits;
- byte_offset += ((void **)ep - &(es->__buf)) + es->offset;
-
- ret = _walk_fat_chain(sb, &dir, byte_offset, &clu);
- if (ret != FFS_SUCCESS)
- return ret;
-
- /* byte offset in cluster */
- byte_offset &= p_fs->cluster_size - 1;
-
- /* byte offset in sector */
- off = byte_offset & p_bd->sector_size_mask;
-
- /* sector offset in cluster */
- sec = byte_offset >> p_bd->sector_size_bits;
- sec += START_SECTOR(clu);
- return __write_partial_entries_in_entry_set(sb, es, sec, off, count);
-}
-
/* search EMPTY CONTINUOUS "num_entries" entries */
-s32 search_deleted_or_unused_entry(struct super_block *sb,
+static s32 search_deleted_or_unused_entry(struct super_block *sb,
struct chain_t *p_dir, s32 num_entries)
{
int i, dentry, num_empty = 0;
@@ -2043,7 +1575,7 @@ s32 search_deleted_or_unused_entry(struct super_block *sb,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
return -1;
}
}
@@ -2051,7 +1583,7 @@ s32 search_deleted_or_unused_entry(struct super_block *sb,
return -1;
}
-s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
+static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
{
s32 ret, dentry;
u32 last_clu;
@@ -2070,10 +1602,8 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
if (p_fs->dev_ejected)
break;
- if (p_fs->vol_type == EXFAT) {
- if (p_dir->dir != p_fs->root_dir)
- size = i_size_read(inode);
- }
+ if (p_dir->dir != p_fs->root_dir)
+ size = i_size_read(inode);
last_clu = find_last_cluster(sb, p_dir);
clu.dir = last_clu + 1;
@@ -2083,10 +1613,10 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
/* (1) allocate a cluster */
ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu);
if (ret < 1)
- return -1;
+ return -EIO;
- if (clear_cluster(sb, clu.dir) != FFS_SUCCESS)
- return -1;
+ if (clear_cluster(sb, clu.dir) != 0)
+ return -EIO;
/* (2) append to the FAT chain */
if (clu.flags != p_dir->flags) {
@@ -2095,8 +1625,8 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
p_fs->hint_uentry.clu.flags = 0x01;
}
if (clu.flags == 0x01)
- if (FAT_write(sb, last_clu, clu.dir) < 0)
- return -1;
+ if (exfat_fat_write(sb, last_clu, clu.dir) < 0)
+ return -EIO;
if (p_fs->hint_uentry.entry == -1) {
p_fs->hint_uentry.dir = p_dir->dir;
@@ -2110,21 +1640,19 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
p_dir->size++;
/* (3) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- if (p_dir->dir != p_fs->root_dir) {
- size += p_fs->cluster_size;
-
- ep = get_entry_in_dir(sb, &fid->dir,
- fid->entry + 1, &sector);
- if (!ep)
- return -1;
- p_fs->fs_func->set_entry_size(ep, size);
- p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
- buf_modify(sb, sector);
-
- update_dir_checksum(sb, &(fid->dir),
- fid->entry);
- }
+ if (p_dir->dir != p_fs->root_dir) {
+ size += p_fs->cluster_size;
+
+ ep = get_entry_in_dir(sb, &fid->dir,
+ fid->entry + 1, &sector);
+ if (!ep)
+ return -ENOENT;
+ p_fs->fs_func->set_entry_size(ep, size);
+ p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
+ exfat_buf_modify(sb, sector);
+
+ update_dir_checksum(sb, &fid->dir,
+ fid->entry);
}
i_size_write(inode, i_size_read(inode) + p_fs->cluster_size);
@@ -2137,102 +1665,21 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
return dentry;
}
-/* return values of fat_find_dir_entry()
- * >= 0 : return dir entry position with the name in dir
- * -1 : (root dir, ".") it is the root dir itself
- * -2 : entry with the name does not exist
- */
-s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type)
+static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
+ s32 order)
{
- int i, dentry = 0, len;
- s32 order = 0;
- bool is_feasible_entry = true, has_ext_entry = false;
- s32 dentries_per_clu;
- u32 entry_type;
- u16 entry_uniname[14], *uniname = NULL, unichar;
- struct chain_t clu;
- struct dentry_t *ep;
- struct dos_dentry_t *dos_ep;
- struct ext_dentry_t *ext_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (p_dir->dir == p_fs->root_dir) {
- if ((!nls_uniname_cmp(sb, p_uniname->name,
- (u16 *)UNI_CUR_DIR_NAME)) ||
- (!nls_uniname_cmp(sb, p_uniname->name,
- (u16 *)UNI_PAR_DIR_NAME)))
- return -1; // special case, root directory itself
- }
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.flags = p_dir->flags;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < dentries_per_clu; i++, dentry++) {
- ep = get_entry_in_dir(sb, &clu, i, NULL);
- if (!ep)
- return -2;
-
- entry_type = p_fs->fs_func->get_entry_type(ep);
-
- if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
- if ((type == TYPE_ALL) || (type == entry_type)) {
- if (is_feasible_entry && has_ext_entry)
- return dentry;
-
- dos_ep = (struct dos_dentry_t *)ep;
- if (!nls_dosname_cmp(sb, p_dosname->name, dos_ep->name))
- return dentry;
- }
- is_feasible_entry = true;
- has_ext_entry = false;
- } else if (entry_type == TYPE_EXTEND) {
- if (is_feasible_entry) {
- ext_ep = (struct ext_dentry_t *)ep;
- if (ext_ep->order > 0x40) {
- order = (s32)(ext_ep->order - 0x40);
- uniname = p_uniname->name + 13 * (order - 1);
- } else {
- order = (s32)ext_ep->order;
- uniname -= 13;
- }
-
- len = extract_uni_name_from_ext_entry(ext_ep, entry_uniname, order);
-
- unichar = *(uniname + len);
- *(uniname + len) = 0x0;
-
- if (nls_uniname_cmp(sb, uniname, entry_uniname))
- is_feasible_entry = false;
-
- *(uniname + len) = unichar;
- }
- has_ext_entry = true;
- } else if (entry_type == TYPE_UNUSED) {
- return -2;
- }
- is_feasible_entry = true;
- has_ext_entry = false;
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
+ int i, len = 0;
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return -2;
+ for (i = 0; i < 30; i += 2) {
+ *uniname = GET16_A(ep->unicode_0_14 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
}
- return -2;
+ *uniname = 0x0;
+ return len;
}
/* return values of exfat_find_dir_entry()
@@ -2240,7 +1687,7 @@ s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
* -1 : (root dir, ".") it is the root dir itself
* -2 : entry with the name does not exist
*/
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 num_entries,
struct dos_name_t *p_dosname, u32 type)
{
@@ -2375,7 +1822,7 @@ s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
return -2;
}
}
@@ -2383,37 +1830,7 @@ s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
return -2;
}
-s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry)
-{
- s32 count = 0;
- u8 chksum;
- struct dos_dentry_t *dos_ep = (struct dos_dentry_t *)p_entry;
- struct ext_dentry_t *ext_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- chksum = calc_checksum_1byte((void *)dos_ep->name, DOS_NAME_LENGTH, 0);
-
- for (entry--; entry >= 0; entry--) {
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
- entry, NULL);
- if (!ext_ep)
- return -1;
-
- if ((p_fs->fs_func->get_entry_type((struct dentry_t *)ext_ep) ==
- TYPE_EXTEND) && (ext_ep->checksum == chksum)) {
- count++;
- if (ext_ep->order > 0x40)
- return count;
- } else {
- return count;
- }
- }
-
- return count;
-}
-
-s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+static s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
s32 entry, struct dentry_t *p_entry)
{
int i, count = 0;
@@ -2463,7 +1880,7 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
for (i = 0; i < dentries_per_clu; i++) {
ep = get_entry_in_dir(sb, &clu, i, NULL);
if (!ep)
- return -1;
+ return -ENOENT;
entry_type = p_fs->fs_func->get_entry_type(ep);
@@ -2486,8 +1903,8 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return -1;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
}
@@ -2546,7 +1963,7 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
else
clu.dir = CLUSTER_32(~0);
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
break;
}
@@ -2564,84 +1981,19 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 *entries,
struct dos_name_t *p_dosname)
{
- s32 ret, num_entries;
- bool lossy = false;
- char **r;
+ s32 num_entries;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
if (num_entries == 0)
- return FFS_INVALIDPATH;
-
- if (p_fs->vol_type != EXFAT) {
- nls_uniname_to_dosname(sb, p_dosname, p_uniname, &lossy);
-
- if (lossy) {
- ret = fat_generate_dos_name(sb, p_dir, p_dosname);
- if (ret)
- return ret;
- } else {
- for (r = reserved_names; *r; r++) {
- if (!strncmp((void *)p_dosname->name, *r, 8))
- return FFS_INVALIDPATH;
- }
-
- if (p_dosname->name_case != 0xFF)
- num_entries = 1;
- }
-
- if (num_entries > 1)
- p_dosname->name_case = 0x0;
- }
+ return -EINVAL;
*entries = num_entries;
- return FFS_SUCCESS;
-}
-
-void get_uni_name_from_dos_entry(struct super_block *sb,
- struct dos_dentry_t *ep,
- struct uni_name_t *p_uniname, u8 mode)
-{
- struct dos_name_t dos_name;
-
- if (mode == 0x0)
- dos_name.name_case = 0x0;
- else
- dos_name.name_case = ep->lcase;
-
- memcpy(dos_name.name, ep->name, DOS_NAME_LENGTH);
- nls_dosname_to_uniname(sb, p_uniname, &dos_name);
-}
-
-void fat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname)
-{
- int i;
- struct ext_dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (entry--, i = 1; entry >= 0; entry--, i++) {
- ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- NULL);
- if (!ep)
- return;
-
- if (p_fs->fs_func->get_entry_type((struct dentry_t *)ep) ==
- TYPE_EXTEND) {
- extract_uni_name_from_ext_entry(ep, uniname, i);
- if (ep->order > 0x40)
- return;
- } else {
- return;
- }
-
- uniname += 13;
- }
+ return 0;
}
-void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
struct chain_t *p_dir, s32 entry,
u16 *uniname)
{
@@ -2678,203 +2030,7 @@ out:
release_entry_set(es);
}
-s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep, u16 *uniname,
- s32 order)
-{
- int i, len = 0;
-
- for (i = 0; i < 10; i += 2) {
- *uniname = GET16(ep->unicode_0_4 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- if (order < 20) {
- for (i = 0; i < 12; i += 2) {
- *uniname = GET16_A(ep->unicode_5_10 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
- } else {
- for (i = 0; i < 8; i += 2) {
- *uniname = GET16_A(ep->unicode_5_10 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
- *uniname = 0x0; /* uniname[MAX_NAME_LENGTH-1] */
- return len;
- }
-
- for (i = 0; i < 4; i += 2) {
- *uniname = GET16_A(ep->unicode_11_12 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- *uniname = 0x0;
- return len;
-}
-
-s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
- s32 order)
-{
- int i, len = 0;
-
- for (i = 0; i < 30; i += 2) {
- *uniname = GET16_A(ep->unicode_0_14 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- *uniname = 0x0;
- return len;
-}
-
-s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
- struct dos_name_t *p_dosname)
-{
- int i, j, count = 0;
- bool count_begin = false;
- s32 dentries_per_clu;
- u32 type;
- u8 bmap[128/* 1 ~ 1023 */];
- struct chain_t clu;
- struct dos_dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- memset(bmap, 0, sizeof(bmap));
- exfat_bitmap_set(bmap, 0);
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.flags = p_dir->flags;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < dentries_per_clu; i++) {
- ep = (struct dos_dentry_t *)get_entry_in_dir(sb, &clu,
- i, NULL);
- if (!ep)
- return FFS_MEDIAERR;
-
- type = p_fs->fs_func->get_entry_type((struct dentry_t *)
- ep);
-
- if (type == TYPE_UNUSED)
- break;
- if ((type != TYPE_FILE) && (type != TYPE_DIR))
- continue;
-
- count = 0;
- count_begin = false;
-
- for (j = 0; j < 8; j++) {
- if (ep->name[j] == ' ')
- break;
-
- if (ep->name[j] == '~') {
- count_begin = true;
- } else if (count_begin) {
- if ((ep->name[j] >= '0') &&
- (ep->name[j] <= '9')) {
- count = count * 10 +
- (ep->name[j] - '0');
- } else {
- count = 0;
- count_begin = false;
- }
- }
- }
-
- if ((count > 0) && (count < 1024))
- exfat_bitmap_set(bmap, count);
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
- }
-
- count = 0;
- for (i = 0; i < 128; i++) {
- if (bmap[i] != 0xFF) {
- for (j = 0; j < 8; j++) {
- if (exfat_bitmap_test(&bmap[i], j) == 0) {
- count = (i << 3) + j;
- break;
- }
- }
- if (count != 0)
- break;
- }
- }
-
- if ((count == 0) || (count >= 1024))
- return FFS_FILEEXIST;
- fat_attach_count_to_dos_name(p_dosname->name, count);
-
- /* Now dos_name has DOS~????.EXT */
- return FFS_SUCCESS;
-}
-
-void fat_attach_count_to_dos_name(u8 *dosname, s32 count)
-{
- int i, j, length;
- char str_count[6];
-
- snprintf(str_count, sizeof(str_count), "~%d", count);
- length = strlen(str_count);
-
- i = 0;
- j = 0;
- while (j <= (8 - length)) {
- i = j;
- if (dosname[j] == ' ')
- break;
- if (dosname[j] & 0x80)
- j += 2;
- else
- j++;
- }
-
- for (j = 0; j < length; i++, j++)
- dosname[i] = (u8)str_count[j];
-
- if (i == 7)
- dosname[7] = ' ';
-}
-
-s32 fat_calc_num_entries(struct uni_name_t *p_uniname)
-{
- s32 len;
-
- len = p_uniname->name_len;
- if (len == 0)
- return 0;
-
- /* 1 dos name entry + extended entries */
- return (len - 1) / 13 + 2;
-}
-
-s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+static s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
{
s32 len;
@@ -2886,17 +2042,6 @@ s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
return (len - 1) / 15 + 3;
}
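For example, under this formula a 20-character name needs (20 - 1) / 15 + 3 = 4 directory entries: one file entry, one stream-extension entry, and two name entries holding up to 15 characters each.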
-u8 calc_checksum_1byte(void *data, s32 len, u8 chksum)
-{
- int i;
- u8 *c = (u8 *)data;
-
- for (i = 0; i < len; i++, c++)
- chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c;
-
- return chksum;
-}
-
u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
{
int i;
@@ -2921,30 +2066,6 @@ u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
return chksum;
}
-u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type)
-{
- int i;
- u8 *c = (u8 *)data;
-
- switch (type) {
- case CS_PBR_SECTOR:
- for (i = 0; i < len; i++, c++) {
- if ((i == 106) || (i == 107) || (i == 112))
- continue;
- chksum = (((chksum & 1) << 31) |
- ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
- }
- break;
- default
- :
- for (i = 0; i < len; i++, c++)
- chksum = (((chksum & 1) << 31) |
- ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
- }
-
- return chksum;
-}
-
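For reference, calc_checksum_1byte/2byte/4byte all share the same rotate-right-and-add scheme; a minimal standalone sketch of the 8-bit variant removed above (illustrative name, logic copied from this hunk):

#include <stdint.h>
#include <stddef.h>

static uint8_t rotate_add_checksum8(const void *data, size_t len, uint8_t chksum)
{
	const uint8_t *c = data;
	size_t i;

	/* rotate the running checksum right by one bit, then add the next byte */
	for (i = 0; i < len; i++, c++)
		chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c;

	return chksum;
}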
/*
* Name Resolution Functions
*/
@@ -2962,11 +2083,11 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
struct file_id_t *fid = &(EXFAT_I(inode)->fid);
if (strscpy(name_buf, path, sizeof(name_buf)) < 0)
- return FFS_INVALIDPATH;
+ return -EINVAL;
nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy);
if (lossy)
- return FFS_INVALIDPATH;
+ return -EINVAL;
fid->size = i_size_read(inode);
@@ -2974,154 +2095,12 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
p_dir->size = (s32)(fid->size >> p_fs->cluster_size_bits);
p_dir->flags = fid->flags;
- return FFS_SUCCESS;
+ return 0;
}
/*
* File Operation Functions
*/
-static struct fs_func fat_fs_func = {
- .alloc_cluster = fat_alloc_cluster,
- .free_cluster = fat_free_cluster,
- .count_used_clusters = fat_count_used_clusters,
-
- .init_dir_entry = fat_init_dir_entry,
- .init_ext_entry = fat_init_ext_entry,
- .find_dir_entry = fat_find_dir_entry,
- .delete_dir_entry = fat_delete_dir_entry,
- .get_uni_name_from_ext_entry = fat_get_uni_name_from_ext_entry,
- .count_ext_entries = fat_count_ext_entries,
- .calc_num_entries = fat_calc_num_entries,
-
- .get_entry_type = fat_get_entry_type,
- .set_entry_type = fat_set_entry_type,
- .get_entry_attr = fat_get_entry_attr,
- .set_entry_attr = fat_set_entry_attr,
- .get_entry_flag = fat_get_entry_flag,
- .set_entry_flag = fat_set_entry_flag,
- .get_entry_clu0 = fat_get_entry_clu0,
- .set_entry_clu0 = fat_set_entry_clu0,
- .get_entry_size = fat_get_entry_size,
- .set_entry_size = fat_set_entry_size,
- .get_entry_time = fat_get_entry_time,
- .set_entry_time = fat_set_entry_time,
-};
-
-s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
-{
- s32 num_reserved, num_root_sectors;
- struct bpb16_t *p_bpb = (struct bpb16_t *)p_pbr->bpb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
-
- num_root_sectors = GET16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS;
- num_root_sectors = ((num_root_sectors - 1) >>
- p_bd->sector_size_bits) + 1;
-
- p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
- p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
- p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
- p_bd->sector_size_bits;
- p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
-
- p_fs->num_FAT_sectors = GET16(p_bpb->num_fat_sectors);
-
- p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
- if (p_bpb->num_fats == 1)
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
- else
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
- p_fs->num_FAT_sectors;
-
- p_fs->root_start_sector = p_fs->FAT2_start_sector +
- p_fs->num_FAT_sectors;
- p_fs->data_start_sector = p_fs->root_start_sector + num_root_sectors;
-
- p_fs->num_sectors = GET16(p_bpb->num_sectors);
- if (p_fs->num_sectors == 0)
- p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
-
- num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
- p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
- p_fs->sectors_per_clu_bits) + 2;
- /* because the cluster index starts with 2 */
-
- if (p_fs->num_clusters < FAT12_THRESHOLD)
- p_fs->vol_type = FAT12;
- else
- p_fs->vol_type = FAT16;
- p_fs->vol_id = GET32(p_bpb->vol_serial);
-
- p_fs->root_dir = 0;
- p_fs->dentries_in_root = GET16(p_bpb->num_root_entries);
- p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
- DENTRY_SIZE_BITS);
-
- p_fs->vol_flag = VOL_CLEAN;
- p_fs->clu_srch_ptr = 2;
- p_fs->used_clusters = UINT_MAX;
-
- p_fs->fs_func = &fat_fs_func;
-
- return FFS_SUCCESS;
-}
-
-s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
-{
- s32 num_reserved;
- struct bpb32_t *p_bpb = (struct bpb32_t *)p_pbr->bpb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
-
- p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
- p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
- p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
- p_bd->sector_size_bits;
- p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
-
- p_fs->num_FAT_sectors = GET32(p_bpb->num_fat32_sectors);
-
- p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
- if (p_bpb->num_fats == 1)
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
- else
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
- p_fs->num_FAT_sectors;
-
- p_fs->root_start_sector = p_fs->FAT2_start_sector +
- p_fs->num_FAT_sectors;
- p_fs->data_start_sector = p_fs->root_start_sector;
-
- p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
- num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
-
- p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
- p_fs->sectors_per_clu_bits) + 2;
- /* because the cluster index starts with 2 */
-
- p_fs->vol_type = FAT32;
- p_fs->vol_id = GET32(p_bpb->vol_serial);
-
- p_fs->root_dir = GET32(p_bpb->root_cluster);
- p_fs->dentries_in_root = 0;
- p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
- DENTRY_SIZE_BITS);
-
- p_fs->vol_flag = VOL_CLEAN;
- p_fs->clu_srch_ptr = 2;
- p_fs->used_clusters = UINT_MAX;
-
- p_fs->fs_func = &fat_fs_func;
-
- return FFS_SUCCESS;
-}
-
static struct fs_func exfat_fs_func = {
.alloc_cluster = exfat_alloc_cluster,
.free_cluster = exfat_free_cluster,
@@ -3156,7 +2135,7 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
+ return -EFSCORRUPTED;
p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits;
p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits;
@@ -3194,7 +2173,7 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
p_fs->fs_func = &exfat_fs_func;
- return FFS_SUCCESS;
+ return 0;
}
s32 create_dir(struct inode *inode, struct chain_t *p_dir,
@@ -3203,7 +2182,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
s32 ret, dentry, num_entries;
u64 size;
struct chain_t clu;
- struct dos_name_t dos_name, dot_name;
+ struct dos_name_t dos_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct fs_func *fs_func = p_fs->fs_func;
@@ -3216,7 +2195,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
/* find_empty_entry must be called before alloc_cluster */
dentry = find_empty_entry(inode, p_dir, num_entries);
if (dentry < 0)
- return FFS_FULL;
+ return -ENOSPC;
clu.dir = CLUSTER_32(~0);
clu.size = 0;
@@ -3225,64 +2204,26 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
/* (1) allocate a cluster */
ret = fs_func->alloc_cluster(sb, 1, &clu);
if (ret < 0)
- return FFS_MEDIAERR;
+ return ret;
else if (ret == 0)
- return FFS_FULL;
+ return -ENOSPC;
ret = clear_cluster(sb, clu.dir);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
- if (p_fs->vol_type == EXFAT) {
- size = p_fs->cluster_size;
- } else {
- size = 0;
-
- /* initialize the . and .. entry
- * Information for . points to itself
- * Information for .. points to parent dir
- */
-
- dot_name.name_case = 0x0;
- memcpy(dot_name.name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH);
-
- ret = fs_func->init_dir_entry(sb, &clu, 0, TYPE_DIR, clu.dir,
- 0);
- if (ret != FFS_SUCCESS)
- return ret;
-
- ret = fs_func->init_ext_entry(sb, &clu, 0, 1, NULL, &dot_name);
- if (ret != FFS_SUCCESS)
- return ret;
-
- memcpy(dot_name.name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH);
-
- if (p_dir->dir == p_fs->root_dir)
- ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
- CLUSTER_32(0), 0);
- else
- ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
- p_dir->dir, 0);
-
- if (ret != FFS_SUCCESS)
- return ret;
-
- ret = p_fs->fs_func->init_ext_entry(sb, &clu, 1, 1, NULL,
- &dot_name);
- if (ret != FFS_SUCCESS)
- return ret;
- }
+ size = p_fs->cluster_size;
/* (2) update the directory entry */
/* make sub-dir entry in parent directory */
ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
size);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fid->dir.dir = p_dir->dir;
@@ -3299,7 +2240,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
fid->rwoffset = 0;
fid->hint_last_off = -1;
- return FFS_SUCCESS;
+ return 0;
}
s32 create_file(struct inode *inode, struct chain_t *p_dir,
@@ -3319,7 +2260,7 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
/* find_empty_entry must be called before alloc_cluster() */
dentry = find_empty_entry(inode, p_dir, num_entries);
if (dentry < 0)
- return FFS_FULL;
+ return -ENOSPC;
/* (1) update the directory entry */
/* fill the dos name directory entry information of the created file.
@@ -3327,12 +2268,12 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
*/
ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
CLUSTER_32(0), 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fid->dir.dir = p_dir->dir;
@@ -3349,7 +2290,7 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
fid->rwoffset = 0;
fid->hint_last_off = -1;
- return FFS_SUCCESS;
+ return 0;
}
void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
@@ -3365,17 +2306,17 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
if (!ep)
return;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep);
if (num_entries < 0) {
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
return;
}
num_entries++;
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
/* (1) update the directory entry */
fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
@@ -3394,37 +2335,37 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
if (!epold)
- return FFS_MEDIAERR;
+ return -ENOENT;
- buf_lock(sb, sector_old);
+ exfat_buf_lock(sb, sector_old);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry,
epold);
if (num_old_entries < 0) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
num_old_entries++;
ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname,
&num_new_entries, &dos_name);
if (ret) {
- buf_unlock(sb, sector_old);
+ exfat_buf_unlock(sb, sector_old);
return ret;
}
if (num_old_entries < num_new_entries) {
newentry = find_empty_entry(inode, p_dir, num_new_entries);
if (newentry < 0) {
- buf_unlock(sb, sector_old);
- return FFS_FULL;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOSPC;
}
epnew = get_entry_in_dir(sb, p_dir, newentry, &sector_new);
if (!epnew) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
@@ -3434,30 +2375,28 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_old);
-
- if (p_fs->vol_type == EXFAT) {
- epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
- &sector_old);
- buf_lock(sb, sector_old);
- epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
- &sector_new);
-
- if (!epold || !epnew) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
- }
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_old);
- memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_old);
+ epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
+ &sector_old);
+ exfat_buf_lock(sb, sector_old);
+ epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
+ &sector_new);
+
+ if (!epold || !epnew) {
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
+ memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_old);
+
ret = fs_func->init_ext_entry(sb, p_dir, newentry,
num_new_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_dir, oldentry, 0,
@@ -3470,20 +2409,20 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_old);
- buf_unlock(sb, sector_old);
+ exfat_buf_modify(sb, sector_old);
+ exfat_buf_unlock(sb, sector_old);
ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
num_new_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
num_old_entries);
}
- return FFS_SUCCESS;
+ return 0;
}
s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
@@ -3492,7 +2431,6 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
{
s32 ret, newentry, num_new_entries, num_old_entries;
sector_t sector_mov, sector_new;
- struct chain_t clu;
struct dos_name_t dos_name;
struct dentry_t *epmov, *epnew;
struct super_block *sb = inode->i_sb;
@@ -3501,41 +2439,41 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
if (!epmov)
- return FFS_MEDIAERR;
+ return -ENOENT;
/* check if the source and target directory is the same */
if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
fs_func->get_entry_clu0(epmov) == p_newdir->dir)
- return FFS_INVALIDPATH;
+ return -EINVAL;
- buf_lock(sb, sector_mov);
+ exfat_buf_lock(sb, sector_mov);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
epmov);
if (num_old_entries < 0) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
num_old_entries++;
ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname,
&num_new_entries, &dos_name);
if (ret) {
- buf_unlock(sb, sector_mov);
+ exfat_buf_unlock(sb, sector_mov);
return ret;
}
newentry = find_empty_entry(inode, p_newdir, num_new_entries);
if (newentry < 0) {
- buf_unlock(sb, sector_mov);
- return FFS_FULL;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOSPC;
}
epnew = get_entry_in_dir(sb, p_newdir, newentry, &sector_new);
if (!epnew) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
@@ -3544,42 +2482,26 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_mov);
-
- if (p_fs->vol_type == EXFAT) {
- epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
- &sector_mov);
- buf_lock(sb, sector_mov);
- epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
- &sector_new);
- if (!epmov || !epnew) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
- }
-
- memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_mov);
- } else if (fs_func->get_entry_type(epnew) == TYPE_DIR) {
- /* change ".." pointer to new parent dir */
- clu.dir = fs_func->get_entry_clu0(epnew);
- clu.flags = 0x01;
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_mov);
- epnew = get_entry_in_dir(sb, &clu, 1, &sector_new);
- if (!epnew)
- return FFS_MEDIAERR;
-
- if (p_newdir->dir == p_fs->root_dir)
- fs_func->set_entry_clu0(epnew, CLUSTER_32(0));
- else
- fs_func->set_entry_clu0(epnew, p_newdir->dir);
- buf_modify(sb, sector_new);
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
+ &sector_mov);
+ exfat_buf_lock(sb, sector_mov);
+ epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
+ &sector_new);
+ if (!epmov || !epnew) {
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_mov);
+
ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
p_uniname, &dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
@@ -3590,7 +2512,7 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
fid->entry = newentry;
- return FFS_SUCCESS;
+ return 0;
}
/*
@@ -3600,7 +2522,7 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
bool read)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if ((sec >= (p_fs->PBR_sector + p_fs->num_sectors)) &&
@@ -3612,8 +2534,8 @@ int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
}
if (!p_fs->dev_ejected) {
- ret = bdev_read(sb, sec, bh, 1, read);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_read(sb, sec, bh, 1, read);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3623,7 +2545,7 @@ int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
bool sync)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if (sec >= (p_fs->PBR_sector + p_fs->num_sectors) &&
@@ -3641,8 +2563,8 @@ int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
}
if (!p_fs->dev_ejected) {
- ret = bdev_write(sb, sec, bh, 1, sync);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_write(sb, sec, bh, 1, sync);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3652,7 +2574,7 @@ int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
int multi_sector_read(struct super_block *sb, sector_t sec,
struct buffer_head **bh, s32 num_secs, bool read)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if (((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors)) &&
@@ -3664,8 +2586,8 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
}
if (!p_fs->dev_ejected) {
- ret = bdev_read(sb, sec, bh, num_secs, read);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_read(sb, sec, bh, num_secs, read);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3675,7 +2597,7 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
int multi_sector_write(struct super_block *sb, sector_t sec,
struct buffer_head *bh, s32 num_secs, bool sync)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if ((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors) &&
@@ -3692,8 +2614,8 @@ int multi_sector_write(struct super_block *sb, sector_t sec,
}
if (!p_fs->dev_ejected) {
- ret = bdev_write(sb, sec, bh, num_secs, sync);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_write(sb, sec, bh, num_secs, sync);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
index a5c4b68925fb..91e8b0c4dce7 100644
--- a/drivers/staging/exfat/exfat_nls.c
+++ b/drivers/staging/exfat/exfat_nls.c
@@ -7,13 +7,6 @@
#include <linux/nls.h>
#include "exfat.h"
-static u16 bad_dos_chars[] = {
- /* + , ; = [ ] */
- 0x002B, 0x002C, 0x003B, 0x003D, 0x005B, 0x005D,
- 0xFF0B, 0xFF0C, 0xFF1B, 0xFF1D, 0xFF3B, 0xFF3D,
- 0
-};
-
static u16 bad_uni_chars[] = {
/* " * / : < > ? \ | */
0x0022, 0x002A, 0x002F, 0x003A,
@@ -96,11 +89,6 @@ static u16 *nls_wstrchr(u16 *str, u16 wchar)
return NULL;
}
-int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b)
-{
- return strncmp(a, b, DOS_NAME_LENGTH);
-}
-
int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
{
int i;
@@ -114,186 +102,6 @@ int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
return 0;
}
-void nls_uniname_to_dosname(struct super_block *sb,
- struct dos_name_t *p_dosname,
- struct uni_name_t *p_uniname, bool *p_lossy)
-{
- int i, j, len;
- bool lossy = false;
- u8 buf[MAX_CHARSET_SIZE];
- u8 lower = 0, upper = 0;
- u8 *dosname = p_dosname->name;
- u16 *uniname = p_uniname->name;
- u16 *p, *last_period;
- struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
-
- for (i = 0; i < DOS_NAME_LENGTH; i++)
- *(dosname + i) = ' ';
-
- if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_CUR_DIR_NAME)) {
- *(dosname) = '.';
- p_dosname->name_case = 0x0;
- if (p_lossy)
- *p_lossy = false;
- return;
- }
-
- if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_PAR_DIR_NAME)) {
- *(dosname) = '.';
- *(dosname + 1) = '.';
- p_dosname->name_case = 0x0;
- if (p_lossy)
- *p_lossy = false;
- return;
- }
-
- /* search for the last embedded period */
- last_period = NULL;
- for (p = uniname; *p; p++) {
- if (*p == (u16)'.')
- last_period = p;
- }
-
- i = 0;
- while (i < DOS_NAME_LENGTH) {
- if (i == 8) {
- if (!last_period)
- break;
-
- if (uniname <= last_period) {
- if (uniname < last_period)
- lossy = true;
- uniname = last_period + 1;
- }
- }
-
- if (*uniname == (u16)'\0') {
- break;
- } else if (*uniname == (u16)' ') {
- lossy = true;
- } else if (*uniname == (u16)'.') {
- if (uniname < last_period)
- lossy = true;
- else
- i = 8;
- } else if (nls_wstrchr(bad_dos_chars, *uniname)) {
- lossy = true;
- *(dosname + i) = '_';
- i++;
- } else {
- len = convert_uni_to_ch(nls, buf, *uniname, &lossy);
-
- if (len > 1) {
- if ((i >= 8) && ((i + len) > DOS_NAME_LENGTH))
- break;
-
- if ((i < 8) && ((i + len) > 8)) {
- i = 8;
- continue;
- }
-
- lower = 0xFF;
-
- for (j = 0; j < len; j++, i++)
- *(dosname + i) = *(buf + j);
- } else { /* len == 1 */
- if ((*buf >= 'a') && (*buf <= 'z')) {
- *(dosname + i) = *buf - ('a' - 'A');
-
- if (i < 8)
- lower |= 0x08;
- else
- lower |= 0x10;
- } else if ((*buf >= 'A') && (*buf <= 'Z')) {
- *(dosname + i) = *buf;
-
- if (i < 8)
- upper |= 0x08;
- else
- upper |= 0x10;
- } else {
- *(dosname + i) = *buf;
- }
- i++;
- }
- }
-
- uniname++;
- }
-
- if (*dosname == 0xE5)
- *dosname = 0x05;
-
- if (*uniname != 0x0)
- lossy = true;
-
- if (upper & lower)
- p_dosname->name_case = 0xFF;
- else
- p_dosname->name_case = lower;
-
- if (p_lossy)
- *p_lossy = lossy;
-}
-
-void nls_dosname_to_uniname(struct super_block *sb,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname)
-{
- int i = 0, j, n = 0;
- u8 buf[DOS_NAME_LENGTH + 2];
- u8 *dosname = p_dosname->name;
- u16 *uniname = p_uniname->name;
- struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
-
- if (*dosname == 0x05) {
- *buf = 0xE5;
- i++;
- n++;
- }
-
- for (; i < 8; i++, n++) {
- if (*(dosname + i) == ' ')
- break;
-
- if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
- (p_dosname->name_case & 0x08))
- *(buf + n) = *(dosname + i) + ('a' - 'A');
- else
- *(buf + n) = *(dosname + i);
- }
- if (*(dosname + 8) != ' ') {
- *(buf + n) = '.';
- n++;
- }
-
- for (i = 8; i < DOS_NAME_LENGTH; i++, n++) {
- if (*(dosname + i) == ' ')
- break;
-
- if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
- (p_dosname->name_case & 0x10))
- *(buf + n) = *(dosname + i) + ('a' - 'A');
- else
- *(buf + n) = *(dosname + i);
- }
- *(buf + n) = '\0';
-
- i = 0;
- j = 0;
- while (j < (MAX_NAME_LENGTH - 1)) {
- if (*(buf + i) == '\0')
- break;
-
- i += convert_ch_to_uni(nls, uniname, (buf + i), NULL);
-
- uniname++;
- j++;
- }
-
- *uniname = (u16)'\0';
-}
-
void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
struct uni_name_t *p_uniname)
{
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 3b2b0ceb7297..6e481908c59f 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -26,7 +26,7 @@
#include <linux/sched.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
-
+#include <linux/random.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/mutex.h>
@@ -284,12 +284,12 @@ static const struct dentry_operations exfat_dentry_ops = {
.d_compare = exfat_cmp,
};
-static DEFINE_SEMAPHORE(z_sem);
+static DEFINE_MUTEX(z_mutex);
static inline void fs_sync(struct super_block *sb, bool do_sync)
{
if (do_sync)
- bdev_sync(sb);
+ exfat_bdev_sync(sb);
}
/*
@@ -353,26 +353,28 @@ static int ffsMountVol(struct super_block *sb)
pr_info("[EXFAT] trying to mount...\n");
- down(&z_sem);
+ mutex_lock(&z_mutex);
- buf_init(sb);
+ exfat_buf_init(sb);
- sema_init(&p_fs->v_sem, 1);
+ mutex_init(&p_fs->v_mutex);
p_fs->dev_ejected = 0;
/* open the block device */
- bdev_open(sb);
+ exfat_bdev_open(sb);
if (p_bd->sector_size < sb->s_blocksize) {
- ret = FFS_MEDIAERR;
+ printk(KERN_INFO "EXFAT: mount failed - sector size %d less than blocksize %ld\n",
+ p_bd->sector_size, sb->s_blocksize);
+ ret = -EINVAL;
goto out;
}
if (p_bd->sector_size > sb->s_blocksize)
sb_set_blocksize(sb, p_bd->sector_size);
/* read Sector 0 */
- if (sector_read(sb, 0, &tmp_bh, 1) != FFS_SUCCESS) {
- ret = FFS_MEDIAERR;
+ if (sector_read(sb, 0, &tmp_bh, 1) != 0) {
+ ret = -EIO;
goto out;
}
@@ -383,8 +385,8 @@ static int ffsMountVol(struct super_block *sb)
/* check the validity of PBR */
if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) {
brelse(tmp_bh);
- bdev_close(sb);
- ret = FFS_FORMATERR;
+ exfat_bdev_close(sb);
+ ret = -EFSCORRUPTED;
goto out;
}
@@ -394,16 +396,10 @@ static int ffsMountVol(struct super_block *sb)
break;
if (i < 53) {
-#ifdef CONFIG_EXFAT_DONT_MOUNT_VFAT
+ /* Not sure how we'd get here, but complain if we do */
ret = -EINVAL;
- printk(KERN_INFO "EXFAT: Attempted to mount VFAT filesystem\n");
+ pr_info("EXFAT: Attempted to mount VFAT filesystem\n");
goto out;
-#else
- if (GET16(p_pbr->bpb + 11)) /* num_fat_sectors */
- ret = fat16_mount(sb, p_pbr);
- else
- ret = fat32_mount(sb, p_pbr);
-#endif
} else {
ret = exfat_mount(sb, p_pbr);
}
@@ -411,38 +407,34 @@ static int ffsMountVol(struct super_block *sb)
brelse(tmp_bh);
if (ret) {
- bdev_close(sb);
+ exfat_bdev_close(sb);
goto out;
}
- if (p_fs->vol_type == EXFAT) {
- ret = load_alloc_bitmap(sb);
- if (ret) {
- bdev_close(sb);
- goto out;
- }
- ret = load_upcase_table(sb);
- if (ret) {
- free_alloc_bitmap(sb);
- bdev_close(sb);
- goto out;
- }
+ ret = load_alloc_bitmap(sb);
+ if (ret) {
+ exfat_bdev_close(sb);
+ goto out;
+ }
+ ret = load_upcase_table(sb);
+ if (ret) {
+ free_alloc_bitmap(sb);
+ exfat_bdev_close(sb);
+ goto out;
}
if (p_fs->dev_ejected) {
- if (p_fs->vol_type == EXFAT) {
- free_upcase_table(sb);
- free_alloc_bitmap(sb);
- }
- bdev_close(sb);
- ret = FFS_MEDIAERR;
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
+ exfat_bdev_close(sb);
+ ret = -EIO;
goto out;
}
pr_info("[EXFAT] mounted successfully\n");
out:
- up(&z_sem);
+ mutex_unlock(&z_mutex);
return ret;
}
@@ -450,39 +442,37 @@ out:
static int ffsUmountVol(struct super_block *sb)
{
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int err = FFS_SUCCESS;
+ int err = 0;
pr_info("[EXFAT] trying to unmount...\n");
- down(&z_sem);
+ mutex_lock(&z_mutex);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
- fs_sync(sb, false);
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
- if (p_fs->vol_type == EXFAT) {
- free_upcase_table(sb);
- free_alloc_bitmap(sb);
- }
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
- FAT_release_all(sb);
- buf_release_all(sb);
+ exfat_fat_release_all(sb);
+ exfat_buf_release_all(sb);
/* close the block device */
- bdev_close(sb);
+ exfat_bdev_close(sb);
if (p_fs->dev_ejected) {
pr_info("[EXFAT] unmounted with media errors. Device is already ejected.\n");
- err = FFS_MEDIAERR;
+ err = -EIO;
}
- buf_shutdown(sb);
+ exfat_buf_shutdown(sb);
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
- up(&z_sem);
+ mutex_unlock(&p_fs->v_mutex);
+ mutex_unlock(&z_mutex);
pr_info("[EXFAT] unmounted successfully\n");
@@ -491,15 +481,15 @@ static int ffsUmountVol(struct super_block *sb)
static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
{
- int err = FFS_SUCCESS;
+ int err = 0;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* check the validity of pointer parameters */
if (!info)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (p_fs->used_clusters == UINT_MAX)
p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb);
@@ -511,31 +501,31 @@ static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
info->FreeClusters = info->NumClusters - info->UsedClusters;
if (p_fs->dev_ejected)
- err = FFS_MEDIAERR;
+ err = -EIO;
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return err;
}
static int ffsSyncVol(struct super_block *sb, bool do_sync)
{
- int err = FFS_SUCCESS;
+ int err = 0;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* synchronize the file system */
fs_sync(sb, do_sync);
fs_set_vol_flags(sb, VOL_CLEAN);
if (p_fs->dev_ejected)
- err = FFS_MEDIAERR;
+ err = -EIO;
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return err;
}
@@ -559,10 +549,10 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -578,7 +568,7 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
&dos_name, TYPE_ALL);
if (dentry < -1) {
- ret = FFS_NOTFOUND;
+ ret = -ENOENT;
goto out;
}
@@ -597,22 +587,13 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
fid->size = 0;
fid->start_clu = p_fs->root_dir;
} else {
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &dir, dentry,
- ES_2_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &dir, dentry, NULL);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
+ es = get_entry_set_in_dir(sb, &dir, dentry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
fid->type = p_fs->fs_func->get_entry_type(ep);
fid->rwoffset = 0;
@@ -628,15 +609,14 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
}
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
}
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -648,14 +628,14 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
struct uni_name_t uni_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int ret;
+ int ret = 0;
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -667,17 +647,17 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
/* create a new file */
ret = create_file(inode, &dir, &uni_name, mode, fid);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -697,18 +677,18 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!buffer)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -721,7 +701,7 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
if (count == 0) {
if (rcount)
*rcount = 0;
- ret = FFS_EOF;
+ ret = 0;
goto out;
}
@@ -742,9 +722,11 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
}
while (clu_offset > 0) {
- /* clu = FAT_read(sb, clu); */
- if (FAT_read(sb, clu, &clu) == -1)
- return FFS_MEDIAERR;
+ /* clu = exfat_fat_read(sb, clu); */
+ if (exfat_fat_read(sb, clu, &clu) == -1) {
+ ret = -EIO;
+ goto out;
+ }
clu_offset--;
}
@@ -772,13 +754,13 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
if ((offset == 0) && (oneblkread == p_bd->sector_size)) {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)buffer + read_bytes,
(char *)tmp_bh->b_data, (s32)oneblkread);
} else {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)buffer + read_bytes,
(char *)tmp_bh->b_data + offset,
@@ -797,11 +779,11 @@ err_out:
*rcount = read_bytes;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -814,7 +796,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
s32 num_clusters, num_alloc, num_alloced = (s32)~0;
int ret = 0;
u32 clu, last_clu;
- sector_t LogSector, sector = 0;
+ sector_t LogSector;
u64 oneblkwrite, write_bytes;
struct chain_t new_clu;
struct timestamp_t tm;
@@ -827,18 +809,18 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!buffer)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -848,7 +830,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (count == 0) {
if (wcount)
*wcount = 0;
- ret = FFS_SUCCESS;
+ ret = 0;
goto out;
}
@@ -864,7 +846,8 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
while (count > 0) {
clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
- clu = last_clu = fid->start_clu;
+ clu = fid->start_clu;
+ last_clu = fid->start_clu;
if (fid->flags == 0x03) {
if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
@@ -885,9 +868,9 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
last_clu = clu;
- /* clu = FAT_read(sb, clu); */
- if (FAT_read(sb, clu, &clu) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu = exfat_fat_read(sb, clu); */
+ if (exfat_fat_read(sb, clu, &clu) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -909,7 +892,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (num_alloced == 0)
break;
if (num_alloced < 0) {
- ret = FFS_MEDIAERR;
+ ret = num_alloced;
goto out;
}
@@ -928,7 +911,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
modified = true;
}
if (new_clu.flags == 0x01)
- FAT_write(sb, last_clu, new_clu.dir);
+ exfat_fat_write(sb, last_clu, new_clu.dir);
}
num_clusters += num_alloced;
@@ -957,12 +940,12 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) {
if (sector_read(sb, LogSector, &tmp_bh, 0) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)tmp_bh->b_data,
(char *)buffer + write_bytes, (s32)oneblkwrite);
if (sector_write(sb, LogSector, tmp_bh, 0) !=
- FFS_SUCCESS) {
+ 0) {
brelse(tmp_bh);
goto err_out;
}
@@ -970,18 +953,18 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if ((offset > 0) ||
((fid->rwoffset + oneblkwrite) < fid->size)) {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
} else {
if (sector_read(sb, LogSector, &tmp_bh, 0) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
}
memcpy((char *)tmp_bh->b_data + offset,
(char *)buffer + write_bytes, (s32)oneblkwrite);
if (sector_write(sb, LogSector, tmp_bh, 0) !=
- FFS_SUCCESS) {
+ 0) {
brelse(tmp_bh);
goto err_out;
}
@@ -1002,25 +985,15 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
brelse(tmp_bh);
/* (3) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es)
- goto err_out;
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep)
- goto err_out;
- ep2 = ep;
- }
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es)
+ goto err_out;
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
p_fs->fs_func->set_entry_attr(ep, fid->attr);
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
-
if (modified) {
if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
p_fs->fs_func->set_entry_flag(ep2, fid->flags);
@@ -1030,18 +1003,13 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
-
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
}
- if (p_fs->vol_type == EXFAT) {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1051,14 +1019,14 @@ err_out:
*wcount = write_bytes;
if (num_alloced == 0)
- ret = FFS_FULL;
+ ret = -ENOSPC;
else if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1068,7 +1036,6 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
s32 num_clusters;
u32 last_clu = CLUSTER_32(0);
int ret = 0;
- sector_t sector = 0;
struct chain_t clu;
struct timestamp_t tm;
struct dentry_t *ep, *ep2;
@@ -1081,11 +1048,11 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
new_size);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -1095,7 +1062,7 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
if (old_size <= new_size) {
- ret = FFS_SUCCESS;
+ ret = 0;
goto out;
}
@@ -1114,8 +1081,8 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
} else {
while (num_clusters > 0) {
last_clu = clu.dir;
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
num_clusters--;
@@ -1133,22 +1100,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
/* (1) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
- ep2 = ep;
- }
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
p_fs->fs_func->set_entry_attr(ep, fid->attr);
@@ -1159,17 +1117,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
}
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
/* (2) cut off from the FAT chain */
if (last_clu != CLUSTER_32(0)) {
if (fid->flags == 0x01)
- FAT_write(sb, last_clu, CLUSTER_32(~0));
+ exfat_fat_write(sb, last_clu, CLUSTER_32(~0));
}
/* (3) free the clusters */
@@ -1180,18 +1134,18 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
if (fid->rwoffset > fid->size)
fid->rwoffset = fid->size;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
pr_debug("%s exited (%d)\n", __func__, ret);
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1232,14 +1186,14 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!new_path || (*new_path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
update_parent_info(fid, old_parent_inode);
@@ -1252,19 +1206,19 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
/* check if the old file is "." or ".." */
if (p_fs->vol_type != EXFAT) {
if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out2;
}
}
ep = get_entry_in_dir(sb, &olddir, dentry, NULL);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out2;
}
if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out2;
}
@@ -1272,12 +1226,12 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
if (new_inode) {
u32 entry_type;
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
new_fid = &EXFAT_I(new_inode)->fid;
update_parent_info(new_fid, new_parent_inode);
- p_dir = &(new_fid->dir);
+ p_dir = &new_fid->dir;
new_entry = new_fid->entry;
ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
if (!ep)
@@ -1294,7 +1248,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
new_clu.flags = new_fid->flags;
if (!is_dir_empty(sb, &new_clu)) {
- ret = FFS_FILEEXIST;
+ ret = -EEXIST;
goto out;
}
}
@@ -1314,7 +1268,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
ret = move_file(new_parent_inode, &olddir, dentry, &newdir,
&uni_name, fid);
- if ((ret == FFS_SUCCESS) && new_inode) {
+ if ((ret == 0) && new_inode) {
/* delete entries of new_dir */
ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
if (!ep)
@@ -1328,16 +1282,16 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
num_entries + 1);
}
out:
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out2:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1345,7 +1299,7 @@ out2:
static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
{
s32 dentry;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir, clu_to_free;
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
@@ -1353,10 +1307,10 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
dir.dir = fid->dir.dir;
dir.size = fid->dir.size;
@@ -1366,12 +1320,12 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
ep = get_entry_in_dir(sb, &dir, dentry, NULL);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out;
}
if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
fs_set_vol_flags(sb, VOL_DIRTY);
@@ -1390,16 +1344,16 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1409,7 +1363,7 @@ out:
static int ffsSetAttr(struct inode *inode, u32 attr)
{
u32 type;
- int ret = FFS_SUCCESS;
+ int ret = 0;
sector_t sector = 0;
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
@@ -1420,36 +1374,28 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
if (fid->attr == attr) {
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
- return FFS_SUCCESS;
+ return -EIO;
+ return 0;
}
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
(fid->entry == -1)) {
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
- return FFS_SUCCESS;
+ return -EIO;
+ return 0;
}
}
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* get the directory entry of given file */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
type = p_fs->fs_func->get_entry_type(ep);
@@ -1457,12 +1403,11 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
else
- ret = FFS_ERROR;
+ ret = -EINVAL;
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
goto out;
}
@@ -1472,23 +1417,19 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
fid->attr = attr;
p_fs->fs_func->set_entry_attr(ep, attr);
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1496,9 +1437,8 @@ out:
static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
{
- sector_t sector = 0;
s32 count;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir;
struct uni_name_t uni_name;
struct timestamp_t tm;
@@ -1512,7 +1452,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
pr_debug("%s entered\n", __func__);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
@@ -1541,35 +1481,25 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = FFS_MEDIAERR;
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs = count;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
}
}
/* get the directory entry of given file or directory */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_2_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
- buf_lock(sb, sector);
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
/* set FILE_INFO structure using the acquired struct dentry_t */
info->Attr = p_fs->fs_func->get_entry_attr(ep);
@@ -1594,31 +1524,19 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
memset((char *)&info->AccessTimestamp, 0, sizeof(struct date_time_t));
- *(uni_name.name) = 0x0;
+ *uni_name.name = 0x0;
/* XXX this is very bad for exfat because the name is already included in es.
* API should be revised
*/
- p_fs->fs_func->get_uni_name_from_ext_entry(sb, &(fid->dir), fid->entry,
+ p_fs->fs_func->get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
uni_name.name);
- if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
- get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
- &uni_name, 0x1);
nls_uniname_to_cstring(sb, info->Name, &uni_name);
- if (p_fs->vol_type == EXFAT) {
- info->NumSubdirs = 2;
- } else {
- buf_unlock(sb, sector);
- get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
- &uni_name, 0x0);
- nls_uniname_to_cstring(sb, info->ShortName, &uni_name);
- info->NumSubdirs = 0;
- }
+ info->NumSubdirs = 2;
info->Size = p_fs->fs_func->get_entry_size(ep2);
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
if (is_dir) {
dir.dir = fid->start_clu;
@@ -1630,18 +1548,18 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = FFS_MEDIAERR;
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs += count;
}
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
pr_debug("%s exited successfully\n", __func__);
return ret;
@@ -1649,8 +1567,7 @@ out:
static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
{
- sector_t sector = 0;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct timestamp_t tm;
struct dentry_t *ep, *ep2;
struct entry_set_cache_t *es = NULL;
@@ -1662,14 +1579,14 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
pr_debug("%s entered (inode %p info %p\n", __func__, inode, info);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
(fid->entry == -1)) {
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
- ret = FFS_SUCCESS;
+ ret = -EIO;
+ ret = 0;
goto out;
}
}
@@ -1677,23 +1594,13 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
fs_set_vol_flags(sb, VOL_DIRTY);
/* get the directory entry of given file or directory */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- /* for other than exfat */
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_attr(ep, info->Attr);
@@ -1716,19 +1623,15 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
p_fs->fs_func->set_entry_size(ep2, info->Size);
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
pr_debug("%s exited (%d)\n", __func__, ret);
@@ -1740,8 +1643,7 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
s32 num_clusters, num_alloced;
bool modified = false;
u32 last_clu;
- int ret = FFS_SUCCESS;
- sector_t sector = 0;
+ int ret = 0;
struct chain_t new_clu;
struct dentry_t *ep;
struct entry_set_cache_t *es = NULL;
@@ -1751,10 +1653,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
/* check the validity of pointer parameters */
if (!clu)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
fid->rwoffset = (s64)(clu_offset) << p_fs->cluster_size_bits;
@@ -1785,8 +1687,8 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
last_clu = *clu;
- if (FAT_read(sb, *clu, clu) == -1) {
- ret = FFS_MEDIAERR;
+ if (exfat_fat_read(sb, *clu, clu) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -1804,10 +1706,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
/* (1) allocate a cluster */
num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
if (num_alloced < 0) {
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
} else if (num_alloced == 0) {
- ret = FFS_FULL;
+ ret = -ENOSPC;
goto out;
}
@@ -1825,34 +1727,23 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
modified = true;
}
if (new_clu.flags == 0x01)
- FAT_write(sb, last_clu, new_clu.dir);
+ exfat_fat_write(sb, last_clu, new_clu.dir);
}
num_clusters += num_alloced;
*clu = new_clu.dir;
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- /* get stream entry */
- ep++;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ /* get stream entry */
+ ep++;
/* (3) update directory entry */
if (modified) {
- if (p_fs->vol_type != EXFAT) {
- ep = get_entry_in_dir(sb, &(fid->dir),
- fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- }
-
if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
p_fs->fs_func->set_entry_flag(ep, fid->flags);
@@ -1860,14 +1751,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
p_fs->fs_func->set_entry_clu0(ep,
fid->start_clu);
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
}
- if (p_fs->vol_type == EXFAT) {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
/* add number of new blocks to inode */
inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9);
@@ -1878,11 +1765,11 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
fid->hint_last_clu = *clu;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1893,7 +1780,7 @@ out:
static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
{
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir;
struct uni_name_t uni_name;
struct super_block *sb = inode->i_sb;
@@ -1903,10 +1790,10 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given old pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -1917,16 +1804,16 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
ret = create_dir(inode, &dir, &uni_name, fid);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1934,7 +1821,7 @@ out:
static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
{
int i, dentry, clu_offset;
- int ret = FFS_SUCCESS;
+ int ret = 0;
s32 dentries_per_clu, dentries_per_clu_bits = 0;
u32 type;
sector_t sector;
@@ -1949,14 +1836,14 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
/* check the validity of pointer parameters */
if (!dir_entry)
- return FFS_ERROR;
+ return -EINVAL;
/* check if the given file ID is opened */
if (fid->type != TYPE_DIR)
- return FFS_PERMISSIONERR;
+ return -ENOTDIR;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (fid->entry == -1) {
dir.dir = p_fs->root_dir;
@@ -2001,9 +1888,9 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
}
while (clu_offset > 0) {
- /* clu.dir = FAT_read(sb, clu.dir); */
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu.dir = exfat_fat_read(sb, clu.dir); */
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -2023,7 +1910,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
for ( ; i < dentries_per_clu; i++, dentry++) {
ep = get_entry_in_dir(sb, &clu, i, &sector);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out;
}
type = fs_func->get_entry_type(ep);
@@ -2034,7 +1921,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
if ((type != TYPE_FILE) && (type != TYPE_DIR))
continue;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
dir_entry->Attr = fs_func->get_entry_attr(ep);
fs_func->get_entry_time(ep, &tm, TM_CREATE);
@@ -2058,28 +1945,16 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
memset((char *)&dir_entry->AccessTimestamp, 0,
sizeof(struct date_time_t));
- *(uni_name.name) = 0x0;
+ *uni_name.name = 0x0;
fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry,
uni_name.name);
- if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
- get_uni_name_from_dos_entry(sb,
- (struct dos_dentry_t *)ep,
- &uni_name, 0x1);
nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
- if (p_fs->vol_type == EXFAT) {
- ep = get_entry_in_dir(sb, &clu, i + 1, NULL);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- } else {
- get_uni_name_from_dos_entry(sb,
- (struct dos_dentry_t *)ep,
- &uni_name, 0x0);
- nls_uniname_to_cstring(sb, dir_entry->ShortName,
- &uni_name);
+ ep = get_entry_in_dir(sb, &clu, i + 1, NULL);
+ if (!ep) {
+ ret = -ENOENT;
+ goto out;
}
dir_entry->Size = fs_func->get_entry_size(ep);
@@ -2095,7 +1970,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
fid->rwoffset = (s64)(++dentry);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
}
@@ -2108,24 +1983,24 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
else
clu.dir = CLUSTER_32(~0);
} else {
- /* clu.dir = FAT_read(sb, clu.dir); */
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu.dir = exfat_fat_read(sb, clu.dir); */
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
}
}
- *(dir_entry->Name) = '\0';
+ *dir_entry->Name = '\0';
fid->rwoffset = (s64)(++dentry);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -2133,14 +2008,14 @@ out:
static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
{
s32 dentry;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir, clu_to_free;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
dir.dir = fid->dir.dir;
dir.size = fid->dir.size;
@@ -2151,18 +2026,18 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
/* check if the file is "." or ".." */
if (p_fs->vol_type != EXFAT) {
if ((dir.dir != p_fs->root_dir) && (dentry < 2))
- return FFS_PERMISSIONERR;
+ return -EPERM;
}
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
clu_to_free.dir = fid->start_clu;
clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1;
clu_to_free.flags = fid->flags;
if (!is_dir_empty(sb, &clu_to_free)) {
- ret = FFS_FILEEXIST;
+ ret = -ENOTEMPTY;
goto out;
}
@@ -2178,17 +2053,17 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -2202,7 +2077,7 @@ static int exfat_readdir(struct file *filp, struct dir_context *ctx)
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
struct dir_entry_t de;
unsigned long inum;
@@ -2244,12 +2119,11 @@ get_new:
/* at least we tried to read a sector
* move cpos to next sector position (should be aligned)
*/
- if (err == FFS_MEDIAERR) {
+ if (err == -EIO) {
cpos += 1 << p_bd->sector_size_bits;
cpos &= ~((1 << p_bd->sector_size_bits) - 1);
}
- err = -EIO;
goto end_of_dir;
}
@@ -2293,7 +2167,7 @@ static int exfat_ioctl_volume_id(struct inode *dir)
{
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
return p_fs->vol_id;
}
@@ -2301,7 +2175,7 @@ static int exfat_ioctl_volume_id(struct inode *dir)
static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
-struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = filp->f_path.dentry->d_inode;
#ifdef CONFIG_EXFAT_KERNEL_DEBUG
unsigned int flags;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
@@ -2351,6 +2225,7 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2361,21 +2236,14 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
pr_debug("%s entered\n", __func__);
err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_REGULAR, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else if (err == FFS_NAMETOOLONG)
- err = -ENAMETOOLONG;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2389,7 +2257,10 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/*
* timestamp is already written, so mark_inode_dirty() is unnecessary.
*/
@@ -2522,6 +2393,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2531,22 +2403,22 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
EXFAT_I(inode)->fid.size = i_size_read(inode);
err = ffsRemoveFile(dir, &(EXFAT_I(inode)->fid));
- if (err) {
- if (err == FFS_PERMISSIONERR)
- err = -EPERM;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
mark_inode_dirty(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
exfat_detach(inode);
remove_inode_hash(inode);
@@ -2560,6 +2432,7 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
const char *target)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2572,32 +2445,22 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
pr_debug("%s entered\n", __func__);
err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_SYMLINK, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
err = ffsWriteFile(dir, &fid, (char *)target, len, &ret);
if (err) {
ffsRemoveFile(dir, &fid);
-
- if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
goto out;
}
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2611,7 +2474,10 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
EXFAT_I(inode)->target = kmemdup(target, len + 1, GFP_KERNEL);
@@ -2632,6 +2498,7 @@ out:
static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2642,21 +2509,14 @@ static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
pr_debug("%s entered\n", __func__);
err = ffsCreateDir(dir, (u8 *)dentry->d_name.name, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else if (err == FFS_NAMETOOLONG)
- err = -ENAMETOOLONG;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2671,7 +2531,10 @@ static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
@@ -2687,6 +2550,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2696,21 +2560,13 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
EXFAT_I(inode)->fid.size = i_size_read(inode);
err = ffsRemoveDir(dir, &(EXFAT_I(inode)->fid));
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -ENOTEMPTY;
- else if (err == FFS_NOTFOUND)
- err = -ENOENT;
- else if (err == FFS_DIRBUSY)
- err = -EBUSY;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2718,7 +2574,9 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
exfat_detach(inode);
remove_inode_hash(inode);
@@ -2734,6 +2592,7 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
{
struct inode *old_inode, *new_inode;
struct super_block *sb = old_dir->i_sb;
+ struct timespec64 curtime;
loff_t i_pos;
int err;
@@ -2751,24 +2610,15 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
err = ffsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir,
new_dentry);
- if (err) {
- if (err == FFS_PERMISSIONERR)
- err = -EPERM;
- else if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_NOTFOUND)
- err = -ENOENT;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(new_dir);
- new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime =
- current_time(new_dir);
+ curtime = current_time(new_dir);
+ new_dir->i_ctime = curtime;
+ new_dir->i_mtime = curtime;
+ new_dir->i_atime = curtime;
+
if (IS_DIRSYNC(new_dir))
(void)exfat_sync_inode(new_dir);
else
@@ -2790,7 +2640,9 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
inc_nlink(new_dir);
}
INC_IVERSION(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ curtime = current_time(old_dir);
+ old_dir->i_ctime = curtime;
+ old_dir->i_mtime = curtime;
if (IS_DIRSYNC(old_dir))
(void)exfat_sync_inode(old_dir);
else
@@ -2814,13 +2666,16 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
loff_t start = i_size_read(inode), count = size - i_size_read(inode);
+ struct timespec64 curtime;
int err, err2;
err = generic_cont_expand_simple(inode, size);
if (err != 0)
return err;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_ctime = curtime;
+ inode->i_mtime = curtime;
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
@@ -2895,7 +2750,8 @@ static void exfat_truncate(struct inode *inode, loff_t old_size)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2914,7 +2770,9 @@ static void exfat_truncate(struct inode *inode, loff_t old_size)
if (err)
goto out;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_ctime = curtime;
+ inode->i_mtime = curtime;
if (IS_DIRSYNC(inode))
(void)exfat_sync_inode(inode);
else
@@ -2936,8 +2794,8 @@ static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
pr_debug("%s entered\n", __func__);
- if ((attr->ia_valid & ATTR_SIZE)
- && (attr->ia_size > i_size_read(inode))) {
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size > i_size_read(inode)) {
error = exfat_cont_expand(inode, attr->ia_size);
if (error || attr->ia_valid == ATTR_SIZE)
return error;
@@ -2946,8 +2804,8 @@ static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
ia_valid = attr->ia_valid;
- if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET))
- && exfat_allow_set_time(sbi, inode)) {
+ if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) &&
+ exfat_allow_set_time(sbi, inode)) {
attr->ia_valid &= ~(ATTR_MTIME_SET |
ATTR_ATIME_SET |
ATTR_TIMES_SET);
@@ -3073,8 +2931,7 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
- struct bd_info_t *p_bd = &(sbi->bd_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
const unsigned long blocksize = sb->s_blocksize;
const unsigned char blocksize_bits = sb->s_blocksize_bits;
sector_t last_block;
@@ -3084,18 +2941,6 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
*phys = 0;
*mapped_blocks = 0;
- if ((p_fs->vol_type == FAT12) || (p_fs->vol_type == FAT16)) {
- if (inode->i_ino == EXFAT_ROOT_INO) {
- if (sector <
- (p_fs->dentries_in_root >>
- (p_bd->sector_size_bits - DENTRY_SIZE_BITS))) {
- *phys = sector + p_fs->root_start_sector;
- *mapped_blocks = 1;
- }
- return 0;
- }
- }
-
last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
if (sector >= last_block) {
if (*create == 0)
@@ -3114,12 +2959,7 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
err = ffsMapCluster(inode, clu_offset, &cluster);
- if (err) {
- if (err == FFS_FULL)
- return -ENOSPC;
- else
- return -EIO;
- } else if (cluster != CLUSTER_32(~0)) {
+ if (!err && (cluster != CLUSTER_32(~0))) {
*phys = START_SECTOR(cluster) + sec_offset;
*mapped_blocks = p_fs->sectors_per_clu - sec_offset;
}
@@ -3215,6 +3055,7 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ struct timespec64 curtime;
int err;
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
@@ -3223,7 +3064,9 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
exfat_write_failed(mapping, pos + len);
if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_ctime = curtime;
fid->attr |= ATTR_ARCHIVE;
mark_inode_dirty(inode);
}
@@ -3302,7 +3145,7 @@ static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
{
struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
struct dir_entry_t info;
memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(struct file_id_t));
@@ -3314,7 +3157,7 @@ static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
INC_IVERSION(inode);
- inode->i_generation = get_seconds();
+ inode->i_generation = prandom_u32();
if (info.Attr & ATTR_SUBDIR) { /* directory */
inode->i_generation &= ~1;
@@ -3501,7 +3344,7 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
struct vol_info_t info;
if (p_fs->used_clusters == UINT_MAX) {
- if (ffsGetVolInfo(sb, &info) == FFS_MEDIAERR)
+ if (ffsGetVolInfo(sb, &info) == -EIO)
return -EIO;
} else {
@@ -3674,7 +3517,8 @@ static int parse_options(char *options, int silent, int *debug,
opts->fs_uid = current_uid();
opts->fs_gid = current_gid();
- opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+ opts->fs_fmask = current->fs->umask;
+ opts->fs_dmask = current->fs->umask;
opts->allow_utime = U16_MAX;
opts->codepage = exfat_default_codepage;
opts->iocharset = exfat_default_iocharset;
@@ -3787,7 +3631,8 @@ static int exfat_read_root(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
+ struct timespec64 curtime;
struct dir_entry_t info;
EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir;
@@ -3818,7 +3663,10 @@ static int exfat_read_root(struct inode *inode)
EXFAT_I(inode)->mmu_private = i_size_read(inode);
exfat_save_attr(inode, ATTR_SUBDIR);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
set_nlink(inode, info.NumSubdirs + 2);
return 0;
@@ -3838,7 +3686,6 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
struct exfat_sb_info *sbi;
int debug, ret;
long error;
- char buf[50];
/*
* GFP_KERNEL is ok here, because while we do hold the
@@ -3885,17 +3732,6 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
* if (FAT_FIRST_ENT(sb, media) != first)
*/
- /* codepage is not meaningful in exfat */
- if (sbi->fs_info.vol_type != EXFAT) {
- error = -EINVAL;
- sprintf(buf, "cp%d", sbi->options.codepage);
- sbi->nls_disk = load_nls(buf);
- if (!sbi->nls_disk) {
- pr_err("[EXFAT] Codepage %s not found\n", buf);
- goto out_fail2;
- }
- }
-
sbi->nls_io = load_nls(sbi->options.iocharset);
error = -ENOMEM;
@@ -3984,10 +3820,10 @@ static void exfat_debug_kill_sb(struct super_block *sb)
* invalidate_bdev drops all device cache, including
* dirty data. We use this to simulate device removal.
*/
- down(&p_fs->v_sem);
- FAT_release_all(sb);
- buf_release_all(sb);
- up(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
+ exfat_fat_release_all(sb);
+ exfat_buf_release_all(sb);
+ mutex_unlock(&p_fs->v_mutex);
invalidate_bdev(bdev);
}
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index cb61c2a772bd..dad1ddcd7b0c 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
- depends on FB && SPI && OF
+ depends on FB && SPI
depends on GPIOLIB || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -95,8 +95,8 @@ config FB_TFT_PCD8544
Generic Framebuffer support for PCD8544
config FB_TFT_RA8875
- tristate "FB driver for the RA8875 LCD Controller"
- depends on FB_TFT
+ tristate "FB driver for the RA8875 LCD Controller"
+ depends on FB_TFT
help
Generic Framebuffer support for RA8875
@@ -112,6 +112,13 @@ config FB_TFT_S6D1121
help
Generic Framebuffer support for S6D1121
+config FB_TFT_SEPS525
+ tristate "FB driver for the SEPS525 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for SEPS525
+ Say Y if you have such a display that utilizes this controller.
+
config FB_TFT_SH1106
tristate "FB driver for the SH1106 OLED Controller"
depends on FB_TFT
@@ -125,10 +132,10 @@ config FB_TFT_SSD1289
Framebuffer support for SSD1289
config FB_TFT_SSD1305
- tristate "FB driver for the SSD1305 OLED Controller"
- depends on FB_TFT
- help
- Framebuffer support for SSD1305
+ tristate "FB driver for the SSD1305 OLED Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1305
config FB_TFT_SSD1306
tristate "FB driver for the SSD1306 OLED Controller"
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 27af43f32f81..e87193f7df14 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_FB_TFT_PCD8544) += fb_pcd8544.o
obj-$(CONFIG_FB_TFT_RA8875) += fb_ra8875.o
obj-$(CONFIG_FB_TFT_S6D02A1) += fb_s6d02a1.o
obj-$(CONFIG_FB_TFT_S6D1121) += fb_s6d1121.o
+obj-$(CONFIG_FB_TFT_SEPS525) += fb_seps525.o
obj-$(CONFIG_FB_TFT_SH1106) += fb_sh1106.o
obj-$(CONFIG_FB_TFT_SSD1289) += fb_ssd1289.o
obj-$(CONFIG_FB_TFT_SSD1305) += fb_ssd1305.o
diff --git a/drivers/staging/fbtft/fb_seps525.c b/drivers/staging/fbtft/fb_seps525.c
new file mode 100644
index 000000000000..05882e2cde7f
--- /dev/null
+++ b/drivers/staging/fbtft/fb_seps525.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FB driver for the NHD-1.69-160128UGC3 (Newhaven Display International, Inc.)
+ * using the SEPS525 (Syncoam) LCD Controller
+ *
+ * Copyright (C) 2016 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_seps525"
+#define WIDTH 160
+#define HEIGHT 128
+
+#define SEPS525_INDEX 0x00
+#define SEPS525_STATUS_RD 0x01
+#define SEPS525_OSC_CTL 0x02
+#define SEPS525_IREF 0x80
+#define SEPS525_CLOCK_DIV 0x03
+#define SEPS525_REDUCE_CURRENT 0x04
+#define SEPS525_SOFT_RST 0x05
+#define SEPS525_DISP_ONOFF 0x06
+#define SEPS525_PRECHARGE_TIME_R 0x08
+#define SEPS525_PRECHARGE_TIME_G 0x09
+#define SEPS525_PRECHARGE_TIME_B 0x0A
+#define SEPS525_PRECHARGE_CURRENT_R 0x0B
+#define SEPS525_PRECHARGE_CURRENT_G 0x0C
+#define SEPS525_PRECHARGE_CURRENT_B 0x0D
+#define SEPS525_DRIVING_CURRENT_R 0x10
+#define SEPS525_DRIVING_CURRENT_G 0x11
+#define SEPS525_DRIVING_CURRENT_B 0x12
+#define SEPS525_DISPLAYMODE_SET 0x13
+#define SEPS525_RGBIF 0x14
+#define SEPS525_RGB_POL 0x15
+#define SEPS525_MEMORY_WRITEMODE 0x16
+#define SEPS525_MX1_ADDR 0x17
+#define SEPS525_MX2_ADDR 0x18
+#define SEPS525_MY1_ADDR 0x19
+#define SEPS525_MY2_ADDR 0x1A
+#define SEPS525_MEMORY_ACCESS_POINTER_X 0x20
+#define SEPS525_MEMORY_ACCESS_POINTER_Y 0x21
+#define SEPS525_DDRAM_DATA_ACCESS_PORT 0x22
+#define SEPS525_GRAY_SCALE_TABLE_INDEX 0x50
+#define SEPS525_GRAY_SCALE_TABLE_DATA 0x51
+#define SEPS525_DUTY 0x28
+#define SEPS525_DSL 0x29
+#define SEPS525_D1_DDRAM_FAC 0x2E
+#define SEPS525_D1_DDRAM_FAR 0x2F
+#define SEPS525_D2_DDRAM_SAC 0x31
+#define SEPS525_D2_DDRAM_SAR 0x32
+#define SEPS525_SCR1_FX1 0x33
+#define SEPS525_SCR1_FX2 0x34
+#define SEPS525_SCR1_FY1 0x35
+#define SEPS525_SCR1_FY2 0x36
+#define SEPS525_SCR2_SX1 0x37
+#define SEPS525_SCR2_SX2 0x38
+#define SEPS525_SCR2_SY1 0x39
+#define SEPS525_SCR2_SY2 0x3A
+#define SEPS525_SCREEN_SAVER_CONTEROL 0x3B
+#define SEPS525_SS_SLEEP_TIMER 0x3C
+#define SEPS525_SCREEN_SAVER_MODE 0x3D
+#define SEPS525_SS_SCR1_FU 0x3E
+#define SEPS525_SS_SCR1_MXY 0x3F
+#define SEPS525_SS_SCR2_FU 0x40
+#define SEPS525_SS_SCR2_MXY 0x41
+#define SEPS525_MOVING_DIRECTION 0x42
+#define SEPS525_SS_SCR2_SX1 0x47
+#define SEPS525_SS_SCR2_SX2 0x48
+#define SEPS525_SS_SCR2_SY1 0x49
+#define SEPS525_SS_SCR2_SY2 0x4A
+
+/* SEPS525_DISPLAYMODE_SET */
+#define MODE_SWAP_BGR BIT(7)
+#define MODE_SM BIT(6)
+#define MODE_RD BIT(5)
+#define MODE_CD BIT(4)
+
+#define seps525_use_window 0 /* FBTFT doesn't really use it today */
+
+/* Init sequence taken from: Arduino Library for the Adafruit 2.2" display */
+static int init_display(struct fbtft_par *par)
+{
+ par->fbtftops.reset(par);
+
+ usleep_range(1000, 5000);
+
+ /* Disable Oscillator Power Down */
+ write_reg(par, SEPS525_REDUCE_CURRENT, 0x03);
+ usleep_range(1000, 5000);
+ /* Set Normal Driving Current */
+ write_reg(par, SEPS525_REDUCE_CURRENT, 0x00);
+ usleep_range(1000, 5000);
+
+ write_reg(par, SEPS525_SCREEN_SAVER_CONTEROL, 0x00);
+ /* Set EXPORT1 Pin at Internal Clock */
+ write_reg(par, SEPS525_OSC_CTL, 0x01);
+ /* Set Clock as 120 Frames/Sec */
+ write_reg(par, SEPS525_CLOCK_DIV, 0x90);
+ /* Set Reference Voltage Controlled by External Resistor */
+ write_reg(par, SEPS525_IREF, 0x01);
+
+ /* precharge time R G B */
+ write_reg(par, SEPS525_PRECHARGE_TIME_R, 0x04);
+ write_reg(par, SEPS525_PRECHARGE_TIME_G, 0x05);
+ write_reg(par, SEPS525_PRECHARGE_TIME_B, 0x05);
+
+ /* precharge current R G B (uA) */
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_R, 0x9D);
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_G, 0x8C);
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_B, 0x57);
+
+ /* driving current R G B (uA) */
+ write_reg(par, SEPS525_DRIVING_CURRENT_R, 0x56);
+ write_reg(par, SEPS525_DRIVING_CURRENT_G, 0x4D);
+ write_reg(par, SEPS525_DRIVING_CURRENT_B, 0x46);
+ /* Set Color Sequence */
+ write_reg(par, SEPS525_DISPLAYMODE_SET, 0xA0);
+ write_reg(par, SEPS525_RGBIF, 0x01); /* Set MCU Interface Mode */
+ /* Set Memory Write Mode */
+ write_reg(par, SEPS525_MEMORY_WRITEMODE, 0x66);
+ write_reg(par, SEPS525_DUTY, 0x7F); /* 1/128 Duty (0x0F~0x7F) */
+ /* Set Mapping RAM Display Start Line (0x00~0x7F) */
+ write_reg(par, SEPS525_DSL, 0x00);
+ write_reg(par, SEPS525_DISP_ONOFF, 0x01); /* Display On (0x00/0x01) */
+ /* Set All Internal Register Value as Normal Mode */
+ write_reg(par, SEPS525_SOFT_RST, 0x00);
+ /* Set RGB Interface Polarity as Active Low */
+ write_reg(par, SEPS525_RGB_POL, 0x00);
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ if (seps525_use_window) {
+ /* Set Window Xs,Ys Xe,Ye */
+ write_reg(par, SEPS525_MX1_ADDR, xs);
+ write_reg(par, SEPS525_MX2_ADDR, xe);
+ write_reg(par, SEPS525_MY1_ADDR, ys);
+ write_reg(par, SEPS525_MY2_ADDR, ye);
+ }
+ /* start position X,Y */
+ write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_X, xs);
+ write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_Y, ys);
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ u8 val;
+
+ switch (par->info->var.rotate) {
+ case 0:
+ val = 0;
+ break;
+ case 180:
+ val = MODE_RD | MODE_CD;
+ break;
+ case 90:
+ case 270:
+
+ default:
+ return -EINVAL;
+ }
+ /* Memory Access Control */
+ write_reg(par, SEPS525_DISPLAYMODE_SET, val |
+ (par->bgr ? MODE_SWAP_BGR : 0));
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "syncoam,seps525", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:seps525");
+MODULE_ALIAS("platform:seps525");
+
+MODULE_DESCRIPTION("FB driver for the SEPS525 LCD Controller");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_LICENSE("GPL");
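One possible way to bind the new fb_seps525 driver on a board without device tree support is a legacy SPI board info entry; the bus number, chip select and clock rate below are placeholders, not values taken from this patch:

	#include <linux/init.h>
	#include <linux/spi/spi.h>

	static struct spi_board_info seps525_board_info __initdata = {
		.modalias	= "fb_seps525",	/* matches MODULE_ALIAS("spi:" DRVNAME) */
		.max_speed_hz	= 32000000,
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	};

	static int __init seps525_board_register(void)
	{
		return spi_register_board_info(&seps525_board_info, 1);
	}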
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 65681d0fe200..e763205e9e4f 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -91,7 +91,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x2C | (pump & 0x03));
/* Set inverse display */
- write_reg(par, 0xA6 | (0x01 & 0x01));
+ write_reg(par, 0xA6 | 0x01);
/* Set 4-bit grayscale mode */
write_reg(par, 0xD0 | (0x02 & 0x03));
@@ -157,8 +157,8 @@ static int set_var(struct fbtft_par *par)
/* Set RAM address control */
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
- | (0x1 & 0x1) << 1 /* Increment page first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | (0x1 << 1) /* Increment page first */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
@@ -171,11 +171,11 @@ static int set_var(struct fbtft_par *par)
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
| (0x0 & 0x1) << 1 /* Increment column first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
- | (0x1 & 0x1) << 2 /* Mirror Y ON */
+ | (0x1 << 2) /* Mirror Y ON */
| (0x0 & 0x1) << 1 /* Mirror X OFF */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
@@ -183,13 +183,13 @@ static int set_var(struct fbtft_par *par)
/* Set RAM address control */
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
- | (0x1 & 0x1) << 1 /* Increment page first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | (0x1 << 1) /* Increment page first */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
- | (0x1 & 0x1) << 2 /* Mirror Y ON */
- | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x1 << 2) /* Mirror Y ON */
+ | (0x1 << 1) /* Mirror X ON */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
default:
@@ -197,12 +197,12 @@ static int set_var(struct fbtft_par *par)
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
| (0x0 & 0x1) << 1 /* Increment column first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
| (0x0 & 0x1) << 2 /* Mirror Y OFF */
- | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x1 << 1) /* Mirror X ON */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
}
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index a0a67aa517f0..ffb84987dd86 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -22,8 +22,9 @@
#include <linux/uaccess.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
-#include <linux/of.h>
+
#include <video/mipi_display.h>
#include "fbtft.h"
@@ -70,7 +71,6 @@ void fbtft_dbg_hex(const struct device *dev, int groupsize,
}
EXPORT_SYMBOL(fbtft_dbg_hex);
-#ifdef CONFIG_OF
static int fbtft_request_one_gpio(struct fbtft_par *par,
const char *name, int index,
struct gpio_desc **gpiop)
@@ -92,14 +92,11 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
return ret;
}
-static int fbtft_request_gpios_dt(struct fbtft_par *par)
+static int fbtft_request_gpios(struct fbtft_par *par)
{
int i;
int ret;
- if (!par->info->device->of_node)
- return -EINVAL;
-
ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
if (ret)
return ret;
@@ -135,7 +132,6 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
return 0;
}
-#endif
#ifdef CONFIG_FB_BACKLIGHT
static int fbtft_backlight_update_status(struct backlight_device *bd)
@@ -317,7 +313,7 @@ static void fbtft_mkdirty(struct fb_info *info, int y, int height)
/* special case, needed ? */
if (y == -1) {
y = 0;
- height = info->var.yres - 1;
+ height = info->var.yres;
}
/* Mark display lines/area as dirty */
@@ -529,6 +525,7 @@ static void fbtft_merge_fbtftops(struct fbtft_ops *dst, struct fbtft_ops *src)
*
* @display: pointer to structure describing the display
* @dev: pointer to the device for this fb, this can be NULL
+ * @pdata: platform data for the display in use
*
* Creates a new frame buffer info structure.
*
@@ -666,7 +663,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
fbdefio->deferred_io = fbtft_deferred_io;
fb_deferred_io_init(info);
- strncpy(info->fix.id, dev->driver->name, 16);
+ snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.xpanstep = 0;
@@ -897,46 +894,54 @@ int fbtft_unregister_framebuffer(struct fb_info *fb_info)
}
EXPORT_SYMBOL(fbtft_unregister_framebuffer);
-#ifdef CONFIG_OF
/**
- * fbtft_init_display_dt() - Device Tree init_display() function
+ * fbtft_init_display_from_property() - Device Tree init_display() function
* @par: Driver data
*
* Return: 0 if successful, negative if error
*/
-static int fbtft_init_display_dt(struct fbtft_par *par)
+static int fbtft_init_display_from_property(struct fbtft_par *par)
{
- struct device_node *node = par->info->device->of_node;
- struct property *prop;
- const __be32 *p;
+ struct device *dev = par->info->device;
+ int buf[64], count, index, i, j, ret;
+ u32 *values;
u32 val;
- int buf[64], i, j;
- if (!node)
+ count = device_property_count_u32(dev, "init");
+ if (count < 0)
+ return count;
+ if (count == 0)
return -EINVAL;
- prop = of_find_property(node, "init", NULL);
- p = of_prop_next_u32(prop, NULL, &val);
- if (!p)
- return -EINVAL;
+ values = kmalloc_array(count, sizeof(*values), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(dev, "init", values, count);
+ if (ret)
+ goto out_free;
par->fbtftops.reset(par);
if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
- while (p) {
+ index = -1;
+ while (index < count) {
+ val = values[++index];
+
if (val & FBTFT_OF_INIT_CMD) {
val &= 0xFFFF;
i = 0;
- while (p && !(val & 0xFFFF0000)) {
+ while ((index < count) && !(val & 0xFFFF0000)) {
if (i > 63) {
- dev_err(par->info->device,
+ dev_err(dev,
"%s: Maximum register values exceeded\n",
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
buf[i++] = val;
- p = of_prop_next_u32(prop, p, &val);
+ val = values[++index];
}
/* make debug message */
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
@@ -966,17 +971,18 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
"init: msleep(%u)\n", val & 0xFFFF);
msleep(val & 0xFFFF);
- p = of_prop_next_u32(prop, p, &val);
+ val = values[++index];
} else {
- dev_err(par->info->device, "illegal init value 0x%X\n",
- val);
- return -EINVAL;
+ dev_err(dev, "illegal init value 0x%X\n", val);
+ ret = -EINVAL;
+ goto out_free;
}
}
- return 0;
+out_free:
+ kfree(values);
+ return ret;
}
-#endif
/**
* fbtft_init_display() - Generic init_display() function
@@ -1137,27 +1143,25 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
return 0;
}
-#ifdef CONFIG_OF
/* returns 0 if the property is not present */
-static u32 fbtft_of_value(struct device_node *node, const char *propname)
+static u32 fbtft_property_value(struct device *dev, const char *propname)
{
int ret;
u32 val = 0;
- ret = of_property_read_u32(node, propname, &val);
+ ret = device_property_read_u32(dev, propname, &val);
if (ret == 0)
- pr_info("%s: %s = %u\n", __func__, propname, val);
+ dev_info(dev, "%s: %s = %u\n", __func__, propname, val);
return val;
}
-static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
+static struct fbtft_platform_data *fbtft_properties_read(struct device *dev)
{
- struct device_node *node = dev->of_node;
struct fbtft_platform_data *pdata;
- if (!node) {
- dev_err(dev, "Missing platform data or DT\n");
+ if (!dev_fwnode(dev)) {
+ dev_err(dev, "Missing platform data or properties\n");
return ERR_PTR(-EINVAL);
}
@@ -1165,35 +1169,28 @@ static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->display.width = fbtft_of_value(node, "width");
- pdata->display.height = fbtft_of_value(node, "height");
- pdata->display.regwidth = fbtft_of_value(node, "regwidth");
- pdata->display.buswidth = fbtft_of_value(node, "buswidth");
- pdata->display.backlight = fbtft_of_value(node, "backlight");
- pdata->display.bpp = fbtft_of_value(node, "bpp");
- pdata->display.debug = fbtft_of_value(node, "debug");
- pdata->rotate = fbtft_of_value(node, "rotate");
- pdata->bgr = of_property_read_bool(node, "bgr");
- pdata->fps = fbtft_of_value(node, "fps");
- pdata->txbuflen = fbtft_of_value(node, "txbuflen");
- pdata->startbyte = fbtft_of_value(node, "startbyte");
- of_property_read_string(node, "gamma", (const char **)&pdata->gamma);
-
- if (of_find_property(node, "led-gpios", NULL))
+ pdata->display.width = fbtft_property_value(dev, "width");
+ pdata->display.height = fbtft_property_value(dev, "height");
+ pdata->display.regwidth = fbtft_property_value(dev, "regwidth");
+ pdata->display.buswidth = fbtft_property_value(dev, "buswidth");
+ pdata->display.backlight = fbtft_property_value(dev, "backlight");
+ pdata->display.bpp = fbtft_property_value(dev, "bpp");
+ pdata->display.debug = fbtft_property_value(dev, "debug");
+ pdata->rotate = fbtft_property_value(dev, "rotate");
+ pdata->bgr = device_property_read_bool(dev, "bgr");
+ pdata->fps = fbtft_property_value(dev, "fps");
+ pdata->txbuflen = fbtft_property_value(dev, "txbuflen");
+ pdata->startbyte = fbtft_property_value(dev, "startbyte");
+ device_property_read_string(dev, "gamma", (const char **)&pdata->gamma);
+
+ if (device_property_present(dev, "led-gpios"))
pdata->display.backlight = 1;
- if (of_find_property(node, "init", NULL))
- pdata->display.fbtftops.init_display = fbtft_init_display_dt;
- pdata->display.fbtftops.request_gpios = fbtft_request_gpios_dt;
+ if (device_property_present(dev, "init"))
+ pdata->display.fbtftops.init_display = fbtft_init_display_from_property;
+ pdata->display.fbtftops.request_gpios = fbtft_request_gpios;
return pdata;
}
-#else
-static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
-{
- dev_err(dev, "Missing platform data\n");
- return ERR_PTR(-EINVAL);
-}
-#endif
/**
* fbtft_probe_common() - Generic device probe() helper function
@@ -1227,7 +1224,7 @@ int fbtft_probe_common(struct fbtft_display *display,
pdata = dev->platform_data;
if (!pdata) {
- pdata = fbtft_probe_dt(dev);
+ pdata = fbtft_properties_read(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
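The conversion above replaces OF-only helpers with the firmware-node-agnostic device property API, so the same code path serves device tree, ACPI and software nodes. A minimal sketch of that read pattern, with "width" used purely as an example property name:

	#include <linux/device.h>
	#include <linux/property.h>

	static u32 example_read_width(struct device *dev)
	{
		u32 width = 0;

		/* leaves width untouched (0 here) when the property is absent */
		device_property_read_u32(dev, "width", &width);
		return width;
	}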
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 9b6bdb62093d..5f782da51959 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -309,7 +309,7 @@ MODULE_DEVICE_TABLE(of, dt_ids); \
static struct spi_driver fbtft_driver_spi_driver = { \
.driver = { \
.name = _name, \
- .of_match_table = of_match_ptr(dt_ids), \
+ .of_match_table = dt_ids, \
}, \
.probe = fbtft_driver_probe_spi, \
.remove = fbtft_driver_remove_spi, \
@@ -319,7 +319,7 @@ static struct platform_driver fbtft_driver_platform_driver = { \
.driver = { \
.name = _name, \
.owner = THIS_MODULE, \
- .of_match_table = of_match_ptr(dt_ids), \
+ .of_match_table = dt_ids, \
}, \
.probe = fbtft_driver_probe_pdev, \
.remove = fbtft_driver_remove_pdev, \
diff --git a/drivers/staging/fieldbus/anybuss/anybuss-client.h b/drivers/staging/fieldbus/anybuss/anybuss-client.h
index 0c4b6a1ffe10..8ee1f1baccf1 100644
--- a/drivers/staging/fieldbus/anybuss/anybuss-client.h
+++ b/drivers/staging/fieldbus/anybuss/anybuss-client.h
@@ -12,6 +12,9 @@
#include <linux/types.h>
#include <linux/poll.h>
+/* move to <linux/fieldbus_dev.h> when taking this out of staging */
+#include "../fieldbus_dev.h"
+
struct anybuss_host;
struct anybuss_client {
@@ -61,12 +64,6 @@ anybuss_set_drvdata(struct anybuss_client *client, void *data)
int anybuss_set_power(struct anybuss_client *client, bool power_on);
-enum anybuss_offl_mode {
- AB_OFFL_MODE_CLEAR = 0,
- AB_OFFL_MODE_FREEZE,
- AB_OFFL_MODE_SET
-};
-
struct anybuss_memcfg {
u16 input_io;
u16 input_dpram;
@@ -76,7 +73,7 @@ struct anybuss_memcfg {
u16 output_dpram;
u16 output_total;
- enum anybuss_offl_mode offl_mode;
+ enum fieldbus_dev_offl_mode offl_mode;
};
int anybuss_start_init(struct anybuss_client *client,
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index 2ecffa42e561..5b8d0bae9ff3 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -127,12 +127,10 @@ static const struct regmap_config arcx_regmap_cfg = {
static struct regmap *create_parallel_regmap(struct platform_device *pdev,
int idx)
{
- struct resource *res;
void __iomem *base;
struct device *dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, idx + 1);
if (IS_ERR(base))
return ERR_CAST(base);
return devm_regmap_init_mmio(dev, base, &arcx_regmap_cfg);
@@ -230,7 +228,6 @@ static int controller_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct regulator_dev *regulator;
int err, id;
- struct resource *res;
struct anybuss_host *host;
u8 status1, cap;
@@ -244,8 +241,7 @@ static int controller_probe(struct platform_device *pdev)
return PTR_ERR(cd->reset_gpiod);
/* CPLD control memory, sits at index 0 */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cd->cpld_base = devm_ioremap_resource(dev, res);
+ cd->cpld_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cd->cpld_base)) {
dev_err(dev,
"failed to map cpld base address\n");
diff --git a/drivers/staging/fieldbus/anybuss/hms-profinet.c b/drivers/staging/fieldbus/anybuss/hms-profinet.c
index 5446843e35f4..31c43a0a5776 100644
--- a/drivers/staging/fieldbus/anybuss/hms-profinet.c
+++ b/drivers/staging/fieldbus/anybuss/hms-profinet.c
@@ -96,7 +96,7 @@ static int __profi_enable(struct profi_priv *priv)
.output_io = 220,
.output_dpram = PROFI_DPRAM_SIZE,
.output_total = PROFI_DPRAM_SIZE,
- .offl_mode = AB_OFFL_MODE_CLEAR,
+ .offl_mode = FIELDBUS_DEV_OFFL_MODE_CLEAR,
};
/*
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index f69dc4930457..549cb7d51af8 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1022,13 +1022,13 @@ int anybuss_start_init(struct anybuss_client *client,
};
switch (cfg->offl_mode) {
- case AB_OFFL_MODE_CLEAR:
+ case FIELDBUS_DEV_OFFL_MODE_CLEAR:
op_mode = 0;
break;
- case AB_OFFL_MODE_FREEZE:
+ case FIELDBUS_DEV_OFFL_MODE_FREEZE:
op_mode = OP_MODE_FBFC;
break;
- case AB_OFFL_MODE_SET:
+ case FIELDBUS_DEV_OFFL_MODE_SET:
op_mode = OP_MODE_FBS;
break;
default:
diff --git a/drivers/staging/fieldbus/dev_core.c b/drivers/staging/fieldbus/dev_core.c
index f6f5b92ba914..1ba0234cc60d 100644
--- a/drivers/staging/fieldbus/dev_core.c
+++ b/drivers/staging/fieldbus/dev_core.c
@@ -23,9 +23,6 @@ static dev_t fieldbus_devt;
static DEFINE_IDA(fieldbus_ida);
static DEFINE_MUTEX(fieldbus_mtx);
-static const char ctrl_enabled[] = "enabled";
-static const char ctrl_disabled[] = "disabled";
-
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/staging/fieldbus/fieldbus_dev.h b/drivers/staging/fieldbus/fieldbus_dev.h
index a10fc3b446dc..301dca3b8d71 100644
--- a/drivers/staging/fieldbus/fieldbus_dev.h
+++ b/drivers/staging/fieldbus/fieldbus_dev.h
@@ -15,6 +15,12 @@ enum fieldbus_dev_type {
FIELDBUS_DEV_TYPE_PROFINET,
};
+enum fieldbus_dev_offl_mode {
+ FIELDBUS_DEV_OFFL_MODE_CLEAR = 0,
+ FIELDBUS_DEV_OFFL_MODE_FREEZE,
+ FIELDBUS_DEV_OFFL_MODE_SET
+};
+
/**
* struct fieldbus_dev - Fieldbus device
* @read_area: [DRIVER] function to read the process data area of the
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 14a9eebf687e..39c0fe347188 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -18,8 +18,6 @@
#include "ethsw.h"
-static struct workqueue_struct *ethsw_owq;
-
/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR 8
#define DPSW_MIN_VER_MINOR 1
@@ -1174,10 +1172,6 @@ static int port_netdevice_event(struct notifier_block *unused,
return notifier_from_errno(err);
}
-static struct notifier_block port_nb __read_mostly = {
- .notifier_call = port_netdevice_event,
-};
-
struct ethsw_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
@@ -1233,8 +1227,10 @@ static int port_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
struct ethsw_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
if (!ethsw_port_dev_check(dev))
return NOTIFY_DONE;
@@ -1270,7 +1266,7 @@ static int port_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
- queue_work(ethsw_owq, &switchdev_work->work);
+ queue_work(ethsw->workqueue, &switchdev_work->work);
return NOTIFY_DONE;
@@ -1318,31 +1314,27 @@ static int port_switchdev_blocking_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
-static struct notifier_block port_switchdev_nb = {
- .notifier_call = port_switchdev_event,
-};
-
-static struct notifier_block port_switchdev_blocking_nb = {
- .notifier_call = port_switchdev_blocking_event,
-};
-
static int ethsw_register_notifier(struct device *dev)
{
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
- err = register_netdevice_notifier(&port_nb);
+ ethsw->port_nb.notifier_call = port_netdevice_event;
+ err = register_netdevice_notifier(&ethsw->port_nb);
if (err) {
dev_err(dev, "Failed to register netdev notifier\n");
return err;
}
- err = register_switchdev_notifier(&port_switchdev_nb);
+ ethsw->port_switchdev_nb.notifier_call = port_switchdev_event;
+ err = register_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err) {
dev_err(dev, "Failed to register switchdev notifier\n");
goto err_switchdev_nb;
}
- err = register_switchdev_blocking_notifier(&port_switchdev_blocking_nb);
+ ethsw->port_switchdevb_nb.notifier_call = port_switchdev_blocking_event;
+ err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb);
if (err) {
dev_err(dev, "Failed to register switchdev blocking notifier\n");
goto err_switchdev_blocking_nb;
@@ -1351,9 +1343,9 @@ static int ethsw_register_notifier(struct device *dev)
return 0;
err_switchdev_blocking_nb:
- unregister_switchdev_notifier(&port_switchdev_nb);
+ unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
err_switchdev_nb:
- unregister_netdevice_notifier(&port_nb);
+ unregister_netdevice_notifier(&ethsw->port_nb);
return err;
}
@@ -1435,9 +1427,10 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
}
}
- ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
- "ethsw");
- if (!ethsw_owq) {
+ ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
+ WQ_MEM_RECLAIM, "ethsw",
+ ethsw->sw_attr.id);
+ if (!ethsw->workqueue) {
err = -ENOMEM;
goto err_close;
}
@@ -1449,7 +1442,7 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
return 0;
err_destroy_ordered_workqueue:
- destroy_workqueue(ethsw_owq);
+ destroy_workqueue(ethsw->workqueue);
err_close:
dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -1491,21 +1484,22 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
static void ethsw_unregister_notifier(struct device *dev)
{
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
struct notifier_block *nb;
int err;
- nb = &port_switchdev_blocking_nb;
+ nb = &ethsw->port_switchdevb_nb;
err = unregister_switchdev_blocking_notifier(nb);
if (err)
dev_err(dev,
"Failed to unregister switchdev blocking notifier (%d)\n", err);
- err = unregister_switchdev_notifier(&port_switchdev_nb);
+ err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err)
dev_err(dev,
"Failed to unregister switchdev notifier (%d)\n", err);
- err = unregister_netdevice_notifier(&port_nb);
+ err = unregister_netdevice_notifier(&ethsw->port_nb);
if (err)
dev_err(dev,
"Failed to unregister netdev notifier (%d)\n", err);
@@ -1536,7 +1530,7 @@ static int ethsw_remove(struct fsl_mc_device *sw_dev)
ethsw_teardown_irqs(sw_dev);
- destroy_workqueue(ethsw_owq);
+ destroy_workqueue(ethsw->workqueue);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
index 3ea8a0ad8c10..a0244f7d5003 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -66,6 +66,11 @@ struct ethsw_core {
u8 vlans[VLAN_VID_MASK + 1];
bool learning;
+
+ struct notifier_block port_nb;
+ struct notifier_block port_switchdev_nb;
+ struct notifier_block port_switchdevb_nb;
+ struct workqueue_struct *workqueue;
};
#endif /* __ETHSW_H */
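The ethsw changes above move the notifier blocks from file-scope statics into the per-instance ethsw_core so several switches can coexist. A generic sketch of that pattern, using container_of() as one common way to recover the owning instance (struct and function names are hypothetical):

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	struct my_switch {
		struct notifier_block port_nb;
		/* other per-instance state, e.g. an ordered workqueue */
	};

	static int my_netdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		struct my_switch *sw = container_of(nb, struct my_switch, port_nb);

		/* dispatch to the per-instance resources owned by sw */
		pr_debug("netdev event %lu on switch %p\n", event, sw);
		return NOTIFY_DONE;
	}

	static int my_switch_register_notifier(struct my_switch *sw)
	{
		sw->port_nb.notifier_call = my_netdev_event;
		return register_netdevice_notifier(&sw->port_nb);
	}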
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index 9543f8454af9..6964aac2a7ed 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config FIREWIRE_SERIAL
- tristate "TTY over Firewire"
- depends on FIREWIRE && TTY
- help
- This enables TTY over IEEE 1394, providing high-speed serial
+ tristate "TTY over Firewire"
+ depends on FIREWIRE && TTY
+ help
+ This enables TTY over IEEE 1394, providing high-speed serial
connectivity to cabled peers. This driver implements an
ad-hoc transport protocol and is currently limited to
Linux-to-Linux communication.
@@ -14,18 +14,18 @@ config FIREWIRE_SERIAL
if FIREWIRE_SERIAL
config FWTTY_MAX_TOTAL_PORTS
- int "Maximum number of serial ports supported"
- default "64"
- help
- Set this to the maximum number of serial ports you want the
+ int "Maximum number of serial ports supported"
+ default "64"
+ help
+ Set this to the maximum number of serial ports you want the
firewire-serial driver to support.
config FWTTY_MAX_CARD_PORTS
- int "Maximum number of serial ports supported per adapter"
- range 0 FWTTY_MAX_TOTAL_PORTS
- default "32"
- help
- Set this to the maximum number of serial ports each firewire
+ int "Maximum number of serial ports supported per adapter"
+ range 0 FWTTY_MAX_TOTAL_PORTS
+ default "32"
+ help
+ Set this to the maximum number of serial ports each firewire
adapter supports. The actual number of serial ports registered
is set with the module parameter "ttys".
diff --git a/drivers/staging/gasket/gasket_constants.h b/drivers/staging/gasket/gasket_constants.h
index 50d87c7b178c..9ea9c8833f27 100644
--- a/drivers/staging/gasket/gasket_constants.h
+++ b/drivers/staging/gasket/gasket_constants.h
@@ -13,9 +13,6 @@
/* The maximum devices per each type. */
#define GASKET_DEV_MAX 256
-/* The number of supported (and possible) PCI BARs. */
-#define GASKET_NUM_BARS 6
-
/* The number of supported Gasket page tables per device. */
#define GASKET_MAX_NUM_PAGE_TABLES 1
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index 13179f063a61..cd8be80d2076 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -371,7 +371,7 @@ static int gasket_setup_pci(struct pci_dev *pci_dev,
{
int i, mapped_bars, ret;
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
ret = gasket_map_pci_bar(gasket_dev, i);
if (ret) {
mapped_bars = i;
@@ -393,7 +393,7 @@ static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
{
int i;
- for (i = 0; i < GASKET_NUM_BARS; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
gasket_unmap_pci_bar(gasket_dev, i);
}
@@ -493,7 +493,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
(enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
switch (sysfs_type) {
case ATTR_BAR_OFFSETS:
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_desc = &driver_desc->bar_descriptions[i];
if (bar_desc->size == 0)
continue;
@@ -505,7 +505,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
}
break;
case ATTR_BAR_SIZES:
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_desc = &driver_desc->bar_descriptions[i];
if (bar_desc->size == 0)
continue;
@@ -556,7 +556,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
break;
case ATTR_USER_MEM_RANGES:
- for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
current_written =
gasket_write_mappable_regions(buf, driver_desc,
i);
@@ -736,7 +736,7 @@ static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
const struct gasket_driver_desc *driver_desc;
driver_desc = gasket_dev->internal_desc->driver_desc;
- for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
struct gasket_bar_desc bar_desc =
driver_desc->bar_descriptions[i];
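gasket_core.c now iterates the generic PCI_STD_NUM_BARS constant from <linux/pci.h> rather than a driver-local BAR count. A small illustrative loop over the standard BARs (not code from the driver):

	#include <linux/pci.h>

	static void example_print_bars(struct pci_dev *pdev)
	{
		int i;

		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			resource_size_t len = pci_resource_len(pdev, i);

			if (!len)
				continue;	/* BAR not implemented on this device */
			dev_info(&pdev->dev, "BAR%d is %llu bytes\n", i,
				 (unsigned long long)len);
		}
	}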
diff --git a/drivers/staging/gasket/gasket_core.h b/drivers/staging/gasket/gasket_core.h
index be44ac1e3118..c417acadb0d5 100644
--- a/drivers/staging/gasket/gasket_core.h
+++ b/drivers/staging/gasket/gasket_core.h
@@ -268,7 +268,7 @@ struct gasket_dev {
char kobj_name[GASKET_NAME_MAX];
/* Virtual address of mapped BAR memory range. */
- struct gasket_bar_data bar_data[GASKET_NUM_BARS];
+ struct gasket_bar_data bar_data[PCI_STD_NUM_BARS];
/* Coherent buffer. */
struct gasket_coherent_buffer coherent_buffer;
@@ -369,7 +369,7 @@ struct gasket_driver_desc {
/* Set of 6 bar descriptions that describe all PCIe bars.
* Note that BUS/AXI devices (i.e. non PCI devices) use those.
*/
- struct gasket_bar_desc bar_descriptions[GASKET_NUM_BARS];
+ struct gasket_bar_desc bar_descriptions[PCI_STD_NUM_BARS];
/*
* Coherent buffer description.
diff --git a/drivers/staging/gasket/gasket_ioctl.c b/drivers/staging/gasket/gasket_ioctl.c
index 240f9bb10b71..e3047d36d8db 100644
--- a/drivers/staging/gasket/gasket_ioctl.c
+++ b/drivers/staging/gasket/gasket_ioctl.c
@@ -34,8 +34,8 @@ static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);
- return gasket_interrupt_set_eventfd(
- gasket_dev->interrupt_data, die.interrupt, die.event_fd);
+ return gasket_interrupt_set_eventfd(gasket_dev->interrupt_data,
+ die.interrupt, die.event_fd);
}
/* Read the size of the page table. */
@@ -54,9 +54,9 @@ static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
ibuf.size = gasket_page_table_num_entries(
gasket_dev->page_table[ibuf.page_table_index]);
- trace_gasket_ioctl_page_table_data(
- ibuf.page_table_index, ibuf.size, ibuf.host_address,
- ibuf.device_address);
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
return -EFAULT;
@@ -101,9 +101,9 @@ static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
- trace_gasket_ioctl_page_table_data(
- ibuf.page_table_index, ibuf.size, ibuf.host_address,
- ibuf.device_address);
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 82099db4bf0c..a480409090c0 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -7,7 +7,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index e6b660489165..bf3e2a9cc07f 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -155,6 +155,11 @@
* The DOUT/RDY output must also be wired to an interrupt capable GPIO.
*/
+enum {
+ AD7192_SYSCALIB_ZERO_SCALE,
+ AD7192_SYSCALIB_FULL_SCALE,
+};
+
struct ad7192_state {
struct regulator *avdd;
struct regulator *dvdd;
@@ -169,10 +174,80 @@ struct ad7192_state {
u8 devid;
u8 clock_sel;
struct mutex lock; /* protect sensor state */
+ u8 syscalib_mode[8];
struct ad_sigma_delta sd;
};
+static const char * const ad7192_syscalib_modes[] = {
+ [AD7192_SYSCALIB_ZERO_SCALE] = "zero_scale",
+ [AD7192_SYSCALIB_FULL_SCALE] = "full_scale",
+};
+
+static int ad7192_set_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ st->syscalib_mode[chan->channel] = mode;
+
+ return 0;
+}
+
+static int ad7192_get_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ return st->syscalib_mode[chan->channel];
+}
+
+static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ bool sys_calib;
+ int ret, temp;
+
+ ret = strtobool(buf, &sys_calib);
+ if (ret)
+ return ret;
+
+ temp = st->syscalib_mode[chan->channel];
+ if (sys_calib) {
+ if (temp == AD7192_SYSCALIB_ZERO_SCALE)
+ ret = ad_sd_calibrate(&st->sd, AD7192_MODE_CAL_SYS_ZERO,
+ chan->address);
+ else
+ ret = ad_sd_calibrate(&st->sd, AD7192_MODE_CAL_SYS_FULL,
+ chan->address);
+ }
+
+ return ret ? ret : len;
+}
+
+static const struct iio_enum ad7192_syscalib_mode_enum = {
+ .items = ad7192_syscalib_modes,
+ .num_items = ARRAY_SIZE(ad7192_syscalib_modes),
+ .set = ad7192_set_syscalib_mode,
+ .get = ad7192_get_syscalib_mode
+};
+
+static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = {
+ {
+ .name = "sys_calibration",
+ .write = ad7192_write_syscalib,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
+ &ad7192_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", &ad7192_syscalib_mode_enum),
+ {}
+};
+
static struct ad7192_state *ad_sigma_delta_to_ad7192(struct ad_sigma_delta *sd)
{
return container_of(sd, struct ad7192_state, sd);
@@ -770,9 +845,11 @@ static int ad7192_channels_config(struct iio_dev *indio_dev)
*chan = channels[i];
chan->info_mask_shared_by_all |=
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY);
- if (chan->type != IIO_TEMP)
+ if (chan->type != IIO_TEMP) {
chan->info_mask_shared_by_type_available |=
BIT(IIO_CHAN_INFO_SCALE);
+ chan->ext_info = ad7192_calibsys_ext_info;
+ }
chan++;
}
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 038d6732c3fd..23026978a5a5 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -417,6 +417,10 @@ static int ad9834_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
mutex_init(&st->lock);
st->mclk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(st->mclk)) {
+ ret = PTR_ERR(st->mclk);
+ goto error_disable_reg;
+ }
ret = clk_prepare_enable(st->mclk);
if (ret) {
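The ad9834 fix above checks the clock handle before first use. The same defensive pattern in generic form, with hypothetical names and the usual devm lifetime assumption (no explicit clk_put() needed):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_enable_mclk(struct device *dev, struct clk **out)
	{
		struct clk *mclk;
		int ret;

		mclk = devm_clk_get(dev, NULL);
		if (IS_ERR(mclk))
			return PTR_ERR(mclk);	/* e.g. -EPROBE_DEFER */

		ret = clk_prepare_enable(mclk);
		if (ret)
			return ret;

		*out = mclk;
		return 0;
	}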
diff --git a/drivers/staging/isdn/avm/b1.c b/drivers/staging/isdn/avm/b1.c
index 40ca1e8fa09f..32ec8cf31fd0 100644
--- a/drivers/staging/isdn/avm/b1.c
+++ b/drivers/staging/isdn/avm/b1.c
@@ -261,9 +261,10 @@ int b1_loaded(avmcard *card)
b1_put_byte(base, SEND_POLL);
for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
if (b1_rx_full(base)) {
- if ((ans = b1_get_byte(base)) == RECEIVE_POLL) {
+ ans = b1_get_byte(base);
+ if (ans == RECEIVE_POLL)
return 1;
- }
+
printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n",
card->name, ans);
return 0;
@@ -284,8 +285,9 @@ int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
int retval;
b1_reset(port);
+ retval = b1_load_t4file(card, &data->firmware);
- if ((retval = b1_load_t4file(card, &data->firmware))) {
+ if (retval) {
b1_reset(port);
printk(KERN_ERR "%s: failed to load t4file!!\n",
card->name);
@@ -295,7 +297,8 @@ int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
b1_disable_irq(port);
if (data->configuration.len > 0 && data->configuration.data) {
- if ((retval = b1_load_config(card, &data->configuration))) {
+ retval = b1_load_config(card, &data->configuration);
+ if (retval) {
b1_reset(port);
printk(KERN_ERR "%s: failed to load config!!\n",
card->name);
@@ -525,7 +528,9 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
MsgLen = 30;
CAPIMSG_SETLEN(card->msgbuf, 30);
}
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
+
+ skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC);
+ if (!skb) {
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
@@ -539,7 +544,9 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
ApplId = (unsigned) b1_get_word(card->port);
MsgLen = b1_get_slice(card->port, card->msgbuf);
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
+ skb = alloc_skb(MsgLen, GFP_ATOMIC);
+
+ if (!skb) {
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
spin_unlock_irqrestore(&card->lock, flags);
@@ -663,11 +670,17 @@ int b1_proc_show(struct seq_file *m, void *v)
seq_printf(m, "%-16s %s\n", "type", s);
if (card->cardtype == avm_t1isa)
seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
+
+ s = cinfo->version[VER_DRIVER];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
+
+ s = cinfo->version[VER_CARDTYPE];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
+
+ s = cinfo->version[VER_SERIAL];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
@@ -784,13 +797,15 @@ static int __init b1_init(void)
char *p;
char rev[32];
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ p = strchr(revision, ':');
+ if (p && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
+ p = strchr(rev, '$');
+ if (p && p > rev)
*(p - 1) = 0;
- } else
+ } else {
strcpy(rev, "1.0");
-
+ }
printk(KERN_INFO "b1: revision %s\n", rev);
return 0;
diff --git a/drivers/staging/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c
index 17fa615a8c68..9ddadd07e707 100644
--- a/drivers/staging/isdn/gigaset/interface.c
+++ b/drivers/staging/isdn/gigaset/interface.c
@@ -518,7 +518,7 @@ void gigaset_if_init(struct cardstate *cs)
if (!IS_ERR(cs->tty_dev))
dev_set_drvdata(cs->tty_dev, cs);
else {
- pr_warning("could not register device to the tty subsystem\n");
+ pr_warn("could not register device to the tty subsystem\n");
cs->tty_dev = NULL;
}
mutex_unlock(&cs->mutex);
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index bc02534d8dc3..5460bf973c9c 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -99,7 +99,8 @@ struct i2c_device {
#define SMBHSTSTS_INTR 0x02
#define SMBHSTSTS_HOST_BUSY 0x01
-#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | SMBHSTSTS_INTR)
+#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | \
+ SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | SMBHSTSTS_INTR)
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
@@ -136,17 +137,18 @@ static int i801_check_pre(struct i2c_device *priv)
status = inb_p(SMBHSTSTS(priv));
if (status & SMBHSTSTS_HOST_BUSY) {
- dev_err(&priv->adapter.dev, "SMBus is busy, can't use it! (status=%x)\n", status);
+ dev_err(&priv->adapter.dev,
+ "SMBus is busy, can't use it! (status=%x)\n", status);
return -EBUSY;
}
status &= STATUS_FLAGS;
if (status) {
- //dev_dbg(&priv->adapter.dev, "Clearing status flags (%02x)\n", status);
outb_p(status, SMBHSTSTS(priv));
status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
if (status) {
- dev_err(&priv->adapter.dev, "Failed clearing status flags (%02x)\n", status);
+ dev_err(&priv->adapter.dev,
+ "Failed clearing status flags (%02x)\n", status);
return -EBUSY;
}
}
@@ -162,15 +164,20 @@ static int i801_check_post(struct i2c_device *priv, int status, int timeout)
if (timeout) {
dev_err(&priv->adapter.dev, "Transaction timeout\n");
/* try to stop the current command */
- dev_dbg(&priv->adapter.dev, "Terminating the current operation\n");
- outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, SMBHSTCNT(priv));
+ dev_dbg(&priv->adapter.dev,
+ "Terminating the current operation\n");
+ outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
+ SMBHSTCNT(priv));
usleep_range(1000, 2000);
- outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), SMBHSTCNT(priv));
+ outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
+ SMBHSTCNT(priv));
/* Check if it worked */
status = inb_p(SMBHSTSTS(priv));
- if ((status & SMBHSTSTS_HOST_BUSY) || !(status & SMBHSTSTS_FAILED))
- dev_err(&priv->adapter.dev, "Failed terminating the transaction\n");
+ if ((status & SMBHSTSTS_HOST_BUSY) ||
+ !(status & SMBHSTSTS_FAILED))
+ dev_err(&priv->adapter.dev,
+ "Failed terminating the transaction\n");
outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
return -ETIMEDOUT;
}
@@ -244,7 +251,9 @@ static void i801_wait_hwpec(struct i2c_device *priv)
outb_p(status, SMBHSTSTS(priv));
}
-static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int hwpec)
+static int i801_block_transaction_by_block(struct i2c_device *priv,
+ union i2c_smbus_data *data,
+ char read_write, int hwpec)
{
int i, len;
int status;
@@ -259,7 +268,8 @@ static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_sm
outb_p(data->block[i + 1], SMBBLKDAT(priv));
}
- status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 | I801_PEC_EN * hwpec);
+ status = i801_transaction(priv,
+ I801_BLOCK_DATA | ENABLE_INT9 | I801_PEC_EN * hwpec);
if (status)
return status;
@@ -275,7 +285,10 @@ static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_sm
return 0;
}
-static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int command, int hwpec)
+static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
+ union i2c_smbus_data *data,
+ char read_write, int command,
+ int hwpec)
{
int i, len;
int smbcmd;
@@ -301,7 +314,8 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2
else
smbcmd = I801_BLOCK_LAST;
} else {
- if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_READ)
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
+ read_write == I2C_SMBUS_READ)
smbcmd = I801_I2C_BLOCK_DATA;
else
smbcmd = I801_BLOCK_DATA;
@@ -309,25 +323,33 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2
outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
if (i == 1)
- outb_p(inb(SMBHSTCNT(priv)) | I801_START, SMBHSTCNT(priv));
+ outb_p(inb(SMBHSTCNT(priv)) | I801_START,
+ SMBHSTCNT(priv));
/* We will always wait for a fraction of a second! */
timeout = 0;
do {
usleep_range(250, 500);
status = inb_p(SMBHSTSTS(priv));
- } while ((!(status & SMBHSTSTS_BYTE_DONE)) && (timeout++ < MAX_RETRIES));
+ } while (!(status & SMBHSTSTS_BYTE_DONE) &&
+ (timeout++ < MAX_RETRIES));
result = i801_check_post(priv, status, timeout > MAX_RETRIES);
if (result < 0)
return result;
- if (i == 1 && read_write == I2C_SMBUS_READ && command != I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (i == 1 && read_write == I2C_SMBUS_READ &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA) {
len = inb_p(SMBHSTDAT0(priv));
if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
- dev_err(&priv->adapter.dev, "Illegal SMBus block read size %d\n", len);
+ dev_err(&priv->adapter.dev,
+ "Illegal SMBus block read size %d\n",
+ len);
/* Recover */
- while (inb_p(SMBHSTSTS(priv)) & SMBHSTSTS_HOST_BUSY)
- outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
- outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
+ while (inb_p(SMBHSTSTS(priv)) &
+ SMBHSTSTS_HOST_BUSY)
+ outb_p(SMBHSTSTS_BYTE_DONE,
+ SMBHSTSTS(priv));
+ outb_p(SMBHSTSTS_INTR,
+ SMBHSTSTS(priv));
return -EPROTO;
}
data->block[0] = len;
@@ -354,7 +376,9 @@ static int i801_set_block_buffer_mode(struct i2c_device *priv)
}
/* Block transaction function */
-static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int command, int hwpec)
+static int i801_block_transaction(struct i2c_device *priv,
+ union i2c_smbus_data *data, char read_write,
+ int command, int hwpec)
{
int result = 0;
//unsigned char hostc;
@@ -366,12 +390,14 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
//pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
//pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc | SMBHSTCFG_I2C_EN);
} else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
- dev_err(&priv->adapter.dev, "I2C block read is unsupported!\n");
+ dev_err(&priv->adapter.dev,
+ "I2C block read is unsupported!\n");
return -EOPNOTSUPP;
}
}
- if (read_write == I2C_SMBUS_WRITE || command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (read_write == I2C_SMBUS_WRITE ||
+ command == I2C_SMBUS_I2C_BLOCK_DATA) {
if (data->block[0] < 1)
data->block[0] = 1;
if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
@@ -384,13 +410,21 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
* SMBus (not I2C) block transactions, even though the datasheet
* doesn't mention this limitation.
*/
- if ((priv->features & FEATURE_BLOCK_BUFFER) && command != I2C_SMBUS_I2C_BLOCK_DATA && i801_set_block_buffer_mode(priv) == 0)
- result = i801_block_transaction_by_block(priv, data, read_write, hwpec);
- else
- result = i801_block_transaction_byte_by_byte(priv, data, read_write, command, hwpec);
+ if ((priv->features & FEATURE_BLOCK_BUFFER) &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA &&
+ i801_set_block_buffer_mode(priv) == 0) {
+ result = i801_block_transaction_by_block(priv, data,
+ read_write, hwpec);
+ } else {
+ result = i801_block_transaction_byte_by_byte(priv, data,
+ read_write,
+ command, hwpec);
+ }
+
if (result == 0 && hwpec)
i801_wait_hwpec(priv);
- if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_WRITE) {
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
+ read_write == I2C_SMBUS_WRITE) {
/* restore saved configuration register value */
//TODO: Figure out the right thing to do here...
//pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
@@ -399,32 +433,41 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
}
/* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data)
+static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
{
int hwpec;
int block = 0;
int ret, xact = 0;
struct i2c_device *priv = i2c_get_adapdata(adap);
- hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA;
+ hwpec = (priv->features & FEATURE_SMBUS_PEC) &&
+ (flags & I2C_CLIENT_PEC) &&
+ size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA;
switch (size) {
case I2C_SMBUS_QUICK:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_QUICK\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
xact = I801_QUICK;
break;
case I2C_SMBUS_BYTE:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_WRITE)
outb_p(command, SMBHSTCMD(priv));
xact = I801_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE)
outb_p(data->byte, SMBHSTDAT0(priv));
@@ -432,7 +475,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_WORD_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_WORD_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE) {
outb_p(data->word & 0xff, SMBHSTDAT0(priv));
@@ -442,7 +487,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_BLOCK_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BLOCK_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
block = 1;
break;
@@ -463,7 +510,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
block = 1;
break;
default:
- dev_dbg(&priv->adapter.dev, " [acc] Unsupported transaction %d\n", size);
+ dev_dbg(&priv->adapter.dev,
+ " [acc] Unsupported transaction %d\n", size);
return -EOPNOTSUPP;
}
@@ -472,13 +520,14 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
} else {
dev_dbg(&priv->adapter.dev, " [acc] hwpec: no\n");
- outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC), SMBAUXCTL(priv));
+ outb_p(inb_p(SMBAUXCTL(priv)) &
+ (~SMBAUXCTL_CRC), SMBAUXCTL(priv));
}
if (block) {
- //ret = 0;
dev_dbg(&priv->adapter.dev, " [acc] block: yes\n");
- ret = i801_block_transaction(priv, data, read_write, size, hwpec);
+ ret = i801_block_transaction(priv, data, read_write, size,
+ hwpec);
} else {
dev_dbg(&priv->adapter.dev, " [acc] block: no\n");
ret = i801_transaction(priv, xact | ENABLE_INT9);
@@ -490,7 +539,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
*/
if (hwpec || block) {
dev_dbg(&priv->adapter.dev, " [acc] hwpec || block\n");
- outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC |
+ SMBAUXCTL_E32B), SMBAUXCTL(priv));
}
if (block) {
dev_dbg(&priv->adapter.dev, " [acc] block\n");
@@ -501,19 +551,22 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
return ret;
}
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK)) {
- dev_dbg(&priv->adapter.dev, " [acc] I2C_SMBUS_WRITE || I801_QUICK -> ret 0\n");
+ dev_dbg(&priv->adapter.dev,
+ " [acc] I2C_SMBUS_WRITE || I801_QUICK -> ret 0\n");
return 0;
}
switch (xact & 0x7f) {
case I801_BYTE: /* Result put in SMBHSTDAT0 */
case I801_BYTE_DATA:
- dev_dbg(&priv->adapter.dev, " [acc] I801_BYTE or I801_BYTE_DATA\n");
+ dev_dbg(&priv->adapter.dev,
+ " [acc] I801_BYTE or I801_BYTE_DATA\n");
data->byte = inb_p(SMBHSTDAT0(priv));
break;
case I801_WORD_DATA:
dev_dbg(&priv->adapter.dev, " [acc] I801_WORD_DATA\n");
- data->word = inb_p(SMBHSTDAT0(priv)) + (inb_p(SMBHSTDAT1(priv)) << 8);
+ data->word = inb_p(SMBHSTDAT0(priv)) +
+ (inb_p(SMBHSTDAT1(priv)) << 8);
break;
}
return 0;
@@ -535,30 +588,47 @@ static u32 i801_func(struct i2c_adapter *adapter)
// http://lxr.free-electrons.com/source/include/uapi/linux/i2c.h#L85
u32 f =
- I2C_FUNC_I2C | /* 0x00000001 (I enabled this one) */
- !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
- !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
- ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
- !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
- I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
- !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
- !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
- !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
- !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
- !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
- !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
- !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
- ((priv->features & FEATURE_I2C_BLOCK_READ) ? I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
+ I2C_FUNC_I2C | /* 0x00000001(I enabled this
+ * one)
+ */
+ !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
+ !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
+ ((priv->features & FEATURE_SMBUS_PEC) ?
+ I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
+ !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
+ I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
+ !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
+ !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
+ !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
+ !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
+ !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
+ !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
+ !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
+ !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
+ !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
+ ((priv->features & FEATURE_I2C_BLOCK_READ) ?
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
I2C_FUNC_SMBUS_BYTE | /* _READ_BYTE _WRITE_BYTE */
- I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA _WRITE_BYTE_DATA */
- I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA _WRITE_WORD_DATA */
- I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA _WRITE_BLOCK_DATA */
- !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK _WRITE_I2C_BLOCK */
- !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE _BYTE_DATA _WORD_DATA _PROC_CALL _WRITE_BLOCK_DATA _I2C_BLOCK _PEC */
+ I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA
+ * _WRITE_BYTE_DATA
+ */
+ I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA
+ * _WRITE_WORD_DATA
+ */
+ I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA
+ * _WRITE_BLOCK_DATA
+ */
+ !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK
+ * _WRITE_I2C_BLOCK
+ */
+ !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE
+ * _BYTE_DATA _WORD_DATA
+ * _PROC_CALL
+ * _WRITE_BLOCK_DATA
+ * _I2C_BLOCK _PEC
+ */
return f;
}
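A note on the functionality mask rebuilt above: logically negating a non-zero I2C_FUNC_* constant yields 0, so every `!I2C_FUNC_...` term is a no-op that only documents which capabilities this fake adapter deliberately leaves out. A small sketch with hypothetical flag values (not the real <uapi/linux/i2c.h> constants) showing that only the non-negated flags end up in the mask:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for a few I2C_FUNC_* bits. */
#define FUNC_I2C		0x00000001u
#define FUNC_10BIT_ADDR		0x00000002u
#define FUNC_SMBUS_QUICK	0x00010000u

int main(void)
{
	/* !FLAG evaluates to 0, so OR-ing it in changes nothing. */
	uint32_t f = FUNC_I2C |
		     !FUNC_10BIT_ADDR |	/* contributes 0 */
		     FUNC_SMBUS_QUICK;

	printf("mask = 0x%08x\n", (unsigned int)f);	/* 0x00010001 */
	printf("10-bit bit set? %s\n",
	       (f & FUNC_10BIT_ADDR) ? "yes" : "no");	/* no */
	return 0;
}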
@@ -610,8 +680,8 @@ static int pi2c_probe(struct platform_device *pldev)
/* Retry up to 3 times on lost arbitration */
priv->adapter.retries = 3;
- //snprintf(priv->adapter.name, sizeof(priv->adapter.name), "Fake SMBus I801 adapter at %04lx", priv->smba);
- snprintf(priv->adapter.name, sizeof(priv->adapter.name), "Fake SMBus I801 adapter");
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+ "Fake SMBus I801 adapter");
err = i2c_add_adapter(&priv->adapter);
if (err) {
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
index 3be33c450cab..8becf972af9c 100644
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ b/drivers/staging/kpc2000/kpc2000_spi.c
@@ -50,6 +50,7 @@ static struct flash_platform_data p2kr0_spi0_pdata = {
.nr_parts = ARRAY_SIZE(p2kr0_spi0_parts),
.parts = p2kr0_spi0_parts,
};
+
static struct flash_platform_data p2kr0_spi1_pdata = {
.name = "SPI1",
.nr_parts = ARRAY_SIZE(p2kr0_spi1_parts),
@@ -162,14 +163,12 @@ union kp_spi_ffctrl {
kp_spi_read_reg(struct kp_spi_controller_state *cs, int idx)
{
u64 __iomem *addr = cs->base;
- u64 val;
addr += idx;
if ((idx == KP_SPI_REG_CONFIG) && (cs->conf_cache >= 0))
return cs->conf_cache;
- val = readq(addr);
- return val;
+ return readq(addr);
}
static inline void
@@ -227,8 +226,7 @@ kp_spi_txrx_pio(struct spi_device *spidev, struct spi_transfer *transfer)
kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, val);
processed++;
}
- }
- else if (rx) {
+ } else if (rx) {
for (i = 0 ; i < c ; i++) {
char test = 0;
@@ -315,19 +313,19 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
if (transfer->speed_hz > KP_SPI_CLK ||
(len && !(rx_buf || tx_buf))) {
dev_dbg(kpspi->dev, " transfer: %d Hz, %d %s%s, %d bpw\n",
- transfer->speed_hz,
- len,
- tx_buf ? "tx" : "",
- rx_buf ? "rx" : "",
- transfer->bits_per_word);
+ transfer->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ transfer->bits_per_word);
dev_dbg(kpspi->dev, " transfer -EINVAL\n");
return -EINVAL;
}
if (transfer->speed_hz &&
transfer->speed_hz < (KP_SPI_CLK >> 15)) {
dev_dbg(kpspi->dev, "speed_hz %d below minimum %d Hz\n",
- transfer->speed_hz,
- KP_SPI_CLK >> 15);
+ transfer->speed_hz,
+ KP_SPI_CLK >> 15);
dev_dbg(kpspi->dev, " speed_hz -EINVAL\n");
return -EINVAL;
}
@@ -478,7 +476,7 @@ kp_spi_probe(struct platform_device *pldev)
/* register the slave boards */
#define NEW_SPI_DEVICE_FROM_BOARD_INFO_TABLE(table) \
for (i = 0 ; i < ARRAY_SIZE(table) ; i++) { \
- spi_new_device(master, &(table[i])); \
+ spi_new_device(master, &table[i]); \
}
switch ((drvdata->card_id & 0xFFFF0000) >> 16) {
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.c b/drivers/staging/media/allegro-dvt/nal-h264.c
index 4e14b77851e1..bd48b8883572 100644
--- a/drivers/staging/media/allegro-dvt/nal-h264.c
+++ b/drivers/staging/media/allegro-dvt/nal-h264.c
@@ -235,7 +235,7 @@ static inline int rbsp_write_bit(struct rbsp *rbsp, bool value)
rbsp->pos++;
- if (value == 1 ||
+ if (value ||
(rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) {
rbsp->num_consecutive_zeros = 0;
} else {
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index f670bbde4159..deb90ae37859 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -26,21 +26,9 @@
#include "hantro_hw.h"
-#define VP8_MB_DIM 16
-#define VP8_MB_WIDTH(w) DIV_ROUND_UP(w, VP8_MB_DIM)
-#define VP8_MB_HEIGHT(h) DIV_ROUND_UP(h, VP8_MB_DIM)
-
-#define H264_MB_DIM 16
-#define H264_MB_WIDTH(w) DIV_ROUND_UP(w, H264_MB_DIM)
-#define H264_MB_HEIGHT(h) DIV_ROUND_UP(h, H264_MB_DIM)
-
-#define MPEG2_MB_DIM 16
-#define MPEG2_MB_WIDTH(w) DIV_ROUND_UP(w, MPEG2_MB_DIM)
-#define MPEG2_MB_HEIGHT(h) DIV_ROUND_UP(h, MPEG2_MB_DIM)
-
-#define JPEG_MB_DIM 16
-#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
-#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
+#define MB_DIM 16
+#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
+#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
struct hantro_ctx;
struct hantro_codec_ops;
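The per-codec macroblock helpers collapse into a single MB_DIM/MB_WIDTH/MB_HEIGHT trio because every codec handled by this driver works on 16x16 macroblocks, so all four old macro sets were identical. A minimal userspace sketch of the same round-up arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MB_DIM			16
#define MB_WIDTH(w)		DIV_ROUND_UP(w, MB_DIM)
#define MB_HEIGHT(h)		DIV_ROUND_UP(h, MB_DIM)

int main(void)
{
	/* 1920x1080 rounds up to 120 x 68 macroblocks (1088 coded lines). */
	printf("%ux%u -> %u x %u MBs\n",
	       1920u, 1080u, MB_WIDTH(1920u), MB_HEIGHT(1080u));
	return 0;
}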
@@ -379,7 +367,7 @@ static inline void hantro_reg_write(struct hantro_dev *vpu,
bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
-dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts);
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts);
static inline struct vb2_v4l2_buffer *
hantro_get_src_buf(struct hantro_ctx *ctx)
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 6d9d41170832..26108c96b674 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -43,8 +43,9 @@ void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
return ctrl ? ctrl->p_cur.p : NULL;
}
-dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts)
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
+ struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
struct vb2_buffer *buf;
int index;
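hantro_get_ref() now takes the codec context and resolves the CAPTURE queue internally, so callers only supply the reference timestamp. The core idea, a timestamp-keyed lookup that returns the buffer's DMA address or 0 when nothing matches, can be modelled in plain C (struct fake_buf, get_ref() and the addresses below are purely illustrative):

#include <stdio.h>
#include <stdint.h>

struct fake_buf {
	uint64_t timestamp;
	uint64_t dma_addr;
};

/* Return the matching buffer's address, or 0 so the caller can fall
 * back to the destination buffer itself. */
static uint64_t get_ref(const struct fake_buf *q, int n, uint64_t ts)
{
	for (int i = 0; i < n; i++)
		if (q[i].timestamp == ts)
			return q[i].dma_addr;
	return 0;
}

int main(void)
{
	struct fake_buf q[] = {
		{ .timestamp = 1000, .dma_addr = 0x40000000ull },
		{ .timestamp = 2000, .dma_addr = 0x40800000ull },
	};

	printf("ts=2000 -> 0x%llx\n",
	       (unsigned long long)get_ref(q, 2, 2000));
	printf("ts=3000 -> 0x%llx\n",
	       (unsigned long long)get_ref(q, 2, 3000));	/* 0: not found */
	return 0;
}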
@@ -413,20 +414,18 @@ static int hantro_open(struct file *filp)
if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
ctx->buf_finish = hantro_enc_buf_finish;
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- queue_init);
} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
ctx->buf_finish = hantro_dec_buf_finish;
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- queue_init);
} else {
- ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto err_ctx_free;
}
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
- kfree(ctx);
- return ret;
+ goto err_ctx_free;
}
v4l2_fh_init(&ctx->fh, vdev);
@@ -447,6 +446,7 @@ static int hantro_open(struct file *filp)
err_fh_free:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
+err_ctx_free:
kfree(ctx);
return ret;
}
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 7ab534936843..3cd40a8f0daa 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -34,9 +34,11 @@ static void set_params(struct hantro_ctx *ctx)
reg = G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0x0);
if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
- reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
- if (dec_param->nal_ref_idc)
- reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ if (sps->profile_idc > 66) {
+ reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
+ if (dec_param->nal_ref_idc)
+ reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ }
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
@@ -49,8 +51,8 @@ static void set_params(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Decoder control register 1. */
- reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(sps->pic_width_in_mbs_minus1 + 1) |
- G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(sps->pic_height_in_map_units_minus1 + 1) |
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width)) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
G1_REG_DEC_CTRL1_REF_FRAMES(sps->max_num_ref_frames);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
@@ -61,7 +63,7 @@ static void set_params(struct hantro_ctx *ctx)
/* always use the matrix sent from userspace */
reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
- if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
@@ -79,7 +81,7 @@ static void set_params(struct hantro_ctx *ctx)
reg |= G1_REG_DEC_CTRL4_CABAC_E;
if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
reg |= G1_REG_DEC_CTRL4_DIR_8X8_INFER_E;
- if (sps->chroma_format_idc == 0)
+ if (sps->profile_idc >= 100 && sps->chroma_format_idc == 0)
reg |= G1_REG_DEC_CTRL4_BLACKWHITE_E;
if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
reg |= G1_REG_DEC_CTRL4_WEIGHT_PRED_E;
@@ -220,10 +222,9 @@ static void set_ref(struct hantro_ctx *ctx)
/* Set up addresses of DPB buffers. */
for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
- struct vb2_buffer *buf = hantro_h264_get_ref_buf(ctx, i);
+ dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
- vdpu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(buf, 0),
- G1_REG_ADDR_REF(i));
+ vdpu_write_relaxed(vpu, dma_addr, G1_REG_ADDR_REF(i));
}
}
@@ -233,6 +234,7 @@ static void set_buffers(struct hantro_ctx *ctx)
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t src_dma, dst_dma;
+ size_t offset = 0;
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
@@ -243,18 +245,30 @@ static void set_buffers(struct hantro_ctx *ctx)
/* Destination (decoded frame) buffer. */
dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
+ /* Adjust dma addr to start at second line for bottom field */
+ if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ offset = ALIGN(ctx->src_fmt.width, MB_DIM);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DST);
/* Higher profiles require DMV buffer appended to reference frames. */
- if (ctrls->sps->profile_idc > 66) {
- size_t pic_size = ctx->h264_dec.pic_size;
- size_t mv_offset = round_up(pic_size, 8);
-
+ if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
+ unsigned int bytes_per_mb = 384;
+
+ /* DMV buffer for monochrome starts directly after Y-plane */
+ if (ctrls->sps->profile_idc >= 100 &&
+ ctrls->sps->chroma_format_idc == 0)
+ bytes_per_mb = 256;
+ offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+
+ /*
+ * DMV buffer is split in two for field encoded frames,
+ * adjust offset for bottom field
+ */
if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
- mv_offset += 32 * H264_MB_WIDTH(ctx->dst_fmt.width);
-
- vdpu_write_relaxed(vpu, dst_dma + mv_offset,
- G1_REG_ADDR_DIR_MV);
+ offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
}
/* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
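In the hunk above, the motion-vector (DMV) area starts right after the decoded pixel data: 384 bytes per macroblock for 4:2:0 content (256 luma + 128 chroma) or 256 bytes per macroblock for monochrome, and the bottom field's half of the DMV buffer sits another 32 bytes per macroblock further in. A standalone sketch of that offset arithmetic:

#include <stdio.h>
#include <stdbool.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MB_DIM			16

static size_t dmv_offset(unsigned int width, unsigned int height,
			 bool monochrome, bool bottom_field)
{
	size_t mbs = (size_t)DIV_ROUND_UP(width, MB_DIM) *
		     DIV_ROUND_UP(height, MB_DIM);
	/* Pixel data per MB: 256 bytes of luma, +128 of chroma for 4:2:0. */
	size_t offset = (monochrome ? 256 : 384) * mbs;

	/* The DMV area is split in two for field pictures. */
	if (bottom_field)
		offset += 32 * mbs;
	return offset;
}

int main(void)
{
	printf("1920x1088 4:2:0, top field:    %zu bytes\n",
	       dmv_offset(1920, 1088, false, false));
	printf("1920x1088 4:2:0, bottom field: %zu bytes\n",
	       dmv_offset(1920, 1088, false, true));
	return 0;
}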
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index 80f0e94f8afa..f3bf67d8a289 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -105,17 +105,14 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
switch (picture->picture_coding_type) {
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(vq,
+ backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
/* fall-through */
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(vq,
+ forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
}
@@ -207,8 +204,8 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
G1_REG_DEC_AXI_WR_ID(0);
vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
- reg = G1_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
- G1_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ reg = G1_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ G1_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
G1_REG_ALT_SCAN_E(picture->alternate_scan) |
G1_REG_TOPFIELDFIRST_E(picture->top_field_first);
vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index 6d99c2be01cf..cad18094fee0 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -370,19 +370,18 @@ static void cfg_tap(struct hantro_ctx *ctx,
static void cfg_ref(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame_header *hdr)
{
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
dma_addr_t ref;
vb2_dst = hantro_get_dst_buf(ctx);
- ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
- ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
WARN_ON(!ref && hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -390,7 +389,7 @@ static void cfg_ref(struct hantro_ctx *ctx,
ref |= G1_REG_ADDR_REF_TOPC_E;
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
- ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
WARN_ON(!ref && hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -470,8 +469,8 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Frame dimensions */
- mb_width = VP8_MB_WIDTH(width);
- mb_height = VP8_MB_HEIGHT(height);
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
G1_REG_DEC_CTRL1_PIC_MB_W_EXT(mb_width >> 9) |
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index ecd34a7db190..938b48d4d3d9 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -116,8 +116,8 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
/* Make sure that all registers are written at this point. */
vepu_write(vpu, reg, H1_REG_AXI_CTRL);
- reg = H1_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | H1_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ reg = H1_REG_ENC_CTRL_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | H1_REG_ENC_CTRL_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| H1_REG_ENC_CTRL_ENC_MODE_JPEG
| H1_REG_ENC_PIC_INTRA
| H1_REG_ENC_CTRL_EN_BIT;
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index 0d758e0c0f99..568640eab3a6 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -20,9 +20,9 @@
/* Size with u32 units. */
#define CABAC_INIT_BUFFER_SIZE (460 * 2)
#define POC_BUFFER_SIZE 34
-#define SCALING_LIST_SIZE (6 * 16 + 6 * 64)
+#define SCALING_LIST_SIZE (6 * 16 + 2 * 64)
-#define POC_CMP(p0, p1) ((p0) < (p1) ? -1 : 1)
+#define HANTRO_CMP(a, b) ((a) < (b) ? -1 : 1)
/* Data structure describing auxiliary buffer format. */
struct hantro_h264_dec_priv_tbl {
@@ -194,23 +194,6 @@ static const u32 h264_cabac_table[] = {
0x1f0c2517, 0x1f261440
};
-/*
- * NOTE: The scaling lists are in zig-zag order, apply inverse scanning process
- * to get the values in matrix order. In addition, the hardware requires bytes
- * swapped within each subsequent 4 bytes. Both arrays below include both
- * transformations.
- */
-static const u32 zig_zag_4x4[] = {
- 3, 2, 7, 11, 6, 1, 0, 5, 10, 15, 14, 9, 4, 8, 13, 12
-};
-
-static const u32 zig_zag_8x8[] = {
- 3, 2, 11, 19, 10, 1, 0, 9, 18, 27, 35, 26, 17, 8, 7, 6,
- 15, 16, 25, 34, 43, 51, 42, 33, 24, 23, 14, 5, 4, 13, 22, 31,
- 32, 41, 50, 59, 58, 49, 40, 39, 30, 21, 12, 20, 29, 38, 47, 48,
- 57, 56, 55, 46, 37, 28, 36, 45, 54, 63, 62, 53, 44, 52, 61, 60
-};
-
static void
reorder_scaling_list(struct hantro_ctx *ctx)
{
@@ -218,33 +201,23 @@ reorder_scaling_list(struct hantro_ctx *ctx)
const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
- const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8);
const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
- u8 *dst = tbl->scaling_list;
- const u8 *src;
+ u32 *dst = (u32 *)tbl->scaling_list;
+ const u32 *src;
int i, j;
- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4);
- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8);
- BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) !=
- num_list_4x4 * list_len_4x4 +
- num_list_8x8 * list_len_8x8);
-
- src = &scaling->scaling_list_4x4[0][0];
- for (i = 0; i < num_list_4x4; ++i) {
- for (j = 0; j < list_len_4x4; ++j)
- dst[zig_zag_4x4[j]] = src[j];
- src += list_len_4x4;
- dst += list_len_4x4;
+ for (i = 0; i < num_list_4x4; i++) {
+ src = (u32 *)&scaling->scaling_list_4x4[i];
+ for (j = 0; j < list_len_4x4 / 4; j++)
+ *dst++ = swab32(src[j]);
}
- src = &scaling->scaling_list_8x8[0][0];
- for (i = 0; i < num_list_8x8; ++i) {
- for (j = 0; j < list_len_8x8; ++j)
- dst[zig_zag_8x8[j]] = src[j];
- src += list_len_8x8;
- dst += list_len_8x8;
+ /* Only Intra/Inter Y lists */
+ for (i = 0; i < 2; i++) {
+ src = (u32 *)&scaling->scaling_list_8x8[i];
+ for (j = 0; j < list_len_8x8 / 4; j++)
+ *dst++ = swab32(src[j]);
}
}
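The reordering helper no longer performs an inverse zig-zag scan; it just byte-swaps each 32-bit word of the six 4x4 lists and of the first two 8x8 lists (Intra/Inter Y), which is also why SCALING_LIST_SIZE shrinks to 6 * 16 + 2 * 64 bytes. A runnable model of that per-word copy (list contents are dummies):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

int main(void)
{
	uint8_t list4x4[6][16];			/* six 4x4 scaling lists */
	uint8_t list8x8[6][64];			/* only the first two are used */
	uint32_t dst[(6 * 16 + 2 * 64) / 4];
	uint32_t src;
	size_t n = 0;

	memset(list4x4, 0x10, sizeof(list4x4));
	memset(list8x8, 0x20, sizeof(list8x8));

	for (int i = 0; i < 6; i++)
		for (int j = 0; j < 16 / 4; j++) {
			memcpy(&src, &list4x4[i][4 * j], 4);
			dst[n++] = swab32(src);	/* bytes reversed per word */
		}

	for (int i = 0; i < 2; i++)		/* Intra/Inter Y 8x8 lists */
		for (int j = 0; j < 64 / 4; j++) {
			memcpy(&src, &list8x8[i][4 * j], 4);
			dst[n++] = swab32(src);
		}

	printf("wrote %zu words (%zu bytes)\n", n, n * 4);	/* 56 words */
	return 0;
}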
@@ -271,6 +244,7 @@ struct hantro_h264_reflist_builder {
const struct v4l2_h264_dpb_entry *dpb;
s32 pocs[HANTRO_H264_DPB_SIZE];
u8 unordered_reflist[HANTRO_H264_DPB_SIZE];
+ int frame_nums[HANTRO_H264_DPB_SIZE];
s32 curpoc;
u8 num_valid;
};
@@ -294,13 +268,20 @@ static void
init_reflist_builder(struct hantro_ctx *ctx,
struct hantro_h264_reflist_builder *b)
{
+ const struct v4l2_ctrl_h264_slice_params *slice_params;
const struct v4l2_ctrl_h264_decode_params *dec_param;
+ const struct v4l2_ctrl_h264_sps *sps;
struct vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx);
const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ int cur_frame_num, max_frame_num;
unsigned int i;
dec_param = ctx->h264_dec.ctrls.decode;
+ slice_params = &ctx->h264_dec.ctrls.slices[0];
+ sps = ctx->h264_dec.ctrls.sps;
+ max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ cur_frame_num = slice_params->frame_num;
memset(b, 0, sizeof(*b));
b->dpb = dpb;
@@ -318,6 +299,18 @@ init_reflist_builder(struct hantro_ctx *ctx,
continue;
buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx));
+
+ /*
+ * Handle frame_num wraparound as described in section
+ * '8.2.4.1 Decoding process for picture numbers' of the spec.
+ * TODO: This logic will have to be adjusted when we start
+ * supporting interlaced content.
+ */
+ if (dpb[i].frame_num > cur_frame_num)
+ b->frame_nums[i] = (int)dpb[i].frame_num - max_frame_num;
+ else
+ b->frame_nums[i] = dpb[i].frame_num;
+
b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt,
dpb[i].bottom_field_order_cnt);
b->unordered_reflist[b->num_valid] = i;
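The new frame_nums[] entries apply the picture-number derivation from H.264 section 8.2.4.1: a reference whose frame_num is greater than the current frame_num must belong to a previous wrap of the counter, so max_frame_num (1 << (log2_max_frame_num_minus4 + 4)) is subtracted before the P reference list is sorted. A tiny standalone illustration:

#include <stdio.h>

/* Model of the 8.2.4.1 adjustment used when building P reference lists. */
static int adjusted_frame_num(int ref_frame_num, int cur_frame_num,
			      int max_frame_num)
{
	if (ref_frame_num > cur_frame_num)
		return ref_frame_num - max_frame_num;
	return ref_frame_num;
}

int main(void)
{
	int max = 1 << (0 + 4);	/* log2_max_frame_num_minus4 == 0 -> 16 */

	/* cur_frame_num just wrapped to 1; a ref with frame_num 15 is older. */
	printf("%d\n", adjusted_frame_num(15, 1, max));	/* -1 */
	printf("%d\n", adjusted_frame_num(0, 1, max));	/* 0 */
	return 0;
}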
@@ -353,9 +346,10 @@ static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
* ascending order.
*/
if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
- return b->frame_num - a->frame_num;
+ return HANTRO_CMP(builder->frame_nums[idxb],
+ builder->frame_nums[idxa]);
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
}
static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
@@ -381,7 +375,7 @@ static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
/* Long term pics in ascending pic num order. */
if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
poca = builder->pocs[idxa];
pocb = builder->pocs[idxb];
@@ -392,11 +386,11 @@ static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
* order.
*/
if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
else if (poca < builder->curpoc)
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
}
static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
@@ -422,22 +416,22 @@ static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
/* Long term pics in ascending pic num order. */
if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
poca = builder->pocs[idxa];
pocb = builder->pocs[idxb];
/*
* Short term pics with POC > cur POC first in POC ascending order
- * followed by short term pics with POC > cur POC in POC descending
+ * followed by short term pics with POC < cur POC in POC descending
* order.
*/
if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
else if (poca < builder->curpoc)
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
}
static void
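The comparators above switch from subtraction to HANTRO_CMP() because a sort callback that returns `a - b` can overflow or wrap when the operands are large or unsigned counters, silently reversing the ordering; the ternary form only ever returns -1 or 1. A short demonstration of the failure mode and the fix:

#include <stdio.h>
#include <stdint.h>

#define HANTRO_CMP(a, b)	((a) < (b) ? -1 : 1)

int main(void)
{
	uint32_t a = 1, b = 0xffffffffu;

	/* Subtraction-based compare: wraps to 2, claims a > b. */
	printf("(int)(a - b)     = %d\n", (int)(a - b));

	/* Ternary compare reports the intended ordering. */
	printf("HANTRO_CMP(a, b) = %d\n", HANTRO_CMP(a, b));	/* -1 */
	return 0;
}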
@@ -537,22 +531,18 @@ static void update_dpb(struct hantro_ctx *ctx)
}
}
-struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
- unsigned int dpb_idx)
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx)
{
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
- struct vb2_buffer *buf;
- int buf_idx = -1;
+ dma_addr_t dma_addr = 0;
if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
- buf_idx = vb2_find_timestamp(cap_q,
- dpb[dpb_idx].reference_ts, 0);
+ dma_addr = hantro_get_ref(ctx, dpb[dpb_idx].reference_ts);
- if (buf_idx >= 0) {
- buf = vb2_get_buffer(cap_q, buf_idx);
- } else {
+ if (!dma_addr) {
struct vb2_v4l2_buffer *dst_buf;
+ struct vb2_buffer *buf;
/*
* If a DPB entry is unused or invalid, address of current
@@ -560,9 +550,10 @@ struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
*/
dst_buf = hantro_get_dst_buf(ctx);
buf = &dst_buf->vb2_buf;
+ dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
}
- return buf;
+ return dma_addr;
}
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
@@ -627,7 +618,6 @@ int hantro_h264_dec_init(struct hantro_ctx *ctx)
struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
struct hantro_aux_buf *priv = &h264_dec->priv;
struct hantro_h264_dec_priv_tbl *tbl;
- struct v4l2_pix_format_mplane pix_mp;
priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
GFP_KERNEL);
@@ -638,9 +628,5 @@ int hantro_h264_dec_init(struct hantro_ctx *ctx)
tbl = priv->cpu;
memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
- v4l2_fill_pixfmt_mp(&pix_mp, ctx->dst_fmt.pixelformat,
- ctx->dst_fmt.width, ctx->dst_fmt.height);
- h264_dec->pic_size = pix_mp.plane_fmt[0].sizeimage;
-
return 0;
}
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 2fab655bf098..fa91dd1848b7 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -80,15 +80,12 @@ struct hantro_h264_dec_reflists {
* @dpb: DPB
* @reflists: P/B0/B1 reflists
* @ctrls: V4L2 controls attached to a run
- * @pic_size: Size in bytes of decoded picture, this is needed
- * to pass the location of motion vectors.
*/
struct hantro_h264_dec_hw_ctx {
struct hantro_aux_buf priv;
struct v4l2_h264_dpb_entry dpb[HANTRO_H264_DPB_SIZE];
struct hantro_h264_dec_reflists reflists;
struct hantro_h264_dec_ctrls ctrls;
- size_t pic_size;
};
/**
@@ -158,8 +155,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
-struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
- unsigned int dpb_idx);
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx);
void hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
int hantro_h264_dec_init(struct hantro_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 3dae52abb96c..1dae76f20034 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -240,14 +240,30 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
pix_mp->height);
/*
+ * A decoded 8-bit 4:2:0 NV12 frame may need memory for up to
+ * 448 bytes per macroblock with additional 32 bytes on
+ * multi-core variants.
+ *
* The H264 decoder needs extra space on the output buffers
* to store motion vectors. This is needed for reference
* frames.
+ *
+ * Memory layout is as follows:
+ *
+ * +---------------------------+
+ * | Y-plane 256 bytes x MBs |
+ * +---------------------------+
+ * | UV-plane 128 bytes x MBs |
+ * +---------------------------+
+ * | MV buffer 64 bytes x MBs |
+ * +---------------------------+
+ * | MC sync 32 bytes |
+ * +---------------------------+
*/
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
pix_mp->plane_fmt[0].sizeimage +=
- 128 * DIV_ROUND_UP(pix_mp->width, 16) *
- DIV_ROUND_UP(pix_mp->height, 16);
+ 64 * MB_WIDTH(pix_mp->width) *
+ MB_WIDTH(pix_mp->height) + 32;
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
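Per the new comment, an H.264 capture buffer needs 64 bytes of motion-vector storage per macroblock plus a fixed 32-byte multi-core sync word on top of the 256 + 128 bytes per macroblock that v4l2_fill_pixfmt_mp() already accounts for in the NV12 image, i.e. up to 448 bytes per macroblock overall. A standalone model of that sizeimage computation:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MB_DIM			16

int main(void)
{
	unsigned int w = 1920, h = 1088;
	unsigned int mbs = DIV_ROUND_UP(w, MB_DIM) * DIV_ROUND_UP(h, MB_DIM);

	unsigned int nv12  = (256 + 128) * mbs;	/* Y plane + UV plane */
	unsigned int extra = 64 * mbs + 32;	/* MV buffer + MC sync word */

	printf("macroblocks: %u\n", mbs);
	printf("NV12 image:  %u bytes\n", nv12);
	printf("sizeimage:   %u bytes (%u bytes/MB + 32)\n",
	       nv12 + extra, (nv12 + extra - 32) / mbs);
	return 0;
}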
@@ -367,20 +383,27 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
const struct hantro_fmt *formats;
unsigned int num_fmts;
- struct vb2_queue *vq;
int ret;
- /* Change not allowed if queue is busy. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_busy(vq))
- return -EBUSY;
+ ret = vidioc_try_fmt_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
if (!hantro_is_encoder_ctx(ctx)) {
struct vb2_queue *peer_vq;
/*
+ * In order to support dynamic resolution change,
+ * the decoder admits a resolution change, as long
+ * as the pixelformat remains. Can't be done if streaming.
+ */
+ if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
+ pix_mp->pixelformat != ctx->src_fmt.pixelformat))
+ return -EBUSY;
+ /*
* Since format change on the OUTPUT queue will reset
* the CAPTURE queue, we can't allow doing so
* when the CAPTURE queue has buffers allocated.
@@ -389,12 +412,15 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(peer_vq))
return -EBUSY;
+ } else {
+ /*
+ * The encoder doesn't admit a format change if
+ * there are OUTPUT buffers allocated.
+ */
+ if (vb2_is_busy(vq))
+ return -EBUSY;
}
- ret = vidioc_try_fmt_out_mplane(file, priv, f);
- if (ret)
- return ret;
-
formats = hantro_get_formats(ctx, &num_fmts);
ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts,
pix_mp->pixelformat);
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
index 6bfcc47d1e58..f8db6fcaad73 100644
--- a/drivers/staging/media/hantro/rk3288_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -48,10 +48,10 @@ static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
.frmsize = {
.min_width = 96,
.max_width = 8192,
- .step_width = JPEG_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
- .step_height = JPEG_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
@@ -67,11 +67,11 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.max_depth = 2,
.frmsize = {
.min_width = 48,
- .max_width = 3840,
- .step_width = H264_MB_DIM,
+ .max_width = 4096,
+ .step_width = MB_DIM,
.min_height = 48,
- .max_height = 2160,
- .step_height = H264_MB_DIM,
+ .max_height = 2304,
+ .step_height = MB_DIM,
},
},
{
@@ -81,10 +81,10 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 1920,
- .step_width = MPEG2_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 1088,
- .step_height = MPEG2_MB_DIM,
+ .step_height = MB_DIM,
},
},
{
@@ -94,10 +94,10 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = VP8_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = VP8_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw.c b/drivers/staging/media/hantro/rk3399_vpu_hw.c
index 14d14bc6b12b..9ac1f2cb6a16 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw.c
@@ -47,10 +47,10 @@ static const struct hantro_fmt rk3399_vpu_enc_fmts[] = {
.frmsize = {
.min_width = 96,
.max_width = 8192,
- .step_width = JPEG_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
- .step_height = JPEG_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
@@ -67,10 +67,10 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 1920,
- .step_width = MPEG2_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 1088,
- .step_height = MPEG2_MB_DIM,
+ .step_height = MB_DIM,
},
},
{
@@ -80,10 +80,10 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = VP8_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = VP8_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 06162f569b5e..067892345b5d 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -149,8 +149,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);
- reg = VEPU_REG_MB_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | VEPU_REG_MB_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ reg = VEPU_REG_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_MB_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| VEPU_REG_FRAME_TYPE_INTRA
| VEPU_REG_ENCODE_FORMAT_JPEG
| VEPU_REG_ENCODE_ENABLE;
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
index e7ba5c0441cc..b40d2cdf832f 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -107,17 +107,14 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
switch (picture->picture_coding_type) {
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(vq,
+ backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
/* fall-through */
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(vq,
+ forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
}
@@ -223,8 +220,8 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
VDPU_REG_DEC_CLK_GATE_E(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
- reg = VDPU_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
- VDPU_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ reg = VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
VDPU_REG_ALT_SCAN_E(picture->alternate_scan) |
VDPU_REG_TOPFIELDFIRST_E(picture->top_field_first);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
index f17e32620b08..76d7ed3fd69a 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
@@ -449,18 +449,16 @@ static void cfg_ref(struct hantro_ctx *ctx,
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
- struct vb2_queue *cap_q;
dma_addr_t ref;
- cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_dst = hantro_get_dst_buf(ctx);
- ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
- ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
WARN_ON(!ref && hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -468,7 +466,7 @@ static void cfg_ref(struct hantro_ctx *ctx,
ref |= VDPU_REG_VP8_GREF_SIGN_BIAS;
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
- ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
WARN_ON(!ref && hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -563,8 +561,8 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &vp8_dec_filter_disable, 1);
/* Frame dimensions */
- mb_width = VP8_MB_WIDTH(width);
- mb_height = VP8_MB_HEIGHT(height);
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
hantro_reg_write(vpu, &vp8_dec_mb_width, mb_width);
hantro_reg_write(vpu, &vp8_dec_mb_height, mb_height);
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 35e60a120dc1..2a4f77e83ed3 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -428,32 +428,19 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int prp_registered(struct v4l2_subdev *sd)
{
struct prp_priv *priv = sd_to_priv(sd);
- int i, ret;
u32 code;
- for (i = 0; i < PRP_NUM_PADS; i++) {
- priv->pad[i].flags = (i == PRP_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
-
/* init default frame interval */
priv->frame_interval.numerator = 1;
priv->frame_interval.denominator = 30;
/* set a default mbus format */
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
- ret = imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
- V4L2_FIELD_NONE, NULL);
- if (ret)
- return ret;
-
- return media_entity_pads_init(&sd->entity, PRP_NUM_PADS, priv->pad);
+ return imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
+ V4L2_FIELD_NONE, NULL);
}
static const struct v4l2_subdev_pad_ops prp_pad_ops = {
@@ -487,6 +474,7 @@ static const struct v4l2_subdev_internal_ops prp_internal_ops = {
static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
+ int i;
priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -496,7 +484,12 @@ static int prp_init(struct imx_ic_priv *ic_priv)
ic_priv->task_priv = priv;
priv->ic_priv = ic_priv;
- return 0;
+ for (i = 0; i < PRP_NUM_PADS; i++)
+ priv->pad[i].flags = (i == PRP_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ return media_entity_pads_init(&ic_priv->sd.entity, PRP_NUM_PADS,
+ priv->pad);
}
static void prp_remove(struct imx_ic_priv *ic_priv)
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 67ffa46a8e96..09c4e3f33807 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -1240,21 +1240,16 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int prp_registered(struct v4l2_subdev *sd)
{
struct prp_priv *priv = sd_to_priv(sd);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
int i, ret;
u32 code;
+ /* set a default mbus format */
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
- priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
- /* set a default mbus format */
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
640, 480, code, V4L2_FIELD_NONE,
&priv->cc[i]);
@@ -1266,22 +1261,26 @@ static int prp_registered(struct v4l2_subdev *sd)
priv->frame_interval.numerator = 1;
priv->frame_interval.denominator = 30;
- ret = media_entity_pads_init(&sd->entity, PRPENCVF_NUM_PADS,
- priv->pad);
- if (ret)
- return ret;
+ priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
+ &ic_priv->sd,
+ PRPENCVF_SRC_PAD);
+ if (IS_ERR(priv->vdev))
+ return PTR_ERR(priv->vdev);
ret = imx_media_capture_device_register(priv->vdev);
if (ret)
- return ret;
+ goto remove_vdev;
ret = prp_init_controls(priv);
if (ret)
- goto unreg;
+ goto unreg_vdev;
return 0;
-unreg:
+
+unreg_vdev:
imx_media_capture_device_unregister(priv->vdev);
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
return ret;
}
@@ -1290,6 +1289,8 @@ static void prp_unregistered(struct v4l2_subdev *sd)
struct prp_priv *priv = sd_to_priv(sd);
imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
+
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
}
@@ -1325,6 +1326,7 @@ static const struct v4l2_subdev_internal_ops prp_internal_ops = {
static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
+ int i, ret;
priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1336,15 +1338,19 @@ static int prp_init(struct imx_ic_priv *ic_priv)
spin_lock_init(&priv->irqlock);
timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
- priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
- &ic_priv->sd,
- PRPENCVF_SRC_PAD);
- if (IS_ERR(priv->vdev))
- return PTR_ERR(priv->vdev);
-
mutex_init(&priv->lock);
- return 0;
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&ic_priv->sd.entity, PRPENCVF_NUM_PADS,
+ priv->pad);
+ if (ret)
+ mutex_destroy(&priv->lock);
+
+ return ret;
}
static void prp_remove(struct imx_ic_priv *ic_priv)
@@ -1352,7 +1358,6 @@ static void prp_remove(struct imx_ic_priv *ic_priv)
struct prp_priv *priv = ic_priv->task_priv;
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
}
struct imx_ic_ops imx_ic_prpencvf_ops = {
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index b33a07bc9105..7712e7be8625 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -26,6 +26,8 @@
#include <media/imx.h>
#include "imx-media.h"
+#define IMX_CAPTURE_NAME "imx-capture"
+
struct capture_priv {
struct imx_media_video_dev vdev;
@@ -69,8 +71,8 @@ static int vidioc_querycap(struct file *file, void *fh,
{
struct capture_priv *priv = video_drvdata(file);
- strscpy(cap->driver, "imx-media-capture", sizeof(cap->driver));
- strscpy(cap->card, "imx-media-capture", sizeof(cap->card));
+ strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", priv->src_sd->name);
@@ -765,13 +767,6 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
INIT_LIST_HEAD(&priv->ready_q);
- priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
- if (ret) {
- v4l2_err(sd, "failed to init dev pad\n");
- goto unreg;
- }
-
/* create the link from the src_sd devnode pad to device node */
ret = media_create_pad_link(&sd->entity, priv->src_sd_pad,
&vfd->entity, 0, 0);
@@ -834,6 +829,7 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
{
struct capture_priv *priv;
struct video_device *vfd;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -858,6 +854,13 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
vfd->queue = &priv->q;
priv->vdev.vfd = vfd;
+ priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
+ if (ret) {
+ video_device_release(vfd);
+ return ERR_PTR(ret);
+ }
+
INIT_LIST_HEAD(&priv->vdev.list);
video_set_drvdata(vfd, priv);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 367e39f5b382..b60ed4f22f6d 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -627,8 +627,8 @@ static int csi_idmac_start(struct csi_priv *priv)
}
priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
- priv->idmac_ch,
- IPU_IRQ_NFB4EOF);
+ priv->idmac_ch,
+ IPU_IRQ_NFB4EOF);
ret = devm_request_irq(priv->dev, priv->nfb4eof_irq,
csi_idmac_nfb4eof_interrupt, 0,
"imx-smfc-nfb4eof", priv);
@@ -1472,7 +1472,7 @@ static void csi_try_fmt(struct csi_priv *priv,
imx_media_enum_mbus_format(&code, 0,
CS_SEL_ANY, false);
*cc = imx_media_find_mbus_format(code,
- CS_SEL_ANY, false);
+ CS_SEL_ANY, false);
sdformat->format.code = (*cc)->codes[0];
}
@@ -1740,9 +1740,6 @@ static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
return v4l2_event_unsubscribe(fh, sub);
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int csi_registered(struct v4l2_subdev *sd)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1759,9 +1756,6 @@ static int csi_registered(struct v4l2_subdev *sd)
priv->csi = csi;
for (i = 0; i < CSI_NUM_PADS; i++) {
- priv->pad[i].flags = (i == CSI_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
code = 0;
if (i != CSI_SINK_PAD)
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
@@ -1793,16 +1787,22 @@ static int csi_registered(struct v4l2_subdev *sd)
goto put_csi;
}
- ret = media_entity_pads_init(&sd->entity, CSI_NUM_PADS, priv->pad);
- if (ret)
+ priv->vdev = imx_media_capture_device_init(priv->sd.dev,
+ &priv->sd,
+ CSI_SRC_PAD_IDMAC);
+ if (IS_ERR(priv->vdev)) {
+ ret = PTR_ERR(priv->vdev);
goto free_fim;
+ }
ret = imx_media_capture_device_register(priv->vdev);
if (ret)
- goto free_fim;
+ goto remove_vdev;
return 0;
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
free_fim:
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1816,6 +1816,7 @@ static void csi_unregistered(struct v4l2_subdev *sd)
struct csi_priv *priv = v4l2_get_subdevdata(sd);
imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1923,7 +1924,7 @@ static int imx_csi_probe(struct platform_device *pdev)
struct ipu_client_platformdata *pdata;
struct pinctrl *pinctrl;
struct csi_priv *priv;
- int ret;
+ int i, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1963,10 +1964,14 @@ static int imx_csi_probe(struct platform_device *pdev)
imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
priv->sd.grp_id, ipu_get_num(priv->ipu));
- priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
- CSI_SRC_PAD_IDMAC);
- if (IS_ERR(priv->vdev))
- return PTR_ERR(priv->vdev);
+ for (i = 0; i < CSI_NUM_PADS; i++)
+ priv->pad[i].flags = (i == CSI_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, CSI_NUM_PADS,
+ priv->pad);
+ if (ret)
+ return ret;
mutex_init(&priv->lock);
@@ -1997,7 +2002,6 @@ static int imx_csi_probe(struct platform_device *pdev)
free:
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
return ret;
}
@@ -2008,7 +2012,6 @@ static int imx_csi_remove(struct platform_device *pdev)
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 4cc6a7462ae2..0788a1874557 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -184,7 +184,15 @@ static const struct imx_media_pixfmt rgb_formats[] = {
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 24,
}, {
- .fourcc = V4L2_PIX_FMT_BGR32,
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 32,
},
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index cfad65a16917..0d83c2c41606 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -841,9 +841,6 @@ out:
return ret;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int vdic_registered(struct v4l2_subdev *sd)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
@@ -851,9 +848,6 @@ static int vdic_registered(struct v4l2_subdev *sd)
u32 code;
for (i = 0; i < VDIC_NUM_PADS; i++) {
- priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
-
code = 0;
if (i != VDIC_SINK_PAD_IDMAC)
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
@@ -874,15 +868,7 @@ static int vdic_registered(struct v4l2_subdev *sd)
priv->active_input_pad = VDIC_SINK_PAD_DIRECT;
- ret = vdic_init_controls(priv);
- if (ret)
- return ret;
-
- ret = media_entity_pads_init(&sd->entity, VDIC_NUM_PADS, priv->pad);
- if (ret)
- v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
-
- return ret;
+ return vdic_init_controls(priv);
}
static void vdic_unregistered(struct v4l2_subdev *sd)
@@ -927,7 +913,7 @@ struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
u32 grp_id)
{
struct vdic_priv *priv;
- int ret;
+ int i, ret;
priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -949,6 +935,15 @@ struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
mutex_init(&priv->lock);
+ for (i = 0; i < VDIC_NUM_PADS; i++)
+ priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+
+ ret = media_entity_pads_init(&priv->sd.entity, VDIC_NUM_PADS,
+ priv->pad);
+ if (ret)
+ goto free;
+
ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
if (ret)
goto free;
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index bfa4b254c4e4..cd3dd6e33ef0 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -497,26 +497,13 @@ out:
return ret;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int csi2_registered(struct v4l2_subdev *sd)
{
struct csi2_dev *csi2 = sd_to_dev(sd);
- int i, ret;
-
- for (i = 0; i < CSI2_NUM_PADS; i++) {
- csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
/* set a default mbus format */
- ret = imx_media_init_mbus_fmt(&csi2->format_mbus,
+ return imx_media_init_mbus_fmt(&csi2->format_mbus,
640, 480, 0, V4L2_FIELD_NONE, NULL);
- if (ret)
- return ret;
-
- return media_entity_pads_init(&sd->entity, CSI2_NUM_PADS, csi2->pad);
}
static const struct media_entity_operations csi2_entity_ops = {
@@ -573,7 +560,7 @@ static int csi2_probe(struct platform_device *pdev)
unsigned int sink_port = 0;
struct csi2_dev *csi2;
struct resource *res;
- int ret;
+ int i, ret;
csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
if (!csi2)
@@ -592,6 +579,16 @@ static int csi2_probe(struct platform_device *pdev)
csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
csi2->sd.grp_id = IMX_MEDIA_GRP_ID_CSI2;
+ for (i = 0; i < CSI2_NUM_PADS; i++) {
+ csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&csi2->sd.entity, CSI2_NUM_PADS,
+ csi2->pad);
+ if (ret)
+ return ret;
+
csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(csi2->pllref_clk)) {
v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index bfd6b5fbf484..db30e2c70f2f 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1100,9 +1100,6 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
int i;
for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
- csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
/* set a default mbus format */
ret = imx_media_init_mbus_fmt(&csi->format_mbus[i],
800, 600, 0, V4L2_FIELD_NONE,
@@ -1115,11 +1112,16 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
csi->frame_interval[i].denominator = 30;
}
- ret = media_entity_pads_init(&sd->entity, IMX7_CSI_PADS_NUM, csi->pad);
- if (ret < 0)
- return ret;
+ csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
+ IMX7_CSI_PAD_SRC);
+ if (IS_ERR(csi->vdev))
+ return PTR_ERR(csi->vdev);
+
+ ret = imx_media_capture_device_register(csi->vdev);
+ if (ret)
+ imx_media_capture_device_remove(csi->vdev);
- return imx_media_capture_device_register(csi->vdev);
+ return ret;
}
static void imx7_csi_unregistered(struct v4l2_subdev *sd)
@@ -1127,6 +1129,7 @@ static void imx7_csi_unregistered(struct v4l2_subdev *sd)
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
imx_media_capture_device_unregister(csi->vdev);
+ imx_media_capture_device_remove(csi->vdev);
}
static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
@@ -1189,7 +1192,7 @@ static int imx7_csi_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct imx_media_dev *imxmd;
struct imx7_csi *csi;
- int ret;
+ int i, ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
if (!csi)
@@ -1251,14 +1254,18 @@ static int imx7_csi_probe(struct platform_device *pdev)
csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI;
snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
- csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
- IMX7_CSI_PAD_SRC);
- if (IS_ERR(csi->vdev))
- return PTR_ERR(csi->vdev);
-
v4l2_ctrl_handler_init(&csi->ctrl_hdlr, 0);
csi->sd.ctrl_handler = &csi->ctrl_hdlr;
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++)
+ csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM,
+ csi->pad);
+ if (ret < 0)
+ goto free;
+
ret = v4l2_async_register_fwnode_subdev(&csi->sd,
sizeof(struct v4l2_async_subdev),
NULL, 0,
@@ -1269,8 +1276,6 @@ static int imx7_csi_probe(struct platform_device *pdev)
return 0;
free:
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
cleanup:
@@ -1298,9 +1303,6 @@ static int imx7_csi_remove(struct platform_device *pdev)
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
-
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 73d8354e618c..99166afca071 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -293,7 +293,7 @@ static int mipi_csis_dump_regs(struct csi_state *state)
struct device *dev = &state->pdev->dev;
unsigned int i;
u32 cfg;
- struct {
+ static const struct {
u32 offset;
const char * const name;
} registers[] = {
@@ -350,6 +350,8 @@ static void mipi_csis_sw_reset(struct csi_state *state)
static int mipi_csis_phy_init(struct csi_state *state)
{
state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy");
+ if (IS_ERR(state->mipi_phy_regulator))
+ return PTR_ERR(state->mipi_phy_regulator);
return regulator_set_voltage(state->mipi_phy_regulator, 1000000,
1000000);
@@ -780,17 +782,6 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mipi_csis_registered(struct v4l2_subdev *mipi_sd)
-{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
-
- state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- return media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
- state->pads);
-}
-
static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
.log_status = mipi_csis_log_status,
};
@@ -816,10 +807,6 @@ static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
.pad = &mipi_csis_pad_ops,
};
-static const struct v4l2_subdev_internal_ops mipi_csis_internal_ops = {
- .registered = mipi_csis_registered,
-};
-
static int mipi_csis_parse_dt(struct platform_device *pdev,
struct csi_state *state)
{
@@ -880,7 +867,6 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
mipi_sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
mipi_sd->entity.ops = &mipi_csis_entity_ops;
- mipi_sd->internal_ops = &mipi_csis_internal_ops;
mipi_sd->dev = &pdev->dev;
@@ -892,6 +878,13 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
v4l2_set_subdevdata(mipi_sd, &pdev->dev);
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&mipi_sd->entity, CSIS_PADS_NUM,
+ state->pads);
+ if (ret)
+ return ret;
+
ret = v4l2_async_register_fwnode_subdev(mipi_sd,
sizeof(struct v4l2_async_subdev),
&sink_port, 1,
@@ -947,7 +940,6 @@ static void mipi_csis_debugfs_exit(struct csi_state *state)
static int mipi_csis_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *mem_res;
struct csi_state *state;
int ret;
@@ -966,11 +958,13 @@ static int mipi_csis_probe(struct platform_device *pdev)
return ret;
}
- mipi_csis_phy_init(state);
+ ret = mipi_csis_phy_init(state);
+ if (ret < 0)
+ return ret;
+
mipi_csis_phy_reset(state);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_ioremap_resource(dev, mem_res);
+ state->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
diff --git a/drivers/staging/media/ipu3/Makefile b/drivers/staging/media/ipu3/Makefile
index cc288ae6d5f2..9def80ef28f3 100644
--- a/drivers/staging/media/ipu3/Makefile
+++ b/drivers/staging/media/ipu3/Makefile
@@ -10,9 +10,3 @@ ipu3-imgu-objs += \
ipu3-css.o ipu3-v4l2.o ipu3.o
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3-imgu.o
-
-# HACK! While this driver is in bad shape, don't enable several warnings
-# that would be otherwise enabled with W=1
-ccflags-y += $(call cc-disable-warning, packed-not-aligned)
-ccflags-y += $(call cc-disable-warning, type-limits)
-ccflags-y += $(call cc-disable-warning, unused-const-variable)
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index 5e55baeaea1a..b44bb4a72ca7 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -9,12 +9,9 @@ staging directory.
relevant. (Sakari)
- IPU3 driver documentation (Laurent)
- Add diagram in driver rst to describe output capability.
Comments on configuring v4l2 subdevs for CIO2 and ImgU.
- uAPI documentation:
- Further clarification on some ambiguities such as data type conversion of
- IEFD CU inputs. (Sakari)
Move acronyms to doc-rst file. (Mauro)
- Switch to yavta from v4l2n in driver docs.
@@ -27,5 +24,3 @@ staging directory.
- Document different operation modes, and which buffer queues are relevant
in each mode. To process an image, which queues require a buffer and in
which ones is it optional?
-
-- Make sure it builds fine with no warnings with W=1
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index c7cd27efac8a..08eaa0bad0de 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -1217,6 +1217,11 @@ struct ipu3_uapi_shd_config {
*
* All CU inputs are unsigned, they will be converted to signed when written
* to register, i.e. a01 will be written to 9 bit register in s4.4 format.
+ * The s4.4 precision means 4 bits for the integer part and 4 bits for the
+ * fractional part, with the leading bit indicating a positive or negative value.
+ * For userspace software (commonly the imaging library), the CU slope values
+ * should be computed from the slope resolution of 1/16 (binary 0.0001, the
+ * minimal interval value); the slope value range is [-256, +255].
* This applies to &ipu3_uapi_iefd_cux6_ed, &ipu3_uapi_iefd_cux2_1,
* &ipu3_uapi_iefd_cux2_1, &ipu3_uapi_iefd_cux4 and &ipu3_uapi_iefd_cux6_rad.
*/
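To make the s4.4 encoding described above concrete, here is a userspace-side sketch (the helper name is hypothetical, not part of this uAPI header) that converts a real-valued slope into the 9-bit register encoding: scale by 16, one step per 1/16, then clamp to [-256, +255]. For example, a slope of 1.5 encodes as 24 and -2.25 as -36.

static inline int example_slope_to_s4_4(double slope)
{
	/* One step is 1/16, so scale the real-valued slope by 16 (truncating). */
	int fixed = (int)(slope * 16.0);

	/* Clamp to the signed 9-bit range the register accepts. */
	if (fixed < -256)
		fixed = -256;
	else if (fixed > 255)
		fixed = 255;

	return fixed;
}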
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 1a966cb2f3a6..6fb60b58447a 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -908,11 +908,7 @@ static int iss_map_mem_resource(struct platform_device *pdev,
struct iss_device *iss,
enum iss_mem_resources res)
{
- struct resource *mem;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
-
- iss->regs[res] = devm_ioremap_resource(iss->dev, mem);
+ iss->regs[res] = devm_platform_ioremap_resource(pdev, res);
return PTR_ERR_OR_ZERO(iss->regs[res]);
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 54144dc9f509..673aa3a5f2bd 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -671,7 +671,7 @@ iss_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
return -EINVAL;
}
subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
/*
@@ -726,7 +726,7 @@ iss_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
return -EINVAL;
}
subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
sdsel.pad = pad;
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
index c85ac6db0302..1bce49d3e7e2 100644
--- a/drivers/staging/media/sunxi/cedrus/Makefile
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o \
- cedrus_mpeg2.o cedrus_h264.o
+ cedrus_mpeg2.o cedrus_h264.o cedrus_h265.o
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 2d3ea8b74dfd..c6ddd46eff82 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -95,6 +95,45 @@ static const struct cedrus_control cedrus_controls[] = {
.codec = CEDRUS_CODEC_H264,
.required = false,
},
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
+ .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE,
+ .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = false,
+ },
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
@@ -241,6 +280,16 @@ static int cedrus_open(struct file *file)
ret = PTR_ERR(ctx->fh.m2m_ctx);
goto err_ctrls;
}
+ ctx->dst_fmt.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
+ cedrus_prepare_format(&ctx->dst_fmt);
+ ctx->src_fmt.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
+ /*
+ * TILED_NV12 has stricter requirements, so copy the width and
+ * height to src_fmt to ensure that it matches the dst_fmt resolution.
+ */
+ ctx->src_fmt.width = ctx->dst_fmt.width;
+ ctx->src_fmt.height = ctx->dst_fmt.height;
+ cedrus_prepare_format(&ctx->src_fmt);
v4l2_fh_add(&ctx->fh);
@@ -330,6 +379,7 @@ static int cedrus_probe(struct platform_device *pdev)
dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
dev->dec_ops[CEDRUS_CODEC_H264] = &cedrus_dec_ops_h264;
+ dev->dec_ops[CEDRUS_CODEC_H265] = &cedrus_dec_ops_h265;
mutex_init(&dev->dev_mutex);
@@ -357,6 +407,8 @@ static int cedrus_probe(struct platform_device *pdev)
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME,
+ sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->mdev.ops = &cedrus_m2m_media_ops;
@@ -438,22 +490,26 @@ static const struct cedrus_variant sun8i_a33_cedrus_variant = {
};
static const struct cedrus_variant sun8i_h3_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_a64_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h5_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h6_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
.mod_rate = 600000000,
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 2f017a651848..96765555ab8a 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -27,12 +27,14 @@
#define CEDRUS_NAME "cedrus"
#define CEDRUS_CAPABILITY_UNTILED BIT(0)
+#define CEDRUS_CAPABILITY_H265_DEC BIT(1)
#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
CEDRUS_CODEC_H264,
+ CEDRUS_CODEC_H265,
CEDRUS_CODEC_LAST,
};
@@ -67,6 +69,12 @@ struct cedrus_mpeg2_run {
const struct v4l2_ctrl_mpeg2_quantization *quantization;
};
+struct cedrus_h265_run {
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+};
+
struct cedrus_run {
struct vb2_v4l2_buffer *src;
struct vb2_v4l2_buffer *dst;
@@ -74,6 +82,7 @@ struct cedrus_run {
union {
struct cedrus_h264_run h264;
struct cedrus_mpeg2_run mpeg2;
+ struct cedrus_h265_run h265;
};
};
@@ -107,9 +116,24 @@ struct cedrus_ctx {
ssize_t mv_col_buf_size;
void *pic_info_buf;
dma_addr_t pic_info_buf_dma;
+ ssize_t pic_info_buf_size;
void *neighbor_info_buf;
dma_addr_t neighbor_info_buf_dma;
+ void *deblk_buf;
+ dma_addr_t deblk_buf_dma;
+ ssize_t deblk_buf_size;
+ void *intra_pred_buf;
+ dma_addr_t intra_pred_buf_dma;
+ ssize_t intra_pred_buf_size;
} h264;
+ struct {
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_addr;
+ ssize_t mv_col_buf_size;
+ ssize_t mv_col_buf_unit_size;
+ void *neighbor_info_buf;
+ dma_addr_t neighbor_info_buf_addr;
+ } h265;
} codec;
};
@@ -155,6 +179,7 @@ struct cedrus_dev {
extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
extern struct cedrus_dec_ops cedrus_dec_ops_h264;
+extern struct cedrus_dec_ops cedrus_dec_ops_h265;
static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
{
@@ -179,12 +204,16 @@ static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
int index, unsigned int plane)
{
- struct vb2_buffer *buf;
+ struct vb2_buffer *buf = NULL;
+ struct vb2_queue *vq;
if (index < 0)
return 0;
- buf = ctx->fh.m2m_ctx->cap_q_ctx.q.bufs[index];
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vq)
+ buf = vb2_get_buffer(vq, index);
+
return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 56ca4c9ad01c..4a2fc33a1d79 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -59,6 +59,15 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_H264_SPS);
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ run.h265.sps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SPS);
+ run.h265.pps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_PPS);
+ run.h265.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS);
+ break;
+
default:
break;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index d6a782703c9b..bfb4a4820a67 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -6,6 +6,7 @@
* Copyright (c) 2018 Bootlin
*/
+#include <linux/delay.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>
@@ -38,7 +39,7 @@ struct cedrus_h264_sram_ref_pic {
#define CEDRUS_H264_FRAME_NUM 18
#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
-#define CEDRUS_PIC_INFO_BUF_SIZE (128 * SZ_1K)
+#define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
static void cedrus_h264_write_sram(struct cedrus_dev *dev,
enum cedrus_h264_sram_off off,
@@ -96,7 +97,7 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_queue *cap_q;
struct cedrus_buffer *output_buf;
struct cedrus_dev *dev = ctx->dev;
unsigned long used_dpbs = 0;
@@ -104,6 +105,8 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
unsigned int output = 0;
unsigned int i;
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
memset(pic_list, 0, sizeof(pic_list));
for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
@@ -167,12 +170,14 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
enum cedrus_h264_sram_off sram)
{
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_queue *cap_q;
struct cedrus_dev *dev = ctx->dev;
u8 sram_array[CEDRUS_MAX_REF_IDX];
unsigned int i;
size_t size;
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
memset(sram_array, 0, sizeof(sram_array));
for (i = 0; i < num_ref; i++) {
@@ -240,8 +245,8 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
sizeof(scaling->scaling_list_8x8[0]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
- scaling->scaling_list_8x8[3],
- sizeof(scaling->scaling_list_8x8[3]));
+ scaling->scaling_list_8x8[1],
+ sizeof(scaling->scaling_list_8x8[1]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
scaling->scaling_list_4x4,
@@ -289,6 +294,28 @@ static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
}
}
+/*
+ * It turns out that using VE_H264_VLD_OFFSET to skip bits is not reliable. In
+ * rare cases the frame is not decoded correctly. However, setting the offset
+ * to 0 and skipping the appropriate number of bits with the flush-bits trigger
+ * always works.
+ */
+static void cedrus_skip_bits(struct cedrus_dev *dev, int num)
+{
+ int count = 0;
+
+ while (count < num) {
+ int tmp = min(num - count, 32);
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_FLUSH_BITS |
+ VE_H264_TRIGGER_TYPE_N_BITS(tmp));
+ while (cedrus_read(dev, VE_H264_STATUS) & VE_H264_STATUS_VLD_BUSY)
+ udelay(1);
+
+ count += tmp;
+ }
+}
+
static void cedrus_set_params(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
@@ -299,12 +326,13 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
struct vb2_buffer *src_buf = &run->src->vb2_buf;
struct cedrus_dev *dev = ctx->dev;
dma_addr_t src_buf_addr;
- u32 offset = slice->header_bit_size;
- u32 len = (slice->size * 8) - offset;
+ u32 len = slice->size * 8;
+ unsigned int pic_width_in_mbs;
+ bool mbaff_pic;
u32 reg;
cedrus_write(dev, VE_H264_VLD_LEN, len);
- cedrus_write(dev, VE_H264_VLD_OFFSET, offset);
+ cedrus_write(dev, VE_H264_VLD_OFFSET, 0);
src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
cedrus_write(dev, VE_H264_VLD_END,
@@ -314,6 +342,20 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
VE_H264_VLD_ADDR_LAST);
+ if (ctx->src_fmt.width > 2048) {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_MIXED_RAM |
+ VE_BUF_CTRL_DBLK_MIXED_RAM);
+ cedrus_write(dev, VE_DBLK_DRAM_BUF_ADDR,
+ ctx->codec.h264.deblk_buf_dma);
+ cedrus_write(dev, VE_INTRAPRED_DRAM_BUF_ADDR,
+ ctx->codec.h264.intra_pred_buf_dma);
+ } else {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_INT_SRAM |
+ VE_BUF_CTRL_DBLK_INT_SRAM);
+ }
+
/*
* FIXME: Since the bitstream parsing is done in software, and
* in userspace, this shouldn't be needed anymore. But it
@@ -323,6 +365,8 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_INIT_SWDEC);
+ cedrus_skip_bits(dev, slice->header_bit_size);
+
if (((pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) &&
(slice->slice_type == V4L2_H264_SLICE_TYPE_P ||
slice->slice_type == V4L2_H264_SLICE_TYPE_SP)) ||
@@ -370,12 +414,20 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
cedrus_write(dev, VE_H264_SPS, reg);
+ mbaff_pic = !(slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ pic_width_in_mbs = sps->pic_width_in_mbs_minus1 + 1;
+
// slice parameters
reg = 0;
+ reg |= ((slice->first_mb_in_slice % pic_width_in_mbs) & 0xff) << 24;
+ reg |= (((slice->first_mb_in_slice / pic_width_in_mbs) *
+ (mbaff_pic + 1)) & 0xff) << 16;
reg |= decode->nal_ref_idc ? BIT(12) : 0;
reg |= (slice->slice_type & 0xf) << 8;
reg |= slice->cabac_init_idc & 0x3;
- reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
+ if (ctx->fh.m2m_ctx->new_frame)
+ reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
reg |= VE_H264_SHS_FIELD_PIC;
if (slice->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
@@ -447,7 +499,7 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
{
struct cedrus_dev *dev = ctx->dev;
- cedrus_engine_enable(dev, CEDRUS_CODEC_H264);
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_H264);
cedrus_write(dev, VE_H264_SDROT_CTRL, 0);
cedrus_write(dev, VE_H264_EXTRA_BUFFER1,
@@ -464,18 +516,30 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
static int cedrus_h264_start(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
+ unsigned int pic_info_size;
unsigned int field_size;
unsigned int mv_col_size;
int ret;
+ /* The formula for the picture buffer size is taken from the CedarX source. */
+
+ if (ctx->src_fmt.width > 2048)
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x4000;
+ else
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x1000;
+
/*
- * FIXME: It seems that the H6 cedarX code is using a formula
- * here based on the size of the frame, while all the older
- * code is using a fixed size, so that might need to be
- * changed at some point.
+ * FIXME: If V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY is set,
+ * there is no need to multiply by 2.
*/
+ pic_info_size += ctx->src_fmt.height * 2 * 64;
+
+ if (pic_info_size < CEDRUS_MIN_PIC_INFO_BUF_SIZE)
+ pic_info_size = CEDRUS_MIN_PIC_INFO_BUF_SIZE;
+
+ ctx->codec.h264.pic_info_buf_size = pic_info_size;
ctx->codec.h264.pic_info_buf =
- dma_alloc_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_alloc_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
&ctx->codec.h264.pic_info_buf_dma,
GFP_KERNEL);
if (!ctx->codec.h264.pic_info_buf)
@@ -528,15 +592,56 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
goto err_neighbor_buf;
}
+ if (ctx->src_fmt.width > 2048) {
+ /*
+ * Formulas for deblock and intra prediction buffer sizes
+ * are taken from CedarX source.
+ */
+
+ ctx->codec.h264.deblk_buf_size =
+ ALIGN(ctx->src_fmt.width, 32) * 12;
+ ctx->codec.h264.deblk_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.deblk_buf_size,
+ &ctx->codec.h264.deblk_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.deblk_buf) {
+ ret = -ENOMEM;
+ goto err_mv_col_buf;
+ }
+
+ ctx->codec.h264.intra_pred_buf_size =
+ ALIGN(ctx->src_fmt.width, 64) * 5;
+ ctx->codec.h264.intra_pred_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.intra_pred_buf_size,
+ &ctx->codec.h264.intra_pred_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.intra_pred_buf) {
+ ret = -ENOMEM;
+ goto err_deblk_buf;
+ }
+ }
+
return 0;
+err_deblk_buf:
+ dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma);
+
+err_mv_col_buf:
+ dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma);
+
err_neighbor_buf:
dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma);
err_pic_buf:
- dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma);
return ret;
@@ -552,9 +657,17 @@ static void cedrus_h264_stop(struct cedrus_ctx *ctx)
dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma);
- dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma);
+ if (ctx->codec.h264.deblk_buf_size)
+ dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma);
+ if (ctx->codec.h264.intra_pred_buf_size)
+ dma_free_coherent(dev->dev, ctx->codec.h264.intra_pred_buf_size,
+ ctx->codec.h264.intra_pred_buf,
+ ctx->codec.h264.intra_pred_buf_dma);
}
static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
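To make the new sizing logic in cedrus_h264_start() explicit, here is a hypothetical helper (illustration only, assuming the driver's own headers; it mirrors the code above rather than extending it). For a 1920x1088 stream this gives 18 * 0x1000 + 1088 * 2 * 64 = 73728 + 139264 = 212992 bytes, which is above the 130 KiB floor and is therefore used as-is; only streams wider than 2048 pixels switch to the 0x4000 per-frame term.

static size_t example_h264_pic_info_size(unsigned int width,
					 unsigned int height)
{
	/* Per-frame term depends on whether the stream is wider than 2048. */
	size_t size = CEDRUS_H264_FRAME_NUM *
		      (width > 2048 ? 0x4000 : 0x1000);

	/* Height-dependent term from the CedarX formula. */
	size += height * 2 * 64;

	/* Never go below the minimum the hardware expects. */
	return max_t(size_t, size, CEDRUS_MIN_PIC_INFO_BUF_SIZE);
}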
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
new file mode 100644
index 000000000000..6945dc74e1d7
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2013 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ */
+
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+/*
+ * These are the sizes for side buffers required by the hardware for storing
+ * internal decoding metadata. They match the values used by the early BSP
+ * implementations, that were initially exposed in libvdpau-sunxi.
+ * Subsequent BSP implementations seem to double the neighbor info buffer size
+ * for the H6 SoC, which may be related to 10 bit H265 support.
+ */
+#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K)
+#define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
+#define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
+
+struct cedrus_h265_sram_frame_info {
+ __le32 top_pic_order_cnt;
+ __le32 bottom_pic_order_cnt;
+ __le32 top_mv_col_buf_addr;
+ __le32 bottom_mv_col_buf_addr;
+ __le32 luma_addr;
+ __le32 chroma_addr;
+} __packed;
+
+struct cedrus_h265_sram_pred_weight {
+ __s8 delta_weight;
+ __s8 offset;
+} __packed;
+
+static enum cedrus_irq_status cedrus_h265_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ reg = cedrus_read(dev, VE_DEC_H265_STATUS);
+ reg &= VE_DEC_H265_STATUS_CHECK_MASK;
+
+ if (reg & VE_DEC_H265_STATUS_CHECK_ERROR ||
+ !(reg & VE_DEC_H265_STATUS_SUCCESS))
+ return CEDRUS_IRQ_ERROR;
+
+ return CEDRUS_IRQ_OK;
+}
+
+static void cedrus_h265_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_CHECK_MASK);
+}
+
+static void cedrus_h265_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_DEC_H265_CTRL);
+
+ reg &= ~VE_DEC_H265_CTRL_IRQ_MASK;
+
+ cedrus_write(dev, VE_DEC_H265_CTRL, reg);
+}
+
+static void cedrus_h265_sram_write_offset(struct cedrus_dev *dev, u32 offset)
+{
+ cedrus_write(dev, VE_DEC_H265_SRAM_OFFSET, offset);
+}
+
+static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data,
+ unsigned int size)
+{
+ u32 *word = data;
+
+ while (size >= sizeof(u32)) {
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, *word++);
+ size -= sizeof(u32);
+ }
+}
+
+static inline dma_addr_t
+cedrus_h265_frame_info_mv_col_buf_addr(struct cedrus_ctx *ctx,
+ unsigned int index, unsigned int field)
+{
+ return ctx->codec.h265.mv_col_buf_addr + index *
+ ctx->codec.h265.mv_col_buf_unit_size +
+ field * ctx->codec.h265.mv_col_buf_unit_size / 2;
+}
+
+static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
+ unsigned int index,
+ bool field_pic,
+ u32 pic_order_cnt[],
+ int buffer_index)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 0);
+ dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 1);
+ dma_addr_t mv_col_buf_addr[2] = {
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index, 0),
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index,
+ field_pic ? 1 : 0)
+ };
+ u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
+ VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index;
+ struct cedrus_h265_sram_frame_info frame_info = {
+ .top_pic_order_cnt = cpu_to_le32(pic_order_cnt[0]),
+ .bottom_pic_order_cnt = cpu_to_le32(field_pic ?
+ pic_order_cnt[1] :
+ pic_order_cnt[0]),
+ .top_mv_col_buf_addr =
+ cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .bottom_mv_col_buf_addr = cpu_to_le32(field_pic ?
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[1]) :
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .luma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_luma_addr)),
+ .chroma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_chroma_addr)),
+ };
+
+ cedrus_h265_sram_write_offset(dev, offset);
+ cedrus_h265_sram_write_data(dev, &frame_info, sizeof(frame_info));
+}
+
+static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ u8 num_active_dpb_entries)
+{
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ unsigned int i;
+
+ for (i = 0; i < num_active_dpb_entries; i++) {
+ int buffer_index = vb2_find_timestamp(vq, dpb[i].timestamp, 0);
+ u32 pic_order_cnt[2] = {
+ dpb[i].pic_order_cnt[0],
+ dpb[i].pic_order_cnt[1]
+ };
+
+ cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
+ pic_order_cnt,
+ buffer_index);
+ }
+}
+
+static void cedrus_h265_ref_pic_list_write(struct cedrus_dev *dev,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ const u8 list[],
+ u8 num_ref_idx_active,
+ u32 sram_offset)
+{
+ unsigned int i;
+ u32 word = 0;
+
+ cedrus_h265_sram_write_offset(dev, sram_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int shift = (i % 4) * 8;
+ unsigned int index = list[i];
+ u8 value = list[i];
+
+ if (dpb[index].rps == V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ value |= VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF;
+
+ /* Each SRAM word gathers up to 4 references. */
+ word |= value << shift;
+
+ /* Write the word to SRAM and clear it for the next batch. */
+ if ((i % 4) == 3 || i == (num_ref_idx_active - 1)) {
+ cedrus_h265_sram_write_data(dev, &word, sizeof(word));
+ word = 0;
+ }
+ }
+}
+
+static void cedrus_h265_pred_weight_write(struct cedrus_dev *dev,
+ const s8 delta_luma_weight[],
+ const s8 luma_offset[],
+ const s8 delta_chroma_weight[][2],
+ const s8 chroma_offset[][2],
+ u8 num_ref_idx_active,
+ u32 sram_luma_offset,
+ u32 sram_chroma_offset)
+{
+ struct cedrus_h265_sram_pred_weight pred_weight[2] = { { 0 } };
+ unsigned int i, j;
+
+ cedrus_h265_sram_write_offset(dev, sram_luma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int index = i % 2;
+
+ pred_weight[index].delta_weight = delta_luma_weight[i];
+ pred_weight[index].offset = luma_offset[i];
+
+ if (index == 1 || i == (num_ref_idx_active - 1))
+ cedrus_h265_sram_write_data(dev, (u32 *)&pred_weight,
+ sizeof(pred_weight));
+ }
+
+ cedrus_h265_sram_write_offset(dev, sram_chroma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ for (j = 0; j < 2; j++) {
+ pred_weight[j].delta_weight = delta_chroma_weight[i][j];
+ pred_weight[j].offset = chroma_offset[i][j];
+ }
+
+ cedrus_h265_sram_write_data(dev, &pred_weight,
+ sizeof(pred_weight));
+ }
+}
+
+static void cedrus_h265_setup(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_hevc_pred_weight_table *pred_weight_table;
+ dma_addr_t src_buf_addr;
+ dma_addr_t src_buf_end_addr;
+ u32 chroma_log2_weight_denom;
+ u32 output_pic_list_index;
+ u32 pic_order_cnt[2];
+ u32 reg;
+
+ sps = run->h265.sps;
+ pps = run->h265.pps;
+ slice_params = run->h265.slice_params;
+ pred_weight_table = &slice_params->pred_weight_table;
+
+ /* MV column buffer size and allocation. */
+ if (!ctx->codec.h265.mv_col_buf_size) {
+ unsigned int num_buffers =
+ run->dst->vb2_buf.vb2_queue->num_buffers;
+ unsigned int log2_max_luma_coding_block_size =
+ sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ unsigned int ctb_size_luma =
+ 1UL << log2_max_luma_coding_block_size;
+
+ /*
+ * Each CTB requires an MV col buffer with a specific unit size.
+ * Since the address is given with its LSBs missing, 1 KiB is
+ * added to each buffer to ensure proper alignment.
+ */
+ ctx->codec.h265.mv_col_buf_unit_size =
+ DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) *
+ DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) *
+ CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
+
+ ctx->codec.h265.mv_col_buf_size = num_buffers *
+ ctx->codec.h265.mv_col_buf_unit_size;
+
+ ctx->codec.h265.mv_col_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h265.mv_col_buf_size,
+ &ctx->codec.h265.mv_col_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.mv_col_buf) {
+ ctx->codec.h265.mv_col_buf_size = 0;
+ // TODO: Abort the process here.
+ return;
+ }
+ }
+
+ /* Activate H265 engine. */
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_H265);
+
+ /* Source offset and length in bits. */
+
+ reg = slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, reg);
+
+ reg = slice_params->bit_size - slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
+
+ /* Source beginning and end addresses. */
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
+
+ reg = VE_DEC_H265_BITS_ADDR_BASE(src_buf_addr);
+ reg |= VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA;
+
+ cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
+
+ src_buf_end_addr = src_buf_addr +
+ DIV_ROUND_UP(slice_params->bit_size, 8);
+
+ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
+ cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
+
+ /* Coding tree block address: start at the beginning. */
+ reg = VE_DEC_H265_DEC_CTB_ADDR_X(0) | VE_DEC_H265_DEC_CTB_ADDR_Y(0);
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_ADDR, reg);
+
+ cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0);
+ cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0);
+
+ /* Clear the number of correctly-decoded coding tree blocks. */
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_NUM, 0);
+
+ /* Initialize bitstream access. */
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
+
+ /* Bitstream parameters. */
+
+ reg = VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(slice_params->nal_unit_type) |
+ VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(slice_params->nuh_temporal_id_plus1);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_NAL_HDR, reg);
+
+ /* SPS. */
+
+ reg = VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(sps->max_transform_hierarchy_depth_intra) |
+ VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(sps->max_transform_hierarchy_depth_inter) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(sps->log2_diff_max_min_luma_transform_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(sps->log2_min_luma_transform_block_size_minus2) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_luma_coding_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(sps->bit_depth_chroma_minus8) |
+ VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(sps->chroma_format_idc);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE,
+ V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_AMP_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE,
+ V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SPS_HDR, reg);
+
+ reg = VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_pcm_luma_coding_block_size) |
+ VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_pcm_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(sps->pcm_sample_bit_depth_chroma_minus1) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(sps->pcm_sample_bit_depth_luma_minus1);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PCM_CTRL, reg);
+
+ /* PPS. */
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(pps->pps_cr_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(pps->pps_cb_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(pps->init_qp_minus26) |
+ VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(pps->diff_cu_qp_delta_depth);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED,
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED,
+ V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED,
+ V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED,
+ pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL0, reg);
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(pps->log2_parallel_merge_level_minus2);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ pps->flags);
+
+ /* TODO: VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED */
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED, pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED, pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL1, reg);
+
+ /* Slice Parameters. */
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(slice_params->pic_struct) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(slice_params->five_minus_max_num_merge_cand) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(slice_params->num_ref_idx_l1_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(slice_params->num_ref_idx_l0_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(slice_params->collocated_ref_idx) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(slice_params->colour_plane_id) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(slice_params->slice_type);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
+ V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT,
+ pps->flags);
+
+ /* FIXME: For multi-slice support. */
+ reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO0, reg);
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(slice_params->num_rps_poc_st_curr_after == 0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ slice_params->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO1, reg);
+
+ chroma_log2_weight_denom = pred_weight_table->luma_log2_weight_denom +
+ pred_weight_table->delta_chroma_log2_weight_denom;
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(chroma_log2_weight_denom) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(pred_weight_table->luma_log2_weight_denom);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO2, reg);
+
+ /* Decoded picture size. */
+
+ reg = VE_DEC_H265_DEC_PIC_SIZE_WIDTH(ctx->src_fmt.width) |
+ VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(ctx->src_fmt.height);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PIC_SIZE, reg);
+
+ /* Scaling list. */
+
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_CTRL0, reg);
+
+ /* Neighbor information address. */
+ reg = VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(ctx->codec.h265.neighbor_info_buf_addr);
+ cedrus_write(dev, VE_DEC_H265_NEIGHBOR_INFO_ADDR, reg);
+
+ /* Write decoded picture buffer in pic list. */
+ cedrus_h265_frame_info_write_dpb(ctx, slice_params->dpb,
+ slice_params->num_active_dpb_entries);
+
+ /* Output frame. */
+
+ output_pic_list_index = V4L2_HEVC_DPB_ENTRIES_NUM_MAX;
+ pic_order_cnt[0] = slice_params->slice_pic_order_cnt;
+ pic_order_cnt[1] = slice_params->slice_pic_order_cnt;
+
+ cedrus_h265_frame_info_write_single(ctx, output_pic_list_index,
+ slice_params->pic_struct != 0,
+ pic_order_cnt,
+ run->dst->vb2_buf.index);
+
+ cedrus_write(dev, VE_DEC_H265_OUTPUT_FRAME_IDX, output_pic_list_index);
+
+ /* Reference picture list 0 (for P/B frames). */
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I) {
+ cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ slice_params->ref_idx_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0);
+
+ if ((pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED) ||
+ (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED))
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l0,
+ pred_weight_table->luma_offset_l0,
+ pred_weight_table->delta_chroma_weight_l0,
+ pred_weight_table->chroma_offset_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0);
+ }
+
+ /* Reference picture list 1 (for B frames). */
+ if (slice_params->slice_type == V4L2_HEVC_SLICE_TYPE_B) {
+ cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ slice_params->ref_idx_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1);
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED)
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l1,
+ pred_weight_table->luma_offset_l1,
+ pred_weight_table->delta_chroma_weight_l1,
+ pred_weight_table->chroma_offset_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1);
+ }
+
+ /* Enable appropriate interrupts. */
+ cedrus_write(dev, VE_DEC_H265_CTRL, VE_DEC_H265_CTRL_IRQ_MASK);
+}
+
+static int cedrus_h265_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ /* The buffer size is calculated at setup time. */
+ ctx->codec.h265.mv_col_buf_size = 0;
+
+ ctx->codec.h265.neighbor_info_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h265.neighbor_info_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.neighbor_info_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cedrus_h265_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ if (ctx->codec.h265.mv_col_buf_size > 0) {
+ dma_free_coherent(dev->dev, ctx->codec.h265.mv_col_buf_size,
+ ctx->codec.h265.mv_col_buf,
+ ctx->codec.h265.mv_col_buf_addr);
+
+ ctx->codec.h265.mv_col_buf_size = 0;
+ }
+
+ dma_free_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr);
+}
+
+static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_DEC_SLICE);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_h265 = {
+ .irq_clear = cedrus_h265_irq_clear,
+ .irq_disable = cedrus_h265_irq_disable,
+ .irq_status = cedrus_h265_irq_status,
+ .setup = cedrus_h265_setup,
+ .start = cedrus_h265_start,
+ .stop = cedrus_h265_stop,
+ .trigger = cedrus_h265_trigger,
+};
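For reference, the motion vector column buffer sizing in cedrus_h265_setup() above works out as follows: with 64-pixel CTBs and a 1920x1080 stream, DIV_ROUND_UP(1920, 64) * DIV_ROUND_UP(1080, 64) = 30 * 17 = 510 CTBs, so each destination buffer needs 510 * 160 + 1024 = 82624 bytes, and the total allocation is that unit size times the number of capture buffers. A hypothetical helper expressing the same per-buffer computation (illustration only, not driver code):

static size_t example_h265_mv_col_unit_size(unsigned int width,
					    unsigned int height,
					    unsigned int ctb_size_luma)
{
	/* One fixed-size slot per CTB, plus 1 KiB to cover the dropped LSBs. */
	return DIV_ROUND_UP(width, ctb_size_luma) *
	       DIV_ROUND_UP(height, ctb_size_luma) *
	       CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
}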
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index a942cd9bed57..daf5f244f93b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -30,7 +30,7 @@
#include "cedrus_hw.h"
#include "cedrus_regs.h"
-int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
+int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec)
{
u32 reg = 0;
@@ -50,11 +50,20 @@ int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
reg |= VE_MODE_DEC_H264;
break;
+ case CEDRUS_CODEC_H265:
+ reg |= VE_MODE_DEC_H265;
+ break;
+
default:
return -EINVAL;
}
- cedrus_write(dev, VE_MODE, reg);
+ if (ctx->src_fmt.width == 4096)
+ reg |= VE_MODE_PIC_WIDTH_IS_4096;
+ if (ctx->src_fmt.width > 2048)
+ reg |= VE_MODE_PIC_WIDTH_MORE_2048;
+
+ cedrus_write(ctx->dev, VE_MODE, reg);
return 0;
}
@@ -103,7 +112,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
{
struct cedrus_dev *dev = data;
struct cedrus_ctx *ctx;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
enum vb2_buffer_state state;
enum cedrus_irq_status status;
@@ -121,24 +129,13 @@ static irqreturn_t cedrus_irq(int irq, void *data)
dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
dev->dec_ops[ctx->current_codec]->irq_clear(ctx);
- src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- if (!src_buf || !dst_buf) {
- v4l2_err(&dev->v4l2_dev,
- "Missing source and/or destination buffers\n");
- return IRQ_HANDLED;
- }
-
if (status == CEDRUS_IRQ_ERROR)
state = VB2_BUF_STATE_ERROR;
else
state = VB2_BUF_STATE_DONE;
- v4l2_m2m_buf_done(src_buf, state);
- v4l2_m2m_buf_done(dst_buf, state);
-
- v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ state);
return IRQ_HANDLED;
}
@@ -146,7 +143,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
int cedrus_hw_probe(struct cedrus_dev *dev)
{
const struct cedrus_variant *variant;
- struct resource *res;
int irq_dec;
int ret;
@@ -225,8 +221,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
goto err_sram;
}
- res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(dev->dev, res);
+ dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
if (IS_ERR(dev->base)) {
dev_err(dev->dev, "Failed to map registers\n");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
index 27d0882397aa..604ff932fbf5 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -16,7 +16,7 @@
#ifndef _CEDRUS_HW_H_
#define _CEDRUS_HW_H_
-int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
+int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec);
void cedrus_engine_disable(struct cedrus_dev *dev);
void cedrus_dst_format_set(struct cedrus_dev *dev,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
index 13c34927bad5..8bcd6b8f9e2d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -96,7 +96,7 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
quantization = run->mpeg2.quantization;
/* Activate MPEG engine. */
- cedrus_engine_enable(dev, CEDRUS_CODEC_MPEG2);
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_MPEG2);
/* Set intra quantization matrix. */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index ddd29788d685..7beb03d3bb39 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -10,6 +10,9 @@
#ifndef _CEDRUS_REGS_H_
#define _CEDRUS_REGS_H_
+#define SHIFT_AND_MASK_BITS(v, h, l) \
+ (((unsigned long)(v) << (l)) & GENMASK(h, l))
+
/*
* Common acronyms and contractions used in register descriptions:
* * VLD : Variable-Length Decoder
@@ -18,13 +21,22 @@
* * MC: Motion Compensation
* * STCD: Start Code Detect
* * SDRT: Scale Down and Rotate
+ * * WB: Writeback
+ * * BITS/BS: Bitstream
+ * * MB: Macroblock
+ * * CTU: Coding Tree Unit
+ * * CTB: Coding Tree Block
+ * * IDX: Index
*/
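To illustrate the SHIFT_AND_MASK_BITS() helper introduced at the top of this header (a worked example, not a new register definition):

/*
 * SHIFT_AND_MASK_BITS(0x1234, 15, 8)
 *	== ((unsigned long)0x1234 << 8) & GENMASK(15, 8)
 *	== 0x123400 & 0xff00
 *	== 0x3400
 *
 * which matches the open-coded (((v) << 8) & GENMASK(15, 8)) form that the
 * conversions below replace; the unsigned long cast keeps the shift well
 * defined for values near the top of the 32-bit range.
 */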
#define VE_ENGINE_DEC_MPEG 0x100
#define VE_ENGINE_DEC_H264 0x200
+#define VE_ENGINE_DEC_H265 0x500
#define VE_MODE 0x00
+#define VE_MODE_PIC_WIDTH_IS_4096 BIT(22)
+#define VE_MODE_PIC_WIDTH_MORE_2048 BIT(21)
#define VE_MODE_REC_WR_MODE_2MB (0x01 << 20)
#define VE_MODE_REC_WR_MODE_1MB (0x00 << 20)
#define VE_MODE_DDR_MODE_BW_128 (0x03 << 16)
@@ -34,11 +46,22 @@
#define VE_MODE_DEC_H264 (0x01 << 0)
#define VE_MODE_DEC_MPEG (0x00 << 0)
+#define VE_BUF_CTRL 0x50
+
+#define VE_BUF_CTRL_INTRAPRED_EXT_RAM (0x02 << 2)
+#define VE_BUF_CTRL_INTRAPRED_MIXED_RAM (0x01 << 2)
+#define VE_BUF_CTRL_INTRAPRED_INT_SRAM (0x00 << 2)
+#define VE_BUF_CTRL_DBLK_EXT_RAM (0x02 << 0)
+#define VE_BUF_CTRL_DBLK_MIXED_RAM (0x01 << 0)
+#define VE_BUF_CTRL_DBLK_INT_SRAM (0x00 << 0)
+
+#define VE_DBLK_DRAM_BUF_ADDR 0x54
+#define VE_INTRAPRED_DRAM_BUF_ADDR 0x58
#define VE_PRIMARY_CHROMA_BUF_LEN 0xc4
#define VE_PRIMARY_FB_LINE_STRIDE 0xc8
-#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) (((s) << 16) & GENMASK(31, 16))
-#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) (((s) << 0) & GENMASK(15, 0))
+#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) SHIFT_AND_MASK_BITS(s, 31, 16)
+#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) SHIFT_AND_MASK_BITS(s, 15, 0)
#define VE_CHROMA_BUF_LEN 0xe8
@@ -46,7 +69,7 @@
#define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30)
#define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30)
#define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30)
-#define VE_CHROMA_BUF_LEN_SDRT(l) ((l) & GENMASK(27, 0))
+#define VE_CHROMA_BUF_LEN_SDRT(l) SHIFT_AND_MASK_BITS(l, 27, 0)
#define VE_PRIMARY_OUT_FMT 0xec
@@ -69,15 +92,15 @@
#define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00)
-#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) (((t) << 28) & GENMASK(30, 28))
+#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) SHIFT_AND_MASK_BITS(t, 30, 28)
#define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x))
#define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
- (((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
+ (((unsigned long)(__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
#define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
- (((p) << 10) & GENMASK(11, 10))
+ SHIFT_AND_MASK_BITS(p, 11, 10)
#define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
- (((s) << 8) & GENMASK(9, 8))
+ SHIFT_AND_MASK_BITS(s, 9, 8)
#define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
((v) ? BIT(7) : 0)
#define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
@@ -98,19 +121,19 @@
#define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08)
#define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
- ((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8))
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(w, 16), 15, 8)
#define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
- ((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0))
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(h, 16), 7, 0)
#define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c)
-#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) (((w) << 16) & GENMASK(27, 16))
-#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) (((h) << 0) & GENMASK(11, 0))
+#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) SHIFT_AND_MASK_BITS(w, 27, 16)
+#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0)
#define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
-#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8))
-#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(7, 0))
+#define VE_DEC_MPEG_MBADDR_X(w) SHIFT_AND_MASK_BITS(w, 15, 8)
+#define VE_DEC_MPEG_MBADDR_Y(h) SHIFT_AND_MASK_BITS(h, 7, 0)
#define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
@@ -225,13 +248,277 @@
#define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14)
#define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14)
#define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
- (((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8)))
+ (SHIFT_AND_MASK_BITS(i, 13, 8) | SHIFT_AND_MASK_BITS(v, 7, 0))
#define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4)
#define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8)
#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+#define VE_DEC_H265_DEC_NAL_HDR (VE_ENGINE_DEC_H265 + 0x00)
+
+#define VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 0)
+
+#define VE_DEC_H265_FLAG(reg_flag, ctrl_flag, flags) \
+ (((flags) & (ctrl_flag)) ? reg_flag : 0)
+
+#define VE_DEC_H265_DEC_SPS_HDR (VE_ENGINE_DEC_H265 + 0x04)
+
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE BIT(26)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED BIT(25)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED BIT(24)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED BIT(23)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE BIT(2)
+
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(v) \
+ SHIFT_AND_MASK_BITS(v, 22, 20)
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 17)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 16, 15)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 13)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 11)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 9)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 3)
+#define VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(v) \
+ SHIFT_AND_MASK_BITS(v, 1, 0)
+
+#define VE_DEC_H265_DEC_PIC_SIZE (VE_ENGINE_DEC_H265 + 0x08)
+
+#define VE_DEC_H265_DEC_PIC_SIZE_WIDTH(w) (((w) << 0) & GENMASK(13, 0))
+#define VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(h) (((h) << 16) & GENMASK(29, 16))
+
+#define VE_DEC_H265_DEC_PCM_CTRL (VE_ENGINE_DEC_H265 + 0x0c)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED BIT(15)
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED BIT(14)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 11, 10)
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 9, 8)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 7, 4)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0 (VE_ENGINE_DEC_H265 + 0x10)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 24)
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 16)
+#define VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 8)
+#define VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1 (VE_ENGINE_DEC_H265 + 0x14)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(6)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED BIT(5)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED BIT(4)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 8)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0 (VE_ENGINE_DEC_H265 + 0x18)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED BIT(31)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_SRAM (0 << 30)
+#define VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT (1 << 30)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0 (VE_ENGINE_DEC_H265 + 0x20)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0 BIT(11)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT BIT(10)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO BIT(9)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA BIT(8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA BIT(7)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE BIT(6)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT BIT(1)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC BIT(0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(v) \
+ SHIFT_AND_MASK_BITS(v, 26, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 23, 20)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(v) \
+ SHIFT_AND_MASK_BITS(v, 15, 12)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 2)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1 (VE_ENGINE_DEC_H265 + 0x24)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED BIT(23)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(22)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 31, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 27, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(v) \
+ ((v) ? BIT(21) : 0)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 20, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2 (VE_ENGINE_DEC_H265 + 0x28)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 2, 0)
+
+#define VE_DEC_H265_DEC_CTB_ADDR (VE_ENGINE_DEC_H265 + 0x2c)
+
+#define VE_DEC_H265_DEC_CTB_ADDR_Y(y) SHIFT_AND_MASK_BITS(y, 25, 16)
+#define VE_DEC_H265_DEC_CTB_ADDR_X(x) SHIFT_AND_MASK_BITS(x, 9, 0)
+
+#define VE_DEC_H265_CTRL (VE_ENGINE_DEC_H265 + 0x30)
+
+#define VE_DEC_H265_CTRL_DDR_CONSISTENCY_EN BIT(31)
+#define VE_DEC_H265_CTRL_STCD_EN BIT(25)
+#define VE_DEC_H265_CTRL_EPTB_DEC_BYPASS_EN BIT(24)
+#define VE_DEC_H265_CTRL_TQ_BYPASS_EN BIT(12)
+#define VE_DEC_H265_CTRL_VLD_BYPASS_EN BIT(11)
+#define VE_DEC_H265_CTRL_NCRI_CACHE_DISABLE BIT(10)
+#define VE_DEC_H265_CTRL_ROTATE_SCALE_OUT_EN BIT(9)
+#define VE_DEC_H265_CTRL_MC_NO_WRITEBACK BIT(8)
+#define VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN BIT(2)
+#define VE_DEC_H265_CTRL_ERROR_IRQ_EN BIT(1)
+#define VE_DEC_H265_CTRL_FINISH_IRQ_EN BIT(0)
+#define VE_DEC_H265_CTRL_IRQ_MASK \
+ (VE_DEC_H265_CTRL_FINISH_IRQ_EN | VE_DEC_H265_CTRL_ERROR_IRQ_EN | \
+ VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN)
+
+#define VE_DEC_H265_TRIGGER (VE_ENGINE_DEC_H265 + 0x34)
+
+#define VE_DEC_H265_TRIGGER_STCD_VC1 (0x02 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_AVS (0x01 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_HEVC (0x00 << 4)
+#define VE_DEC_H265_TRIGGER_DEC_SLICE (0x08 << 0)
+#define VE_DEC_H265_TRIGGER_INIT_SWDEC (0x07 << 0)
+#define VE_DEC_H265_TRIGGER_BYTE_ALIGN (0x06 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCUE (0x05 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCSE (0x04 << 0)
+#define VE_DEC_H265_TRIGGER_FLUSH_BITS (0x03 << 0)
+#define VE_DEC_H265_TRIGGER_GET_BITS (0x02 << 0)
+#define VE_DEC_H265_TRIGGER_SHOW_BITS (0x01 << 0)
+
+#define VE_DEC_H265_STATUS (VE_ENGINE_DEC_H265 + 0x38)
+
+#define VE_DEC_H265_STATUS_STCD BIT(24)
+#define VE_DEC_H265_STATUS_STCD_BUSY BIT(21)
+#define VE_DEC_H265_STATUS_WB_BUSY BIT(20)
+#define VE_DEC_H265_STATUS_BS_DMA_BUSY BIT(19)
+#define VE_DEC_H265_STATUS_IQIT_BUSY BIT(18)
+#define VE_DEC_H265_STATUS_INTER_BUSY BIT(17)
+#define VE_DEC_H265_STATUS_MORE_DATA BIT(16)
+#define VE_DEC_H265_STATUS_VLD_BUSY BIT(14)
+#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY BIT(13)
+#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY BIT(12)
+#define VE_DEC_H265_STATUS_INTRA_BUSY BIT(11)
+#define VE_DEC_H265_STATUS_SAO_BUSY BIT(10)
+#define VE_DEC_H265_STATUS_MVP_BUSY BIT(9)
+#define VE_DEC_H265_STATUS_SWDEC_BUSY BIT(8)
+#define VE_DEC_H265_STATUS_OVER_TIME BIT(3)
+#define VE_DEC_H265_STATUS_VLD_DATA_REQ BIT(2)
+#define VE_DEC_H265_STATUS_ERROR BIT(1)
+#define VE_DEC_H265_STATUS_SUCCESS BIT(0)
+#define VE_DEC_H265_STATUS_STCD_TYPE_MASK GENMASK(23, 22)
+#define VE_DEC_H265_STATUS_CHECK_MASK \
+ (VE_DEC_H265_STATUS_SUCCESS | VE_DEC_H265_STATUS_ERROR | \
+ VE_DEC_H265_STATUS_VLD_DATA_REQ)
+#define VE_DEC_H265_STATUS_CHECK_ERROR \
+ (VE_DEC_H265_STATUS_ERROR | VE_DEC_H265_STATUS_VLD_DATA_REQ)
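Taken together, the CTRL and STATUS masks suggest the usual enable/check/acknowledge interrupt pattern. A hedged sketch of an IRQ path built only from the bits above; cedrus_read()/cedrus_write() are assumed to be the driver's register helpers and error reporting is elided:

	static irqreturn_t sketch_h265_irq(struct cedrus_dev *dev)
	{
		u32 status = cedrus_read(dev, VE_DEC_H265_STATUS);

		if (!(status & VE_DEC_H265_STATUS_CHECK_MASK))
			return IRQ_NONE;	/* not this engine's interrupt */

		/* Mask the engine's interrupt sources and acknowledge the status. */
		cedrus_write(dev, VE_DEC_H265_CTRL,
			     cedrus_read(dev, VE_DEC_H265_CTRL) &
			     ~VE_DEC_H265_CTRL_IRQ_MASK);
		cedrus_write(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_CHECK_MASK);

		/* VE_DEC_H265_STATUS_CHECK_ERROR distinguishes a failed or starved
		 * slice decode from VE_DEC_H265_STATUS_SUCCESS; the caller would
		 * mark the buffers accordingly. */
		return IRQ_HANDLED;
	}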
+
+#define VE_DEC_H265_DEC_CTB_NUM (VE_ENGINE_DEC_H265 + 0x3c)
+
+#define VE_DEC_H265_BITS_ADDR (VE_ENGINE_DEC_H265 + 0x40)
+
+#define VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA BIT(30)
+#define VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA BIT(29)
+#define VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA BIT(28)
+#define VE_DEC_H265_BITS_ADDR_BASE(a) (((a) >> 8) & GENMASK(27, 0))
+
+#define VE_DEC_H265_BITS_OFFSET (VE_ENGINE_DEC_H265 + 0x44)
+#define VE_DEC_H265_BITS_LEN (VE_ENGINE_DEC_H265 + 0x48)
+
+#define VE_DEC_H265_BITS_END_ADDR (VE_ENGINE_DEC_H265 + 0x4c)
+
+#define VE_DEC_H265_BITS_END_ADDR_BASE(a) ((a) >> 8)
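The >> 8 in both *_ADDR_BASE macros indicates the engine addresses the bitstream in 256-byte units. A hedged sketch of how a slice's bitstream window might be programmed with these registers, assuming buf_dma, buf_size and the bit offset/length come from the queued OUTPUT buffer:

	cedrus_write(dev, VE_DEC_H265_BITS_ADDR,
		     VE_DEC_H265_BITS_ADDR_BASE(buf_dma) |
		     VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA |
		     VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA |
		     VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA);
	cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR,
		     VE_DEC_H265_BITS_END_ADDR_BASE(buf_dma + buf_size));
	cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, bit_offset);
	cedrus_write(dev, VE_DEC_H265_BITS_LEN, bit_length);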
+
+#define VE_DEC_H265_SDRT_CTRL (VE_ENGINE_DEC_H265 + 0x50)
+#define VE_DEC_H265_SDRT_LUMA_ADDR (VE_ENGINE_DEC_H265 + 0x54)
+#define VE_DEC_H265_SDRT_CHROMA_ADDR (VE_ENGINE_DEC_H265 + 0x58)
+
+#define VE_DEC_H265_OUTPUT_FRAME_IDX (VE_ENGINE_DEC_H265 + 0x5c)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR (VE_ENGINE_DEC_H265 + 0x60)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR (VE_ENGINE_DEC_H265 + 0x64)
+#define VE_DEC_H265_TILE_START_CTB (VE_ENGINE_DEC_H265 + 0x68)
+#define VE_DEC_H265_TILE_END_CTB (VE_ENGINE_DEC_H265 + 0x6c)
+
+#define VE_DEC_H265_LOW_ADDR (VE_ENGINE_DEC_H265 + 0x80)
+
+#define VE_DEC_H265_LOW_ADDR_PRIMARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 31, 24)
+#define VE_DEC_H265_LOW_ADDR_SECONDARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 23, 16)
+#define VE_DEC_H265_LOW_ADDR_ENTRY_POINTS_BUF(a) \
+ SHIFT_AND_MASK_BITS(a, 7, 0)
+
+#define VE_DEC_H265_SRAM_OFFSET (VE_ENGINE_DEC_H265 + 0xe0)
+
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0 0x00
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0 0x20
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1 0x60
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1 0x80
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO 0x400
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT 0x20
+#define VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS 0x800
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0 0xc00
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1 0xc10
+
+#define VE_DEC_H265_SRAM_DATA (VE_ENGINE_DEC_H265 + 0xe4)
+
+#define VE_DEC_H265_SRAM_DATA_ADDR_BASE(a) ((a) >> 8)
+#define VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF BIT(7)
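The OFFSET/DATA register pair implies an indirect, auto-incrementing SRAM access scheme: select one of the offsets listed above, then stream 32-bit words through the data register. A minimal sketch of that pattern (names are illustrative, not the driver's actual helpers):

	static void sketch_sram_write(struct cedrus_dev *dev, u32 offset,
				      const u32 *data, unsigned int count)
	{
		cedrus_write(dev, VE_DEC_H265_SRAM_OFFSET, offset);
		while (count--)
			cedrus_write(dev, VE_DEC_H265_SRAM_DATA, *data++);
	}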
+
#define VE_H264_SPS 0x200
#define VE_H264_SPS_MBS_ONLY BIT(18)
#define VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD BIT(17)
@@ -267,13 +554,16 @@
VE_H264_CTRL_SLICE_DECODE_INT)
#define VE_H264_TRIGGER_TYPE 0x224
+#define VE_H264_TRIGGER_TYPE_N_BITS(x) (((x) & 0x3f) << 8)
#define VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE (8 << 0)
#define VE_H264_TRIGGER_TYPE_INIT_SWDEC (7 << 0)
+#define VE_H264_TRIGGER_TYPE_FLUSH_BITS (3 << 0)
#define VE_H264_STATUS 0x228
#define VE_H264_STATUS_VLD_DATA_REQ_INT VE_H264_CTRL_VLD_DATA_REQ_INT
#define VE_H264_STATUS_DECODE_ERR_INT VE_H264_CTRL_DECODE_ERR_INT
#define VE_H264_STATUS_SLICE_DECODE_INT VE_H264_CTRL_SLICE_DECODE_INT
+#define VE_H264_STATUS_VLD_BUSY BIT(8)
#define VE_H264_STATUS_INT_MASK VE_H264_CTRL_INT_MASK
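The new N_BITS/FLUSH_BITS trigger values and the VLD_BUSY status bit together allow skipping over already-parsed bitstream in the H.264 VLD. A hedged sketch of that use, chunked well within the 6-bit width of the N_BITS field:

	static void sketch_h264_skip_bits(struct cedrus_dev *dev, int num)
	{
		while (num > 0) {
			int n = min(num, 32);

			cedrus_write(dev, VE_H264_TRIGGER_TYPE,
				     VE_H264_TRIGGER_TYPE_FLUSH_BITS |
				     VE_H264_TRIGGER_TYPE_N_BITS(n));
			while (cedrus_read(dev, VE_H264_STATUS) &
			       VE_H264_STATUS_VLD_BUSY)
				udelay(1);

			num -= n;
		}
	}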
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index eeee3efd247b..15cf1f10221b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -29,8 +29,8 @@
#define CEDRUS_MIN_WIDTH 16U
#define CEDRUS_MIN_HEIGHT 16U
-#define CEDRUS_MAX_WIDTH 3840U
-#define CEDRUS_MAX_HEIGHT 2160U
+#define CEDRUS_MAX_WIDTH 4096U
+#define CEDRUS_MAX_HEIGHT 2304U
static struct cedrus_format cedrus_formats[] = {
{
@@ -42,6 +42,11 @@ static struct cedrus_format cedrus_formats[] = {
.directions = CEDRUS_DECODE_SRC,
},
{
+ .pixelformat = V4L2_PIX_FMT_HEVC_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
.directions = CEDRUS_DECODE_DST,
},
@@ -62,34 +67,31 @@ static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
static struct cedrus_format *cedrus_find_format(u32 pixelformat, u32 directions,
unsigned int capabilities)
{
+ struct cedrus_format *first_valid_fmt = NULL;
struct cedrus_format *fmt;
unsigned int i;
for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
fmt = &cedrus_formats[i];
- if (fmt->capabilities && (fmt->capabilities & capabilities) !=
- fmt->capabilities)
+ if ((fmt->capabilities & capabilities) != fmt->capabilities ||
+ !(fmt->directions & directions))
continue;
- if (fmt->pixelformat == pixelformat &&
- (fmt->directions & directions) != 0)
+ if (fmt->pixelformat == pixelformat)
break;
+
+ if (!first_valid_fmt)
+ first_valid_fmt = fmt;
}
if (i == CEDRUS_FORMATS_COUNT)
- return NULL;
+ return first_valid_fmt;
return &cedrus_formats[i];
}
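The reworked lookup no longer fails hard: when the requested pixelformat is not usable, it falls back to the first entry that matches the direction and capabilities, which lets the try_fmt handlers below coerce the format instead of returning -EINVAL. A hedged usage sketch, assuming YUYV is not present in cedrus_formats:

	/* Requesting an unsupported capture format now degrades gracefully: */
	fmt = cedrus_find_format(V4L2_PIX_FMT_YUYV, CEDRUS_DECODE_DST,
				 dev->capabilities);
	/* fmt points at the first valid destination format (e.g. the tiled
	 * NV12 entry above) rather than NULL, so callers can overwrite the
	 * requested pixelformat with fmt->pixelformat. */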
-static bool cedrus_check_format(u32 pixelformat, u32 directions,
- unsigned int capabilities)
-{
- return cedrus_find_format(pixelformat, directions, capabilities);
-}
-
-static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
{
unsigned int width = pix_fmt->width;
unsigned int height = pix_fmt->height;
@@ -105,9 +107,11 @@ static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
switch (pix_fmt->pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
/* Zero bytes per line for encoded source. */
bytesperline = 0;
-
+ /* Choose some minimum size since this can't be 0 */
+ sizeimage = max_t(u32, SZ_1K, sizeimage);
break;
case V4L2_PIX_FMT_SUNXI_TILED_NV12:
@@ -214,16 +218,7 @@ static int cedrus_g_fmt_vid_cap(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
- /* Fall back to dummy default by lack of hardware configuration. */
- if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
- cedrus_prepare_format(&f->fmt.pix);
-
- return 0;
- }
-
f->fmt.pix = ctx->dst_fmt;
-
return 0;
}
@@ -232,17 +227,7 @@ static int cedrus_g_fmt_vid_out(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
- /* Fall back to dummy default by lack of hardware configuration. */
- if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
- f->fmt.pix.sizeimage = SZ_1K;
- cedrus_prepare_format(&f->fmt.pix);
-
- return 0;
- }
-
f->fmt.pix = ctx->src_fmt;
-
return 0;
}
@@ -252,11 +237,14 @@ static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+ struct cedrus_format *fmt =
+ cedrus_find_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
+ dev->capabilities);
- if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
- dev->capabilities))
+ if (!fmt)
return -EINVAL;
+ pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -268,15 +256,14 @@ static int cedrus_try_fmt_vid_out(struct file *file, void *priv,
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+ struct cedrus_format *fmt =
+ cedrus_find_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
+ dev->capabilities);
- if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
- dev->capabilities))
- return -EINVAL;
-
- /* Source image size has to be provided by userspace. */
- if (pix_fmt->sizeimage == 0)
+ if (!fmt)
return -EINVAL;
+ pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -322,6 +309,17 @@ static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
ctx->src_fmt = f->fmt.pix;
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ vq->subsystem_flags |=
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ default:
+ vq->subsystem_flags &=
+ ~VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ }
+
/* Propagate colorspace information to capture. */
ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
@@ -355,6 +353,9 @@ const struct v4l2_ioctl_ops cedrus_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -364,21 +365,12 @@ static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
struct device *alloc_devs[])
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
- struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt;
- u32 directions;
- if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
- directions = CEDRUS_DECODE_SRC;
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
pix_fmt = &ctx->src_fmt;
- } else {
- directions = CEDRUS_DECODE_DST;
+ else
pix_fmt = &ctx->dst_fmt;
- }
-
- if (!cedrus_check_format(pix_fmt->pixelformat, directions,
- dev->capabilities))
- return -EINVAL;
if (*nplanes) {
if (sizes[0] < pix_fmt->sizeimage)
@@ -453,6 +445,10 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
ctx->current_codec = CEDRUS_CODEC_H264;
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ ctx->current_codec = CEDRUS_CODEC_H265;
+ break;
+
default:
return -EINVAL;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.h b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
index 0e4f7a8cccf2..05050c0a0921 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
@@ -26,5 +26,6 @@ extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt);
#endif
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index 8948d5246409..6262eb25c80b 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig MOST
- tristate "MOST support"
+ tristate "MOST support"
depends on HAS_DMA && CONFIGFS_FS
- default n
- help
+ default n
+ help
Say Y here if you want to enable MOST support.
This driver needs at least one additional component to enable the
desired access from userspace (e.g. character devices) and one that
@@ -12,7 +12,7 @@ menuconfig MOST
To compile this driver as a module, choose M here: the
module will be called most_core.
- If in doubt, say N here.
+ If in doubt, say N here.
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index 724d098aeef0..f880147c82fd 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -494,6 +494,7 @@ err_remove_ida:
static struct cdev_component comp = {
.cc = {
+ .mod = THIS_MODULE,
.name = "cdev",
.probe_channel = comp_probe,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/most/configfs.c b/drivers/staging/most/configfs.c
index 025495657b68..34a9fb53985c 100644
--- a/drivers/staging/most/configfs.c
+++ b/drivers/staging/most/configfs.c
@@ -164,6 +164,7 @@ static ssize_t mdev_link_direction_store(struct config_item *item,
!sysfs_streq(page, "dir_tx") && !sysfs_streq(page, "tx"))
return -EINVAL;
strcpy(mdev_link->direction, page);
+ strim(mdev_link->direction);
return count;
}
@@ -182,6 +183,7 @@ static ssize_t mdev_link_datatype_store(struct config_item *item,
!sysfs_streq(page, "isoc_avp"))
return -EINVAL;
strcpy(mdev_link->datatype, page);
+ strim(mdev_link->datatype);
return count;
}
@@ -196,6 +198,7 @@ static ssize_t mdev_link_device_store(struct config_item *item,
struct mdev_link *mdev_link = to_mdev_link(item);
strcpy(mdev_link->device, page);
+ strim(mdev_link->device);
return count;
}
@@ -210,6 +213,7 @@ static ssize_t mdev_link_channel_store(struct config_item *item,
struct mdev_link *mdev_link = to_mdev_link(item);
strcpy(mdev_link->channel, page);
+ strim(mdev_link->channel);
return count;
}
@@ -391,22 +395,29 @@ static const struct config_item_type mdev_link_type = {
struct most_common {
struct config_group group;
+ struct module *mod;
+ struct configfs_subsystem subsys;
};
-static struct most_common *to_most_common(struct config_item *item)
+static struct most_common *to_most_common(struct configfs_subsystem *subsys)
{
- return container_of(to_config_group(item), struct most_common, group);
+ return container_of(subsys, struct most_common, subsys);
}
static struct config_item *most_common_make_item(struct config_group *group,
const char *name)
{
struct mdev_link *mdev_link;
+ struct most_common *mc = to_most_common(group->cg_subsys);
mdev_link = kzalloc(sizeof(*mdev_link), GFP_KERNEL);
if (!mdev_link)
return ERR_PTR(-ENOMEM);
+ if (!try_module_get(mc->mod)) {
+ kfree(mdev_link);
+ return ERR_PTR(-ENOLCK);
+ }
config_item_init_type_name(&mdev_link->item, name,
&mdev_link_type);
@@ -422,15 +433,26 @@ static struct config_item *most_common_make_item(struct config_group *group,
static void most_common_release(struct config_item *item)
{
- kfree(to_most_common(item));
+ struct config_group *group = to_config_group(item);
+
+ kfree(to_most_common(group->cg_subsys));
}
static struct configfs_item_operations most_common_item_ops = {
.release = most_common_release,
};
+static void most_common_disconnect(struct config_group *group,
+ struct config_item *item)
+{
+ struct most_common *mc = to_most_common(group->cg_subsys);
+
+ module_put(mc->mod);
+}
+
static struct configfs_group_operations most_common_group_ops = {
.make_item = most_common_make_item,
+ .disconnect_notify = most_common_disconnect,
};

static const struct config_item_type most_common_type = {
@@ -439,29 +461,35 @@ static const struct config_item_type most_common_type = {
.ct_owner = THIS_MODULE,
};
-static struct configfs_subsystem most_cdev_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_cdev",
- .ci_type = &most_common_type,
+static struct most_common most_cdev = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_cdev",
+ .ci_type = &most_common_type,
+ },
},
},
};
-static struct configfs_subsystem most_net_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_net",
- .ci_type = &most_common_type,
+static struct most_common most_net = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_net",
+ .ci_type = &most_common_type,
+ },
},
},
};
-static struct configfs_subsystem most_video_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_video",
- .ci_type = &most_common_type,
+static struct most_common most_video = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_video",
+ .ci_type = &most_common_type,
+ },
},
},
};
@@ -487,7 +515,7 @@ static struct config_item *most_snd_grp_make_item(struct config_group *group,
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&mdev_link->item, name, &mdev_link_type);
- mdev_link->create_link = 0;
+ mdev_link->create_link = false;
strcpy(mdev_link->name, name);
strcpy(mdev_link->comp, "sound");
return &mdev_link->item;
@@ -545,13 +573,14 @@ static const struct config_item_type most_snd_grp_type = {
struct most_sound {
struct configfs_subsystem subsys;
struct list_head soundcard_list;
+ struct module *mod;
};
static struct config_group *most_sound_make_group(struct config_group *group,
const char *name)
{
struct most_snd_grp *most;
- struct most_sound *ms = container_of(to_configfs_subsystem(group),
+ struct most_sound *ms = container_of(group->cg_subsys,
struct most_sound, subsys);
list_for_each_entry(most, &ms->soundcard_list, list) {
@@ -560,17 +589,29 @@ static struct config_group *most_sound_make_group(struct config_group *group,
return ERR_PTR(-EPROTO);
}
}
+ if (!try_module_get(ms->mod))
+ return ERR_PTR(-ENOLCK);
most = kzalloc(sizeof(*most), GFP_KERNEL);
- if (!most)
+ if (!most) {
+ module_put(ms->mod);
return ERR_PTR(-ENOMEM);
-
+ }
config_group_init_type_name(&most->group, name, &most_snd_grp_type);
list_add_tail(&most->list, &ms->soundcard_list);
return &most->group;
}
+static void most_sound_disconnect(struct config_group *group,
+ struct config_item *item)
+{
+ struct most_sound *ms = container_of(group->cg_subsys,
+ struct most_sound, subsys);
+ module_put(ms->mod);
+}
+
static struct configfs_group_operations most_sound_group_ops = {
.make_group = most_sound_make_group,
+ .disconnect_notify = most_sound_disconnect,
};
static const struct config_item_type most_sound_type = {
@@ -593,16 +634,21 @@ int most_register_configfs_subsys(struct core_component *c)
{
int ret;
- if (!strcmp(c->name, "cdev"))
- ret = configfs_register_subsystem(&most_cdev_subsys);
- else if (!strcmp(c->name, "net"))
- ret = configfs_register_subsystem(&most_net_subsys);
- else if (!strcmp(c->name, "video"))
- ret = configfs_register_subsystem(&most_video_subsys);
- else if (!strcmp(c->name, "sound"))
+ if (!strcmp(c->name, "cdev")) {
+ most_cdev.mod = c->mod;
+ ret = configfs_register_subsystem(&most_cdev.subsys);
+ } else if (!strcmp(c->name, "net")) {
+ most_net.mod = c->mod;
+ ret = configfs_register_subsystem(&most_net.subsys);
+ } else if (!strcmp(c->name, "video")) {
+ most_video.mod = c->mod;
+ ret = configfs_register_subsystem(&most_video.subsys);
+ } else if (!strcmp(c->name, "sound")) {
+ most_sound_subsys.mod = c->mod;
ret = configfs_register_subsystem(&most_sound_subsys.subsys);
- else
+ } else {
return -ENODEV;
+ }
if (ret) {
pr_err("Error %d while registering subsystem %s\n",
@@ -631,11 +677,11 @@ void most_interface_register_notify(const char *mdev)
void most_deregister_configfs_subsys(struct core_component *c)
{
if (!strcmp(c->name, "cdev"))
- configfs_unregister_subsystem(&most_cdev_subsys);
+ configfs_unregister_subsystem(&most_cdev.subsys);
else if (!strcmp(c->name, "net"))
- configfs_unregister_subsystem(&most_net_subsys);
+ configfs_unregister_subsystem(&most_net.subsys);
else if (!strcmp(c->name, "video"))
- configfs_unregister_subsystem(&most_video_subsys);
+ configfs_unregister_subsystem(&most_video.subsys);
else if (!strcmp(c->name, "sound"))
configfs_unregister_subsystem(&most_sound_subsys.subsys);
}
@@ -643,14 +689,14 @@ EXPORT_SYMBOL_GPL(most_deregister_configfs_subsys);
int __init configfs_init(void)
{
- config_group_init(&most_cdev_subsys.su_group);
- mutex_init(&most_cdev_subsys.su_mutex);
+ config_group_init(&most_cdev.subsys.su_group);
+ mutex_init(&most_cdev.subsys.su_mutex);
- config_group_init(&most_net_subsys.su_group);
- mutex_init(&most_net_subsys.su_mutex);
+ config_group_init(&most_net.subsys.su_group);
+ mutex_init(&most_net.subsys.su_mutex);
- config_group_init(&most_video_subsys.su_group);
- mutex_init(&most_video_subsys.su_mutex);
+ config_group_init(&most_video.subsys.su_group);
+ mutex_init(&most_video.subsys.su_mutex);
config_group_init(&most_sound_subsys.subsys.su_group);
mutex_init(&most_sound_subsys.subsys.su_mutex);
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 8e9a0b67c6ed..51a6b41d5b82 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -52,7 +52,7 @@ struct most_channel {
u16 channel_id;
char name[STRING_SIZE];
bool is_poisoned;
- struct mutex start_mutex;
+ struct mutex start_mutex; /* channel activation synchronization */
struct mutex nq_mutex; /* nq thread synchronization */
int is_starving;
struct most_interface *iface;
@@ -60,7 +60,7 @@ struct most_channel {
bool keep_mbo;
bool enqueue_halt;
struct list_head fifo;
- spinlock_t fifo_lock;
+ spinlock_t fifo_lock; /* fifo access synchronization */
struct list_head halt_fifo;
struct list_head list;
struct pipe pipe0;
@@ -84,11 +84,11 @@ static const struct {
int most_ch_data_type;
const char *name;
} ch_data_type[] = {
- { MOST_CH_CONTROL, "control\n" },
- { MOST_CH_ASYNC, "async\n" },
- { MOST_CH_SYNC, "sync\n" },
- { MOST_CH_ISOC, "isoc\n"},
- { MOST_CH_ISOC, "isoc_avp\n"},
+ { MOST_CH_CONTROL, "control" },
+ { MOST_CH_ASYNC, "async" },
+ { MOST_CH_SYNC, "sync" },
+ { MOST_CH_ISOC, "isoc"},
+ { MOST_CH_ISOC, "isoc_avp"},
};
/**
@@ -521,48 +521,6 @@ static ssize_t components_show(struct device_driver *drv, char *buf)
}
/**
- * split_string - parses buf and extracts ':' separated substrings.
- *
- * @buf: complete string from attribute 'add_channel'
- * @a: storage for 1st substring (=interface name)
- * @b: storage for 2nd substring (=channel name)
- * @c: storage for 3rd substring (=component name)
- * @d: storage optional 4th substring (=user defined name)
- *
- * Examples:
- *
- * Input: "mdev0:ch6:cdev:my_channel\n" or
- * "mdev0:ch6:cdev:my_channel"
- *
- * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
- *
- * Input: "mdev1:ep81:cdev\n"
- * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
- *
- * Input: "mdev1:ep81"
- * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
- */
-static int split_string(char *buf, char **a, char **b, char **c, char **d)
-{
- *a = strsep(&buf, ":");
- if (!*a)
- return -EIO;
-
- *b = strsep(&buf, ":\n");
- if (!*b)
- return -EIO;
-
- *c = strsep(&buf, ":\n");
- if (!*c)
- return -EIO;
-
- if (d)
- *d = strsep(&buf, ":\n");
-
- return 0;
-}
-
-/**
* get_channel - get pointer to channel
* @mdev: name of the device interface
* @mdev_ch: name of channel
@@ -675,13 +633,13 @@ int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
if (!c)
return -ENODEV;
- if (!strcmp(buf, "dir_rx\n")) {
+ if (!strcmp(buf, "dir_rx")) {
c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "rx\n")) {
+ } else if (!strcmp(buf, "rx")) {
c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "dir_tx\n")) {
+ } else if (!strcmp(buf, "dir_tx")) {
c->cfg.direction = MOST_CH_TX;
- } else if (!strcmp(buf, "tx\n")) {
+ } else if (!strcmp(buf, "tx")) {
c->cfg.direction = MOST_CH_TX;
} else {
pr_info("Invalid direction\n");
@@ -723,48 +681,6 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
return link_channel_to_component(c, comp, link_name, comp_param);
}
-/**
- * remove_link_store - store function for remove_link attribute
- * @drv: device driver
- * @buf: buffer
- * @len: buffer length
- *
- * Example:
- * echo "mdev0:ep81" >remove_link
- */
-static ssize_t remove_link_store(struct device_driver *drv,
- const char *buf,
- size_t len)
-{
- struct most_channel *c;
- struct core_component *comp;
- char buffer[STRING_SIZE];
- char *mdev;
- char *mdev_ch;
- char *comp_name;
- int ret;
- size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
- strlcpy(buffer, buf, max_len);
- ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
- if (ret)
- return ret;
- comp = match_component(comp_name);
- if (!comp)
- return -ENODEV;
- c = get_channel(mdev, mdev_ch);
- if (!c)
- return -ENODEV;
-
- if (comp->disconnect_channel(c->iface, c->channel_id))
- return -EIO;
- if (c->pipe0.comp == comp)
- c->pipe0.comp = NULL;
- if (c->pipe1.comp == comp)
- c->pipe1.comp = NULL;
- return len;
-}
-
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
struct most_channel *c;
@@ -790,12 +706,10 @@ int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);
-static DRIVER_ATTR_WO(remove_link);
static struct attribute *mc_attrs[] = {
DRV_ATTR(links),
DRV_ATTR(components),
- DRV_ATTR(remove_link),
NULL,
};
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/core.h
index 652aaa771029..49859aef98df 100644
--- a/drivers/staging/most/core.h
+++ b/drivers/staging/most/core.h
@@ -265,6 +265,7 @@ struct most_interface {
struct core_component {
struct list_head list;
const char *name;
+ struct module *mod;
int (*probe_channel)(struct most_interface *iface, int channel_idx,
struct most_channel_config *cfg, char *name,
char *param);
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index 26a31854c636..6cab1bb8956e 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -498,6 +498,7 @@ put_nd:
}
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = "net",
.probe_channel = comp_probe_channel,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 79817061fcfa..723d0bd1cc21 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -344,8 +344,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
pr_err("Requested number of channels not supported.\n");
return -EINVAL;
}
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
/**
@@ -359,7 +358,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
*/
static int pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
/**
@@ -469,7 +468,6 @@ static const struct snd_pcm_ops pcm_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static int split_arg_list(char *buf, u16 *ch_num, char **sample_res)
@@ -663,6 +661,8 @@ skip_adpt_alloc:
pcm->private_data = channel;
strscpy(pcm->name, device_name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, direction, &pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
@@ -782,6 +782,7 @@ static int audio_tx_completion(struct most_interface *iface, int channel_id)
* Initialization of the struct core_component
*/
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = DRIVER_NAME,
.probe_channel = audio_probe_channel,
.disconnect_channel = audio_disconnect_channel,
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 250af9fb704d..10c1ef7e3a3e 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -528,6 +528,7 @@ static int comp_disconnect_channel(struct most_interface *iface,
}
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = "video",
.probe_channel = comp_probe_channel,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d964642d95a3..20b898954416 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -208,8 +208,8 @@ static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
{
- dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
- "tctx %08x, tdtx: %08x, rbase %08x, " \
+ dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, "
+ "tctx %08x, tdtx: %08x, rbase %08x, "
"rcnt %08x, rctx %08x, rdtx %08x\n",
mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
@@ -220,7 +220,7 @@ static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
- dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
+ dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, "
"intr_stat %08x, intr_mask %08x\n",
mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
@@ -243,9 +243,9 @@ static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
tx_desc = &chan->tx_ring[i];
rx_desc = &chan->rx_ring[i];
- dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
+ dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, "
"tx addr1: %08x, rx addr0 %08x, flags %08x\n",
- i, tx_desc->addr0, tx_desc->flags, \
+ i, tx_desc->addr0, tx_desc->flags,
tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
}
}
@@ -548,7 +548,8 @@ static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
int i;
chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+ 2 * HSDMA_DESCS_NUM *
+ sizeof(*chan->tx_ring),
&chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
if (!chan->tx_ring)
goto no_mem;
@@ -569,8 +570,8 @@ static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
{
if (chan->tx_ring) {
dma_free_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
- chan->tx_ring, chan->desc_addr);
+ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+ chan->tx_ring, chan->desc_addr);
chan->tx_ring = NULL;
chan->rx_ring = NULL;
}
@@ -650,7 +651,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
struct mtk_hsdma_chan *chan;
struct mtk_hsdam_engine *hsdma;
struct dma_device *dd;
- struct resource *res;
int ret;
int irq;
void __iomem *base;
@@ -667,8 +667,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (!hsdma)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
hsdma->base = base + HSDMA_BASE_OFFSET;
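For reference, the helper introduced here is roughly shorthand for the two calls it replaces, which is why the local struct resource variable can be dropped:

	/* devm_platform_ioremap_resource(pdev, 0) expands, in effect, to: */
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);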
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index af928b75a940..ce58042f2f21 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -2,7 +2,6 @@
config PCI_MT7621
tristate "MediaTek MT7621 PCI Controller"
depends on RALINK
- depends on PCI
select PCI_DRIVERS_GENERIC
help
This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 6b98827da57f..3633c924848e 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -29,15 +29,14 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/sys_soc.h>
#include <mt7621.h>
#include <ralink_regs.h>
#include "../../pci/pci.h"
/* sysctl */
-#define MT7621_CHIP_REV_ID 0x0c
#define MT7621_GPIO_MODE 0x60
-#define CHIP_REV_MT7621_E2 0x0101
/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM 0x70c
@@ -126,6 +125,8 @@ struct mt7621_pcie_port {
* @ports: pointer to PCIe port information
* @perst: gpio reset
* @rst: pointer to pcie reset
+ * @resets_inverted: whether the PCIe reset lines are inverted
+ * (depends on the chip revision).
*/
struct mt7621_pcie {
void __iomem *base;
@@ -140,6 +141,7 @@ struct mt7621_pcie {
struct list_head ports;
struct gpio_desc *perst;
struct reset_control *rst;
+ bool resets_inverted;
};
static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
@@ -229,9 +231,9 @@ static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port)
static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
{
- u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);
+ struct mt7621_pcie *pcie = port->pcie;
- if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
+ if (pcie->resets_inverted)
reset_control_assert(port->pcie_rst);
else
reset_control_deassert(port->pcie_rst);
@@ -239,9 +241,9 @@ static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
{
- u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);
+ struct mt7621_pcie *pcie = port->pcie;
- if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
+ if (pcie->resets_inverted)
reset_control_deassert(port->pcie_rst);
else
reset_control_assert(port->pcie_rst);
@@ -641,9 +643,14 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host,
return pci_host_probe(host);
}
+static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
+ { .soc_id = "mt7621", .revision = "E2" },
+ { /* sentinel */ }
+};
+
static int mt7621_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct soc_device_attribute *attr;
struct mt7621_pcie *pcie;
struct pci_host_bridge *bridge;
int err;
@@ -661,6 +668,10 @@ static int mt7621_pci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
INIT_LIST_HEAD(&pcie->ports);
+ attr = soc_device_match(mt7621_pci_quirks_match);
+ if (attr)
+ pcie->resets_inverted = true;
+
err = mt7621_pcie_parse_dt(pcie);
if (err) {
dev_err(dev, "Parsing DT failed\n");
diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO
index 8f172b017b94..20e22ecb9903 100644
--- a/drivers/staging/netlogic/TODO
+++ b/drivers/staging/netlogic/TODO
@@ -1,6 +1,6 @@
* Implementing 64bit stat counter in software
* All memory allocation should be changed to DMA allocations
-* Changing comments in to linux standred format
+* Changing comments into linux standard format
Please send patches
To:
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 05079f7be841..204fcdfc022f 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -976,8 +976,7 @@ static int xlr_net_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->port_id = (pdev->id * 4) + port;
priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, port);
- priv->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ priv->base_addr = devm_platform_ioremap_resource(pdev, port);
if (IS_ERR(priv->base_addr)) {
err = PTR_ERR(priv->base_addr);
goto err_gmac;
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 5c12cacf75e1..9fa98c16f1d9 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -8,7 +8,7 @@ config MFD_NVEC
controller.
To compile this driver as a module, say M here: the module will be
- called mfd-nvec
+ called mfd-nvec
config KEYBOARD_NVEC
tristate "Keyboard on nVidia compliant EC"
@@ -18,7 +18,7 @@ config KEYBOARD_NVEC
a nVidia compliant embedded controller.
To compile this driver as a module, say M here: the module will be
- called keyboard-nvec
+ called keyboard-nvec
config SERIO_NVEC_PS2
tristate "PS2 on nVidia EC"
@@ -28,7 +28,7 @@ config SERIO_NVEC_PS2
to a nVidia compliant embedded controller.
To compile this driver as a module, say M here: the module will be
- called serio-nvec-ps2
+ called serio-nvec-ps2
config NVEC_POWER
@@ -39,7 +39,7 @@ config NVEC_POWER
nVidia compliant embedded controllers.
To compile this driver as a module, say M here: the module will be
- called nvec-power
+ called nvec-power
config NVEC_PAZ00
@@ -50,5 +50,5 @@ config NVEC_PAZ00
devices, e.g. Toshiba AC100 and Dynabooks AZ netbooks.
To compile this driver as a module, say M here: the module will be
- called nvec-paz00
+ called nvec-paz00
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index a5321cc692c5..582c9187559d 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -1836,8 +1836,7 @@ static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
*
* Returns: Pipe or NULL if none are ready
*/
-static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
- struct octeon_hcd *usb,
+static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb,
enum cvmx_usb_transfer xfer_type)
{
struct list_head *list = usb->active_pipes + xfer_type;
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index ffac0c4b3f5c..c798672d61b2 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -65,7 +65,7 @@ int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- cvmx_helper_link_info_t li)
+ union cvmx_helper_link_info li)
{
if (li.s.link_up) {
pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
@@ -81,7 +81,7 @@ void cvm_oct_note_carrier(struct octeon_ethernet *priv,
void cvm_oct_adjust_link(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
link_info.u64 = 0;
link_info.s.link_up = dev->phydev->link ? 1 : 0;
@@ -106,7 +106,7 @@ int cvm_oct_common_stop(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
union cvmx_gmxx_prtx_cfg gmx_cfg;
int index = INDEX(priv->port);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d91fd5ce9e68..0c4fac31540a 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -53,7 +53,7 @@ static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
static void cvm_oct_check_preamble_errors(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
unsigned long flags;
link_info.u64 = priv->link_info;
@@ -103,7 +103,7 @@ static void cvm_oct_check_preamble_errors(struct net_device *dev)
static void cvm_oct_rgmii_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
bool status_change;
link_info = cvmx_helper_link_get(priv->port);
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 0e65955c746b..2c16230f993c 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -60,7 +60,7 @@ static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
*
* Returns Non-zero if the packet can be dropped, zero otherwise.
*/
-static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
+static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
{
int port;
@@ -135,7 +135,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
return 0;
}
-static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
+static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
{
int segments = work->word2.s.bufs;
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
@@ -215,7 +215,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
struct sk_buff *skb = NULL;
struct sk_buff **pskb = NULL;
int skb_in_hw;
- cvmx_wqe_t *work;
+ struct cvmx_wqe *work;
int port;
if (USE_ASYNC_IOBDMA && did_work_request)
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 83469061a542..b334cf89794e 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -127,7 +127,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
*/
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
- cvmx_pko_command_word0_t pko_command;
+ union cvmx_pko_command_word0 pko_command;
union cvmx_buf_ptr hw_buffer;
u64 old_scratch;
u64 old_scratch2;
@@ -514,7 +514,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
void *copy_location;
/* Get a work queue entry */
- cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+ struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
if (unlikely(!work)) {
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
@@ -598,7 +598,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
#endif
work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
(ip_hdr(skb)->frag_off ==
- 1 << 14));
+ cpu_to_be16(1 << 14)));
#if 0
/* Assume Linux is sending a good packet */
work->word2.s.IP_exc = 0;
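The cpu_to_be16() change above matters because frag_off is a __be16 field while 1 << 14 (the IP "don't fragment" bit) is a host-order constant. An equivalent, arguably clearer spelling of the fixed test, using the IP_DF define from linux/ip.h:

	work->word2.s.is_frag = !(ip_hdr(skb)->frag_off == 0 ||
				  ip_hdr(skb)->frag_off == htons(IP_DF));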
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index cf8e9a23ebf9..f42c3816ce49 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -172,7 +172,7 @@ static void cvm_oct_configure_common_hw(void)
*/
int cvm_oct_free_work(void *work_queue_entry)
{
- cvmx_wqe_t *work = work_queue_entry;
+ struct cvmx_wqe *work = work_queue_entry;
int segments = work->word2.s.bufs;
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
@@ -460,7 +460,7 @@ int cvm_oct_common_open(struct net_device *dev,
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
int rv;
rv = cvm_oct_phy_setup_device(dev);
@@ -496,7 +496,7 @@ int cvm_oct_common_open(struct net_device *dev,
void cvm_oct_link_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
link_info = cvmx_helper_link_get(priv->port);
if (link_info.u64 == priv->link_info)
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index a8a864b40913..a6140705706f 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -14,7 +14,7 @@
#include <linux/of.h>
#include <linux/phy.h>
-#ifdef CONFIG_MIPS
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
@@ -91,7 +91,7 @@ int cvm_oct_common_stop(struct net_device *dev);
int cvm_oct_common_open(struct net_device *dev,
void (*link_poll)(struct net_device *));
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- cvmx_helper_link_info_t li);
+ union cvmx_helper_link_info li);
void cvm_oct_link_poll(struct net_device *dev);
extern int always_use_pow;
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
index b78ce9eaab85..79213c045504 100644
--- a/drivers/staging/octeon/octeon-stubs.h
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -1,5 +1,8 @@
#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
-#define XKPHYS_TO_PHYS(p) (p)
+
+#ifndef XKPHYS_TO_PHYS
+# define XKPHYS_TO_PHYS(p) (p)
+#endif
#define OCTEON_IRQ_WORKQ0 0
#define OCTEON_IRQ_RML 0
@@ -38,7 +41,7 @@
#define CVMX_NPI_RSL_INT_BLOCKS 0
#define CVMX_POW_WQ_INT_PC 0
-typedef union {
+union cvmx_pip_wqe_word2 {
uint64_t u64;
struct {
uint64_t bufs:8;
@@ -114,13 +117,13 @@ typedef union {
uint64_t err_code:8;
} snoip;
-} cvmx_pip_wqe_word2;
+};
union cvmx_pip_wqe_word0 {
struct {
uint64_t next_ptr:40;
uint8_t unused;
- uint16_t hw_chksum;
+ __wsum hw_chksum;
} cn38xx;
struct {
uint64_t pknd:6; /* 0..5 */
@@ -180,15 +183,15 @@ union cvmx_buf_ptr {
} s;
};
-typedef struct {
+struct cvmx_wqe {
union cvmx_wqe_word0 word0;
union cvmx_wqe_word1 word1;
- cvmx_pip_wqe_word2 word2;
+ union cvmx_pip_wqe_word2 word2;
union cvmx_buf_ptr packet_ptr;
uint8_t packet_data[96];
-} cvmx_wqe_t;
+};
-typedef union {
+union cvmx_helper_link_info {
uint64_t u64;
struct {
uint64_t reserved_20_63:44;
@@ -196,18 +199,18 @@ typedef union {
uint64_t full_duplex:1; /**< 1 if the link is full duplex */
uint64_t speed:18; /**< Speed of the link in Mbps */
} s;
-} cvmx_helper_link_info_t;
+};
-typedef enum {
+enum cvmx_fau_reg_32 {
CVMX_FAU_REG_32_START = 0,
-} cvmx_fau_reg_32_t;
+};
-typedef enum {
+enum cvmx_fau_op_size {
CVMX_FAU_OP_SIZE_8 = 0,
CVMX_FAU_OP_SIZE_16 = 1,
CVMX_FAU_OP_SIZE_32 = 2,
CVMX_FAU_OP_SIZE_64 = 3
-} cvmx_fau_op_size_t;
+};
typedef enum {
CVMX_SPI_MODE_UNKNOWN = 0,
@@ -1134,27 +1137,27 @@ union cvmx_npi_rsl_int_blocks {
} cn50xx;
};
-typedef union {
+union cvmx_pko_command_word0 {
uint64_t u64;
struct {
- uint64_t total_bytes:16;
- uint64_t segs:6;
- uint64_t dontfree:1;
- uint64_t ignore_i:1;
- uint64_t ipoffp1:7;
- uint64_t gather:1;
- uint64_t rsp:1;
- uint64_t wqp:1;
- uint64_t n2:1;
- uint64_t le:1;
- uint64_t reg0:11;
- uint64_t subone0:1;
- uint64_t reg1:11;
- uint64_t subone1:1;
- uint64_t size0:2;
- uint64_t size1:2;
+ uint64_t total_bytes:16;
+ uint64_t segs:6;
+ uint64_t dontfree:1;
+ uint64_t ignore_i:1;
+ uint64_t ipoffp1:7;
+ uint64_t gather:1;
+ uint64_t rsp:1;
+ uint64_t wqp:1;
+ uint64_t n2:1;
+ uint64_t le:1;
+ uint64_t reg0:11;
+ uint64_t subone0:1;
+ uint64_t reg1:11;
+ uint64_t subone1:1;
+ uint64_t size0:2;
+ uint64_t size1:2;
} s;
-} cvmx_pko_command_word0_t;
+};
union cvmx_ciu_timx {
uint64_t u64;
@@ -1175,16 +1178,18 @@ union cvmx_gmxx_rxx_rx_inbnd {
} s;
};
-static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
+static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
int32_t value)
{
return value;
}
-static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
+static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
{ }
-static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
+static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
{ }
static inline uint64_t cvmx_scratch_read64(uint64_t address)
@@ -1195,7 +1200,7 @@ static inline uint64_t cvmx_scratch_read64(uint64_t address)
static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
{ }
-static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
{
return 0;
}
@@ -1264,15 +1269,15 @@ static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
return 0;
}
-static inline cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
{
- cvmx_helper_link_info_t ret = { .u64 = 0 };
+ union cvmx_helper_link_info ret = { .u64 = 0 };
return ret;
}
static inline int cvmx_helper_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info)
+ union cvmx_helper_link_info link_info)
{
return 0;
}
@@ -1342,14 +1347,14 @@ static inline void cvmx_pow_work_request_async(int scr_addr,
cvmx_pow_wait_t wait)
{ }
-static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
+static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
{
- cvmx_wqe_t *wqe = (void *)(unsigned long)scr_addr;
+ struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;
return wqe;
}
-static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
{
return (void *)(unsigned long)wait;
}
@@ -1361,7 +1366,7 @@ static inline int cvmx_spi_restart_interface(int interface,
}
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
- cvmx_fau_reg_32_t reg,
+ enum cvmx_fau_reg_32 reg,
int32_t value)
{ }
@@ -1370,6 +1375,7 @@ static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
int port)
{
union cvmx_gmxx_rxx_rx_inbnd r;
+
r.u64 = 0;
return r;
}
@@ -1379,29 +1385,27 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
{ }
static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
- uint64_t queue, cvmx_pko_command_word0_t pko_command,
+ uint64_t queue, union cvmx_pko_command_word0 pko_command,
union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
{
- cvmx_pko_status_t ret = 0;
-
- return ret;
+ return 0;
}
-static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
{ }
-static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
{ }
-static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
{
return 0;
}
-static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
{ }
-static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
+static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
enum cvmx_pow_tag_type tag_type,
uint64_t qos, uint64_t grp)
{ }
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index f5c716bb3413..d1a0dea09ef0 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -3,7 +3,7 @@ config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
depends on OLPC && FB
depends on I2C
- depends on (GPIO_CS5535 || GPIO_CS5535=n)
+ depends on GPIO_CS5535 && ACPI
select BACKLIGHT_CLASS_DEVICE
help
In order to support very low power operation, the XO laptop uses a
@@ -15,22 +15,3 @@ config FB_OLPC_DCON
This controller is only available on OLPC platforms. Unless you have
one of these platforms, you will want to say 'N'.
-config FB_OLPC_DCON_1
- bool "OLPC XO-1 DCON support"
- depends on FB_OLPC_DCON && GPIO_CS5535
- default y
- help
- Enable support for the DCON in XO-1 model laptops. The kernel
- communicates with the DCON using model-specific code. If you
- have an XO-1 (or if you're unsure what model you have), you should
- say 'Y'.
-
-config FB_OLPC_DCON_1_5
- bool "OLPC XO-1.5 DCON support"
- depends on FB_OLPC_DCON && ACPI
- default y
- help
- Enable support for the DCON in XO-1.5 model laptops. The kernel
- communicates with the DCON using model-specific code. If you
- have an XO-1.5 (or if you're unsure what model you have), you
- should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
index cb1248c5c162..734b2ce26066 100644
--- a/drivers/staging/olpc_dcon/Makefile
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-olpc-dcon-objs += olpc_dcon.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
+olpc-dcon-objs += olpc_dcon.o olpc_dcon_xo_1.o olpc_dcon_xo_1_5.o
obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index d8296f2ae872..7c263358b44a 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -8,7 +8,6 @@ TODO:
internals, but isn't properly integrated, is not the correct solution.
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- - allow simultaneous XO-1 and XO-1.5 support
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index a254238be181..a0d6d90f4cc8 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -790,15 +790,11 @@ static struct i2c_driver dcon_driver = {
static int __init olpc_dcon_init(void)
{
-#ifdef CONFIG_FB_OLPC_DCON_1_5
/* XO-1.5 */
if (olpc_board_at_least(olpc_board(0xd0)))
pdata = &dcon_pdata_xo_1_5;
-#endif
-#ifdef CONFIG_FB_OLPC_DCON_1
- if (!pdata)
+ else
pdata = &dcon_pdata_xo_1;
-#endif
return i2c_add_driver(&dcon_driver);
}
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 22d976a09785..41bd1360b56e 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -106,12 +106,7 @@ struct dcon_gpio {
irqreturn_t dcon_interrupt(int irq, void *id);
-#ifdef CONFIG_FB_OLPC_DCON_1
extern struct dcon_platform_data dcon_pdata_xo_1;
-#endif
-
-#ifdef CONFIG_FB_OLPC_DCON_1_5
extern struct dcon_platform_data dcon_pdata_xo_1_5;
-#endif
#endif
diff --git a/drivers/staging/pi433/Kconfig b/drivers/staging/pi433/Kconfig
index 8acde0814206..dd9e4709d1a8 100644
--- a/drivers/staging/pi433/Kconfig
+++ b/drivers/staging/pi433/Kconfig
@@ -1,17 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
config PI433
- tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
- depends on SPI
- help
- This option allows you to enable support for the radio module Pi433.
+ tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
+ depends on SPI
+ help
+ This option allows you to enable support for the radio module Pi433.
- Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
- or compatible. It extends the Raspberry Pi with the option, to
- send and receive data in the 433MHz ISM band - for example to
- communicate between two systems without using ethernet or bluetooth
- or for control or read sockets, actors, sensors, widely available
- for low price.
+ Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
+ or compatible. It extends the Raspberry Pi with the option, to
+ send and receive data in the 433MHz ISM band - for example to
+ communicate between two systems without using ethernet or bluetooth
+ or for control or read sockets, actors, sensors, widely available
+ for low price.
- For details or the option to buy, please visit https://pi433.de/en.html
+ For details or the option to buy, please visit https://pi433.de/en.html
- If in doubt, say N here, but saying yes most probably won't hurt
+ If in doubt, say N here, but saying yes most probably won't hurt
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 40c6f4e7632f..313d22f6210f 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -928,16 +928,6 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return 0;
}
-#ifdef CONFIG_COMPAT
-static long
-pi433_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- return pi433_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
-}
-#else
-#define pi433_compat_ioctl NULL
-#endif /* CONFIG_COMPAT */
-
/*-------------------------------------------------------------------------*/
static int pi433_open(struct inode *inode, struct file *filp)
@@ -1094,7 +1084,7 @@ static const struct file_operations pi433_fops = {
.write = pi433_write,
.read = pi433_read,
.unlocked_ioctl = pi433_ioctl,
- .compat_ioctl = pi433_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pi433_open,
.release = pi433_release,
.llseek = no_llseek,
diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO
index 51c509084e80..f93f7428f5d5 100644
--- a/drivers/staging/qlge/TODO
+++ b/drivers/staging/qlge/TODO
@@ -1,6 +1,3 @@
-* reception stalls permanently (until admin intervention) if the rx buffer
- queues become empty because of allocation failures (ex. under memory
- pressure)
* commit 7c734359d350 ("qlge: Size RX buffers based on MTU.", v2.6.33-rc1)
introduced dead code in the receive routines, which should be rewritten
anyways by the admission of the author himself, see the comment above
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index ad7c5eb8a3b6..6ec7e3ce3863 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -34,8 +34,13 @@
#define NUM_TX_RING_ENTRIES 256
#define NUM_RX_RING_ENTRIES 256
-#define NUM_SMALL_BUFFERS 512
-#define NUM_LARGE_BUFFERS 512
+/* Use the same len for sbq and lbq. Note that it seems like the device might
+ * support different sizes.
+ */
+#define QLGE_BQ_SHIFT 9
+#define QLGE_BQ_LEN BIT(QLGE_BQ_SHIFT)
+#define QLGE_BQ_SIZE (QLGE_BQ_LEN * sizeof(__le64))
+
#define DB_PAGE_SIZE 4096
/* Calculate the number of (4k) pages required to
@@ -46,8 +51,8 @@
(((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
+ MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64))
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
@@ -77,6 +82,11 @@
#define LSD(x) ((u32)((u64)(x)))
#define MSD(x) ((u32)((((u64)(x)) >> 32)))
+/* In some cases, the device interprets a value of 0x0000 as 65536. These
+ * cases are marked using the following macro.
+ */
+#define QLGE_FIT16(value) ((u16)(value))
+
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
@@ -1358,25 +1368,6 @@ struct tx_ring_desc {
struct tx_ring_desc *next;
};
-struct page_chunk {
- struct page *page; /* master page */
- char *va; /* virt addr for this chunk */
- u64 map; /* mapping for master */
- unsigned int offset; /* offset for this chunk */
- unsigned int last_flag; /* flag set for last chunk in page */
-};
-
-struct bq_desc {
- union {
- struct page_chunk pg_chunk;
- struct sk_buff *skb;
- } p;
- __le64 *addr;
- u32 index;
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
- DEFINE_DMA_UNMAP_LEN(maplen);
-};
-
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
struct tx_ring {
@@ -1406,15 +1397,68 @@ struct tx_ring {
u64 tx_errors;
};
-/*
- * Type of inbound queue.
- */
-enum {
- DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
- TX_Q = 3, /* Handles outbound completions. */
- RX_Q = 4, /* Handles inbound completions. */
+struct qlge_page_chunk {
+ struct page *page;
+ void *va; /* virt addr including offset */
+ unsigned int offset;
};
+struct qlge_bq_desc {
+ union {
+ /* for large buffers */
+ struct qlge_page_chunk pg_chunk;
+ /* for small buffers */
+ struct sk_buff *skb;
+ } p;
+ dma_addr_t dma_addr;
+ /* address in ring where the buffer address is written for the device */
+ __le64 *buf_ptr;
+ u32 index;
+};
+
+/* buffer queue */
+struct qlge_bq {
+ __le64 *base;
+ dma_addr_t base_dma;
+ __le64 *base_indirect;
+ dma_addr_t base_indirect_dma;
+ struct qlge_bq_desc *queue;
+ /* prod_idx is the index of the first buffer that may NOT be used by
+ * hw, ie. one after the last. Advanced by sw.
+ */
+ void __iomem *prod_idx_db_reg;
+ /* next index where sw should refill a buffer for hw */
+ u16 next_to_use;
+ /* next index where sw expects to find a buffer filled by hw */
+ u16 next_to_clean;
+ enum {
+ QLGE_SB, /* small buffer */
+ QLGE_LB, /* large buffer */
+ } type;
+};
+
+#define QLGE_BQ_CONTAINER(bq) \
+({ \
+ typeof(bq) _bq = bq; \
+ (struct rx_ring *)((char *)_bq - (_bq->type == QLGE_SB ? \
+ offsetof(struct rx_ring, sbq) : \
+ offsetof(struct rx_ring, lbq))); \
+})
+
+/* Experience shows that the device ignores the low 4 bits of the tail index.
+ * Refill up to a x16 multiple.
+ */
+#define QLGE_BQ_ALIGN(index) ALIGN_DOWN(index, 16)
+
+#define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1))
+
+#define QLGE_BQ_HW_OWNED(bq) \
+({ \
+ typeof(bq) _bq = bq; \
+ QLGE_BQ_WRAP(QLGE_BQ_ALIGN((_bq)->next_to_use) - \
+ (_bq)->next_to_clean); \
+})
+
struct rx_ring {
struct cqicb cqicb; /* The chip's completion queue init control block. */
@@ -1432,40 +1476,17 @@ struct rx_ring {
void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
/* Large buffer queue elements. */
- u32 lbq_len; /* entry count */
- u32 lbq_size; /* size in bytes of queue */
- u32 lbq_buf_size;
- void *lbq_base;
- dma_addr_t lbq_base_dma;
- void *lbq_base_indirect;
- dma_addr_t lbq_base_indirect_dma;
- struct page_chunk pg_chunk; /* current page for chunks */
- struct bq_desc *lbq; /* array of control blocks */
- void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
- u32 lbq_prod_idx; /* current sw prod idx */
- u32 lbq_curr_idx; /* next entry we expect */
- u32 lbq_clean_idx; /* beginning of new descs */
- u32 lbq_free_cnt; /* free buffer desc cnt */
+ struct qlge_bq lbq;
+ struct qlge_page_chunk master_chunk;
+ dma_addr_t chunk_dma_addr;
/* Small buffer queue elements. */
- u32 sbq_len; /* entry count */
- u32 sbq_size; /* size in bytes of queue */
- u32 sbq_buf_size;
- void *sbq_base;
- dma_addr_t sbq_base_dma;
- void *sbq_base_indirect;
- dma_addr_t sbq_base_indirect_dma;
- struct bq_desc *sbq; /* array of control blocks */
- void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
- u32 sbq_prod_idx; /* current sw prod idx */
- u32 sbq_curr_idx; /* next entry we expect */
- u32 sbq_clean_idx; /* beginning of new descs */
- u32 sbq_free_cnt; /* free buffer desc cnt */
+ struct qlge_bq sbq;
/* Misc. handler elements. */
- u32 type; /* Type of queue, tx, rx. */
u32 irq; /* Which vector this ring is assigned. */
u32 cpu; /* Which CPU this should run on. */
+ struct delayed_work refill_work;
char name[IFNAMSIZ + 5];
struct napi_struct napi;
u8 reserved;
@@ -1982,11 +2003,6 @@ struct intr_context {
u32 intr_dis_mask; /* value/mask used to disable this intr */
u32 intr_read_mask; /* value/mask used to read this intr */
char name[IFNAMSIZ * 2];
- atomic_t irq_cnt; /* irq_cnt is used in single vector
- * environment. It's incremented for each
- * irq handler that is scheduled. When each
- * handler finishes it decrements irq_cnt and
- * enables interrupts if it's zero. */
irq_handler_t handler;
};
@@ -2074,7 +2090,6 @@ struct ql_adapter {
u32 port; /* Port number this adapter */
spinlock_t adapter_lock;
- spinlock_t hw_lock;
spinlock_t stats_lock;
/* PCI Bus Relative Register Addresses */
@@ -2115,6 +2130,7 @@ struct ql_adapter {
struct rx_ring rx_ring[MAX_RX_RINGS];
struct tx_ring tx_ring[MAX_TX_RINGS];
unsigned int lbq_buf_order;
+ u32 lbq_buf_size;
int rx_csum;
u32 default_rx_queue;
@@ -2235,7 +2251,6 @@ void ql_mpi_reset_work(struct work_struct *work);
void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
void ql_mpi_idc_work(struct work_struct *work);
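
The new sbq/lbq bookkeeping relies on free-running 16-bit indices that are wrapped and aligned only at the point of use. A small stand-alone illustration of the macros added above (user-space C, with ALIGN_DOWN expanded by hand and the index values chosen arbitrarily, not taken from the driver) shows how QLGE_BQ_WRAP, QLGE_BQ_ALIGN and QLGE_BQ_HW_OWNED interact:

#include <stdio.h>

#define QLGE_BQ_SHIFT		9
#define QLGE_BQ_LEN		(1U << QLGE_BQ_SHIFT)	/* 512 entries */
#define QLGE_BQ_ALIGN(index)	((index) & ~15U)	/* device ignores low 4 bits */
#define QLGE_BQ_WRAP(index)	((index) & (QLGE_BQ_LEN - 1))

int main(void)
{
	unsigned int next_to_use = 510, next_to_clean = 40;

	/* Buffers the hardware may still own: aligned producer index minus
	 * consumer index, wrapped into the ring.
	 * ALIGN(510) = 496, (496 - 40) & 511 = 456.
	 */
	printf("hw owned:    %u\n",
	       QLGE_BQ_WRAP(QLGE_BQ_ALIGN(next_to_use) - next_to_clean));

	/* Room the refill code may fill before running into the consumer:
	 * ALIGN(40 - 1) = 32, (32 - 510) & 511 = 34.
	 */
	printf("refill room: %u\n",
	       QLGE_BQ_WRAP(QLGE_BQ_ALIGN(next_to_clean - 1) - next_to_use));
	return 0;
}
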
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 31389ab8bdf7..83f34ca43aa4 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -7,7 +7,7 @@
/* Read a NIC register from the alternate function. */
static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
- u32 reg)
+ u32 reg)
{
u32 register_to_read;
u32 reg_val;
@@ -26,7 +26,7 @@ static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
/* Write a NIC register from the alternate function. */
static int ql_write_other_func_reg(struct ql_adapter *qdev,
- u32 reg, u32 reg_val)
+ u32 reg, u32 reg_val)
{
u32 register_to_read;
int status = 0;
@@ -41,7 +41,7 @@ static int ql_write_other_func_reg(struct ql_adapter *qdev,
}
static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
- u32 bit, u32 err_bit)
+ u32 bit, u32 err_bit)
{
u32 temp;
int count = 10;
@@ -61,22 +61,22 @@ static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
}
static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
+ u32 *data)
{
int status;
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
+ XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
/* set up for reg read */
- ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+ ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
+ XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
@@ -111,8 +111,8 @@ exit:
}
static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
- u32 *direct_ptr, u32 *indirect_ptr,
- unsigned int direct_valid, unsigned int indirect_valid)
+ u32 *direct_ptr, u32 *indirect_ptr,
+ bool direct_valid, bool indirect_valid)
{
unsigned int status;
@@ -133,16 +133,15 @@ static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
}
static int ql_get_serdes_regs(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump)
+ struct ql_mpi_coredump *mpi_coredump)
{
int status;
- unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
- unsigned int xaui_indirect_valid, i;
+ bool xfi_direct_valid = false, xfi_indirect_valid = false;
+ bool xaui_direct_valid = true, xaui_indirect_valid = true;
+ unsigned int i;
u32 *direct_ptr, temp;
u32 *indirect_ptr;
- xfi_direct_valid = xfi_indirect_valid = 0;
- xaui_direct_valid = xaui_indirect_valid = 1;
/* The XAUI needs to be read out per port */
status = ql_read_other_func_serdes_reg(qdev,
@@ -152,7 +151,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_indirect_valid = 0;
+ xaui_indirect_valid = false;
status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
@@ -161,7 +160,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_direct_valid = 0;
+ xaui_direct_valid = false;
/*
* XFI register is shared so only need to read one
@@ -176,18 +175,18 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
/* now see if i'm NIC 1 or NIC 2 */
if (qdev->func & 1)
/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_indirect_valid = 1;
+ xfi_indirect_valid = true;
else
- xfi_direct_valid = 1;
+ xfi_direct_valid = true;
}
if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
XG_SERDES_ADDR_XFI2_PWR_UP) {
/* now see if i'm NIC 1 or NIC 2 */
if (qdev->func & 1)
/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_direct_valid = 1;
+ xfi_direct_valid = true;
else
- xfi_indirect_valid = 1;
+ xfi_indirect_valid = true;
}
/* Get XAUI_AN register block. */
@@ -203,7 +202,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
+ xaui_direct_valid, xaui_indirect_valid);
/* Get XAUI_HSS_PCS register block. */
if (qdev->func & 1) {
@@ -220,7 +219,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
+ xaui_direct_valid, xaui_indirect_valid);
/* Get XAUI_XFI_AN register block. */
if (qdev->func & 1) {
@@ -233,7 +232,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_TRAIN register block. */
if (qdev->func & 1) {
@@ -248,7 +247,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_PCS register block. */
if (qdev->func & 1) {
@@ -265,7 +264,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_TX register block. */
if (qdev->func & 1) {
@@ -280,7 +279,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_RX register block. */
if (qdev->func & 1) {
@@ -296,7 +295,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_PLL register block. */
@@ -313,18 +312,18 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
return 0;
}
static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
+ u32 *data)
{
int status = 0;
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
@@ -333,7 +332,7 @@ static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
@@ -347,17 +346,17 @@ exit:
* skipping unused locations.
*/
static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
- unsigned int other_function)
+ unsigned int other_function)
{
int status = 0;
int i;
for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
/* We're reading 400 xgmac registers, but we filter out
- * serveral locations that are non-responsive to reads.
+ * several locations that are non-responsive to reads.
*/
if ((i == 0x00000114) ||
- (i == 0x00000118) ||
+ (i == 0x00000118) ||
(i == 0x0000013c) ||
(i == 0x00000140) ||
(i > 0x00000150 && i < 0x000001fc) ||
@@ -389,7 +388,6 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
{
- int status = 0;
int i;
for (i = 0; i < 8; i++, buf++) {
@@ -402,7 +400,7 @@ static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
*buf = ql_read32(qdev, CNA_ETS);
}
- return status;
+ return 0;
}
static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
@@ -411,7 +409,7 @@ static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
ql_write32(qdev, INTR_EN,
- qdev->intr_context[i].intr_read_mask);
+ qdev->intr_context[i].intr_read_mask);
*buf = ql_read32(qdev, INTR_EN);
}
}
@@ -427,7 +425,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
for (i = 0; i < 16; i++) {
status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_CAM_MAC, i, value);
+ MAC_ADDR_TYPE_CAM_MAC, i, value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of mac index register\n");
@@ -439,7 +437,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
}
for (i = 0; i < 32; i++) {
status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_MULTI_MAC, i, value);
+ MAC_ADDR_TYPE_MULTI_MAC, i, value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of mac index register\n");
@@ -498,7 +496,7 @@ end:
/* Read the MPI Processor core registers */
static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
- u32 offset, u32 count)
+ u32 offset, u32 count)
{
int i, status = 0;
for (i = 0; i < count; i++, buf++) {
@@ -511,7 +509,7 @@ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
/* Read the ASIC probe dump */
static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
- u32 valid, u32 *buf)
+ u32 valid, u32 *buf)
{
u32 module, mux_sel, probe, lo_val, hi_val;
@@ -546,13 +544,13 @@ static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
/* First we have to enable the probe mux */
ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
- PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
- PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
- PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
- PRB_MX_ADDR_VALID_FC_MOD, buf);
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
return 0;
}
@@ -667,7 +665,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
result_index = 0;
while ((result_index & MAC_ADDR_MR) == 0) {
result_index = ql_read32(qdev,
- MAC_ADDR_IDX);
+ MAC_ADDR_IDX);
}
result_data = ql_read32(qdev, MAC_ADDR_DATA);
*buf = result_index;
@@ -741,7 +739,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Insert the global header */
memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
+ sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
mpi_coredump->mpi_global_header.headerSize =
sizeof(struct mpi_coredump_global_header);
@@ -752,23 +750,23 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get generic NIC reg dump */
ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
+ NIC1_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
- NIC2_CONTROL_SEG_NUM,
+ NIC2_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
/* Get XGMac registers. (Segment 18, Rev C. step 21) */
ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
- NIC1_XGMAC_SEG_NUM,
+ NIC1_XGMAC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
- NIC2_XGMAC_SEG_NUM,
+ NIC2_XGMAC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
@@ -799,97 +797,97 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Rev C. Step 20a */
ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
- XAUI_AN_SEG_NUM,
+ XAUI_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xaui_an),
"XAUI AN Registers");
/* Rev C. Step 20b */
ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
- XAUI_HSS_PCS_SEG_NUM,
+ XAUI_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xaui_hss_pcs),
"XAUI HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
+ sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_an),
"XFI AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
- XFI_TRAIN_SEG_NUM,
+ XFI_TRAIN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_train),
"XFI TRAIN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
- XFI_HSS_PCS_SEG_NUM,
+ XFI_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_pcs),
"XFI HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
- XFI_HSS_TX_SEG_NUM,
+ XFI_HSS_TX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_tx),
"XFI HSS TX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
- XFI_HSS_RX_SEG_NUM,
+ XFI_HSS_RX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_rx),
"XFI HSS RX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
- XFI_HSS_PLL_SEG_NUM,
+ XFI_HSS_PLL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_pll),
"XFI HSS PLL Registers");
ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
- XAUI2_AN_SEG_NUM,
+ XAUI2_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xaui_an),
"XAUI2 AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
- XAUI2_HSS_PCS_SEG_NUM,
+ XAUI2_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
"XAUI2 HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
- XFI2_AN_SEG_NUM,
+ XFI2_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_an),
"XFI2 AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
- XFI2_TRAIN_SEG_NUM,
+ XFI2_TRAIN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_train),
"XFI2 TRAIN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
- XFI2_HSS_PCS_SEG_NUM,
+ XFI2_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
"XFI2 HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
- XFI2_HSS_TX_SEG_NUM,
+ XFI2_HSS_TX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_tx),
"XFI2 HSS TX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
- XFI2_HSS_RX_SEG_NUM,
+ XFI2_HSS_RX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_rx),
"XFI2 HSS RX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
- XFI2_HSS_PLL_SEG_NUM,
+ XFI2_HSS_PLL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_pll),
"XFI2 HSS PLL Registers");
@@ -903,7 +901,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
}
ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
- CORE_SEG_NUM,
+ CORE_SEG_NUM,
sizeof(mpi_coredump->core_regs_seg_hdr) +
sizeof(mpi_coredump->mpi_core_regs) +
sizeof(mpi_coredump->mpi_core_sh_regs),
@@ -922,7 +920,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the Test Logic Registers */
ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
- TEST_LOGIC_SEG_NUM,
+ TEST_LOGIC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->test_logic_regs),
"Test Logic Regs");
@@ -933,7 +931,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the RMII Registers */
ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
- RMII_SEG_NUM,
+ RMII_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->rmii_regs),
"RMII Registers");
@@ -944,7 +942,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FCMAC1 Registers */
ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
- FCMAC1_SEG_NUM,
+ FCMAC1_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fcmac1_regs),
"FCMAC1 Registers");
@@ -956,7 +954,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FCMAC2 Registers */
ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
- FCMAC2_SEG_NUM,
+ FCMAC2_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fcmac2_regs),
"FCMAC2 Registers");
@@ -968,7 +966,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FC1 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
- FC1_MBOX_SEG_NUM,
+ FC1_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fc1_mbx_regs),
"FC1 MBox Regs");
@@ -979,7 +977,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the IDE Registers */
ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
- IDE_SEG_NUM,
+ IDE_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ide_regs),
"IDE Registers");
@@ -990,7 +988,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the NIC1 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
- NIC1_MBOX_SEG_NUM,
+ NIC1_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic1_mbx_regs),
"NIC1 MBox Regs");
@@ -1001,7 +999,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the SMBus Registers */
ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
- SMBUS_SEG_NUM,
+ SMBUS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->smbus_regs),
"SMBus Registers");
@@ -1012,7 +1010,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FC2 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
- FC2_MBOX_SEG_NUM,
+ FC2_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fc2_mbx_regs),
"FC2 MBox Regs");
@@ -1023,7 +1021,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the NIC2 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
- NIC2_MBOX_SEG_NUM,
+ NIC2_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic2_mbx_regs),
"NIC2 MBox Regs");
@@ -1034,7 +1032,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the I2C Registers */
ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
- I2C_SEG_NUM,
+ I2C_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->i2c_regs),
"I2C Registers");
@@ -1045,7 +1043,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the MEMC Registers */
ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
- MEMC_SEG_NUM,
+ MEMC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->memc_regs),
"MEMC Registers");
@@ -1056,7 +1054,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the PBus Registers */
ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
- PBUS_SEG_NUM,
+ PBUS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->pbus_regs),
"PBUS Registers");
@@ -1067,7 +1065,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the MDE Registers */
ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
- MDE_SEG_NUM,
+ MDE_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->mde_regs),
"MDE Registers");
@@ -1077,7 +1075,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
+ MISC_NIC_INFO_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->misc_nic_info),
"MISC NIC INFO");
@@ -1089,14 +1087,14 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Segment 31 */
/* Get indexed register values. */
ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
+ INTR_STATES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->intr_states),
"INTR States");
ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
+ CAM_ENTRIES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->cam_entries),
"CAM Entries");
@@ -1105,18 +1103,18 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
+ ROUTING_WORDS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ &mpi_coredump->nic_routing_words[0]);
if (status)
goto err;
/* Segment 34 (Rev C. step 23) */
ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
+ ETS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ets),
"ETS Registers");
@@ -1125,24 +1123,24 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
- PROBE_DUMP_SEG_NUM,
+ PROBE_DUMP_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->probe_dump),
"Probe Dump");
ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
- ROUTING_INDEX_SEG_NUM,
+ ROUTING_INDEX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->routing_regs),
"Routing Regs");
status = ql_get_routing_index_registers(qdev,
- &mpi_coredump->routing_regs[0]);
+ &mpi_coredump->routing_regs[0]);
if (status)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
- MAC_PROTOCOL_SEG_NUM,
+ MAC_PROTOCOL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->mac_prot_regs),
"MAC Prot Regs");
@@ -1150,7 +1148,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the semaphore registers for all 5 functions */
ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
- SEM_REGS_SEG_NUM,
+ SEM_REGS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->sem_regs), "Sem Registers");
@@ -1176,12 +1174,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
}
ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
- WCS_RAM_SEG_NUM,
+ WCS_RAM_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->code_ram),
"WCS RAM");
status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
- CODE_RAM_ADDR, CODE_RAM_CNT);
+ CODE_RAM_ADDR, CODE_RAM_CNT);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of CODE RAM. Status = 0x%.08x\n",
@@ -1191,12 +1189,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Insert the segment header */
ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
- MEMC_RAM_SEG_NUM,
+ MEMC_RAM_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->memc_ram),
"MEMC RAM");
status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
- MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of MEMC RAM. Status = 0x%.08x\n",
@@ -1231,7 +1229,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
+ sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
mpi_coredump->mpi_global_header.headerSize =
sizeof(struct mpi_coredump_global_header);
@@ -1243,7 +1241,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* segment 16 */
ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
+ MISC_NIC_INFO_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->misc_nic_info),
"MISC NIC INFO");
@@ -1254,7 +1252,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* Segment 16, Rev C. Step 18 */
ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
+ NIC1_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_regs),
"NIC Registers");
@@ -1265,14 +1263,14 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* Segment 31 */
/* Get indexed register values. */
ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
+ INTR_STATES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->intr_states),
"INTR States");
ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
+ CAM_ENTRIES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->cam_entries),
"CAM Entries");
@@ -1281,18 +1279,18 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
return;
ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
+ ROUTING_WORDS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ &mpi_coredump->nic_routing_words[0]);
if (status)
return;
/* Segment 34 (Rev C. step 23) */
ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
+ ETS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ets),
"ETS Registers");
@@ -1630,6 +1628,7 @@ void ql_dump_qdev(struct ql_adapter *qdev)
DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
+ DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size);
}
#endif
@@ -1650,7 +1649,7 @@ void ql_dump_wqicb(struct wqicb *wqicb)
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
- if (tx_ring == NULL)
+ if (!tx_ring)
return;
pr_err("===================== Dumping tx_ring %d ===============\n",
tx_ring->wq_id);
@@ -1730,16 +1729,24 @@ void ql_dump_cqicb(struct cqicb *cqicb)
le16_to_cpu(cqicb->sbq_len));
}
+static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+
+ if (rx_ring->cq_id < qdev->rss_ring_count)
+ return "RX COMPLETION";
+ else
+ return "TX COMPLETION";
+};
+
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
- if (rx_ring == NULL)
+ if (!rx_ring)
return;
pr_err("===================== Dumping rx_ring %d ===============\n",
rx_ring->cq_id);
- pr_err("Dumping rx_ring %d, type = %s%s%s\n",
- rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
- rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
- rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+ pr_err("Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
+ qlge_rx_ring_type_name(rx_ring));
pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
pr_err("rx_ring->cq_base_dma = %llx\n",
@@ -1758,41 +1765,33 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
- pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
- pr_err("rx_ring->lbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_dma);
- pr_err("rx_ring->lbq_base_indirect = %p\n",
- rx_ring->lbq_base_indirect);
- pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_indirect_dma);
- pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
- pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
- pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
- pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
- rx_ring->lbq_prod_idx_db_reg);
- pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
- pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
+ pr_err("rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
+ pr_err("rx_ring->lbq.base_dma = %llx\n",
+ (unsigned long long)rx_ring->lbq.base_dma);
+ pr_err("rx_ring->lbq.base_indirect = %p\n",
+ rx_ring->lbq.base_indirect);
+ pr_err("rx_ring->lbq.base_indirect_dma = %llx\n",
+ (unsigned long long)rx_ring->lbq.base_indirect_dma);
+ pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue);
+ pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n",
+ rx_ring->lbq.prod_idx_db_reg);
+ pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
+ pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
- pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
-
- pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
- pr_err("rx_ring->sbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_dma);
- pr_err("rx_ring->sbq_base_indirect = %p\n",
- rx_ring->sbq_base_indirect);
- pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_indirect_dma);
- pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
- pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
- pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
- pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
- rx_ring->sbq_prod_idx_db_reg);
- pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
- pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
- pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
- pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
- pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
+
+ pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
+ pr_err("rx_ring->sbq.base_dma = %llx\n",
+ (unsigned long long)rx_ring->sbq.base_dma);
+ pr_err("rx_ring->sbq.base_indirect = %p\n",
+ rx_ring->sbq.base_indirect);
+ pr_err("rx_ring->sbq.base_indirect_dma = %llx\n",
+ (unsigned long long)rx_ring->sbq.base_indirect_dma);
+ pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue);
+ pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n",
+ rx_ring->sbq.prod_idx_db_reg);
+ pr_err("rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
+ pr_err("rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
pr_err("rx_ring->irq = %d\n", rx_ring->irq);
pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
@@ -1806,7 +1805,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
pr_err("%s: Enter\n", __func__);
ptr = kmalloc(size, GFP_ATOMIC);
- if (ptr == NULL)
+ if (!ptr)
return;
if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
@@ -1992,7 +1991,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
le16_to_cpu(ib_mac_rsp->vlan_id));
pr_err("flags4 = %s%s%s\n",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 6cae33072496..6ad4515311f7 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -167,9 +167,9 @@ void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
u32 temp;
- int count = UDELAY_COUNT;
+ int count;
- while (count) {
+ for (count = 0; count < UDELAY_COUNT; count++) {
temp = ql_read32(qdev, reg);
/* check for errors */
@@ -181,7 +181,6 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
} else if (temp & bit)
return 0;
udelay(UDELAY_DELAY);
- count--;
}
netif_alert(qdev, probe, qdev->ndev,
"Timed out waiting for reg %x to come ready.\n", reg);
@@ -193,17 +192,16 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
*/
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
- int count = UDELAY_COUNT;
+ int count;
u32 temp;
- while (count) {
+ for (count = 0; count < UDELAY_COUNT; count++) {
temp = ql_read32(qdev, CFG);
if (temp & CFG_LE)
return -EIO;
if (!(temp & bit))
return 0;
udelay(UDELAY_DELAY);
- count--;
}
return -ETIMEDOUT;
}
@@ -625,75 +623,26 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
-/* If we're running with multiple MSI-X vectors then we enable on the fly.
- * Otherwise, we may have multiple outstanding workers and don't want to
- * enable until the last one finishes. In this case, the irq_cnt gets
- * incremented every time we queue a worker and decremented every time
- * a worker finishes. Once it hits zero we enable the interrupt.
- */
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
- u32 var = 0;
- unsigned long hw_flags = 0;
- struct intr_context *ctx = qdev->intr_context + intr;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
- /* Always enable if we're MSIX multi interrupts and
- * it's not the default (zeroeth) interrupt.
- */
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- return var;
- }
+ struct intr_context *ctx = &qdev->intr_context[intr];
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- if (atomic_dec_and_test(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- }
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return var;
+ ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
}
-static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
- u32 var = 0;
- struct intr_context *ctx;
-
- /* HW disables for us if we're MSIX multi interrupts and
- * it's not the default (zeroeth) interrupt.
- */
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
- return 0;
+ struct intr_context *ctx = &qdev->intr_context[intr];
- ctx = qdev->intr_context + intr;
- spin_lock(&qdev->hw_lock);
- if (!atomic_read(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_dis_mask);
- var = ql_read32(qdev, STS);
- }
- atomic_inc(&ctx->irq_cnt);
- spin_unlock(&qdev->hw_lock);
- return var;
+ ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
}
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
int i;
- for (i = 0; i < qdev->intr_count; i++) {
- /* The enable call does a atomic_dec_and_test
- * and enables only if the result is zero.
- * So we precharge it here.
- */
- if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
- i == 0))
- atomic_set(&qdev->intr_context[i].irq_cnt, 1);
- ql_enable_completion_interrupt(qdev, i);
- }
+ for (i = 0; i < qdev->intr_count; i++)
+ ql_enable_completion_interrupt(qdev, i);
}
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
@@ -1027,48 +976,32 @@ static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
return PAGE_SIZE << qdev->lbq_buf_order;
}
-/* Get the next large buffer. */
-static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
{
- struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
- rx_ring->lbq_curr_idx++;
- if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_free_cnt++;
- return lbq_desc;
+ struct qlge_bq_desc *bq_desc;
+
+ bq_desc = &bq->queue[bq->next_to_clean];
+ bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
+
+ return bq_desc;
}
-static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
{
- struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+ struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(lbq_desc, mapaddr),
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
- /* If it's the last chunk of our master page then
- * we unmap it.
- */
- if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
- == ql_lbq_block_size(qdev))
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- return lbq_desc;
-}
+ if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
+ ql_lbq_block_size(qdev)) {
+ /* last chunk of the master page */
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ }
-/* Get the next small buffer. */
-static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
-{
- struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
- rx_ring->sbq_curr_idx++;
- if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_free_cnt++;
- return sbq_desc;
+ return lbq_desc;
}
/* Update an rx ring index. */
@@ -1087,178 +1020,192 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
-static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
- struct bq_desc *lbq_desc)
+static const char * const bq_type_name[] = {
+ [QLGE_SB] = "sbq",
+ [QLGE_LB] = "lbq",
+};
+
+/* return 0 or negative error */
+static int qlge_refill_sb(struct rx_ring *rx_ring,
+ struct qlge_bq_desc *sbq_desc, gfp_t gfp)
{
- if (!rx_ring->pg_chunk.page) {
- u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
- qdev->lbq_buf_order);
- if (unlikely(!rx_ring->pg_chunk.page)) {
- netif_err(qdev, drv, qdev->ndev,
- "page allocation failed.\n");
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct sk_buff *skb;
+
+ if (sbq_desc->p.skb)
+ return 0;
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u sbq: getting new skb for index %d.\n",
+ rx_ring->cq_id, sbq_desc->index);
+
+ skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, QLGE_SB_PAD);
+
+ sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
+ SMALL_BUF_MAP_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
+ netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+ *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
+
+ sbq_desc->p.skb = skb;
+ return 0;
+}
+
+/* return 0 or negative error */
+static int qlge_refill_lb(struct rx_ring *rx_ring,
+ struct qlge_bq_desc *lbq_desc, gfp_t gfp)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
+
+ if (!master_chunk->page) {
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
+ if (unlikely(!page))
return -ENOMEM;
- }
- rx_ring->pg_chunk.offset = 0;
- map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
- 0, ql_lbq_block_size(qdev),
+ dma_addr = pci_map_page(qdev->pdev, page, 0,
+ ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- __free_pages(rx_ring->pg_chunk.page,
- qdev->lbq_buf_order);
- rx_ring->pg_chunk.page = NULL;
+ if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
+ __free_pages(page, qdev->lbq_buf_order);
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
- return -ENOMEM;
+ return -EIO;
}
- rx_ring->pg_chunk.map = map;
- rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+ master_chunk->page = page;
+ master_chunk->va = page_address(page);
+ master_chunk->offset = 0;
+ rx_ring->chunk_dma_addr = dma_addr;
}
- /* Copy the current master pg_chunk info
- * to the current descriptor.
- */
- lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+ lbq_desc->p.pg_chunk = *master_chunk;
+ lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
+ *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
+ lbq_desc->p.pg_chunk.offset);
/* Adjust the master page chunk for next
* buffer get.
*/
- rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
- if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
- rx_ring->pg_chunk.page = NULL;
- lbq_desc->p.pg_chunk.last_flag = 1;
+ master_chunk->offset += qdev->lbq_buf_size;
+ if (master_chunk->offset == ql_lbq_block_size(qdev)) {
+ master_chunk->page = NULL;
} else {
- rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
- get_page(rx_ring->pg_chunk.page);
- lbq_desc->p.pg_chunk.last_flag = 0;
+ master_chunk->va += qdev->lbq_buf_size;
+ get_page(master_chunk->page);
}
+
return 0;
}
-/* Process (refill) a large buffer queue. */
-static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+
+/* return 0 or negative error */
+static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
{
- u32 clean_idx = rx_ring->lbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *lbq_desc;
- u64 map;
+ struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_bq_desc *bq_desc;
+ int refill_count;
+ int retval;
int i;
- while (rx_ring->lbq_free_cnt > 32) {
- for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- lbq_desc = &rx_ring->lbq[clean_idx];
- if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- rx_ring->lbq_clean_idx = clean_idx;
- netif_err(qdev, ifup, qdev->ndev,
- "Could not get a page chunk, i=%d, clean_idx =%d .\n",
- i, clean_idx);
- return;
- }
+ refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
+ bq->next_to_use);
+ if (!refill_count)
+ return 0;
+
+ i = bq->next_to_use;
+ bq_desc = &bq->queue[i];
+ i -= QLGE_BQ_LEN;
+ do {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u %s: try cleaning idx %d\n",
+ rx_ring->cq_id, bq_type_name[bq->type], i);
- map = lbq_desc->p.pg_chunk.map +
- lbq_desc->p.pg_chunk.offset;
- dma_unmap_addr_set(lbq_desc, mapaddr, map);
- dma_unmap_len_set(lbq_desc, maplen,
- rx_ring->lbq_buf_size);
- *lbq_desc->addr = cpu_to_le64(map);
-
- pci_dma_sync_single_for_device(qdev->pdev, map,
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
- clean_idx++;
- if (clean_idx == rx_ring->lbq_len)
- clean_idx = 0;
+ if (bq->type == QLGE_SB)
+ retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
+ else
+ retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
+ if (retval < 0) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "ring %u %s: Could not get a page chunk, idx %d\n",
+ rx_ring->cq_id, bq_type_name[bq->type], i);
+ break;
}
- rx_ring->lbq_clean_idx = clean_idx;
- rx_ring->lbq_prod_idx += 16;
- if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_free_cnt -= 16;
- }
+ bq_desc++;
+ i++;
+ if (unlikely(!i)) {
+ bq_desc = &bq->queue[0];
+ i -= QLGE_BQ_LEN;
+ }
+ refill_count--;
+ } while (refill_count);
+ i += QLGE_BQ_LEN;
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
- ql_write_db_reg(rx_ring->lbq_prod_idx,
- rx_ring->lbq_prod_idx_db_reg);
+ if (bq->next_to_use != i) {
+ if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u %s: updating prod idx = %d.\n",
+ rx_ring->cq_id, bq_type_name[bq->type],
+ i);
+ ql_write_db_reg(i, bq->prod_idx_db_reg);
+ }
+ bq->next_to_use = i;
}
+
+ return retval;
}
-/* Process (refill) a small buffer queue. */
-static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
+ unsigned long delay)
{
- u32 clean_idx = rx_ring->sbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *sbq_desc;
- u64 map;
- int i;
-
- while (rx_ring->sbq_free_cnt > 16) {
- for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
- sbq_desc = &rx_ring->sbq[clean_idx];
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- if (sbq_desc->p.skb == NULL) {
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
- sbq_desc->p.skb =
- netdev_alloc_skb(qdev->ndev,
- SMALL_BUFFER_SIZE);
- if (sbq_desc->p.skb == NULL) {
- rx_ring->sbq_clean_idx = clean_idx;
- return;
- }
- skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
- map = pci_map_single(qdev->pdev,
- sbq_desc->p.skb->data,
- rx_ring->sbq_buf_size,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev,
- "PCI mapping failed.\n");
- rx_ring->sbq_clean_idx = clean_idx;
- dev_kfree_skb_any(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- return;
- }
- dma_unmap_addr_set(sbq_desc, mapaddr, map);
- dma_unmap_len_set(sbq_desc, maplen,
- rx_ring->sbq_buf_size);
- *sbq_desc->addr = cpu_to_le64(map);
- }
+ bool sbq_fail, lbq_fail;
- clean_idx++;
- if (clean_idx == rx_ring->sbq_len)
- clean_idx = 0;
- }
- rx_ring->sbq_clean_idx = clean_idx;
- rx_ring->sbq_prod_idx += 16;
- if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_free_cnt -= 16;
- }
+ sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
+ lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
- ql_write_db_reg(rx_ring->sbq_prod_idx,
- rx_ring->sbq_prod_idx_db_reg);
- }
+ /* Minimum number of buffers needed to be able to receive at least one
+ * frame of any format:
+ * sbq: 1 for header + 1 for data
+ * lbq: mtu 9000 / lb size
+ * Below this, the queue might stall.
+ */
+ if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
+ (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
+ DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
+ /* Allocations can take a long time in certain cases (ex.
+ * reclaim). Therefore, use a workqueue for long-running
+ * work items.
+ */
+ queue_delayed_work_on(smp_processor_id(), system_long_wq,
+ &rx_ring->refill_work, delay);
}
-static void ql_update_buffer_queues(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static void qlge_slow_refill(struct work_struct *work)
{
- ql_update_sbq(qdev, rx_ring);
- ql_update_lbq(qdev, rx_ring);
+ struct rx_ring *rx_ring = container_of(work, struct rx_ring,
+ refill_work.work);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi_disable(napi);
+ ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
+ napi_enable(napi);
+
+ local_bh_disable();
+ /* napi_disable() might have prevented incomplete napi work from being
+ * rescheduled.
+ */
+ napi_schedule(napi);
+ /* trigger softirq processing */
+ local_bh_enable();
}
/* Unmaps tx buffers. Can be called from send() if a pci mapping
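
Two details of the refill path above are easy to miss: qlge_refill_bq() walks the ring with a biased counter (i runs from next_to_use - QLGE_BQ_LEN up towards zero, so reaching zero signals a wrap), and ql_update_buffer_queues() only falls back to the delayed refill_work when the hardware is left with fewer buffers than the smallest receivable frame needs (2 small buffers, or DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE) = 2 large buffers for a 9000-byte MTU). A stripped-down sketch of the biased-index walk, using the patch's qlge_bq_desc type and a hypothetical walk_ring() helper that is not part of the driver, reads:

/* Stand-alone sketch of the loop shape used by qlge_refill_bq(). The
 * counter is offset by -QLGE_BQ_LEN so a wrap of the ring is detected by
 * it reaching zero rather than by comparing against QLGE_BQ_LEN on every
 * iteration.
 */
static void walk_ring(struct qlge_bq_desc *queue, unsigned int next_to_use,
		      unsigned int count)
{
	struct qlge_bq_desc *desc = &queue[next_to_use];
	int i = (int)next_to_use - QLGE_BQ_LEN;	/* runs in [-QLGE_BQ_LEN, 0) */

	while (count--) {
		/* refill queue[i + QLGE_BQ_LEN], i.e. *desc, here */
		desc++;
		i++;
		if (!i) {			/* walked past the last entry */
			desc = &queue[0];
			i -= QLGE_BQ_LEN;
		}
	}
}
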
@@ -1495,7 +1442,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
u16 vlan_id)
{
struct sk_buff *skb;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
/* Frame error, so drop the packet. */
@@ -1544,7 +1491,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
void *addr;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
size_t hlen = ETH_HLEN;
@@ -1634,31 +1581,24 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
u32 length,
u16 vlan_id)
{
+ struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- struct sk_buff *new_skb = NULL;
- struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+ struct sk_buff *skb, *new_skb;
skb = sbq_desc->p.skb;
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
- if (new_skb == NULL) {
+ if (!new_skb) {
rx_ring->rx_dropped++;
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
skb_put_data(new_skb, skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
skb = new_skb;
/* Frame error, so drop the packet. */
@@ -1759,11 +1699,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
- struct bq_desc *lbq_desc;
- struct bq_desc *sbq_desc;
- struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ struct qlge_bq_desc *lbq_desc, *sbq_desc;
+ struct sk_buff *skb = NULL;
size_t hlen = ETH_HLEN;
/*
@@ -1776,11 +1715,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
/*
* Headers fit nicely into a small buffer.
*/
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
skb_put(skb, hdr_len);
@@ -1808,35 +1745,22 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* from the "data" small buffer to the "header" small
* buffer.
*/
- sbq_desc = ql_get_curr_sbuf(rx_ring);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr
- (sbq_desc, mapaddr),
- dma_unmap_len
- (sbq_desc, maplen),
+ sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
skb_put_data(skb, sbq_desc->p.skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr
- (sbq_desc,
- mapaddr),
- dma_unmap_len
- (sbq_desc,
- maplen),
- PCI_DMA_FROMDEVICE);
} else {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes in a single small buffer.\n",
length);
- sbq_desc = ql_get_curr_sbuf(rx_ring);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc,
- mapaddr),
- dma_unmap_len(sbq_desc,
- maplen),
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
sbq_desc->p.skb = NULL;
}
@@ -1868,15 +1792,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
- if (skb == NULL) {
+ if (!skb) {
netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
"No skb available, drop the packet.\n");
return NULL;
}
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(lbq_desc,
- mapaddr),
- dma_unmap_len(lbq_desc, maplen),
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size,
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1907,11 +1829,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* eventually be in trouble.
*/
int size, i = 0;
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
			 * This is a non-TCP/UDP IP frame, so
@@ -1931,8 +1851,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
do {
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- size = (length < rx_ring->lbq_buf_size) ? length :
- rx_ring->lbq_buf_size;
+ size = min(length, qdev->lbq_buf_size);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Adding page %d to skb for %d bytes.\n",
@@ -2286,7 +2205,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
if (count == budget)
break;
}
- ql_update_buffer_queues(qdev, rx_ring);
+ ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
ql_write_cq_idx(rx_ring);
return count;
}
@@ -2500,21 +2419,22 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
u32 var;
int work_done = 0;
- spin_lock(&qdev->hw_lock);
- if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "Shared Interrupt, Not ours!\n");
- spin_unlock(&qdev->hw_lock);
- return IRQ_NONE;
- }
- spin_unlock(&qdev->hw_lock);
+ /* Experience shows that when using INTx interrupts, interrupts must
+ * be masked manually.
+ * When using MSI mode, INTR_EN_EN must be explicitly disabled
+ * (even though it is auto-masked), otherwise a later command to
+ * enable it is not effective.
+ */
+ if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
+ ql_disable_completion_interrupt(qdev, 0);
- var = ql_disable_completion_interrupt(qdev, intr_context->intr);
+ var = ql_read32(qdev, STS);
/*
* Check for fatal error.
*/
if (var & STS_FE) {
+ ql_disable_completion_interrupt(qdev, 0);
ql_queue_asic_error(qdev);
netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
@@ -2534,7 +2454,6 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
netif_err(qdev, intr, qdev->ndev,
"Got MPI processor interrupt.\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
qdev->workqueue, &qdev->mpi_work, 0);
@@ -2550,11 +2469,18 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
if (var & intr_context->irq_mask) {
netif_info(qdev, intr, qdev->ndev,
"Waking handler for rx_ring[0].\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
+ } else {
+ /* Experience shows that the device sometimes signals an
+ * interrupt but no work is scheduled from this function.
+ * Nevertheless, the interrupt is auto-masked. Therefore, we
+ * systematically re-enable the interrupt if we didn't
+ * schedule napi.
+ */
+ ql_enable_completion_interrupt(qdev, 0);
}
- ql_enable_completion_interrupt(qdev, intr_context->intr);
+
return work_done ? IRQ_HANDLED : IRQ_NONE;
}
@@ -2737,7 +2663,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
qdev->rx_ring_shadow_reg_area =
pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->rx_ring_shadow_reg_dma);
- if (qdev->rx_ring_shadow_reg_area == NULL) {
+ if (!qdev->rx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of RX shadow space failed.\n");
return -ENOMEM;
@@ -2746,7 +2672,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
qdev->tx_ring_shadow_reg_area =
pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
- if (qdev->tx_ring_shadow_reg_area == NULL) {
+ if (!qdev->tx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
@@ -2798,14 +2724,14 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
&tx_ring->wq_base_dma);
- if ((tx_ring->wq_base == NULL) ||
+ if (!tx_ring->wq_base ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
goto pci_alloc_err;
tx_ring->q =
kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
GFP_KERNEL);
- if (tx_ring->q == NULL)
+ if (!tx_ring->q)
goto err;
return 0;
@@ -2820,54 +2746,46 @@ pci_alloc_err:
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
- struct bq_desc *lbq_desc;
+ struct qlge_bq *lbq = &rx_ring->lbq;
+ unsigned int last_offset;
- uint32_t curr_idx, clean_idx;
+ last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
+ while (lbq->next_to_clean != lbq->next_to_use) {
+ struct qlge_bq_desc *lbq_desc =
+ &lbq->queue[lbq->next_to_clean];
- curr_idx = rx_ring->lbq_curr_idx;
- clean_idx = rx_ring->lbq_clean_idx;
- while (curr_idx != clean_idx) {
- lbq_desc = &rx_ring->lbq[curr_idx];
-
- if (lbq_desc->p.pg_chunk.last_flag) {
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
+ if (lbq_desc->p.pg_chunk.offset == last_offset)
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
- lbq_desc->p.pg_chunk.last_flag = 0;
- }
-
put_page(lbq_desc->p.pg_chunk.page);
- lbq_desc->p.pg_chunk.page = NULL;
-
- if (++curr_idx == rx_ring->lbq_len)
- curr_idx = 0;
+ lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
}
- if (rx_ring->pg_chunk.page) {
- pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
- put_page(rx_ring->pg_chunk.page);
- rx_ring->pg_chunk.page = NULL;
+
+ if (rx_ring->master_chunk.page) {
+ pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ put_page(rx_ring->master_chunk.page);
+ rx_ring->master_chunk.page = NULL;
}
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
int i;
- struct bq_desc *sbq_desc;
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- if (sbq_desc == NULL) {
+ for (i = 0; i < QLGE_BQ_LEN; i++) {
+ struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
+
+ if (!sbq_desc) {
netif_err(qdev, ifup, qdev->ndev,
"sbq_desc %d is NULL.\n", i);
return;
}
if (sbq_desc->p.skb) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -2881,89 +2799,83 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
int i;
- struct rx_ring *rx_ring;
for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->lbq)
+ struct rx_ring *rx_ring = &qdev->rx_ring[i];
+
+ if (rx_ring->lbq.queue)
ql_free_lbq_buffers(qdev, rx_ring);
- if (rx_ring->sbq)
+ if (rx_ring->sbq.queue)
ql_free_sbq_buffers(qdev, rx_ring);
}
}
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
- struct rx_ring *rx_ring;
int i;
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->type != TX_Q)
- ql_update_buffer_queues(qdev, rx_ring);
- }
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
+ HZ / 2);
}
-static void ql_init_lbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static int qlge_init_bq(struct qlge_bq *bq)
{
+ struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_bq_desc *bq_desc;
+ __le64 *buf_ptr;
int i;
- struct bq_desc *lbq_desc;
- __le64 *bq = rx_ring->lbq_base;
- memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->lbq_len; i++) {
- lbq_desc = &rx_ring->lbq[i];
- memset(lbq_desc, 0, sizeof(*lbq_desc));
- lbq_desc->index = i;
- lbq_desc->addr = bq;
- bq++;
+ bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ &bq->base_dma);
+ if (!bq->base) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "ring %u %s allocation failed.\n", rx_ring->cq_id,
+ bq_type_name[bq->type]);
+ return -ENOMEM;
}
-}
-static void ql_init_sbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *sbq_desc;
- __le64 *bq = rx_ring->sbq_base;
+ bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
+ GFP_KERNEL);
+ if (!bq->queue)
+ return -ENOMEM;
- memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- memset(sbq_desc, 0, sizeof(*sbq_desc));
- sbq_desc->index = i;
- sbq_desc->addr = bq;
- bq++;
+ buf_ptr = bq->base;
+ bq_desc = &bq->queue[0];
+ for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
+ bq_desc->p.skb = NULL;
+ bq_desc->index = i;
+ bq_desc->buf_ptr = buf_ptr;
}
+
+ return 0;
}
static void ql_free_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
/* Free the small buffer queue. */
- if (rx_ring->sbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->sbq_size,
- rx_ring->sbq_base, rx_ring->sbq_base_dma);
- rx_ring->sbq_base = NULL;
+ if (rx_ring->sbq.base) {
+ pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ rx_ring->sbq.base, rx_ring->sbq.base_dma);
+ rx_ring->sbq.base = NULL;
}
/* Free the small buffer queue control blocks. */
- kfree(rx_ring->sbq);
- rx_ring->sbq = NULL;
+ kfree(rx_ring->sbq.queue);
+ rx_ring->sbq.queue = NULL;
/* Free the large buffer queue. */
- if (rx_ring->lbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->lbq_size,
- rx_ring->lbq_base, rx_ring->lbq_base_dma);
- rx_ring->lbq_base = NULL;
+ if (rx_ring->lbq.base) {
+ pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ rx_ring->lbq.base, rx_ring->lbq.base_dma);
+ rx_ring->lbq.base = NULL;
}
/* Free the large buffer queue control blocks. */
- kfree(rx_ring->lbq);
- rx_ring->lbq = NULL;
+ kfree(rx_ring->lbq.queue);
+ rx_ring->lbq.queue = NULL;
/* Free the rx queue. */
if (rx_ring->cq_base) {
@@ -2987,67 +2899,18 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
&rx_ring->cq_base_dma);
- if (rx_ring->cq_base == NULL) {
+ if (!rx_ring->cq_base) {
netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
return -ENOMEM;
}
- if (rx_ring->sbq_len) {
- /*
- * Allocate small buffer queue.
- */
- rx_ring->sbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
- &rx_ring->sbq_base_dma);
-
- if (rx_ring->sbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue allocation failed.\n");
- goto err_mem;
- }
-
- /*
- * Allocate small buffer queue control blocks.
- */
- rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL)
- goto err_mem;
-
- ql_init_sbq_ring(qdev, rx_ring);
- }
-
- if (rx_ring->lbq_len) {
- /*
- * Allocate large buffer queue.
- */
- rx_ring->lbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
- &rx_ring->lbq_base_dma);
-
- if (rx_ring->lbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue allocation failed.\n");
- goto err_mem;
- }
- /*
- * Allocate large buffer queue control blocks.
- */
- rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL)
- goto err_mem;
-
- ql_init_lbq_ring(qdev, rx_ring);
+ if (rx_ring->cq_id < qdev->rss_ring_count &&
+ (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
+ ql_free_rx_resources(qdev, rx_ring);
+ return -ENOMEM;
}
return 0;
-
-err_mem:
- ql_free_rx_resources(qdev, rx_ring);
- return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
@@ -3133,7 +2996,6 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
- u16 bq_len;
u64 tmp;
__le64 *base_indirect_ptr;
int page_entries;
@@ -3144,12 +3006,12 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
*rx_ring->prod_idx_sh_reg = 0;
shadow_reg += sizeof(u64);
shadow_reg_dma += sizeof(u64);
- rx_ring->lbq_base_indirect = shadow_reg;
- rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
- shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- rx_ring->sbq_base_indirect = shadow_reg;
- rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
+ rx_ring->lbq.base_indirect = shadow_reg;
+ rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ rx_ring->sbq.base_indirect = shadow_reg;
+ rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
/* PCI doorbell mem area + 0x00 for consumer index register */
rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
@@ -3160,16 +3022,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->valid_db_reg = doorbell_area + 0x04;
/* PCI doorbell mem area + 0x18 for large buffer consumer */
- rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
+ rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
/* PCI doorbell mem area + 0x1c */
- rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
+ rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
memset((void *)cqicb, 0, sizeof(struct cqicb));
cqicb->msix_vect = rx_ring->irq;
- bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
- cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
+ cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
+ LEN_CPP_CONT);
cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
@@ -3181,59 +3043,42 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->flags = FLAGS_LC | /* Load queue base address */
FLAGS_LV | /* Load MSI-X vector */
FLAGS_LI; /* Load irq delay values */
- if (rx_ring->lbq_len) {
+ if (rx_ring->cq_id < qdev->rss_ring_count) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
- tmp = (u64)rx_ring->lbq_base_dma;
- base_indirect_ptr = rx_ring->lbq_base_indirect;
+ tmp = (u64)rx_ring->lbq.base_dma;
+ base_indirect_ptr = rx_ring->lbq.base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- cqicb->lbq_addr =
- cpu_to_le64(rx_ring->lbq_base_indirect_dma);
- bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
- (u16) rx_ring->lbq_buf_size;
- cqicb->lbq_buf_size = cpu_to_le16(bq_len);
- bq_len = (rx_ring->lbq_len == 65536) ? 0 :
- (u16) rx_ring->lbq_len;
- cqicb->lbq_len = cpu_to_le16(bq_len);
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_clean_idx = 0;
- rx_ring->lbq_free_cnt = rx_ring->lbq_len;
- }
- if (rx_ring->sbq_len) {
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
+ cqicb->lbq_buf_size =
+ cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
+ cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
+ rx_ring->lbq.next_to_use = 0;
+ rx_ring->lbq.next_to_clean = 0;
+
cqicb->flags |= FLAGS_LS; /* Load sbq values */
- tmp = (u64)rx_ring->sbq_base_dma;
- base_indirect_ptr = rx_ring->sbq_base_indirect;
+ tmp = (u64)rx_ring->sbq.base_dma;
+ base_indirect_ptr = rx_ring->sbq.base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
cqicb->sbq_addr =
- cpu_to_le64(rx_ring->sbq_base_indirect_dma);
- cqicb->sbq_buf_size =
- cpu_to_le16((u16)(rx_ring->sbq_buf_size));
- bq_len = (rx_ring->sbq_len == 65536) ? 0 :
- (u16) rx_ring->sbq_len;
- cqicb->sbq_len = cpu_to_le16(bq_len);
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_clean_idx = 0;
- rx_ring->sbq_free_cnt = rx_ring->sbq_len;
- }
- switch (rx_ring->type) {
- case TX_Q:
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
- break;
- case RX_Q:
+ cpu_to_le64(rx_ring->sbq.base_indirect_dma);
+ cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
+ cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
+ rx_ring->sbq.next_to_use = 0;
+ rx_ring->sbq.next_to_clean = 0;
+ }
+ if (rx_ring->cq_id < qdev->rss_ring_count) {
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
@@ -3241,10 +3086,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
64);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
- break;
- default:
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Invalid rx_ring->type = %d.\n", rx_ring->type);
+ } else {
+ cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+ cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
}
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
@@ -3366,6 +3210,7 @@ msi:
}
}
qlge_irq_type = LEG_IRQ;
+ set_bit(QL_LEGACY_ENABLED, &qdev->flags);
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Running with legacy interrupts.\n");
}
@@ -3509,6 +3354,16 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
intr_context->intr_dis_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_DISABLE;
+ if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
+ /* Experience shows that when using INTx interrupts,
+ * the device does not always auto-mask INTR_EN_EN.
+ * Moreover, masking INTR_EN_EN manually does not
+ * immediately prevent interrupt generation.
+ */
+ intr_context->intr_en_mask |= INTR_EN_EI << 16 |
+ INTR_EN_EI;
+ intr_context->intr_dis_mask |= INTR_EN_EI << 16;
+ }
intr_context->intr_read_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
/*
@@ -3557,7 +3412,6 @@ static int ql_request_irq(struct ql_adapter *qdev)
ql_resolve_queues_to_irqs(qdev);
for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- atomic_set(&intr_context->irq_cnt, 0);
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
status = request_irq(qdev->msi_x_entry[i].vector,
intr_context->handler,
@@ -3591,12 +3445,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
goto err_irq;
netif_err(qdev, ifup, qdev->ndev,
- "Hooked intr %d, queue type %s, with name %s.\n",
- i,
- qdev->rx_ring[0].type == DEFAULT_Q ?
- "DEFAULT_Q" :
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ "Hooked intr 0, queue type RX_Q, with name %s.\n",
intr_context->name);
}
intr_context->hooked = 1;
@@ -4072,6 +3921,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
static int qlge_close(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ int i;
/* If we hit pci_channel_io_perm_failure
* failure condition, then we already
@@ -4089,21 +3939,31 @@ static int qlge_close(struct net_device *ndev)
*/
while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
msleep(1);
+
+ /* Make sure refill_work doesn't re-enable napi */
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
+
ql_adapter_down(qdev);
ql_release_adapter_resources(qdev);
return 0;
}
+static void qlge_set_lb_size(struct ql_adapter *qdev)
+{
+ if (qdev->ndev->mtu <= 1500)
+ qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
+ else
+ qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
+ qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
+}
+
static int ql_configure_rings(struct ql_adapter *qdev)
{
int i;
struct rx_ring *rx_ring;
struct tx_ring *tx_ring;
int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
- unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
-
- qdev->lbq_buf_order = get_order(lbq_buf_len);
/* In a perfect world we have one RSS ring for each CPU
* and each has it's own vector. To do that we ask for
@@ -4148,15 +4008,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->cq_len = qdev->rx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = NUM_LARGE_BUFFERS;
- rx_ring->lbq_size =
- rx_ring->lbq_len * sizeof(__le64);
- rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- rx_ring->sbq_len = NUM_SMALL_BUFFERS;
- rx_ring->sbq_size =
- rx_ring->sbq_len * sizeof(__le64);
- rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
- rx_ring->type = RX_Q;
+ rx_ring->lbq.type = QLGE_LB;
+ rx_ring->sbq.type = QLGE_SB;
+ INIT_DELAYED_WORK(&rx_ring->refill_work,
+ &qlge_slow_refill);
} else {
/*
* Outbound queue handles outbound completions only.
@@ -4165,13 +4020,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->cq_len = qdev->tx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = 0;
- rx_ring->lbq_size = 0;
- rx_ring->lbq_buf_size = 0;
- rx_ring->sbq_len = 0;
- rx_ring->sbq_size = 0;
- rx_ring->sbq_buf_size = 0;
- rx_ring->type = TX_Q;
}
}
return 0;
@@ -4186,6 +4034,7 @@ static int qlge_open(struct net_device *ndev)
if (err)
return err;
+ qlge_set_lb_size(qdev);
err = ql_configure_rings(qdev);
if (err)
return err;
@@ -4207,9 +4056,7 @@ error_up:
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
- struct rx_ring *rx_ring;
- int i, status;
- u32 lbq_buf_len;
+ int status;
/* Wait for an outstanding reset to complete. */
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
@@ -4232,16 +4079,7 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
if (status)
goto error;
- /* Get the new rx buffer size. */
- lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
- qdev->lbq_buf_order = get_order(lbq_buf_len);
-
- for (i = 0; i < qdev->rss_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- /* Set the new size. */
- rx_ring->lbq_buf_size = lbq_buf_len;
- }
+ qlge_set_lb_size(qdev);
status = ql_adapter_up(qdev);
if (status)
@@ -4642,13 +4480,12 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
goto err_out2;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
- spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
if (qlge_mpi_coredump) {
qdev->mpi_coredump =
vmalloc(sizeof(struct ql_mpi_coredump));
- if (qdev->mpi_coredump == NULL) {
+ if (!qdev->mpi_coredump) {
err = -ENOMEM;
goto err_out2;
}
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 957c72985a06..9e422bbbb6ab 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -1257,7 +1257,6 @@ void ql_mpi_work(struct work_struct *work)
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
mutex_unlock(&qdev->mpi_mutex);
- ql_enable_completion_interrupt(qdev, 0);
}
void ql_mpi_reset_work(struct work_struct *work)
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index 900424db9b97..eabf1093328e 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -796,7 +796,6 @@ static int gdma_dma_probe(struct platform_device *pdev)
struct gdma_dma_dev *dma_dev;
struct dma_device *dd;
unsigned int i;
- struct resource *res;
int ret;
int irq;
void __iomem *base;
@@ -818,8 +817,7 @@ static int gdma_dma_probe(struct platform_device *pdev)
return -EINVAL;
dma_dev->data = data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
dma_dev->base = base;
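
A side note on the ralink-gdma hunk, not part of the patch itself: devm_platform_ioremap_resource() is a helper that combines platform_get_resource() and devm_ioremap_resource(). A minimal sketch of the two equivalent forms in a hypothetical probe; the "foo" names are made up for illustration.

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* old, open-coded form:
	 *   struct resource *res;
	 *   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* the mapping is released automatically when the device goes away */
	return 0;
}
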
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 51a5b71f8c25..88e42cc1d837 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -440,15 +440,9 @@ static void update_bmc_sta(struct adapter *padapter)
tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i] & 0x7f);
}
- if (pcur_network->Configuration.DSConfig > 14) {
- /* force to A mode. 5G doesn't support CCK rates */
- network_type = WIRELESS_11A;
- tx_ra_bitmap = 0x150; /* 6, 12, 24 Mbps */
- } else {
- /* force to b mode */
- network_type = WIRELESS_11B;
- tx_ra_bitmap = 0xf;
- }
+ /* force to b mode */
+ network_type = WIRELESS_11B;
+ tx_ra_bitmap = 0xf;
raid = networktype_to_raid(network_type);
init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
@@ -560,29 +554,24 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
static void update_hw_ht_param(struct adapter *padapter)
{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u8 max_ampdu_len;
+ u8 min_mpdu_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
DBG_88E("%s\n", __func__);
- /* handle A-MPDU parameter field */
- /*
- ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- ampdu_params_info [4:2]:Min MPDU Start Spacing
- */
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
-
- min_MPDU_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+ /* handle A-MPDU parameter field
+ * ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ * ampdu_params_info [4:2]:Min MPDU Start Spacing
+ */
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ min_mpdu_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, &min_mpdu_spacing);
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, &max_ampdu_len);
- /* */
- /* Config SM Power Save setting */
- /* */
+ /* Config SM Power Save setting */
pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & 0x0C) >> 2;
if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 02c476f45b33..d9b0f9e6235c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -25,7 +25,7 @@ enum{
* When we want to enable write operation, we should change to pwr on state.
* When we stop write, we should switch to 500k mode and disable LDO 2.5V.
*/
-void efuse_power_switch(struct adapter *pAdapter, u8 write, u8 pwrstate)
+static void efuse_power_switch(struct adapter *pAdapter, u8 write, u8 pwrstate)
{
u8 tempval;
u16 tmpv16;
@@ -615,10 +615,9 @@ static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuse
static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt)
{
u16 efuse_addr = *pAddr;
- u8 badworden = 0;
+ u8 badworden;
u32 PgWriteSuccess = 0;
- badworden = 0x0f;
badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pTargetPkt->word_en, pTargetPkt->data);
if (badworden == 0x0F) {
/* write ok */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 1ec3b237212e..e764436e120f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -2045,9 +2045,9 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
while (1) {
do_join_r = rtw_do_join(padapter);
- if (do_join_r == _SUCCESS) {
+ if (do_join_r == _SUCCESS)
break;
- }
+
DBG_88E("roaming do_join return %d\n", do_join_r);
pmlmepriv->to_roaming--;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 18dc9fc1c04a..e984b4605e91 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -507,7 +507,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pwps_ie = rtw_get_wps_ie(cur_network->ies+_FIXED_IE_LENGTH_, cur_network->ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
	/* insert & update wps_probe_resp_ie */
- if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie && (wps_ielen > 0)) {
+ if (pmlmepriv->wps_probe_resp_ie && pwps_ie && wps_ielen > 0) {
uint wps_offset, remainder_ielen;
u8 *premainder_ie;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 7b16632048b7..03dc7e5fcc38 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -514,7 +514,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
else
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE);
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -621,7 +621,7 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
else
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE);
}
} else {
ret = -EINVAL;
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 91a30142c567..73f2cb5ebaa6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -83,7 +83,8 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
INIT_LIST_HEAD(&pstapriv->sta_hash[i]);
- list_add_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
+ list_add_tail(&psta->list,
+ get_list_head(&pstapriv->free_sta_queue));
psta++;
}
@@ -134,31 +135,30 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
struct recv_reorder_ctrl *preorder_ctrl;
int index;
- if (pstapriv) {
- /* delete all reordering_ctrl_timer */
- spin_lock_bh(&pstapriv->sta_hash_lock);
- for (index = 0; index < NUM_STA; index++) {
- phead = &pstapriv->sta_hash[index];
- plist = phead->next;
+ if (!pstapriv)
+ return _SUCCESS;
- while (phead != plist) {
- int i;
+ /* delete all reordering_ctrl_timer */
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &pstapriv->sta_hash[index];
+ plist = phead->next;
- psta = container_of(plist, struct sta_info,
- hash_list);
- plist = plist->next;
+ while (phead != plist) {
+ int i;
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
- }
+ psta = container_of(plist, struct sta_info, hash_list);
+ plist = plist->next;
+
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
}
}
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- /*===============================*/
-
- vfree(pstapriv->pallocated_stainfo_buf);
}
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ vfree(pstapriv->pallocated_stainfo_buf);
return _SUCCESS;
}
@@ -167,7 +167,7 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
s32 index;
struct list_head *phash_list;
- struct sta_info *psta;
+ struct sta_info *psta;
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
int i = 0;
@@ -180,65 +180,70 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
struct sta_info, list);
if (!psta) {
spin_unlock_bh(&pfree_sta_queue->lock);
- } else {
- list_del_init(&psta->list);
- spin_unlock_bh(&pfree_sta_queue->lock);
- _rtw_init_stainfo(psta);
- memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
- index = wifi_mac_hash(hwaddr);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_, ("%s: index=%x", __func__, index));
- if (index >= NUM_STA) {
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("ERROR => %s: index >= NUM_STA", __func__));
- psta = NULL;
- goto exit;
- }
- phash_list = &pstapriv->sta_hash[index];
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- list_add_tail(&psta->hash_list, phash_list);
- pstapriv->asoc_sta_count++;
- spin_unlock_bh(&pstapriv->sta_hash_lock);
+ return NULL;
+ }
-/* Commented by Albert 2009/08/13 */
-/* For the SMC router, the sequence number of first packet of WPS handshake will be 0. */
-/* In this case, this packet will be dropped by recv_decache function if we use the 0x00 as the default value for tid_rxseq variable. */
-/* So, we initialize the tid_rxseq variable as the 0xffff. */
+ list_del_init(&psta->list);
+ spin_unlock_bh(&pfree_sta_queue->lock);
+ _rtw_init_stainfo(psta);
+ memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
+ index = wifi_mac_hash(hwaddr);
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("%s: index=%x", __func__, index));
+ if (index >= NUM_STA) {
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("ERROR => %s: index >= NUM_STA", __func__));
+ return NULL;
+ }
+ phash_list = &pstapriv->sta_hash[index];
- for (i = 0; i < 16; i++)
- memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ list_add_tail(&psta->hash_list, phash_list);
+ pstapriv->asoc_sta_count++;
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
- ("alloc number_%d stainfo with hwaddr = %pM\n",
- pstapriv->asoc_sta_count, hwaddr));
+ /* Commented by Albert 2009/08/13
+ * For the SMC router, the sequence number of first packet of
+ * WPS handshake will be 0. In this case, this packet will be
+ * dropped by recv_decache function if we use the 0x00 as the
+ * default value for tid_rxseq variable. So, we initialize the
+ * tid_rxseq variable as the 0xffff.
+ */
- init_addba_retry_timer(pstapriv->padapter, psta);
+ for (i = 0; i < 16; i++)
+ memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i],
+ &wRxSeqInitialValue, 2);
- /* for A-MPDU Rx reordering buffer control */
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("alloc number_%d stainfo with hwaddr = %pM\n",
+ pstapriv->asoc_sta_count, hwaddr));
- preorder_ctrl->padapter = pstapriv->padapter;
+ init_addba_retry_timer(pstapriv->padapter, psta);
- preorder_ctrl->enable = false;
+ /* for A-MPDU Rx reordering buffer control */
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- preorder_ctrl->wsize_b = 64;/* 64; */
+ preorder_ctrl->padapter = pstapriv->padapter;
- _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+ preorder_ctrl->enable = false;
- rtw_init_recv_timer(preorder_ctrl);
- }
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* 64; */
- /* init for DM */
- psta->rssi_stat.UndecoratedSmoothedPWDB = -1;
- psta->rssi_stat.UndecoratedSmoothedCCK = -1;
+ _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
- /* init for the sequence number of received management frame */
- psta->RxMgmtFrameSeqNum = 0xffff;
+ rtw_init_recv_timer(preorder_ctrl);
}
-exit:
+ /* init for DM */
+ psta->rssi_stat.UndecoratedSmoothedPWDB = -1;
+ psta->rssi_stat.UndecoratedSmoothedCCK = -1;
+
+ /* init for the sequence number of received management frame */
+ psta->RxMgmtFrameSeqNum = 0xffff;
+
return psta;
}
@@ -296,7 +301,9 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
del_timer_sync(&psta->addba_retry_timer);
- /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
+ /* for A-MPDU Rx reordering buffer control, cancel
+ * reordering_ctrl_timer
+ */
for (i = 0; i < 16; i++) {
struct list_head *phead, *plist;
struct recv_frame *prframe;
@@ -311,7 +318,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&ppending_recvframe_queue->lock);
- phead = get_list_head(ppending_recvframe_queue);
+ phead = get_list_head(ppending_recvframe_queue);
plist = phead->next;
while (!list_empty(phead)) {
@@ -444,23 +451,21 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
{
struct sta_info *psta;
- u32 res = _SUCCESS;
- unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
- psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
+ psta = rtw_alloc_stainfo(pstapriv, bc_addr);
if (!psta) {
- res = _FAIL;
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("rtw_alloc_stainfo fail"));
- goto exit;
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("rtw_alloc_stainfo fail"));
+ return _FAIL;
}
/* default broadcast & multicast use macid 1 */
psta->mac_id = 1;
-exit:
- return res;
+ return _SUCCESS;
}
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
@@ -471,13 +476,13 @@ struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
return rtw_get_stainfo(pstapriv, bc_addr);
}
-u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
+bool rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
- u8 res = true;
+ bool res = true;
#ifdef CONFIG_88EU_AP_MODE
struct list_head *plist, *phead;
struct rtw_wlan_acl_node *paclnode;
- u8 match = false;
+ bool match = false;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
@@ -499,9 +504,9 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_unlock_bh(&pacl_node_q->lock);
if (pacl_list->mode == 1)/* accept unless in deny list */
- res = (match) ? false : true;
+ res = !match;
else if (pacl_list->mode == 2)/* deny unless in accept list */
- res = (match) ? true : false;
+ res = match;
else
res = true;
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index c985b1468d41..af8a79ce8736 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -667,7 +667,7 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var
void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
{
unsigned int i;
- u8 max_AMPDU_len, min_MPDU_spacing;
+ u8 max_ampdu_len, min_mpdu_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -689,16 +689,16 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
} else {
/* modify from fw by Thomas 2010/11/17 */
if ((pmlmeinfo->HT_caps.ampdu_params_info & 0x3) > (pIE->data[i] & 0x3))
- max_AMPDU_len = pIE->data[i] & 0x3;
+ max_ampdu_len = pIE->data[i] & 0x3;
else
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x3;
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x3;
if ((pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) > (pIE->data[i] & 0x1c))
- min_MPDU_spacing = pmlmeinfo->HT_caps.ampdu_params_info & 0x1c;
+ min_mpdu_spacing = pmlmeinfo->HT_caps.ampdu_params_info & 0x1c;
else
- min_MPDU_spacing = pIE->data[i] & 0x1c;
+ min_mpdu_spacing = pIE->data[i] & 0x1c;
- pmlmeinfo->HT_caps.ampdu_params_info = max_AMPDU_len | min_MPDU_spacing;
+ pmlmeinfo->HT_caps.ampdu_params_info = max_ampdu_len | min_mpdu_spacing;
}
}
@@ -729,8 +729,8 @@ void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
void HTOnAssocRsp(struct adapter *padapter)
{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
+ u8 max_ampdu_len;
+ u8 min_mpdu_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -748,13 +748,11 @@ void HTOnAssocRsp(struct adapter *padapter)
* AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
* AMPDU_para [4:2]:Min MPDU Start Spacing
*/
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ min_mpdu_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
- min_MPDU_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, &min_mpdu_spacing);
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, &max_ampdu_len);
}
void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 952f2ab51347..c37591657bac 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -776,7 +776,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -784,7 +784,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 47352f210c0b..7646167a0b36 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -47,8 +47,6 @@ static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num)
******************************************/
static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
{
- u8 bcmd_down = false;
- s32 retry_cnts = 100;
u8 h2c_box_num;
u32 msgbox_addr;
u32 msgbox_ex_addr;
@@ -71,39 +69,34 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
goto exit;
/* pay attention to if race condition happened in H2C cmd setting. */
- do {
- h2c_box_num = adapt->HalData->LastHMEBoxNum;
-
- if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
- DBG_88E(" fw read cmd failed...\n");
- goto exit;
- }
-
- *(u8 *)(&h2c_cmd) = ElementID;
-
- if (CmdLen <= 3) {
- memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, CmdLen);
- } else {
- memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, 3);
- ext_cmd_len = CmdLen-3;
- memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer+3, ext_cmd_len);
+ h2c_box_num = adapt->HalData->LastHMEBoxNum;
- /* Write Ext command */
- msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
- for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++)
- usb_write8(adapt, msgbox_ex_addr+cmd_idx, *((u8 *)(&h2c_cmd_ex)+cmd_idx));
- }
- /* Write command */
- msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
- for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++)
- usb_write8(adapt, msgbox_addr+cmd_idx, *((u8 *)(&h2c_cmd)+cmd_idx));
+ if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
+ DBG_88E(" fw read cmd failed...\n");
+ goto exit;
+ }
- bcmd_down = true;
+ *(u8 *)(&h2c_cmd) = ElementID;
- adapt->HalData->LastHMEBoxNum =
- (h2c_box_num+1) % RTL88E_MAX_H2C_BOX_NUMS;
+ if (CmdLen <= 3) {
+ memcpy((u8 *)(&h2c_cmd) + 1, pCmdBuffer, CmdLen);
+ } else {
+ memcpy((u8 *)(&h2c_cmd) + 1, pCmdBuffer, 3);
+ ext_cmd_len = CmdLen - 3;
+ memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer + 3, ext_cmd_len);
+
+ /* Write Ext command */
+ msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++)
+ usb_write8(adapt, msgbox_ex_addr + cmd_idx, *((u8 *)(&h2c_cmd_ex) + cmd_idx));
+ }
+ /* Write command */
+ msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++)
+ usb_write8(adapt, msgbox_addr + cmd_idx, *((u8 *)(&h2c_cmd) + cmd_idx));
- } while ((!bcmd_down) && (retry_cnts--));
+ adapt->HalData->LastHMEBoxNum =
+ (h2c_box_num + 1) % RTL88E_MAX_H2C_BOX_NUMS;
ret = _SUCCESS;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 086f98d38cba..57ae0e83dd3e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -552,7 +552,6 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
pHalData->AntDivCfg = 1; /* 0xC1[3] is ignored. */
} else {
pHalData->AntDivCfg = 0;
- pHalData->TRxAntDivType = pHalData->TRxAntDivType; /* The value in the driver setting of device manager. */
}
DBG_88E("EEPROM : AntDivCfg = %x, TRxAntDivType = %x\n", pHalData->AntDivCfg, pHalData->TRxAntDivType);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index c0d51ba70a75..1cf8cff9a2a4 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -22,8 +22,7 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
int i, res = _SUCCESS;
struct recv_buf *precvbuf;
- tasklet_init(&precvpriv->recv_tasklet,
- (void(*)(unsigned long))rtl8188eu_recv_tasklet,
+ tasklet_init(&precvpriv->recv_tasklet, rtl8188eu_recv_tasklet,
(unsigned long)padapter);
/* init recv_buf */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index ab94ad9d608a..2808f2b119bf 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -17,8 +17,7 @@ s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
- tasklet_init(&pxmitpriv->xmit_tasklet,
- (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
+ tasklet_init(&pxmitpriv->xmit_tasklet, rtl8188eu_xmit_tasklet,
(unsigned long)adapt);
return _SUCCESS;
}
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index c2c7ef974dc5..23251ffa8404 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -43,7 +43,7 @@ enum rx_packet_type {
};
#define INTERRUPT_MSG_FORMAT_LEN 60
-void rtl8188eu_recv_tasklet(void *priv);
+void rtl8188eu_recv_tasklet(unsigned long priv);
void rtl8188e_process_phy_info(struct adapter *padapter,
struct recv_frame *prframe);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 421e9f45306f..c6c2ad20d9cf 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -148,7 +148,7 @@ void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
s32 rtl8188eu_init_xmit_priv(struct adapter *padapter);
s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
#define hal_xmit_handler rtl8188eu_xmit_buf_handler
-void rtl8188eu_xmit_tasklet(void *priv);
+void rtl8188eu_xmit_tasklet(unsigned long priv);
bool rtl8188eu_xmitframe_complete(struct adapter *padapter,
struct xmit_priv *pxmitpriv);
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 3ec53761e9fd..7a9c8ff0daa9 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -82,7 +82,6 @@ u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data);
void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset,
u16 _size_byte, u8 *pbuf);
-void efuse_power_switch(struct adapter *adapt, u8 write, u8 pwrstate);
int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data);
bool Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data);
void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index dc685a14aeb8..6165adafc451 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -354,6 +354,6 @@ void rtw_free_all_stainfo(struct adapter *adapt);
struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
-u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
+bool rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
#endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index ec5835d1aa8c..710c33fd4965 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -148,17 +148,10 @@ static char *translate_scan(struct adapter *padapter,
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
} else {
- if (pnetwork->network.Configuration.DSConfig > 14) {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11an");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a");
- } else {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
- }
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
}
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
@@ -650,17 +643,10 @@ static int rtw_wx_get_name(struct net_device *dev,
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
} else {
- if (pcur_bss->Configuration.DSConfig > 14) {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11an");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11a");
- } else {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
- }
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
}
} else {
snprintf(wrqu->name, IFNAMSIZ, "unassociated");
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index aaab0d577453..3cd6da1f843d 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -773,10 +773,10 @@ void usb_write_port_cancel(struct adapter *padapter)
}
}
-void rtl8188eu_recv_tasklet(void *priv)
+void rtl8188eu_recv_tasklet(unsigned long priv)
{
struct sk_buff *pskb;
- struct adapter *adapt = priv;
+ struct adapter *adapt = (struct adapter *)priv;
struct recv_priv *precvpriv = &adapt->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
@@ -792,9 +792,9 @@ void rtl8188eu_recv_tasklet(void *priv)
}
}
-void rtl8188eu_xmit_tasklet(void *priv)
+void rtl8188eu_xmit_tasklet(unsigned long priv)
{
- struct adapter *adapt = priv;
+ struct adapter *adapt = (struct adapter *)priv;
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
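
For context on the tasklet changes in the rtl8188eu files above (and the rtl8192e/rtl8192u ones further down), not part of the patch: tasklet handlers of this era take an unsigned long data word, so matching that prototype removes the function-pointer cast at tasklet_init() time. A small standalone sketch with made-up "demo" names:

#include <linux/interrupt.h>

struct demo_priv {
	struct tasklet_struct rx_tasklet;
};

static void demo_rx_tasklet(unsigned long data)
{
	/* the data word passed to tasklet_init() comes back here */
	struct demo_priv *priv = (struct demo_priv *)data;

	/* ... drain priv's rx queue in softirq context ... */
	(void)priv;
}

static void demo_setup(struct demo_priv *priv)
{
	/* no cast on the handler; its prototype already matches */
	tasklet_init(&priv->rx_tasklet, demo_rx_tasklet,
		     (unsigned long)priv);
}
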
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index ef92ce957466..980b850d729a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1686,11 +1686,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
static u32 last_beacon_adc_pwdb;
struct rtllib_hdr_3addr *hdr;
u16 sc;
- unsigned int frag, seq;
+ unsigned int seq;
hdr = (struct rtllib_hdr_3addr *)buffer;
sc = le16_to_cpu(hdr->seq_ctl);
- frag = WLAN_GET_SEQ_FRAG(sc);
seq = WLAN_GET_SEQ_SEQ(sc);
curr_st->Seq_Num = seq;
if (!prev_st->bIsAMPDU)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index f932cb15e4e5..dace81a7d1ba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -715,8 +715,8 @@ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
if ((wireless_mode == WIRELESS_MODE_N_24G) ||
(wireless_mode == WIRELESS_MODE_N_5G)) {
priv->rtllib->pHTInfo->bEnableHT = 1;
- RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
- __func__, wireless_mode);
+ RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
+ __func__, wireless_mode);
} else {
priv->rtllib->pHTInfo->bEnableHT = 0;
RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 0\n",
@@ -1616,14 +1616,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb_push(skb, priv->rtllib->tx_headroom);
ret = _rtl92e_tx(dev, skb);
- if (ret != 0)
- kfree_skb(skb);
if (queue_index != MGNT_QUEUE) {
priv->rtllib->stats.tx_bytes += (skb->len -
priv->rtllib->tx_headroom);
priv->rtllib->stats.tx_packets++;
}
+
+ if (ret != 0)
+ kfree_skb(skb);
}
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index f2f7529e7c80..6e2f620afd14 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2044,8 +2044,9 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
}
-static inline void rtllib_sta_ps(struct rtllib_device *ieee)
+static inline void rtllib_sta_ps(unsigned long data)
{
+ struct rtllib_device *ieee = (struct rtllib_device *)data;
u64 time;
short sleep;
unsigned long flags, flags2;
@@ -3027,9 +3028,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_init(&ieee->ps_task,
- (void(*)(unsigned long)) rtllib_sta_ps,
- (unsigned long)ieee);
+ tasklet_init(&ieee->ps_task, rtllib_sta_ps, (unsigned long)ieee);
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 5c33bcb0db2e..00fea127bdc3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1620,7 +1620,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
for (i = 0; i < network->rates_len; i++) {
network->rates[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates[i]);
#endif
@@ -1647,7 +1647,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
for (i = 0; i < network->rates_ex_len; i++) {
network->rates_ex[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates_ex[i]);
#endif
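snprintf() returns the length the output would have had without truncation, so accumulating its return value with p += snprintf(p, remaining, ...) can push p past the end of rates_str once the buffer fills; scnprintf() returns the number of characters actually stored, which keeps the running offset inside the buffer. A small userspace demo of the difference, assuming a scnprintf-like wrapper since the kernel helper is not available outside the kernel:

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): returns the number of
 * characters actually written (excluding the trailing NUL), never more
 * than size - 1. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char rates_str[16];	/* deliberately too small so truncation occurs */
	char *p = rates_str;
	unsigned char rates[12] = { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12,
				    0x18, 0x24, 0x30, 0x48, 0x60, 0x6c };

	/* With plain snprintf() here, p could end up pointing past rates_str
	 * once the buffer fills; with the clamped return value it cannot. */
	for (size_t i = 0; i < sizeof(rates); i++)
		p += my_scnprintf(p, sizeof(rates_str) - (p - rates_str),
				  "%02X ", rates[i]);

	printf("\"%s\" (used %td of %zu bytes)\n",
	       rates_str, p - rates_str, sizeof(rates_str));
	return 0;
}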
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 33a6af7aad22..90692db81b71 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1683,8 +1683,9 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 1;
}
-static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
+static inline void ieee80211_sta_ps(unsigned long data)
{
+ struct ieee80211_device *ieee = (struct ieee80211_device *)data;
u32 th, tl;
short sleep;
@@ -2331,7 +2332,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
/* ensure no-one start an associating process (thus setting
* the ieee->state to ieee80211_ASSOCIATING) while we
- * have just cheked it and we are going to enable scan.
+ * have just checked it and we are going to enable scan.
* The ieee80211_new_net function is always called with
* lock held (from both ieee80211_softmac_check_all_nets and
* the rx path), so we cannot be in the middle of such function
@@ -2593,9 +2594,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_init(&ieee->ps_task,
- (void(*)(unsigned long)) ieee80211_sta_ps,
- (unsigned long)ieee);
+ tasklet_init(&ieee->ps_task, ieee80211_sta_ps, (unsigned long)ieee);
}
void ieee80211_softmac_free(struct ieee80211_device *ieee)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 9dd5c04181ea..33c596f9ec96 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -109,7 +109,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
/* Add basic and extended rates */
max_rate = 0;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
for (i = 0, j = 0; i < network->rates_len; ) {
if (j < network->rates_ex_len &&
((network->rates_ex[j] & 0x7F) <
@@ -119,12 +119,12 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
rate = network->rates[i++] & 0x7F;
if (rate > max_rate)
max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
}
for (; j < network->rates_ex_len; j++) {
rate = network->rates_ex[j] & 0x7F;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
if (rate > max_rate)
max_rate = rate;
@@ -214,7 +214,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
* for given network. */
iwe.cmd = IWEVCUSTOM;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index b169460b9f26..63e0f7b1b852 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -43,8 +43,8 @@ void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Band
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
if (priv->card_8192_version == VERSION_819XU_A ||
- priv->card_8192_version ==
- VERSION_819XU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
+ priv->card_8192_version == VERSION_819XU_B) {
+ /* 8256 D-cut, E-cut, xiong: consider it later! */
rtl8192_phy_SetRFReg(dev,
(enum rf90_radio_path_e)eRFPath,
0x0b, bMask12Bits, 0x100); /* phy para:1ba */
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 2821411878ce..7e2cabd16e88 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -98,8 +98,6 @@ static char *ifname = "wlan%d";
static int hwwep = 1; /* default use hw. set 0 to use software security */
static int channels = 0x3fff;
-
-
module_param(ifname, charp, 0644);
module_param(hwwep, int, 0644);
module_param(channels, int, 0644);
@@ -112,7 +110,6 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void rtl8192_usb_disconnect(struct usb_interface *intf);
-
static struct usb_driver rtl8192_usb_driver = {
.name = RTL819XU_MODULE_NAME, /* Driver name */
.id_table = rtl8192_usb_id_tbl, /* PCI_ID table */
@@ -122,7 +119,6 @@ static struct usb_driver rtl8192_usb_driver = {
.resume = NULL, /* PM resume fn */
};
-
struct CHANNEL_LIST {
u8 Channel[32];
u8 Len;
@@ -207,9 +203,6 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv *priv)
}
}
-
-
-
static void CamResetAllEntry(struct net_device *dev)
{
u32 ulcommand = 0;
@@ -297,7 +290,6 @@ int write_nic_byte(struct net_device *dev, int indx, u8 data)
return 0;
}
-
int write_nic_word(struct net_device *dev, int indx, u16 data)
{
int status;
@@ -324,7 +316,6 @@ int write_nic_word(struct net_device *dev, int indx, u16 data)
return 0;
}
-
int write_nic_dword(struct net_device *dev, int indx, u32 data)
{
int status;
@@ -343,7 +334,6 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data)
usbdata, 4, HZ / 2);
kfree(usbdata);
-
if (status < 0) {
netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
return status;
@@ -352,8 +342,6 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data)
return 0;
}
-
-
int read_nic_byte(struct net_device *dev, int indx, u8 *data)
{
int status;
@@ -379,8 +367,6 @@ int read_nic_byte(struct net_device *dev, int indx, u8 *data)
return 0;
}
-
-
int read_nic_word(struct net_device *dev, int indx, u16 *data)
{
int status;
@@ -628,13 +614,13 @@ static void rtl8192_proc_init_one(struct net_device *dev)
return;
proc_create_single("stats-rx", S_IFREG | S_IRUGO, dir,
- proc_get_stats_rx);
+ proc_get_stats_rx);
proc_create_single("stats-tx", S_IFREG | S_IRUGO, dir,
- proc_get_stats_tx);
+ proc_get_stats_tx);
proc_create_single("stats-ap", S_IFREG | S_IRUGO, dir,
- proc_get_stats_ap);
+ proc_get_stats_ap);
proc_create_single("registers", S_IFREG | S_IRUGO, dir,
- proc_get_registers);
+ proc_get_registers);
}
static void rtl8192_proc_remove_one(struct net_device *dev)
@@ -788,7 +774,6 @@ void rtl8192_set_rxconf(struct net_device *dev)
rxconf = rxconf | RCR_CBSSID;
}
-
if (priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
rxconf = rxconf | RCR_AICV;
rxconf = rxconf | RCR_APWRMGT;
@@ -797,7 +782,6 @@ void rtl8192_set_rxconf(struct net_device *dev)
if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
rxconf = rxconf | RCR_ACRC32;
-
rxconf = rxconf & ~RX_FIFO_THRESHOLD_MASK;
rxconf = rxconf | (RX_FIFO_THRESHOLD_NONE << RX_FIFO_THRESHOLD_SHIFT);
rxconf = rxconf & ~MAX_RX_DMA_MASK;
@@ -901,13 +885,11 @@ static u32 rtl819xusb_rx_command_packet(struct net_device *dev,
return status;
}
-
static void rtl8192_data_hard_stop(struct net_device *dev)
{
/* FIXME !! */
}
-
static void rtl8192_data_hard_resume(struct net_device *dev)
{
/* FIXME !! */
@@ -951,7 +933,6 @@ static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
-
spin_lock_irqsave(&priv->tx_lock, flags);
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
@@ -1123,7 +1104,6 @@ static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
}
}
-
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
@@ -1188,7 +1168,6 @@ static void rtl8192_net_update(struct net_device *dev)
*/
void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
{
-
}
short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
@@ -1232,6 +1211,8 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
return 0;
DMESGE("Error TX CMD URB, error %d", status);
+ dev_kfree_skb(skb);
+ usb_free_urb(tx_urb);
return -1;
}
@@ -1389,7 +1370,6 @@ static u8 MRateToHwRate8190Pci(u8 rate)
return ret;
}
-
static u8 QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
{
u8 tmp_Short;
@@ -1422,7 +1402,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
(struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
struct usb_device *udev = priv->udev;
int pend;
- int status;
+ int status, rt = -1;
struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
unsigned int idx_pipe;
@@ -1566,8 +1546,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
if (bSend0Byte) {
tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb_zero)
- return -ENOMEM;
+ if (!tx_urb_zero) {
+ rt = -ENOMEM;
+ goto error;
+ }
usb_fill_bulk_urb(tx_urb_zero, udev,
usb_sndbulkpipe(udev, idx_pipe),
&zero, 0, tx_zero_isr, dev);
@@ -1577,7 +1559,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
"Error TX URB for zero byte %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+ goto error;
}
}
netif_trans_update(dev);
@@ -1588,7 +1570,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
RT_TRACE(COMP_ERR, "Error TX URB %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+
+error:
+ dev_kfree_skb_any(skb);
+ usb_free_urb(tx_urb);
+ usb_free_urb(tx_urb_zero);
+ return rt;
}
static short rtl8192_usb_initendpoints(struct net_device *dev)
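The error path added above funnels every failure in rtl8192_tx() through one label that frees the skb and both URBs before returning, instead of returning early and leaking whatever had already been allocated; usb_free_urb(NULL) is a no-op, so the label is safe to reach no matter which step failed. A userspace sketch of the same single-exit cleanup, with plain heap buffers standing in for the skb and URBs:

/*
 * Sketch of the goto-error cleanup used above.  The two "urbs" are
 * ordinary heap buffers; free(NULL) is a no-op just like usb_free_urb(NULL),
 * so one label can undo every partially completed step.
 */
#include <stdio.h>
#include <stdlib.h>

static int send_frame(int fail_second_alloc)
{
	char *tx_urb = NULL, *tx_urb_zero = NULL;
	int rt = -1;

	tx_urb = malloc(256);
	if (!tx_urb) {
		rt = -12;			/* stand-in for -ENOMEM */
		goto error;
	}

	if (fail_second_alloc)			/* simulate a later failure */
		goto error;

	tx_urb_zero = malloc(16);
	if (!tx_urb_zero) {
		rt = -12;
		goto error;
	}

	free(tx_urb);
	free(tx_urb_zero);
	return 0;

error:
	free(tx_urb);
	free(tx_urb_zero);
	return rt;
}

int main(void)
{
	printf("ok path: %d, error path: %d\n", send_frame(0), send_frame(1));
	return 0;
}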
@@ -1742,7 +1729,6 @@ static const struct ieee80211_qos_parameters def_qos_parameters = {
{0, 0, 0, 0} /* tx_op_limit */
};
-
static void rtl8192_update_beacon(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv,
@@ -1913,15 +1899,12 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
if (set_qos_param == 1)
schedule_work(&priv->qos_activate);
-
return 0;
}
-
-static int rtl8192_handle_assoc_response(
- struct net_device *dev,
- struct ieee80211_assoc_response_frame *resp,
- struct ieee80211_network *network)
+static int rtl8192_handle_assoc_response(struct net_device *dev,
+ struct ieee80211_assoc_response_frame *resp,
+ struct ieee80211_network *network)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1929,7 +1912,6 @@ static int rtl8192_handle_assoc_response(
return 0;
}
-
static void rtl8192_update_ratr_table(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2211,14 +2193,13 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv)
static void rtl819x_watchdog_wqcallback(struct work_struct *work);
-static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
+static void rtl8192_irq_rx_tasklet(unsigned long data);
/* init tasklet and wait_queue here. only 2.6 above kernel is considered */
#define DRV_NAME "wlan0"
static void rtl8192_init_priv_task(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
-
INIT_WORK(&priv->reset_wq, rtl8192_restart);
INIT_DELAYED_WORK(&priv->watch_dog_wq,
@@ -2233,8 +2214,7 @@ static void rtl8192_init_priv_task(struct net_device *dev)
InitialGainOperateWorkItemCallBack);
INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
- tasklet_init(&priv->irq_rx_tasklet,
- (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
+ tasklet_init(&priv->irq_rx_tasklet, rtl8192_irq_rx_tasklet,
(unsigned long)priv);
}
@@ -2515,7 +2495,6 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
break;
}
-
if (priv->rf_type == RF_1T2R)
RT_TRACE(COMP_EPROM, "\n1T2R config\n");
else
@@ -2668,7 +2647,6 @@ static void rtl8192_hwconfig(struct net_device *dev)
/* Set Auto Rate fallback control */
}
-
/* InitializeAdapter and PhyCfg */
static bool rtl8192_adapter_start(struct net_device *dev)
{
@@ -2803,14 +2781,12 @@ static bool rtl8192_adapter_start(struct net_device *dev)
RT_TRACE(COMP_INIT, "%s():after phy RF config\n", __func__);
}
-
if (priv->ieee80211->FwRWRF)
/* We can force firmware to do RF-R/W */
priv->Rf_Mode = RF_OP_By_FW;
else
priv->Rf_Mode = RF_OP_By_SW_3wire;
-
rtl8192_phy_updateInitGain(dev);
/*--set CCK and OFDM Block "ON"--*/
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
@@ -2865,7 +2841,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
}
write_nic_byte(dev, 0x87, 0x0);
-
return init_status;
}
@@ -2996,7 +2971,6 @@ static RESET_TYPE RxCheckStuck(struct net_device *dev)
return RESET_TYPE_NORESET;
}
-
/**
* This function is called by Checkforhang to check whether we should
* ask OS to reset driver
@@ -3052,8 +3026,6 @@ static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
static int _rtl8192_up(struct net_device *dev);
static int rtl8192_close(struct net_device *dev);
-
-
static void CamRestoreAllEntry(struct net_device *dev)
{
u8 EntryId = 0;
@@ -3070,7 +3042,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
RT_TRACE(COMP_SEC, "%s:\n", __func__);
-
if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
for (EntryId = 0; EntryId < 4; EntryId++) {
@@ -3096,8 +3067,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
MacAddr, 0, NULL);
}
-
-
if (priv->ieee80211->group_key_type == KEY_TYPE_TKIP) {
MacAddr = CAM_CONST_BROAD;
for (EntryId = 1; EntryId < 4; EntryId++) {
@@ -3134,7 +3103,6 @@ static void rtl819x_ifsilentreset(struct net_device *dev)
int reset_status = 0;
struct ieee80211_device *ieee = priv->ieee80211;
-
/* If we need to check CCK stop, please uncomment this line. */
/* bStuck = Adapter->HalFunc.CheckHWStopHandler(Adapter); */
@@ -3258,7 +3226,6 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
}
}
-
static void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -3369,7 +3336,6 @@ static int _rtl8192_up(struct net_device *dev)
return 0;
}
-
static int rtl8192_open(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3381,7 +3347,6 @@ static int rtl8192_open(struct net_device *dev)
return ret;
}
-
int rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3392,7 +3357,6 @@ int rtl8192_up(struct net_device *dev)
return _rtl8192_up(dev);
}
-
static int rtl8192_close(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3440,7 +3404,6 @@ int rtl8192_down(struct net_device *dev)
deinit_hal_dm(dev);
del_timer_sync(&priv->watch_dog_timer);
-
ieee80211_softmac_stop_protocol(priv->ieee80211);
memset(&priv->ieee80211->current_network, 0,
offsetof(struct ieee80211_network, list));
@@ -3449,7 +3412,6 @@ int rtl8192_down(struct net_device *dev)
return 0;
}
-
void rtl8192_commit(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3495,7 +3457,6 @@ static void r8192_set_multicast(struct net_device *dev)
priv->promisc = promisc;
}
-
static int r8192_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3525,7 +3486,6 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
mutex_lock(&priv->wx_mutex);
-
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
ret = -EINVAL;
goto out;
@@ -3778,7 +3738,6 @@ static long rtl819x_translate_todbm(u8 signal_strength_index)
return signal_power;
}
-
/* We can not declare RSSI/EVM total value of sliding window to
* be a local static. Otherwise, it may increase when we return from S3/S4. The
* value will be kept in memory or disk. Declare the value in the adaptor
@@ -3841,7 +3800,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (!bcheck)
return;
-
/* only rtl8190 supported
* rtl8190_process_cck_rxpathsel(priv,pprevious_stats);
*/
@@ -3851,17 +3809,15 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
/* record the general signal strength to the sliding window. */
-
/* <2> Showed on UI for engineering
* hardware does not provide rssi information for each rf path in CCK
*/
if (!pprevious_stats->bIsCCK &&
(pprevious_stats->bPacketToSelf || pprevious_stats->bToSelfBA)) {
for (rfpath = RF90_PATH_A; rfpath < priv->NumTotalRFPath; rfpath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(
- priv->ieee80211->dev, rfpath))
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev,
+ rfpath))
continue;
-
if (priv->stats.rx_rssi_percentage[rfpath] == 0)
priv->stats.rx_rssi_percentage[rfpath] =
pprevious_stats->RxMIMOSignalStrength[rfpath];
@@ -3881,7 +3837,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
}
}
-
/* Check PWDB. */
RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
pprevious_stats->bIsCCK ? "CCK" : "OFDM",
@@ -3908,7 +3863,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
pprevious_stats->bIsCCK ? "CCK" : "OFDM",
pprevious_stats->RxPWDBAll);
-
if (pprevious_stats->bPacketToSelf ||
pprevious_stats->bPacketBeacon ||
pprevious_stats->bToSelfBA) {
@@ -4083,7 +4037,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
u8 rf_rx_num = 0;
u8 sq;
-
priv->stats.numqry_phystatus++;
is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
@@ -4192,8 +4145,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
else
continue;
- if (!rtl8192_phy_CheckIsLegalRFPath(
- priv->ieee80211->dev, i))
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, i))
continue;
rx_pwr[i] =
@@ -4214,7 +4166,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
precord_stats->RxMIMOSignalStrength[i] = (u8)RSSI;
}
-
/* (2)PWDB, Average PWDB calculated by hardware
* (for rate adaptive)
*/
@@ -4259,7 +4210,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
evm & 0xff;
}
-
/* record rx statistics for debug */
rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
prxsc = (struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *)
@@ -4288,16 +4238,14 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
}
} /* QueryRxPhyStatus8190Pci */
-static void rtl8192_record_rxdesc_forlateruse(
- struct ieee80211_rx_stats *psrc_stats,
- struct ieee80211_rx_stats *ptarget_stats)
+static void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
+ struct ieee80211_rx_stats *ptarget_stats)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
ptarget_stats->Seq_Num = psrc_stats->Seq_Num;
}
-
static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
struct ieee80211_rx_stats *pstats,
struct rx_drvinfo_819x_usb *pdrvinfo)
@@ -4341,8 +4289,6 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
bToSelfBA = true;
}
-
-
if (bpacket_match_bssid)
priv->stats.numpacket_matchbssid++;
if (bpacket_toself)
@@ -4383,7 +4329,6 @@ UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
/* 1: short preamble/GI, 0: long preamble/GI */
u32 preamble_guardinterval;
-
if (stats->bCRC)
rcvType = 2;
else if (stats->bICV)
@@ -4491,7 +4436,6 @@ UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
-
static void query_rxdesc_status(struct sk_buff *skb,
struct ieee80211_rx_stats *stats,
bool bIsRxAggrSubframe)
@@ -4526,8 +4470,7 @@ static void query_rxdesc_status(struct sk_buff *skb,
* Driver info are written to the RxBuffer following rx desc
*/
if (stats->RxDrvInfoSize != 0) {
- driver_info = (struct rx_drvinfo_819x_usb *)(
- skb->data
+ driver_info = (struct rx_drvinfo_819x_usb *)(skb->data
+ sizeof(struct rx_desc_819x_usb)
+ stats->RxBufShift
);
@@ -4556,7 +4499,6 @@ static void query_rxdesc_status(struct sk_buff *skb,
stats->bShortPreamble = driver_info->SPLCP;
-
UpdateReceivedRateHistogramStatistics8190(dev, stats);
stats->bIsAMPDU = (driver_info->PartAggr == 1);
@@ -4569,7 +4511,7 @@ static void query_rxdesc_status(struct sk_buff *skb,
/* Rx A-MPDU */
if (driver_info->FirstAGGR == 1 || driver_info->PartAggr == 1)
RT_TRACE(COMP_RXDESC,
- "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
+ "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
driver_info->FirstAGGR, driver_info->PartAggr);
}
@@ -4636,9 +4578,8 @@ static void rtl8192_rx_nomal(struct sk_buff *skb)
}
}
-static void rtl819xusb_process_received_packet(
- struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+static void rtl819xusb_process_received_packet(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -4671,8 +4612,6 @@ static void rtl819xusb_process_received_packet(
#ifdef SW_CRC_CHECK
SwCrcCheck();
#endif
-
-
}
static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
@@ -4691,7 +4630,6 @@ static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
stats->ntotalfrag = 1;
}
-
static void rtl8192_rx_cmd(struct sk_buff *skb)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
@@ -4716,8 +4654,9 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
}
}
-static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_rx_tasklet(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
struct sk_buff *skb;
struct rtl8192_rx_info *info;
@@ -4759,7 +4698,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_start_xmit = ieee80211_xmit,
};
-
/****************************************************************************
* ---------------------------- USB_STUFF---------------------------
*****************************************************************************/
@@ -4815,7 +4753,6 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
RT_TRACE(COMP_INIT, "dev name=======> %s\n", dev->name);
rtl8192_proc_init_one(dev);
-
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
@@ -4843,7 +4780,6 @@ static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
cancel_work_sync(&priv->qos_activate);
}
-
static void rtl8192_usb_disconnect(struct usb_interface *intf)
{
struct net_device *dev = usb_get_intfdata(intf);
@@ -4907,7 +4843,6 @@ static int __init rtl8192_usb_module_init(void)
return usb_register(&rtl8192_usb_driver);
}
-
static void __exit rtl8192_usb_module_exit(void)
{
usb_deregister(&rtl8192_usb_driver);
@@ -4949,7 +4884,6 @@ void EnableHWSecurityConfig8192(struct net_device *dev)
write_nic_byte(dev, SECR, SECR_value);
}
-
void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
{
@@ -4970,7 +4904,6 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
else
usConfig |= BIT(15) | (KeyType << 2) | KeyIndex;
-
for (i = 0; i < CAM_CONTENT_COUNT; i++) {
TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
TargetCommand |= BIT(31) | BIT(16);
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index e064f43fd8b6..bc98cdaf61ec 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -169,19 +169,20 @@ static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tx_rate;
- /* 87B have to S/W beacon for DTM encryption_cmn. */
- if (priv->ieee80211->current_network.mode == IEEE_A ||
- priv->ieee80211->current_network.mode == IEEE_N_5G ||
- (priv->ieee80211->current_network.mode == IEEE_N_24G &&
- (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
- tx_rate = 60;
- DMESG("send beacon frame tx rate is 6Mbpm\n");
- } else {
- tx_rate = 10;
- DMESG("send beacon frame tx rate is 1Mbpm\n");
- }
- rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
+ /* 87B have to S/W beacon for DTM encryption_cmn. */
+ if (priv->ieee80211->current_network.mode == IEEE_A ||
+ priv->ieee80211->current_network.mode == IEEE_N_5G ||
+ (priv->ieee80211->current_network.mode == IEEE_N_24G &&
+ (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
+ tx_rate = 60;
+ DMESG("send beacon frame tx rate is 6Mbpm\n");
+ } else {
+ tx_rate = 10;
+ DMESG("send beacon frame tx rate is 1Mbpm\n");
+ }
+
+ rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
}
/*-----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index db99129d3169..5901026949f2 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -75,7 +75,7 @@ static void BlinkWorkItemCallback(struct work_struct *work);
* Initialize an LED_871x object.
*/
static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
- enum LED_PIN_871x LedPin)
+ enum LED_PIN_871x LedPin)
{
pLed->padapter = padapter;
pLed->LedPin = LedPin;
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 9901815604f4..00ea0beb12c9 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -33,7 +33,7 @@ static u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
-static void recv_tasklet(void *priv);
+static void recv_tasklet(unsigned long priv);
void r8712_init_recv_priv(struct recv_priv *precvpriv,
struct _adapter *padapter)
@@ -61,13 +61,12 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv,
precvbuf->ref_cnt = 0;
precvbuf->adapter = padapter;
list_add_tail(&precvbuf->list,
- &(precvpriv->free_recv_buf_queue.queue));
+ &(precvpriv->free_recv_buf_queue.queue));
precvbuf++;
}
precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
- tasklet_init(&precvpriv->recv_tasklet,
- (void(*)(unsigned long))recv_tasklet,
- (unsigned long)padapter);
+ tasklet_init(&precvpriv->recv_tasklet, recv_tasklet,
+ (unsigned long)padapter);
skb_queue_head_init(&precvpriv->rx_skb_queue);
skb_queue_head_init(&precvpriv->free_recv_skb_queue);
@@ -119,7 +118,7 @@ void r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
}
void r8712_free_recvframe(union recv_frame *precvframe,
- struct __queue *pfree_recv_queue)
+ struct __queue *pfree_recv_queue)
{
unsigned long irqL;
struct _adapter *padapter = precvframe->u.hdr.adapter;
@@ -140,7 +139,7 @@ void r8712_free_recvframe(union recv_frame *precvframe,
}
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
- struct recv_stat *prxstat)
+ struct recv_stat *prxstat)
{
u16 drvinfo_sz;
@@ -177,7 +176,7 @@ static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
/*perform defrag*/
static union recv_frame *recvframe_defrag(struct _adapter *adapter,
- struct __queue *defrag_q)
+ struct __queue *defrag_q)
{
struct list_head *plist, *phead;
u8 wlanhdr_offset;
@@ -289,7 +288,6 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
r8712_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe = NULL;
}
-
}
if ((ismfrag == 0) && (fragnum != 0)) {
/* the last fragment frame
@@ -379,26 +377,26 @@ static void amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
/* convert hdr + possible LLC headers into Ethernet header */
eth_type = (sub_skb->data[6] << 8) | sub_skb->data[7];
if (sub_skb->len >= 8 &&
- ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
- eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
+ ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
+ eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
+ !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType
*/
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
- ETH_ALEN);
+ ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
- ETH_ALEN);
+ ETH_ALEN);
} else {
__be16 len;
/* Leave Ethernet header part of hdr and full payload */
len = htons(sub_skb->len);
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
- ETH_ALEN);
+ ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
- ETH_ALEN);
+ ETH_ALEN);
}
/* Indicate the packets to upper layer */
if (sub_skb) {
@@ -438,7 +436,6 @@ void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
r8712_event_handle(padapter, (__le32 *)poffset);
poffset += (cmd_len + 8);/*8 bytes alignment*/
} while (le32_to_cpu(voffset) & BIT(31));
-
}
static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl,
@@ -472,7 +469,7 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl,
}
static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
struct list_head *phead, *plist;
union recv_frame *pnextrframe;
@@ -499,8 +496,8 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
}
int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
- struct recv_reorder_ctrl *preorder_ctrl,
- int bforced)
+ struct recv_reorder_ctrl *preorder_ctrl,
+ int bforced)
{
struct list_head *phead, *plist;
union recv_frame *prframe;
@@ -530,7 +527,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
plist = plist->next;
list_del_init(&(prframe->u.hdr.list));
if (SN_EQUAL(preorder_ctrl->indicate_seq,
- pattrib->seq_num))
+ pattrib->seq_num))
preorder_ctrl->indicate_seq =
(preorder_ctrl->indicate_seq + 1) % 4096;
/*indicate this recv_frame*/
@@ -555,7 +552,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
}
static int recv_indicatepkt_reorder(struct _adapter *padapter,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
unsigned long irql;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
@@ -624,7 +621,7 @@ void r8712_reordering_ctrl_timeout_handler(void *pcontext)
}
static int r8712_process_recv_indicatepkts(struct _adapter *padapter,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
int retval = _SUCCESS;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1080,10 +1077,10 @@ static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
} while ((transfer_len > 0) && pkt_cnt > 0);
}
-static void recv_tasklet(void *priv)
+static void recv_tasklet(unsigned long priv)
{
struct sk_buff *pskb;
- struct _adapter *padapter = priv;
+ struct _adapter *padapter = (struct _adapter *)priv;
struct recv_priv *precvpriv = &padapter->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 944336e0d2e2..363b82e3e7c6 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -142,9 +142,9 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "wpa_ie=");
for (i = 0; i < wpa_len; i++) {
- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+ n += scnprintf(buf + n, MAX_WPA_IE_LEN - n,
"%02x", wpa_ie[i]);
- if (n >= MAX_WPA_IE_LEN)
+ if (n == MAX_WPA_IE_LEN-1)
break;
}
memset(iwe, 0, sizeof(*iwe));
@@ -162,9 +162,9 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "rsn_ie=");
for (i = 0; i < rsn_len; i++) {
- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+ n += scnprintf(buf + n, MAX_WPA_IE_LEN - n,
"%02x", rsn_ie[i]);
- if (n >= MAX_WPA_IE_LEN)
+ if (n == MAX_WPA_IE_LEN-1)
break;
}
memset(iwe, 0, sizeof(*iwe));
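Once translate_scan_wpa() accumulates scnprintf() return values, the running offset n is capped at MAX_WPA_IE_LEN - 1, so the loop detects a full buffer with an equality check rather than a >= comparison. A tiny integer-only model of that termination condition; BUF_LEN and bounded_write() are invented here purely to mimic what a size-clamped formatter reports back:

/* Integer-only model of the loop above: "written" is what a bounded
 * formatter like scnprintf() reports, so the offset n is capped at
 * BUF_LEN - 1 and the full-buffer test is an equality check. */
#include <stdio.h>

#define BUF_LEN 32		/* stand-in for MAX_WPA_IE_LEN */

static int bounded_write(int want, int remaining)
{
	if (remaining <= 0)
		return 0;
	return want < remaining ? want : remaining - 1;
}

int main(void)
{
	int n = 0;

	for (int i = 0; i < 64; i++) {		/* 64 IE bytes, "%02x" each */
		n += bounded_write(2, BUF_LEN - n);
		if (n == BUF_LEN - 1) {		/* buffer full: stop early */
			printf("stopped after byte %d, n=%d\n", i, n);
			break;
		}
	}
	printf("final offset n=%d (buffer holds %d chars + NUL)\n",
	       n, BUF_LEN - 1);
	return 0;
}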
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index aa8f8500cbb2..29b85330815f 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -231,8 +231,7 @@ end_of_mp_stop_test:
return _SUCCESS;
}
-uint oid_rt_pro_set_data_rate_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -283,17 +282,15 @@ uint oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != SET_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
if (mp_stop_test(Adapter) == _FAIL)
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -328,8 +325,7 @@ uint oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_tx_power_control_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -347,71 +343,61 @@ uint oid_rt_pro_set_tx_power_control_hdl(
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_tx_packet_sent_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.tx_pktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_rx_packet_received_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.rx_pktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_rx_packet_crc32_error_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.rx_crcerrpktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -422,10 +408,8 @@ uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -435,13 +419,12 @@ uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv
Adapter->mppriv.rx_pktcount = 0;
Adapter->mppriv.rx_crcerrpktcount = 0;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -452,8 +435,7 @@ uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -468,8 +450,7 @@ uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -484,8 +465,7 @@ uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -497,8 +477,7 @@ uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -511,8 +490,7 @@ uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -525,8 +503,7 @@ uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -539,8 +516,7 @@ uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -553,8 +529,7 @@ uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_read_register_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -735,8 +710,7 @@ uint oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
}
/*----------------------------------------------------------------------*/
-uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -829,8 +803,7 @@ uint oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index cc5809e49e35..f0b85338b567 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -143,9 +143,8 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
INIT_WORK(&padapter->wk_filter_rx_ff0, r8712_SetFilter);
alloc_hwxmits(padapter);
init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- tasklet_init(&pxmitpriv->xmit_tasklet,
- (void(*)(unsigned long))r8712_xmit_bh,
- (unsigned long)padapter);
+ tasklet_init(&pxmitpriv->xmit_tasklet, r8712_xmit_bh,
+ (unsigned long)padapter);
return 0;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index b14da38bf652..f227828094bf 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -277,7 +277,7 @@ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe);
int r8712_xmit_enqueue(struct _adapter *padapter,
struct xmit_frame *pxmitframe);
void r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe);
-void r8712_xmit_bh(void *priv);
+void r8712_xmit_bh(unsigned long priv);
void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
struct xmit_buf *pxmitbuf);
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 9d290bc2fdb7..0045da3bb69a 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -308,10 +308,10 @@ void r8712_usb_read_port_cancel(struct _adapter *padapter)
}
}
-void r8712_xmit_bh(void *priv)
+void r8712_xmit_bh(unsigned long priv)
{
int ret = false;
- struct _adapter *padapter = priv;
+ struct _adapter *padapter = (struct _adapter *)priv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
if (padapter->driver_stopped ||
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 6d18d23acdc0..7117d16a30f9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -216,8 +216,9 @@ void expire_timeout_chk(struct adapter *padapter)
/* check auth_queue */
#ifdef DBG_EXPIRATION_CHK
if (phead != plist) {
- DBG_871X(FUNC_NDEV_FMT" auth_list, cnt:%u\n"
- , FUNC_NDEV_ARG(padapter->pnetdev), pstapriv->auth_list_cnt);
+ DBG_871X(FUNC_NDEV_FMT " auth_list, cnt:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev),
+ pstapriv->auth_list_cnt);
}
#endif
while (phead != plist) {
@@ -1446,14 +1447,14 @@ u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
- kfree((u8 *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1496,7 +1497,7 @@ static int rtw_ap_set_key(
}
psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
if (psetkeyparm == NULL) {
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
res = _FAIL;
goto exit;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 8d93c2f26890..13a9b54b4561 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -372,13 +372,13 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
if ((pcmd->cmdcode != _JoinBss_CMD_) &&
(pcmd->cmdcode != _CreateBss_CMD_)) {
/* free parmbuf in cmd_obj */
- kfree((unsigned char *)pcmd->parmbuf);
+ kfree(pcmd->parmbuf);
}
if (pcmd->rsp != NULL) {
if (pcmd->rspsz != 0) {
/* free rsp in cmd_obj */
- kfree((unsigned char *)pcmd->rsp);
+ kfree(pcmd->rsp);
}
}
@@ -507,19 +507,9 @@ post_process:
cmd_process_time = jiffies_to_msecs(jiffies - cmd_start_time);
if (cmd_process_time > 1000) {
- if (pcmd->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
- DBG_871X(ADPT_FMT" cmd =%d process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- } else if (pcmd->cmdcode == GEN_CMD_CODE(_Set_MLME_EVT)) {
- DBG_871X(ADPT_FMT" cmd =%d, process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- } else {
- DBG_871X(ADPT_FMT" cmd =%d, process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- }
+ DBG_871X(ADPT_FMT "cmd= %d process_time= %lu > 1 sec\n",
+ ADPT_ARG(pcmd->padapter), pcmd->cmdcode,
+ cmd_process_time);
}
/* call callback function for post-processed */
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 34adf5789c98..71fcb466019a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -19,7 +19,7 @@ int rtw_init_mlme_priv(struct adapter *padapter)
int i;
u8 *pbuf;
struct wlan_network *pnetwork;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int res = _SUCCESS;
pmlmepriv->nic_hdl = (u8 *)padapter;
@@ -40,7 +40,7 @@ int rtw_init_mlme_priv(struct adapter *padapter)
pbuf = vzalloc(array_size(MAX_BSS_CNT, sizeof(struct wlan_network)));
- if (pbuf == NULL) {
+ if (!pbuf) {
res = _FAIL;
goto exit;
}
@@ -112,9 +112,8 @@ void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
if (pmlmepriv) {
rtw_free_mlme_priv_ie_data(pmlmepriv);
- if (pmlmepriv->free_bss_buf) {
+ if (pmlmepriv->free_bss_buf)
vfree(pmlmepriv->free_bss_buf);
- }
}
}
@@ -185,10 +184,10 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor
/* _irqL irqL; */
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
- if (pnetwork->fixed == true)
+ if (pnetwork->fixed)
return;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
@@ -209,7 +208,6 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor
pmlmepriv->num_of_scanned--;
-
/* DBG_871X("_rtw_free_network:SSID =%s\n", pnetwork->network.Ssid.Ssid); */
spin_unlock_bh(&free_queue->lock);
@@ -220,10 +218,10 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
- if (pnetwork->fixed == true)
+ if (pnetwork->fixed)
return;
/* spin_lock_irqsave(&free_queue->lock, irqL); */
@@ -301,12 +299,8 @@ void rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
spin_unlock_bh(&scanned_queue->lock);
}
-
-
-
sint rtw_if_up(struct adapter *padapter)
{
-
sint res;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
@@ -318,7 +312,6 @@ sint rtw_if_up(struct adapter *padapter)
return res;
}
-
void rtw_generate_random_ibss(u8 *pibss)
{
unsigned long curtime = jiffies;
@@ -329,7 +322,6 @@ void rtw_generate_random_ibss(u8 *pibss)
pibss[3] = (u8)(curtime & 0xff) ;/* p[0]; */
pibss[4] = (u8)((curtime>>8) & 0xff) ;/* p[1]; */
pibss[5] = (u8)((curtime>>16) & 0xff) ;/* p[2]; */
- return;
}
u8 *rtw_get_capability_from_ie(u8 *ie)
@@ -337,7 +329,6 @@ u8 *rtw_get_capability_from_ie(u8 *ie)
return ie + 8 + 2;
}
-
u16 rtw_get_capability(struct wlan_bssid_ex *bss)
{
__le16 val;
@@ -425,7 +416,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 fea
memcpy((u8 *)&tmps, rtw_get_capability_from_ie(src->IEs), 2);
memcpy((u8 *)&tmpd, rtw_get_capability_from_ie(dst->IEs), 2);
-
s_cap = le16_to_cpu(tmps);
d_cap = le16_to_cpu(tmpd);
@@ -467,7 +457,6 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
{
struct list_head *plist, *phead;
-
struct wlan_network *pwlan = NULL;
struct wlan_network *oldest = NULL;
@@ -482,7 +471,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
- if (pwlan->fixed != true) {
+ if (!pwlan->fixed) {
if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
oldest = pwlan;
}
@@ -579,12 +568,8 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
}
}
-
/*
-
Caller must hold pmlmepriv->lock first.
-
-
*/
void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
{
@@ -625,7 +610,6 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
}
-
/* If we didn't find a match, then get a new network slot to initialize
* with this beacon's information */
/* if (phead == plist) { */
@@ -634,7 +618,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
/* If there are no more slots, expire the oldest */
/* list_del_init(&oldest->list); */
pnetwork = oldest;
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
goto exit;
}
@@ -655,7 +639,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
goto exit;
}
@@ -739,7 +723,7 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
privacy = pnetwork->network.Privacy;
if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL)
+ if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen))
return true;
else
return false;
@@ -754,15 +738,13 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
if (psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPA2PSK) {
p = rtw_get_ie(pnetwork->network.IEs + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pnetwork->network.IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0) {
+ if (p && ie_len > 0)
bselected = true;
- } else {
+ else
bselected = false;
- }
}
}
-
if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
DBG_871X("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
bselected = false;
@@ -773,7 +755,6 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
bselected = false;
}
-
return bselected;
}
@@ -783,7 +764,6 @@ void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_event\n"));
}
-
void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
{
u32 len;
@@ -800,7 +780,6 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
return;
}
-
spin_lock_bh(&pmlmepriv->lock);
/* update IBSS_network 's timestamp */
@@ -823,21 +802,16 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
/* lock pmlmepriv->lock when you accessing network_q */
if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == false) {
- if (pnetwork->Ssid.Ssid[0] == 0) {
+ if (pnetwork->Ssid.Ssid[0] == 0)
pnetwork->Ssid.SsidLength = 0;
- }
rtw_add_network(adapter, pnetwork);
}
exit:
spin_unlock_bh(&pmlmepriv->lock);
-
- return;
}
-
-
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
u8 timer_cancelled = false;
@@ -868,12 +842,11 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
if (timer_cancelled)
_cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled);
-
spin_lock_bh(&pmlmepriv->lock);
rtw_set_signal_stat_timer(&adapter->recvpriv);
- if (pmlmepriv->to_join == true) {
+ if (pmlmepriv->to_join) {
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
@@ -896,9 +869,8 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
- if (rtw_createbss_cmd(adapter) != _SUCCESS) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error =>rtw_createbss_cmd status FAIL\n"));
- }
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error =>rtw_createbss_cmd status FAIL\n"));
pmlmepriv->to_join = false;
}
@@ -1009,7 +981,6 @@ static void find_network(struct adapter *adapter)
else
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources : pwlan == NULL\n\n"));
-
if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) &&
(adapter->stapriv.asoc_sta_count == 1))
rtw_free_network_nolock(adapter, pwlan);
@@ -1169,9 +1140,8 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress);
- if (psta == NULL) {
+ if (!psta)
psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
- }
if (psta) { /* update ptarget_sta */
@@ -1189,7 +1159,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
psta->wireless_mode = pmlmeext->cur_wireless_mode;
psta->raid = networktype_to_raid_ex(padapter, psta);
-
/* sta mode */
rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
@@ -1221,7 +1190,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
padapter->securitypriv.wps_ie_len = 0;
}
-
/* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
/* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
/* todo: check if AP can send A-MPDU packets */
@@ -1238,7 +1206,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
preorder_ctrl->wsize_b = 64;/* max_ampdu_sz;ex. 32(kbytes) -> wsize_b =32 */
}
-
bmc_sta = rtw_get_bcmc_stainfo(padapter);
if (bmc_sta) {
for (i = 0; i < 16 ; i++) {
@@ -1272,7 +1239,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nfw_state:%x, BSSID:"MAC_FMT"\n"
, get_fwstate(pmlmepriv), MAC_ARG(pnetwork->network.MacAddress)));
-
/* why not use ptarget_wlan?? */
memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
/* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */
@@ -1281,7 +1247,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
cur_network->aid = pnetwork->join_res;
-
rtw_set_signal_stat_timer(&padapter->recvpriv);
padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
@@ -1349,12 +1314,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
rtw_get_encrypt_decrypt_from_registrypriv(adapter);
-
- if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
- } else {
+ else
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
- }
the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
@@ -1377,7 +1340,7 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
/* s1. find ptarget_wlan */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (the_same_macaddr == true) {
+ if (the_same_macaddr) {
ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
} else {
pcur_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
@@ -1412,11 +1375,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
goto ignore_joinbss_callback;
}
-
/* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
- if (ptarget_sta == NULL) {
+ if (!ptarget_sta) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
goto ignore_joinbss_callback;
@@ -1432,7 +1394,6 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
}
-
/* s5. Cancel assoc_timer */
_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
@@ -1506,7 +1467,7 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta, u3
{
u16 media_status_rpt;
- if (psta == NULL)
+ if (!psta)
return;
media_status_rpt = (u16)((psta->mac_id<<8)|mstatus); /* MACID|OPMODE:1 connect */
@@ -1564,7 +1525,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
/* for AD-HOC mode */
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta != NULL) {
+ if (psta) {
/* the sta has been in sta_info_queue => do nothing */
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
@@ -1573,7 +1534,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
}
psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta == NULL) {
+ if (!psta) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
return;
}
@@ -1591,7 +1552,6 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
-
psta->ieee8021x_blocked = false;
spin_lock_bh(&pmlmepriv->lock);
@@ -1612,7 +1572,6 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
spin_unlock_bh(&pmlmepriv->lock);
-
mlmeext_sta_add_event_callback(adapter, psta);
}
@@ -1648,7 +1607,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
return;
-
mlmeext_sta_del_event_callback(adapter);
spin_lock_bh(&pmlmepriv->lock);
@@ -1726,13 +1684,8 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
_clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
}
- if (rtw_createbss_cmd(adapter) != _SUCCESS) {
-
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
-
- }
-
-
}
}
@@ -1750,7 +1703,6 @@ void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
cpwm_int_hdl(padapter, preportpwrstate);
}
-
void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf)
{
WMMOnAssocRsp(padapter);
@@ -1840,8 +1792,6 @@ void rtw_mlme_reset_auto_scan_int(struct adapter *adapter)
mlme->auto_scan_int_ms = mlme->roam_scan_int_ms;
} else
mlme->auto_scan_int_ms = 0; /* disabled */
-
- return;
}
static void rtw_auto_scan_handler(struct adapter *padapter)
@@ -1859,7 +1809,7 @@ static void rtw_auto_scan_handler(struct adapter *padapter)
goto exit;
}
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) {
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
DBG_871X(FUNC_ADPT_FMT" exit BusyTraffic\n", FUNC_ADPT_ARG(padapter));
goto exit;
}
@@ -1879,20 +1829,20 @@ void rtw_dynamic_check_timer_handler(struct adapter *adapter)
if (!adapter)
return;
- if (adapter->hw_init_completed == false)
+ if (!adapter->hw_init_completed)
return;
- if ((adapter->bDriverStopped == true) || (adapter->bSurpriseRemoved == true))
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- if (adapter->net_closed == true)
+ if (adapter->net_closed)
return;
if (is_primary_adapter(adapter))
DBG_871X("IsBtDisabled =%d, IsBtControlLps =%d\n", hal_btcoex_IsBtDisabled(adapter), hal_btcoex_IsBtControlLps(adapter));
- if ((adapter_to_pwrctl(adapter)->bFwCurrentInPSMode == true)
- && (hal_btcoex_IsBtControlLps(adapter) == false)
+ if ((adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
+ && !(hal_btcoex_IsBtControlLps(adapter))
) {
u8 bEnterPS;
@@ -1907,16 +1857,14 @@ void rtw_dynamic_check_timer_handler(struct adapter *adapter)
}
} else {
- if (is_primary_adapter(adapter)) {
+ if (is_primary_adapter(adapter))
rtw_dynamic_chk_wk_cmd(adapter);
- }
}
/* auto site survey */
rtw_auto_scan_handler(adapter);
}
-
inline bool rtw_is_scan_deny(struct adapter *adapter)
{
struct mlme_priv *mlmepriv = &adapter->mlmepriv;
@@ -1994,26 +1942,24 @@ int rtw_select_roaming_candidate(struct mlme_priv *mlme)
{
int ret = _FAIL;
struct list_head *phead;
- struct adapter *adapter;
struct __queue *queue = &(mlme->scanned_queue);
struct wlan_network *pnetwork = NULL;
struct wlan_network *candidate = NULL;
- if (mlme->cur_network_scanned == NULL) {
+ if (!mlme->cur_network_scanned) {
rtw_warn_on(1);
return ret;
}
spin_lock_bh(&(mlme->scanned_queue.lock));
phead = get_list_head(queue);
- adapter = (struct adapter *)mlme->nic_hdl;
mlme->pscanned = get_next(phead);
while (phead != mlme->pscanned) {
pnetwork = LIST_CONTAINOR(mlme->pscanned, struct wlan_network, list);
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork == NULL)\n", __func__));
ret = _FAIL;
goto exit;
@@ -2031,7 +1977,7 @@ int rtw_select_roaming_candidate(struct mlme_priv *mlme)
}
- if (candidate == NULL) {
+ if (!candidate) {
DBG_871X("%s: return _FAIL(candidate == NULL)\n", __func__);
ret = _FAIL;
goto exit;
@@ -2064,9 +2010,8 @@ static int rtw_check_join_candidate(struct mlme_priv *mlme
int updated = false;
struct adapter *adapter = container_of(mlme, struct adapter, mlmepriv);
-
/* check bssid, if needed */
- if (mlme->assoc_by_bssid == true) {
+ if (mlme->assoc_by_bssid) {
if (memcmp(competitor->network.MacAddress, mlme->assoc_bssid, ETH_ALEN))
goto exit;
}
@@ -2115,12 +2060,8 @@ exit:
/*
Calling context:
The caller of the sub-routine will be in critical section...
-
The caller must hold the following spinlock
-
pmlmepriv->lock
-
-
*/
int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
@@ -2148,7 +2089,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
while (phead != pmlmepriv->pscanned) {
pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork == NULL)\n", __func__));
ret = _FAIL;
goto exit;
@@ -2166,7 +2107,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
}
- if (candidate == NULL) {
+ if (!candidate) {
DBG_871X("%s: return _FAIL(candidate == NULL)\n", __func__);
#ifdef CONFIG_WOWLAN
_clr_fwstate_(pmlmepriv, _FW_LINKED|_FW_UNDER_LINKING);
@@ -2207,14 +2148,14 @@ sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
sint res = _SUCCESS;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL; /* try again */
goto exit;
}
psetauthparm = rtw_zmalloc(sizeof(struct setauth_parm));
- if (psetauthparm == NULL) {
- kfree((unsigned char *)pcmd);
+ if (!psetauthparm) {
+ kfree(pcmd);
res = _FAIL;
goto exit;
}
@@ -2227,7 +2168,6 @@ sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
pcmd->rsp = NULL;
pcmd->rspsz = 0;
-
INIT_LIST_HEAD(&pcmd->list);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("after enqueue set_auth_cmd, auth_mode =%x\n", psecuritypriv->dot11AuthAlgrthm));
@@ -2247,7 +2187,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
sint res = _SUCCESS;
psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
res = _FAIL;
goto exit;
}
@@ -2291,15 +2231,14 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
default:
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key:psecuritypriv->dot11PrivacyAlgrthm = %x (must be 1 or 2 or 4 or 5)\n", psecuritypriv->dot11PrivacyAlgrthm));
res = _FAIL;
- kfree((unsigned char *)psetkeyparm);
+ kfree(psetkeyparm);
goto exit;
}
-
if (enqueue) {
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
- kfree((unsigned char *)psetkeyparm);
+ if (!pcmd) {
+ kfree(psetkeyparm);
res = _FAIL; /* try again */
goto exit;
}
@@ -2315,7 +2254,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
} else {
setkey_hdl(adapter, (u8 *)psetkeyparm);
- kfree((u8 *) psetkeyparm);
+ kfree(psetkeyparm);
}
exit:
return res;
@@ -2350,7 +2289,6 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
}
-
/* */
/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
/* Added by Annie, 2006-05-07. */
@@ -2679,7 +2617,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SGI_20);
/* Get HT BW */
- if (in_ie == NULL) {
+ if (!in_ie) {
/* TDLS: TODO 20/40 issue */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
operation_bw = padapter->mlmeextpriv.cur_bwmode;
@@ -2794,7 +2732,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
phtpriv->ht_option = true;
- if (in_ie != NULL) {
+ if (in_ie) {
p = rtw_get_ie(in_ie, _HT_ADD_INFO_IE_, &ielen, in_len);
if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
out_len = *pout_len;
@@ -2824,7 +2762,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
u8 cbw40_enable = 0;
-
if (!phtpriv->ht_option)
return;
@@ -2834,7 +2771,7 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
DBG_871X("+rtw_update_ht_cap()\n");
/* maybe needs check if ap supports rx ampdu. */
- if ((phtpriv->ampdu_enable == false) && (pregistrypriv->ampdu_enable == 1)) {
+ if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1) {
if (pregistrypriv->wifi_spec == 1) {
/* remove this part because testbed AP should disable RX AMPDU */
/* phtpriv->ampdu_enable = false; */
@@ -2847,7 +2784,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
/* phtpriv->ampdu_enable = true; */
}
-
/* check Max Rx A-MPDU Size */
len = 0;
p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
@@ -2861,7 +2797,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
}
-
len = 0;
p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
if (p && len > 0) {
@@ -2961,7 +2896,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
return;
}
- if (psta == NULL) {
+ if (!psta) {
DBG_871X("%s, psta ==NUL\n", __func__);
return;
}
@@ -2971,10 +2906,9 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
return;
}
-
phtpriv = &psta->htpriv;
- if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true)) {
+ if (phtpriv->ht_option && phtpriv->ampdu_enable) {
issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
@@ -2994,10 +2928,8 @@ void rtw_append_exented_cap(struct adapter *padapter, u8 *out_ie, uint *pout_len
u8 cap_content[8] = {0};
u8 *pframe;
-
- if (phtpriv->bss_coexist) {
+ if (phtpriv->bss_coexist)
SET_EXT_CAPABILITY_ELE_BSS_COEXIST(cap_content, 1);
- }
pframe = rtw_set_ie(out_ie + *pout_len, EID_EXTCapability, 8, cap_content, pout_len);
}
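The rtw_mlme.c hunks above are pure style cleanups: explicit comparisons against NULL, true and false become plain boolean tests, and the pointer casts passed to kfree() are dropped. A minimal stand-alone sketch of the same pattern, using hypothetical names in plain C rather than driver code:

#include <stdlib.h>
#include <stdbool.h>

struct sta_info { int id; };

/* Before: if (psta == NULL), if (flag == true), kfree((unsigned char *)p).
 * After: the idiomatic forms used throughout the hunks above. */
static struct sta_info *lookup_or_alloc(struct sta_info *psta, bool busy)
{
	if (!psta)			/* instead of: psta == NULL */
		psta = malloc(sizeof(*psta));

	if (psta && busy)		/* instead of: busy == true */
		psta->id = 1;

	return psta;
}

int main(void)
{
	struct sta_info *s = lookup_or_alloc(NULL, true);

	free(s);			/* no cast needed, as with kfree(pcmd) above */
	return 0;
}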
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 2128886c9924..5e687f6d2c3e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -344,7 +344,7 @@ static void init_channel_list(struct adapter *padapter, RT_CHANNEL_INFO *channel
struct p2p_channels *channel_list)
{
- struct p2p_oper_class_map op_class[] = {
+ static const struct p2p_oper_class_map op_class[] = {
{ IEEE80211G, 81, 1, 13, 1, BW20 },
{ IEEE80211G, 82, 14, 14, 1, BW20 },
{ IEEE80211A, 115, 36, 48, 4, BW20 },
@@ -363,7 +363,7 @@ static void init_channel_list(struct adapter *padapter, RT_CHANNEL_INFO *channel
for (op = 0; op_class[op].op_class; op++) {
u8 ch;
- struct p2p_oper_class_map *o = &op_class[op];
+ const struct p2p_oper_class_map *o = &op_class[op];
struct p2p_reg_class *reg = NULL;
for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
@@ -2922,7 +2922,8 @@ int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
int i = 0;
do {
- ret = _issue_probereq(padapter, pssid, da, ch, append_wps, wait_ms > 0?true:false);
+ ret = _issue_probereq(padapter, pssid, da, ch, append_wps,
+ wait_ms > 0);
i++;
@@ -3086,8 +3087,6 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
DBG_871X("%s\n", __func__);
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
@@ -3405,8 +3404,6 @@ exit:
rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
else
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
-
- return;
}
/* when wait_ack is true, this function should be called at process context */
@@ -3513,7 +3510,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
}
do {
- ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0?true:false);
+ ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0);
i++;
@@ -3661,7 +3658,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
da = get_my_bssid(&(pmlmeinfo->network));
do {
- ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0?true:false);
+ ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0);
i++;
@@ -3769,7 +3766,7 @@ int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int
int i = 0;
do {
- ret = _issue_deauth(padapter, da, reason, wait_ms > 0?true:false);
+ ret = _issue_deauth(padapter, da, reason, wait_ms > 0);
i++;
@@ -5260,8 +5257,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
DBG_871X("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
@@ -5306,8 +5301,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
DBG_871X("report_add_sta_event: add STA\n");
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
/****************************************************************************
@@ -5869,8 +5862,6 @@ void link_timer_hdl(struct timer_list *t)
issue_assocreq(padapter);
set_link_timer(pmlmeext, REASSOC_TO);
}
-
- return;
}
void addba_timer_hdl(struct timer_list *t)
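The rtw_mlme_ext.c changes make the read-only operating-class table static const and drop redundant "cond ? true : false" expressions and bare "return;" statements at the end of void functions. A small stand-alone illustration of the static const table pattern, with made-up names rather than the driver's p2p_oper_class_map:

#include <stdio.h>
#include <stdbool.h>

/* A read-only lookup table; 'static const' keeps it in read-only data
 * instead of being rebuilt on the stack on every call. */
struct chan_map { int op_class; int min_chan; int max_chan; };

static const struct chan_map chan_maps[] = {
	{ 81,  1, 13 },
	{ 115, 36, 48 },
	{ 0, 0, 0 },			/* terminator */
};

static bool chan_in_class(int op_class, int chan)
{
	const struct chan_map *m;

	for (m = chan_maps; m->op_class; m++)
		if (m->op_class == op_class)
			/* a relational expression already yields a bool,
			 * so no '? true : false' is needed */
			return chan >= m->min_chan && chan <= m->max_chan;

	return false;
}

int main(void)
{
	printf("%d\n", chan_in_class(81, 6));	/* prints 1 */
	return 0;
}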
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index 4075de07e0a9..30137f0bd984 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -190,7 +190,6 @@ void rtw_ps_processor(struct adapter *padapter)
}
exit:
pwrpriv->ps_processing = false;
- return;
}
static void pwr_state_check_handler(struct timer_list *t)
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 687ff3c6f09f..7fa8c84cf5f4 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -1400,10 +1400,8 @@ static sint validate_80211w_mgmt(struct adapter *adapter, union recv_frame *prec
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
u8 *ptr = precv_frame->u.hdr.rx_data;
- u8 type;
u8 subtype;
- type = GetFrameType(ptr);
subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
/* only support station mode */
@@ -1412,9 +1410,8 @@ static sint validate_80211w_mgmt(struct adapter *adapter, union recv_frame *prec
/* unicast management frame decrypt */
if (pattrib->privacy && !(IS_MCAST(GetAddr1Ptr(ptr))) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC || subtype == WIFI_ACTION)) {
- u8 *ppp, *mgmt_DATA;
+ u8 *mgmt_DATA;
u32 data_len = 0;
- ppp = GetAddr2Ptr(ptr);
pattrib->bdecrypted = 0;
pattrib->encrypt = _AES_;
@@ -1709,7 +1706,7 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
struct __queue *defrag_q)
{
struct list_head *plist, *phead;
- u8 *data, wlanhdr_offset;
+ u8 wlanhdr_offset;
u8 curfragnum;
struct recv_frame_hdr *pfhdr, *pnfhdr;
union recv_frame *prframe, *pnextrframe;
@@ -1739,8 +1736,6 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
plist = get_next(plist);
- data = get_recvframe_data(prframe);
-
while (phead != plist) {
pnextrframe = (union recv_frame *)plist;
pnfhdr = &pnextrframe->u.hdr;
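The rtw_recv.c hunks simply delete locals that were assigned but never read (type, ppp, data). A trivial sketch of the same cleanup; the names and mask values here are illustrative only, not the driver's GetFrameType()/GetFrameSubType() macros:

#include <stdio.h>

static int frame_subtype(const unsigned char *hdr)
{
	/* before: int type = hdr[0] & 0x0c;  -- set but never read, so removed */
	int subtype = hdr[0] & 0xf0;	/* only the subtype is actually used */

	return subtype;
}

int main(void)
{
	unsigned char hdr[2] = { 0xc0, 0x00 };

	printf("subtype 0x%x\n", frame_subtype(hdr));	/* prints "subtype 0xc0" */
	return 0;
}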
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 57cfe06d7d73..9c4607114cea 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -303,13 +303,18 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
*((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
if (crc[3] != payload[length-1] || crc[2] != payload[length-2] || crc[1] != payload[length-3] || crc[0] != payload[length-4]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_wep_decrypt:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
- crc[3], payload[length-1], crc[2], payload[length-2], crc[1], payload[length-3], crc[0], payload[length-4]));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
+ __func__,
+ crc[3], payload[length - 1],
+ crc[2], payload[length - 2],
+ crc[1], payload[length - 3],
+ crc[0], payload[length - 4]));
}
WEP_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
}
- return;
}
/* 3 =====TKIP related ===== */
@@ -657,11 +662,9 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
u8 hw_hdr_offset = 0;
struct arc4context mycontext;
sint curfragnum, length;
- u32 prwskeylen;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
- /* struct sta_info *stainfo; */
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -676,36 +679,14 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _TKIP_) {
-/*
- if (pattrib->psta)
- {
- stainfo = pattrib->psta;
- }
- else
{
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- stainfo =rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
- }
-*/
- /* if (stainfo!= NULL) */
- {
-/*
- if (!(stainfo->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
- return _FAIL;
- }
-*/
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
- /* prwskey =&stainfo->dot118021x_UncstKey.skey[0]; */
prwskey = pattrib->dot118021x_UncstKey.skey;
- prwskeylen = 16;
-
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
iv = pframe+pattrib->hdrlen;
payload = pframe+pattrib->iv_len+pattrib->hdrlen;
@@ -742,13 +723,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
TKIP_SW_ENC_CNT_INC(psecuritypriv, pattrib->ra);
}
-/*
- else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo == NULL!!!\n"));
- DBG_871X("%s, psta ==NUL\n", __func__);
- res = _FAIL;
- }
-*/
}
return res;
@@ -765,14 +739,12 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
u8 crc[4];
struct arc4context mycontext;
sint length;
- u32 prwskeylen;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
-/* struct recv_priv *precvpriv =&padapter->recvpriv; */
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
@@ -817,13 +789,9 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
no_gkey_bc_cnt = 0;
no_gkey_mc_cnt = 0;
- /* DBG_871X("rx bc/mc packets, to perform sw rtw_tkip_decrypt\n"); */
- /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
- prwskeylen = 16;
} else {
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- prwskeylen = 16;
}
iv = pframe+prxattrib->hdrlen;
@@ -846,15 +814,19 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
*((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
if (crc[3] != payload[length-1] || crc[2] != payload[length-2] || crc[1] != payload[length-3] || crc[0] != payload[length-4]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
("rtw_wep_decrypt:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
- crc[3], payload[length-1], crc[2], payload[length-2], crc[1], payload[length-3], crc[0], payload[length-4]));
+ crc[3], payload[length - 1],
+ crc[2], payload[length - 2],
+ crc[1], payload[length - 3],
+ crc[0], payload[length - 4]));
res = _FAIL;
}
TKIP_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo == NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo == NULL!!!\n", __func__));
res = _FAIL;
}
@@ -1426,7 +1398,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
aes128k128d(key, chain_buffer, aes_out);
for (i = 0; i < num_blocks; i++) {
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
payload_index += 16;
aes128k128d(key, chain_buffer, aes_out);
@@ -1437,7 +1409,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++) {
- padded_buffer[j] = pframe[payload_index++];/* padded_buffer[j] = message[payload_index++]; */
+ padded_buffer[j] = pframe[payload_index++];
}
bitwise_xor(aes_out, padded_buffer, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
@@ -1449,7 +1421,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
/* Insert MIC into payload */
for (j = 0; j < 8; j++)
- pframe[payload_index+j] = mic[j]; /* message[payload_index+j] = mic[j]; */
+ pframe[payload_index+j] = mic[j];
payload_index = hdrlen + 8;
for (i = 0; i < num_blocks; i++) {
@@ -1463,9 +1435,9 @@ static sint aes_cipher(u8 *key, uint hdrlen,
frtype
); /* add for CONFIG_IEEE80211W, none 11w also can use */
aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<16;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
}
if (payload_remainder > 0) {
@@ -1484,12 +1456,12 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index+j];/* padded_buffer[j] = message[payload_index+j]; */
+ padded_buffer[j] = pframe[payload_index+j];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < payload_remainder; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<payload_remainder;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
}
/* Encrypt the MIC */
@@ -1506,12 +1478,12 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < 8; j++)
- padded_buffer[j] = pframe[j+hdrlen+8+plen];/* padded_buffer[j] = message[j+hdrlen+8+plen]; */
+ padded_buffer[j] = pframe[j+hdrlen+8+plen];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < 8; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<8;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
return _SUCCESS;
}
@@ -1525,15 +1497,12 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* Intermediate Buffers */
sint curfragnum, length;
- u32 prwskeylen;
u8 *pframe, *prwskey; /* *payload,*iv */
u8 hw_hdr_offset = 0;
- /* struct sta_info *stainfo = NULL; */
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-/* uint offset = 0; */
u32 res = _SUCCESS;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
@@ -1544,16 +1513,13 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _AES_) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
- /* prwskey =&stainfo->dot118021x_UncstKey.skey[0]; */
prwskey = pattrib->dot118021x_UncstKey.skey;
- prwskeylen = 16;
-
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
@@ -1574,10 +1540,10 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
}
static sint aes_decipher(u8 *key, uint hdrlen,
- u8 *pframe, uint plen)
+ u8 *pframe, uint plen)
{
static u8 message[MAX_MSG_SIZE];
- uint qc_exists, a4_exists, i, j, payload_remainder,
+ uint qc_exists, a4_exists, i, j, payload_remainder,
num_blocks, payload_index;
sint res = _SUCCESS;
u8 pn_vector[6];
@@ -1593,9 +1559,8 @@ static sint aes_decipher(u8 *key, uint hdrlen,
u8 mic[8];
-/* uint offset = 0; */
- uint frtype = GetFrameType(pframe);
- uint frsubtype = GetFrameSubType(pframe);
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
frsubtype = frsubtype>>4;
@@ -1615,11 +1580,11 @@ static sint aes_decipher(u8 *key, uint hdrlen,
payload_remainder = (plen-8) % 16;
pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen+1];
- pn_vector[2] = pframe[hdrlen+4];
- pn_vector[3] = pframe[hdrlen+5];
- pn_vector[4] = pframe[hdrlen+6];
- pn_vector[5] = pframe[hdrlen+7];
+ pn_vector[1] = pframe[hdrlen + 1];
+ pn_vector[2] = pframe[hdrlen + 4];
+ pn_vector[3] = pframe[hdrlen + 5];
+ pn_vector[4] = pframe[hdrlen + 6];
+ pn_vector[5] = pframe[hdrlen + 7];
if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
a4_exists = 0;
@@ -1651,22 +1616,17 @@ static sint aes_decipher(u8 *key, uint hdrlen,
payload_index = hdrlen + 8; /* 8 is for extiv */
for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(
- ctr_preload,
- a4_exists,
- qc_exists,
- pframe,
- pn_vector,
- i+1,
- frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
- );
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
-
- for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
+ construct_ctr_preload(ctr_preload, a4_exists,
+ qc_exists, pframe,
+ pn_vector, i + 1,
+ frtype); /* add for CONFIG_IEEE80211W, none 11w also can use */
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
if (payload_remainder > 0) {
/* If there is a short final block, then pad it,*/
@@ -1835,10 +1795,18 @@ static sint aes_decipher(u8 *key, uint hdrlen,
/* compare the mic */
for (i = 0; i < 8; i++) {
if (pframe[hdrlen+8+plen-8+i] != message[hdrlen+8+plen-8+i]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
- i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]));
- DBG_871X("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
- i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]);
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ __func__,
+ i,
+ pframe[hdrlen + 8 + plen - 8 + i],
+ message[hdrlen + 8 + plen - 8 + i]));
+ DBG_871X("%s:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ __func__,
+ i,
+ pframe[hdrlen + 8 + plen - 8 + i],
+ message[hdrlen + 8 + plen - 8 + i]);
res = _FAIL;
}
}
@@ -1861,7 +1829,6 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
-/* struct recv_priv *precvpriv =&padapter->recvpriv; */
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
@@ -1869,15 +1836,15 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
if (prxattrib->encrypt == _AES_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo != NULL) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(prxattrib->ra)) {
static unsigned long start;
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
- /* DBG_871X("rx bc/mc packets, to perform sw rtw_aes_decrypt\n"); */
- /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */
if (psecuritypriv->binstallGrpkey == false) {
res = _FAIL;
@@ -1927,7 +1894,9 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
AES_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo == NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s: stainfo == NULL!!!\n", __func__));
res = _FAIL;
}
}
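The rtw_security.c hunks replace hard-coded function names inside RT_TRACE()/DBG_871X() strings with __func__, so the log text stays correct if a function is ever renamed. A stand-alone sketch of that pattern with plain printf() and hypothetical names:

#include <stdio.h>

static int aes_decrypt_frame(const unsigned char *frame, int has_key)
{
	(void)frame;			/* the decryption itself is out of scope here */

	if (!has_key) {
		/* before: printf("aes_decrypt_frame: stainfo == NULL!!!\n"); */
		printf("%s: stainfo == NULL!!!\n", __func__);
		return -1;
	}

	return 0;
}

int main(void)
{
	unsigned char frame[16] = { 0 };

	aes_decrypt_frame(frame, 0);	/* logs "aes_decrypt_frame: stainfo == NULL!!!" */
	return 0;
}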
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index bdc52d8d5625..09d2ca30d653 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -187,7 +187,6 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
/* struct sta_info *rtw_alloc_stainfo(_queue *pfree_sta_queue, unsigned char *hwaddr) */
struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- uint tmp_aid;
s32 index;
struct list_head *phash_list;
struct sta_info *psta;
@@ -211,8 +210,6 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
/* spin_unlock_bh(&(pfree_sta_queue->lock)); */
- tmp_aid = psta->aid;
-
_rtw_init_stainfo(psta);
psta->padapter = pstapriv->padapter;
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index ea3ea2a6b314..9590e6f351c1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -606,19 +606,6 @@ inline void clear_cam_entry(struct adapter *adapter, u8 id)
clear_cam_cache(adapter, id);
}
-inline void write_cam_from_cache(struct adapter *adapter, u8 id)
-{
- struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
- struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
- struct cam_entry_cache cache;
-
- spin_lock_bh(&cam_ctl->lock);
- memcpy(&cache, &dvobj->cam_cache[id], sizeof(struct cam_entry_cache));
- spin_unlock_bh(&cam_ctl->lock);
-
- _write_cam(adapter, id, cache.ctrl, cache.mac, cache.key);
-}
-
void write_cam_cache(struct adapter *adapter, u8 id, u16 ctrl, u8 *mac, u8 *key)
{
struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
@@ -1170,8 +1157,6 @@ void HT_info_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
pmlmeinfo->HT_info_enable = 1;
memcpy(&(pmlmeinfo->HT_info), pIE->data, pIE->Length);
-
- return;
}
void HTOnAssocRsp(struct adapter *padapter)
@@ -1481,11 +1466,11 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
}
- kfree((u8 *)bssid);
+ kfree(bssid);
return _SUCCESS;
_mismatch:
- kfree((u8 *)bssid);
+ kfree(bssid);
if (pmlmepriv->NumOfBcnInfoChkFail == 0)
pmlmepriv->timeBcnInfoChkStart = jiffies;
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index b5dcb78fb4f4..fdb585ff5925 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -25,9 +25,6 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
spin_lock_init(&psta_xmitpriv->lock);
- /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
- /* _init_txservq(&(psta_xmitpriv->blk_q[i])); */
-
_init_txservq(&psta_xmitpriv->be_q);
_init_txservq(&psta_xmitpriv->bk_q);
_init_txservq(&psta_xmitpriv->vi_q);
@@ -54,18 +51,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->adapter = padapter;
- /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
- /* _rtw_init_queue(&pxmitpriv->blk_strms[i]); */
-
_rtw_init_queue(&pxmitpriv->be_pending);
_rtw_init_queue(&pxmitpriv->bk_pending);
_rtw_init_queue(&pxmitpriv->vi_pending);
_rtw_init_queue(&pxmitpriv->vo_pending);
_rtw_init_queue(&pxmitpriv->bm_pending);
- /* _rtw_init_queue(&pxmitpriv->legacy_dz_queue); */
- /* _rtw_init_queue(&pxmitpriv->apsd_queue); */
-
_rtw_init_queue(&pxmitpriv->free_xmit_queue);
/*
@@ -83,13 +74,11 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_frame_buf), 4);
- /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
- /* ((SIZE_PTR) (pxmitpriv->pallocated_frame_buf) &3); */
pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
for (i = 0; i < NR_XMITFRAME; i++) {
- INIT_LIST_HEAD(&(pxframe->list));
+ INIT_LIST_HEAD(&pxframe->list);
pxframe->padapter = padapter;
pxframe->frame_tag = NULL_FRAMETAG;
@@ -99,7 +88,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
- list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
+ list_add_tail(&pxframe->list,
+ &pxmitpriv->free_xmit_queue.queue);
pxframe++;
}
@@ -108,7 +98,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
-
/* init xmit_buf */
_rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
_rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
@@ -122,8 +111,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
}
pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmitbuf), 4);
- /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
- /* ((SIZE_PTR) (pxmitpriv->pallocated_xmitbuf) &3); */
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
@@ -150,13 +137,13 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->flags = XMIT_VO_QUEUE;
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list,
+ &pxmitpriv->free_xmitbuf_queue.queue);
#ifdef DBG_XMIT_BUF
pxmitbuf->no = i;
#endif
pxmitbuf++;
-
}
pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
@@ -176,7 +163,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
- INIT_LIST_HEAD(&(pxframe->list));
+ INIT_LIST_HEAD(&pxframe->list);
pxframe->padapter = padapter;
pxframe->frame_tag = NULL_FRAMETAG;
@@ -188,7 +175,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe->ext_tag = 1;
- list_add_tail(&(pxframe->list), &(pxmitpriv->free_xframe_ext_queue.queue));
+ list_add_tail(&pxframe->list,
+ &pxmitpriv->free_xframe_ext_queue.queue);
pxframe++;
}
@@ -227,12 +215,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->len = 0;
pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list,
+ &pxmitpriv->free_xmit_extbuf_queue.queue);
#ifdef DBG_XMIT_BUF_EXT
pxmitbuf->no = i;
#endif
pxmitbuf++;
-
}
pxmitpriv->free_xmit_extbuf_cnt = NR_XMIT_EXTBUFF;
@@ -265,9 +253,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++)
pxmitpriv->wmm_para_seq[i] = i;
- }
pxmitpriv->ack_tx = false;
mutex_init(&pxmitpriv->ack_tx_mutex);
@@ -306,7 +293,6 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
if (pxmitpriv->pallocated_frame_buf)
vfree(pxmitpriv->pallocated_frame_buf);
-
if (pxmitpriv->pallocated_xmitbuf)
vfree(pxmitpriv->pallocated_xmitbuf);
@@ -329,9 +315,8 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
pxmitbuf++;
}
- if (pxmitpriv->pallocated_xmit_extbuf) {
+ if (pxmitpriv->pallocated_xmit_extbuf)
vfree(pxmitpriv->pallocated_xmit_extbuf);
- }
for (i = 0; i < CMDBUF_MAX; i++) {
pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
@@ -372,8 +357,8 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
u32 sz;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
/* struct sta_info *psta = pattrib->psta; */
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pattrib->nr_frags != 1)
sz = padapter->xmitpriv.frag_len;
@@ -404,7 +389,6 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
break;
}
-
/* check ERP protection */
if (pattrib->rtsen || pattrib->cts2self) {
if (pattrib->rtsen)
@@ -485,20 +469,12 @@ static void update_attrib_phy_info(struct adapter *padapter, struct pkt_attrib *
else
pattrib->ampdu_spacing = psta->htpriv.rx_ampdu_min_spacing;
- /* if (pattrib->ht_en && psta->htpriv.ampdu_enable) */
- /* */
- /* if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) */
- /* pattrib->ampdu_en = true; */
- /* */
-
-
pattrib->retry_ctrl = false;
#ifdef CONFIG_AUTO_AP_MODE
if (psta->isrc && psta->pid > 0)
pattrib->pctrl = true;
#endif
-
}
static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta)
@@ -548,7 +524,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
/* For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake. */
if (((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) && (pattrib->ether_type == 0x888e))
pattrib->encrypt = _NO_PRIVACY_;
-
}
switch (pattrib->encrypt) {
@@ -576,7 +551,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
else
TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
-
memcpy(pattrib->dot11tkiptxmickey.skey, psta->dot11tkiptxmickey.skey, 16);
break;
@@ -620,7 +594,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
exit:
return res;
-
}
u8 qos_acm(u8 acm_mask, u8 priority)
@@ -658,14 +631,12 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
struct iphdr ip_hdr;
s32 UserPriority = 0;
-
_rtw_open_pktfile(ppktfile->pkt, ppktfile);
_rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
/* get UserPriority from IP hdr */
if (pattrib->ether_type == 0x0800) {
_rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
-/* UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
UserPriority = ip_hdr.tos >> 5;
}
pattrib->priority = UserPriority;
@@ -675,7 +646,6 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib *pattrib)
{
- uint i;
struct pkt_file pktfile;
struct sta_info *psta = NULL;
struct ethhdr etherhdr;
@@ -689,15 +659,13 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib);
_rtw_open_pktfile(pkt, &pktfile);
- i = _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
+ _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
pattrib->ether_type = ntohs(etherhdr.h_proto);
-
memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
-
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
@@ -748,8 +716,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
}
}
-
-
} else if (0x888e == pattrib->ether_type) {
DBG_871X_LEVEL(_drv_always_, "send eapol packet\n");
}
@@ -804,8 +770,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
return _FAIL;
}
-
-
/* TODO:_lock */
if (update_attrib_sec_info(padapter, pattrib, psta) == _FAIL) {
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec);
@@ -815,8 +779,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
update_attrib_phy_info(padapter, pattrib, psta);
- /* DBG_8192C("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
-
pattrib->psta = psta;
/* TODO:_unlock */
@@ -839,7 +801,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
if (pmlmepriv->acm_mask != 0)
pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
-
}
}
@@ -854,7 +815,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
sint curfragnum, length;
u8 *pframe, *payload, mic[8];
struct mic_data micdata;
- /* struct sta_info *stainfo; */
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -862,54 +822,23 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
u8 hw_hdr_offset = 0;
sint bmcst = IS_MCAST(pattrib->ra);
-/*
- if (pattrib->psta)
- {
- stainfo = pattrib->psta;
- }
- else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- stainfo =rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
- }
-
- if (stainfo == NULL)
- {
- DBG_871X("%s, psta ==NUL\n", __func__);
- return _FAIL;
- }
-
- if (!(stainfo->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
- return _FAIL;
- }
-*/
-
hw_hdr_offset = TXDESC_OFFSET;
- if (pattrib->encrypt == _TKIP_) { /* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
+ if (pattrib->encrypt == _TKIP_) {
/* encode mic code */
- /* if (stainfo!= NULL) */
{
u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
pframe = pxmitframe->buf_addr + hw_hdr_offset;
if (bmcst) {
- if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16)) {
- /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
- /* msleep(10); */
+ if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
return _FAIL;
- }
/* start to calculate the mic code */
rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
} else {
- if (!memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16)) {
- /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
- /* msleep(10); */
+ if (!memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16))
return _FAIL;
- }
/* start to calculate the mic code */
rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]);
}
@@ -926,14 +855,11 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
rtw_secmicappend(&micdata, &pframe[16], 6);
else
rtw_secmicappend(&micdata, &pframe[10], 6);
-
}
- /* if (pqospriv->qos_option == 1) */
if (pattrib->qos_en)
priority[0] = (u8)pxmitframe->attrib.priority;
-
rtw_secmicappend(&micdata, &priority[0], 4);
payload = pframe;
@@ -956,7 +882,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum =%d length =%d pattrib->icv_len =%d", curfragnum, length, pattrib->icv_len));
}
}
- rtw_secgetmic(&micdata, &(mic[0]));
+ rtw_secgetmic(&micdata, &mic[0]);
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n"));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz =%d!!!\n", pattrib->last_txcmdsz));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]= 0x%.2x , mic[1]= 0x%.2x , mic[2]= 0x%.2x , mic[3]= 0x%.2x\n\
@@ -964,7 +890,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
/* add mic code and add the mic code length in last_txcmdsz */
- memcpy(payload, &(mic[0]), 8);
+ memcpy(payload, &mic[0], 8);
pattrib->last_txcmdsz += 8;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ========last pkt ========\n"));
@@ -975,9 +901,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
*(payload+curfragnum+4), *(payload+curfragnum+5), *(payload+curfragnum+6), *(payload+curfragnum+7)));
}
/*
- else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo == NULL!!!\n"));
- }
*/
}
return _SUCCESS;
@@ -985,13 +908,9 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
-
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- /* struct security_priv *psecuritypriv =&padapter->securitypriv; */
- /* if ((psecuritypriv->sw_encrypt)||(pattrib->bswenc)) */
if (pattrib->bswenc) {
- /* DBG_871X("start xmitframe_swencrypt\n"); */
RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n"));
switch (pattrib->encrypt) {
case _WEP40_:
@@ -1007,7 +926,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
default:
break;
}
-
} else
RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
@@ -1030,7 +948,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
SetFrameSubType(fctrl, pattrib->subtype);
if (pattrib->subtype & WIFI_DATA_TYPE) {
- if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
/* to_ds = 1, fr_ds = 0; */
{
@@ -1044,8 +962,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (pqospriv->qos_option)
qos_option = true;
-
- } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)) {
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
/* to_ds = 0, fr_ds = 1; */
SetFrDs(fctrl);
memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
@@ -1106,7 +1023,6 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
return _FAIL;
}
-
if (psta) {
psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
@@ -1119,7 +1035,6 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
pattrib->ampdu_en = true;
-
/* re-check if enable ampdu by BA_starting_seqctrl */
if (pattrib->ampdu_en == true) {
u16 tx_seq;
@@ -1128,24 +1043,19 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
/* check BA_starting_seqctrl */
if (SN_LESS(pattrib->seqnum, tx_seq)) {
- /* DBG_871X("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
pattrib->ampdu_en = false;/* AGG BK */
} else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
pattrib->ampdu_en = true;/* AGG EN */
} else {
- /* DBG_871X("tx ampdu over run\n"); */
psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
pattrib->ampdu_en = true;/* AGG EN */
}
-
}
}
}
-
} else {
-
}
exit:
@@ -1203,9 +1113,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
u8 *pframe, *mem_start;
u8 hw_hdr_offset;
- /* struct sta_info *psta; */
- /* struct sta_priv *pstapriv = &padapter->stapriv; */
- /* struct mlme_priv *pmlmepriv = &padapter->mlmepriv; */
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
@@ -1215,30 +1122,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
s32 bmcst = IS_MCAST(pattrib->ra);
s32 res = _SUCCESS;
-/*
- if (pattrib->psta)
- {
- psta = pattrib->psta;
- } else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
- }
-
- if (psta == NULL)
- {
-
- DBG_871X("%s, psta ==NUL\n", __func__);
- return _FAIL;
- }
-
-
- if (!(psta->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
- return _FAIL;
- }
-*/
if (!pxmitframe->buf_addr) {
DBG_8192C("==> %s buf_addr == NULL\n", __func__);
return _FAIL;
@@ -1293,10 +1176,8 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
mpdu_len -= llc_sz;
}
- if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc))
mpdu_len -= pattrib->icv_len;
- }
-
if (bmcst) {
/* don't do fragment to broadcast/multicast packets */
@@ -1330,7 +1211,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
-
}
if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
@@ -1410,7 +1290,8 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
pmlmeext->mgnt_80211w_IPN++;
/* add MME IE with MIC all zero, MME string doesn't include element id and length */
- pframe = rtw_set_ie(pframe, _MME_IE_, 16, MME, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _MME_IE_, 16,
+ MME, &pattrib->pktlen);
pattrib->last_txcmdsz = pattrib->pktlen;
/* total frame length - header length */
frame_body_len = pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr);
@@ -1441,7 +1322,6 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (!psta) {
-
DBG_871X("%s, psta ==NUL\n", __func__);
goto xmitframe_coalesce_fail;
}
@@ -1451,7 +1331,6 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
goto xmitframe_coalesce_fail;
}
- /* DBG_871X("%s, action frame category =%d\n", __func__, pframe[WLAN_HDR_A3_LEN]); */
/* according 802.11-2012 standard, these five types are not robust types */
if (subtype == WIFI_ACTION &&
(pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_PUBLIC ||
@@ -1550,7 +1429,6 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
{
-
uint protection;
u8 *perp;
sint erp_len;
@@ -1582,7 +1460,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
}
break;
-
}
}
@@ -1666,7 +1543,6 @@ struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
pxmitbuf->priv_data = pcmdframe;
return pcmdframe;
-
}
struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
@@ -1681,14 +1557,13 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
if (list_empty(&pfree_queue->queue)) {
pxmitbuf = NULL;
} else {
-
phead = get_list_head(pfree_queue);
plist = get_next(phead);
pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
- list_del_init(&(pxmitbuf->list));
+ list_del_init(&pxmitbuf->list);
}
if (pxmitbuf) {
@@ -1697,7 +1572,6 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
DBG_871X("DBG_XMIT_BUF_EXT ALLOC no =%d, free_xmit_extbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
#endif
-
pxmitbuf->priv_data = NULL;
pxmitbuf->len = 0;
@@ -1708,7 +1582,6 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__);
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
}
-
}
spin_unlock_irqrestore(&pfree_queue->lock, irqL);
@@ -1728,7 +1601,7 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
+ list_add_tail(&pxmitbuf->list, get_list_head(pfree_queue));
pxmitpriv->free_xmit_extbuf_cnt++;
#ifdef DBG_XMIT_BUF_EXT
DBG_871X("DBG_XMIT_BUF_EXT FREE no =%d, free_xmit_extbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
@@ -1746,21 +1619,18 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
struct list_head *plist, *phead;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- /* DBG_871X("+rtw_alloc_xmitbuf\n"); */
-
spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
if (list_empty(&pfree_xmitbuf_queue->queue)) {
pxmitbuf = NULL;
} else {
-
phead = get_list_head(pfree_xmitbuf_queue);
plist = get_next(phead);
pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
- list_del_init(&(pxmitbuf->list));
+ list_del_init(&pxmitbuf->list);
}
if (pxmitbuf) {
@@ -1768,7 +1638,6 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
#ifdef DBG_XMIT_BUF
DBG_871X("DBG_XMIT_BUF ALLOC no =%d, free_xmitbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
#endif
- /* DBG_871X("alloc, free_xmitbuf_cnt =%d\n", pxmitpriv->free_xmitbuf_cnt); */
pxmitbuf->priv_data = NULL;
@@ -1797,8 +1666,6 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
_irqL irqL;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- /* DBG_871X("+rtw_free_xmitbuf\n"); */
-
if (!pxmitbuf)
return _FAIL;
@@ -1815,10 +1682,10 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
+ list_add_tail(&pxmitbuf->list,
+ get_list_head(pfree_xmitbuf_queue));
pxmitpriv->free_xmitbuf_cnt++;
- /* DBG_871X("FREE, free_xmitbuf_cnt =%d\n", pxmitpriv->free_xmitbuf_cnt); */
#ifdef DBG_XMIT_BUF
DBG_871X("DBG_XMIT_BUF FREE no =%d, free_xmitbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
#endif
@@ -1834,7 +1701,6 @@ static void rtw_init_xmitframe(struct xmit_frame *pxframe)
pxframe->pxmitbuf = NULL;
memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
- /* pxframe->attrib.psta = NULL; */
pxframe->frame_tag = DATA_FRAMETAG;
@@ -1879,7 +1745,7 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
- list_del_init(&(pxframe->list));
+ list_del_init(&pxframe->list);
pxmitpriv->free_xmitframe_cnt--;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt =%d\n", pxmitpriv->free_xmitframe_cnt));
}
@@ -1906,7 +1772,7 @@ struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv)
plist = get_next(phead);
pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
- list_del_init(&(pxframe->list));
+ list_del_init(&pxframe->list);
pxmitpriv->free_xframe_ext_cnt--;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe_ext():free_xmitframe_cnt =%d\n", pxmitpriv->free_xframe_ext_cnt));
}
@@ -1974,7 +1840,6 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
else if (pxmitframe->ext_tag == 1)
queue = &pxmitpriv->free_xframe_ext_queue;
else {
-
}
spin_lock_bh(&queue->lock);
@@ -2006,21 +1871,19 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead;
struct xmit_frame *pxmitframe;
- spin_lock_bh(&(pframequeue->lock));
+ spin_lock_bh(&pframequeue->lock);
phead = get_list_head(pframequeue);
plist = get_next(phead);
while (phead != plist) {
-
pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
plist = get_next(plist);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
-
}
- spin_unlock_bh(&(pframequeue->lock));
+ spin_unlock_bh(&pframequeue->lock);
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -2029,7 +1892,6 @@ s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitfram
if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
("rtw_xmitframe_enqueue: drop xmit pkt for classifier fail\n"));
-/* pxmitframe->pkt = NULL; */
return _FAIL;
}
@@ -2043,21 +1905,21 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
switch (up) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
*(ac) = 3;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n"));
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
*(ac) = 1;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n"));
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
*(ac) = 0;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n"));
break;
@@ -2065,11 +1927,10 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
*(ac) = 2;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n"));
break;
-
}
return ptxservq;
@@ -2081,7 +1942,6 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
*/
s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- /* _irqL irqL0; */
u8 ac_index;
struct sta_info *psta;
struct tx_servq *ptxservq;
@@ -2091,15 +1951,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class);
-/*
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta = rtw_get_stainfo(pstapriv, pattrib->ra);
- }
-*/
-
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (pattrib->psta != psta) {
DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_sta);
@@ -2123,22 +1974,13 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
- /* spin_lock_irqsave(&pstapending->lock, irqL0); */
-
- if (list_empty(&ptxservq->tx_pending)) {
+ if (list_empty(&ptxservq->tx_pending))
list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
- }
-
- /* spin_lock_irqsave(&ptxservq->sta_pending.lock, irqL1); */
list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
ptxservq->qcnt++;
phwxmits[ac_index].accnt++;
- /* spin_unlock_irqrestore(&ptxservq->sta_pending.lock, irqL1); */
-
- /* spin_unlock_irqrestore(&pstapending->lock, irqL0); */
-
exit:
return res;
@@ -2161,45 +2003,24 @@ s32 rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits = pxmitpriv->hwxmits;
if (pxmitpriv->hwxmit_entry == 5) {
- /* pxmitpriv->bmc_txqueue.head = 0; */
- /* hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; */
hwxmits[0] .sta_queue = &pxmitpriv->bm_pending;
- /* pxmitpriv->vo_txqueue.head = 0; */
- /* hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; */
hwxmits[1] .sta_queue = &pxmitpriv->vo_pending;
- /* pxmitpriv->vi_txqueue.head = 0; */
- /* hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; */
hwxmits[2] .sta_queue = &pxmitpriv->vi_pending;
- /* pxmitpriv->bk_txqueue.head = 0; */
- /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
- /* pxmitpriv->be_txqueue.head = 0; */
- /* hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; */
hwxmits[4] .sta_queue = &pxmitpriv->be_pending;
-
} else if (pxmitpriv->hwxmit_entry == 4) {
-
- /* pxmitpriv->vo_txqueue.head = 0; */
- /* hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; */
hwxmits[0] .sta_queue = &pxmitpriv->vo_pending;
- /* pxmitpriv->vi_txqueue.head = 0; */
- /* hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; */
hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
- /* pxmitpriv->be_txqueue.head = 0; */
- /* hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; */
hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
- /* pxmitpriv->bk_txqueue.head = 0; */
- /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
} else {
-
}
return _SUCCESS;
@@ -2207,24 +2028,17 @@ s32 rtw_alloc_hwxmits(struct adapter *padapter)
void rtw_free_hwxmits(struct adapter *padapter)
{
- struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- hwxmits = pxmitpriv->hwxmits;
- if (hwxmits)
- kfree((u8 *)hwxmits);
+ kfree(pxmitpriv->hwxmits);
}
void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry)
{
sint i;
- for (i = 0; i < entry; i++, phwxmit++) {
- /* spin_lock_init(&phwxmit->xmit_lock); */
- /* INIT_LIST_HEAD(&phwxmit->pending); */
- /* phwxmit->txcmdcnt = 0; */
+ for (i = 0; i < entry; i++, phwxmit++)
phwxmit->accnt = 0;
- }
}
u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
@@ -2259,11 +2073,9 @@ u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
default:
addr = MGT_QUEUE_INX;
break;
-
}
return addr;
-
}
static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib)
@@ -2310,7 +2122,7 @@ s32 rtw_xmit(struct adapter *padapter, _pkt **ppkt)
if (!pxmitframe) {
drop_cnt++;
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: no more pxmitframe\n"));
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("%s: no more pxmitframe\n", __func__));
DBG_COUNTER(padapter->tx_logs.core_tx_err_pxmitframe);
return -1;
}
@@ -2318,7 +2130,7 @@ s32 rtw_xmit(struct adapter *padapter, _pkt **ppkt)
res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
if (res == _FAIL) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: update attrib fail\n"));
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("%s: update attrib fail\n", __func__));
#ifdef DBG_TX_DROP_FRAME
DBG_871X("DBG_TX_DROP_FRAME %s update attrib fail\n", __func__);
#endif
@@ -2355,7 +2167,6 @@ inline bool xmitframe_hiq_filter(struct xmit_frame *xmitframe)
struct registry_priv *registry = &adapter->registrypriv;
if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_SPECIAL) {
-
struct pkt_attrib *attrib = &xmitframe->attrib;
if (attrib->ether_type == 0x0806
@@ -2389,17 +2200,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_fwstate);
return ret;
}
-/*
- if (pattrib->psta)
- {
- psta = pattrib->psta;
- }
- else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta =rtw_get_stainfo(pstapriv, pattrib->ra);
- }
-*/
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (pattrib->psta != psta) {
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_sta);
@@ -2421,16 +2221,13 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (pattrib->triggered == 1) {
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_trigger);
- /* DBG_871X("directly xmit pspoll_triggered packet\n"); */
- /* pattrib->triggered = 0; */
if (bmcst && xmitframe_hiq_filter(pxmitframe))
pattrib->qsel = 0x11;/* HIQ */
return ret;
}
-
if (bmcst) {
spin_lock_bh(&psta->sleep_q.lock);
@@ -2439,8 +2236,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
list_del_init(&pxmitframe->list);
- /* spin_lock_bh(&psta->sleep_q.lock); */
-
list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
psta->sleepq_len++;
@@ -2448,32 +2243,24 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (!(pstapriv->tim_bitmap & BIT(0)))
update_tim = true;
- pstapriv->tim_bitmap |= BIT(0);/* */
+ pstapriv->tim_bitmap |= BIT(0);
pstapriv->sta_dz_bitmap |= BIT(0);
- /* DBG_871X("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
-
- if (update_tim) {
+ if (update_tim)
update_beacon(padapter, _TIM_IE_, NULL, true);
- } else {
+ else
chk_bmc_sleepq_cmd(padapter);
- }
-
- /* spin_unlock_bh(&psta->sleep_q.lock); */
ret = true;
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_mcast);
-
}
spin_unlock_bh(&psta->sleep_q.lock);
return ret;
-
}
-
spin_lock_bh(&psta->sleep_q.lock);
if (psta->state&WIFI_SLEEP_STATE) {
@@ -2482,8 +2269,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (pstapriv->sta_dz_bitmap & BIT(psta->aid)) {
list_del_init(&pxmitframe->list);
- /* spin_lock_bh(&psta->sleep_q.lock); */
-
list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
psta->sleepq_len++;
@@ -2517,32 +2302,20 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
pstapriv->tim_bitmap |= BIT(psta->aid);
- /* DBG_871X("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
-
if (update_tim)
- /* DBG_871X("sleepq_len == 1, update BCNTIM\n"); */
/* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, true);
}
- /* spin_unlock_bh(&psta->sleep_q.lock); */
-
- /* if (psta->sleepq_len > (NR_XMITFRAME>>3)) */
- /* */
- /* wakeup_sta_to_xmit(padapter, psta); */
- /* */
-
ret = true;
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_ucast);
}
-
}
spin_unlock_bh(&psta->sleep_q.lock);
return ret;
-
}
static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
@@ -2575,11 +2348,8 @@ static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struc
ptxservq->qcnt--;
phwxmits[ac_index].accnt--;
} else {
- /* DBG_871X("xmitframe_enqueue_for_sleeping_sta return false\n"); */
}
-
}
-
}
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
@@ -2594,34 +2364,28 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
/* for BC/MC Frames */
psta_bmc = rtw_get_bcmc_stainfo(padapter);
-
spin_lock_bh(&pxmitpriv->lock);
psta->state |= WIFI_SLEEP_STATE;
pstapriv->sta_dz_bitmap |= BIT(psta->aid);
-
-
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->vo_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->vi_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
+ list_del_init(&pstaxmitpriv->bk_q.tx_pending);
/* for BC/MC Frames */
pstaxmitpriv = &psta_bmc->sta_xmitpriv;
dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
spin_unlock_bh(&pxmitpriv->lock);
}
@@ -2637,8 +2401,6 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
psta_bmc = rtw_get_bcmc_stainfo(padapter);
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
@@ -2690,26 +2452,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
-/*
- spin_unlock_bh(&psta->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe) == true)
- {
- rtw_os_xmit_complete(padapter, pxmitframe);
- }
- spin_lock_bh(&psta->sleep_q.lock);
-*/
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
-
-
}
if (psta->sleepq_len == 0) {
- if (pstapriv->tim_bitmap & BIT(psta->aid)) {
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
+ if (pstapriv->tim_bitmap & BIT(psta->aid))
update_mask = BIT(0);
- }
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -2746,44 +2494,25 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
else
pxmitframe->attrib.mdata = 0;
-
pxmitframe->attrib.triggered = 1;
-/*
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe) == true)
- {
- rtw_os_xmit_complete(padapter, pxmitframe);
- }
- spin_lock_bh(&psta_bmc->sleep_q.lock);
-
-*/
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
-
}
if (psta_bmc->sleepq_len == 0) {
- if (pstapriv->tim_bitmap & BIT(0)) {
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
+ if (pstapriv->tim_bitmap & BIT(0))
update_mask |= BIT(1);
- }
+
pstapriv->tim_bitmap &= ~BIT(0);
pstapriv->sta_dz_bitmap &= ~BIT(0);
}
-
}
_exit:
- /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
if (update_mask)
- /* update_BCNTIM(padapter); */
- /* printk("%s => call update_beacon\n", __func__); */
update_beacon(padapter, _TIM_IE_, NULL, true);
-
}
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
@@ -2794,8 +2523,6 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
@@ -2848,16 +2575,10 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, true);
- /* update_mask = BIT(0); */
}
-
}
- /* spin_unlock_bh(&psta->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
}
@@ -2875,7 +2596,7 @@ void enqueue_pending_xmitbuf(
list_add_tail(&pxmitbuf->list, get_list_head(pqueue));
spin_unlock_bh(&pqueue->lock);
- complete(&(pri_adapter->xmitpriv.xmit_comp));
+ complete(&pri_adapter->xmitpriv.xmit_comp);
}
void enqueue_pending_xmitbuf_to_head(
@@ -2898,7 +2619,6 @@ struct xmit_buf *dequeue_pending_xmitbuf(
struct xmit_buf *pxmitbuf;
struct __queue *pqueue;
-
pxmitbuf = NULL;
pqueue = &pxmitpriv->pending_xmitbuf_queue;
@@ -2924,7 +2644,6 @@ struct xmit_buf *dequeue_pending_xmitbuf_under_survey(
struct xmit_buf *pxmitbuf;
struct __queue *pqueue;
-
pxmitbuf = NULL;
pqueue = &pxmitpriv->pending_xmitbuf_queue;
@@ -2983,7 +2702,6 @@ int rtw_xmit_thread(void *context)
s32 err;
struct adapter *padapter;
-
err = _SUCCESS;
padapter = context;
@@ -2992,7 +2710,7 @@ int rtw_xmit_thread(void *context)
do {
err = rtw_hal_xmit_thread_handler(padapter);
flush_signals_thread();
- } while (_SUCCESS == err);
+ } while (err == _SUCCESS);
complete(&padapter->xmitpriv.terminate_xmitthread_comp);
@@ -3022,9 +2740,8 @@ int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg)
status = sctx->status;
}
- if (status == RTW_SCTX_DONE_SUCCESS) {
+ if (status == RTW_SCTX_DONE_SUCCESS)
ret = _SUCCESS;
- }
return ret;
}
@@ -3075,9 +2792,8 @@ void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
{
struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
- if (pxmitpriv->ack_tx) {
+ if (pxmitpriv->ack_tx)
rtw_sctx_done_err(&pack_tx_ops, status);
- } else {
+ else
DBG_871X("%s ack_tx not set\n", __func__);
- }
}
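The rtw_free_hwxmits() hunk above drops the explicit NULL check and the u8 * cast before kfree(); kfree(NULL) is defined to be a no-op, just as userspace free(NULL) is, so the guard adds nothing. A minimal, standalone userspace sketch of the same pattern follows; the struct and function names here are illustrative stand-ins, not driver code:

#include <stdlib.h>

struct xmit_priv_example {          /* hypothetical stand-in for the driver struct */
	void *hwxmits;
};

static void free_hwxmits_example(struct xmit_priv_example *p)
{
	/* free(NULL) -- like kfree(NULL) in the kernel -- does nothing,
	 * so no "if (p->hwxmits)" guard or cast is needed. */
	free(p->hwxmits);
	p->hwxmits = NULL;
}

int main(void)
{
	struct xmit_priv_example p = { 0 };
	free_hwxmits_example(&p);   /* safe even though hwxmits is NULL */
	return 0;
}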
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 3239d37087c8..1ca9063a269f 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -402,8 +402,6 @@ static void GetDeltaSwingTable_8723B(
*TemperatureUP_B = (u8 *)DeltaSwingTableIdx_2GA_P_8188E;
*TemperatureDOWN_B = (u8 *)DeltaSwingTableIdx_2GA_N_8188E;
}
-
- return;
}
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 6e4a1fcb8790..d5793e4614bf 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -1315,7 +1315,7 @@ void EXhalbtcoutsrc_DisplayBtCoexInfo(PBTC_COEXIST pBtCoexist)
/*
* Description:
- *Run BT-Coexist mechansim or not
+ *Run BT-Coexist mechanism or not
*
*/
void hal_btcoex_SetBTCoexist(struct adapter *padapter, u8 bBtExist)
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index eddd56abbb2d..109bd85b0cd8 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -30,7 +30,6 @@ void rtw_hal_data_deinit(struct adapter *padapter)
{
if (is_primary_adapter(padapter)) { /* if (padapter->isprimary) */
if (padapter->HalData) {
- phy_free_filebuf(padapter);
vfree(padapter->HalData);
padapter->HalData = NULL;
padapter->hal_data_sz = 0;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 6539bee9b5ba..eb7de3617d83 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -2202,1079 +2202,3 @@ void Hal_ChannelPlanToRegulation(struct adapter *Adapter, u16 ChannelPlan)
break;
}
}
-
-
-static char file_path_bs[PATH_MAX];
-
-#define GetLineFromBuffer(buffer) strsep(&buffer, "\n")
-
-int phy_ConfigMACWithParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_MAC_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->mac_reg_len == 0) && !pHalData->mac_reg) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->mac_reg = vzalloc(rlen);
- if (pHalData->mac_reg) {
- memcpy(pHalData->mac_reg, pHalData->para_file_buf, rlen);
- pHalData->mac_reg_len = rlen;
- } else
- DBG_871X("%s mac_reg alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->mac_reg_len != 0) && (pHalData->mac_reg != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->mac_reg, pHalData->mac_reg_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove))
- rtw_write8(Adapter, u4bRegOffset, (u8)u4bRegValue);
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-int phy_ConfigBBWithParaFile(
- struct adapter *Adapter, char *pFileName, u32 ConfigType
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
- char *pBuf = NULL;
- u32 *pBufLen = NULL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_BB_PARA_FILE))
- return rtStatus;
-
- switch (ConfigType) {
- case CONFIG_BB_PHY_REG:
- pBuf = pHalData->bb_phy_reg;
- pBufLen = &pHalData->bb_phy_reg_len;
- break;
- case CONFIG_BB_AGC_TAB:
- pBuf = pHalData->bb_agc_tab;
- pBufLen = &pHalData->bb_agc_tab_len;
- break;
- default:
- DBG_871X("Unknown ConfigType!! %d\r\n", ConfigType);
- break;
- }
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pBuf = vzalloc(rlen);
- if (pBuf) {
- memcpy(pBuf, pHalData->para_file_buf, rlen);
- *pBufLen = rlen;
-
- switch (ConfigType) {
- case CONFIG_BB_PHY_REG:
- pHalData->bb_phy_reg = pBuf;
- break;
- case CONFIG_BB_AGC_TAB:
- pHalData->bb_agc_tab = pBuf;
- break;
- }
- } else
- DBG_871X("%s(): ConfigType %d alloc fail !\n", __func__, ConfigType);
- }
- }
- } else {
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
- else if (u4bRegOffset == 0xfe || u4bRegOffset == 0xffe)
- msleep(50);
- else if (u4bRegOffset == 0xfd)
- mdelay(5);
- else if (u4bRegOffset == 0xfc)
- mdelay(1);
- else if (u4bRegOffset == 0xfb)
- udelay(50);
- else if (u4bRegOffset == 0xfa)
- udelay(5);
- else if (u4bRegOffset == 0xf9)
- udelay(1);
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- /* DBG_871X("[BB-ADDR]%03lX =%08lX\n", u4bRegOffset, u4bRegValue); */
- PHY_SetBBReg(Adapter, u4bRegOffset, bMaskDWord, u4bRegValue);
-
- if (u4bRegOffset == 0xa24)
- pHalData->odmpriv.RFCalibrateInfo.RegA24 = u4bRegValue;
-
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static void phy_DecryptBBPgParaFile(struct adapter *Adapter, char *buffer)
-{
- u32 i = 0, j = 0;
- u8 map[95] = {0};
- u8 currentChar;
- char *BufOfLines, *ptmp;
-
- /* DBG_871X("=====>phy_DecryptBBPgParaFile()\n"); */
- /* 32 the ascii code of the first visable char, 126 the last one */
- for (i = 0; i < 95; ++i)
- map[i] = (u8) (94 - i);
-
- ptmp = buffer;
- i = 0;
- for (BufOfLines = GetLineFromBuffer(ptmp); BufOfLines != NULL; BufOfLines = GetLineFromBuffer(ptmp)) {
- /* DBG_871X("Encrypted Line: %s\n", BufOfLines); */
-
- for (j = 0; j < strlen(BufOfLines); ++j) {
- currentChar = BufOfLines[j];
-
- if (currentChar == '\0')
- break;
-
- currentChar -= (u8) ((((i + j) * 3) % 128));
-
- BufOfLines[j] = map[currentChar - 32] + 32;
- }
- /* DBG_871X("Decrypted Line: %s\n", BufOfLines); */
- if (strlen(BufOfLines) != 0)
- i++;
- BufOfLines[strlen(BufOfLines)] = '\n';
- }
-}
-
-static int phy_ParseBBPgParaFile(struct adapter *Adapter, char *buffer)
-{
- int rtStatus = _SUCCESS;
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegMask, u4bRegValue;
- u32 u4bMove;
- bool firstLine = true;
- u8 tx_num = 0;
- u8 band = 0, rf_path = 0;
-
- /* DBG_871X("=====>phy_ParseBBPgParaFile()\n"); */
-
- if (Adapter->registrypriv.RegDecryptCustomFile == 1)
- phy_DecryptBBPgParaFile(Adapter, buffer);
-
- ptmp = buffer;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- if (isAllSpaceOrTab(szLine, sizeof(*szLine)))
- continue;
-
- /* Get header info (relative value or exact value) */
- if (firstLine) {
- if (eqNByte(szLine, (u8 *)("#[v1]"), 5)) {
-
- pHalData->odmpriv.PhyRegPgVersion = szLine[3] - '0';
- /* DBG_871X("This is a new format PHY_REG_PG.txt\n"); */
- } else if (eqNByte(szLine, (u8 *)("#[v0]"), 5)) {
- pHalData->odmpriv.PhyRegPgVersion = szLine[3] - '0';
- /* DBG_871X("This is a old format PHY_REG_PG.txt ok\n"); */
- } else {
- DBG_871X("The format in PHY_REG_PG are invalid %s\n", szLine);
- return _FAIL;
- }
-
- if (eqNByte(szLine + 5, (u8 *)("[Exact]#"), 8)) {
- pHalData->odmpriv.PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;
- /* DBG_871X("The values in PHY_REG_PG are exact values ok\n"); */
- firstLine = false;
- continue;
- } else if (eqNByte(szLine + 5, (u8 *)("[Relative]#"), 11)) {
- pHalData->odmpriv.PhyRegPgValueType = PHY_REG_PG_RELATIVE_VALUE;
- /* DBG_871X("The values in PHY_REG_PG are relative values ok\n"); */
- firstLine = false;
- continue;
- } else {
- DBG_871X("The values in PHY_REG_PG are invalid %s\n", szLine);
- return _FAIL;
- }
- }
-
- if (pHalData->odmpriv.PhyRegPgVersion == 0) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- szLine += u4bMove;
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
-
- /* Get 2nd hex value as register mask. */
- if (GetHexValueFromString(szLine, &u4bRegMask, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_RELATIVE_VALUE) {
- /* Get 3rd hex value as register value. */
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_StoreTxPowerByRate(Adapter, 0, 0, 1, u4bRegOffset, u4bRegMask, u4bRegValue);
- /* DBG_871X("[ADDR] %03X =%08X Mask =%08x\n", u4bRegOffset, u4bRegValue, u4bRegMask); */
- } else
- return _FAIL;
- } else if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
- u32 combineValue = 0;
- u8 integer = 0, fraction = 0;
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
- PHY_StoreTxPowerByRate(Adapter, 0, 0, 1, u4bRegOffset, u4bRegMask, combineValue);
-
- /* DBG_871X("[ADDR] 0x%3x = 0x%4x\n", u4bRegOffset, combineValue); */
- }
- }
- } else if (pHalData->odmpriv.PhyRegPgVersion > 0) {
- u32 index = 0;
-
- if (eqNByte(szLine, "0xffff", 6))
- break;
-
- if (!eqNByte("#[END]#", szLine, 7)) {
- /* load the table label info */
- if (szLine[0] == '#') {
- index = 0;
- if (eqNByte(szLine, "#[2.4G]", 7)) {
- band = BAND_ON_2_4G;
- index += 8;
- } else if (eqNByte(szLine, "#[5G]", 5)) {
- band = BAND_ON_5G;
- index += 6;
- } else {
- DBG_871X("Invalid band %s in PHY_REG_PG.txt\n", szLine);
- return _FAIL;
- }
-
- rf_path = szLine[index] - 'A';
- /* DBG_871X(" Table label Band %d, RfPath %d\n", band, rf_path); */
- } else { /* load rows of tables */
- if (szLine[1] == '1')
- tx_num = RF_1TX;
- else if (szLine[1] == '2')
- tx_num = RF_2TX;
- else if (szLine[1] == '3')
- tx_num = RF_3TX;
- else if (szLine[1] == '4')
- tx_num = RF_4TX;
- else {
- DBG_871X("Invalid row in PHY_REG_PG.txt %c\n", szLine[1]);
- return _FAIL;
- }
-
- while (szLine[index] != ']')
- ++index;
- ++index;/* skip ] */
-
- /* Get 2nd hex value as register offset. */
- szLine += index;
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- /* Get 2nd hex value as register mask. */
- if (GetHexValueFromString(szLine, &u4bRegMask, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_RELATIVE_VALUE) {
- /* Get 3rd hex value as register value. */
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_StoreTxPowerByRate(Adapter, band, rf_path, tx_num, u4bRegOffset, u4bRegMask, u4bRegValue);
- /* DBG_871X("[ADDR] %03X (tx_num %d) =%08X Mask =%08x\n", u4bRegOffset, tx_num, u4bRegValue, u4bRegMask); */
- } else
- return _FAIL;
- } else if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
- u32 combineValue = 0;
- u8 integer = 0, fraction = 0;
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
- PHY_StoreTxPowerByRate(Adapter, band, rf_path, tx_num, u4bRegOffset, u4bRegMask, combineValue);
-
- /* DBG_871X("[ADDR] 0x%3x (tx_num %d) = 0x%4x\n", u4bRegOffset, tx_num, combineValue); */
- }
- }
- }
- }
- }
- }
- /* DBG_871X("<=====phy_ParseBBPgParaFile()\n"); */
- return rtStatus;
-}
-
-int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_BB_PG_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->bb_phy_reg_pg_len == 0) && !pHalData->bb_phy_reg_pg) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->bb_phy_reg_pg = vzalloc(rlen);
- if (pHalData->bb_phy_reg_pg) {
- memcpy(pHalData->bb_phy_reg_pg, pHalData->para_file_buf, rlen);
- pHalData->bb_phy_reg_pg_len = rlen;
- } else
- DBG_871X("%s bb_phy_reg_pg alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->bb_phy_reg_pg_len != 0) && (pHalData->bb_phy_reg_pg != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->bb_phy_reg_pg, pHalData->bb_phy_reg_pg_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("phy_ConfigBBWithPgParaFile(): read %s ok\n", pFileName); */
- phy_ParseBBPgParaFile(Adapter, pHalData->para_file_buf);
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-int PHY_ConfigRFWithParaFile(
- struct adapter *Adapter, char *pFileName, u8 eRFPath
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
- u16 i;
- char *pBuf = NULL;
- u32 *pBufLen = NULL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_PARA_FILE))
- return rtStatus;
-
- switch (eRFPath) {
- case ODM_RF_PATH_A:
- pBuf = pHalData->rf_radio_a;
- pBufLen = &pHalData->rf_radio_a_len;
- break;
- case ODM_RF_PATH_B:
- pBuf = pHalData->rf_radio_b;
- pBufLen = &pHalData->rf_radio_b_len;
- break;
- default:
- DBG_871X("Unknown RF path!! %d\r\n", eRFPath);
- break;
- }
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pBuf = vzalloc(rlen);
- if (pBuf) {
- memcpy(pBuf, pHalData->para_file_buf, rlen);
- *pBufLen = rlen;
-
- switch (eRFPath) {
- case ODM_RF_PATH_A:
- pHalData->rf_radio_a = pBuf;
- break;
- case ODM_RF_PATH_B:
- pHalData->rf_radio_b = pBuf;
- break;
- }
- } else
- DBG_871X("%s(): eRFPath =%d alloc fail !\n", __func__, eRFPath);
- }
- }
- } else {
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s successfully\n", __func__, pFileName); */
-
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xfe || u4bRegOffset == 0xffe) /* Deay specific ms. Only RF configuration require delay. */
- msleep(50);
- else if (u4bRegOffset == 0xfd) {
- /* mdelay(5); */
- for (i = 0; i < 100; i++)
- udelay(MAX_STALL_TIME);
- } else if (u4bRegOffset == 0xfc) {
- /* mdelay(1); */
- for (i = 0; i < 20; i++)
- udelay(MAX_STALL_TIME);
- } else if (u4bRegOffset == 0xfb)
- udelay(50);
- else if (u4bRegOffset == 0xfa)
- udelay(5);
- else if (u4bRegOffset == 0xf9)
- udelay(1);
- else if (u4bRegOffset == 0xffff)
- break;
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_SetRFReg(Adapter, eRFPath, u4bRegOffset, bRFRegOffsetMask, u4bRegValue);
-
- /* Temp add, for frequency lock, if no delay, that may cause */
- /* frequency shift, ex: 2412MHz => 2417MHz */
- /* If frequency shift, the following action may works. */
- /* Fractional-N table in radio_a.txt */
- /* 0x2a 0x00001 channel 1 */
- /* 0x2b 0x00808 frequency divider. */
- /* 0x2b 0x53333 */
- /* 0x2c 0x0000c */
- udelay(1);
- }
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static void initDeltaSwingIndexTables(
- struct adapter *Adapter,
- char *Band,
- char *Path,
- char *Sign,
- char *Channel,
- char *Rate,
- char *Data
-)
-{
- #define STR_EQUAL_5G(_band, _path, _sign, _rate, _chnl) \
- ((strcmp(Band, _band) == 0) && (strcmp(Path, _path) == 0) && (strcmp(Sign, _sign) == 0) &&\
- (strcmp(Rate, _rate) == 0) && (strcmp(Channel, _chnl) == 0)\
- )
- #define STR_EQUAL_2G(_band, _path, _sign, _rate) \
- ((strcmp(Band, _band) == 0) && (strcmp(Path, _path) == 0) && (strcmp(Sign, _sign) == 0) &&\
- (strcmp(Rate, _rate) == 0)\
- )
-
- #define STORE_SWING_TABLE(_array, _iteratedIdx) \
- for (token = strsep(&Data, delim); token != NULL; token = strsep(&Data, delim)) {\
- sscanf(token, "%d", &idx);\
- _array[_iteratedIdx++] = (u8)idx;\
- } \
-
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
- PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
- u32 j = 0;
- char *token;
- char delim[] = ",";
- u32 idx = 0;
-
- /* DBG_871X("===>initDeltaSwingIndexTables(): Band: %s;\nPath: %s;\nSign: %s;\nChannel: %s;\nRate: %s;\n, Data: %s;\n", */
- /* Band, Path, Sign, Channel, Rate, Data); */
-
- if (STR_EQUAL_2G("2G", "A", "+", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_P, j);
- } else if (STR_EQUAL_2G("2G", "A", "-", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_N, j);
- } else if (STR_EQUAL_2G("2G", "B", "+", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_P, j);
- } else if (STR_EQUAL_2G("2G", "B", "-", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_N, j);
- } else if (STR_EQUAL_2G("2G", "A", "+", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GA_P, j);
- } else if (STR_EQUAL_2G("2G", "A", "-", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GA_N, j);
- } else if (STR_EQUAL_2G("2G", "B", "+", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GB_P, j);
- } else if (STR_EQUAL_2G("2G", "B", "-", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GB_N, j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[0], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[0], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[0], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[0], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[1], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[1], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[1], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[1], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[2], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[2], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[2], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[2], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[3], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[3], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[3], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[3], j);
- } else
- DBG_871X("===>initDeltaSwingIndexTables(): The input is invalid!!\n");
-}
-
-int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_TXPWR_TRACK_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->rf_tx_pwr_track_len == 0) && !pHalData->rf_tx_pwr_track) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->rf_tx_pwr_track = vzalloc(rlen);
- if (pHalData->rf_tx_pwr_track) {
- memcpy(pHalData->rf_tx_pwr_track, pHalData->para_file_buf, rlen);
- pHalData->rf_tx_pwr_track_len = rlen;
- } else
- DBG_871X("%s rf_tx_pwr_track alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->rf_tx_pwr_track_len != 0) && (pHalData->rf_tx_pwr_track != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->rf_tx_pwr_track, pHalData->rf_tx_pwr_track_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s successfully\n", __func__, pFileName); */
-
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- char band[5] = "", path[5] = "", sign[5] = "";
- char chnl[5] = "", rate[10] = "";
- char data[300] = ""; /* 100 is too small */
- const int len = strlen(szLine);
- int i;
-
- if (len < 10 || szLine[0] != '[')
- continue;
-
- strncpy(band, szLine+1, 2);
- strncpy(path, szLine+5, 1);
- strncpy(sign, szLine+8, 1);
-
- i = 10; /* szLine+10 */
- if (!ParseQualifiedString(szLine, &i, rate, '[', ']')) {
- /* DBG_871X("Fail to parse rate!\n"); */
- }
- if (!ParseQualifiedString(szLine, &i, chnl, '[', ']')) {
- /* DBG_871X("Fail to parse channel group!\n"); */
- }
- while (i < len && szLine[i] != '{')
- i++;
- if (!ParseQualifiedString(szLine, &i, data, '{', '}')) {
- /* DBG_871X("Fail to parse data!\n"); */
- }
-
- initDeltaSwingIndexTables(Adapter, band, path, sign, chnl, rate, data);
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static int phy_ParsePowerLimitTableFile(struct adapter *Adapter, char *buffer)
-{
- u32 i = 0, forCnt = 0;
- u8 loadingStage = 0, limitValue = 0, fraction = 0;
- char *szLine, *ptmp;
- int rtStatus = _SUCCESS;
- char band[10], bandwidth[10], rateSection[10],
- regulation[TXPWR_LMT_MAX_REGULATION_NUM][10], rfPath[10], colNumBuf[10];
- u8 colNum = 0;
-
- DBG_871X("===>phy_ParsePowerLimitTableFile()\n");
-
- if (Adapter->registrypriv.RegDecryptCustomFile == 1)
- phy_DecryptBBPgParaFile(Adapter, buffer);
-
- ptmp = buffer;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- /* skip comment */
- if (IsCommentString(szLine)) {
- continue;
- }
-
- if (loadingStage == 0) {
- for (forCnt = 0; forCnt < TXPWR_LMT_MAX_REGULATION_NUM; ++forCnt)
- memset((void *) regulation[forCnt], 0, 10);
-
- memset((void *) band, 0, 10);
- memset((void *) bandwidth, 0, 10);
- memset((void *) rateSection, 0, 10);
- memset((void *) rfPath, 0, 10);
- memset((void *) colNumBuf, 0, 10);
-
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- szLine[--i] = ' '; /* return the space in front of the regulation info */
-
- /* Parse the label of the table */
- if (!ParseQualifiedString(szLine, &i, band, ' ', ',')) {
- DBG_871X("Fail to parse band!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, bandwidth, ' ', ',')) {
- DBG_871X("Fail to parse bandwidth!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, rfPath, ' ', ',')) {
- DBG_871X("Fail to parse rf path!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, rateSection, ' ', ',')) {
- DBG_871X("Fail to parse rate!\n");
- return _FAIL;
- }
-
- loadingStage = 1;
- } else if (loadingStage == 1) {
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (!eqNByte((u8 *)(szLine + i), (u8 *)("START"), 5)) {
- DBG_871X("Lost \"## START\" label\n");
- return _FAIL;
- }
-
- loadingStage = 2;
- } else if (loadingStage == 2) {
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (!ParseQualifiedString(szLine, &i, colNumBuf, '#', '#')) {
- DBG_871X("Fail to parse column number!\n");
- return _FAIL;
- }
-
- if (!GetU1ByteIntegerFromStringInDecimal(colNumBuf, &colNum))
- return _FAIL;
-
- if (colNum > TXPWR_LMT_MAX_REGULATION_NUM) {
- DBG_871X(
- "invalid col number %d (greater than max %d)\n",
- colNum, TXPWR_LMT_MAX_REGULATION_NUM
- );
- return _FAIL;
- }
-
- for (forCnt = 0; forCnt < colNum; ++forCnt) {
- u8 regulation_name_cnt = 0;
-
- /* skip the space */
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- while (szLine[i] != ' ' && szLine[i] != '\t' && szLine[i] != '\0')
- regulation[forCnt][regulation_name_cnt++] = szLine[i++];
- /* DBG_871X("regulation %s!\n", regulation[forCnt]); */
-
- if (regulation_name_cnt == 0) {
- DBG_871X("invalid number of regulation!\n");
- return _FAIL;
- }
- }
-
- loadingStage = 3;
- } else if (loadingStage == 3) {
- char channel[10] = {0}, powerLimit[10] = {0};
- u8 cnt = 0;
-
- /* the table ends */
- if (szLine[0] == '#' && szLine[1] == '#') {
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (eqNByte((u8 *)(szLine + i), (u8 *)("END"), 3)) {
- loadingStage = 0;
- continue;
- } else {
- DBG_871X("Wrong format\n");
- DBG_871X("<===== phy_ParsePowerLimitTableFile()\n");
- return _FAIL;
- }
- }
-
- if ((szLine[0] != 'c' && szLine[0] != 'C') ||
- (szLine[1] != 'h' && szLine[1] != 'H')) {
- DBG_871X("Meet wrong channel => power limt pair\n");
- continue;
- }
- i = 2;/* move to the location behind 'h' */
-
- /* load the channel number */
- cnt = 0;
- while (szLine[i] >= '0' && szLine[i] <= '9') {
- channel[cnt] = szLine[i];
- ++cnt;
- ++i;
- }
- /* DBG_871X("chnl %s!\n", channel); */
-
- for (forCnt = 0; forCnt < colNum; ++forCnt) {
- /* skip the space between channel number and the power limit value */
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- /* load the power limit value */
- cnt = 0;
- fraction = 0;
- memset((void *) powerLimit, 0, 10);
- while ((szLine[i] >= '0' && szLine[i] <= '9') || szLine[i] == '.') {
- if (szLine[i] == '.') {
- if ((szLine[i+1] >= '0' && szLine[i+1] <= '9')) {
- fraction = szLine[i+1];
- i += 2;
- } else {
- DBG_871X("Wrong fraction in TXPWR_LMT.txt\n");
- return _FAIL;
- }
-
- break;
- }
-
- powerLimit[cnt] = szLine[i];
- ++cnt;
- ++i;
- }
-
- if (powerLimit[0] == '\0') {
- powerLimit[0] = '6';
- powerLimit[1] = '3';
- i += 2;
- } else {
- if (!GetU1ByteIntegerFromStringInDecimal(powerLimit, &limitValue))
- return _FAIL;
-
- limitValue *= 2;
- cnt = 0;
- if (fraction == '5')
- ++limitValue;
-
- /* the value is greater or equal to 100 */
- if (limitValue >= 100) {
- powerLimit[cnt++] = limitValue/100 + '0';
- limitValue %= 100;
-
- if (limitValue >= 10) {
- powerLimit[cnt++] = limitValue/10 + '0';
- limitValue %= 10;
- } else
- powerLimit[cnt++] = '0';
-
- powerLimit[cnt++] = limitValue + '0';
- } else if (limitValue >= 10) { /* the value is greater or equal to 10 */
- powerLimit[cnt++] = limitValue/10 + '0';
- limitValue %= 10;
- powerLimit[cnt++] = limitValue + '0';
- }
- /* the value is less than 10 */
- else
- powerLimit[cnt++] = limitValue + '0';
-
- powerLimit[cnt] = '\0';
- }
-
- /* DBG_871X("ch%s => %s\n", channel, powerLimit); */
-
- /* store the power limit value */
- PHY_SetTxPowerLimit(Adapter, (u8 *)regulation[forCnt], (u8 *)band,
- (u8 *)bandwidth, (u8 *)rateSection, (u8 *)rfPath, (u8 *)channel, (u8 *)powerLimit);
-
- }
- } else {
- DBG_871X("Abnormal loading stage in phy_ParsePowerLimitTableFile()!\n");
- rtStatus = _FAIL;
- break;
- }
- }
-
- DBG_871X("<===phy_ParsePowerLimitTableFile()\n");
- return rtStatus;
-}
-
-int PHY_ConfigRFWithPowerLimitTableParaFile(
- struct adapter *Adapter, char *pFileName
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_TXPWR_LMT_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->rf_tx_pwr_lmt_len == 0) && !pHalData->rf_tx_pwr_lmt) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->rf_tx_pwr_lmt = vzalloc(rlen);
- if (pHalData->rf_tx_pwr_lmt) {
- memcpy(pHalData->rf_tx_pwr_lmt, pHalData->para_file_buf, rlen);
- pHalData->rf_tx_pwr_lmt_len = rlen;
- } else
- DBG_871X("%s rf_tx_pwr_lmt alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->rf_tx_pwr_lmt_len != 0) && (pHalData->rf_tx_pwr_lmt != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->rf_tx_pwr_lmt, pHalData->rf_tx_pwr_lmt_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s ok\n", __func__, pFileName); */
- rtStatus = phy_ParsePowerLimitTableFile(Adapter, pHalData->para_file_buf);
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-void phy_free_filebuf(struct adapter *padapter)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->mac_reg)
- vfree(pHalData->mac_reg);
- if (pHalData->bb_phy_reg)
- vfree(pHalData->bb_phy_reg);
- if (pHalData->bb_agc_tab)
- vfree(pHalData->bb_agc_tab);
- if (pHalData->bb_phy_reg_pg)
- vfree(pHalData->bb_phy_reg_pg);
- if (pHalData->bb_phy_reg_mp)
- vfree(pHalData->bb_phy_reg_mp);
- if (pHalData->rf_radio_a)
- vfree(pHalData->rf_radio_a);
- if (pHalData->rf_radio_b)
- vfree(pHalData->rf_radio_b);
- if (pHalData->rf_tx_pwr_track)
- vfree(pHalData->rf_tx_pwr_track);
- if (pHalData->rf_tx_pwr_lmt)
- vfree(pHalData->rf_tx_pwr_lmt);
-
-}
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index 70d98c58ca97..40fe43c62c45 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -1074,7 +1074,6 @@ void odm_FAThresholdCheck(
dm_FA_thres[1] = 4000;
dm_FA_thres[2] = 5000;
}
- return;
}
u8 odm_ForbiddenIGICheck(void *pDM_VOID, u8 DIG_Dynamic_MIN, u8 CurrentIGI)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index 7760fd0eb6c9..71b5a50b6ef6 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -2071,8 +2071,6 @@ static void ConstructBtNullFunctionData(
struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 pktlen;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
u8 bssid[ETH_ALEN];
@@ -2080,8 +2078,6 @@ static void ConstructBtNullFunctionData(
FUNC_ADPT_ARG(padapter), bQoS, bEosp, bForcePowerSave);
pwlanhdr = (struct ieee80211_hdr *)pframe;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
if (!StaAddr) {
memcpy(bssid, myid(&padapter->eeprompriv), ETH_ALEN);
@@ -2122,12 +2118,9 @@ static void ConstructBtNullFunctionData(
static void SetFwRsvdPagePkt_BTCoex(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
struct xmit_frame *pcmdframe;
struct pkt_attrib *pattrib;
struct xmit_priv *pxmitpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
u32 BeaconLength = 0;
u32 BTQosNullLength = 0;
u8 *ReservedPagePacket;
@@ -2140,10 +2133,7 @@ static void SetFwRsvdPagePkt_BTCoex(struct adapter *padapter)
/* DBG_8192C("+" FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(padapter)); */
- pHalData = GET_HAL_DATA(padapter);
pxmitpriv = &padapter->xmitpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
TxDescLen = TXDESC_SIZE;
TxDescOffset = TXDESC_OFFSET;
PageSize = PAGE_SIZE_TX_8723B;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
index c514cb735afd..650fbedd34e8 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -188,7 +188,8 @@ void rtl8723b_HalDmWatchDog(struct adapter *Adapter)
bBtDisabled = hal_btcoex_IsBtDisabled(Adapter);
- ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_BT_ENABLED, ((bBtDisabled == true)?false:true));
+ ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_BT_ENABLED,
+ !bBtDisabled);
ODM_DMWatchdog(&pHalData->odmpriv);
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index faeaf24fa833..66127f6c8e4d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -2234,12 +2234,8 @@ void rtl8723b_set_hal_ops(struct hal_ops *pHalFunc)
void rtl8723b_InitAntenna_Selection(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
u8 val;
-
- pHalData = GET_HAL_DATA(padapter);
-
val = rtw_read8(padapter, REG_LEDCFG2);
/* Let 8051 take control of antenna setting */
val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
@@ -3053,7 +3049,6 @@ static void rtl8723b_fill_default_txdesc(
{
struct adapter *padapter;
struct hal_com_data *pHalData;
- struct dm_priv *pdmpriv;
struct mlme_ext_priv *pmlmeext;
struct mlme_ext_info *pmlmeinfo;
struct pkt_attrib *pattrib;
@@ -3064,7 +3059,6 @@ static void rtl8723b_fill_default_txdesc(
padapter = pxmitframe->padapter;
pHalData = GET_HAL_DATA(padapter);
- pdmpriv = &pHalData->dmpriv;
pmlmeext = &padapter->mlmeextpriv;
pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -3773,7 +3767,6 @@ void C2HPacketHandler_8723B(struct adapter *padapter, u8 *pbuffer, u16 length)
process_c2h_event(padapter, &C2hEvent, tmpBuf);
/* c2h_handler_8723b(padapter,&C2hEvent); */
- return;
}
void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
@@ -4157,9 +4150,8 @@ void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
break;
}
- /* The value of ((usNavUpper + HAL_NAV_UPPER_UNIT_8723B - 1) / HAL_NAV_UPPER_UNIT_8723B) */
- /* is getting the upper integer. */
- usNavUpper = (usNavUpper + HAL_NAV_UPPER_UNIT_8723B - 1) / HAL_NAV_UPPER_UNIT_8723B;
+ usNavUpper = DIV_ROUND_UP(usNavUpper,
+ HAL_NAV_UPPER_UNIT_8723B);
rtw_write8(padapter, REG_NAV_UPPER, (u8)usNavUpper);
}
break;
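The SetHwReg8723B() hunk above replaces the open-coded round-up with the kernel's DIV_ROUND_UP() helper, which effectively expands to ((n) + (d) - 1) / (d). A minimal standalone sketch of the arithmetic; the unit value of 128 is a hypothetical stand-in for HAL_NAV_UPPER_UNIT_8723B, not taken from the driver:

#include <stdio.h>

/* same definition as the kernel helper, reproduced here for a userspace build */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int nav_upper_us = 300;   /* example NAV value in microseconds */
	unsigned int unit_us = 128;        /* hypothetical stand-in for HAL_NAV_UPPER_UNIT_8723B */

	/* plain division would truncate to 2; rounding up gives 3 */
	printf("%u\n", DIV_ROUND_UP(nav_upper_us, unit_us));
	return 0;
}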
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 6df2b58bdc67..cf23414d7224 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -362,24 +362,10 @@ void PHY_SetRFReg_8723B(
*/
s32 PHY_MACConfig8723B(struct adapter *Adapter)
{
- int rtStatus = _SUCCESS;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- s8 *pszMACRegFile;
- s8 sz8723MACRegFile[] = RTL8723B_PHY_MACREG;
-
-
- pszMACRegFile = sz8723MACRegFile;
-
- /* */
- /* Config MAC */
- /* */
- rtStatus = phy_ConfigMACWithParaFile(Adapter, pszMACRegFile);
- if (rtStatus == _FAIL) {
- ODM_ReadAndConfig_MP_8723B_MAC_REG(&pHalData->odmpriv);
- rtStatus = _SUCCESS;
- }
- return rtStatus;
+ ODM_ReadAndConfig_MP_8723B_MAC_REG(&pHalData->odmpriv);
+ return _SUCCESS;
}
/**
@@ -427,17 +413,6 @@ static void phy_InitBBRFRegisterDefinition(struct adapter *Adapter)
static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rtStatus = _SUCCESS;
- u8 sz8723BBRegFile[] = RTL8723B_PHY_REG;
- u8 sz8723AGCTableFile[] = RTL8723B_AGC_TAB;
- u8 sz8723BBBRegPgFile[] = RTL8723B_PHY_REG_PG;
- u8 sz8723BRFTxPwrLmtFile[] = RTL8723B_TXPWR_LMT;
- u8 *pszBBRegFile = NULL, *pszAGCTableFile = NULL, *pszBBRegPgFile = NULL, *pszRFTxPwrLmtFile = NULL;
-
- pszBBRegFile = sz8723BBRegFile;
- pszAGCTableFile = sz8723AGCTableFile;
- pszBBRegPgFile = sz8723BBBRegPgFile;
- pszRFTxPwrLmtFile = sz8723BRFTxPwrLmtFile;
/* Read Tx Power Limit File */
PHY_InitTxPowerLimit(Adapter);
@@ -445,30 +420,14 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
Adapter->registrypriv.RegEnableTxPowerLimit == 1 ||
(Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
) {
- if (PHY_ConfigRFWithPowerLimitTableParaFile(Adapter, pszRFTxPwrLmtFile) == _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_TXPWR_LMT, (ODM_RF_RADIO_PATH_E)0))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_871X("%s():Read Tx power limit fail\n", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
+ ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_RF_TXPWR_LMT, 0);
}
/* */
/* 1. Read PHY_REG.TXT BB INIT!! */
/* */
- if (phy_ConfigBBWithParaFile(Adapter, pszBBRegFile, CONFIG_BB_PHY_REG) ==
- _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():Write BB Reg Fail!!", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG);
/* If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
PHY_InitTxPowerByRate(Adapter);
@@ -476,11 +435,8 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
Adapter->registrypriv.RegEnableTxPowerByRate == 1 ||
(Adapter->registrypriv.RegEnableTxPowerByRate == 2 && pHalData->EEPROMRegulatory != 2)
) {
- if (phy_ConfigBBWithPgParaFile(Adapter, pszBBRegPgFile) ==
- _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG_PG))
- rtStatus = _FAIL;
- }
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_BB_PHY_REG_PG);
if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE)
PHY_TxPowerByRateConfiguration(Adapter);
@@ -490,29 +446,14 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
(Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
)
PHY_ConvertTxPowerLimitToPowerIndex(Adapter);
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():BB_PG Reg Fail!!\n", __func__);
- }
}
/* */
/* 2. Read BB AGC table Initialization */
/* */
- if (phy_ConfigBBWithParaFile(Adapter, pszAGCTableFile,
- CONFIG_BB_AGC_TAB) == _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():AGC Table Fail\n", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
-
-phy_BB8190_Config_ParaFile_Fail:
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB);
- return rtStatus;
+ return _SUCCESS;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
index d0ffe0af5339..aafceaf9b139 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
@@ -85,19 +85,8 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
u32 u4RegValue = 0;
u8 eRFPath;
struct bb_register_def *pPhyReg;
-
- int rtStatus = _SUCCESS;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- static char sz8723RadioAFile[] = RTL8723B_PHY_RADIO_A;
- static char sz8723RadioBFile[] = RTL8723B_PHY_RADIO_B;
- static s8 sz8723BTxPwrTrackFile[] = RTL8723B_TXPWR_TRACK;
- char *pszRadioAFile, *pszRadioBFile, *pszTxPwrTrackFile;
-
- pszRadioAFile = sz8723RadioAFile;
- pszRadioBFile = sz8723RadioBFile;
- pszTxPwrTrackFile = sz8723BTxPwrTrackFile;
-
/* 3----------------------------------------------------------------- */
/* 3 <2> Initialize RF */
/* 3----------------------------------------------------------------- */
@@ -136,21 +125,11 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
/*----Initialize RF from configuration file----*/
switch (eRFPath) {
case RF_PATH_A:
- if (PHY_ConfigRFWithParaFile(Adapter, pszRadioAFile,
- eRFPath) == _FAIL) {
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
- rtStatus = _FAIL;
- }
- break;
case RF_PATH_B:
- if (PHY_ConfigRFWithParaFile(Adapter, pszRadioBFile,
- eRFPath) == _FAIL) {
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
- rtStatus = _FAIL;
- }
+ ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_RF_RADIO, eRFPath);
break;
case RF_PATH_C:
- break;
case RF_PATH_D:
break;
}
@@ -166,28 +145,16 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV << 16, u4RegValue);
break;
}
-
- if (rtStatus != _SUCCESS) {
- /* RT_TRACE(COMP_FPGA, DBG_LOUD, ("phy_RF6052_Config_ParaFile():Radio[%d] Fail!!", eRFPath)); */
- goto phy_RF6052_Config_ParaFile_Fail;
- }
-
}
/* 3 ----------------------------------------------------------------- */
/* 3 Configuration of Tx Power Tracking */
/* 3 ----------------------------------------------------------------- */
- if (PHY_ConfigRFWithTxPwrTrackParaFile(Adapter, pszTxPwrTrackFile) ==
- _FAIL) {
- ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv);
- }
+ ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv);
/* RT_TRACE(COMP_INIT, DBG_LOUD, ("<---phy_RF6052_Config_ParaFile()\n")); */
- return rtStatus;
-
-phy_RF6052_Config_ParaFile_Fail:
- return rtStatus;
+ return _SUCCESS;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 0f3301091258..1e8b61443408 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -179,8 +179,6 @@ static void rtl8723bs_c2h_packet_handler(struct adapter *padapter,
kfree(tmp);
/* DBG_871X("-%s res(%d)\n", __func__, res); */
-
- return;
}
static inline union recv_frame *try_alloc_recvframe(struct recv_priv *precvpriv,
@@ -232,7 +230,7 @@ static inline bool pkt_exceeds_tail(struct recv_priv *precvpriv,
return false;
}
-static void rtl8723bs_recv_tasklet(void *priv)
+static void rtl8723bs_recv_tasklet(unsigned long priv)
{
struct adapter *padapter;
struct hal_com_data *p_hal_data;
@@ -246,7 +244,7 @@ static void rtl8723bs_recv_tasklet(void *priv)
_pkt *pkt_copy = NULL;
u8 shift_sz = 0, rx_report_sz = 0;
- padapter = priv;
+ padapter = (struct adapter *)priv;
p_hal_data = GET_HAL_DATA(padapter);
precvpriv = &padapter->recvpriv;
recv_buf_queue = &precvpriv->recv_buf_pending_queue;
@@ -464,11 +462,8 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
goto initbuferror;
/* 3 2. init tasklet */
- tasklet_init(
- &precvpriv->recv_tasklet,
- (void(*)(unsigned long))rtl8723bs_recv_tasklet,
- (unsigned long)padapter
- );
+ tasklet_init(&precvpriv->recv_tasklet, rtl8723bs_recv_tasklet,
+ (unsigned long)padapter);
goto exit;
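
Note: with the handler's parameter type changed to unsigned long, its prototype matches what tasklet_init() expects, so the function-pointer cast at the init site can go away. A minimal sketch of the classic tasklet API as used here; struct my_priv and the handler body are illustrative only:

#include <linux/interrupt.h>

struct my_priv {
        struct tasklet_struct rx_tasklet;
};

static void my_rx_tasklet(unsigned long data)
{
        struct my_priv *priv = (struct my_priv *)data;

        /* ... drain the receive queue for priv ... */
}

static void my_priv_init(struct my_priv *priv)
{
        /* tasklet_init() takes void (*func)(unsigned long), so no cast on
         * the function pointer is needed when the handler already has that
         * signature. */
        tasklet_init(&priv->rx_tasklet, my_rx_tasklet, (unsigned long)priv);
}
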
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index 0f5dd4629e6f..e813382e78a6 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -570,14 +570,11 @@ static void HalRxAggr8723BSdio(struct adapter *padapter)
static void sdio_AggSettingRxUpdate(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
u8 valueDMA;
u8 valueRxAggCtrl = 0;
u8 aggBurstNum = 3; /* 0:1, 1:2, 2:3, 3:4 */
u8 aggBurstSize = 0; /* 0:1K, 1:512Byte, 2:256Byte... */
- pHalData = GET_HAL_DATA(padapter);
-
valueDMA = rtw_read8(padapter, REG_TRXDMA_CTRL);
valueDMA |= RXDMA_AGG_EN;
rtw_write8(padapter, REG_TRXDMA_CTRL, valueDMA);
@@ -713,13 +710,11 @@ static u32 rtl8723bs_hal_init(struct adapter *padapter)
s32 ret;
struct hal_com_data *pHalData;
struct pwrctrl_priv *pwrctrlpriv;
- struct registry_priv *pregistrypriv;
u32 NavUpper = WiFiNavUpperUs;
u8 u1bTmp;
pHalData = GET_HAL_DATA(padapter);
pwrctrlpriv = adapter_to_pwrctl(padapter);
- pregistrypriv = &padapter->registrypriv;
if (
adapter_to_pwrctl(padapter)->bips_processing == true &&
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 301d327d0624..b6b4adb5a28a 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -15,7 +15,7 @@
/* */
/* Description: */
-/* The following mapping is for SDIO host local register space. */
+/* The following mapping is for SDIO host local register space. */
/* */
/* Created by Roger, 2011.01.31. */
/* */
@@ -61,7 +61,6 @@ static u8 get_deviceid(u32 addr)
u8 devide_id;
u16 pseudo_id;
-
pseudo_id = (u16)(addr >> 16);
switch (pseudo_id) {
case 0x1025:
@@ -72,10 +71,6 @@ static u8 get_deviceid(u32 addr)
devide_id = WLAN_IOREG_DEVICE_ID;
break;
-/* case 0x1027: */
-/* devide_id = SDIO_FIRMWARE_FIFO; */
-/* break; */
-
case 0x1031:
devide_id = WLAN_TX_HIQ_DEVICE_ID;
break;
@@ -93,7 +88,6 @@ static u8 get_deviceid(u32 addr)
break;
default:
-/* devide_id = (u8)((addr >> 13) & 0xF); */
devide_id = WLAN_IOREG_DEVICE_ID;
break;
}
@@ -111,7 +105,6 @@ static u32 _cvrt2ftaddr(const u32 addr, u8 *pdevice_id, u16 *poffset)
u16 offset;
u32 ftaddr;
-
device_id = get_deviceid(addr);
offset = 0;
@@ -427,20 +420,16 @@ static u32 sdio_read_port(
struct adapter *adapter;
struct sdio_data *psdio;
struct hal_com_data *hal;
- u32 oldcnt;
s32 err;
-
adapter = intfhdl->padapter;
psdio = &adapter_to_dvobj(adapter)->intf_data;
hal = GET_HAL_DATA(adapter);
HalSdioGetCmdAddr8723BSdio(adapter, addr, hal->SdioRxFIFOCnt++, &addr);
- oldcnt = cnt;
if (cnt > psdio->block_transfer_len)
cnt = _RND(cnt, psdio->block_transfer_len);
-/* cnt = sdio_align_size(cnt); */
err = _sd_read(intfhdl, addr, cnt, mem);
@@ -490,7 +479,6 @@ static u32 sdio_write_port(
if (cnt > psdio->block_transfer_len)
cnt = _RND(cnt, psdio->block_transfer_len);
-/* cnt = sdio_align_size(cnt); */
err = sd_write(intfhdl, addr, cnt, xmitbuf->pdata);
@@ -538,7 +526,6 @@ static s32 _sdio_local_read(
u8 *tmpbuf;
u32 n;
-
intfhdl = &adapter->iopriv.intf;
HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
@@ -711,7 +698,6 @@ static s32 ReadInterrupt8723BSdio(struct adapter *adapter, u32 *phisr)
u32 hisr, himr;
u8 val8, hisr_len;
-
if (!phisr)
return false;
@@ -737,73 +723,48 @@ static s32 ReadInterrupt8723BSdio(struct adapter *adapter, u32 *phisr)
}
/* */
-/* Description: */
-/* Initialize SDIO Host Interrupt Mask configuration variables for future use. */
+/* Description: */
+/* Initialize SDIO Host Interrupt Mask configuration variables for future use. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void InitInterrupt8723BSdio(struct adapter *adapter)
{
struct hal_com_data *haldata;
-
haldata = GET_HAL_DATA(adapter);
- haldata->sdio_himr = (u32)( \
- SDIO_HIMR_RX_REQUEST_MSK |
- SDIO_HIMR_AVAL_MSK |
-/* SDIO_HIMR_TXERR_MSK | */
-/* SDIO_HIMR_RXERR_MSK | */
-/* SDIO_HIMR_TXFOVW_MSK | */
-/* SDIO_HIMR_RXFOVW_MSK | */
-/* SDIO_HIMR_TXBCNOK_MSK | */
-/* SDIO_HIMR_TXBCNERR_MSK | */
-/* SDIO_HIMR_BCNERLY_INT_MSK | */
-/* SDIO_HIMR_C2HCMD_MSK | */
-/* SDIO_HIMR_HSISR_IND_MSK | */
-/* SDIO_HIMR_GTINT3_IND_MSK | */
-/* SDIO_HIMR_GTINT4_IND_MSK | */
-/* SDIO_HIMR_PSTIMEOUT_MSK | */
-/* SDIO_HIMR_OCPINT_MSK | */
-/* SDIO_HIMR_ATIMEND_MSK | */
-/* SDIO_HIMR_ATIMEND_E_MSK | */
-/* SDIO_HIMR_CTWEND_MSK | */
- 0);
+ haldata->sdio_himr = (u32)(SDIO_HIMR_RX_REQUEST_MSK |
+ SDIO_HIMR_AVAL_MSK |
+ 0);
}
/* */
-/* Description: */
-/* Initialize System Host Interrupt Mask configuration variables for future use. */
+/* Description: */
+/* Initialize System Host Interrupt Mask configuration variables for future use. */
/* */
-/* Created by Roger, 2011.08.03. */
+/* Created by Roger, 2011.08.03. */
/* */
void InitSysInterrupt8723BSdio(struct adapter *adapter)
{
struct hal_com_data *haldata;
-
haldata = GET_HAL_DATA(adapter);
- haldata->SysIntrMask = ( \
-/* HSIMR_GPIO12_0_INT_EN | */
-/* HSIMR_SPS_OCP_INT_EN | */
-/* HSIMR_RON_INT_EN | */
-/* HSIMR_PDNINT_EN | */
-/* HSIMR_GPIO9_INT_EN | */
- 0);
+ haldata->SysIntrMask = (0);
}
/* */
-/* Description: */
-/* Enalbe SDIO Host Interrupt Mask configuration on SDIO local domain. */
+/* Description: */
+/* Enable SDIO Host Interrupt Mask configuration on SDIO local domain. */
/* */
-/* Assumption: */
-/* 1. Using SDIO Local register ONLY for configuration. */
-/* 2. PASSIVE LEVEL */
+/* Assumption: */
+/* 1. Using SDIO Local register ONLY for configuration. */
+/* 2. PASSIVE LEVEL */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void EnableInterrupt8723BSdio(struct adapter *adapter)
{
@@ -849,13 +810,13 @@ void EnableInterrupt8723BSdio(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Disable SDIO Host IMR configuration to mask unnecessary interrupt service. */
+/* Description: */
+/* Disable SDIO Host IMR configuration to mask unnecessary interrupt service. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void DisableInterrupt8723BSdio(struct adapter *adapter)
{
@@ -866,13 +827,13 @@ void DisableInterrupt8723BSdio(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Using 0x100 to check the power status of FW. */
+/* Description: */
+/* Using 0x100 to check the power status of FW. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Isaac, 2013.09.10. */
+/* Created by Isaac, 2013.09.10. */
/* */
u8 CheckIPSStatus(struct adapter *adapter)
{
@@ -896,7 +857,6 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
struct recv_priv *recv_priv;
struct recv_buf *recvbuf;
-
/* Patch for some SDIO Host 4 bytes issue */
/* ex. RK3188 */
readsize = RND4(size);
@@ -938,7 +898,6 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
return NULL;
}
-
/* 3 4. init recvbuf */
recvbuf->len = size;
recvbuf->phead = recvbuf->pskb->head;
@@ -972,7 +931,6 @@ void sd_int_dpc(struct adapter *adapter)
struct intf_hdl *intfhdl = &adapter->iopriv.intf;
struct pwrctrl_priv *pwrctl;
-
hal = GET_HAL_DATA(adapter);
dvobj = adapter_to_dvobj(adapter);
pwrctl = dvobj_to_pwrctl(dvobj);
@@ -992,7 +950,6 @@ void sd_int_dpc(struct adapter *adapter)
report.state = SdioLocalCmd52Read1Byte(adapter, SDIO_REG_HCPWM1_8723B);
- /* cpwm_int_hdl(adapter, &report); */
_set_workitem(&(pwrctl->cpwm_event));
}
@@ -1029,7 +986,7 @@ void sd_int_dpc(struct adapter *adapter)
if (c2h_id_filter_ccx_8723b((u8 *)c2h_evt)) {
/* Handle CCX report here */
rtw_hal_c2h_handler(adapter, (u8 *)c2h_evt);
- kfree((u8 *)c2h_evt);
+ kfree(c2h_evt);
} else {
rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
}
@@ -1049,13 +1006,11 @@ void sd_int_dpc(struct adapter *adapter)
if (hal->sdio_hisr & SDIO_HISR_RXERR)
DBG_8192C("%s: Rx Error\n", __func__);
-
if (hal->sdio_hisr & SDIO_HISR_RX_REQUEST) {
struct recv_buf *recvbuf;
int alloc_fail_time = 0;
u32 hisr;
-/* DBG_8192C("%s: RX Request, size =%d\n", __func__, hal->SdioRxFIFOSize); */
hal->sdio_hisr ^= SDIO_HISR_RX_REQUEST;
do {
hal->SdioRxFIFOSize = SdioLocalCmd52Read2Byte(adapter, SDIO_REG_RX0_REQ_LEN);
@@ -1090,7 +1045,6 @@ void sd_int_hdl(struct adapter *adapter)
{
struct hal_com_data *hal;
-
if (
(adapter->bDriverStopped) || (adapter->bSurpriseRemoved)
)
@@ -1120,27 +1074,24 @@ void sd_int_hdl(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Query SDIO Local register to query current the number of Free TxPacketBuffer page. */
+/* Description: */
+/* Query SDIO Local register to query current the number of Free TxPacketBuffer page. */
/* */
-/* Assumption: */
-/* 1. Running at PASSIVE_LEVEL */
-/* 2. RT_TX_SPINLOCK is NOT acquired. */
+/* Assumption: */
+/* 1. Running at PASSIVE_LEVEL */
+/* 2. RT_TX_SPINLOCK is NOT acquired. */
/* */
-/* Created by Roger, 2011.01.28. */
+/* Created by Roger, 2011.01.28. */
/* */
u8 HalQueryTxBufferStatus8723BSdio(struct adapter *adapter)
{
struct hal_com_data *hal;
u32 numof_free_page;
- /* _irql irql; */
-
hal = GET_HAL_DATA(adapter);
numof_free_page = SdioLocalCmd53Read4Byte(adapter, SDIO_REG_FREE_TXPG);
- /* spin_lock_bh(&phal->SdioTxFIFOFreePageLock); */
memcpy(hal->SdioTxFIFOFreePage, &numof_free_page, 4);
RT_TRACE(_module_hci_ops_c_, _drv_notice_,
("%s: Free page for HIQ(%#x), MIDQ(%#x), LOWQ(%#x), PUBQ(%#x)\n",
@@ -1149,14 +1100,13 @@ u8 HalQueryTxBufferStatus8723BSdio(struct adapter *adapter)
hal->SdioTxFIFOFreePage[MID_QUEUE_IDX],
hal->SdioTxFIFOFreePage[LOW_QUEUE_IDX],
hal->SdioTxFIFOFreePage[PUBLIC_QUEUE_IDX]));
- /* spin_unlock_bh(&hal->SdioTxFIFOFreePageLock); */
return true;
}
/* */
-/* Description: */
-/* Query SDIO Local register to get the current number of TX OQT Free Space. */
+/* Description: */
+/* Query SDIO Local register to get the current number of TX OQT Free Space. */
/* */
void HalQueryTxOQTBufferStatus8723BSdio(struct adapter *adapter)
{
@@ -1190,7 +1140,6 @@ u8 RecvOnePkt(struct adapter *adapter, u32 size)
recvbuf = sd_recv_rxfifo(adapter, size);
if (recvbuf) {
- /* printk("Completed Recv One Pkt.\n"); */
sd_rxhandler(adapter, recvbuf);
res = true;
} else {
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 8d7fce1e39b7..6ec9087f2eb1 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -197,9 +197,6 @@ struct registry_priv
u8 RFE_Type;
u8 check_fw_ps;
- u8 load_phy_file;
- u8 RegDecryptCustomFile;
-
#ifdef CONFIG_MULTI_VIR_IFACES
u8 ext_iface_num;/* primary/secondary iface is excluded */
#endif
@@ -693,7 +690,6 @@ void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
void indicate_wx_scan_complete_event(struct adapter *padapter);
int rtw_change_ifname(struct adapter *padapter, const char *ifname);
-extern char *rtw_phy_file_path;
extern char *rtw_initmac;
extern int rtw_mc2u_disable;
extern int rtw_ht_enable;
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
index 9167f1e7827f..e9a3006a3e20 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -219,30 +219,4 @@ struct adapter * Adapter,
u16 ChannelPlan
);
-#define MAX_PARA_FILE_BUF_LEN 25600
-
-#define LOAD_MAC_PARA_FILE BIT0
-#define LOAD_BB_PARA_FILE BIT1
-#define LOAD_BB_PG_PARA_FILE BIT2
-#define LOAD_BB_MP_PARA_FILE BIT3
-#define LOAD_RF_PARA_FILE BIT4
-#define LOAD_RF_TXPWR_TRACK_PARA_FILE BIT5
-#define LOAD_RF_TXPWR_LMT_PARA_FILE BIT6
-
-int phy_ConfigMACWithParaFile(struct adapter *Adapter, char*pFileName);
-
-int phy_ConfigBBWithParaFile(struct adapter *Adapter, char*pFileName, u32 ConfigType);
-
-int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char*pFileName);
-
-int phy_ConfigBBWithMpParaFile(struct adapter *Adapter, char*pFileName);
-
-int PHY_ConfigRFWithParaFile(struct adapter *Adapter, char*pFileName, u8 eRFPath);
-
-int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char*pFileName);
-
-int PHY_ConfigRFWithPowerLimitTableParaFile(struct adapter *Adapter, char*pFileName);
-
-void phy_free_filebuf(struct adapter *padapter);
-
#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_data.h b/drivers/staging/rtl8723bs/include/hal_data.h
index 7d782659a84f..e5e667df6154 100644
--- a/drivers/staging/rtl8723bs/include/hal_data.h
+++ b/drivers/staging/rtl8723bs/include/hal_data.h
@@ -440,27 +440,6 @@ struct hal_com_data {
u32 SysIntrStatus;
u32 SysIntrMask;
-
- char para_file_buf[MAX_PARA_FILE_BUF_LEN];
- char *mac_reg;
- u32 mac_reg_len;
- char *bb_phy_reg;
- u32 bb_phy_reg_len;
- char *bb_agc_tab;
- u32 bb_agc_tab_len;
- char *bb_phy_reg_pg;
- u32 bb_phy_reg_pg_len;
- char *bb_phy_reg_mp;
- u32 bb_phy_reg_mp_len;
- char *rf_radio_a;
- u32 rf_radio_a_len;
- char *rf_radio_b;
- u32 rf_radio_b_len;
- char *rf_tx_pwr_track;
- u32 rf_tx_pwr_track_len;
- char *rf_tx_pwr_lmt;
- u32 rf_tx_pwr_lmt_len;
-
#ifdef CONFIG_BACKGROUND_NOISE_MONITOR
s16 noise[ODM_MAX_CHANNEL_NUM];
#endif
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index 81a9c19ecc6a..a40cf7b60a69 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -171,10 +171,6 @@ extern void rtw_softap_lock_suspend(void);
extern void rtw_softap_unlock_suspend(void);
#endif
-/* File operation APIs, just for linux now */
-extern int rtw_is_file_readable(char *path);
-extern int rtw_retrive_from_file(char *path, u8 *buf, u32 sz);
-
extern void rtw_free_netdev(struct net_device * netdev);
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index c582ede1ac12..a2d9de866c4b 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -127,13 +127,6 @@ static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
netif_tx_stop_all_queues(pnetdev);
}
-static inline void rtw_merge_string(char *dst, int dst_len, char *src1, char *src2)
-{
- int len = 0;
- len += snprintf(dst+len, dst_len - len, "%s", src1);
- len += snprintf(dst+len, dst_len - len, "%s", src2);
-}
-
#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index 8f00ced1c697..f36516fa84c7 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -21,21 +21,6 @@
#include "hal_phy_cfg.h"
/* */
-/* RTL8723B From file */
-/* */
-#define RTL8723B_FW_IMG "rtl8723b/FW_NIC.bin"
-#define RTL8723B_FW_WW_IMG "rtl8723b/FW_WoWLAN.bin"
-#define RTL8723B_PHY_REG "rtl8723b/PHY_REG.txt"
-#define RTL8723B_PHY_RADIO_A "rtl8723b/RadioA.txt"
-#define RTL8723B_PHY_RADIO_B "rtl8723b/RadioB.txt"
-#define RTL8723B_TXPWR_TRACK "rtl8723b/TxPowerTrack.txt"
-#define RTL8723B_AGC_TAB "rtl8723b/AGC_TAB.txt"
-#define RTL8723B_PHY_MACREG "rtl8723b/MAC_REG.txt"
-#define RTL8723B_PHY_REG_PG "rtl8723b/PHY_REG_PG.txt"
-#define RTL8723B_PHY_REG_MP "rtl8723b/PHY_REG_MP.txt"
-#define RTL8723B_TXPWR_LMT "rtl8723b/TXPWR_LMT.txt"
-
-/* */
/* RTL8723B From header */
/* */
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index fd3cf955c9f8..73e8ec09b6e1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -576,7 +576,6 @@ void read_cam(struct adapter *padapter , u8 entry, u8 *get_key);
/* modify HW only */
void _write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
void _clear_cam_entry(struct adapter *padapter, u8 entry);
-void write_cam_from_cache(struct adapter *adapter, u8 id);
/* modify both HW and cache */
void write_cam(struct adapter *padapter, u8 id, u16 ctrl, u8 *mac, u8 *key);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index f819abb756dc..322cabb97b99 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -165,7 +165,7 @@ static void rtw_spt_band_free(struct ieee80211_supported_band *spt_band)
+ sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
+ sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM;
}
- kfree((u8 *)spt_band);
+ kfree(spt_band);
}
static const struct ieee80211_txrx_stypes
@@ -240,10 +240,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
u16 channel;
u32 freq;
u64 notify_timestamp;
- u16 notify_capability;
- u16 notify_interval;
- u8 *notify_ie;
- size_t notify_ielen;
s32 notify_signal;
u8 *buf = NULL, *pbuf;
size_t len, bssinf_len = 0;
@@ -324,12 +320,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
notify_timestamp = ktime_to_us(ktime_get_boottime());
- notify_interval = le16_to_cpu(*(__le16 *)rtw_get_beacon_interval_from_ie(pnetwork->network.IEs));
- notify_capability = le16_to_cpu(*(__le16 *)rtw_get_capability_from_ie(pnetwork->network.IEs));
-
- notify_ie = pnetwork->network.IEs+_FIXED_IE_LENGTH_;
- notify_ielen = pnetwork->network.IELength-_FIXED_IE_LENGTH_;
-
/* We've set wiphy's signal_type as CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm) */
if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
@@ -1156,7 +1146,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
addkey_end:
- kfree((u8 *)param);
+ kfree(param);
return ret;
@@ -1305,7 +1295,6 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
struct wireless_dev *rtw_wdev = padapter->rtw_wdev;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
int ret = 0;
- u8 change = false;
DBG_871X(FUNC_NDEV_FMT" type =%d\n", FUNC_NDEV_ARG(ndev), type);
@@ -1336,7 +1325,6 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
if (old_type != type)
{
- change = true;
pmlmeext->action_public_rxseq = 0xffff;
pmlmeext->action_public_dialog_token = 0xff;
}
@@ -1410,19 +1398,19 @@ void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnet
struct wireless_dev *pwdev = padapter->rtw_wdev;
struct wiphy *wiphy = pwdev->wiphy;
struct cfg80211_bss *bss = NULL;
- struct wlan_bssid_ex select_network = pnetwork->network;
+ struct wlan_bssid_ex *select_network = &pnetwork->network;
bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
- select_network.MacAddress, select_network.Ssid.Ssid,
- select_network.Ssid.SsidLength, 0/*WLAN_CAPABILITY_ESS*/,
+ select_network->MacAddress, select_network->Ssid.Ssid,
+ select_network->Ssid.SsidLength, 0/*WLAN_CAPABILITY_ESS*/,
0/*WLAN_CAPABILITY_ESS*/);
if (bss) {
cfg80211_unlink_bss(wiphy, bss);
- DBG_8192C("%s(): cfg80211_unlink %s!! () ", __func__, select_network.Ssid.Ssid);
+ DBG_8192C("%s(): cfg80211_unlink %s!! () ", __func__,
+ select_network->Ssid.Ssid);
cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
}
- return;
}
void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter)
@@ -1513,7 +1501,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
int i;
u8 _status = false;
int ret = 0;
- struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct ndis_802_11_ssid *ssid = NULL;
struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
u8 survey_times =3;
u8 survey_times_for_one_ch =6;
@@ -1604,7 +1592,13 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
goto check_need_indicate_scan_done;
}
- memset(ssid, 0, sizeof(struct ndis_802_11_ssid)*RTW_SSID_SCAN_AMOUNT);
+ ssid = kzalloc(RTW_SSID_SCAN_AMOUNT * sizeof(struct ndis_802_11_ssid),
+ GFP_KERNEL);
+ if (!ssid) {
+ ret = -ENOMEM;
+ goto check_need_indicate_scan_done;
+ }
+
/* parsing request ssids, n_ssids */
for (i = 0; i < request->n_ssids && i < RTW_SSID_SCAN_AMOUNT; i++) {
#ifdef DEBUG_CFG80211
@@ -1648,6 +1642,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
}
check_need_indicate_scan_done:
+ kfree(ssid);
if (need_indicate_scan_done)
{
rtw_cfg80211_surveydone_event_callback(padapter);
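
Note: moving the ssid array off the stack and freeing it at the shared exit label keeps a large buffer out of the scan path's stack frame. For an array allocation like this, kcalloc() is the usual overflow-checked spelling of the same intent; a sketch under that assumption, with an illustrative stand-in type:

#include <linux/types.h>
#include <linux/slab.h>

/* struct example_ssid stands in for struct ndis_802_11_ssid. */
struct example_ssid {
        u8 ssid[32];
        u32 length;
};

static int example_alloc_ssids(unsigned int count)
{
        struct example_ssid *ssid;

        /* kcalloc(n, size, flags) zeroes the array and checks n * size
         * for overflow; same intent as the kzalloc() call above. */
        ssid = kcalloc(count, sizeof(*ssid), GFP_KERNEL);
        if (!ssid)
                return -ENOMEM;

        /* ... fill ssid[0..count-1] ... */

        kfree(ssid);    /* kfree(NULL) is a no-op, so one exit label suffices */
        return 0;
}
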
@@ -1798,7 +1793,7 @@ static int rtw_cfg80211_set_key_mgt(struct security_priv *psecuritypriv, u32 key
static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t ielen)
{
- u8 *buf = NULL, *pos = NULL;
+ u8 *buf = NULL;
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
int wpa_ielen = 0;
@@ -1833,7 +1828,6 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
DBG_8192C("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
}
- pos = buf;
if (ielen < RSN_HEADER_LEN) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %d\n", ielen));
ret = -1;
@@ -2194,7 +2188,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
ret = -EOPNOTSUPP ;
}
- kfree((u8 *)pwep);
+ kfree(pwep);
if (ret < 0)
goto exit;
@@ -2433,7 +2427,6 @@ void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter, unsigned char
static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
{
- int ret = 0;
int rtap_len;
int qos_len = 0;
int dot11_hdr_len = 24;
@@ -2499,9 +2492,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
DBG_8192C("should be eapol packet\n");
/* Use the real net device to transmit the packet */
- ret = _rtw_xmit_entry(skb, padapter->pnetdev);
-
- return ret;
+ return _rtw_xmit_entry(skb, padapter->pnetdev);
}
else if ((frame_control & (IEEE80211_FCTL_FTYPE|IEEE80211_FCTL_STYPE))
@@ -2647,7 +2638,7 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
out:
if (ret && mon_wdev) {
- kfree((u8 *)mon_wdev);
+ kfree(mon_wdev);
mon_wdev = NULL;
}
@@ -2808,14 +2799,11 @@ static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_beacon_data *info)
{
- int ret = 0;
struct adapter *adapter = (struct adapter *)rtw_netdev_priv(ndev);
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
- ret = rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
-
- return ret;
+ return rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
}
static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
@@ -3503,7 +3491,7 @@ void rtw_wdev_free(struct wireless_dev *wdev)
wiphy_free(wdev->wiphy);
- kfree((u8 *)wdev);
+ kfree(wdev);
}
void rtw_wdev_unregister(struct wireless_dev *wdev)
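
Note: several call sites in this file drop (u8 *) casts in front of kfree(). kfree() takes a void pointer, so any object pointer can be passed directly and the casts only obscure the type. A short sketch with an illustrative struct:

#include <linux/slab.h>

struct example {
        int a;
};

static void example_alloc_free(void)
{
        struct example *p = kzalloc(sizeof(*p), GFP_KERNEL);

        /* ... */

        kfree(p);       /* no (u8 *) cast needed; kfree() takes a void pointer */
}
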
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index d1b199e3e5bd..db6528a01229 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2425,19 +2425,13 @@ static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
return 0;
}
-static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
static int rtw_get_ap_info(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
- u32 cnt = 0, wpa_ielen;
+ int wpa_ielen;
+ u32 cnt = 0;
struct list_head *plist, *phead;
unsigned char *pbuf;
u8 bssid[ETH_ALEN];
@@ -2793,7 +2787,7 @@ static int rtw_dbg_port(struct net_device *dev,
DBG_871X("oper_ch =%d\n", rtw_get_oper_ch(padapter));
DBG_871X("oper_bw =%d\n", rtw_get_oper_bw(padapter));
- DBG_871X("oper_ch_offet =%d\n", rtw_get_oper_choffset(padapter));
+ DBG_871X("oper_ch_offset =%d\n", rtw_get_oper_choffset(padapter));
break;
case 0x05:
@@ -4458,43 +4452,6 @@ static int rtw_pm_set(struct net_device *dev,
return ret;
}
-static int rtw_mp_efuse_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- int err = 0;
- return err;
-}
-
-static int rtw_mp_efuse_set(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- int err = 0;
- return err;
-}
-
-static int rtw_tdls(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
-
-static int rtw_tdls_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
-
-
-
-
static int rtw_test(
struct net_device *dev,
struct iw_request_info *info,
@@ -4744,7 +4701,7 @@ static iw_handler rtw_private_handler[] = {
rtw_wx_write32, /* 0x00 */
rtw_wx_read32, /* 0x01 */
rtw_drvext_hdl, /* 0x02 */
- rtw_mp_ioctl_hdl, /* 0x03 */
+ NULL, /* 0x03 */
/* for MM DTV platform */
rtw_get_ap_info, /* 0x04 */
@@ -4771,15 +4728,15 @@ static iw_handler rtw_private_handler[] = {
NULL, /* 0x12 */
rtw_p2p_get2, /* 0x13 */
- rtw_tdls, /* 0x14 */
- rtw_tdls_get, /* 0x15 */
+ NULL, /* 0x14 */
+ NULL, /* 0x15 */
rtw_pm_set, /* 0x16 */
rtw_wx_priv_null, /* 0x17 */
rtw_rereg_nd_name, /* 0x18 */
rtw_wx_priv_null, /* 0x19 */
- rtw_mp_efuse_set, /* 0x1A */
- rtw_mp_efuse_get, /* 0x1B */
+ NULL, /* 0x1A */
+ NULL, /* 0x1B */
NULL, /* 0x1C is reserved for hostapd */
rtw_test, /* 0x1D */
};
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index ec3a75485233..47e984d5b7cb 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -201,24 +201,6 @@ MODULE_PARM_DESC(rtw_tx_pwr_lmt_enable, "0:Disable, 1:Enable, 2: Depend on efuse
module_param(rtw_tx_pwr_by_rate, int, 0644);
MODULE_PARM_DESC(rtw_tx_pwr_by_rate, "0:Disable, 1:Enable, 2: Depend on efuse");
-char *rtw_phy_file_path = "";
-module_param(rtw_phy_file_path, charp, 0644);
-MODULE_PARM_DESC(rtw_phy_file_path, "The path of phy parameter");
-/* PHY FILE Bit Map */
-/* BIT0 - MAC, 0: non-support, 1: support */
-/* BIT1 - BB, 0: non-support, 1: support */
-/* BIT2 - BB_PG, 0: non-support, 1: support */
-/* BIT3 - BB_MP, 0: non-support, 1: support */
-/* BIT4 - RF, 0: non-support, 1: support */
-/* BIT5 - RF_TXPWR_TRACK, 0: non-support, 1: support */
-/* BIT6 - RF_TXPWR_LMT, 0: non-support, 1: support */
-static int rtw_load_phy_file = (BIT2 | BIT6);
-module_param(rtw_load_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_load_phy_file, "PHY File Bit Map");
-static int rtw_decrypt_phy_file;
-module_param(rtw_decrypt_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_decrypt_phy_file, "Enable Decrypt PHY File");
-
int _netdev_open(struct net_device *pnetdev);
int netdev_open (struct net_device *pnetdev);
static int netdev_close (struct net_device *pnetdev);
@@ -321,8 +303,6 @@ static void loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->bEn_RFE = 1;
registry_par->RFE_Type = 64;
- registry_par->load_phy_file = (u8)rtw_load_phy_file;
- registry_par->RegDecryptCustomFile = (u8)rtw_decrypt_phy_file;
registry_par->qos_opt_enable = (u8)rtw_qos_opt_enable;
registry_par->hiq_filter = (u8)rtw_hiq_filter;
@@ -1141,8 +1121,7 @@ void rtw_ndev_destructor(struct net_device *ndev)
{
DBG_871X(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
- if (ndev->ieee80211_ptr)
- kfree((u8 *)ndev->ieee80211_ptr);
+ kfree(ndev->ieee80211_ptr);
}
void rtw_dev_unload(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 25a80041ce87..f5614e56371e 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -65,142 +65,6 @@ void _rtw_init_queue(struct __queue *pqueue)
spin_lock_init(&(pqueue->lock));
}
-/*
-* Open a file with the specific @param path, @param flag, @param mode
-* @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
-* @param path the path of the file to open
-* @param flag file operation flags, please refer to linux document
-* @param mode please refer to linux document
-* @return Linux specific error code
-*/
-static int openFile(struct file **fpp, char *path, int flag, int mode)
-{
- struct file *fp;
-
- fp = filp_open(path, flag, mode);
- if (IS_ERR(fp)) {
- *fpp = NULL;
- return PTR_ERR(fp);
- }
- else {
- *fpp = fp;
- return 0;
- }
-}
-
-/*
-* Close the file with the specific @param fp
-* @param fp the pointer of struct file to close
-* @return always 0
-*/
-static int closeFile(struct file *fp)
-{
- filp_close(fp, NULL);
- return 0;
-}
-
-static int readFile(struct file *fp, char *buf, int len)
-{
- int rlen = 0, sum = 0;
-
- if (!fp->f_op || !fp->f_op->read)
- return -EPERM;
-
- while (sum < len) {
- rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
- if (rlen > 0)
- sum += rlen;
- else if (0 != rlen)
- return rlen;
- else
- break;
- }
-
- return sum;
-
-}
-
-/*
-* Test if the specifi @param path is a file and readable
-* @param path the path of the file to test
-* @return Linux specific error code
-*/
-static int isFileReadable(char *path)
-{
- struct file *fp;
- int ret = 0;
- char buf;
-
- fp = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(fp))
- return PTR_ERR(fp);
-
- if (readFile(fp, &buf, 1) != 1)
- ret = -EINVAL;
-
- filp_close(fp, NULL);
- return ret;
-}
-
-/*
-* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
-* @param path the path of the file to open and read
-* @param buf the starting address of the buffer to store file content
-* @param sz how many bytes to read at most
-* @return the byte we've read, or Linux specific error code
-*/
-static int retriveFromFile(char *path, u8 *buf, u32 sz)
-{
- int ret = -1;
- struct file *fp;
-
- if (path && buf) {
- ret = openFile(&fp, path, O_RDONLY, 0);
-
- if (ret == 0) {
- DBG_871X("%s openFile path:%s fp =%p\n", __func__, path , fp);
-
- ret = readFile(fp, buf, sz);
- closeFile(fp);
-
- DBG_871X("%s readFile, ret:%d\n", __func__, ret);
-
- } else {
- DBG_871X("%s openFile path:%s Fail, ret:%d\n", __func__, path, ret);
- }
- } else {
- DBG_871X("%s NULL pointer\n", __func__);
- ret = -EINVAL;
- }
- return ret;
-}
-
-/*
-* Test if the specifi @param path is a file and readable
-* @param path the path of the file to test
-* @return true or false
-*/
-int rtw_is_file_readable(char *path)
-{
- if (isFileReadable(path) == 0)
- return true;
- else
- return false;
-}
-
-/*
-* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
-* @param path the path of the file to open and read
-* @param buf the starting address of the buffer to store file content
-* @param sz how many bytes to read at most
-* @return the byte we've read
-*/
-int rtw_retrive_from_file(char *path, u8 *buf, u32 sz)
-{
- int ret = retriveFromFile(path, buf, sz);
- return ret >= 0 ? ret : 0;
-}
-
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
{
struct net_device *pnetdev;
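
Note: the deleted helpers opened and read files from kernel context with filp_open()/kernel_read(); with the external parameter files gone, nothing calls them. Where a driver genuinely needs a blob from user space, the firmware loader is the conventional interface. A hedged sketch; the file name is made up, and real files live under /lib/firmware:

#include <linux/firmware.h>
#include <linux/device.h>

static int example_load_blob(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "rtl8723b/example_params.bin", dev);
        if (ret)
                return ret;

        /* fw->data and fw->size describe the loaded blob. */

        release_firmware(fw);
        return 0;
}
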
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index d3784c44f6d0..859f4a0afb95 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -18,18 +18,13 @@
static const struct sdio_device_id sdio_ids[] =
{
{ SDIO_DEVICE(0x024c, 0x0523), },
+ { SDIO_DEVICE(0x024c, 0x0525), },
{ SDIO_DEVICE(0x024c, 0x0623), },
{ SDIO_DEVICE(0x024c, 0x0626), },
{ SDIO_DEVICE(0x024c, 0xb723), },
{ /* end: all zeroes */ },
};
-static const struct acpi_device_id acpi_ids[] = {
- {"OBDA8723", 0x0000},
- {}
-};
-
MODULE_DEVICE_TABLE(sdio, sdio_ids);
-MODULE_DEVICE_TABLE(acpi, acpi_ids);
static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id);
static void rtw_dev_remove(struct sdio_func *func);
@@ -281,7 +276,6 @@ static void sdio_dvobj_deinit(struct sdio_func *func)
sdio_deinit(dvobj);
devobj_deinit(dvobj);
}
- return;
}
void rtw_set_hal_ops(struct adapter *padapter)
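
Note: the unused ACPI ID table is removed and a new SDIO device ID (0x024c:0x0525) is added. SDIO_DEVICE() fills a vendor/device entry and MODULE_DEVICE_TABLE(sdio, ...) exports the table so the module can be autoloaded when a matching card appears. A condensed sketch of the pattern; only two of the IDs are repeated and the table name is illustrative:

#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

static const struct sdio_device_id example_sdio_ids[] = {
        { SDIO_DEVICE(0x024c, 0x0523) },
        { SDIO_DEVICE(0x024c, 0x0525) },        /* the newly added ID */
        { /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, example_sdio_ids);
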
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index e853fa9cc950..d53dd138a356 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -590,7 +590,7 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
int retval, i;
u8 val;
- retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1);
+ retval = ms_set_rw_reg_addr(chip, PRO_STATUS_REG, 6, SYSTEM_PARAM, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -840,7 +840,7 @@ static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
ms_cleanup_work(chip);
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_DATA_COUNT1, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -885,7 +885,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
int found_sys_info = 0, found_model_name = 0;
#endif
- retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, PRO_INT_REG, 2, PRO_SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1232,7 +1232,7 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
int retval;
u8 val[2];
- retval = ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
+ retval = ms_set_rw_reg_addr(chip, STATUS_REG0, 2, 0, 0);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1255,8 +1255,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
int retval, i;
u8 val, data[10];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1307,8 +1307,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm,
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM,
6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1339,8 +1339,8 @@ static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
if (!buf || (buf_len < MS_EXTRA_SIZE))
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6 + MS_EXTRA_SIZE);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6 + MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1392,8 +1392,8 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
int retval;
u8 val, data[6];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1465,8 +1465,8 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1519,8 +1519,8 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
int retval, i = 0;
u8 val, data[6];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1579,7 +1579,7 @@ static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
memset(extra, 0xFF, MS_EXTRA_SIZE);
- if (type == setPS_NG) {
+ if (type == set_PS_NG) {
/* set page status as 1:NG,and block status keep 1:OK */
extra[0] = 0xB8;
} else {
@@ -1670,8 +1670,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1725,7 +1725,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
return STATUS_FAIL;
if (uncorrect_flag) {
- ms_set_page_status(log_blk, setPS_NG,
+ ms_set_page_status(log_blk, set_PS_NG,
extra,
MS_EXTRA_SIZE);
if (i == 0)
@@ -1738,8 +1738,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
i, extra[0]);
MS_SET_BAD_BLOCK_FLG(ms_card);
- ms_set_page_status(log_blk, setPS_Error,
- extra,
+ ms_set_page_status(log_blk,
+ set_PS_error, extra,
MS_EXTRA_SIZE);
ms_write_extra_data(chip, new_blk, i,
extra,
@@ -1767,8 +1767,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, (6 + MS_EXTRA_SIZE));
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
ms_set_err_code(chip, MS_NO_ERROR);
@@ -1822,8 +1822,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
if (i == 0) {
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm,
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM,
7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1980,8 +1980,8 @@ RE_SEARCH:
for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++)
rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
- rtsx_add_cmd(chip, READ_REG_CMD, MS_Device_Type, 0, 0);
- rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_Support, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_device_type, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_support, 0, 0);
retval = rtsx_send_cmd(chip, MS_CARD, 100);
if (retval < 0)
@@ -2057,7 +2057,7 @@ RE_SEARCH:
/* Switch I/F Mode */
if (ptr[15]) {
- retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, SYSTEM_PARAM, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -2887,7 +2887,7 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01);
+ retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, PRO_TPC_PARM, 0x01);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -2970,8 +2970,8 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3026,7 +3026,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (!(chip->card_wp & MS_CARD)) {
reset_ms(chip);
ms_set_page_status
- (log_blk, setPS_NG,
+ (log_blk, set_PS_NG,
extra,
MS_EXTRA_SIZE);
ms_write_extra_data
@@ -3131,8 +3131,8 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
u8 *ptr;
if (!start_page) {
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3165,8 +3165,8 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
return STATUS_FAIL;
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, (6 + MS_EXTRA_SIZE));
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3773,9 +3773,9 @@ static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
u8 buf[6];
if (type == 0)
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_TPCParm, 1);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_TPC_PARM, 1);
else
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_DATA_COUNT1, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -4154,7 +4154,7 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
- goto SetICVFinish;
+ goto set_ICV_finish;
}
#ifdef MG_SET_ICV_SLOW
@@ -4195,7 +4195,7 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SENSE_TYPE_MG_WRITE_ERR);
}
retval = STATUS_FAIL;
- goto SetICVFinish;
+ goto set_ICV_finish;
}
}
#else
@@ -4214,11 +4214,11 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
- goto SetICVFinish;
+ goto set_ICV_finish;
}
#endif
-SetICVFinish:
+set_ICV_finish:
kfree(buf);
return retval;
}
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
index 952cc14dd079..33bda9ce36b6 100644
--- a/drivers/staging/rts5208/ms.h
+++ b/drivers/staging/rts5208/ms.h
@@ -92,37 +92,37 @@
#define PRO_FORMAT 0x10
#define PRO_SLEEP 0x11
-#define IntReg 0x01
-#define StatusReg0 0x02
-#define StatusReg1 0x03
-
-#define SystemParm 0x10
-#define BlockAdrs 0x11
-#define CMDParm 0x14
-#define PageAdrs 0x15
-
-#define OverwriteFlag 0x16
-#define ManagemenFlag 0x17
-#define LogicalAdrs 0x18
-#define ReserveArea 0x1A
-
-#define Pro_IntReg 0x01
-#define Pro_StatusReg 0x02
-#define Pro_TypeReg 0x04
-#define Pro_IFModeReg 0x05
-#define Pro_CatagoryReg 0x06
-#define Pro_ClassReg 0x07
-
-#define Pro_SystemParm 0x10
-#define Pro_DataCount1 0x11
-#define Pro_DataCount0 0x12
-#define Pro_DataAddr3 0x13
-#define Pro_DataAddr2 0x14
-#define Pro_DataAddr1 0x15
-#define Pro_DataAddr0 0x16
-
-#define Pro_TPCParm 0x17
-#define Pro_CMDParm 0x18
+#define INT_REG 0x01
+#define STATUS_REG0 0x02
+#define STATUS_REG1 0x03
+
+#define SYSTEM_PARAM 0x10
+#define BLOCK_ADRS 0x11
+#define CMD_PARM 0x14
+#define PAGE_ADRS 0x15
+
+#define OVERWRITE_FLAG 0x16
+#define MANAGEMEN_FLAG 0x17
+#define LOGICAL_ADRS 0x18
+#define RESERVE_AREA 0x1A
+
+#define PRO_INT_REG 0x01
+#define PRO_STATUS_REG 0x02
+#define PRO_TYPE_REG 0x04
+#define PRO_IF_mode_REG 0x05
+#define PRO_CATEGORY_REG 0x06
+#define PRO_CLASS_REG 0x07
+
+#define PRO_SYSTEM_PARAM 0x10
+#define PRO_DATA_COUNT1 0x11
+#define PRO_DATA_COUNT0 0x12
+#define PRO_DATA_ADDR3 0x13
+#define PRO_DATA_ADDR2 0x14
+#define PRO_DATA_ADDR1 0x15
+#define PRO_DATA_ADDR0 0x16
+
+#define PRO_TPC_PARM 0x17
+#define PRO_CMD_PARM 0x18
#define INT_REG_CED 0x80
#define INT_REG_ERR 0x40
@@ -152,12 +152,12 @@
#define PAGE_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 8)
#define PAGE_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 9)
-#define MS_Device_Type (PPBUF_BASE2 + 0x1D8)
+#define MS_device_type (PPBUF_BASE2 + 0x1D8)
-#define MS_4bit_Support (PPBUF_BASE2 + 0x1D3)
+#define MS_4bit_support (PPBUF_BASE2 + 0x1D3)
-#define setPS_NG 1
-#define setPS_Error 0
+#define set_PS_NG 1
+#define set_PS_error 0
#define PARALLEL_8BIT_IF 0x40
#define PARALLEL_4BIT_IF 0x00
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index fa597953e9a0..cb95ad6fa4f9 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -873,7 +873,8 @@ static int rtsx_probe(struct pci_dev *pci,
(unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));
dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
- &dev->rtsx_resv_buf_addr, GFP_KERNEL);
+ &dev->rtsx_resv_buf_addr,
+ GFP_KERNEL);
if (!dev->rtsx_resv_buf) {
dev_err(&pci->dev, "alloc dma buffer fail\n");
err = -ENXIO;
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 561851cc8780..5f1eefe80f1e 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -677,8 +677,8 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
spin_unlock_irq(&rtsx->reg_lock);
/* Wait for TRANS_OK_INT */
- timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, msecs_to_jiffies(timeout));
+ timeleft = wait_for_completion_interruptible_timeout(&trans_done,
+ msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
index dc9e8cad7a74..f4ff62653b56 100644
--- a/drivers/staging/rts5208/sd.h
+++ b/drivers/staging/rts5208/sd.h
@@ -232,7 +232,7 @@
#define DCM_LOW_FREQUENCY_MODE 0x01
#define DCM_HIGH_FREQUENCY_MODE_SET 0x0C
-#define DCM_Low_FREQUENCY_MODE_SET 0x00
+#define DCM_LOW_FREQUENCY_MODE_SET 0x00
#define MULTIPLY_BY_1 0x00
#define MULTIPLY_BY_2 0x01
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index f3dc96a4c59d..0f369935fb6c 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -630,13 +630,13 @@ static int reset_xd(struct rtsx_chip *chip)
xd_card->zone_cnt = 32;
xd_card->capacity = 1024000;
break;
- case xD_1G_X8_512:
+ case XD_1G_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 64;
xd_card->capacity = 2048000;
break;
- case xD_2G_X8_512:
+ case XD_2G_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 128;
@@ -669,10 +669,10 @@ static int reset_xd(struct rtsx_chip *chip)
return STATUS_FAIL;
}
- retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
+ retval = xd_read_id(chip, READ_XD_ID, id_buf, 4);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- dev_dbg(rtsx_dev(chip), "READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ dev_dbg(rtsx_dev(chip), "READ_XD_ID: 0x%x 0x%x 0x%x 0x%x\n",
id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
if (id_buf[2] != XD_ID_CODE)
return STATUS_FAIL;
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
index 57b94129b26f..98c00f268e56 100644
--- a/drivers/staging/rts5208/xd.h
+++ b/drivers/staging/rts5208/xd.h
@@ -36,7 +36,7 @@
#define BLK_ERASE_1 0x60
#define BLK_ERASE_2 0xD0
#define READ_STS 0x70
-#define READ_xD_ID 0x9A
+#define READ_XD_ID 0x9A
#define COPY_BACK_512 0x8A
#define COPY_BACK_2K 0x85
#define READ1_1_2 0x30
@@ -72,8 +72,8 @@
#define XD_128M_X16_2048 0xC1
#define XD_4M_X8_512_1 0xE3
#define XD_4M_X8_512_2 0xE5
-#define xD_1G_X8_512 0xD3
-#define xD_2G_X8_512 0xD5
+#define XD_1G_X8_512 0xD3
+#define XD_2G_X8_512 0xD5
#define XD_ID_CODE 0xB5
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 5a317cc98a4b..02860d3ec365 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -56,7 +56,6 @@ static unsigned int get_mxclk_freq(void)
static void set_chip_clock(unsigned int frequency)
{
struct pll_value pll;
- unsigned int actual_mx_clk;
/* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
if (sm750_get_chip_type() == SM750LE)
@@ -66,8 +65,8 @@ static void set_chip_clock(unsigned int frequency)
/*
* Set up PLL structure to hold the value to be set in clocks.
*/
- pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
- pll.clockType = MXCLK_PLL;
+ pll.input_freq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
+ pll.clock_type = MXCLK_PLL;
/*
* Call sm750_calc_pll_value() to fill the other fields
@@ -76,7 +75,7 @@ static void set_chip_clock(unsigned int frequency)
* Return value of sm750_calc_pll_value gives the actual
* possible clock.
*/
- actual_mx_clk = sm750_calc_pll_value(frequency, &pll);
+ sm750_calc_pll_value(frequency, &pll);
/* Master Clock Control: MXCLK_PLL */
poke32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll));
@@ -211,13 +210,13 @@ unsigned int ddk750_get_vm_size(void)
return data;
}
-int ddk750_init_hw(struct initchip_param *pInitParam)
+int ddk750_init_hw(struct initchip_param *p_init_param)
{
unsigned int reg;
- if (pInitParam->powerMode != 0)
- pInitParam->powerMode = 0;
- sm750_set_power_mode(pInitParam->powerMode);
+ if (p_init_param->power_mode != 0)
+ p_init_param->power_mode = 0;
+ sm750_set_power_mode(p_init_param->power_mode);
/* Enable display power gate & LOCALMEM power gate*/
reg = peek32(CURRENT_GATE);
@@ -238,13 +237,13 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
}
/* Set the Main Chip Clock */
- set_chip_clock(MHz((unsigned int)pInitParam->chipClock));
+ set_chip_clock(MHz((unsigned int)p_init_param->chip_clock));
/* Set up memory clock. */
- set_memory_clock(MHz(pInitParam->memClock));
+ set_memory_clock(MHz(p_init_param->mem_clock));
/* Set up master clock */
- set_master_clock(MHz(pInitParam->masterClock));
+ set_master_clock(MHz(p_init_param->master_clock));
/*
* Reset the memory controller.
@@ -252,7 +251,7 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
* the system might hang when sw accesses the memory.
* The memory should be reset after changing the MXCLK.
*/
- if (pInitParam->resetMemory == 1) {
+ if (p_init_param->reset_memory == 1) {
reg = peek32(MISC_CTRL);
reg &= ~MISC_CTRL_LOCALMEM_RESET;
poke32(MISC_CTRL, reg);
@@ -261,7 +260,7 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
poke32(MISC_CTRL, reg);
}
- if (pInitParam->setAllEngOff == 1) {
+ if (p_init_param->set_all_eng_off == 1) {
sm750_enable_2d_engine(0);
/* Disable Overlay, if a former application left it on */
@@ -337,13 +336,13 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
ret = 0;
mini_diff = ~0;
request = request_orig / 1000;
- input = pll->inputFreq / 1000;
+ input = pll->input_freq / 1000;
/*
* for MXCLK register,
* no POD provided, so need be treated differently
*/
- if (pll->clockType == MXCLK_PLL)
+ if (pll->clock_type == MXCLK_PLL)
max_d = 3;
for (N = 15; N > 1; N--) {
@@ -365,7 +364,7 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
if (M < 256 && M > 0) {
unsigned int diff;
- tmp_clock = pll->inputFreq * M / N / X;
+ tmp_clock = pll->input_freq * M / N / X;
diff = abs(tmp_clock - request_orig);
if (diff < mini_diff) {
pll->M = M;
@@ -383,14 +382,14 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
return ret;
}
-unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
+unsigned int sm750_format_pll_reg(struct pll_value *p_PLL)
{
#ifndef VALIDATION_CHIP
- unsigned int POD = pPLL->POD;
+ unsigned int POD = p_PLL->POD;
#endif
- unsigned int OD = pPLL->OD;
- unsigned int M = pPLL->M;
- unsigned int N = pPLL->N;
+ unsigned int OD = p_PLL->OD;
+ unsigned int M = p_PLL->M;
+ unsigned int N = p_PLL->N;
/*
* Note that all PLL's have the same format. Here, we just use
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 3e92b3297160..ee2e9d90f7dd 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -40,8 +40,8 @@ enum clock_type {
};
struct pll_value {
- enum clock_type clockType;
- unsigned long inputFreq; /* Input clock frequency to the PLL */
+ enum clock_type clock_type;
+ unsigned long input_freq; /* Input clock frequency to the PLL */
/* Use this when clockType = PANEL_PLL */
unsigned long M;
@@ -53,41 +53,41 @@ struct pll_value {
/* input struct to initChipParam() function */
struct initchip_param {
/* Use power mode 0 or 1 */
- unsigned short powerMode;
+ unsigned short power_mode;
/*
* Speed of main chip clock in MHz unit
* 0 = keep the current clock setting
* Others = the new main chip clock
*/
- unsigned short chipClock;
+ unsigned short chip_clock;
/*
* Speed of memory clock in MHz unit
* 0 = keep the current clock setting
* Others = the new memory clock
*/
- unsigned short memClock;
+ unsigned short mem_clock;
/*
* Speed of master clock in MHz unit
* 0 = keep the current clock setting
* Others = the new master clock
*/
- unsigned short masterClock;
+ unsigned short master_clock;
/*
* 0 = leave all engine state untouched.
* 1 = make sure they are off: 2D, Overlay,
* video alpha, alpha, hardware cursors
*/
- unsigned short setAllEngOff;
+ unsigned short set_all_eng_off;
/*
* 0 = Do not reset the memory controller
* 1 = Reset the memory controller
*/
- unsigned char resetMemory;
+ unsigned char reset_memory;
/* More initialization parameter can be added if needed */
};
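As a minimal sketch of how a caller fills the renamed structure (the clock figures below are illustrative assumptions, not values taken from the driver):

	struct initchip_param init = {
		.power_mode      = 0,	/* use power mode 0 */
		.chip_clock      = 290,	/* main chip clock in MHz, 0 = keep current */
		.mem_clock       = 333,	/* memory clock in MHz */
		.master_clock    = 200,	/* master clock in MHz */
		.set_all_eng_off = 1,	/* force 2D, overlay, alpha and cursors off */
		.reset_memory    = 1,	/* reset the memory controller after MXCLK change */
	};

	ddk750_init_hw(&init);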
@@ -95,7 +95,7 @@ struct initchip_param {
enum logical_chip_type sm750_get_chip_type(void);
void sm750_set_chip_type(unsigned short dev_id, u8 rev_id);
unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll);
-unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
+unsigned int sm750_format_pll_reg(struct pll_value *p_PLL);
unsigned int ddk750_get_vm_size(void);
int ddk750_init_hw(struct initchip_param *pinit_param);
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 887ea8aef43f..172624ff98b0 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -147,8 +147,8 @@ void ddk750_set_logical_disp_out(enum disp_output output)
if (output & PNL_SEQ_USAGE) {
/* set panel sequence */
- sw_panel_power_sequence((output & PNL_SEQ_MASK) >> PNL_SEQ_OFFSET,
- 4);
+ sw_panel_power_sequence((output & PNL_SEQ_MASK) >>
+ PNL_SEQ_OFFSET, 4);
}
if (output & DAC_USAGE)
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 4dac691ad1b1..e00a6cb31947 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -13,8 +13,9 @@
* HW only supports 7 predefined pixel clocks, and clock select is
* in bit 29:27 of Display Control register.
*/
-static unsigned long displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
- unsigned long dispControl)
+static unsigned long
+displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
+ unsigned long dispControl)
{
unsigned long x, y;
@@ -81,7 +82,7 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
int cnt = 0;
unsigned int tmp, reg;
- if (pll->clockType == SECONDARY_PLL) {
+ if (pll->clock_type == SECONDARY_PLL) {
/* programe secondary pixel clock */
poke32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
@@ -134,7 +135,7 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
poke32(CRT_DISPLAY_CTRL, tmp | reg);
}
- } else if (pll->clockType == PRIMARY_PLL) {
+ } else if (pll->clock_type == PRIMARY_PLL) {
unsigned int reserved;
poke32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll));
@@ -209,12 +210,11 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
int ddk750_setModeTiming(struct mode_parameter *parm, enum clock_type clock)
{
struct pll_value pll;
- unsigned int uiActualPixelClk;
- pll.inputFreq = DEFAULT_INPUT_CLOCK;
- pll.clockType = clock;
+ pll.input_freq = DEFAULT_INPUT_CLOCK;
+ pll.clock_type = clock;
- uiActualPixelClk = sm750_calc_pll_value(parm->pixel_clock, &pll);
+ sm750_calc_pll_value(parm->pixel_clock, &pll);
if (sm750_get_chip_type() == SM750LE) {
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index c8e856c13912..73e0e9f41ec5 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -39,8 +39,10 @@ unsigned short sii164GetVendorID(void)
{
unsigned short vendorID;
- vendorID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_HIGH) << 8) |
- (unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_LOW);
+ vendorID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_VENDOR_ID_HIGH) << 8) |
+ (unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_VENDOR_ID_LOW);
return vendorID;
}
@@ -56,13 +58,18 @@ unsigned short sii164GetDeviceID(void)
{
unsigned short deviceID;
- deviceID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_HIGH) << 8) |
- (unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_LOW);
+ deviceID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_DEVICE_ID_HIGH) << 8) |
+ (unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_DEVICE_ID_LOW);
return deviceID;
}
-/* DVI.C will handle all SiI164 chip stuffs and try it best to make code minimal and useful */
+/*
+ * DVI.C will handle all SiI164 chip stuff and try its best to make code
+ * minimal and useful
+ */
/*
* sii164InitChip
@@ -133,7 +140,8 @@ long sii164InitChip(unsigned char edge_select,
#endif
/* Check if SII164 Chip exists */
- if ((sii164GetVendorID() == SII164_VENDOR_ID) && (sii164GetDeviceID() == SII164_DEVICE_ID)) {
+ if ((sii164GetVendorID() == SII164_VENDOR_ID) &&
+ (sii164GetDeviceID() == SII164_DEVICE_ID)) {
/*
* Initialize SII164 controller chip.
*/
@@ -254,7 +262,9 @@ void sii164ResetChip(void)
/*
* sii164GetChipString
- * This function returns a char string name of the current DVI Controller chip.
+ * This function returns a char string name of the current DVI Controller
+ * chip.
+ *
* It's convenient for application need to display the chip name.
*/
char *sii164GetChipString(void)
@@ -330,8 +340,8 @@ void sii164EnableHotPlugDetection(unsigned char enableHotPlug)
detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
- /* Depending on each DVI controller, need to enable the hot plug based on each
- * individual chip design.
+ /* Depending on each DVI controller, need to enable the hot plug based
+ * on each individual chip design.
*/
if (enableHotPlug != 0)
sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index 862e7bf27353..d940cb729066 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -6,10 +6,13 @@
/* Hot Plug detection mode structure */
enum sii164_hot_plug_mode {
- SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit (always high). */
- SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
- SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
- SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
+ SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit
+ * (always high).
+ */
+
+ SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
+ SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
+ SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
};
/* Silicon Image SiI164 chip prototype */
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index dbcbbd1055da..8faa601c700b 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -130,20 +130,28 @@ int sm750_hw_fillrect(struct lynx_accel *accel,
return 0;
}
-int sm750_hw_copyarea(
-struct lynx_accel *accel,
-unsigned int sBase, /* Address of source: offset in frame buffer */
-unsigned int sPitch, /* Pitch value of source surface in BYTE */
-unsigned int sx,
-unsigned int sy, /* Starting coordinate of source surface */
-unsigned int dBase, /* Address of destination: offset in frame buffer */
-unsigned int dPitch, /* Pitch value of destination surface in BYTE */
-unsigned int Bpp, /* Color depth of destination surface */
-unsigned int dx,
-unsigned int dy, /* Starting coordinate of destination surface */
-unsigned int width,
-unsigned int height, /* width and height of rectangle in pixel value */
-unsigned int rop2) /* ROP value */
+/**
+ * sm750_hw_copyarea
+ * @sBase: Address of source: offset in frame buffer
+ * @sPitch: Pitch value of source surface in BYTE
+ * @sx: Starting x coordinate of source surface
+ * @sy: Starting y coordinate of source surface
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @Bpp: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @rop2: ROP value
+ */
+int sm750_hw_copyarea(struct lynx_accel *accel,
+ unsigned int sBase, unsigned int sPitch,
+ unsigned int sx, unsigned int sy,
+ unsigned int dBase, unsigned int dPitch,
+ unsigned int Bpp, unsigned int dx, unsigned int dy,
+ unsigned int width, unsigned int height,
+ unsigned int rop2)
{
unsigned int nDirection, de_ctrl;
@@ -216,7 +224,7 @@ unsigned int rop2) /* ROP value */
/*
* Note:
- * DE_FOREGROUND are DE_BACKGROUND are don't care.
+ * DE_FOREGROUND and DE_BACKGROUND are don't care.
* DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS
* are set by set deSetTransparency().
*/
@@ -235,21 +243,21 @@ unsigned int rop2) /* ROP value */
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
- /*
- * Program pitch (distance between the 1st points of two adjacent lines).
- * Note that input pitch is BYTE value, but the 2D Pitch register uses
- * pixel values. Need Byte to pixel conversion.
- */
+ /*
+ * Program pitch (distance between the 1st points of two adjacent lines).
+ * Note that input pitch is BYTE value, but the 2D Pitch register uses
+ * pixel values. Need Byte to pixel conversion.
+ */
write_dpr(accel, DE_PITCH,
((dPitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
DE_PITCH_DESTINATION_MASK) |
(sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
- /*
- * Screen Window width in Pixels.
- * 2D engine uses this value to calculate the linear address in frame buffer
- * for a given point.
- */
+ /*
+ * Screen Window width in Pixels.
+ * 2D engine uses this value to calculate the linear address in frame buffer
+ * for a given point.
+ */
write_dpr(accel, DE_WINDOW_WIDTH,
((dPitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) &
DE_WINDOW_WIDTH_DST_MASK) |
@@ -288,20 +296,28 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
return de_ctrl;
}
-int sm750_hw_imageblit(struct lynx_accel *accel,
- const char *pSrcbuf, /* pointer to start of source buffer in system memory */
- u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
- u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
- u32 dBase, /* Address of destination: offset in frame buffer */
- u32 dPitch, /* Pitch value of destination surface in BYTE */
- u32 bytePerPixel, /* Color depth of destination surface */
- u32 dx,
- u32 dy, /* Starting coordinate of destination surface */
- u32 width,
- u32 height, /* width and height of rectangle in pixel value */
- u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data */
- u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */
- u32 rop2) /* ROP value */
+/**
+ * sm750_hw_imageblit
+ * @pSrcbuf: pointer to start of source buffer in system memory
+ * @srcDelta: Pitch value (in bytes) of the source buffer, +ive means top down
+ *            and -ive means bottom up
+ * @startBit: Mono data can start at any bit in a byte, this value should be
+ * 0 to 7
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @bytePerPixel: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @fColor: Foreground color (corresponding to a 1 in the monochrome data)
+ * @bColor: Background color (corresponding to a 0 in the monochrome data)
+ * @rop2: ROP value
+ */
+int sm750_hw_imageblit(struct lynx_accel *accel, const char *pSrcbuf,
+ u32 srcDelta, u32 startBit, u32 dBase, u32 dPitch,
+ u32 bytePerPixel, u32 dx, u32 dy, u32 width,
+ u32 height, u32 fColor, u32 bColor, u32 rop2)
{
unsigned int ulBytesPerScan;
unsigned int ul4BytesPerScan;
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index c4f42002a50f..2c79cb730a0a 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -190,37 +190,54 @@ void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt);
void sm750_hw_de_init(struct lynx_accel *accel);
int sm750_hw_fillrect(struct lynx_accel *accel,
- u32 base, u32 pitch, u32 Bpp,
- u32 x, u32 y, u32 width, u32 height,
- u32 color, u32 rop);
-
-int sm750_hw_copyarea(
-struct lynx_accel *accel,
-unsigned int sBase, /* Address of source: offset in frame buffer */
-unsigned int sPitch, /* Pitch value of source surface in BYTE */
-unsigned int sx,
-unsigned int sy, /* Starting coordinate of source surface */
-unsigned int dBase, /* Address of destination: offset in frame buffer */
-unsigned int dPitch, /* Pitch value of destination surface in BYTE */
-unsigned int bpp, /* Color depth of destination surface */
-unsigned int dx,
-unsigned int dy, /* Starting coordinate of destination surface */
-unsigned int width,
-unsigned int height, /* width and height of rectangle in pixel value */
-unsigned int rop2);
-
-int sm750_hw_imageblit(struct lynx_accel *accel,
- const char *pSrcbuf, /* pointer to start of source buffer in system memory */
- u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
- u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
- u32 dBase, /* Address of destination: offset in frame buffer */
- u32 dPitch, /* Pitch value of destination surface in BYTE */
- u32 bytePerPixel, /* Color depth of destination surface */
- u32 dx,
- u32 dy, /* Starting coordinate of destination surface */
- u32 width,
- u32 height, /* width and height of rectangle in pixel value */
- u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data */
- u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */
- u32 rop2);
+ u32 base, u32 pitch, u32 Bpp,
+ u32 x, u32 y, u32 width, u32 height,
+ u32 color, u32 rop);
+
+/**
+ * sm750_hw_copyarea
+ * @sBase: Address of source: offset in frame buffer
+ * @sPitch: Pitch value of source surface in BYTE
+ * @sx: Starting x coordinate of source surface
+ * @sy: Starting y coordinate of source surface
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @Bpp: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @rop2: ROP value
+ */
+int sm750_hw_copyarea(struct lynx_accel *accel,
+ unsigned int sBase, unsigned int sPitch,
+ unsigned int sx, unsigned int sy,
+ unsigned int dBase, unsigned int dPitch,
+ unsigned int Bpp, unsigned int dx, unsigned int dy,
+ unsigned int width, unsigned int height,
+ unsigned int rop2);
+
+/**
+ * sm750_hw_imageblit
+ * @pSrcbuf: pointer to start of source buffer in system memory
+ * @srcDelta: Pitch value (in bytes) of the source buffer, +ive means top down
+ *            and -ive means bottom up
+ * @startBit: Mono data can start at any bit in a byte, this value should be
+ *            0 to 7
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @bytePerPixel: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @fColor: Foreground color (corresponding to a 1 in the monochrome data)
+ * @bColor: Background color (corresponding to a 0 in the monochrome data)
+ * @rop2: ROP value
+ */
+int sm750_hw_imageblit(struct lynx_accel *accel, const char *pSrcbuf,
+ u32 srcDelta, u32 startBit, u32 dBase, u32 dPitch,
+ u32 bytePerPixel, u32 dx, u32 dy, u32 width,
+ u32 height, u32 fColor, u32 bColor, u32 rop2);
+
#endif
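A hypothetical call matching the new sm750_hw_copyarea() prototype and kernel-doc above; the surface offsets, pitch and ROP value are assumptions for illustration, not taken from the driver:

	/* Copy a 64x32 pixel block on a 1024-pixel-wide, 32bpp surface
	 * (pitch = 1024 * 4 = 4096 bytes). 0x0C is assumed here to be the
	 * source-copy ROP2 code.
	 */
	sm750_hw_copyarea(accel,
			  0x000000, 4096, 0, 0,		/* src base, pitch, x, y */
			  0x100000, 4096, 4,		/* dst base, pitch, Bpp */
			  100, 50,			/* dst x, y */
			  64, 32,			/* width, height in pixels */
			  0x0C);			/* ROP2 */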
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index 16ac07eb58d6..b59643dd61ed 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -5,14 +5,11 @@
/* hw_cursor_xxx works for voyager,718 and 750 */
void sm750_hw_cursor_enable(struct lynx_cursor *cursor);
void sm750_hw_cursor_disable(struct lynx_cursor *cursor);
-void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
- int w, int h);
-void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
- int x, int y);
-void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
- u32 fg, u32 bg);
-void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
- u16 rop, const u8 *data, const u8 *mask);
-void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
- u16 rop, const u8 *data, const u8 *mask);
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h);
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y);
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg);
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
+ const u8 *data, const u8 *mask);
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop,
+ const u8 *data, const u8 *mask);
#endif
diff --git a/drivers/staging/uwb/rsv.c b/drivers/staging/uwb/rsv.c
index f45a04ff7275..d593a41c3d8d 100644
--- a/drivers/staging/uwb/rsv.c
+++ b/drivers/staging/uwb/rsv.c
@@ -614,7 +614,7 @@ int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
struct uwb_rsv_move *mv;
int ret = 0;
- if (bow->can_reserve_extra_mases == false)
+ if (!bow->can_reserve_extra_mases)
return -EBUSY;
mv = &rsv->mv;
@@ -643,7 +643,7 @@ void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
struct uwb_rsv *rsv;
struct uwb_mas_bm mas;
- if (bow->can_reserve_extra_mases == false)
+ if (!bow->can_reserve_extra_mases)
return;
list_for_each_entry(rsv, &rc->reservations, rc_node) {
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Kconfig b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
index f66319512faf..d32ea348e846 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config SND_BCM2835
- tristate "BCM2835 Audio"
- depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
- select SND_PCM
- select BCM2835_VCHIQ
- help
- Say Y or M if you want to support BCM2835 built in audio
+ tristate "BCM2835 Audio"
+ depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
+ select SND_PCM
+ select BCM2835_VCHIQ
+ help
+ Say Y or M if you want to support BCM2835 built in audio
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index c6f9cf1913d2..73144f1ce45e 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -9,7 +9,7 @@
struct bcm2835_audio_instance {
struct device *dev;
- VCHI_SERVICE_HANDLE_T vchi_handle;
+ struct vchi_service_handle *vchi_handle;
struct completion msg_avail_comp;
struct mutex vchi_mutex;
struct bcm2835_alsa_stream *alsa_stream;
@@ -90,7 +90,7 @@ static int bcm2835_audio_send_simple(struct bcm2835_audio_instance *instance,
}
static void audio_vchi_callback(void *param,
- const VCHI_CALLBACK_REASON_T reason,
+ const enum vchi_callback_reason reason,
void *msg_handle)
{
struct bcm2835_audio_instance *instance = param;
@@ -103,6 +103,9 @@ static void audio_vchi_callback(void *param,
status = vchi_msg_dequeue(instance->vchi_handle,
&m, sizeof(m), &msg_len, VCHI_FLAGS_NONE);
+ if (status)
+ return;
+
if (m.type == VC_AUDIO_MSG_TYPE_RESULT) {
instance->result = m.result.success;
complete(&instance->msg_avail_comp);
@@ -119,7 +122,7 @@ static void audio_vchi_callback(void *param,
}
static int
-vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
+vc_vchi_audio_init(struct vchi_instance_handle *vchi_instance,
struct bcm2835_audio_instance *instance)
{
struct service_creation params = {
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index ed0feb34b6c8..d2fe8d36ab7d 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -44,7 +44,7 @@ enum snd_bcm2835_ctrl {
};
struct bcm2835_vchi_ctx {
- VCHI_INSTANCE_T vchi_instance;
+ struct vchi_instance_handle *vchi_instance;
};
/* definition of the chip-specific record */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index d4d1e44b16b2..beb6a0063bb8 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1090,8 +1090,8 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
- if (!ret
- && camera_port ==
+ if (!ret &&
+ camera_port ==
&dev->component[COMP_CAMERA]->output[CAM_PORT_VIDEO]) {
bool overlay_enabled =
!!dev->component[COMP_PREVIEW]->enabled;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 1c180ead4a20..de03b90021a8 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -155,7 +155,7 @@ struct mmal_msg_context {
};
struct vchiq_mmal_instance {
- VCHI_SERVICE_HANDLE_T handle;
+ struct vchi_service_handle *handle;
/* ensure serialised access to service */
struct mutex vchiq_mutex;
@@ -535,7 +535,7 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
/* incoming event service callback */
static void service_callback(void *param,
- const VCHI_CALLBACK_REASON_T reason,
+ const enum vchi_callback_reason reason,
void *bulk_ctx)
{
struct vchiq_mmal_instance *instance = param;
@@ -1814,7 +1814,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
{
int status;
struct vchiq_mmal_instance *instance;
- static VCHI_INSTANCE_T vchi_instance;
+ static struct vchi_instance_handle *vchi_instance;
struct service_creation params = {
.version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
.service_id = VC_MMAL_SERVER_NAME,
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index f85562b9ba9e..56b1037d8e25 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -8,17 +8,17 @@
#include "interface/vchi/vchi_common.h"
/******************************************************************************
- Global defs
+ * Global defs
*****************************************************************************/
-#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
-#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
-#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
+#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x)) + VCHI_BULK_ALIGN - 1) & ~(VCHI_BULK_ALIGN - 1))
+#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN - 1))
+#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN - 1))))
#ifdef USE_VCHIQ_ARM
#define VCHI_BULK_ALIGNED(x) 1
#else
-#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
+#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN - 1)) == 0)
#endif
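Worked values for the alignment macros above, assuming the non-ARM definition of VCHI_BULK_ALIGNED and VCHI_BULK_ALIGN == 32 (one of the sizes configured in vchi_cfg.h):

	/*
	 *   VCHI_BULK_ROUND_UP(100)     -> (100 + 31) & ~31 = 128
	 *   VCHI_BULK_ROUND_DOWN(100)   -> 100 & ~31        = 96
	 *   VCHI_BULK_ALIGN_NBYTES(100) -> 32 - (100 & 31)  = 28
	 */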
struct vchi_version {
@@ -45,18 +45,19 @@ struct vchi_held_msg {
struct service_creation {
struct vchi_version version;
int32_t service_id;
- VCHI_CALLBACK_T callback;
+ vchi_callback callback;
void *callback_param;
};
// Opaque handle for a VCHI instance
-typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
+struct vchi_instance_handle;
// Opaque handle for a server or client
-typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
+struct vchi_service_handle;
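A minimal before/after sketch of what the handle change means for callers (declarations only, error handling elided):

	/* before: opaque typedefs */
	VCHI_INSTANCE_T instance;
	VCHI_SERVICE_HANDLE_T service;

	/* after: forward-declared structs */
	struct vchi_instance_handle *instance;
	struct vchi_service_handle *service;

	vchi_initialise(&instance);
	vchi_connect(instance);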
/******************************************************************************
- Global funcs - implementation is specific to which side you are on (local / remote)
+ * Global funcs - implementation is specific to which side you are on
+ * (local / remote)
*****************************************************************************/
#ifdef __cplusplus
@@ -64,98 +65,99 @@ extern "C" {
#endif
// Routine used to initialise the vchi on both local + remote connections
-extern int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle);
+extern int32_t vchi_initialise(struct vchi_instance_handle **instance_handle);
extern int32_t vchi_exit(void);
-extern int32_t vchi_connect(VCHI_INSTANCE_T instance_handle);
+extern int32_t vchi_connect(struct vchi_instance_handle *instance_handle);
//When this is called, ensure that all services have no data pending.
//Bulk transfers can remain 'queued'
-extern int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle);
+extern int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle);
// helper functions
-extern void *vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
-extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
-extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
+extern void *vchi_allocate_buffer(struct vchi_service_handle *handle, uint32_t *length);
+extern void vchi_free_buffer(struct vchi_service_handle *handle, void *address);
+extern uint32_t vchi_current_time(struct vchi_instance_handle *instance_handle);
/******************************************************************************
- Global service API
+ * Global service API
*****************************************************************************/
// Routine to destroy a service
-extern int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_destroy(const struct vchi_service_handle *handle);
// Routine to open a named service
-extern int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
+extern int32_t vchi_service_open(struct vchi_instance_handle *instance_handle,
struct service_creation *setup,
- VCHI_SERVICE_HANDLE_T *handle);
+ struct vchi_service_handle **handle);
-extern int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_get_peer_version(const struct vchi_service_handle *handle,
short *peer_version);
// Routine to close a named service
-extern int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_close(const struct vchi_service_handle *handle);
// Routine to increment ref count on a named service
-extern int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_use(const struct vchi_service_handle *handle);
// Routine to decrement ref count on a named service
-extern int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_release(const struct vchi_service_handle *handle);
// Routine to set a control option for a named service
-extern int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
- VCHI_SERVICE_OPTION_T option,
- int value);
+extern int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
+ enum vchi_service_option option,
+ int value);
/* Routine to send a message from kernel memory across a service */
extern int
-vchi_queue_kernel_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_kernel_message(struct vchi_service_handle *handle,
void *data,
unsigned int size);
/* Routine to send a message from user memory across a service */
extern int
-vchi_queue_user_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_user_message(struct vchi_service_handle *handle,
void __user *data,
unsigned int size);
// Routine to receive a msg from a service
// Dequeue is equivalent to hold, copy into client buffer, release
-extern int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_dequeue(struct vchi_service_handle *handle,
void *data,
uint32_t max_data_size_to_read,
uint32_t *actual_msg_size,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
// Routine to look at a message in place.
// The message is not dequeued, so a subsequent call to peek or dequeue
// will return the same message.
-extern int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_peek(struct vchi_service_handle *handle,
void **data,
uint32_t *msg_size,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
// Routine to remove a message after it has been read in place with peek
// The first message on the queue is dequeued.
-extern int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_msg_remove(struct vchi_service_handle *handle);
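A minimal sketch of the peek-then-remove pattern with the reworked signatures, assuming the usual 0-on-success return convention; process_message() is a hypothetical consumer:

	void *data;
	uint32_t size;

	if (!vchi_msg_peek(handle, &data, &size, VCHI_FLAGS_NONE)) {
		process_message(data, size);	/* hypothetical consumer */
		vchi_msg_remove(handle);	/* drop it from the queue */
	}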
// Routine to look at a message in place.
// The message is dequeued, so the caller is left holding it; the descriptor is
// filled in and must be released when the user has finished with the message.
-extern int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_hold(struct vchi_service_handle *handle,
void **data, // } may be NULL, as info can be
uint32_t *msg_size, // } obtained from HELD_MSG_T
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
struct vchi_held_msg *message_descriptor);
// Initialise an iterator to look through messages in place
-extern int32_t vchi_msg_look_ahead(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_look_ahead(struct vchi_service_handle *handle,
struct vchi_msg_iter *iter,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
-/******************************************************************************
- Global service support API - operations on held messages and message iterators
- *****************************************************************************/
+/*******************************************************************************
+ * Global service support API - operations on held messages
+ * and message iterators
+ ******************************************************************************/
// Routine to get the address of a held message
extern void *vchi_held_msg_ptr(const struct vchi_held_msg *message);
@@ -196,42 +198,42 @@ extern int32_t vchi_msg_iter_hold_next(struct vchi_msg_iter *iter,
struct vchi_held_msg *message);
/******************************************************************************
- Global bulk API
+ * Global bulk API
*****************************************************************************/
// Routine to prepare interface for a transfer from the other side
-extern int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle,
void *data_dst,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
// Prepare interface for a transfer from the other side into relocatable memory.
-int32_t vchi_bulk_queue_receive_reloc(const VCHI_SERVICE_HANDLE_T handle,
+int32_t vchi_bulk_queue_receive_reloc(const struct vchi_service_handle *handle,
uint32_t offset,
uint32_t data_size,
- const VCHI_FLAGS_T flags,
+ const enum vchi_flags flags,
void * const bulk_handle);
// Routine to queue up data ready for transfer to the other (once they have signalled they are ready)
-extern int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
const void *data_src,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
/******************************************************************************
- Configuration plumbing
+ * Configuration plumbing
*****************************************************************************/
#ifdef __cplusplus
}
#endif
-extern int32_t vchi_bulk_queue_transmit_reloc(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_transmit_reloc(struct vchi_service_handle *handle,
uint32_t offset,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
#endif /* VCHI_H_ */
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
index 89aa4e6122cd..138c36151a22 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
@@ -4,13 +4,17 @@
#ifndef VCHI_CFG_H_
#define VCHI_CFG_H_
-/****************************************************************************************
- * Defines in this first section are part of the VCHI API and may be examined by VCHI
- * services.
- ***************************************************************************************/
-
-/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
-/* Really determined by the message driver, and should be available from a run-time call. */
+/*******************************************************************************
+ * Defines in this first section are part of the VCHI API and may be examined by
+ * VCHI services.
+ ******************************************************************************/
+
+/*
+ * Required alignment of base addresses for bulk transfer, if unaligned
+ * transfers are not enabled
+ * Really determined by the message driver, and should be available from
+ * a run-time call.
+ */
#ifndef VCHI_BULK_ALIGN
# if __VCCOREVER__ >= 0x04000000
# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
@@ -19,9 +23,13 @@
# endif
#endif
-/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
-/* May be less than or greater than VCHI_BULK_ALIGN */
-/* Really determined by the message driver, and should be available from a run-time call. */
+/*
+ * Required length multiple for bulk transfers, if unaligned transfers are
+ * not enabled
+ * May be less than or greater than VCHI_BULK_ALIGN
+ * Really determined by the message driver, and should be available from
+ * a run-time call.
+ */
#ifndef VCHI_BULK_GRANULARITY
# if __VCCOREVER__ >= 0x04000000
# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
@@ -39,19 +47,24 @@
# endif
#endif
-/******************************************************************************************
- * Defines below are system configuration options, and should not be used by VCHI services.
- *****************************************************************************************/
+/******************************************************************************
+ * Defines below are system configuration options, and should not be used by
+ * VCHI services.
+ ******************************************************************************/
-/* How many connections can we support? A localhost implementation uses 2 connections,
- * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
- * driver. */
+/*
+ * How many connections can we support? A localhost implementation uses
+ * 2 connections, 1 for host-app, 1 for VMCS, and these are hooked together
+ * by a loopback MPHI VCFW driver.
+ */
#ifndef VCHI_MAX_NUM_CONNECTIONS
# define VCHI_MAX_NUM_CONNECTIONS 3
#endif
-/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
- * amount of static memory. */
+/*
+ * How many services can we open per connection? Extending this doesn't cost
+ * processing time, just a small amount of static memory.
+ */
#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
# define VCHI_MAX_SERVICES_PER_CONNECTION 36
#endif
@@ -66,8 +79,10 @@
# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
#endif
-/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
- * receive queue space, less message headers. */
+/*
+ * How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the
+ * effective receive queue space, less message headers.
+ */
#ifndef VCHI_NUM_READ_SLOTS
# if defined(VCHI_LOCAL_HOST_PORT)
# define VCHI_NUM_READ_SLOTS 4
@@ -76,112 +91,141 @@
# endif
#endif
-/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
- * performance. Only define on VideoCore end, talking to host.
+/*
+ * Do we utilise overrun facility for receive message slots? Can aid peer
+ * transmit performance. Only define on VideoCore end, talking to host.
*/
//#define VCHI_MSG_RX_OVERRUN
-/* How many transmit slots do we use. Generally don't need many, as the hardware driver
- * underneath VCHI will usually have its own buffering. */
+/*
+ * How many transmit slots do we use. Generally don't need many,
+ * as the hardware driver underneath VCHI will usually have its own buffering.
+ */
#ifndef VCHI_NUM_WRITE_SLOTS
# define VCHI_NUM_WRITE_SLOTS 4
#endif
-/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
- * then it's taking up too much buffer space, and the peer service will be told to stop
- * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
- * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
- * is too high. */
+/*
+ * If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or
+ * more slots, then it's taking up too much buffer space,
+ * and the peer service will be told to stop transmitting with an XOFF message.
+ * For this to be effective, the VCHI_NUM_READ_SLOTS needs to be considerably
+ * bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency is too high.
+ */
#ifndef VCHI_XOFF_THRESHOLD
# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
#endif
-/* After we've sent an XOFF, the peer will be told to resume transmission once the local
- * service has dequeued/released enough messages that it's now occupying
- * VCHI_XON_THRESHOLD slots or fewer. */
+/*
+ * After we've sent an XOFF, the peer will be told to resume transmission
+ * once the local service has dequeued/released enough messages that it's now
+ * occupying VCHI_XON_THRESHOLD slots or fewer.
+ */
#ifndef VCHI_XON_THRESHOLD
# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
#endif
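Purely illustrative numbers for the flow-control thresholds above, assuming VCHI_NUM_READ_SLOTS were 48:

	/*
	 *   VCHI_XOFF_THRESHOLD = 48 / 2 = 24   slots held -> peer told to stop
	 *   VCHI_XON_THRESHOLD  = 48 / 4 = 12   slots held -> peer told to resume
	 */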
-/* A size below which a bulk transfer omits the handshake completely and always goes
- * via the message channel, if bulk auxiliary is being sent on that service. (The user
- * can guarantee this by enabling unaligned transmits).
- * Not API. */
+/*
+ * A size below which a bulk transfer omits the handshake completely and always
+ * goes via the message channel, if bulk auxiliary is being sent on that
+ * service. (The user can guarantee this by enabling unaligned transmits).
+ * Not API.
+ */
#ifndef VCHI_MIN_BULK_SIZE
# define VCHI_MIN_BULK_SIZE (VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096)
#endif
-/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
- * speed and latency; the smaller the chunk size the better change of messages and other
- * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
- * break transmissions into chunks.
+/*
+ * Maximum size of bulk transmission chunks, for each interface type.
+ * A trade-off between speed and latency; the smaller the chunk size the better
+ * chance of messages and other bulk transmissions getting in when big bulk
+ * transfers are happening. Set to 0 to not break transmissions into chunks.
*/
#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
#endif
-/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
- * with multiple-line frames. Only use if the receiver can cope. */
+/*
+ * NB Chunked CCP2 transmissions violate the letter of the CCP2 spec
+ * by using "JPEG8" mode with multiple-line frames. Only use if the receiver
+ * can cope.
+ */
#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
#endif
-/* How many TX messages can we have pending in our transmit slots. Once exhausted,
- * vchi_msg_queue will be blocked. */
+/*
+ * How many TX messages can we have pending in our transmit slots.
+ * Once exhausted, vchi_msg_queue will be blocked.
+ */
#ifndef VCHI_TX_MSG_QUEUE_SIZE
# define VCHI_TX_MSG_QUEUE_SIZE 256
#endif
-/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
- * will be suspended until older messages are dequeued/released. */
+/*
+ * How many RX messages can we have parsed in the receive slots. Once exhausted,
+ * parsing will be suspended until older messages are dequeued/released.
+ */
#ifndef VCHI_RX_MSG_QUEUE_SIZE
# define VCHI_RX_MSG_QUEUE_SIZE 256
#endif
-/* Really should be able to cope if we run out of received message descriptors, by
- * suspending parsing as the comment above says, but we don't. This sweeps the issue
- * under the carpet. */
-#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
+/*
+ * Really should be able to cope if we run out of received message descriptors,
+ * by suspending parsing as the comment above says, but we don't.
+ * This sweeps the issue under the carpet.
+ */
+#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE / 16 + 1) * VCHI_NUM_READ_SLOTS
# undef VCHI_RX_MSG_QUEUE_SIZE
-# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS)
+# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE / 16 + 1) * VCHI_NUM_READ_SLOTS)
#endif
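As an illustration of the floor applied above (both figures are assumptions, since VCHI_MAX_MSG_SIZE is defined elsewhere): with VCHI_MAX_MSG_SIZE = 4096 and VCHI_NUM_READ_SLOTS = 48,

	/*
	 *   (4096 / 16 + 1) * 48 = 257 * 48 = 12336
	 *
	 * so the default VCHI_RX_MSG_QUEUE_SIZE of 256 would be raised
	 * to 12336 by the #if block above.
	 */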
-/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
- * will be blocked. */
+/*
+ * How many bulk transmits can we have pending. Once exhausted,
+ * vchi_bulk_queue_transmit will be blocked.
+ */
#ifndef VCHI_TX_BULK_QUEUE_SIZE
# define VCHI_TX_BULK_QUEUE_SIZE 64
#endif
-/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
- * will be blocked. */
+/*
+ * How many bulk receives can we have pending. Once exhausted,
+ * vchi_bulk_queue_receive will be blocked.
+ */
#ifndef VCHI_RX_BULK_QUEUE_SIZE
# define VCHI_RX_BULK_QUEUE_SIZE 64
#endif
-/* A limit on how many outstanding bulk requests we expect the peer to give us. If
- * the peer asks for more than this, VCHI will fail and assert. The number is determined
- * by the peer's hardware - it's the number of outstanding requests that can be queued
- * on all bulk channels. VC3's MPHI peripheral allows 16. */
+/*
+ * A limit on how many outstanding bulk requests we expect the peer to give us.
+ * If the peer asks for more than this, VCHI will fail and assert.
+ * The number is determined by the peer's hardware
+ * - it's the number of outstanding requests that can be queued
+ * on all bulk channels. VC3's MPHI peripheral allows 16.
+ */
#ifndef VCHI_MAX_PEER_BULK_REQUESTS
# define VCHI_MAX_PEER_BULK_REQUESTS 32
#endif
-/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
+/*
+ * Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
* transmitter on and off.
*/
/*#define VCHI_CCP2TX_MANUAL_POWER*/
#ifndef VCHI_CCP2TX_MANUAL_POWER
-/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
- * negative for no IDLE.
+/*
+ * Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state.
+ * Set negative for no IDLE.
*/
# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
# define VCHI_CCP2TX_IDLE_TIMEOUT 5
# endif
-/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
- * negative for no OFF.
+/*
+ * Timeout (in milliseconds) for putting the CCP2TX interface into OFF state.
+ * Set negative for no OFF.
*/
# ifndef VCHI_CCP2TX_OFF_TIMEOUT
# define VCHI_CCP2TX_OFF_TIMEOUT 1000
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
index e7955cbaf26a..141af16ce031 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -5,7 +5,7 @@
#define VCHI_COMMON_H_
//flags used when sending messages (must be bitmapped)
-typedef enum {
+enum vchi_flags {
VCHI_FLAGS_NONE = 0x0,
VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
@@ -20,17 +20,17 @@ typedef enum {
VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
VCHI_FLAGS_INTERNAL = 0xFF0000
-} VCHI_FLAGS_T;
+};
// constants for vchi_crc_control()
-typedef enum {
+enum vchi_crc_control {
VCHI_CRC_NOTHING = -1,
VCHI_CRC_PER_SERVICE = 0,
VCHI_CRC_EVERYTHING = 1,
-} VCHI_CRC_CONTROL_T;
+};
//callback reasons when an event occurs on a service
-typedef enum {
+enum vchi_callback_reason {
VCHI_CALLBACK_REASON_MIN,
//This indicates that there is data available
@@ -73,22 +73,22 @@ typedef enum {
VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
VCHI_CALLBACK_REASON_MAX
-} VCHI_CALLBACK_REASON_T;
+};
// service control options
-typedef enum {
+enum vchi_service_option {
VCHI_SERVICE_OPTION_MIN,
VCHI_SERVICE_OPTION_TRACE,
VCHI_SERVICE_OPTION_SYNCHRONOUS,
VCHI_SERVICE_OPTION_MAX
-} VCHI_SERVICE_OPTION_T;
+};
//Callback used by all services / bulk transfers
-typedef void (*VCHI_CALLBACK_T)(void *callback_param, //my service local param
- VCHI_CALLBACK_REASON_T reason,
- void *handle); //for transmitting msg's only
+typedef void (*vchi_callback)(void *callback_param, //my service local param
+ enum vchi_callback_reason reason,
+ void *handle); //for transmitting msg's only
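A hypothetical callback written against the renamed typedef; struct my_instance, its rx_work member and the VCHI_CALLBACK_MSG_AVAILABLE check are assumptions made for illustration:

	static void my_service_callback(void *callback_param,
					enum vchi_callback_reason reason,
					void *handle)
	{
		struct my_instance *inst = callback_param;	/* hypothetical state */

		if (reason == VCHI_CALLBACK_MSG_AVAILABLE)	/* assumed enumerator */
			schedule_work(&inst->rx_work);		/* illustrative only */
	}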
/*
* Define vector struct for scatter-gather (vector) operations
@@ -112,12 +112,6 @@ struct vchi_msg_vector {
int32_t vec_len;
};
-// Opaque type for a connection API
-typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
-
-// Opaque type for a message driver
-typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
-
// Iterator structure for reading ahead through received message queue. Allocated by client,
// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 8dc730cfe7a6..ca30bfd52919 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -81,7 +81,6 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
struct rpi_firmware *fw = drvdata->fw;
struct vchiq_slot_zero *vchiq_slot_zero;
- struct resource *res;
void *slot_mem;
dma_addr_t slot_phys;
u32 channelbase;
@@ -135,8 +134,7 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
if (vchiq_init_state(state, vchiq_slot_zero) != VCHIQ_SUCCESS)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g_regs = devm_ioremap_resource(&pdev->dev, res);
+ g_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g_regs))
return PTR_ERR(g_regs);
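devm_platform_ioremap_resource() wraps the platform_get_resource()/devm_ioremap_resource() pair used before; the general probe-path pattern looks like this minimal sketch:

	void __iomem *regs;

	/* one call fetches IORESOURCE_MEM index 0 and maps it, devm-managed */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);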
@@ -170,10 +168,10 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
return 0;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_init_state(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
struct vchiq_2835_state *platform_state;
state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
@@ -216,7 +214,7 @@ remote_event_signal(struct remote_event *event)
writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
int dir)
{
@@ -249,24 +247,23 @@ vchiq_complete_bulk(struct vchiq_bulk *bulk)
bulk->actual);
}
-void
-vchiq_dump_platform_state(void *dump_context)
+int vchiq_dump_platform_state(void *dump_context)
{
char buf[80];
int len;
len = snprintf(buf, sizeof(buf),
" Platform: 2835 (VC master)");
- vchiq_dump(dump_context, buf, len + 1);
+ return vchiq_dump(dump_context, buf, len + 1);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_suspend(struct vchiq_state *state)
{
return VCHIQ_ERROR;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_resume(struct vchiq_state *state)
{
return VCHIQ_SUCCESS;
@@ -526,11 +523,11 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
return NULL;
}
- WARN_ON(g_free_fragments == NULL);
+ WARN_ON(!g_free_fragments);
down(&g_free_fragments_mutex);
fragments = g_free_fragments;
- WARN_ON(fragments == NULL);
+ WARN_ON(!fragments);
g_free_fragments = *(char **) g_free_fragments;
up(&g_free_fragments_mutex);
pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index b1595b13dea8..02148a24818a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -84,7 +84,7 @@ static void suspend_timer_callback(struct timer_list *t);
struct user_service {
struct vchiq_service *service;
void *userdata;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
char is_vchi;
char dequeue_pending;
char close_pending;
@@ -103,7 +103,7 @@ struct bulk_waiter_node {
struct list_head list;
};
-struct vchiq_instance_struct {
+struct vchiq_instance {
struct vchiq_state *state;
struct vchiq_completion_data completions[MAX_COMPLETIONS];
int completion_insert;
@@ -172,16 +172,16 @@ static const char *const ioctl_names[] = {
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
(VCHIQ_IOC_MAX + 1));
-static VCHIQ_STATUS_T
-vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, VCHIQ_BULK_DIR_T dir);
+static enum vchiq_status
+vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
+ unsigned int size, enum vchiq_bulk_dir dir);
#define VCHIQ_INIT_RETRIES 10
-VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
+enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state;
- VCHIQ_INSTANCE_T instance = NULL;
+ struct vchiq_instance *instance = NULL;
int i;
vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
@@ -230,9 +230,9 @@ failed:
}
EXPORT_SYMBOL(vchiq_initialise);
-VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
+enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
vchiq_log_trace(vchiq_core_log_level,
@@ -267,14 +267,14 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
}
EXPORT_SYMBOL(vchiq_shutdown);
-static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
+static int vchiq_is_connected(struct vchiq_instance *instance)
{
return instance->connected;
}
-VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
+enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
vchiq_log_trace(vchiq_core_log_level,
@@ -301,12 +301,12 @@ failed:
}
EXPORT_SYMBOL(vchiq_connect);
-VCHIQ_STATUS_T vchiq_add_service(
- VCHIQ_INSTANCE_T instance,
+enum vchiq_status vchiq_add_service(
+ struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *phandle)
+ unsigned int *phandle)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
int srvstate;
@@ -340,12 +340,12 @@ VCHIQ_STATUS_T vchiq_add_service(
}
EXPORT_SYMBOL(vchiq_add_service);
-VCHIQ_STATUS_T vchiq_open_service(
- VCHIQ_INSTANCE_T instance,
+enum vchiq_status vchiq_open_service(
+ struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *phandle)
+ unsigned int *phandle)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
@@ -380,11 +380,11 @@ failed:
}
EXPORT_SYMBOL(vchiq_open_service);
-VCHIQ_STATUS_T
-vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
- unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+enum vchiq_status
+vchiq_bulk_transmit(unsigned int handle, const void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
@@ -405,11 +405,11 @@ vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
-VCHIQ_STATUS_T
-vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+enum vchiq_status
+vchiq_bulk_receive(unsigned int handle, void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
@@ -429,13 +429,13 @@ vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
}
EXPORT_SYMBOL(vchiq_bulk_receive);
-static VCHIQ_STATUS_T
-vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, VCHIQ_BULK_DIR_T dir)
+static enum vchiq_status
+vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
+ unsigned int size, enum vchiq_bulk_dir dir)
{
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
struct vchiq_service *service;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct bulk_waiter_node *waiter = NULL;
service = find_service_by_handle(handle);
@@ -515,8 +515,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
*
***************************************************************************/
-static VCHIQ_STATUS_T
-add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
+static enum vchiq_status
+add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
struct vchiq_header *header, struct user_service *user_service,
void *bulk_userdata)
{
@@ -582,9 +582,9 @@ add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
*
***************************************************************************/
-static VCHIQ_STATUS_T
-service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
+static enum vchiq_status
+service_callback(enum vchiq_reason reason, struct vchiq_header *header,
+ unsigned int handle, void *bulk_userdata)
{
/* How do we ensure the callback goes to the right client?
** The service_user data points to a user_service record
@@ -593,7 +593,7 @@ service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
*/
struct user_service *user_service;
struct vchiq_service *service;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
bool skip_completion = false;
DEBUG_INITIALISE(g_state.local)
@@ -630,7 +630,7 @@ service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
*/
if ((user_service->message_available_pos -
instance->completion_remove) < 0) {
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
vchiq_log_info(vchiq_arm_log_level,
"Inserting extra MESSAGE_AVAILABLE");
@@ -772,8 +772,8 @@ static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
* vchiq_ioc_queue_message
*
**************************************************************************/
-static VCHIQ_STATUS_T
-vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+static enum vchiq_status
+vchiq_ioc_queue_message(unsigned int handle,
struct vchiq_element *elements,
unsigned long count)
{
@@ -804,8 +804,8 @@ vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- VCHIQ_INSTANCE_T instance = file->private_data;
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ struct vchiq_instance *instance = file->private_data;
+ enum vchiq_status status = VCHIQ_SUCCESS;
struct vchiq_service *service = NULL;
long ret = 0;
int i, rc;
@@ -827,7 +827,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* Remove all services */
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
status = vchiq_remove_service(service->handle);
unlock_service(service);
if (status != VCHIQ_SUCCESS)
@@ -907,7 +907,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
&args.params, srvstate,
instance, user_service_free);
- if (service != NULL) {
+ if (service) {
user_service->service = service;
user_service->userdata = userdata;
user_service->instance = instance;
@@ -952,7 +952,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_CLOSE_SERVICE:
case VCHIQ_IOC_REMOVE_SERVICE: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
struct user_service *user_service;
service = find_service_for_instance(instance, handle);
@@ -985,10 +985,10 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_USE_SERVICE:
case VCHIQ_IOC_RELEASE_SERVICE: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
service = find_service_for_instance(instance, handle);
- if (service != NULL) {
+ if (service) {
status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
vchiq_use_service_internal(service) :
vchiq_release_service_internal(service);
@@ -1021,7 +1021,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
service = find_service_for_instance(instance, args.handle);
- if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
+ if (service && (args.count <= MAX_ELEMENTS)) {
/* Copy elements into kernel space */
struct vchiq_element elements[MAX_ELEMENTS];
@@ -1042,7 +1042,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct vchiq_queue_bulk_transfer args;
struct bulk_waiter_node *waiter = NULL;
- VCHIQ_BULK_DIR_T dir =
+ enum vchiq_bulk_dir dir =
(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
@@ -1107,7 +1107,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
kfree(waiter);
} else {
- const VCHIQ_BULK_MODE_T mode_waiting =
+ const enum vchiq_bulk_mode mode_waiting =
VCHIQ_BULK_MODE_WAITING;
waiter->pid = current->pid;
mutex_lock(&instance->bulk_waiter_list_mutex);
@@ -1343,11 +1343,11 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
spin_unlock(&msg_queue_spinlock);
complete(&user_service->remove_event);
- if (header == NULL)
+ if (!header)
ret = -ENOTCONN;
else if (header->size <= args.bufsize) {
/* Copy to user space if msgbuf is not NULL */
- if ((args.buf == NULL) ||
+ if (!args.buf ||
(copy_to_user((void __user *)args.buf,
header->data,
header->size) == 0)) {
@@ -1368,7 +1368,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} break;
case VCHIQ_IOC_GET_CLIENT_ID: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
ret = vchiq_get_client_id(handle);
} break;
@@ -1423,10 +1423,10 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} break;
case VCHIQ_IOC_CLOSE_DELIVERED: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
service = find_closed_service_for_instance(instance, handle);
- if (service != NULL) {
+ if (service) {
struct user_service *user_service =
(struct user_service *)service->base.userdata;
close_delivered(user_service);
@@ -1611,7 +1611,7 @@ struct vchiq_queue_bulk_transfer32 {
compat_uptr_t data;
unsigned int size;
compat_uptr_t userdata;
- VCHIQ_BULK_MODE_T mode;
+ enum vchiq_bulk_mode mode;
};
#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
@@ -1666,7 +1666,7 @@ vchiq_compat_ioctl_queue_bulk(struct file *file,
}
struct vchiq_completion_data32 {
- VCHIQ_REASON_T reason;
+ enum vchiq_reason reason;
compat_uptr_t header;
compat_uptr_t service_userdata;
compat_uptr_t bulk_userdata;
@@ -1919,7 +1919,7 @@ vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static int vchiq_open(struct inode *inode, struct file *file)
{
struct vchiq_state *state = vchiq_get_state();
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
@@ -1951,7 +1951,7 @@ static int vchiq_open(struct inode *inode, struct file *file)
static int vchiq_release(struct inode *inode, struct file *file)
{
- VCHIQ_INSTANCE_T instance = file->private_data;
+ struct vchiq_instance *instance = file->private_data;
struct vchiq_state *state = vchiq_get_state();
struct vchiq_service *service;
int ret = 0;
@@ -2072,43 +2072,45 @@ out:
*
***************************************************************************/
-void
-vchiq_dump(void *dump_context, const char *str, int len)
+int vchiq_dump(void *dump_context, const char *str, int len)
{
struct dump_context *context = (struct dump_context *)dump_context;
+ int copy_bytes;
- if (context->actual < context->space) {
- int copy_bytes;
+ if (context->actual >= context->space)
+ return 0;
- if (context->offset > 0) {
- int skip_bytes = min(len, (int)context->offset);
+ if (context->offset > 0) {
+ int skip_bytes = min_t(int, len, context->offset);
- str += skip_bytes;
- len -= skip_bytes;
- context->offset -= skip_bytes;
- if (context->offset > 0)
- return;
- }
- copy_bytes = min(len, (int)(context->space - context->actual));
- if (copy_bytes == 0)
- return;
- if (copy_to_user(context->buf + context->actual, str,
- copy_bytes))
- context->actual = -EFAULT;
- context->actual += copy_bytes;
- len -= copy_bytes;
-
- /* If tne terminating NUL is included in the length, then it
- ** marks the end of a line and should be replaced with a
- ** carriage return. */
- if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
- char cr = '\n';
-
- if (copy_to_user(context->buf + context->actual - 1,
- &cr, 1))
- context->actual = -EFAULT;
- }
+ str += skip_bytes;
+ len -= skip_bytes;
+ context->offset -= skip_bytes;
+ if (context->offset > 0)
+ return 0;
+ }
+ copy_bytes = min_t(int, len, context->space - context->actual);
+ if (copy_bytes == 0)
+ return 0;
+ if (copy_to_user(context->buf + context->actual, str,
+ copy_bytes))
+ return -EFAULT;
+ context->actual += copy_bytes;
+ len -= copy_bytes;
+
+ /*
+ * If the terminating NUL is included in the length, then it
+ * marks the end of a line and should be replaced with a
+ * carriage return.
+ */
+ if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
+ char cr = '\n';
+
+ if (copy_to_user(context->buf + context->actual - 1,
+ &cr, 1))
+ return -EFAULT;
}
+ return 0;
}
/****************************************************************************
@@ -2117,8 +2119,7 @@ vchiq_dump(void *dump_context, const char *str, int len)
*
***************************************************************************/
-void
-vchiq_dump_platform_instances(void *dump_context)
+int vchiq_dump_platform_instances(void *dump_context)
{
struct vchiq_state *state = vchiq_get_state();
char buf[80];
@@ -2130,37 +2131,43 @@ vchiq_dump_platform_instances(void *dump_context)
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = state->services[i];
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
- if (service && (service->base.callback == service_callback)) {
- instance = service->instance;
- if (instance)
- instance->mark = 0;
- }
+ if (!service || service->base.callback != service_callback)
+ continue;
+
+ instance = service->instance;
+ if (instance)
+ instance->mark = 0;
}
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = state->services[i];
- VCHIQ_INSTANCE_T instance;
-
- if (service && (service->base.callback == service_callback)) {
- instance = service->instance;
- if (instance && !instance->mark) {
- len = snprintf(buf, sizeof(buf),
- "Instance %pK: pid %d,%s completions %d/%d",
- instance, instance->pid,
- instance->connected ? " connected, " :
- "",
- instance->completion_insert -
- instance->completion_remove,
- MAX_COMPLETIONS);
-
- vchiq_dump(dump_context, buf, len + 1);
-
- instance->mark = 1;
- }
- }
+ struct vchiq_instance *instance;
+ int err;
+
+ if (!service || service->base.callback != service_callback)
+ continue;
+
+ instance = service->instance;
+ if (!instance || instance->mark)
+ continue;
+
+ len = snprintf(buf, sizeof(buf),
+ "Instance %pK: pid %d,%s completions %d/%d",
+ instance, instance->pid,
+ instance->connected ? " connected, " :
+ "",
+ instance->completion_insert -
+ instance->completion_remove,
+ MAX_COMPLETIONS);
+
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
+ instance->mark = 1;
}
+ return 0;
}
/****************************************************************************
@@ -2169,9 +2176,8 @@ vchiq_dump_platform_instances(void *dump_context)
*
***************************************************************************/
-void
-vchiq_dump_platform_service_state(void *dump_context,
- struct vchiq_service *service)
+int vchiq_dump_platform_service_state(void *dump_context,
+ struct vchiq_service *service)
{
struct user_service *user_service =
(struct user_service *)service->base.userdata;
@@ -2192,7 +2198,7 @@ vchiq_dump_platform_service_state(void *dump_context,
" (dequeue pending)");
}
- vchiq_dump(dump_context, buf, len + 1);
+ return vchiq_dump(dump_context, buf, len + 1);
}
/****************************************************************************
@@ -2206,13 +2212,16 @@ vchiq_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct dump_context context;
+ int err;
context.buf = buf;
context.actual = 0;
context.space = count;
context.offset = *ppos;
- vchiq_dump_state(&context, &g_state);
+ err = vchiq_dump_state(&context, &g_state);
+ if (err)
+ return err;
*ppos += context.actual;
@@ -2223,13 +2232,13 @@ struct vchiq_state *
vchiq_get_state(void)
{
- if (g_state.remote == NULL)
+ if (!g_state.remote)
printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
else if (g_state.remote->initialised != 1)
printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
__func__, g_state.remote->initialised);
- return ((g_state.remote != NULL) &&
+ return (g_state.remote &&
(g_state.remote->initialised == 1)) ? &g_state : NULL;
}
@@ -2270,10 +2279,10 @@ vchiq_videocore_wanted(struct vchiq_state *state)
return 1;
}
-static VCHIQ_STATUS_T
-vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
+static enum vchiq_status
+vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T service_user,
+ unsigned int service_user,
void *bulk_user)
{
vchiq_log_error(vchiq_susp_log_level,
@@ -2287,9 +2296,9 @@ vchiq_keepalive_thread_func(void *v)
struct vchiq_state *state = (struct vchiq_state *)v;
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T status;
- VCHIQ_INSTANCE_T instance;
- VCHIQ_SERVICE_HANDLE_T ka_handle;
+ enum vchiq_status status;
+ struct vchiq_instance *instance;
+ unsigned int ka_handle;
struct vchiq_service_params params = {
.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
@@ -2361,7 +2370,7 @@ exit:
return 0;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state)
{
@@ -2563,10 +2572,10 @@ unblock_resume(struct vchiq_arm_state *arm_state)
/* Initiate suspend via slot handler. Should be called with the write lock
* held */
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_arm_vcsuspend(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
if (!arm_state)
@@ -2684,12 +2693,12 @@ out:
return resume;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ enum vchiq_status ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
int local_uc, local_entity_uc;
@@ -2798,7 +2807,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
}
if (ret == VCHIQ_SUCCESS) {
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
while (ack_cnt && (status == VCHIQ_SUCCESS)) {
@@ -2817,11 +2826,11 @@ out:
return ret;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ enum vchiq_status ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
@@ -2898,33 +2907,33 @@ vchiq_on_remote_release(struct vchiq_state *state)
complete(&arm_state->ka_evt);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service)
{
return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service)
{
return vchiq_release_internal(service->state, service);
}
struct vchiq_debugfs_node *
-vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
return &instance->debugfs_node;
}
int
-vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
struct vchiq_service *service;
int use_count = 0, i;
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
use_count += service->service_use_count;
unlock_service(service);
}
@@ -2932,26 +2941,26 @@ vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
}
int
-vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_pid(struct vchiq_instance *instance)
{
return instance->pid;
}
int
-vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_trace(struct vchiq_instance *instance)
{
return instance->trace;
}
void
-vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
struct vchiq_service *service;
int i;
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
service->trace = trace;
unlock_service(service);
}
@@ -2969,10 +2978,10 @@ static void suspend_timer_callback(struct timer_list *t)
vchiq_check_suspend(state);
}
-VCHIQ_STATUS_T
-vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_use_service(unsigned int handle)
{
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (service) {
@@ -2983,10 +2992,10 @@ vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
return ret;
}
-VCHIQ_STATUS_T
-vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_release_service(unsigned int handle)
{
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (service) {
@@ -3088,11 +3097,11 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
vchiq_dump_platform_use_state(state);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_check_service(struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state;
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
if (!service || !service->state)
goto out;
@@ -3128,35 +3137,36 @@ void vchiq_on_remote_use_active(struct vchiq_state *state)
}
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
- VCHIQ_CONNSTATE_T oldstate,
- VCHIQ_CONNSTATE_T newstate)
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ char threadname[16];
vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
get_conn_state_name(oldstate), get_conn_state_name(newstate));
- if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
- write_lock_bh(&arm_state->susp_res_lock);
- if (!arm_state->first_connect) {
- char threadname[16];
+ if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
+ return;
- arm_state->first_connect = 1;
- write_unlock_bh(&arm_state->susp_res_lock);
- snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
- state->id);
- arm_state->ka_thread = kthread_create(
- &vchiq_keepalive_thread_func,
- (void *)state,
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->first_connect) {
+ write_unlock_bh(&arm_state->susp_res_lock);
+ return;
+ }
+
+ arm_state->first_connect = 1;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state,
+ threadname);
+ if (IS_ERR(arm_state->ka_thread)) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "vchiq: FATAL: couldn't create thread %s",
threadname);
- if (IS_ERR(arm_state->ka_thread)) {
- vchiq_log_error(vchiq_susp_log_level,
- "vchiq: FATAL: couldn't create thread %s",
- threadname);
- } else {
- wake_up_process(arm_state->ka_thread);
- }
- } else
- write_unlock_bh(&arm_state->susp_res_lock);
+ } else {
+ wake_up_process(arm_state->ka_thread);
}
}
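With vchiq_dump() now returning an int (0 on success, -EFAULT when copy_to_user() fails) instead of void, every dump helper in this file is expected to propagate that value upwards. A minimal sketch of the caller pattern, where example_dump_section() is a hypothetical helper rather than a function from this patch:

static int example_dump_section(void *dump_context)
{
	char buf[80];
	int len, err;

	len = scnprintf(buf, sizeof(buf), "Example line");

	/* vchiq_dump() now reports copy failures instead of swallowing them */
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	return 0;
}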
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index b424323e9613..19d2a2eefb6a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -109,13 +109,13 @@ int vchiq_platform_init(struct platform_device *pdev,
extern struct vchiq_state *
vchiq_get_state(void);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_vcsuspend(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_vcresume(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state);
@@ -124,16 +124,16 @@ vchiq_check_resume(struct vchiq_state *state);
extern void
vchiq_check_suspend(struct vchiq_state *state);
-VCHIQ_STATUS_T
-vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
+enum vchiq_status
+vchiq_use_service(unsigned int handle);
-extern VCHIQ_STATUS_T
-vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
+extern enum vchiq_status
+vchiq_release_service(unsigned int handle);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_check_service(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_suspend(struct vchiq_state *state);
extern int
@@ -154,27 +154,27 @@ vchiq_platform_get_arm_state(struct vchiq_state *state);
extern int
vchiq_videocore_wanted(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_release_internal(struct vchiq_state *state,
struct vchiq_service *service);
extern struct vchiq_debugfs_node *
-vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_use_count(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_pid(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_trace(struct vchiq_instance *instance);
extern void
-vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace);
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace);
extern void
set_suspend_state(struct vchiq_arm_state *arm_state,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 56a23a297fa4..76351078affb 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -132,7 +132,7 @@ vchiq_set_service_state(struct vchiq_service *service, int newstate)
}
struct vchiq_service *
-find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
+find_service_by_handle(unsigned int handle)
{
struct vchiq_service *service;
@@ -177,8 +177,8 @@ find_service_by_port(struct vchiq_state *state, int localport)
}
struct vchiq_service *
-find_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle)
+find_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle)
{
struct vchiq_service *service;
@@ -201,8 +201,8 @@ find_service_for_instance(VCHIQ_INSTANCE_T instance,
}
struct vchiq_service *
-find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle)
+find_closed_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle)
{
struct vchiq_service *service;
@@ -227,7 +227,7 @@ find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
}
struct vchiq_service *
-next_service_by_instance(struct vchiq_state *state, VCHIQ_INSTANCE_T instance,
+next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance,
int *pidx)
{
struct vchiq_service *service = NULL;
@@ -295,7 +295,7 @@ unlock:
}
int
-vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_client_id(unsigned int handle)
{
struct vchiq_service *service = find_service_by_handle(handle);
int id;
@@ -308,7 +308,7 @@ vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
}
void *
-vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_service_userdata(unsigned int handle)
{
struct vchiq_service *service = handle_to_service(handle);
@@ -316,7 +316,7 @@ vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
}
int
-vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_service_fourcc(unsigned int handle)
{
struct vchiq_service *service = handle_to_service(handle);
@@ -354,11 +354,11 @@ mark_service_closing(struct vchiq_service *service)
mark_service_closing_internal(service, 0);
}
-static inline VCHIQ_STATUS_T
-make_service_callback(struct vchiq_service *service, VCHIQ_REASON_T reason,
+static inline enum vchiq_status
+make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
struct vchiq_header *header, void *bulk_userdata)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
@@ -375,9 +375,9 @@ make_service_callback(struct vchiq_service *service, VCHIQ_REASON_T reason,
}
inline void
-vchiq_set_conn_state(struct vchiq_state *state, VCHIQ_CONNSTATE_T newstate)
+vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
- VCHIQ_CONNSTATE_T oldstate = state->conn_state;
+ enum vchiq_connstate oldstate = state->conn_state;
vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
conn_state_names[oldstate],
@@ -542,7 +542,7 @@ reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
if (space > slot_space) {
struct vchiq_header *header;
/* Fill the remaining space with padding */
- WARN_ON(state->tx_data == NULL);
+ WARN_ON(!state->tx_data);
header = (struct vchiq_header *)
(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
header->msgid = VCHIQ_MSGID_PADDING;
@@ -779,7 +779,7 @@ copy_message_data(
}
/* Called by the slot handler and application threads */
-static VCHIQ_STATUS_T
+static enum vchiq_status
queue_message(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
@@ -1027,7 +1027,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
}
/* Called by the slot handler and application threads */
-static VCHIQ_STATUS_T
+static enum vchiq_status
queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
@@ -1178,11 +1178,11 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
}
/* Called by the slot handler - don't hold the bulk mutex */
-static VCHIQ_STATUS_T
+static enum vchiq_status
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
int retry_poll)
{
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
vchiq_log_trace(vchiq_core_log_level,
"%d: nb:%d %cx - p=%x rn=%x r=%x",
@@ -1230,7 +1230,7 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
spin_unlock(&bulk_waiter_spinlock);
} else if (bulk->mode ==
VCHIQ_BULK_MODE_CALLBACK) {
- VCHIQ_REASON_T reason = (bulk->dir ==
+ enum vchiq_reason reason = (bulk->dir ==
VCHIQ_BULK_TRANSMIT) ?
((bulk->actual ==
VCHIQ_BULK_ACTUAL_ABORTED) ?
@@ -2078,7 +2078,7 @@ init_bulk_queue(struct vchiq_bulk_queue *queue)
}
inline const char *
-get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
+get_conn_state_name(enum vchiq_connstate conn_state)
{
return conn_state_names[conn_state];
}
@@ -2123,18 +2123,15 @@ vchiq_init_slots(void *mem_base, int mem_size)
return slot_zero;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
{
struct vchiq_shared_state *local;
struct vchiq_shared_state *remote;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
char threadname[16];
int i;
- vchiq_log_warning(vchiq_core_log_level,
- "%s: slot_zero = %pK", __func__, slot_zero);
-
if (vchiq_states[0]) {
pr_err("%s: VCHIQ state already initialized\n", __func__);
return VCHIQ_ERROR;
@@ -2283,8 +2280,8 @@ fail_free_handler_thread:
struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
const struct vchiq_service_params *params,
- int srvstate, VCHIQ_INSTANCE_T instance,
- VCHIQ_USERDATA_TERM_T userdata_term)
+ int srvstate, struct vchiq_instance *instance,
+ vchiq_userdata_term userdata_term)
{
struct vchiq_service *service;
struct vchiq_service **pservice = NULL;
@@ -2412,7 +2409,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
return service;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id)
{
struct vchiq_open_payload payload = {
@@ -2421,7 +2418,7 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
service->version,
service->version_min
};
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
service->client_id = client_id;
vchiq_use_service_internal(service);
@@ -2519,7 +2516,7 @@ release_service_messages(struct vchiq_service *service)
static int
do_abort_bulks(struct vchiq_service *service)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
/* Abort any outstanding bulk transfers */
if (mutex_lock_killable(&service->bulk_mutex))
@@ -2535,10 +2532,10 @@ do_abort_bulks(struct vchiq_service *service)
return (status == VCHIQ_SUCCESS);
}
-static VCHIQ_STATUS_T
+static enum vchiq_status
close_service_complete(struct vchiq_service *service, int failstate)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
int newstate;
@@ -2597,11 +2594,11 @@ close_service_complete(struct vchiq_service *service, int failstate)
}
/* Called by the slot handler */
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
{
struct vchiq_state *state = service->state;
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
@@ -2777,8 +2774,8 @@ vchiq_free_service_internal(struct vchiq_service *service)
unlock_service(service);
}
-VCHIQ_STATUS_T
-vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
+enum vchiq_status
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
struct vchiq_service *service;
int i;
@@ -2813,8 +2810,8 @@ vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
return VCHIQ_SUCCESS;
}
-VCHIQ_STATUS_T
-vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
+enum vchiq_status
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
struct vchiq_service *service;
int i;
@@ -2830,12 +2827,12 @@ vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
return VCHIQ_SUCCESS;
}
-VCHIQ_STATUS_T
-vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_close_service(unsigned int handle)
{
/* Unregister the service */
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
if (!service)
return VCHIQ_ERROR;
@@ -2889,12 +2886,12 @@ vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
return status;
}
-VCHIQ_STATUS_T
-vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_remove_service(unsigned int handle)
{
/* Unregister the service */
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
if (!service)
return VCHIQ_ERROR;
@@ -2955,10 +2952,10 @@ vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
* When called in blocking mode, the userdata field points to a bulk_waiter
* structure.
*/
-VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
+enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
void *offset, int size, void *userdata,
- VCHIQ_BULK_MODE_T mode,
- VCHIQ_BULK_DIR_T dir)
+ enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir)
{
struct vchiq_service *service = find_service_by_handle(handle);
struct vchiq_bulk_queue *queue;
@@ -2968,7 +2965,7 @@ VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
int payload[2];
if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
@@ -3103,15 +3100,15 @@ error_exit:
return status;
}
-VCHIQ_STATUS_T
-vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+enum vchiq_status
+vchiq_queue_message(unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
size_t size)
{
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
if (!service ||
(vchiq_check_service(service) != VCHIQ_SUCCESS))
@@ -3156,7 +3153,7 @@ error_exit:
}
void
-vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle,
+vchiq_release_message(unsigned int handle,
struct vchiq_header *header)
{
struct vchiq_service *service = find_service_by_handle(handle);
@@ -3195,10 +3192,10 @@ release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
remote_event_signal(&state->remote->sync_release);
}
-VCHIQ_STATUS_T
-vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
+enum vchiq_status
+vchiq_get_peer_version(unsigned int handle, short *peer_version)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (!service ||
@@ -3224,12 +3221,12 @@ void vchiq_get_config(struct vchiq_config *config)
config->version_min = VCHIQ_VERSION_MIN;
}
-VCHIQ_STATUS_T
-vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
- VCHIQ_SERVICE_OPTION_T option, int value)
+enum vchiq_status
+vchiq_set_service_option(unsigned int handle,
+ enum vchiq_service_option option, int value)
{
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
if (service) {
switch (option) {
@@ -3301,7 +3298,7 @@ vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
return status;
}
-static void
+static int
vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
struct vchiq_shared_state *shared, const char *label)
{
@@ -3321,16 +3318,21 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
int i;
char buf[80];
int len;
+ int err;
len = scnprintf(buf, sizeof(buf),
" %s: slots %d-%d tx_pos=%x recycle=%x",
label, shared->slot_first, shared->slot_last,
shared->tx_pos, shared->slot_queue_recycle);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Slots claimed:");
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
for (i = shared->slot_first; i <= shared->slot_last; i++) {
struct vchiq_slot_info slot_info =
@@ -3339,27 +3341,34 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
len = scnprintf(buf, sizeof(buf),
" %d: %d/%d", i, slot_info.use_count,
slot_info.release_count);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
}
for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
debug_names[i], shared->debug[i], shared->debug[i]);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
+ return 0;
}
-void
-vchiq_dump_state(void *dump_context, struct vchiq_state *state)
+int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
{
char buf[80];
int len;
int i;
+ int err;
len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
conn_state_names[state->conn_state]);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
@@ -3367,12 +3376,16 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
state->rx_pos,
state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Version: %d (min %d)",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
if (VCHIQ_ENABLE_STATS) {
len = scnprintf(buf, sizeof(buf),
@@ -3380,7 +3393,9 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
"error_count=%d",
state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
state->stats.error_count);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
len = scnprintf(buf, sizeof(buf),
@@ -3391,30 +3406,49 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
state->data_quota - state->data_use_count,
state->local->slot_queue_recycle - state->slot_queue_available,
state->stats.slot_stalls, state->stats.data_stalls);
- vchiq_dump(dump_context, buf, len + 1);
-
- vchiq_dump_platform_state(dump_context);
-
- vchiq_dump_shared_state(dump_context, state, state->local, "Local");
- vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
-
- vchiq_dump_platform_instances(dump_context);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
+
+ err = vchiq_dump_platform_state(dump_context);
+ if (err)
+ return err;
+
+ err = vchiq_dump_shared_state(dump_context,
+ state,
+ state->local,
+ "Local");
+ if (err)
+ return err;
+ err = vchiq_dump_shared_state(dump_context,
+ state,
+ state->remote,
+ "Remote");
+ if (err)
+ return err;
+
+ err = vchiq_dump_platform_instances(dump_context);
+ if (err)
+ return err;
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = find_service_by_port(state, i);
if (service) {
- vchiq_dump_service_state(dump_context, service);
+ err = vchiq_dump_service_state(dump_context, service);
unlock_service(service);
+ if (err)
+ return err;
}
}
+ return 0;
}
-void
-vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
+int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
{
char buf[80];
int len;
+ int err;
len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
service->localport, srvstate_names[service->srvstate],
@@ -3447,7 +3481,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service_quota->slot_use_count,
service_quota->slot_quota);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
tx_pending = service->bulk_tx.local_insert -
service->bulk_tx.remote_insert;
@@ -3466,7 +3502,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
BULK_INDEX(service->bulk_rx.remove)].size : 0);
if (VCHIQ_ENABLE_STATS) {
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Ctrl: tx_count=%d, tx_bytes=%llu, "
@@ -3475,7 +3513,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->stats.ctrl_tx_bytes,
service->stats.ctrl_rx_count,
service->stats.ctrl_rx_bytes);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Bulk: tx_count=%d, tx_bytes=%llu, "
@@ -3484,7 +3524,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->stats.bulk_tx_bytes,
service->stats.bulk_rx_count,
service->stats.bulk_rx_bytes);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" %d quota stalls, %d slot stalls, "
@@ -3497,10 +3539,13 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
}
}
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
if (service->srvstate != VCHIQ_SRVSTATE_FREE)
- vchiq_dump_platform_service_state(dump_context, service);
+ err = vchiq_dump_platform_service_state(dump_context, service);
+ return err;
}
void
@@ -3527,9 +3572,9 @@ vchiq_loud_error_footer(void)
"================");
}
-VCHIQ_STATUS_T vchiq_send_remote_use(struct vchiq_state *state)
+enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ enum vchiq_status status = VCHIQ_RETRY;
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
@@ -3538,9 +3583,9 @@ VCHIQ_STATUS_T vchiq_send_remote_use(struct vchiq_state *state)
return status;
}
-VCHIQ_STATUS_T vchiq_send_remote_use_active(struct vchiq_state *state)
+enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ enum vchiq_status status = VCHIQ_RETRY;
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
@@ -3578,7 +3623,7 @@ void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
}
*s++ = '\0';
- if ((label != NULL) && (*label != '\0'))
+ if (label && (*label != '\0'))
vchiq_log_trace(VCHIQ_LOG_TRACE,
"%s: %08x: %s", label, addr, line_buf);
else
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 63f71b2a492f..c31f953a9986 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -60,8 +60,8 @@ vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
-#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(struct vchiq_slot_zero) + \
- VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
+#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
+ VCHIQ_SLOT_SIZE)
#define VCHIQ_MSG_PADDING 0 /* - */
#define VCHIQ_MSG_CONNECT 1 /* - */
@@ -169,7 +169,7 @@ enum {
#endif /* VCHIQ_ENABLE_DEBUG */
-typedef enum {
+enum vchiq_connstate {
VCHIQ_CONNSTATE_DISCONNECTED,
VCHIQ_CONNSTATE_CONNECTING,
VCHIQ_CONNSTATE_CONNECTED,
@@ -179,7 +179,7 @@ typedef enum {
VCHIQ_CONNSTATE_RESUMING,
VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
VCHIQ_CONNSTATE_RESUME_TIMEOUT
-} VCHIQ_CONNSTATE_T;
+};
enum {
VCHIQ_SRVSTATE_FREE,
@@ -202,12 +202,12 @@ enum {
VCHIQ_POLL_COUNT
};
-typedef enum {
+enum vchiq_bulk_dir {
VCHIQ_BULK_TRANSMIT,
VCHIQ_BULK_RECEIVE
-} VCHIQ_BULK_DIR_T;
+};
-typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
+typedef void (*vchiq_userdata_term)(void *userdata);
struct vchiq_bulk {
short mode;
@@ -236,7 +236,7 @@ struct remote_event {
u32 __unused;
};
-typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
+struct opaque_platform_state;
struct vchiq_slot {
char data[VCHIQ_SLOT_SIZE];
@@ -250,10 +250,10 @@ struct vchiq_slot_info {
struct vchiq_service {
struct vchiq_service_base base;
- VCHIQ_SERVICE_HANDLE_T handle;
+ unsigned int handle;
unsigned int ref_count;
int srvstate;
- VCHIQ_USERDATA_TERM_T userdata_term;
+ vchiq_userdata_term userdata_term;
unsigned int localport;
unsigned int remoteport;
int public_fourcc;
@@ -268,7 +268,7 @@ struct vchiq_service {
short peer_version;
struct vchiq_state *state;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
int service_use_count;
@@ -367,7 +367,7 @@ struct vchiq_slot_zero {
struct vchiq_state {
int id;
int initialised;
- VCHIQ_CONNSTATE_T conn_state;
+ enum vchiq_connstate conn_state;
short version_common;
struct vchiq_shared_state *local;
@@ -382,7 +382,7 @@ struct vchiq_state {
/* Mutex protecting services */
struct mutex mutex;
- VCHIQ_INSTANCE_T *instance;
+ struct vchiq_instance **instance;
/* Processes incoming messages */
struct task_struct *slot_handler_thread;
@@ -468,7 +468,7 @@ struct vchiq_state {
struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];
- VCHIQ_PLATFORM_STATE_T platform_state;
+ struct opaque_platform_state *platform_state;
};
struct bulk_waiter {
@@ -486,27 +486,27 @@ extern int vchiq_sync_log_level;
extern struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
extern const char *
-get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
+get_conn_state_name(enum vchiq_connstate conn_state);
extern struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero);
-extern VCHIQ_STATUS_T
-vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);
extern struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
const struct vchiq_service_params *params,
- int srvstate, VCHIQ_INSTANCE_T instance,
- VCHIQ_USERDATA_TERM_T userdata_term);
+ int srvstate, struct vchiq_instance *instance,
+ vchiq_userdata_term userdata_term);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);
extern void
@@ -515,21 +515,21 @@ vchiq_terminate_service_internal(struct vchiq_service *service);
extern void
vchiq_free_service_internal(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
-vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);
extern void
remote_event_pollall(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
-vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *offset, int size,
- void *userdata, VCHIQ_BULK_MODE_T mode,
- VCHIQ_BULK_DIR_T dir);
+extern enum vchiq_status
+vchiq_bulk_transfer(unsigned int handle, void *offset, int size,
+ void *userdata, enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir);
-extern void
+extern int
vchiq_dump_state(void *dump_context, struct vchiq_state *state);
-extern void
+extern int
vchiq_dump_service_state(void *dump_context, struct vchiq_service *service);
extern void
@@ -543,7 +543,7 @@ request_poll(struct vchiq_state *state, struct vchiq_service *service,
int poll_type);
static inline struct vchiq_service *
-handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
+handle_to_service(unsigned int handle)
{
struct vchiq_state *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
(VCHIQ_MAX_STATES - 1)];
@@ -554,21 +554,21 @@ handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
}
extern struct vchiq_service *
-find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
+find_service_by_handle(unsigned int handle);
extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport);
extern struct vchiq_service *
-find_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle);
+find_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle);
extern struct vchiq_service *
-find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle);
+find_closed_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle);
extern struct vchiq_service *
-next_service_by_instance(struct vchiq_state *state, VCHIQ_INSTANCE_T instance,
+next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance,
int *pidx);
extern void
@@ -580,7 +580,7 @@ unlock_service(struct vchiq_service *service);
/* The following functions are called from vchiq_core, and external
** implementations must be provided. */
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
int dir);
@@ -596,29 +596,29 @@ vchiq_platform_check_suspend(struct vchiq_state *state);
extern void
vchiq_platform_paused(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_resume(struct vchiq_state *state);
extern void
vchiq_platform_resumed(struct vchiq_state *state);
-extern void
+extern int
vchiq_dump(void *dump_context, const char *str, int len);
-extern void
+extern int
vchiq_dump_platform_state(void *dump_context);
-extern void
+extern int
vchiq_dump_platform_instances(void *dump_context);
-extern void
+extern int
vchiq_dump_platform_service_state(void *dump_context,
struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service);
extern void
@@ -627,31 +627,31 @@ vchiq_on_remote_use(struct vchiq_state *state);
extern void
vchiq_on_remote_release(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_init_state(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_check_service(struct vchiq_service *service);
extern void
vchiq_on_remote_use_active(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_send_remote_use(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_send_remote_use_active(struct vchiq_state *state);
extern void
vchiq_platform_conn_state_changed(struct vchiq_state *state,
- VCHIQ_CONNSTATE_T oldstate,
- VCHIQ_CONNSTATE_T newstate);
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate);
extern void
vchiq_platform_handle_timeout(struct vchiq_state *state);
extern void
-vchiq_set_conn_state(struct vchiq_state *state, VCHIQ_CONNSTATE_T newstate);
+vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
extern void
vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
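The VCHIQ_SLOT_ZERO_SLOTS cleanup above relies on DIV_ROUND_UP() from <linux/kernel.h> expanding to the same arithmetic as the open-coded rounding it replaces. An illustrative restatement, assuming the standard kernel definition of DIV_ROUND_UP(); this is not part of the patch:

/* DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d) */
#define OLD_SLOT_ZERO_SLOTS ((sizeof(struct vchiq_slot_zero) + \
			      VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
#define NEW_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
					 VCHIQ_SLOT_SIZE)
/* OLD_SLOT_ZERO_SLOTS and NEW_SLOT_ZERO_SLOTS evaluate to the same value. */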
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index f217b78d95a0..89cc52211de4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -117,7 +117,7 @@ static const struct file_operations debugfs_log_fops = {
static int debugfs_usecount_show(struct seq_file *f, void *offset)
{
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
int use_count;
use_count = vchiq_instance_get_use_count(instance);
@@ -129,7 +129,7 @@ DEFINE_SHOW_ATTRIBUTE(debugfs_usecount);
static int debugfs_trace_show(struct seq_file *f, void *offset)
{
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
int trace;
trace = vchiq_instance_get_trace(instance);
@@ -148,7 +148,7 @@ static ssize_t debugfs_trace_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *f = (struct seq_file *)file->private_data;
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
char firstchar;
if (copy_from_user(&firstchar, buffer, 1))
@@ -184,7 +184,7 @@ static const struct file_operations debugfs_trace_fops = {
};
/* add an instance (process) to the debugfs entries */
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
{
char pidstr[16];
struct dentry *top;
@@ -201,7 +201,7 @@ void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
vchiq_instance_get_debugfs_node(instance)->dentry = top;
}
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
{
struct vchiq_debugfs_node *node =
vchiq_instance_get_debugfs_node(instance);
@@ -242,11 +242,11 @@ void vchiq_debugfs_deinit(void)
{
}
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
{
}
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
{
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
index 9b563d105fdb..ec2f033cdf32 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -14,8 +14,8 @@ void vchiq_debugfs_init(void);
void vchiq_debugfs_deinit(void);
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance);
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance);
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance);
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance);
#endif /* VCHIQ_DEBUGFS_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index c23bd105c40f..07c6a3db5ab6 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -15,7 +15,7 @@
#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
-typedef enum {
+enum vchiq_reason {
VCHIQ_SERVICE_OPENED, /* service, -, - */
VCHIQ_SERVICE_CLOSED, /* service, -, - */
VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
@@ -23,28 +23,28 @@ typedef enum {
VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
-} VCHIQ_REASON_T;
+};
-typedef enum {
+enum vchiq_status {
VCHIQ_ERROR = -1,
VCHIQ_SUCCESS = 0,
VCHIQ_RETRY = 1
-} VCHIQ_STATUS_T;
+};
-typedef enum {
+enum vchiq_bulk_mode {
VCHIQ_BULK_MODE_CALLBACK,
VCHIQ_BULK_MODE_BLOCKING,
VCHIQ_BULK_MODE_NOCALLBACK,
VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
-} VCHIQ_BULK_MODE_T;
+};
-typedef enum {
+enum vchiq_service_option {
VCHIQ_SERVICE_OPTION_AUTOCLOSE,
VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
VCHIQ_SERVICE_OPTION_TRACE
-} VCHIQ_SERVICE_OPTION_T;
+};
struct vchiq_header {
/* The message identifier - opaque to applications. */
@@ -61,21 +61,19 @@ struct vchiq_element {
unsigned int size;
};
-typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
-
-typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T,
- struct vchiq_header *,
- VCHIQ_SERVICE_HANDLE_T, void *);
+typedef enum vchiq_status (*vchiq_callback)(enum vchiq_reason,
+ struct vchiq_header *,
+ unsigned int, void *);
struct vchiq_service_base {
int fourcc;
- VCHIQ_CALLBACK_T callback;
+ vchiq_callback callback;
void *userdata;
};
struct vchiq_service_params {
int fourcc;
- VCHIQ_CALLBACK_T callback;
+ vchiq_callback callback;
void *userdata;
short version; /* Increment for non-trivial changes */
short version_min; /* Update for incompatible changes */
@@ -92,57 +90,57 @@ struct vchiq_config {
short version_min; /* The minimum compatible version of VCHIQ */
};
-typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
-typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
+struct vchiq_instance;
+typedef void (*vchiq_remote_callback)(void *cb_arg);
-extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
-extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
-extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
-extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
+extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
+extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
+extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
+extern enum vchiq_status vchiq_add_service(struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *pservice);
-extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
+ unsigned int *pservice);
+extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *pservice);
-extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T
-vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ unsigned int *pservice);
+extern enum vchiq_status vchiq_close_service(unsigned int service);
+extern enum vchiq_status vchiq_remove_service(unsigned int service);
+extern enum vchiq_status vchiq_use_service(unsigned int service);
+extern enum vchiq_status vchiq_release_service(unsigned int service);
+extern enum vchiq_status
+vchiq_queue_message(unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
size_t size);
-extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
+extern void vchiq_release_message(unsigned int service,
struct vchiq_header *header);
-extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
+extern enum vchiq_status vchiq_bulk_transmit(unsigned int service,
const void *data, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
+ enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_receive(unsigned int service,
void *data, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
+ enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_transmit_handle(unsigned int service,
const void *offset, unsigned int size,
- void *userdata, VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
+ void *userdata, enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_receive_handle(unsigned int service,
void *offset, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
-extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
-extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
+ enum vchiq_bulk_mode mode);
+extern int vchiq_get_client_id(unsigned int service);
+extern void *vchiq_get_service_userdata(unsigned int service);
+extern int vchiq_get_service_fourcc(unsigned int service);
extern void vchiq_get_config(struct vchiq_config *config);
-extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
- VCHIQ_SERVICE_OPTION_T option, int value);
+extern enum vchiq_status vchiq_set_service_option(unsigned int service,
+ enum vchiq_service_option option, int value);
-extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
- VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
-extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status vchiq_remote_use(struct vchiq_instance *instance,
+ vchiq_remote_callback callback, void *cb_arg);
+extern enum vchiq_status vchiq_remote_release(struct vchiq_instance *instance);
-extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
+extern enum vchiq_status vchiq_dump_phys_mem(unsigned int service,
void *ptr, size_t num_bytes);
-extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
+extern enum vchiq_status vchiq_get_peer_version(unsigned int handle,
short *peer_version);
#endif /* VCHIQ_IF_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
index 460ccea088bf..202889b3774f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
@@ -28,11 +28,11 @@ struct vchiq_queue_bulk_transfer {
void *data;
unsigned int size;
void *userdata;
- VCHIQ_BULK_MODE_T mode;
+ enum vchiq_bulk_mode mode;
};
struct vchiq_completion_data {
- VCHIQ_REASON_T reason;
+ enum vchiq_reason reason;
struct vchiq_header *header;
void *service_userdata;
void *bulk_userdata;
@@ -60,7 +60,7 @@ struct vchiq_get_config {
struct vchiq_set_service_option {
unsigned int handle;
- VCHIQ_SERVICE_OPTION_T option;
+ enum vchiq_service_option option;
int value;
};
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 17a4f2c8d8b1..0ce3b08b3441 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -12,22 +12,22 @@
#define vchiq_status_to_vchi(status) ((int32_t)status)
struct shim_service {
- VCHIQ_SERVICE_HANDLE_T handle;
+ unsigned int handle;
struct vchiu_queue queue;
- VCHI_CALLBACK_T callback;
+ vchi_callback callback;
void *callback_param;
};
/***********************************************************
* Name: vchi_msg_peek
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void **data,
* uint32_t *msg_size,
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
*
* Description: Routine to return a pointer to the current message (to allow in
* place processing). The message can be removed using
@@ -36,10 +36,10 @@ struct shim_service {
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
- void **data,
- uint32_t *msg_size,
- VCHI_FLAGS_T flags)
+int32_t vchi_msg_peek(struct vchi_service_handle *handle,
+ void **data,
+ uint32_t *msg_size,
+ enum vchi_flags flags)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(vchi_msg_peek);
/***********************************************************
* Name: vchi_msg_remove
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
*
* Description: Routine to remove a message (after it has been read with
* vchi_msg_peek)
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(vchi_msg_peek);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_msg_remove(struct vchi_service_handle *handle)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(vchi_msg_remove);
/***********************************************************
* Name: vchi_msg_queue
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* ssize_t (*copy_callback)(void *context, void *dest,
* size_t offset, size_t maxsize),
* void *context,
@@ -99,14 +99,14 @@ EXPORT_SYMBOL(vchi_msg_remove);
*
***********************************************************/
static
-int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+int32_t vchi_msg_queue(struct vchi_service_handle *handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
uint32_t data_size)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
while (1) {
status = vchiq_queue_message(service->handle,
@@ -139,7 +139,7 @@ vchi_queue_kernel_message_callback(void *context,
}
int
-vchi_queue_kernel_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_kernel_message(struct vchi_service_handle *handle,
void *data,
unsigned int size)
{
@@ -169,7 +169,7 @@ vchi_queue_user_message_callback(void *context,
}
int
-vchi_queue_user_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_user_message(struct vchi_service_handle *handle,
void __user *data,
unsigned int size)
{
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(vchi_queue_user_message);
* Arguments: VCHI_BULK_HANDLE_T handle,
* void *data_dst,
* const uint32_t data_size,
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
* void *bulk_handle
*
* Description: Routine to setup a rcv buffer
@@ -198,15 +198,13 @@ EXPORT_SYMBOL(vchi_queue_user_message);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
- void *data_dst,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *bulk_handle)
+int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle, void *data_dst,
+ uint32_t data_size, enum vchi_flags flags,
+ void *bulk_handle)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_BULK_MODE_T mode;
- VCHIQ_STATUS_T status;
+ enum vchiq_bulk_mode mode;
+ enum vchiq_status status;
switch ((int)flags) {
case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
@@ -250,7 +248,7 @@ EXPORT_SYMBOL(vchi_bulk_queue_receive);
* Arguments: VCHI_BULK_HANDLE_T handle,
* const void *data_src,
* uint32_t data_size,
- * VCHI_FLAGS_T flags,
+ * enum vchi_flags flags,
* void *bulk_handle
*
* Description: Routine to transmit some data
@@ -258,15 +256,15 @@ EXPORT_SYMBOL(vchi_bulk_queue_receive);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
- const void *data_src,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *bulk_handle)
+int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
+ const void *data_src,
+ uint32_t data_size,
+ enum vchi_flags flags,
+ void *bulk_handle)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_BULK_MODE_T mode;
- VCHIQ_STATUS_T status;
+ enum vchiq_bulk_mode mode;
+ enum vchiq_status status;
switch ((int)flags) {
case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
@@ -309,22 +307,20 @@ EXPORT_SYMBOL(vchi_bulk_queue_transmit);
/***********************************************************
* Name: vchi_msg_dequeue
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void *data,
* uint32_t max_data_size_to_read,
* uint32_t *actual_msg_size
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
*
* Description: Routine to dequeue a message into the supplied buffer
*
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
- void *data,
- uint32_t max_data_size_to_read,
- uint32_t *actual_msg_size,
- VCHI_FLAGS_T flags)
+int32_t vchi_msg_dequeue(struct vchi_service_handle *handle, void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size, enum vchi_flags flags)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -364,13 +360,13 @@ int32_t vchi_held_msg_release(struct vchi_held_msg *message)
{
/*
* Convert the service field pointer back to an
- * VCHIQ_SERVICE_HANDLE_T which is an int.
+ * unsigned int which is an int.
* This pointer is opaque to everything except
* vchi_msg_hold which simply upcasted the int
* to a pointer.
*/
- vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)(long)message->service,
+ vchiq_release_message((unsigned int)(long)message->service,
(struct vchiq_header *)message->message);
return 0;
@@ -380,10 +376,10 @@ EXPORT_SYMBOL(vchi_held_msg_release);
/***********************************************************
* Name: vchi_msg_hold
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void **data,
* uint32_t *msg_size,
- * VCHI_FLAGS_T flags,
+ * enum vchi_flags flags,
* struct vchi_held_msg *message_handle
*
* Description: Routine to return a pointer to the current message (to allow
@@ -394,11 +390,9 @@ EXPORT_SYMBOL(vchi_held_msg_release);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
- void **data,
- uint32_t *msg_size,
- VCHI_FLAGS_T flags,
- struct vchi_held_msg *message_handle)
+int32_t vchi_msg_hold(struct vchi_service_handle *handle, void **data,
+ uint32_t *msg_size, enum vchi_flags flags,
+ struct vchi_held_msg *message_handle)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -416,7 +410,7 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
*msg_size = header->size;
/*
- * upcast the VCHIQ_SERVICE_HANDLE_T which is an int
+	 * upcast the unsigned int service handle
* to a pointer and stuff it in the held message.
* This pointer is opaque to everything except
* vchi_held_msg_release which simply downcasts it back
@@ -434,7 +428,7 @@ EXPORT_SYMBOL(vchi_msg_hold);
/***********************************************************
* Name: vchi_initialise
*
- * Arguments: VCHI_INSTANCE_T *instance_handle
+ * Arguments: struct vchi_instance_handle **instance_handle
*
* Description: Initialises the hardware but does not transmit anything
* When run as a Host App this will be called twice hence the need
@@ -444,14 +438,14 @@ EXPORT_SYMBOL(vchi_msg_hold);
*
***********************************************************/
-int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
+int32_t vchi_initialise(struct vchi_instance_handle **instance_handle)
{
- VCHIQ_INSTANCE_T instance;
- VCHIQ_STATUS_T status;
+ struct vchiq_instance *instance;
+ enum vchiq_status status;
status = vchiq_initialise(&instance);
- *instance_handle = (VCHI_INSTANCE_T)instance;
+ *instance_handle = (struct vchi_instance_handle *)instance;
return vchiq_status_to_vchi(status);
}
@@ -460,7 +454,7 @@ EXPORT_SYMBOL(vchi_initialise);
/***********************************************************
* Name: vchi_connect
*
- * Arguments: VCHI_INSTANCE_T instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
*
* Description: Starts the command service on each connection,
* causing INIT messages to be pinged back and forth
@@ -468,9 +462,9 @@ EXPORT_SYMBOL(vchi_initialise);
* Returns: 0 if successful, failure otherwise
*
***********************************************************/
-int32_t vchi_connect(VCHI_INSTANCE_T instance_handle)
+int32_t vchi_connect(struct vchi_instance_handle *instance_handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
return vchiq_connect(instance);
}
@@ -479,7 +473,7 @@ EXPORT_SYMBOL(vchi_connect);
/***********************************************************
* Name: vchi_disconnect
*
- * Arguments: VCHI_INSTANCE_T instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
*
* Description: Stops the command service on each connection,
* causing DE-INIT messages to be pinged back and forth
@@ -487,9 +481,9 @@ EXPORT_SYMBOL(vchi_connect);
* Returns: 0 if successful, failure otherwise
*
***********************************************************/
-int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
+int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
return vchiq_status_to_vchi(vchiq_shutdown(instance));
}
@@ -499,9 +493,9 @@ EXPORT_SYMBOL(vchi_disconnect);
* Name: vchi_service_open
* Name: vchi_service_create
*
- * Arguments: VCHI_INSTANCE_T *instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
* struct service_creation *setup,
- * VCHI_SERVICE_HANDLE_T *handle
+ * struct vchi_service_handle **handle
*
* Description: Routine to open a service
*
@@ -509,9 +503,9 @@ EXPORT_SYMBOL(vchi_disconnect);
*
***********************************************************/
-static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
+static enum vchiq_status shim_callback(enum vchiq_reason reason,
struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T handle,
+ unsigned int handle,
void *bulk_user)
{
struct shim_service *service =
@@ -571,7 +565,7 @@ done:
return VCHIQ_SUCCESS;
}
-static struct shim_service *service_alloc(VCHIQ_INSTANCE_T instance,
+static struct shim_service *service_alloc(struct vchiq_instance *instance,
struct service_creation *setup)
{
struct shim_service *service = kzalloc(sizeof(struct shim_service), GFP_KERNEL);
@@ -579,7 +573,7 @@ static struct shim_service *service_alloc(VCHIQ_INSTANCE_T instance,
(void)instance;
if (service) {
- if (vchiu_queue_init(&service->queue, 64)) {
+ if (!vchiu_queue_init(&service->queue, 64)) {
service->callback = setup->callback;
service->callback_param = setup->callback_param;
} else {
@@ -599,18 +593,18 @@ static void service_free(struct shim_service *service)
}
}
-int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
+int32_t vchi_service_open(struct vchi_instance_handle *instance_handle,
struct service_creation *setup,
- VCHI_SERVICE_HANDLE_T *handle)
+ struct vchi_service_handle **handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
struct shim_service *service = service_alloc(instance, setup);
- *handle = (VCHI_SERVICE_HANDLE_T)service;
+ *handle = (struct vchi_service_handle *)service;
if (service) {
struct vchiq_service_params params;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
memset(&params, 0, sizeof(params));
params.fourcc = setup->service_id;
@@ -628,17 +622,17 @@ int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
}
}
- return (service != NULL) ? 0 : -1;
+ return service ? 0 : -1;
}
EXPORT_SYMBOL(vchi_service_open);
-int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_close(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
+ enum vchiq_status status = vchiq_close_service(service->handle);
if (status == VCHIQ_SUCCESS)
service_free(service);
@@ -648,13 +642,13 @@ int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
}
EXPORT_SYMBOL(vchi_service_close);
-int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_destroy(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
+ enum vchiq_status status = vchiq_remove_service(service->handle);
if (status == VCHIQ_SUCCESS) {
service_free(service);
@@ -667,13 +661,13 @@ int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
}
EXPORT_SYMBOL(vchi_service_destroy);
-int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
- VCHI_SERVICE_OPTION_T option,
+int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
+ enum vchi_service_option option,
int value)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_SERVICE_OPTION_T vchiq_option;
+ enum vchiq_service_option vchiq_option;
switch (option) {
case VCHI_SERVICE_OPTION_TRACE:
@@ -687,7 +681,7 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
break;
}
if (service) {
- VCHIQ_STATUS_T status =
+ enum vchiq_status status =
vchiq_set_service_option(service->handle,
vchiq_option,
value);
@@ -698,13 +692,13 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
}
EXPORT_SYMBOL(vchi_service_set_option);
-int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
+int32_t vchi_get_peer_version(const struct vchi_service_handle *handle, short *peer_version)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
status = vchiq_get_peer_version(service->handle, peer_version);
ret = vchiq_status_to_vchi(status);
@@ -716,14 +710,14 @@ EXPORT_SYMBOL(vchi_get_peer_version);
/***********************************************************
* Name: vchi_service_use
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ * Arguments: const struct vchi_service_handle *handle
*
* Description: Routine to increment refcount on a service
*
* Returns: void
*
***********************************************************/
-int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_use(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
@@ -737,14 +731,14 @@ EXPORT_SYMBOL(vchi_service_use);
/***********************************************************
* Name: vchi_service_release
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ * Arguments: const struct vchi_service_handle *handle
*
* Description: Routine to decrement refcount on a service
*
* Returns: void
*
***********************************************************/
-int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_release(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 5e6d3035dc05..644844d88fed 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -24,9 +24,9 @@ int vchiu_queue_init(struct vchiu_queue *queue, int size)
GFP_KERNEL);
if (!queue->storage) {
vchiu_queue_delete(queue);
- return 0;
+ return -ENOMEM;
}
- return 1;
+ return 0;
}
void vchiu_queue_delete(struct vchiu_queue *queue)
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 6a33aaa1a49f..fd0ea4dbcb91 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -494,7 +494,7 @@ static const struct file_operations vme_user_fops = {
.write = vme_user_write,
.llseek = vme_user_llseek,
.unlocked_ioctl = vme_user_unlocked_ioctl,
- .compat_ioctl = vme_user_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = vme_user_mmap,
};
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index eba4ee0750dc..e65c9825ea5a 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -79,14 +79,10 @@ static void s_vCalculateOFDMRParameter(unsigned char byRate, u8 bb_type,
*
* Return Value: none
*/
-static
-void
-s_vCalculateOFDMRParameter(
- unsigned char byRate,
- u8 bb_type,
- unsigned char *pbyTxRate,
- unsigned char *pbyRsvTime
-)
+static void s_vCalculateOFDMRParameter(unsigned char byRate,
+ u8 bb_type,
+ unsigned char *pbyTxRate,
+ unsigned char *pbyRsvTime)
{
switch (byRate) {
case RATE_6M:
@@ -736,8 +732,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_36 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_36M),
bb_type,
&byTxRate,
@@ -745,8 +740,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_48 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_48M),
bb_type,
&byTxRate,
@@ -754,8 +748,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_54 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
@@ -763,8 +756,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_72 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 887c1692e05b..25867cb9d13d 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -45,7 +45,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type);
void CARDvUpdateBasicTopRate(struct vnt_private *priv);
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
void CARDvSetLoopbackMode(struct vnt_private *priv,
- unsigned short wLoopbackMode);
+ unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *priv);
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 082302944c37..f69fc687d4c3 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -295,7 +295,8 @@ static void device_init_registers(struct vnt_private *priv)
/* Get Desire Power Value */
priv->byCurPwr = 0xFF;
priv->byCCKPwr = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_PWR_CCK);
- priv->byOFDMPwrG = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_PWR_OFDMG);
+ priv->byOFDMPwrG = SROMbyReadEmbedded(priv->PortOffset,
+ EEP_OFS_PWR_OFDMG);
/* Load power Table */
for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
@@ -373,7 +374,7 @@ static void device_init_registers(struct vnt_private *priv)
priv->bRadioOff = false;
priv->byRadioCtl = SROMbyReadEmbedded(priv->PortOffset,
- EEP_OFS_RADIOCTL);
+ EEP_OFS_RADIOCTL);
priv->bHWRadioOff = false;
if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
@@ -659,12 +660,13 @@ static int device_init_td0_ring(struct vnt_private *priv)
desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD0Rings[(i+1) % priv->opts.tx_descs[0]]);
- desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
+ desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
+ desc->next_desc = cpu_to_le32(curr +
+ sizeof(struct vnt_tx_desc));
}
if (i > 0)
- priv->apTD0Rings[i-1].next_desc = cpu_to_le32(priv->td0_pool_dma);
+ priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
return 0;
@@ -704,7 +706,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
}
if (i > 0)
- priv->apTD1Rings[i-1].next_desc = cpu_to_le32(priv->td1_pool_dma);
+ priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
return 0;
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 9725de3bca6a..bfd598a93b04 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -97,10 +97,7 @@ void PSvEnablePowerSaving(struct vnt_private *priv,
*
*/
-void
-PSvDisablePowerSaving(
- struct vnt_private *priv
-)
+void PSvDisablePowerSaving(struct vnt_private *priv)
{
/* disable power saving hw function */
MACbPSWakeup(priv);
@@ -126,10 +123,7 @@ PSvDisablePowerSaving(
*
*/
-bool
-PSbIsNextTBTTWakeUp(
- struct vnt_private *priv
-)
+bool PSbIsNextTBTTWakeUp(struct vnt_private *priv)
{
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &hw->conf;
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index e80fed69bafe..fb2855e686a7 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -436,7 +436,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:disable */
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ - 1]);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
@@ -558,7 +558,8 @@ static bool RFbAL2230Init(struct vnt_private *priv)
MACvTimer0MicroSDelay(priv, 30);/* 30us */
ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
- ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
+ ret &= IFRFbWriteEmbedded(priv,
+ dwAL2230InitTable[CB_AL2230_INIT_SEQ - 1]);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 042ac67a9709..affb70eba10f 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -61,23 +61,14 @@
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData);
bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel);
-bool RFbInit(
- struct vnt_private *priv
-);
+bool RFbInit(struct vnt_private *priv);
bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, u16 uChannel);
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH);
-bool RFbRawSetPower(
- struct vnt_private *priv,
- unsigned char byPwr,
- unsigned int rate
-);
+bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
+ unsigned int rate);
-void
-RFvRSSITodBm(
- struct vnt_private *priv,
- unsigned char byCurrRSSI,
- long *pldBm
-);
+void RFvRSSITodBm(struct vnt_private *priv, unsigned char byCurrRSSI,
+ long *pldBm);
/* {{ RobertYu: 20050104 */
bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv, u16 byOldChannel, u16 byNewChannel);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index a14908895b9e..37fcc42ed000 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1289,7 +1289,7 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
current_rate = rate->hw_value;
if (priv->wCurrentRate != current_rate &&
- !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
+ !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
priv->wCurrentRate = current_rate;
RFbSetPower(priv, priv->wCurrentRate,
@@ -1396,7 +1396,8 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
tx_key = info->control.hw_key;
if (tx_key->keylen > 0)
vnt_fill_txkey(hdr, tx_buffer_head->tx_key,
- tx_key, skb, tx_body_size, td_info->mic_hdr);
+ tx_key, skb, tx_body_size,
+ td_info->mic_hdr);
}
return 0;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 856ba97aec4f..4ac85ecb0921 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -249,10 +249,10 @@ static int vnt_init_registers(struct vnt_private *priv)
} else {
priv->tx_antenna_mode = ANT_B;
- if (priv->tx_rx_ant_inv)
- priv->rx_antenna_mode = ANT_A;
- else
- priv->rx_antenna_mode = ANT_B;
+ if (priv->tx_rx_ant_inv)
+ priv->rx_antenna_mode = ANT_A;
+ else
+ priv->rx_antenna_mode = ANT_B;
}
}
@@ -362,7 +362,6 @@ static int vnt_init_registers(struct vnt_private *priv)
goto end;
}
-
ret = vnt_mac_set_led(priv, LEDSTS_TMLEN, 0x38);
if (ret)
goto end;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 4e9cfacf75f2..f9020a4f7bbf 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -112,11 +112,11 @@ static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
frame_length, rate);
if (pkt_type == PK_TYPE_11B)
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, (u16)priv->top_cck_basic_rate);
+ ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
+ (u16)priv->top_cck_basic_rate);
else
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, (u16)priv->top_ofdm_basic_rate);
+ ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
+ (u16)priv->top_ofdm_basic_rate);
if (need_ack)
return data_time + priv->sifs + ack_time;
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
new file mode 100644
index 000000000000..26de6762b942
--- /dev/null
+++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
@@ -0,0 +1,97 @@
+The WFxxx chip series can be connected via SPI or via SDIO.
+
+SPI
+---
+
+You have to declare the WFxxx chip in your device tree.
+
+Required properties:
+ - compatible: Should be "silabs,wfx-spi"
+ - reg: Chip select address of device
+ - spi-max-frequency: Maximum SPI clocking speed of device in Hz
+ - interrupts-extended: Should contain interrupt line (interrupt-parent +
+   interrupt can also be used). The trigger should be `IRQ_TYPE_EDGE_RISING`.
+
+Optional properties:
+ - reset-gpios: phandle of the GPIO used to reset the chip during probe.
+ Without this property, you may encounter issues with warm boot.
+
+Please consult Documentation/devicetree/bindings/spi/spi-bus.txt for optional
+SPI connection related properties,
+
+Example:
+
+&spi1 {
+ wfx {
+ compatible = "silabs,wfx-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_irq &wfx_gpios>;
+ interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
+ wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_HIGH>;
+ reg = <0>;
+ spi-max-frequency = <42000000>;
+ };
+};
+
+
+SDIO
+----
+
+The driver is able to detect a WFxxx chip on the SDIO bus by matching its
+Vendor ID and Product ID. However, the driver will only provide limited
+features in this case. Declaring the WFxxx chip in the device tree is
+therefore strongly recommended (and may become mandatory in the future).
+
+Required properties:
+ - compatible: Should be "silabs,wfx-sdio"
+ - reg: Should be 1
+
+In addition, it is recommended to declare an mmc-pwrseq node on the SDIO host
+above the WFx. Without it, you may encounter issues with warm boot. The
+mmc-pwrseq should be compatible with mmc-pwrseq-simple. Please consult
+Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt for more
+information.
+
+Example:
+
+/ {
+ wfx_pwrseq: wfx_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_reset>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mmc1 {
+ mmc-pwrseq = <&wfx_pwrseq>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+ mmc@1 {
+ compatible = "silabs,wfx-sdio";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_wakeup>;
+ wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+Note that #address-cells and #size-cells should already be defined in the mmc1
+node, but this is rarely the case.
+
+Common properties
+-----------------
+
+Some properties are recognized by both the SPI and SDIO versions:
+ - wakeup-gpios: phandle of the GPIO used to wake up the chip. Without this
+   property, the driver will disable most of the power saving features.
+ - config-file: Use an alternative file as PDS. Default is `wf200.pds`. Only
+   necessary for development/debug purposes.
+ - slk_key: String representing the hexadecimal value of the secure link key
+   to use. Must contain 64 hexadecimal digits. Not supported in the current
+   version.
+
+The WFx driver also supports `mac-address` and `local-mac-address` as
+described in Documentation/devicetree/bindings/net/ethernet.txt
+
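+For illustration, a hypothetical SPI node combining these common properties
+could look as follows (the PDS file name and the MAC address below are
+placeholders, and the GPIO numbers are taken from the SPI example above):
+
+&spi1 {
+	wfx {
+		compatible = "silabs,wfx-spi";
+		reg = <0>;
+		spi-max-frequency = <42000000>;
+		interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
+		wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+		config-file = "wf200_custom.pds";
+		mac-address = [00 01 02 03 04 05];
+	};
+};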
diff --git a/drivers/staging/wfx/Kconfig b/drivers/staging/wfx/Kconfig
new file mode 100644
index 000000000000..83ee4d0ca8c6
--- /dev/null
+++ b/drivers/staging/wfx/Kconfig
@@ -0,0 +1,8 @@
+config WFX
+ tristate "Silicon Labs wireless chips WF200 and further"
+ depends on MAC80211
+ depends on MMC || !MMC # do not allow WFX=y if MMC=m
+ depends on (SPI || MMC)
+ help
+	  This is a driver for the Silicon Labs WFxxx series (WF200 and further)
+	  chipsets. These chips can be found on SPI or SDIO buses.
diff --git a/drivers/staging/wfx/Makefile b/drivers/staging/wfx/Makefile
new file mode 100644
index 000000000000..0d9c1ed092f6
--- /dev/null
+++ b/drivers/staging/wfx/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Necessary for CREATE_TRACE_POINTS
+CFLAGS_debug.o = -I$(src)
+
+wfx-y := \
+ bh.o \
+ hwio.o \
+ fwio.o \
+ hif_tx.o \
+ hif_rx.o \
+ queue.o \
+ data_tx.o \
+ data_rx.o \
+ scan.o \
+ sta.o \
+ key.o \
+ main.o \
+ debug.o
+wfx-$(CONFIG_SPI) += bus_spi.o
+wfx-$(subst m,y,$(CONFIG_MMC)) += bus_sdio.o
+
+obj-$(CONFIG_WFX) += wfx.o
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
new file mode 100644
index 000000000000..e44772289af8
--- /dev/null
+++ b/drivers/staging/wfx/TODO
@@ -0,0 +1,17 @@
+This is a list of things that need to be done to get this driver out of the
+staging directory.
+
+ - I have to make a decision about secure link support. I can:
+   - drop it completely
+   - keep it in an external patch (my preferred option)
+   - replace the calls to mbedtls with the kernel crypto API (this requires
+     a bunch of work)
+   - pull mbedtls into the kernel (not realistic)
+
+ - The mac80211 interface does not (yet) have the expected quality to be
+   moved out of staging:
+   - Some of the processing is redundant with what mac80211 already does
+   - Many members of wfx_dev/wfx_vif can be retrieved from mac80211
+     structures
+   - Some functions are too complex
+   - ...
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
new file mode 100644
index 000000000000..2432ba95c2f5
--- /dev/null
+++ b/drivers/staging/wfx/bh.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Interrupt bottom half (BH).
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/gpio/consumer.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "traces.h"
+#include "secure_link.h"
+#include "hif_rx.h"
+#include "hif_api_cmd.h"
+
+static void device_wakeup(struct wfx_dev *wdev)
+{
+ if (!wdev->pdata.gpio_wakeup)
+ return;
+ if (gpiod_get_value(wdev->pdata.gpio_wakeup))
+ return;
+
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 1);
+ if (wfx_api_older_than(wdev, 1, 4)) {
+ if (!completion_done(&wdev->hif.ctrl_ready))
+ udelay(2000);
+ } else {
+		// completion.h does not provide any function to wait for a
+		// completion without consuming it (a kind of
+		// wait_for_completion_done_timeout()), so we have to emulate
+		// it.
+ if (wait_for_completion_timeout(&wdev->hif.ctrl_ready,
+ msecs_to_jiffies(2) + 1))
+ complete(&wdev->hif.ctrl_ready);
+ else
+			dev_err(wdev->dev, "timeout while waking up chip\n");
+ }
+}
+
+static void device_release(struct wfx_dev *wdev)
+{
+ if (!wdev->pdata.gpio_wakeup)
+ return;
+
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
+}
+
+static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
+{
+ struct sk_buff *skb;
+ struct hif_msg *hif;
+ size_t alloc_len;
+ size_t computed_len;
+ int release_count;
+ int piggyback = 0;
+
+ WARN(read_len < 4, "corrupted read");
+ WARN(read_len > round_down(0xFFF, 2) * sizeof(u16),
+	     "%s: request exceeds WFx capability", __func__);
+
+ // Add 2 to take into account piggyback size
+ alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
+ skb = dev_alloc_skb(alloc_len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (wfx_data_read(wdev, skb->data, alloc_len))
+ goto err;
+
+ piggyback = le16_to_cpup((u16 *)(skb->data + alloc_len - 2));
+ _trace_piggyback(piggyback, false);
+
+ hif = (struct hif_msg *)skb->data;
+ WARN(hif->encrypted & 0x1, "unsupported encryption type");
+ if (hif->encrypted == 0x2) {
+ if (wfx_sl_decode(wdev, (void *)hif)) {
+ dev_kfree_skb(skb);
+ // If frame was a confirmation, expect trouble in next
+ // exchange. However, it is harmless to fail to decode
+ // an indication frame, so try to continue. Anyway,
+ // piggyback is probably correct.
+ return piggyback;
+ }
+ le16_to_cpus(&hif->len);
+ computed_len = round_up(hif->len - sizeof(hif->len), 16)
+ + sizeof(struct hif_sl_msg)
+ + sizeof(struct hif_sl_tag);
+ } else {
+ le16_to_cpus(&hif->len);
+ computed_len = round_up(hif->len, 2);
+ }
+ if (computed_len != read_len) {
+ dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
+ computed_len, read_len);
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1,
+ hif, read_len, true);
+ goto err;
+ }
+
+ if (!(hif->id & HIF_ID_IS_INDICATION)) {
+ (*is_cnf)++;
+ if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
+ release_count = le32_to_cpu(((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs);
+ else
+ release_count = 1;
+ WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
+ wdev->hif.tx_buffers_used -= release_count;
+ if (!wdev->hif.tx_buffers_used)
+ wake_up(&wdev->hif.tx_buffers_empty);
+ }
+ _trace_hif_recv(hif, wdev->hif.tx_buffers_used);
+
+ if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
+ if (hif->seqnum != wdev->hif.rx_seqnum)
+ dev_warn(wdev->dev, "wrong message sequence: %d != %d\n",
+ hif->seqnum, wdev->hif.rx_seqnum);
+ wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
+ }
+
+ skb_put(skb, hif->len);
+	// wfx_handle_rx takes care of the skb lifetime
+ wfx_handle_rx(wdev, skb);
+
+ return piggyback;
+
+err:
+ if (skb)
+ dev_kfree_skb(skb);
+ return -EIO;
+}
+
+static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
+{
+ size_t len;
+ int i;
+ int ctrl_reg, piggyback;
+
+ piggyback = 0;
+ for (i = 0; i < max_msg; i++) {
+ if (piggyback & CTRL_NEXT_LEN_MASK)
+ ctrl_reg = piggyback;
+ else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
+ ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
+ else
+ ctrl_reg = 0;
+ if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
+ return i;
+		// ctrl_reg expresses the length in 16-bit words
+ len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
+ piggyback = rx_helper(wdev, len, num_cnf);
+ if (piggyback < 0)
+ return i;
+ if (!(piggyback & CTRL_WLAN_READY))
+ dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n",
+ piggyback);
+ }
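+	// max_msg was reached but the device still announces pending data:
+	// store the piggybacked control word back into ctrl_reg and re-arm the
+	// completion so that the next run of the bottom half picks it up.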
+ if (piggyback & CTRL_NEXT_LEN_MASK) {
+ ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
+ complete(&wdev->hif.ctrl_ready);
+ if (ctrl_reg)
+ dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n",
+ ctrl_reg, piggyback);
+ }
+ return i;
+}
+
+static void tx_helper(struct wfx_dev *wdev, struct hif_msg *hif)
+{
+ int ret;
+ void *data;
+ bool is_encrypted = false;
+ size_t len = le16_to_cpu(hif->len);
+
+ WARN(len < sizeof(*hif), "try to send corrupted data");
+
+ hif->seqnum = wdev->hif.tx_seqnum;
+ wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
+
+ if (wfx_is_secure_command(wdev, hif->id)) {
+ len = round_up(len - sizeof(hif->len), 16) + sizeof(hif->len) +
+ sizeof(struct hif_sl_msg_hdr) +
+ sizeof(struct hif_sl_tag);
+		// AES supports in-place encryption. However, mac80211 accesses
+		// the 802.11 header after the frame has been sent (to get the
+		// MAC addresses), so keep the original buffer untouched.
+ data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ goto end;
+ is_encrypted = true;
+ ret = wfx_sl_encode(wdev, hif, data);
+ if (ret)
+ goto end;
+ } else {
+ data = hif;
+ }
+ WARN(len > wdev->hw_caps.size_inp_ch_buf,
+	     "%s: request exceeds WFx capability: %zu > %d\n", __func__,
+ len, wdev->hw_caps.size_inp_ch_buf);
+ len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
+ ret = wfx_data_write(wdev, data, len);
+ if (ret)
+ goto end;
+
+ wdev->hif.tx_buffers_used++;
+ _trace_hif_send(hif, wdev->hif.tx_buffers_used);
+end:
+ if (is_encrypted)
+ kfree(data);
+}
+
+static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
+{
+ struct hif_msg *hif;
+ int i;
+
+ for (i = 0; i < max_msg; i++) {
+ hif = NULL;
+ if (wdev->hif.tx_buffers_used < wdev->hw_caps.num_inp_ch_bufs) {
+ if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
+ WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+ hif = wdev->hif_cmd.buf_send;
+ } else {
+ hif = wfx_tx_queues_get(wdev);
+ }
+ }
+ if (!hif)
+ return i;
+ tx_helper(wdev, hif);
+ }
+ return i;
+}
+
+/* In SDIO mode, a register access is necessary to acknowledge the last
+ * received message. This acknowledgment could be restricted to SDIO mode,
+ * and to the case where the last operation was an rx.
+ */
+static void ack_sdio_data(struct wfx_dev *wdev)
+{
+ u32 cfg_reg;
+
+ config_reg_read(wdev, &cfg_reg);
+ if (cfg_reg & 0xFF) {
+ dev_warn(wdev->dev, "chip reports errors: %02x\n",
+ cfg_reg & 0xFF);
+ config_reg_write_bits(wdev, 0xFF, 0x00);
+ }
+}
+
+static void bh_work(struct work_struct *work)
+{
+ struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
+ int stats_req = 0, stats_cnf = 0, stats_ind = 0;
+ bool release_chip = false, last_op_is_rx = false;
+ int num_tx, num_rx;
+
+ device_wakeup(wdev);
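+	// Alternate between sending queued TX messages and fetching RX
+	// messages until both directions are idle.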
+ do {
+ num_tx = bh_work_tx(wdev, 32);
+ stats_req += num_tx;
+ if (num_tx)
+ last_op_is_rx = false;
+ num_rx = bh_work_rx(wdev, 32, &stats_cnf);
+ stats_ind += num_rx;
+ if (num_rx)
+ last_op_is_rx = true;
+ } while (num_rx || num_tx);
+ stats_ind -= stats_cnf;
+
+ if (last_op_is_rx)
+ ack_sdio_data(wdev);
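+	// Only let the chip sleep again if no TX buffer is in flight, no new
+	// work is pending and no scan is in progress.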
+ if (!wdev->hif.tx_buffers_used && !work_pending(work) &&
+ !atomic_read(&wdev->scan_in_progress)) {
+ device_release(wdev);
+ release_chip = true;
+ }
+ _trace_bh_stats(stats_ind, stats_req, stats_cnf,
+ wdev->hif.tx_buffers_used, release_chip);
+}
+
+/*
+ * An IRQ from the chip has occurred
+ */
+void wfx_bh_request_rx(struct wfx_dev *wdev)
+{
+ u32 cur, prev;
+
+ control_reg_read(wdev, &cur);
+ prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
+ complete(&wdev->hif.ctrl_ready);
+ queue_work(system_highpri_wq, &wdev->hif.bh);
+
+ if (!(cur & CTRL_NEXT_LEN_MASK))
+ dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
+ cur);
+ if (prev != 0)
+ dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n",
+ prev, cur);
+}
+
+/*
+ * The driver wants to send data
+ */
+void wfx_bh_request_tx(struct wfx_dev *wdev)
+{
+ queue_work(system_highpri_wq, &wdev->hif.bh);
+}
+
+void wfx_bh_register(struct wfx_dev *wdev)
+{
+ INIT_WORK(&wdev->hif.bh, bh_work);
+ init_completion(&wdev->hif.ctrl_ready);
+ init_waitqueue_head(&wdev->hif.tx_buffers_empty);
+}
+
+void wfx_bh_unregister(struct wfx_dev *wdev)
+{
+ flush_work(&wdev->hif.bh);
+}
diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h
new file mode 100644
index 000000000000..93ca98424e0b
--- /dev/null
+++ b/drivers/staging/wfx/bh.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interrupt bottom half.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BH_H
+#define WFX_BH_H
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct wfx_dev;
+
+struct wfx_hif {
+ struct work_struct bh;
+ struct completion ctrl_ready;
+ wait_queue_head_t tx_buffers_empty;
+ atomic_t ctrl_reg;
+ int rx_seqnum;
+ int tx_seqnum;
+ int tx_buffers_used;
+};
+
+void wfx_bh_register(struct wfx_dev *wdev);
+void wfx_bh_unregister(struct wfx_dev *wdev);
+void wfx_bh_request_rx(struct wfx_dev *wdev);
+void wfx_bh_request_tx(struct wfx_dev *wdev);
+
+#endif /* WFX_BH_H */
diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h
new file mode 100644
index 000000000000..62d6ecabe4cb
--- /dev/null
+++ b/drivers/staging/wfx/bus.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common bus abstraction layer.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BUS_H
+#define WFX_BUS_H
+
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+
+#define WFX_REG_CONFIG 0x0
+#define WFX_REG_CONTROL 0x1
+#define WFX_REG_IN_OUT_QUEUE 0x2
+#define WFX_REG_AHB_DPORT 0x3
+#define WFX_REG_BASE_ADDR 0x4
+#define WFX_REG_SRAM_DPORT 0x5
+#define WFX_REG_SET_GEN_R_W 0x6
+#define WFX_REG_FRAME_OUT 0x7
+
+struct hwbus_ops {
+ int (*copy_from_io)(void *bus_priv, unsigned int addr,
+ void *dst, size_t count);
+ int (*copy_to_io)(void *bus_priv, unsigned int addr,
+ const void *src, size_t count);
+ void (*lock)(void *bus_priv);
+ void (*unlock)(void *bus_priv);
+ size_t (*align_size)(void *bus_priv, size_t size);
+};
+
+extern struct sdio_driver wfx_sdio_driver;
+extern struct spi_driver wfx_spi_driver;
+
+#endif
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
new file mode 100644
index 000000000000..f8901164c206
--- /dev/null
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SDIO interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+static const struct wfx_platform_data wfx_sdio_pdata = {
+ .file_fw = "wfm_wf200",
+ .file_pds = "wf200.pds",
+};
+
+struct wfx_sdio_priv {
+ struct sdio_func *func;
+ struct wfx_dev *core;
+ u8 buf_id_tx;
+ u8 buf_id_rx;
+ int of_irq;
+};
+
+static int wfx_sdio_copy_from_io(void *priv, unsigned int reg_id,
+ void *dst, size_t count)
+{
+ struct wfx_sdio_priv *bus = priv;
+ unsigned int sdio_addr = reg_id << 2;
+ int ret;
+
+ WARN(reg_id > 7, "chip only has 7 registers");
+	WARN(((uintptr_t)dst) & 3, "unaligned buffer address");
+	WARN(count & 3, "unaligned buffer size");
+
+ /* Use queue mode buffers */
+ if (reg_id == WFX_REG_IN_OUT_QUEUE)
+ sdio_addr |= (bus->buf_id_rx + 1) << 7;
+ ret = sdio_memcpy_fromio(bus->func, dst, sdio_addr, count);
+ if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+ bus->buf_id_rx = (bus->buf_id_rx + 1) % 4;
+
+ return ret;
+}
+
+static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id,
+ const void *src, size_t count)
+{
+ struct wfx_sdio_priv *bus = priv;
+ unsigned int sdio_addr = reg_id << 2;
+ int ret;
+
+ WARN(reg_id > 7, "chip only has 7 registers");
+	WARN(((uintptr_t)src) & 3, "unaligned buffer address");
+	WARN(count & 3, "unaligned buffer size");
+
+ /* Use queue mode buffers */
+ if (reg_id == WFX_REG_IN_OUT_QUEUE)
+ sdio_addr |= bus->buf_id_tx << 7;
+ // FIXME: discards 'const' qualifier for src
+ ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count);
+ if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+ bus->buf_id_tx = (bus->buf_id_tx + 1) % 32;
+
+ return ret;
+}
+
+static void wfx_sdio_lock(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_claim_host(bus->func);
+}
+
+static void wfx_sdio_unlock(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_release_host(bus->func);
+}
+
+static void wfx_sdio_irq_handler(struct sdio_func *func)
+{
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ if (bus->core)
+ wfx_bh_request_rx(bus->core);
+ else
+ WARN(!bus->core, "race condition in driver init/deinit");
+}
+
+static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ if (!bus->core) {
+ WARN(!bus->core, "race condition in driver init/deinit");
+ return IRQ_NONE;
+ }
+ sdio_claim_host(bus->func);
+ wfx_bh_request_rx(bus->core);
+ sdio_release_host(bus->func);
+ return IRQ_HANDLED;
+}
+
+static int wfx_sdio_irq_subscribe(struct wfx_sdio_priv *bus)
+{
+ int ret;
+
+ if (bus->of_irq) {
+ ret = request_irq(bus->of_irq, wfx_sdio_irq_handler_ext,
+ IRQF_TRIGGER_RISING, "wfx", bus);
+ } else {
+ sdio_claim_host(bus->func);
+ ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler);
+ sdio_release_host(bus->func);
+ }
+ return ret;
+}
+
+static int wfx_sdio_irq_unsubscribe(struct wfx_sdio_priv *bus)
+{
+ int ret;
+
+ if (bus->of_irq) {
+ free_irq(bus->of_irq, bus);
+ ret = 0;
+ } else {
+ sdio_claim_host(bus->func);
+ ret = sdio_release_irq(bus->func);
+ sdio_release_host(bus->func);
+ }
+ return ret;
+}
+
+static size_t wfx_sdio_align_size(void *priv, size_t size)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ return sdio_align_size(bus->func, size);
+}
+
+static const struct hwbus_ops wfx_sdio_hwbus_ops = {
+ .copy_from_io = wfx_sdio_copy_from_io,
+ .copy_to_io = wfx_sdio_copy_to_io,
+ .lock = wfx_sdio_lock,
+ .unlock = wfx_sdio_unlock,
+ .align_size = wfx_sdio_align_size,
+};
+
+static const struct of_device_id wfx_sdio_of_match[];
+static int wfx_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct device_node *np = func->dev.of_node;
+ struct wfx_sdio_priv *bus;
+ int ret;
+
+ if (func->num != 1) {
+ dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n", func->num);
+ return -ENODEV;
+ }
+
+ bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ if (np) {
+ if (!of_match_node(wfx_sdio_of_match, np)) {
+ dev_warn(&func->dev, "no compatible device found in DT\n");
+ return -ENODEV;
+ }
+ bus->of_irq = irq_of_parse_and_map(np, 0);
+ } else {
+ dev_warn(&func->dev,
+ "device is not declared in DT, features will be limited\n");
+ // FIXME: ignore VID/PID and only rely on device tree
+ // return -ENODEV;
+ }
+
+ bus->func = func;
+ sdio_set_drvdata(func, bus);
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0 |
+ MMC_QUIRK_BLKSZ_FOR_BYTE_MODE |
+ MMC_QUIRK_BROKEN_BYTE_MODE_512;
+
+ sdio_claim_host(func);
+ ret = sdio_enable_func(func);
+	// A 64-byte block size is more efficient than 512 bytes for frame sizes < 4k
+ sdio_set_block_size(func, 64);
+ sdio_release_host(func);
+ if (ret)
+ goto err0;
+
+ ret = wfx_sdio_irq_subscribe(bus);
+ if (ret)
+ goto err1;
+
+ bus->core = wfx_init_common(&func->dev, &wfx_sdio_pdata,
+ &wfx_sdio_hwbus_ops, bus);
+ if (!bus->core) {
+ ret = -EIO;
+ goto err2;
+ }
+
+ ret = wfx_probe(bus->core);
+ if (ret)
+ goto err3;
+
+ return 0;
+
+err3:
+ wfx_free_common(bus->core);
+err2:
+ wfx_sdio_irq_unsubscribe(bus);
+err1:
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+err0:
+ return ret;
+}
+
+static void wfx_sdio_remove(struct sdio_func *func)
+{
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ wfx_release(bus->core);
+ wfx_free_common(bus->core);
+ wfx_sdio_irq_unsubscribe(bus);
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+}
+
+#define SDIO_VENDOR_ID_SILABS 0x0000
+#define SDIO_DEVICE_ID_SILABS_WF200 0x1000
+static const struct sdio_device_id wfx_sdio_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_SILABS, SDIO_DEVICE_ID_SILABS_WF200) },
+ // FIXME: ignore VID/PID and only rely on device tree
+ // { SDIO_DEVICE(SDIO_ANY_ID, SDIO_ANY_ID) },
+ { },
+};
+MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id wfx_sdio_of_match[] = {
+ { .compatible = "silabs,wfx-sdio" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
+#endif
+
+struct sdio_driver wfx_sdio_driver = {
+ .name = "wfx-sdio",
+ .id_table = wfx_sdio_ids,
+ .probe = wfx_sdio_probe,
+ .remove = wfx_sdio_remove,
+ .drv = {
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(wfx_sdio_of_match),
+ }
+};
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
new file mode 100644
index 000000000000..ab0cda1e124f
--- /dev/null
+++ b/drivers/staging/wfx/bus_spi.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2011, Sagrad Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+static int gpio_reset = -2;
+module_param(gpio_reset, int, 0644);
+MODULE_PARM_DESC(gpio_reset, "gpio number for reset. -1 for none.");
+
+#define SET_WRITE 0x7FFF /* usage: and operation */
+#define SET_READ 0x8000 /* usage: or operation */
+
+static const struct wfx_platform_data wfx_spi_pdata = {
+ .file_fw = "wfm_wf200",
+ .file_pds = "wf200.pds",
+ .use_rising_clk = true,
+};
+
+struct wfx_spi_priv {
+ struct spi_device *func;
+ struct wfx_dev *core;
+ struct gpio_desc *gpio_reset;
+ struct work_struct request_rx;
+ bool need_swab;
+};
+
+/*
+ * The WFx chip reads data 16 bits at a time and places them directly into a
+ * (little endian) CPU register. So, the chip expects a byte order like
+ * "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3" and BE is "B3 B2 B1 B0").
+ *
+ * A little endian host with bits_per_word == 16 should do the right thing
+ * natively. The code below adds support for big endian hosts and for the
+ * commonly used 8-bit SPI mode.
+ */
+static int wfx_spi_copy_from_io(void *priv, unsigned int addr,
+ void *dst, size_t count)
+{
+ struct wfx_spi_priv *bus = priv;
+ u16 regaddr = (addr << 12) | (count / 2) | SET_READ;
+ struct spi_message m;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .rx_buf = dst,
+ .len = count,
+ };
+ u16 *dst16 = dst;
+ int ret, i;
+
+ WARN(count % 2, "buffer size must be a multiple of 2");
+
+ cpu_to_le16s(&regaddr);
+ if (bus->need_swab)
+ swab16s(&regaddr);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(bus->func, &m);
+
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&dst16[i]);
+ return ret;
+}
+
+static int wfx_spi_copy_to_io(void *priv, unsigned int addr,
+ const void *src, size_t count)
+{
+ struct wfx_spi_priv *bus = priv;
+ u16 regaddr = (addr << 12) | (count / 2);
+ // FIXME: use a bounce buffer
+ u16 *src16 = (void *)src;
+ int ret, i;
+ struct spi_message m;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .tx_buf = src,
+ .len = count,
+ };
+
+ WARN(count % 2, "buffer size must be a multiple of 2");
+ WARN(regaddr & SET_READ, "bad addr or size overflow");
+
+ cpu_to_le16s(&regaddr);
+
+ if (bus->need_swab)
+ swab16s(&regaddr);
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&src16[i]);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(bus->func, &m);
+
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&src16[i]);
+ return ret;
+}
+
+static void wfx_spi_lock(void *priv)
+{
+}
+
+static void wfx_spi_unlock(void *priv)
+{
+}
+
+static irqreturn_t wfx_spi_irq_handler(int irq, void *priv)
+{
+ struct wfx_spi_priv *bus = priv;
+
+ if (!bus->core) {
+ WARN(!bus->core, "race condition in driver init/deinit");
+ return IRQ_NONE;
+ }
+ queue_work(system_highpri_wq, &bus->request_rx);
+ return IRQ_HANDLED;
+}
+
+static void wfx_spi_request_rx(struct work_struct *work)
+{
+ struct wfx_spi_priv *bus =
+ container_of(work, struct wfx_spi_priv, request_rx);
+
+ wfx_bh_request_rx(bus->core);
+}
+
+static size_t wfx_spi_align_size(void *priv, size_t size)
+{
+ // Most SPI controllers avoid DMA if the buffer size is not 32-bit aligned
+ return ALIGN(size, 4);
+}
+
+static const struct hwbus_ops wfx_spi_hwbus_ops = {
+ .copy_from_io = wfx_spi_copy_from_io,
+ .copy_to_io = wfx_spi_copy_to_io,
+ .lock = wfx_spi_lock,
+ .unlock = wfx_spi_unlock,
+ .align_size = wfx_spi_align_size,
+};
+
+static int wfx_spi_probe(struct spi_device *func)
+{
+ struct wfx_spi_priv *bus;
+ int ret;
+
+ if (!func->bits_per_word)
+ func->bits_per_word = 16;
+ ret = spi_setup(func);
+ if (ret)
+ return ret;
+ // The trace below is also displayed by spi_setup() if compiled with DEBUG
+ dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d, bits/word=%d, speed=%d\n",
+ func->chip_select, func->mode, func->bits_per_word,
+ func->max_speed_hz);
+ if (func->bits_per_word != 16 && func->bits_per_word != 8)
+ dev_warn(&func->dev, "unusual bits/word value: %d\n",
+ func->bits_per_word);
+ if (func->max_speed_hz > 49000000)
+ dev_warn(&func->dev, "%dHz is a very high speed\n",
+ func->max_speed_hz);
+
+ bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ bus->func = func;
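+ // Swapping is only needed when the controller works with 8-bit words
+ // or when the host is big endian (see the comment above
+ // wfx_spi_copy_from_io())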
+ if (func->bits_per_word == 8 || IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ bus->need_swab = true;
+ spi_set_drvdata(func, bus);
+
+ bus->gpio_reset = wfx_get_gpio(&func->dev, gpio_reset, "reset");
+ if (!bus->gpio_reset) {
+ dev_warn(&func->dev, "try to load firmware anyway\n");
+ } else {
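+ // Toggle the reset line: hold it for 100us, then give the chip 2ms
+ // to come out of reset before any further access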
+ gpiod_set_value(bus->gpio_reset, 0);
+ udelay(100);
+ gpiod_set_value(bus->gpio_reset, 1);
+ udelay(2000);
+ }
+
+ ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler,
+ IRQF_TRIGGER_RISING, "wfx", bus);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&bus->request_rx, wfx_spi_request_rx);
+ bus->core = wfx_init_common(&func->dev, &wfx_spi_pdata,
+ &wfx_spi_hwbus_ops, bus);
+ if (!bus->core)
+ return -EIO;
+
+ ret = wfx_probe(bus->core);
+ if (ret)
+ wfx_free_common(bus->core);
+
+ return ret;
+}
+
+/* Disconnect Function to be called by SPI stack when device is disconnected */
+static int wfx_spi_disconnect(struct spi_device *func)
+{
+ struct wfx_spi_priv *bus = spi_get_drvdata(func);
+
+ wfx_release(bus->core);
+ wfx_free_common(bus->core);
+ // A few IRQs may still fire during device release. Hopefully, no IRQ
+ // should occur after wdev/wvif have been released.
+ devm_free_irq(&func->dev, func->irq, bus);
+ flush_work(&bus->request_rx);
+ return 0;
+}
+
+/*
+ * For dynamic driver binding, the kernel does not use OF to match the driver.
+ * It only uses the modalias, which is a copy of the DT 'compatible' property
+ * with the vendor prefix stripped.
+ */
+static const struct spi_device_id wfx_spi_id[] = {
+ { "wfx-spi", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, wfx_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id wfx_spi_of_match[] = {
+ { .compatible = "silabs,wfx-spi" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_spi_of_match);
+#endif
+
+struct spi_driver wfx_spi_driver = {
+ .driver = {
+ .name = "wfx-spi",
+ .of_match_table = of_match_ptr(wfx_spi_of_match),
+ },
+ .id_table = wfx_spi_id,
+ .probe = wfx_spi_probe,
+ .remove = wfx_spi_disconnect,
+};
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
new file mode 100644
index 000000000000..e7fcce8d0cc4
--- /dev/null
+++ b/drivers/staging/wfx/data_rx.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "data_rx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+
+static int wfx_handle_pspoll(struct wfx_vif *wvif, struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
+ int link_id = 0;
+ u32 pspoll_mask = 0;
+ int i;
+
+ if (wvif->state != WFX_STATE_AP)
+ return 1;
+ if (!ether_addr_equal(wvif->vif->addr, pspoll->bssid))
+ return 1;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wvif->vif, pspoll->ta);
+ if (sta)
+ link_id = ((struct wfx_sta_priv *)&sta->drv_priv)->link_id;
+ rcu_read_unlock();
+ if (link_id)
+ pspoll_mask = BIT(link_id);
+ else
+ return 1;
+
+ wvif->pspoll_mask |= pspoll_mask;
+ /* Do not report ps-polls if data for the given link id is already queued. */
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ if (wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
+ pspoll_mask)) {
+ wfx_bh_request_tx(wvif->wdev);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *) skb->data;
+ size_t hdrlen = ieee80211_hdrlen(frame->frame_control);
+ size_t iv_len, icv_len;
+
+ /* Oops... There is no fast way to ask mac80211 about
+ * IV/ICV lengths. Even the defines are not exposed.
+ */
+ switch (arg->rx_flags.encryp) {
+ case HIF_RI_FLAGS_WEP_ENCRYPTED:
+ iv_len = 4 /* WEP_IV_LEN */;
+ icv_len = 4 /* WEP_ICV_LEN */;
+ break;
+ case HIF_RI_FLAGS_TKIP_ENCRYPTED:
+ iv_len = 8 /* TKIP_IV_LEN */;
+ icv_len = 4 /* TKIP_ICV_LEN */
+ + 8 /*MICHAEL_MIC_LEN*/;
+ break;
+ case HIF_RI_FLAGS_AES_ENCRYPTED:
+ iv_len = 8 /* CCMP_HDR_LEN */;
+ icv_len = 8 /* CCMP_MIC_LEN */;
+ break;
+ case HIF_RI_FLAGS_WAPI_ENCRYPTED:
+ iv_len = 18 /* WAPI_HDR_LEN */;
+ icv_len = 16 /* WAPI_MIC_LEN */;
+ break;
+ default:
+ dev_err(wdev->dev, "unknown encryption type %d\n",
+ arg->rx_flags.encryp);
+ return -EIO;
+ }
+
+ /* Firmware strips ICV in case of MIC failure. */
+ if (arg->status == HIF_STATUS_MICFAILURE)
+ icv_len = 0;
+
+ if (skb->len < hdrlen + iv_len + icv_len) {
+ dev_warn(wdev->dev, "malformed SDU received\n");
+ return -EIO;
+ }
+
+ /* Remove IV, ICV and MIC */
+ skb_trim(skb, skb->len - icv_len);
+ memmove(skb->data + iv_len, skb->data, hdrlen);
+ skb_pull(skb, iv_len);
+ return 0;
+
+}
+
+void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
+ struct sk_buff *skb)
+{
+ int link_id = arg->rx_flags.peer_sta_id;
+ struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct wfx_link_entry *entry = NULL;
+ bool early_data = false;
+
+ memset(hdr, 0, sizeof(*hdr));
+
+ // FIXME: Why do we drop these frames?
+ if (!arg->rcpi_rssi &&
+ (ieee80211_is_probe_resp(frame->frame_control) ||
+ ieee80211_is_beacon(frame->frame_control)))
+ goto drop;
+
+ if (link_id && link_id <= WFX_MAX_STA_IN_AP_MODE) {
+ entry = &wvif->link_id_db[link_id - 1];
+ entry->timestamp = jiffies;
+ if (entry->status == WFX_LINK_SOFT &&
+ ieee80211_is_data(frame->frame_control))
+ early_data = true;
+ }
+
+ if (arg->status == HIF_STATUS_MICFAILURE)
+ hdr->flag |= RX_FLAG_MMIC_ERROR;
+ else if (arg->status)
+ goto drop;
+
+ if (skb->len < sizeof(struct ieee80211_pspoll)) {
+ dev_warn(wvif->wdev->dev, "malformed SDU received\n");
+ goto drop;
+ }
+
+ if (ieee80211_is_pspoll(frame->frame_control))
+ if (wfx_handle_pspoll(wvif, skb))
+ goto drop;
+
+ hdr->band = NL80211_BAND_2GHZ;
+ hdr->freq = ieee80211_channel_to_frequency(arg->channel_number,
+ hdr->band);
+
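+ // Hardware rate indices: 0-3 are the 11b rates, 6-13 the 11g rates
+ // (entries 4 and 5 do not exist) and 14+ are HT MCS indices, hence
+ // the offsets below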
+ if (arg->rxed_rate >= 14) {
+ hdr->encoding = RX_ENC_HT;
+ hdr->rate_idx = arg->rxed_rate - 14;
+ } else if (arg->rxed_rate >= 4) {
+ hdr->rate_idx = arg->rxed_rate - 2;
+ } else {
+ hdr->rate_idx = arg->rxed_rate;
+ }
+
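+ // The conversion below assumes rcpi_rssi is reported in 0.5dB steps
+ // referenced to -110dBm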
+ hdr->signal = arg->rcpi_rssi / 2 - 110;
+ hdr->antenna = 0;
+
+ if (arg->rx_flags.encryp) {
+ if (wfx_drop_encrypt_data(wvif->wdev, arg, skb))
+ goto drop;
+ hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;
+ if (arg->rx_flags.encryp == HIF_RI_FLAGS_TKIP_ENCRYPTED)
+ hdr->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
+
+ /* Filter block ACK negotiation: fully controlled by firmware */
+ if (ieee80211_is_action(frame->frame_control) &&
+ arg->rx_flags.match_uc_addr &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK)
+ goto drop;
+ if (ieee80211_is_beacon(frame->frame_control) &&
+ !arg->status && wvif->vif &&
+ ether_addr_equal(ieee80211_get_SA(frame),
+ wvif->vif->bss_conf.bssid)) {
+ const u8 *tim_ie;
+ u8 *ies = mgmt->u.beacon.variable;
+ size_t ies_len = skb->len - (ies - skb->data);
+
+ tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
+ if (tim_ie) {
+ struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2];
+
+ if (wvif->dtim_period != tim->dtim_period) {
+ wvif->dtim_period = tim->dtim_period;
+ schedule_work(&wvif->set_beacon_wakeup_period_work);
+ }
+ }
+
+ /* Disable beacon filter once we're associated... */
+ if (wvif->disable_beacon_filter &&
+ (wvif->vif->bss_conf.assoc ||
+ wvif->vif->bss_conf.ibss_joined)) {
+ wvif->disable_beacon_filter = false;
+ schedule_work(&wvif->update_filtering_work);
+ }
+ }
+
+ if (early_data) {
+ spin_lock_bh(&wvif->ps_state_lock);
+ /* Double-check status with lock held */
+ if (entry->status == WFX_LINK_SOFT)
+ skb_queue_tail(&entry->rx_queue, skb);
+ else
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ } else {
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
+ }
+
+ return;
+
+drop:
+ dev_kfree_skb(skb);
+}
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
new file mode 100644
index 000000000000..a50ce352bc5e
--- /dev/null
+++ b/drivers/staging/wfx/data_rx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_RX_H
+#define WFX_DATA_RX_H
+
+#include "hif_api_cmd.h"
+
+struct wfx_vif;
+struct sk_buff;
+
+void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
+ struct sk_buff *skb);
+
+#endif /* WFX_DATA_RX_H */
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
new file mode 100644
index 000000000000..b722e9773232
--- /dev/null
+++ b/drivers/staging/wfx/data_tx.c
@@ -0,0 +1,837 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "data_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+#include "queue.h"
+#include "debug.h"
+#include "traces.h"
+#include "hif_tx_mib.h"
+
+#define WFX_INVALID_RATE_ID (0xFF)
+#define WFX_LINK_ID_NO_ASSOC 15
+#define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
+
+static int wfx_get_hw_rate(struct wfx_dev *wdev,
+ const struct ieee80211_tx_rate *rate)
+{
+ if (rate->idx < 0)
+ return -1;
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ if (rate->idx > 7) {
+ WARN(1, "wrong rate->idx value: %d", rate->idx);
+ return -1;
+ }
+ return rate->idx + 14;
+ }
+ // WFx only supports the 2GHz band; otherwise the band information
+ // should be retrieved from ieee80211_tx_info
+ return wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates[rate->idx].hw_value;
+}
+
+/* TX policy cache implementation */
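+/*
+ * The device applies retry policies referenced by index in each Tx request.
+ * The driver keeps a small cache of rate/retry sets and uploads to the
+ * firmware only the entries that have changed (see wfx_tx_policy_upload()).
+ */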
+
+static void wfx_tx_policy_build(struct wfx_vif *wvif, struct tx_policy *policy,
+ struct ieee80211_tx_rate *rates)
+{
+ int i;
+ size_t count;
+ struct wfx_dev *wdev = wvif->wdev;
+
+ WARN(rates[0].idx < 0, "invalid rate policy");
+ memset(policy, 0, sizeof(*policy));
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ if (rates[i].idx < 0)
+ break;
+ count = i;
+
+ /* HACK!!! The device has problems (at least) switching from
+ * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
+ * of time (100-200 ms), leading to a noticeable throughput drop.
+ * As a workaround, additional g-rates are injected into the
+ * policy.
+ */
+ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
+ rates[0].idx > 4 && rates[0].count > 2 &&
+ rates[1].idx < 2) {
+ int mid_rate = (rates[0].idx + 4) >> 1;
+
+ /* Decrease number of retries for the initial rate */
+ rates[0].count -= 2;
+
+ if (mid_rate != 4) {
+ /* Keep fallback rate at 1Mbps. */
+ rates[3] = rates[1];
+
+ /* Inject 1 transmission on lowest g-rate */
+ rates[2].idx = 4;
+ rates[2].count = 1;
+ rates[2].flags = rates[1].flags;
+
+ /* Inject 1 transmission on mid-rate */
+ rates[1].idx = mid_rate;
+ rates[1].count = 1;
+
+ /* Fallback to 1 Mbps is a really bad thing,
+ * so let's try to increase probability of
+ * successful transmission on the lowest g rate
+ * even more
+ */
+ if (rates[0].count >= 3) {
+ --rates[0].count;
+ ++rates[2].count;
+ }
+
+ /* Adjust amount of rates defined */
+ count += 2;
+ } else {
+ /* Keep fallback rate at 1Mbps. */
+ rates[2] = rates[1];
+
+ /* Inject 2 transmissions on lowest g-rate */
+ rates[1].idx = 4;
+ rates[1].count = 2;
+
+ /* Adjust amount of rates defined */
+ count += 1;
+ }
+ }
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+ int rateid;
+ u8 count;
+
+ if (rates[i].idx < 0)
+ break;
+ WARN_ON(rates[i].count > 15);
+ rateid = wfx_get_hw_rate(wdev, &rates[i]);
+ // Pack two values in each byte of policy->rates
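+ // e.g. hw rates 4 and 5 with counts 3 and 2 end up as
+ // policy->rates[2] == 0x23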
+ count = rates[i].count;
+ if (rateid % 2)
+ count <<= 4;
+ policy->rates[rateid / 2] |= count;
+ }
+}
+
+static bool tx_policy_is_equal(const struct tx_policy *a,
+ const struct tx_policy *b)
+{
+ return !memcmp(a->rates, b->rates, sizeof(a->rates));
+}
+
+static int wfx_tx_policy_find(struct tx_policy_cache *cache,
+ struct tx_policy *wanted)
+{
+ struct tx_policy *it;
+
+ list_for_each_entry(it, &cache->used, link)
+ if (tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ list_for_each_entry(it, &cache->free, link)
+ if (tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ return -1;
+}
+
+static void wfx_tx_policy_use(struct tx_policy_cache *cache,
+ struct tx_policy *entry)
+{
+ ++entry->usage_count;
+ list_move(&entry->link, &cache->used);
+}
+
+static int wfx_tx_policy_release(struct tx_policy_cache *cache,
+ struct tx_policy *entry)
+{
+ int ret = --entry->usage_count;
+
+ if (!ret)
+ list_move(&entry->link, &cache->free);
+ return ret;
+}
+
+static int wfx_tx_policy_get(struct wfx_vif *wvif,
+ struct ieee80211_tx_rate *rates,
+ bool *renew)
+{
+ int idx;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct tx_policy wanted;
+
+ wfx_tx_policy_build(wvif, &wanted, rates);
+
+ spin_lock_bh(&cache->lock);
+ if (WARN_ON(list_empty(&cache->free))) {
+ spin_unlock_bh(&cache->lock);
+ return WFX_INVALID_RATE_ID;
+ }
+ idx = wfx_tx_policy_find(cache, &wanted);
+ if (idx >= 0) {
+ *renew = false;
+ } else {
+ struct tx_policy *entry;
+ *renew = true;
+ /* If the policy is not found, create a new one
+ * using the oldest entry in the "free" list
+ */
+ entry = list_entry(cache->free.prev, struct tx_policy, link);
+ memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
+ entry->uploaded = 0;
+ entry->usage_count = 0;
+ idx = entry - cache->cache;
+ }
+ wfx_tx_policy_use(cache, &cache->cache[idx]);
+ if (list_empty(&cache->free)) {
+ /* Lock TX queues. */
+ wfx_tx_queues_lock(wvif->wdev);
+ }
+ spin_unlock_bh(&cache->lock);
+ return idx;
+}
+
+static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
+{
+ int usage, locked;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+
+ spin_lock_bh(&cache->lock);
+ locked = list_empty(&cache->free);
+ usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
+ if (locked && !usage) {
+ /* Unlock TX queues. */
+ wfx_tx_queues_unlock(wvif->wdev);
+ }
+ spin_unlock_bh(&cache->lock);
+}
+
+static int wfx_tx_policy_upload(struct wfx_vif *wvif)
+{
+ int i;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct hif_mib_set_tx_rate_retry_policy *arg =
+ kzalloc(struct_size(arg,
+ tx_rate_retry_policy,
+ HIF_MIB_NUM_TX_RATE_RETRY_POLICIES),
+ GFP_KERNEL);
+ struct hif_mib_tx_rate_retry_policy *dst;
+
+ spin_lock_bh(&cache->lock);
+ /* Upload only modified entries. */
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) {
+ struct tx_policy *src = &cache->cache[i];
+
+ if (!src->uploaded && memzcmp(src->rates, sizeof(src->rates))) {
+ dst = arg->tx_rate_retry_policy +
+ arg->num_tx_rate_policies;
+
+ dst->policy_index = i;
+ dst->short_retry_count = 255;
+ dst->long_retry_count = 255;
+ dst->first_rate_sel = 1;
+ dst->terminate = 1;
+ dst->count_init = 1;
+ memcpy(&dst->rates, src->rates, sizeof(src->rates));
+ src->uploaded = 1;
+ arg->num_tx_rate_policies++;
+ }
+ }
+ spin_unlock_bh(&cache->lock);
+ hif_set_tx_rate_retry_policy(wvif, arg);
+ kfree(arg);
+ return 0;
+}
+
+static void wfx_tx_policy_upload_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, tx_policy_upload_work);
+
+ wfx_tx_policy_upload(wvif);
+
+ wfx_tx_unlock(wvif->wdev);
+ wfx_tx_queues_unlock(wvif->wdev);
+}
+
+void wfx_tx_policy_init(struct wfx_vif *wvif)
+{
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ int i;
+
+ memset(cache, 0, sizeof(*cache));
+
+ spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->used);
+ INIT_LIST_HEAD(&cache->free);
+ INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
+
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+ list_add(&cache->cache[i].link, &cache->free);
+}
+
+/* Link ID related functions */
+
+static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
+{
+ int i, ret = 0;
+ unsigned long max_inactivity = 0;
+ unsigned long now = jiffies;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ if (!wvif->link_id_db[i].status) {
+ ret = i + 1;
+ break;
+ } else if (wvif->link_id_db[i].status != WFX_LINK_HARD &&
+ !wvif->wdev->tx_queue_stats.link_map_cache[i + 1]) {
+ unsigned long inactivity =
+ now - wvif->link_id_db[i].timestamp;
+
+ if (inactivity < max_inactivity)
+ continue;
+ max_inactivity = inactivity;
+ ret = i + 1;
+ }
+ }
+
+ if (ret) {
+ struct wfx_link_entry *entry = &wvif->link_id_db[ret - 1];
+
+ entry->status = WFX_LINK_RESERVE;
+ ether_addr_copy(entry->mac, mac);
+ memset(&entry->buffered, 0, WFX_MAX_TID);
+ skb_queue_head_init(&entry->rx_queue);
+ wfx_tx_lock(wvif->wdev);
+
+ if (!schedule_work(&wvif->link_id_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else {
+ dev_info(wvif->wdev->dev, "no more link-id available\n");
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return ret;
+}
+
+int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac)
+{
+ int i, ret = 0;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ if (ether_addr_equal(mac, wvif->link_id_db[i].mac) &&
+ wvif->link_id_db[i].status) {
+ wvif->link_id_db[i].timestamp = jiffies;
+ ret = i + 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return ret;
+}
+
+static int wfx_map_link(struct wfx_vif *wvif,
+ struct wfx_link_entry *link_entry, int sta_id)
+{
+ int ret;
+
+ ret = hif_map_link(wvif, link_entry->mac, 0, sta_id);
+
+ if (ret == 0)
+ /* Save the MAC address currently associated with the peer
+ * for future unmap request
+ */
+ ether_addr_copy(link_entry->old_mac, link_entry->mac);
+
+ return ret;
+}
+
+int wfx_unmap_link(struct wfx_vif *wvif, int sta_id)
+{
+ u8 *mac_addr = NULL;
+
+ if (sta_id)
+ mac_addr = wvif->link_id_db[sta_id - 1].old_mac;
+
+ return hif_map_link(wvif, mac_addr, 1, sta_id);
+}
+
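+/*
+ * Garbage collect link ids: map freshly reserved entries to the device,
+ * expire "soft" entries that have been idle for more than
+ * WFX_LINK_ID_GC_TIMEOUT and re-arm the work for the next deadline.
+ */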
+void wfx_link_id_gc_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, link_id_gc_work.work);
+ unsigned long now = jiffies;
+ unsigned long next_gc = -1;
+ long ttl;
+ u32 mask;
+ int i;
+
+ if (wvif->state != WFX_STATE_AP)
+ return;
+
+ wfx_tx_lock_flush(wvif->wdev);
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ bool need_reset = false;
+
+ mask = BIT(i + 1);
+ if (wvif->link_id_db[i].status == WFX_LINK_RESERVE ||
+ (wvif->link_id_db[i].status == WFX_LINK_HARD &&
+ !(wvif->link_id_map & mask))) {
+ if (wvif->link_id_map & mask) {
+ wvif->sta_asleep_mask &= ~mask;
+ wvif->pspoll_mask &= ~mask;
+ need_reset = true;
+ }
+ wvif->link_id_map |= mask;
+ if (wvif->link_id_db[i].status != WFX_LINK_HARD)
+ wvif->link_id_db[i].status = WFX_LINK_SOFT;
+
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (need_reset)
+ wfx_unmap_link(wvif, i + 1);
+ wfx_map_link(wvif, &wvif->link_id_db[i], i + 1);
+ next_gc = min(next_gc, WFX_LINK_ID_GC_TIMEOUT);
+ spin_lock_bh(&wvif->ps_state_lock);
+ } else if (wvif->link_id_db[i].status == WFX_LINK_SOFT) {
+ ttl = wvif->link_id_db[i].timestamp - now +
+ WFX_LINK_ID_GC_TIMEOUT;
+ if (ttl <= 0) {
+ need_reset = true;
+ wvif->link_id_db[i].status = WFX_LINK_OFF;
+ wvif->link_id_map &= ~mask;
+ wvif->sta_asleep_mask &= ~mask;
+ wvif->pspoll_mask &= ~mask;
+ spin_unlock_bh(&wvif->ps_state_lock);
+ wfx_unmap_link(wvif, i + 1);
+ spin_lock_bh(&wvif->ps_state_lock);
+ } else {
+ next_gc = min_t(unsigned long, next_gc, ttl);
+ }
+ }
+ if (need_reset)
+ skb_queue_purge(&wvif->link_id_db[i].rx_queue);
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (next_gc != -1)
+ schedule_delayed_work(&wvif->link_id_gc_work, next_gc);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+void wfx_link_id_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, link_id_work);
+
+ wfx_tx_flush(wvif->wdev);
+ wfx_link_id_gc_work(&wvif->link_id_gc_work.work);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+/* Tx implementation */
+
+static bool ieee80211_is_action_back(struct ieee80211_hdr *hdr)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+ if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
+ return false;
+ return true;
+}
+
+static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
+ struct wfx_tx_priv *tx_priv,
+ struct ieee80211_sta *sta)
+{
+ u32 mask = ~BIT(tx_priv->raw_link_id);
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ if (ieee80211_is_auth(hdr->frame_control)) {
+ wvif->sta_asleep_mask &= mask;
+ wvif->pspoll_mask &= mask;
+ }
+
+ if (tx_priv->link_id == WFX_LINK_ID_AFTER_DTIM &&
+ !wvif->mcast_buffered) {
+ wvif->mcast_buffered = true;
+ if (wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_start_work);
+ }
+
+ if (tx_priv->raw_link_id) {
+ wvif->link_id_db[tx_priv->raw_link_id - 1].timestamp = jiffies;
+ if (tx_priv->tid < WFX_MAX_TID)
+ wvif->link_id_db[tx_priv->raw_link_id - 1].buffered[tx_priv->tid]++;
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tx_priv->tid, true);
+}
+
+static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr)
+{
+ struct wfx_sta_priv *sta_priv =
+ sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL;
+ const u8 *da = ieee80211_get_DA(hdr);
+ int ret;
+
+ if (sta_priv && sta_priv->link_id)
+ return sta_priv->link_id;
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+ if (is_multicast_ether_addr(da))
+ return 0;
+ ret = wfx_find_link_id(wvif, da);
+ if (!ret)
+ ret = wfx_alloc_link_id(wvif, da);
+ if (!ret) {
+ dev_err(wvif->wdev->dev, "no more link-id available\n");
+ return WFX_LINK_ID_NO_ASSOC;
+ }
+ return ret;
+}
+
+static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+{
+ int i;
+ bool finished;
+
+ // Firmware is not able to mix rates with different flags
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+ rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+ }
+
+ // Sort rates and remove duplicates
+ do {
+ finished = true;
+ for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+ if (rates[i + 1].idx == rates[i].idx &&
+ rates[i].idx != -1) {
+ rates[i].count =
+ max_t(int, rates[i].count,
+ rates[i + 1].count);
+ rates[i + 1].idx = -1;
+ rates[i + 1].count = 0;
+
+ finished = false;
+ }
+ if (rates[i + 1].idx > rates[i].idx) {
+ swap(rates[i + 1], rates[i]);
+ finished = false;
+ }
+ }
+ } while (!finished);
+ // All retries use long GI
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+}
+
+static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
+ struct ieee80211_tx_info *tx_info)
+{
+ bool tx_policy_renew = false;
+ u8 rate_id;
+
+ rate_id = wfx_tx_policy_get(wvif,
+ tx_info->driver_rates, &tx_policy_renew);
+ WARN(rate_id == WFX_INVALID_RATE_ID, "unable to get a valid Tx policy");
+
+ if (tx_policy_renew) {
+ /* FIXME: It's not so optimal to stop TX queues every now and
+ * then. Better to reimplement task scheduling with a counter.
+ */
+ wfx_tx_lock(wvif->wdev);
+ wfx_tx_queues_lock(wvif->wdev);
+ if (!schedule_work(&wvif->tx_policy_upload_work)) {
+ wfx_tx_queues_unlock(wvif->wdev);
+ wfx_tx_unlock(wvif->wdev);
+ }
+ }
+ return rate_id;
+}
+
+static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info)
+{
+ struct ieee80211_tx_rate *rate = &tx_info->driver_rates[0];
+ struct hif_ht_tx_parameters ret = { };
+
+ if (!(rate->flags & IEEE80211_TX_RC_MCS))
+ ret.frame_format = HIF_FRAME_FORMAT_NON_HT;
+ else if (!(rate->flags & IEEE80211_TX_RC_GREEN_FIELD))
+ ret.frame_format = HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
+ else
+ ret.frame_format = HIF_FRAME_FORMAT_GF_HT_11N;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ ret.short_gi = 1;
+ if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+ ret.stbc = 0; // FIXME: Not yet supported by firmware?
+ return ret;
+}
+
+static u8 wfx_tx_get_tid(struct ieee80211_hdr *hdr)
+{
+ // FIXME: ieee80211_get_tid(hdr) should be sufficient for all cases.
+ if (!ieee80211_is_data(hdr->frame_control))
+ return WFX_MAX_TID;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ return ieee80211_get_tid(hdr);
+ else
+ return 0;
+}
+
+static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
+{
+ int mic_space;
+
+ if (!hw_key)
+ return 0;
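+ // TKIP appends an 8-byte Michael MIC in addition to the ICV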
+ mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
+ return hw_key->icv_len + mic_space;
+}
+
+static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct hif_msg *hif_msg;
+ struct hif_req_tx *req;
+ struct wfx_tx_priv *tx_priv;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int queue_id = tx_info->hw_queue;
+ size_t offset = (size_t) skb->data & 3;
+ int wmsg_len = sizeof(struct hif_msg) +
+ sizeof(struct hif_req_tx) + offset;
+
+ WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
+ wfx_tx_fixup_rates(tx_info->driver_rates);
+
+ // From now on, tx_info->control is unusable
+ memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
+ // Fill tx_priv
+ tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
+ tx_priv->tid = wfx_tx_get_tid(hdr);
+ tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
+ tx_priv->link_id = tx_priv->raw_link_id;
+ if (ieee80211_has_protected(hdr->frame_control))
+ tx_priv->hw_key = hw_key;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ tx_priv->link_id = WFX_LINK_ID_AFTER_DTIM;
+ if (sta && (sta->uapsd_queues & BIT(queue_id)))
+ tx_priv->link_id = WFX_LINK_ID_UAPSD;
+
+ // Fill hif_msg
+ WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
+ WARN(offset & 1, "attempt to transmit an unaligned frame");
+ skb_put(skb, wfx_tx_get_icv_len(tx_priv->hw_key));
+ skb_push(skb, wmsg_len);
+ memset(skb->data, 0, wmsg_len);
+ hif_msg = (struct hif_msg *)skb->data;
+ hif_msg->len = cpu_to_le16(skb->len);
+ hif_msg->id = HIF_REQ_ID_TX;
+ hif_msg->interface = wvif->id;
+ if (skb->len > wvif->wdev->hw_caps.size_inp_ch_buf) {
+ dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n",
+ skb->len, wvif->wdev->hw_caps.size_inp_ch_buf);
+ skb_pull(skb, wmsg_len);
+ return -EIO;
+ }
+
+ // Fill tx request
+ req = (struct hif_req_tx *)hif_msg->body;
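+ // packet_id is echoed back in the Tx confirmation; encode the queue
+ // and the 802.11 sequence number so the frame can be matched again in
+ // wfx_tx_confirm_cb()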
+ req->packet_id = queue_id << 16 |
+ IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ req->data_flags.fc_offset = offset;
+ req->queue_id.peer_sta_id = tx_priv->raw_link_id;
+ // Queue indices are inverted between firmware and Linux
+ req->queue_id.queue_id = 3 - queue_id;
+ req->ht_tx_parameters = wfx_tx_get_tx_parms(wvif->wdev, tx_info);
+ req->tx_flags.retry_policy_index = wfx_tx_get_rate_id(wvif, tx_info);
+
+ // Auxiliary operations
+ wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
+ wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+ wfx_bh_request_tx(wvif->wdev);
+ return 0;
+}
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif;
+ struct ieee80211_sta *sta = control ? control->sta : NULL;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ size_t driver_data_room = FIELD_SIZEOF(struct ieee80211_tx_info,
+ rate_driver_data);
+
+ compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room,
+ "struct tx_priv is too large");
+ WARN(skb->next || skb->prev, "skb is already member of a list");
+ // control.vif can be NULL for injected frames
+ if (tx_info->control.vif)
+ wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
+ else
+ wvif = wvif_iterate(wdev, NULL);
+ if (WARN_ON(!wvif))
+ goto drop;
+ // FIXME: why?
+ if (ieee80211_is_action_back(hdr)) {
+ dev_info(wdev->dev, "drop BA action\n");
+ goto drop;
+ }
+ if (wfx_tx_inner(wvif, sta, skb))
+ goto drop;
+
+ return;
+
+drop:
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
+
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
+{
+ int i;
+ int tx_count;
+ struct sk_buff *skb;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_tx_info *tx_info;
+ const struct wfx_tx_priv *tx_priv;
+
+ skb = wfx_pending_get(wvif->wdev, arg->packet_id);
+ if (!skb) {
+ dev_warn(wvif->wdev->dev,
+ "received unknown packet_id (%#.8x) from chip\n",
+ arg->packet_id);
+ return;
+ }
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_priv = wfx_skb_tx_priv(skb);
+ _trace_tx_stats(arg, skb,
+ wfx_pending_get_pkt_us_delay(wvif->wdev, skb));
+
+ // tx_priv can still be used here, but tx_info->status must not be touched yet.
+ tx_count = arg->ack_failures;
+ if (!arg->status || arg->ack_failures)
+ tx_count += 1; // Also report success
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ rate = &tx_info->status.rates[i];
+ if (rate->idx < 0)
+ break;
+ if (tx_count < rate->count && arg->status && arg->ack_failures)
+ dev_dbg(wvif->wdev->dev, "not all retries were consumed: %d != %d\n",
+ rate->count, tx_count);
+ if (tx_count <= rate->count && tx_count &&
+ arg->txed_rate != wfx_get_hw_rate(wvif->wdev, rate))
+ dev_dbg(wvif->wdev->dev,
+ "inconsistent tx_info rates: %d != %d\n",
+ arg->txed_rate,
+ wfx_get_hw_rate(wvif->wdev, rate));
+ if (tx_count > rate->count) {
+ tx_count -= rate->count;
+ } else if (!tx_count) {
+ rate->count = 0;
+ rate->idx = -1;
+ } else {
+ rate->count = tx_count;
+ tx_count = 0;
+ }
+ }
+ if (tx_count)
+ dev_dbg(wvif->wdev->dev,
+ "%d more retries than expected\n", tx_count);
+ skb_trim(skb, skb->len - wfx_tx_get_icv_len(tx_priv->hw_key));
+
+ // From now on, tx_info->status can be modified, but tx_priv must not
+ // be touched anymore
+ // FIXME: use ieee80211_tx_info_clear_status()
+ memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
+ memset(tx_info->pad, 0, sizeof(tx_info->pad));
+
+ if (!arg->status) {
+ if (wvif->bss_loss_state &&
+ arg->packet_id == wvif->bss_loss_confirm_id)
+ wfx_cqm_bssloss_sm(wvif, 0, 1, 0);
+ tx_info->status.tx_time =
+ arg->media_delay - arg->tx_queue_delay;
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ } else if (arg->status == HIF_REQUEUE) {
+ /* "REQUEUE" means "implicit suspend" */
+ struct hif_ind_suspend_resume_tx suspend = {
+ .suspend_resume_flags.resume = 0,
+ .suspend_resume_flags.bc_mc_only = 1,
+ };
+
+ WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
+ wfx_suspend_resume(wvif, &suspend);
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ } else {
+ if (wvif->bss_loss_state &&
+ arg->packet_id == wvif->bss_loss_confirm_id)
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 1);
+ }
+ wfx_pending_remove(wvif->wdev, skb);
+}
+
+static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb,
+ struct hif_req_tx *req)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int tid = wfx_tx_get_tid(hdr);
+ int raw_link_id = req->queue_id.peer_sta_id;
+ u8 *buffered;
+
+ if (raw_link_id && tid < WFX_MAX_TID) {
+ buffered = wvif->link_id_db[raw_link_id - 1].buffered;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ WARN(!buffered[tid], "inconsistent notification");
+ buffered[tid]--;
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (!buffered[tid]) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tid, false);
+ rcu_read_unlock();
+ }
+ }
+}
+
+void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ unsigned int offset = sizeof(struct hif_req_tx) +
+ sizeof(struct hif_msg) +
+ req->data_flags.fc_offset;
+
+ WARN_ON(!wvif);
+ skb_pull(skb, offset);
+ wfx_notify_buffered_tx(wvif, skb, req);
+ wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
new file mode 100644
index 000000000000..29faa5640516
--- /dev/null
+++ b/drivers/staging/wfx/data_tx.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_TX_H
+#define WFX_DATA_TX_H
+
+#include <linux/list.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+// FIXME: use IEEE80211_NUM_TIDS
+#define WFX_MAX_TID 8
+
+struct wfx_tx_priv;
+struct wfx_dev;
+struct wfx_vif;
+
+enum wfx_link_status {
+ WFX_LINK_OFF,
+ WFX_LINK_RESERVE,
+ WFX_LINK_SOFT,
+ WFX_LINK_HARD,
+};
+
+struct wfx_link_entry {
+ unsigned long timestamp;
+ enum wfx_link_status status;
+ u8 mac[ETH_ALEN];
+ u8 old_mac[ETH_ALEN];
+ u8 buffered[WFX_MAX_TID];
+ struct sk_buff_head rx_queue;
+};
+
+struct tx_policy {
+ struct list_head link;
+ u8 rates[12];
+ u8 usage_count;
+ u8 uploaded;
+};
+
+struct tx_policy_cache {
+ struct tx_policy cache[HIF_MIB_NUM_TX_RATE_RETRY_POLICIES];
+ // FIXME: use a trees and drop hash from tx_policy
+ struct list_head used;
+ struct list_head free;
+ spinlock_t lock;
+};
+
+struct wfx_tx_priv {
+ ktime_t xmit_timestamp;
+ struct ieee80211_key_conf *hw_key;
+ u8 link_id;
+ u8 raw_link_id;
+ u8 tid;
+} __packed;
+
+void wfx_tx_policy_init(struct wfx_vif *wvif);
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg);
+void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
+
+int wfx_unmap_link(struct wfx_vif *wvif, int link_id);
+void wfx_link_id_work(struct work_struct *work);
+void wfx_link_id_gc_work(struct work_struct *work);
+int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac);
+
+static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return NULL;
+ tx_info = IEEE80211_SKB_CB(skb);
+ return (struct wfx_tx_priv *)tx_info->rate_driver_data;
+}
+
+static inline struct hif_req_tx *wfx_skb_txreq(struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *) hif->body;
+
+ return req;
+}
+
+#endif /* WFX_DATA_TX_H */
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
new file mode 100644
index 000000000000..d17a75242365
--- /dev/null
+++ b/drivers/staging/wfx/debug.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+
+#include "debug.h"
+#include "wfx.h"
+#include "sta.h"
+#include "main.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
+#define CREATE_TRACE_POINTS
+#include "traces.h"
+
+static const struct trace_print_flags hif_msg_print_map[] = {
+ hif_msg_list,
+};
+
+static const struct trace_print_flags hif_mib_print_map[] = {
+ hif_mib_list,
+};
+
+static const struct trace_print_flags wfx_reg_print_map[] = {
+ wfx_reg_list,
+};
+
+static const char *get_symbol(unsigned long val,
+ const struct trace_print_flags *symbol_array)
+{
+ int i;
+
+ for (i = 0; symbol_array[i].mask != -1; i++) {
+ if (val == symbol_array[i].mask)
+ return symbol_array[i].name;
+ }
+
+ return "unknown";
+}
+
+const char *get_hif_name(unsigned long id)
+{
+ return get_symbol(id, hif_msg_print_map);
+}
+
+const char *get_mib_name(unsigned long id)
+{
+ return get_symbol(id, hif_mib_print_map);
+}
+
+const char *get_reg_name(unsigned long id)
+{
+ return get_symbol(id, wfx_reg_print_map);
+}
+
+static int wfx_counters_show(struct seq_file *seq, void *v)
+{
+ int ret;
+ struct wfx_dev *wdev = seq->private;
+ struct hif_mib_extended_count_table counters;
+
+ ret = hif_get_counters_table(wdev, &counters);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -EIO;
+
+#define PUT_COUNTER(name) \
+ seq_printf(seq, "%24s %d\n", #name ":",\
+ le32_to_cpu(counters.count_##name))
+
+ PUT_COUNTER(tx_packets);
+ PUT_COUNTER(tx_multicast_frames);
+ PUT_COUNTER(tx_frames_success);
+ PUT_COUNTER(tx_frame_failures);
+ PUT_COUNTER(tx_frames_retried);
+ PUT_COUNTER(tx_frames_multi_retried);
+
+ PUT_COUNTER(rts_success);
+ PUT_COUNTER(rts_failures);
+ PUT_COUNTER(ack_failures);
+
+ PUT_COUNTER(rx_packets);
+ PUT_COUNTER(rx_frames_success);
+ PUT_COUNTER(rx_packet_errors);
+ PUT_COUNTER(plcp_errors);
+ PUT_COUNTER(fcs_errors);
+ PUT_COUNTER(rx_decryption_failures);
+ PUT_COUNTER(rx_mic_failures);
+ PUT_COUNTER(rx_no_key_failures);
+ PUT_COUNTER(rx_frame_duplicates);
+ PUT_COUNTER(rx_multicast_frames);
+ PUT_COUNTER(rx_cmacicv_errors);
+ PUT_COUNTER(rx_cmac_replays);
+ PUT_COUNTER(rx_mgmt_ccmp_replays);
+
+ PUT_COUNTER(rx_beacon);
+ PUT_COUNTER(miss_beacon);
+
+#undef PUT_COUNTER
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_counters);
+
+static const char * const channel_names[] = {
+ [0] = "1M",
+ [1] = "2M",
+ [2] = "5.5M",
+ [3] = "11M",
+ /* Entries 4 and 5 do not exist */
+ [6] = "6M",
+ [7] = "9M",
+ [8] = "12M",
+ [9] = "18M",
+ [10] = "24M",
+ [11] = "36M",
+ [12] = "48M",
+ [13] = "54M",
+ [14] = "MCS0",
+ [15] = "MCS1",
+ [16] = "MCS2",
+ [17] = "MCS3",
+ [18] = "MCS4",
+ [19] = "MCS5",
+ [20] = "MCS6",
+ [21] = "MCS7",
+};
+
+static int wfx_rx_stats_show(struct seq_file *seq, void *v)
+{
+ struct wfx_dev *wdev = seq->private;
+ struct hif_rx_stats *st = &wdev->rx_stats;
+ int i;
+
+ mutex_lock(&wdev->rx_stats_lock);
+ seq_printf(seq, "Timestamp: %dus\n", st->date);
+ seq_printf(seq, "Low power clock: frequency %uHz, external %s\n",
+ st->pwr_clk_freq,
+ st->is_ext_pwr_clk ? "yes" : "no");
+ seq_printf(seq,
+ "N. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
+ st->nb_rx_frame, st->per_total, st->throughput);
+ seq_puts(seq, " Num. of PER RSSI SNR CFO\n");
+ seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n");
+ for (i = 0; i < ARRAY_SIZE(channel_names); i++) {
+ if (channel_names[i])
+ seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n",
+ channel_names[i], st->nb_rx_by_rate[i],
+ st->per[i], st->rssi[i] / 100,
+ st->snr[i] / 100, st->cfo[i]);
+ }
+ mutex_unlock(&wdev->rx_stats_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats);
+
+static ssize_t wfx_send_pds_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wfx_dev *wdev = file->private_data;
+ char *buf;
+ int ret;
+
+ if (*ppos != 0) {
+ dev_dbg(wdev->dev, "PDS data must be written in one transaction");
+ return -EBUSY;
+ }
+ buf = memdup_user(user_buf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+ *ppos = *ppos + count;
+ ret = wfx_send_pds(wdev, buf, count);
+ kfree(buf);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static const struct file_operations wfx_send_pds_fops = {
+ .open = simple_open,
+ .write = wfx_send_pds_write,
+};
+
+static ssize_t wfx_burn_slk_key_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wfx_dev *wdev = file->private_data;
+
+ dev_info(wdev->dev, "this driver does not support secure link\n");
+ return -EINVAL;
+}
+
+static const struct file_operations wfx_burn_slk_key_fops = {
+ .open = simple_open,
+ .write = wfx_burn_slk_key_write,
+};
+
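+/*
+ * send_hif_msg protocol: userspace writes one complete HIF request, then
+ * reads back the raw reply buffer. The completion below serializes the two
+ * steps.
+ */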
+struct dbgfs_hif_msg {
+ struct wfx_dev *wdev;
+ struct completion complete;
+ u8 reply[1024];
+ int ret;
+};
+
+static ssize_t wfx_send_hif_msg_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+ struct wfx_dev *wdev = context->wdev;
+ struct hif_msg *request;
+
+ if (completion_done(&context->complete)) {
+ dev_dbg(wdev->dev, "read previous result before start a new one\n");
+ return -EBUSY;
+ }
+ if (count < sizeof(struct hif_msg))
+ return -EINVAL;
+
+ // wfx_cmd_send() checks that the reply buffer is wide enough, but
+ // does not return the precise length read. The user has to know how
+ // many bytes should be read. Filling the reply buffer with a memory
+ // pattern may help the user.
+ memset(context->reply, 0xFF, sizeof(context->reply));
+ request = memdup_user(user_buf, count);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
+ if (request->len != count) {
+ kfree(request);
+ return -EINVAL;
+ }
+ context->ret = wfx_cmd_send(wdev, request, context->reply,
+ sizeof(context->reply), false);
+
+ kfree(request);
+ complete(&context->complete);
+ return count;
+}
+
+static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+ int ret;
+
+ if (count > sizeof(context->reply))
+ return -EINVAL;
+ ret = wait_for_completion_interruptible(&context->complete);
+ if (ret)
+ return ret;
+ if (context->ret < 0)
+ return context->ret;
+ // Be careful: write() expects a full message while read() only
+ // returns the payload
+ if (copy_to_user(user_buf, context->reply, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static int wfx_send_hif_msg_open(struct inode *inode, struct file *file)
+{
+ struct dbgfs_hif_msg *context = kzalloc(sizeof(*context), GFP_KERNEL);
+
+ if (!context)
+ return -ENOMEM;
+ context->wdev = inode->i_private;
+ init_completion(&context->complete);
+ file->private_data = context;
+ return 0;
+}
+
+static int wfx_send_hif_msg_release(struct inode *inode, struct file *file)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+
+ kfree(context);
+ return 0;
+}
+
+static const struct file_operations wfx_send_hif_msg_fops = {
+ .open = wfx_send_hif_msg_open,
+ .release = wfx_send_hif_msg_release,
+ .write = wfx_send_hif_msg_write,
+ .read = wfx_send_hif_msg_read,
+};
+
+int wfx_debug_init(struct wfx_dev *wdev)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir);
+ debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops);
+ debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops);
+ debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops);
+ debugfs_create_file("burn_slk_key", 0200, d, wdev,
+ &wfx_burn_slk_key_fops);
+ debugfs_create_file("send_hif_msg", 0600, d, wdev,
+ &wfx_send_hif_msg_fops);
+
+ return 0;
+}
diff --git a/drivers/staging/wfx/debug.h b/drivers/staging/wfx/debug.h
new file mode 100644
index 000000000000..6f2f84d64c9e
--- /dev/null
+++ b/drivers/staging/wfx/debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2011, ST-Ericsson
+ */
+#ifndef WFX_DEBUG_H
+#define WFX_DEBUG_H
+
+struct wfx_dev;
+
+int wfx_debug_init(struct wfx_dev *wdev);
+
+const char *get_hif_name(unsigned long id);
+const char *get_mib_name(unsigned long id);
+const char *get_reg_name(unsigned long id);
+
+#endif /* WFX_DEBUG_H */
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
new file mode 100644
index 000000000000..dbf8bda71ff7
--- /dev/null
+++ b/drivers/staging/wfx/fwio.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Firmware loading.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitfield.h>
+
+#include "fwio.h"
+#include "wfx.h"
+#include "hwio.h"
+
+// Addresses below are in SRAM area
+#define WFX_DNLD_FIFO 0x09004000
+#define DNLD_BLOCK_SIZE 0x0400
+#define DNLD_FIFO_SIZE 0x8000 // (32 * DNLD_BLOCK_SIZE)
+// Download Control Area (DCA)
+#define WFX_DCA_IMAGE_SIZE 0x0900C000
+#define WFX_DCA_PUT 0x0900C004
+#define WFX_DCA_GET 0x0900C008
+#define WFX_DCA_HOST_STATUS 0x0900C00C
+#define HOST_READY 0x87654321
+#define HOST_INFO_READ 0xA753BD99
+#define HOST_UPLOAD_PENDING 0xABCDDCBA
+#define HOST_UPLOAD_COMPLETE 0xD4C64A99
+#define HOST_OK_TO_JUMP 0x174FC882
+#define WFX_DCA_NCP_STATUS 0x0900C010
+#define NCP_NOT_READY 0x12345678
+#define NCP_READY 0x87654321
+#define NCP_INFO_READY 0xBD53EF99
+#define NCP_DOWNLOAD_PENDING 0xABCDDCBA
+#define NCP_DOWNLOAD_COMPLETE 0xCAFEFECA
+#define NCP_AUTH_OK 0xD4C64A99
+#define NCP_AUTH_FAIL 0x174FC882
+#define NCP_PUB_KEY_RDY 0x7AB41D19
+#define WFX_DCA_FW_SIGNATURE 0x0900C014
+#define FW_SIGNATURE_SIZE 0x40
+#define WFX_DCA_FW_HASH 0x0900C054
+#define FW_HASH_SIZE 0x08
+#define WFX_DCA_FW_VERSION 0x0900C05C
+#define FW_VERSION_SIZE 0x04
+#define WFX_DCA_RESERVED 0x0900C060
+#define DCA_RESERVED_SIZE 0x20
+#define WFX_STATUS_INFO 0x0900C080
+#define WFX_BOOTLOADER_LABEL 0x0900C084
+#define BOOTLOADER_LABEL_SIZE 0x3C
+#define WFX_PTE_INFO 0x0900C0C0
+#define PTE_INFO_KEYSET_IDX 0x0D
+#define PTE_INFO_SIZE 0x10
+#define WFX_ERR_INFO 0x0900C0D0
+#define ERR_INVALID_SEC_TYPE 0x05
+#define ERR_SIG_VERIF_FAILED 0x0F
+#define ERR_AES_CTRL_KEY 0x10
+#define ERR_ECC_PUB_KEY 0x11
+#define ERR_MAC_KEY 0x18
+
+#define DCA_TIMEOUT 50 // milliseconds
+#define WAKEUP_TIMEOUT 200 // milliseconds
+
+static const char * const fwio_error_strings[] = {
+ [ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
+ [ERR_SIG_VERIF_FAILED] = "Signature verification failed",
+ [ERR_AES_CTRL_KEY] = "AES control key not initialized",
+ [ERR_ECC_PUB_KEY] = "ECC public key not initialized",
+ [ERR_MAC_KEY] = "MAC key not initialized",
+};
+
+/*
+ * request_firmware() allocates data using vmalloc(). This is not compatible
+ * with underlying hardware that uses DMA. The function below detects this
+ * case and allocates a bounce buffer if necessary.
+ *
+ * Note that, when in doubt, you can enable CONFIG_DEBUG_SG to ask the kernel
+ * to detect this problem at runtime (otherwise, the kernel fails silently).
+ *
+ * NOTE: it may also be possible to use 'pages' from struct firmware and avoid
+ * the bounce buffer
+ */
+static int sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf,
+ size_t len)
+{
+ int ret;
+ const u8 *tmp;
+
+ if (!virt_addr_valid(buf)) {
+ tmp = kmemdup(buf, len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ } else {
+ tmp = buf;
+ }
+ ret = sram_buf_write(wdev, addr, tmp, len);
+ if (!virt_addr_valid(buf))
+ kfree(tmp);
+ return ret;
+}
+
+int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
+ const struct firmware **fw, int *file_offset)
+{
+ int keyset_file;
+ char filename[256];
+ const char *data;
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s_%02X.sec", wdev->pdata.file_fw,
+ keyset_chip);
+ ret = firmware_request_nowarn(fw, filename, wdev->dev);
+ if (ret) {
+ dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n",
+ filename, wdev->pdata.file_fw);
+ snprintf(filename, sizeof(filename), "%s.sec",
+ wdev->pdata.file_fw);
+ ret = request_firmware(fw, filename, wdev->dev);
+ if (ret) {
+ dev_err(wdev->dev, "can't load %s\n", filename);
+ *fw = NULL;
+ return ret;
+ }
+ }
+
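+ // Signed firmware may start with an 8 byte "KEYSETxx" header, where
+ // xx is the keyset in ASCII hex; legacy images have no header and
+ // implicitly use keyset 0x90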
+ data = (*fw)->data;
+ if (memcmp(data, "KEYSET", 6) != 0) {
+ // Legacy firmware format
+ *file_offset = 0;
+ keyset_file = 0x90;
+ } else {
+ *file_offset = 8;
+ keyset_file = (hex_to_bin(data[6]) * 16) | hex_to_bin(data[7]);
+ if (keyset_file < 0) {
+ dev_err(wdev->dev, "%s corrupted\n", filename);
+ release_firmware(*fw);
+ *fw = NULL;
+ return -EINVAL;
+ }
+ }
+ if (keyset_file != keyset_chip) {
+ dev_err(wdev->dev, "firmware keyset is incompatible with chip (file: 0x%02X, chip: 0x%02X)\n",
+ keyset_file, keyset_chip);
+ release_firmware(*fw);
+ *fw = NULL;
+ return -ENODEV;
+ }
+ wdev->keyset = keyset_file;
+ return 0;
+}
+
+static int wait_ncp_status(struct wfx_dev *wdev, u32 status)
+{
+ ktime_t now, start;
+ u32 reg;
+ int ret;
+
+ start = ktime_get();
+ for (;;) {
+ ret = sram_reg_read(wdev, WFX_DCA_NCP_STATUS, &reg);
+ if (ret < 0)
+ return -EIO;
+ now = ktime_get();
+ if (reg == status)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
+ return -ETIMEDOUT;
+ }
+ if (ktime_compare(now, start))
+ dev_dbg(wdev->dev, "chip answer after %lldus\n",
+ ktime_us_delta(now, start));
+ else
+ dev_dbg(wdev->dev, "chip answer immediately\n");
+ return 0;
+}
+
+static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
+{
+ int ret;
+ u32 offs, bytes_done;
+ ktime_t now, start;
+
+ if (len % DNLD_BLOCK_SIZE) {
+ dev_err(wdev->dev, "firmware size is not aligned. Buffer overrun will occur\n");
+ return -EIO;
+ }
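+ // The firmware is streamed through a 32KiB circular FIFO in SRAM:
+ // WFX_DCA_PUT tracks what the host has written, WFX_DCA_GET what the
+ // chip has consumed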
+ offs = 0;
+ while (offs < len) {
+ start = ktime_get();
+ for (;;) {
+ ret = sram_reg_read(wdev, WFX_DCA_GET, &bytes_done);
+ if (ret < 0)
+ return ret;
+ now = ktime_get();
+ if (offs +
+ DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
+ return -ETIMEDOUT;
+ }
+ if (ktime_compare(now, start))
+ dev_dbg(wdev->dev, "answer after %lldus\n",
+ ktime_us_delta(now, start));
+
+ ret = sram_write_dma_safe(wdev, WFX_DNLD_FIFO +
+ (offs % DNLD_FIFO_SIZE),
+ data + offs, DNLD_BLOCK_SIZE);
+ if (ret < 0)
+ return ret;
+
+ // WFx seems not to support writing 0 to this register during the
+ // first loop iteration
+ offs += DNLD_BLOCK_SIZE;
+ ret = sram_reg_write(wdev, WFX_DCA_PUT, offs);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void print_boot_status(struct wfx_dev *wdev)
+{
+ u32 val32;
+
+ sram_reg_read(wdev, WFX_STATUS_INFO, &val32);
+ if (val32 == 0x12345678) {
+ dev_info(wdev->dev, "no error reported by secure boot\n");
+ } else {
+ sram_reg_read(wdev, WFX_ERR_INFO, &val32);
+ if (val32 < ARRAY_SIZE(fwio_error_strings) &&
+ fwio_error_strings[val32])
+ dev_info(wdev->dev, "secure boot error: %s\n",
+ fwio_error_strings[val32]);
+ else
+ dev_info(wdev->dev,
+ "secure boot error: Unknown (0x%02x)\n",
+ val32);
+ }
+}
+
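+/*
+ * Secure boot handshake: at each step the host writes a HOST_* value to
+ * WFX_DCA_HOST_STATUS and waits for the matching NCP_* value in
+ * WFX_DCA_NCP_STATUS (info read, firmware upload, authentication, jump to
+ * the firmware).
+ */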
+static int load_firmware_secure(struct wfx_dev *wdev)
+{
+ const struct firmware *fw = NULL;
+ int header_size;
+ int fw_offset;
+ ktime_t start;
+ u8 *buf;
+ int ret;
+
+ BUILD_BUG_ON(PTE_INFO_SIZE > BOOTLOADER_LABEL_SIZE);
+ buf = kmalloc(BOOTLOADER_LABEL_SIZE + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_READY);
+ ret = wait_ncp_status(wdev, NCP_INFO_READY);
+ if (ret)
+ goto error;
+
+ sram_buf_read(wdev, WFX_BOOTLOADER_LABEL, buf, BOOTLOADER_LABEL_SIZE);
+ buf[BOOTLOADER_LABEL_SIZE] = 0;
+ dev_dbg(wdev->dev, "bootloader: \"%s\"\n", buf);
+
+ sram_buf_read(wdev, WFX_PTE_INFO, buf, PTE_INFO_SIZE);
+ ret = get_firmware(wdev, buf[PTE_INFO_KEYSET_IDX], &fw, &fw_offset);
+ if (ret)
+ goto error;
+ header_size = fw_offset + FW_SIGNATURE_SIZE + FW_HASH_SIZE;
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_INFO_READ);
+ ret = wait_ncp_status(wdev, NCP_READY);
+ if (ret)
+ goto error;
+
+ sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); // Fifo init
+ sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00",
+ FW_VERSION_SIZE);
+ sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset,
+ FW_SIGNATURE_SIZE);
+ sram_write_dma_safe(wdev, WFX_DCA_FW_HASH,
+ fw->data + fw_offset + FW_SIGNATURE_SIZE,
+ FW_HASH_SIZE);
+ sram_reg_write(wdev, WFX_DCA_IMAGE_SIZE, fw->size - header_size);
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_PENDING);
+ ret = wait_ncp_status(wdev, NCP_DOWNLOAD_PENDING);
+ if (ret)
+ goto error;
+
+ start = ktime_get();
+ ret = upload_firmware(wdev, fw->data + header_size,
+ fw->size - header_size);
+ if (ret)
+ goto error;
+ dev_dbg(wdev->dev, "firmware load after %lldus\n",
+ ktime_us_delta(ktime_get(), start));
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE);
+ ret = wait_ncp_status(wdev, NCP_AUTH_OK);
+ // Legacy ROM support
+ if (ret < 0)
+ ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY);
+ if (ret < 0)
+ goto error;
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_OK_TO_JUMP);
+
+error:
+ kfree(buf);
+ if (fw)
+ release_firmware(fw);
+ if (ret)
+ print_boot_status(wdev);
+ return ret;
+}
+
+static int init_gpr(struct wfx_dev *wdev)
+{
+ int ret, i;
+ static const struct {
+ int index;
+ u32 value;
+ } gpr_init[] = {
+ { 0x07, 0x208775 },
+ { 0x08, 0x2EC020 },
+ { 0x09, 0x3C3C3C },
+ { 0x0B, 0x322C44 },
+ { 0x0C, 0xA06497 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(gpr_init); i++) {
+ ret = igpr_reg_write(wdev, gpr_init[i].index,
+ gpr_init[i].value);
+ if (ret < 0)
+ return ret;
+ dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index,
+ gpr_init[i].value);
+ }
+ return 0;
+}
+
+int wfx_init_device(struct wfx_dev *wdev)
+{
+ int ret;
+ int hw_revision, hw_type;
+ int wakeup_timeout = 50; // ms
+ ktime_t now, start;
+ u32 reg;
+
+ reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_WORD_MODE2;
+ if (wdev->pdata.use_rising_clk)
+ reg |= CFG_CLK_RISE_EDGE;
+ ret = config_reg_write(wdev, reg);
+ if (ret < 0) {
+ dev_err(wdev->dev, "bus returned an error during first write access. Host configuration error?\n");
+ return -EIO;
+ }
+
+ ret = config_reg_read(wdev, &reg);
+ if (ret < 0) {
+ dev_err(wdev->dev, "bus returned an error during first read access. Bus configuration error?\n");
+ return -EIO;
+ }
+ if (reg == 0 || reg == ~0) {
+ dev_err(wdev->dev, "chip mute. Bus configuration error or chip wasn't reset?\n");
+ return -EIO;
+ }
+ dev_dbg(wdev->dev, "initial config register value: %08x\n", reg);
+
+ hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg);
+ if (hw_revision == 0 || hw_revision > 2) {
+ dev_err(wdev->dev, "bad hardware revision number: %d\n",
+ hw_revision);
+ return -ENODEV;
+ }
+ hw_type = FIELD_GET(CFG_DEVICE_ID_TYPE, reg);
+ if (hw_type == 1) {
+ dev_notice(wdev->dev, "development hardware detected\n");
+ wakeup_timeout = 2000;
+ }
+
+ ret = init_gpr(wdev);
+ if (ret < 0)
+ return ret;
+
+ ret = control_reg_write(wdev, CTRL_WLAN_WAKEUP);
+ if (ret < 0)
+ return -EIO;
+ start = ktime_get();
+ for (;;) {
+ ret = control_reg_read(wdev, &reg);
+ now = ktime_get();
+ if (reg & CTRL_WLAN_READY)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, wakeup_timeout))) {
+ dev_err(wdev->dev, "chip didn't wake up. Chip wasn't reset?\n");
+ return -ETIMEDOUT;
+ }
+ }
+ dev_dbg(wdev->dev, "chip wake up after %lldus\n",
+ ktime_us_delta(now, start));
+
+ ret = config_reg_write_bits(wdev, CFG_CPU_RESET, 0);
+ if (ret < 0)
+ return ret;
+ ret = load_firmware_secure(wdev);
+ if (ret < 0)
+ return ret;
+ ret = config_reg_write_bits(wdev,
+ CFG_DIRECT_ACCESS_MODE |
+ CFG_IRQ_ENABLE_DATA |
+ CFG_IRQ_ENABLE_WRDY,
+ CFG_IRQ_ENABLE_DATA);
+ return ret;
+}
diff --git a/drivers/staging/wfx/fwio.h b/drivers/staging/wfx/fwio.h
new file mode 100644
index 000000000000..6028f92503fe
--- /dev/null
+++ b/drivers/staging/wfx/fwio.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Firmware loading.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_FWIO_H
+#define WFX_FWIO_H
+
+struct wfx_dev;
+
+int wfx_init_device(struct wfx_dev *wdev);
+
+#endif /* WFX_FWIO_H */
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
new file mode 100644
index 000000000000..c15831de4ff4
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_CMD_H
+#define WFX_HIF_API_CMD_H
+
+#include "hif_api_general.h"
+
+#define HIF_NUM_AC 4
+
+#define HIF_API_SSID_SIZE API_SSID_SIZE
+
+enum hif_requests_ids {
+ HIF_REQ_ID_RESET = 0x0a,
+ HIF_REQ_ID_READ_MIB = 0x05,
+ HIF_REQ_ID_WRITE_MIB = 0x06,
+ HIF_REQ_ID_START_SCAN = 0x07,
+ HIF_REQ_ID_STOP_SCAN = 0x08,
+ HIF_REQ_ID_TX = 0x04,
+ HIF_REQ_ID_JOIN = 0x0b,
+ HIF_REQ_ID_SET_PM_MODE = 0x10,
+ HIF_REQ_ID_SET_BSS_PARAMS = 0x11,
+ HIF_REQ_ID_ADD_KEY = 0x0c,
+ HIF_REQ_ID_REMOVE_KEY = 0x0d,
+ HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_REQ_ID_START = 0x17,
+ HIF_REQ_ID_BEACON_TRANSMIT = 0x18,
+ HIF_REQ_ID_UPDATE_IE = 0x1b,
+ HIF_REQ_ID_MAP_LINK = 0x1c,
+};
+
+enum hif_confirmations_ids {
+ HIF_CNF_ID_RESET = 0x0a,
+ HIF_CNF_ID_READ_MIB = 0x05,
+ HIF_CNF_ID_WRITE_MIB = 0x06,
+ HIF_CNF_ID_START_SCAN = 0x07,
+ HIF_CNF_ID_STOP_SCAN = 0x08,
+ HIF_CNF_ID_TX = 0x04,
+ HIF_CNF_ID_MULTI_TRANSMIT = 0x1e,
+ HIF_CNF_ID_JOIN = 0x0b,
+ HIF_CNF_ID_SET_PM_MODE = 0x10,
+ HIF_CNF_ID_SET_BSS_PARAMS = 0x11,
+ HIF_CNF_ID_ADD_KEY = 0x0c,
+ HIF_CNF_ID_REMOVE_KEY = 0x0d,
+ HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_CNF_ID_START = 0x17,
+ HIF_CNF_ID_BEACON_TRANSMIT = 0x18,
+ HIF_CNF_ID_UPDATE_IE = 0x1b,
+ HIF_CNF_ID_MAP_LINK = 0x1c,
+};
+
+enum hif_indications_ids {
+ HIF_IND_ID_RX = 0x84,
+ HIF_IND_ID_SCAN_CMPL = 0x86,
+ HIF_IND_ID_JOIN_COMPLETE = 0x8f,
+ HIF_IND_ID_SET_PM_MODE_CMPL = 0x89,
+ HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c,
+ HIF_IND_ID_EVENT = 0x85
+};
+
+union hif_commands_ids {
+ enum hif_requests_ids request;
+ enum hif_confirmations_ids confirmation;
+ enum hif_indications_ids indication;
+};
+
+enum hif_status {
+ HIF_STATUS_SUCCESS = 0x0,
+ HIF_STATUS_FAILURE = 0x1,
+ HIF_INVALID_PARAMETER = 0x2,
+ HIF_STATUS_WARNING = 0x3,
+ HIF_ERROR_UNSUPPORTED_MSG_ID = 0x4,
+ HIF_STATUS_DECRYPTFAILURE = 0x10,
+ HIF_STATUS_MICFAILURE = 0x11,
+ HIF_STATUS_NO_KEY_FOUND = 0x12,
+ HIF_STATUS_RETRY_EXCEEDED = 0x13,
+ HIF_STATUS_TX_LIFETIME_EXCEEDED = 0x14,
+ HIF_REQUEUE = 0x15,
+ HIF_STATUS_REFUSED = 0x16,
+ HIF_STATUS_BUSY = 0x17
+};
+
+struct hif_reset_flags {
+ u8 reset_stat:1;
+ u8 reset_all_int:1;
+ u8 reserved1:6;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_req_reset {
+ struct hif_reset_flags reset_flags;
+} __packed;
+
+struct hif_cnf_reset {
+ u32 status;
+} __packed;
+
+struct hif_req_read_mib {
+ u16 mib_id;
+ u16 reserved;
+} __packed;
+
+struct hif_cnf_read_mib {
+ u32 status;
+ u16 mib_id;
+ u16 length;
+ u8 mib_data[];
+} __packed;
+
+struct hif_req_write_mib {
+ u16 mib_id;
+ u16 length;
+ u8 mib_data[];
+} __packed;
+
+struct hif_cnf_write_mib {
+ u32 status;
+} __packed;
+
+struct hif_ie_flags {
+ u8 beacon:1;
+ u8 probe_resp:1;
+ u8 probe_req:1;
+ u8 reserved1:5;
+ u8 reserved2;
+} __packed;
+
+struct hif_ie_tlv {
+ u8 type;
+ u8 length;
+ u8 data[];
+} __packed;
+
+struct hif_req_update_ie {
+ struct hif_ie_flags ie_flags;
+ u16 num_i_es;
+ struct hif_ie_tlv ie[];
+} __packed;
+
+struct hif_cnf_update_ie {
+ u32 status;
+} __packed;
+
+struct hif_scan_type {
+ u8 type:1;
+ u8 mode:1;
+ u8 reserved:6;
+} __packed;
+
+struct hif_scan_flags {
+ u8 fbg:1;
+ u8 reserved1:1;
+ u8 pre:1;
+ u8 reserved2:5;
+} __packed;
+
+struct hif_auto_scan_param {
+ u16 interval;
+ u8 reserved;
+ s8 rssi_thr;
+} __packed;
+
+struct hif_ssid_def {
+ u32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+} __packed;
+
+#define HIF_API_MAX_NB_SSIDS 2
+#define HIF_API_MAX_NB_CHANNELS 14
+
+struct hif_req_start_scan {
+ u8 band;
+ struct hif_scan_type scan_type;
+ struct hif_scan_flags scan_flags;
+ u8 max_transmit_rate;
+ struct hif_auto_scan_param auto_scan_param;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssi_ds;
+ u8 num_of_channels;
+ u32 min_channel_time;
+ u32 max_channel_time;
+ s32 tx_power_level;
+ u8 ssid_and_channel_lists[];
+} __packed;
+
+struct hif_start_scan_req_cstnbssid_body {
+ u8 band;
+ struct hif_scan_type scan_type;
+ struct hif_scan_flags scan_flags;
+ u8 max_transmit_rate;
+ struct hif_auto_scan_param auto_scan_param;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssi_ds;
+ u8 num_of_channels;
+ u32 min_channel_time;
+ u32 max_channel_time;
+ s32 tx_power_level;
+ struct hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS];
+ u8 channel_list[];
+} __packed;
+
+struct hif_cnf_start_scan {
+ u32 status;
+} __packed;
+
+struct hif_cnf_stop_scan {
+ u32 status;
+} __packed;
+
+enum hif_pm_mode_status {
+ HIF_PM_MODE_ACTIVE = 0x0,
+ HIF_PM_MODE_PS = 0x1,
+ HIF_PM_MODE_UNDETERMINED = 0x2
+};
+
+struct hif_ind_scan_cmpl {
+ u32 status;
+ u8 pm_mode;
+ u8 num_channels_completed;
+ u16 reserved;
+} __packed;
+
+enum hif_queue_id {
+ HIF_QUEUE_ID_BACKGROUND = 0x0,
+ HIF_QUEUE_ID_BESTEFFORT = 0x1,
+ HIF_QUEUE_ID_VIDEO = 0x2,
+ HIF_QUEUE_ID_VOICE = 0x3
+};
+
+enum hif_frame_format {
+ HIF_FRAME_FORMAT_NON_HT = 0x0,
+ HIF_FRAME_FORMAT_MIXED_FORMAT_HT = 0x1,
+ HIF_FRAME_FORMAT_GF_HT_11N = 0x2
+};
+
+enum hif_stbc {
+ HIF_STBC_NOT_ALLOWED = 0x0,
+ HIF_STBC_ALLOWED = 0x1
+};
+
+struct hif_queue {
+ u8 queue_id:2;
+ u8 peer_sta_id:4;
+ u8 reserved:2;
+} __packed;
+
+struct hif_data_flags {
+ u8 more:1;
+ u8 fc_offset:3;
+ u8 reserved:4;
+} __packed;
+
+struct hif_tx_flags {
+ u8 start_exp:1;
+ u8 reserved:3;
+ u8 retry_policy_index:4;
+} __packed;
+
+struct hif_ht_tx_parameters {
+ u8 frame_format:4;
+ u8 fec_coding:1;
+ u8 short_gi:1;
+ u8 reserved1:1;
+ u8 stbc:1;
+ u8 reserved2;
+ u8 aggregation:1;
+ u8 reserved3:7;
+ u8 reserved4;
+} __packed;
+
+struct hif_req_tx {
+ u32 packet_id;
+ u8 max_tx_rate;
+ struct hif_queue queue_id;
+ struct hif_data_flags data_flags;
+ struct hif_tx_flags tx_flags;
+ u32 reserved;
+ u32 expire_time;
+ struct hif_ht_tx_parameters ht_tx_parameters;
+ u8 frame[];
+} __packed;
+
+enum hif_qos_ackplcy {
+ HIF_QOS_ACKPLCY_NORMAL = 0x0,
+ HIF_QOS_ACKPLCY_TXNOACK = 0x1,
+ HIF_QOS_ACKPLCY_NOEXPACK = 0x2,
+ HIF_QOS_ACKPLCY_BLCKACK = 0x3
+};
+
+struct hif_tx_result_flags {
+ u8 aggr:1;
+ u8 requeue:1;
+ u8 ack_policy:2;
+ u8 txop_limit:1;
+ u8 reserved1:3;
+ u8 reserved2;
+} __packed;
+
+struct hif_cnf_tx {
+ u32 status;
+ u32 packet_id;
+ u8 txed_rate;
+ u8 ack_failures;
+ struct hif_tx_result_flags tx_result_flags;
+ u32 media_delay;
+ u32 tx_queue_delay;
+} __packed;
+
+struct hif_cnf_multi_transmit {
+ u32 num_tx_confs;
+ struct hif_cnf_tx tx_conf_payload[];
+} __packed;
+
+enum hif_ri_flags_encrypt {
+ HIF_RI_FLAGS_UNENCRYPTED = 0x0,
+ HIF_RI_FLAGS_WEP_ENCRYPTED = 0x1,
+ HIF_RI_FLAGS_TKIP_ENCRYPTED = 0x2,
+ HIF_RI_FLAGS_AES_ENCRYPTED = 0x3,
+ HIF_RI_FLAGS_WAPI_ENCRYPTED = 0x4
+};
+
+struct hif_rx_flags {
+ u8 encryp:3;
+ u8 in_aggr:1;
+ u8 first_aggr:1;
+ u8 last_aggr:1;
+ u8 defrag:1;
+ u8 beacon:1;
+ u8 tim:1;
+ u8 bitmap:1;
+ u8 match_ssid:1;
+ u8 match_bssid:1;
+ u8 more:1;
+ u8 reserved1:1;
+ u8 ht:1;
+ u8 stbc:1;
+ u8 match_uc_addr:1;
+ u8 match_mc_addr:1;
+ u8 match_bc_addr:1;
+ u8 key_type:1;
+ u8 key_index:4;
+ u8 reserved2:1;
+ u8 peer_sta_id:4;
+ u8 reserved3:2;
+ u8 reserved4:1;
+} __packed;
+
+struct hif_ind_rx {
+ u32 status;
+ u16 channel_number;
+ u8 rxed_rate;
+ u8 rcpi_rssi;
+ struct hif_rx_flags rx_flags;
+ u8 frame[];
+} __packed;
+
+
+struct hif_req_edca_queue_params {
+ u8 queue_id;
+ u8 reserved1;
+ u8 aifsn;
+ u8 reserved2;
+ u16 cw_min;
+ u16 cw_max;
+ u16 tx_op_limit;
+ u16 allowed_medium_time;
+ u32 reserved3;
+} __packed;
+
+struct hif_cnf_edca_queue_params {
+ u32 status;
+} __packed;
+
+enum hif_ap_mode {
+ HIF_MODE_IBSS = 0x0,
+ HIF_MODE_BSS = 0x1
+};
+
+enum hif_preamble {
+ HIF_PREAMBLE_LONG = 0x0,
+ HIF_PREAMBLE_SHORT = 0x1,
+ HIF_PREAMBLE_SHORT_LONG12 = 0x2
+};
+
+struct hif_join_flags {
+ u8 reserved1:2;
+ u8 force_no_beacon:1;
+ u8 force_with_ind:1;
+ u8 reserved2:4;
+} __packed;
+
+struct hif_req_join {
+ u8 mode;
+ u8 band;
+ u16 channel_number;
+ u8 bssid[ETH_ALEN];
+ u16 atim_window;
+ u8 preamble_type;
+ u8 probe_for_join;
+ u8 reserved;
+ struct hif_join_flags join_flags;
+ u32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ u32 beacon_interval;
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_cnf_join {
+ u32 status;
+} __packed;
+
+struct hif_ind_join_complete {
+ u32 status;
+} __packed;
+
+struct hif_bss_flags {
+ u8 lost_count_only:1;
+ u8 reserved:7;
+} __packed;
+
+struct hif_req_set_bss_params {
+ struct hif_bss_flags bss_flags;
+ u8 beacon_lost_count;
+ u16 aid;
+ u32 operational_rate_set;
+} __packed;
+
+struct hif_cnf_set_bss_params {
+ u32 status;
+} __packed;
+
+struct hif_pm_mode {
+ u8 enter_psm:1;
+ u8 reserved:6;
+ u8 fast_psm:1;
+} __packed;
+
+struct hif_req_set_pm_mode {
+ struct hif_pm_mode pm_mode;
+ u8 fast_psm_idle_period;
+ u8 ap_psm_change_period;
+ u8 min_auto_ps_poll_period;
+} __packed;
+
+struct hif_cnf_set_pm_mode {
+ u32 status;
+} __packed;
+
+struct hif_ind_set_pm_mode_cmpl {
+ u32 status;
+ u8 pm_mode;
+ u8 reserved[3];
+} __packed;
+
+
+struct hif_req_start {
+ u8 mode;
+ u8 band;
+ u16 channel_number;
+ u32 reserved1;
+ u32 beacon_interval;
+ u8 dtim_period;
+ u8 preamble_type;
+ u8 reserved2;
+ u8 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_cnf_start {
+ u32 status;
+} __packed;
+
+enum hif_beacon {
+ HIF_BEACON_STOP = 0x0,
+ HIF_BEACON_START = 0x1
+};
+
+struct hif_req_beacon_transmit {
+ u8 enable_beaconing;
+ u8 reserved[3];
+} __packed;
+
+struct hif_cnf_beacon_transmit {
+ u32 status;
+} __packed;
+
+enum hif_sta_map_direction {
+ HIF_STA_MAP = 0x0,
+ HIF_STA_UNMAP = 0x1
+};
+
+struct hif_map_link_flags {
+ u8 map_direction:1;
+ u8 mfpc:1;
+ u8 reserved:6;
+} __packed;
+
+struct hif_req_map_link {
+ u8 mac_addr[ETH_ALEN];
+ struct hif_map_link_flags map_link_flags;
+ u8 peer_sta_id;
+} __packed;
+
+struct hif_cnf_map_link {
+ u32 status;
+} __packed;
+
+struct hif_suspend_resume_flags {
+ u8 resume:1;
+ u8 reserved1:2;
+ u8 bc_mc_only:1;
+ u8 reserved2:4;
+ u8 reserved3;
+} __packed;
+
+struct hif_ind_suspend_resume_tx {
+ struct hif_suspend_resume_flags suspend_resume_flags;
+ u16 peer_sta_set;
+} __packed;
+
+
+#define MAX_KEY_ENTRIES 24
+#define HIF_API_WEP_KEY_DATA_SIZE 16
+#define HIF_API_TKIP_KEY_DATA_SIZE 16
+#define HIF_API_RX_MIC_KEY_SIZE 8
+#define HIF_API_TX_MIC_KEY_SIZE 8
+#define HIF_API_AES_KEY_DATA_SIZE 16
+#define HIF_API_WAPI_KEY_DATA_SIZE 16
+#define HIF_API_MIC_KEY_DATA_SIZE 16
+#define HIF_API_IGTK_KEY_DATA_SIZE 16
+#define HIF_API_RX_SEQUENCE_COUNTER_SIZE 8
+#define HIF_API_IPN_SIZE 8
+
+enum hif_key_type {
+ HIF_KEY_TYPE_WEP_DEFAULT = 0x0,
+ HIF_KEY_TYPE_WEP_PAIRWISE = 0x1,
+ HIF_KEY_TYPE_TKIP_GROUP = 0x2,
+ HIF_KEY_TYPE_TKIP_PAIRWISE = 0x3,
+ HIF_KEY_TYPE_AES_GROUP = 0x4,
+ HIF_KEY_TYPE_AES_PAIRWISE = 0x5,
+ HIF_KEY_TYPE_WAPI_GROUP = 0x6,
+ HIF_KEY_TYPE_WAPI_PAIRWISE = 0x7,
+ HIF_KEY_TYPE_IGTK_GROUP = 0x8,
+ HIF_KEY_TYPE_NONE = 0x9
+};
+
+struct hif_wep_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved;
+ u8 key_length;
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_wep_group_key {
+ u8 key_id;
+ u8 key_length;
+ u8 reserved[2];
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_tkip_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE];
+} __packed;
+
+struct hif_tkip_group_key {
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+} __packed;
+
+struct hif_aes_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_aes_group_key {
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+} __packed;
+
+struct hif_wapi_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 key_id;
+ u8 reserved;
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_wapi_group_key {
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+} __packed;
+
+struct hif_igtk_group_key {
+ u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 ipn[HIF_API_IPN_SIZE];
+} __packed;
+
+union hif_privacy_key_data {
+ struct hif_wep_pairwise_key wep_pairwise_key;
+ struct hif_wep_group_key wep_group_key;
+ struct hif_tkip_pairwise_key tkip_pairwise_key;
+ struct hif_tkip_group_key tkip_group_key;
+ struct hif_aes_pairwise_key aes_pairwise_key;
+ struct hif_aes_group_key aes_group_key;
+ struct hif_wapi_pairwise_key wapi_pairwise_key;
+ struct hif_wapi_group_key wapi_group_key;
+ struct hif_igtk_group_key igtk_group_key;
+};
+
+struct hif_req_add_key {
+ u8 type;
+ u8 entry_index;
+ u8 int_id:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ union hif_privacy_key_data key;
+} __packed;
+
+struct hif_cnf_add_key {
+ u32 status;
+} __packed;
+
+struct hif_req_remove_key {
+ u8 entry_index;
+ u8 reserved[3];
+} __packed;
+
+struct hif_cnf_remove_key {
+ u32 status;
+} __packed;
+
+enum hif_event_ind {
+ HIF_EVENT_IND_BSSLOST = 0x1,
+ HIF_EVENT_IND_BSSREGAINED = 0x2,
+ HIF_EVENT_IND_RCPI_RSSI = 0x3,
+ HIF_EVENT_IND_PS_MODE_ERROR = 0x4,
+ HIF_EVENT_IND_INACTIVITY = 0x5
+};
+
+enum hif_ps_mode_error {
+ HIF_PS_ERROR_NO_ERROR = 0,
+ HIF_PS_ERROR_AP_NOT_RESP_TO_POLL = 1,
+ HIF_PS_ERROR_AP_NOT_RESP_TO_UAPSD_TRIGGER = 2,
+ HIF_PS_ERROR_AP_SENT_UNICAST_IN_DOZE = 3,
+ HIF_PS_ERROR_AP_NO_DATA_AFTER_TIM = 4
+};
+
+union hif_event_data {
+ u8 rcpi_rssi;
+ u32 ps_mode_error;
+ u32 peer_sta_set;
+};
+
+struct hif_ind_event {
+ u32 event_id;
+ union hif_event_data event_data;
+} __packed;
+
+
+#endif
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h
new file mode 100644
index 000000000000..a069c3a21b4d
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_general.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_GENERAL_H
+#define WFX_HIF_API_GENERAL_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#else
+#include <net/ethernet.h>
+#include <stdint.h>
+#define __packed __attribute__((__packed__))
+#endif
+
+#define API_SSID_SIZE 32
+
+#define HIF_ID_IS_INDICATION 0x80
+#define HIF_COUNTER_MAX 7
+
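+/*
+ * Generic header prepended to every HIF message. 'len' is the total message
+ * length in bytes, including this 4-byte header.
+ */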
+struct hif_msg {
+ u16 len;
+ u8 id;
+ u8 reserved:1;
+ u8 interface:2;
+ u8 seqnum:3;
+ u8 encrypted:2;
+ u8 body[];
+} __packed;
+
+enum hif_general_requests_ids {
+ HIF_REQ_ID_CONFIGURATION = 0x09,
+ HIF_REQ_ID_CONTROL_GPIO = 0x26,
+ HIF_REQ_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_REQ_ID_SL_CONFIGURE = 0x29,
+ HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_REQ_ID_PTA_SETTINGS = 0x2b,
+ HIF_REQ_ID_PTA_PRIORITY = 0x2c,
+ HIF_REQ_ID_PTA_STATE = 0x2d,
+ HIF_REQ_ID_SHUT_DOWN = 0x32,
+};
+
+enum hif_general_confirmations_ids {
+ HIF_CNF_ID_CONFIGURATION = 0x09,
+ HIF_CNF_ID_CONTROL_GPIO = 0x26,
+ HIF_CNF_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_CNF_ID_SL_CONFIGURE = 0x29,
+ HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_CNF_ID_PTA_SETTINGS = 0x2b,
+ HIF_CNF_ID_PTA_PRIORITY = 0x2c,
+ HIF_CNF_ID_PTA_STATE = 0x2d,
+ HIF_CNF_ID_SHUT_DOWN = 0x32,
+};
+
+enum hif_general_indications_ids {
+ HIF_IND_ID_EXCEPTION = 0xe0,
+ HIF_IND_ID_STARTUP = 0xe1,
+ HIF_IND_ID_WAKEUP = 0xe2,
+ HIF_IND_ID_GENERIC = 0xe3,
+ HIF_IND_ID_ERROR = 0xe4,
+ HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5
+};
+
+enum hif_hi_status {
+ HI_STATUS_SUCCESS = 0x0000,
+ HI_STATUS_FAILURE = 0x0001,
+ HI_INVALID_PARAMETER = 0x0002,
+ HI_STATUS_GPIO_WARNING = 0x0003,
+ HI_ERROR_UNSUPPORTED_MSG_ID = 0x0004,
+ SL_MAC_KEY_STATUS_SUCCESS = 0x005A,
+ SL_MAC_KEY_STATUS_FAILED_KEY_ALREADY_BURNED = 0x006B,
+ SL_MAC_KEY_STATUS_FAILED_RAM_MODE_NOT_ALLOWED = 0x007C,
+ SL_MAC_KEY_STATUS_FAILED_UNKNOWN_MODE = 0x008D,
+ SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS = 0x009E,
+ SL_PUB_KEY_EXCHANGE_STATUS_FAILED = 0x00AF,
+ PREVENT_ROLLBACK_CNF_SUCCESS = 0x1234,
+ PREVENT_ROLLBACK_CNF_WRONG_MAGIC_WORD = 0x1256
+};
+
+enum hif_api_rate_index {
+ API_RATE_INDEX_B_1MBPS = 0,
+ API_RATE_INDEX_B_2MBPS = 1,
+ API_RATE_INDEX_B_5P5MBPS = 2,
+ API_RATE_INDEX_B_11MBPS = 3,
+ API_RATE_INDEX_PBCC_22MBPS = 4,
+ API_RATE_INDEX_PBCC_33MBPS = 5,
+ API_RATE_INDEX_G_6MBPS = 6,
+ API_RATE_INDEX_G_9MBPS = 7,
+ API_RATE_INDEX_G_12MBPS = 8,
+ API_RATE_INDEX_G_18MBPS = 9,
+ API_RATE_INDEX_G_24MBPS = 10,
+ API_RATE_INDEX_G_36MBPS = 11,
+ API_RATE_INDEX_G_48MBPS = 12,
+ API_RATE_INDEX_G_54MBPS = 13,
+ API_RATE_INDEX_N_6P5MBPS = 14,
+ API_RATE_INDEX_N_13MBPS = 15,
+ API_RATE_INDEX_N_19P5MBPS = 16,
+ API_RATE_INDEX_N_26MBPS = 17,
+ API_RATE_INDEX_N_39MBPS = 18,
+ API_RATE_INDEX_N_52MBPS = 19,
+ API_RATE_INDEX_N_58P5MBPS = 20,
+ API_RATE_INDEX_N_65MBPS = 21,
+ API_RATE_NUM_ENTRIES = 22
+};
+
+
+enum hif_fw_type {
+ HIF_FW_TYPE_ETF = 0x0,
+ HIF_FW_TYPE_WFM = 0x1,
+ HIF_FW_TYPE_WSM = 0x2
+};
+
+struct hif_capabilities {
+ u8 link_mode:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+} __packed;
+
+struct hif_otp_regul_sel_mode_info {
+ u8 region_sel_mode:4;
+ u8 reserved:4;
+} __packed;
+
+struct hif_otp_phy_info {
+ u8 phy1_region:3;
+ u8 phy0_region:3;
+ u8 otp_phy_ver:2;
+} __packed;
+
+#define API_OPN_SIZE 14
+#define API_UID_SIZE 8
+#define API_DISABLED_CHANNEL_LIST_SIZE 2
+#define API_FIRMWARE_LABEL_SIZE 128
+
+struct hif_ind_startup {
+ u32 status;
+ u16 hardware_id;
+ u8 opn[API_OPN_SIZE];
+ u8 uid[API_UID_SIZE];
+ u16 num_inp_ch_bufs;
+ u16 size_inp_ch_buf;
+ u8 num_links_ap;
+ u8 num_interfaces;
+ u8 mac_addr[2][ETH_ALEN];
+ u8 api_version_minor;
+ u8 api_version_major;
+ struct hif_capabilities capabilities;
+ u8 firmware_build;
+ u8 firmware_minor;
+ u8 firmware_major;
+ u8 firmware_type;
+ u8 disabled_channel_list[API_DISABLED_CHANNEL_LIST_SIZE];
+ struct hif_otp_regul_sel_mode_info regul_sel_mode_info;
+ struct hif_otp_phy_info otp_phy_info;
+ u32 supported_rate_mask;
+ u8 firmware_label[API_FIRMWARE_LABEL_SIZE];
+} __packed;
+
+struct hif_ind_wakeup {
+} __packed;
+
+struct hif_req_configuration {
+ u16 length;
+ u8 pds_data[];
+} __packed;
+
+struct hif_cnf_configuration {
+ u32 status;
+} __packed;
+
+enum hif_gpio_mode {
+ HIF_GPIO_MODE_D0 = 0x0,
+ HIF_GPIO_MODE_D1 = 0x1,
+ HIF_GPIO_MODE_OD0 = 0x2,
+ HIF_GPIO_MODE_OD1 = 0x3,
+ HIF_GPIO_MODE_TRISTATE = 0x4,
+ HIF_GPIO_MODE_TOGGLE = 0x5,
+ HIF_GPIO_MODE_READ = 0x6
+};
+
+struct hif_req_control_gpio {
+ u8 gpio_label;
+ u8 gpio_mode;
+} __packed;
+
+enum hif_gpio_error {
+ HIF_GPIO_ERROR_0 = 0x0,
+ HIF_GPIO_ERROR_1 = 0x1,
+ HIF_GPIO_ERROR_2 = 0x2
+};
+
+struct hif_cnf_control_gpio {
+ u32 status;
+ u32 value;
+} __packed;
+
+enum hif_generic_indication_type {
+ HIF_GENERIC_INDICATION_TYPE_RAW = 0x0,
+ HIF_GENERIC_INDICATION_TYPE_STRING = 0x1,
+ HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2
+};
+
+struct hif_rx_stats {
+ u32 nb_rx_frame;
+ u32 nb_crc_frame;
+ u32 per_total;
+ u32 throughput;
+ u32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
+ u16 per[API_RATE_NUM_ENTRIES];
+ s16 snr[API_RATE_NUM_ENTRIES];
+ s16 rssi[API_RATE_NUM_ENTRIES];
+ s16 cfo[API_RATE_NUM_ENTRIES];
+ u32 date;
+ u32 pwr_clk_freq;
+ u8 is_ext_pwr_clk;
+ s8 current_temp;
+} __packed;
+
+union hif_indication_data {
+ struct hif_rx_stats rx_stats;
+ u8 raw_data[1];
+};
+
+struct hif_ind_generic {
+ u32 indication_type;
+ union hif_indication_data indication_data;
+} __packed;
+
+
+#define HIF_EXCEPTION_DATA_SIZE 124
+
+struct hif_ind_exception {
+ u8 data[HIF_EXCEPTION_DATA_SIZE];
+} __packed;
+
+
+enum hif_error {
+ HIF_ERROR_FIRMWARE_ROLLBACK = 0x0,
+ HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x1,
+ HIF_ERROR_OUTDATED_SESSION_KEY = 0x2,
+ HIF_ERROR_INVALID_SESSION_KEY = 0x3,
+ HIF_ERROR_OOR_VOLTAGE = 0x4,
+ HIF_ERROR_PDS_VERSION = 0x5,
+ HIF_ERROR_OOR_TEMPERATURE = 0x6,
+ HIF_ERROR_REQ_DURING_KEY_EXCHANGE = 0x7,
+ HIF_ERROR_MULTI_TX_CNF_SECURELINK = 0x8,
+ HIF_ERROR_SECURELINK_OVERFLOW = 0x9,
+ HIF_ERROR_SECURELINK_DECRYPTION = 0xa
+};
+
+struct hif_ind_error {
+ u32 type;
+ u8 data[];
+} __packed;
+
+enum hif_secure_link_state {
+ SEC_LINK_UNAVAILABLE = 0x0,
+ SEC_LINK_RESERVED = 0x1,
+ SEC_LINK_EVAL = 0x2,
+ SEC_LINK_ENFORCED = 0x3
+};
+
+enum hif_sl_encryption_type {
+ NO_ENCRYPTION = 0,
+ TX_ENCRYPTION = 1,
+ RX_ENCRYPTION = 2,
+ HP_ENCRYPTION = 3
+};
+
+struct hif_sl_msg_hdr {
+ u32 seqnum:30;
+ u32 encrypted:2;
+} __packed;
+
+struct hif_sl_msg {
+ struct hif_sl_msg_hdr hdr;
+ u16 len;
+ u8 payload[];
+} __packed;
+
+#define AES_CCM_TAG_SIZE 16
+
+struct hif_sl_tag {
+ u8 tag[AES_CCM_TAG_SIZE];
+} __packed;
+
+enum hif_sl_mac_key_dest {
+ SL_MAC_KEY_DEST_OTP = 0x78,
+ SL_MAC_KEY_DEST_RAM = 0x87
+};
+
+#define API_KEY_VALUE_SIZE 32
+
+struct hif_req_set_sl_mac_key {
+ u8 otp_or_ram;
+ u8 key_value[API_KEY_VALUE_SIZE];
+} __packed;
+
+struct hif_cnf_set_sl_mac_key {
+ u32 status;
+} __packed;
+
+#define API_HOST_PUB_KEY_SIZE 32
+#define API_HOST_PUB_KEY_MAC_SIZE 64
+
+enum hif_sl_session_key_alg {
+ HIF_SL_CURVE25519 = 0x01,
+ HIF_SL_KDF = 0x02
+};
+
+struct hif_req_sl_exchange_pub_keys {
+ u8 algorithm:2;
+ u8 reserved1:6;
+ u8 reserved2[3];
+ u8 host_pub_key[API_HOST_PUB_KEY_SIZE];
+ u8 host_pub_key_mac[API_HOST_PUB_KEY_MAC_SIZE];
+} __packed;
+
+struct hif_cnf_sl_exchange_pub_keys {
+ u32 status;
+} __packed;
+
+#define API_NCP_PUB_KEY_SIZE 32
+#define API_NCP_PUB_KEY_MAC_SIZE 64
+
+struct hif_ind_sl_exchange_pub_keys {
+ u32 status;
+ u8 ncp_pub_key[API_NCP_PUB_KEY_SIZE];
+ u8 ncp_pub_key_mac[API_NCP_PUB_KEY_MAC_SIZE];
+} __packed;
+
+#define API_ENCR_BMP_SIZE 32
+
+struct hif_req_sl_configure {
+ u8 encr_bmp[API_ENCR_BMP_SIZE];
+ u8 disable_session_key_protection:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_cnf_sl_configure {
+ u32 status;
+} __packed;
+
+struct hif_req_prevent_rollback {
+ u32 magic_word;
+} __packed;
+
+struct hif_cnf_prevent_rollback {
+ u32 status;
+} __packed;
+
+enum hif_pta_mode {
+ PTA_1W_WLAN_MASTER = 0,
+ PTA_1W_COEX_MASTER = 1,
+ PTA_2W = 2,
+ PTA_3W = 3,
+ PTA_4W = 4
+};
+
+enum hif_signal_level {
+ SIGNAL_LOW = 0,
+ SIGNAL_HIGH = 1
+};
+
+enum hif_coex_type {
+ COEX_TYPE_GENERIC = 0,
+ COEX_TYPE_BLE = 1
+};
+
+enum hif_grant_state {
+ NO_GRANT = 0,
+ GRANT = 1
+};
+
+struct hif_req_pta_settings {
+ u8 pta_mode;
+ u8 request_signal_active_level;
+ u8 priority_signal_active_level;
+ u8 freq_signal_active_level;
+ u8 grant_signal_active_level;
+ u8 coex_type;
+ u8 default_grant_state;
+ u8 simultaneous_rx_accesses;
+ u8 priority_sampling_time;
+ u8 tx_rx_sampling_time;
+ u8 freq_sampling_time;
+ u8 grant_valid_time;
+ u8 fem_control_time;
+ u8 first_slot_time;
+ u16 periodic_tx_rx_sampling_time;
+ u16 coex_quota;
+ u16 wlan_quota;
+} __packed;
+
+struct hif_cnf_pta_settings {
+ u32 status;
+} __packed;
+
+enum hif_pta_priority {
+ HIF_PTA_PRIORITY_COEX_MAXIMIZED = 0x00000562,
+ HIF_PTA_PRIORITY_COEX_HIGH = 0x00000462,
+ HIF_PTA_PRIORITY_BALANCED = 0x00001461,
+ HIF_PTA_PRIORITY_WLAN_HIGH = 0x00001851,
+ HIF_PTA_PRIORITY_WLAN_MAXIMIZED = 0x00001A51
+};
+
+struct hif_req_pta_priority {
+ u32 priority;
+} __packed;
+
+struct hif_cnf_pta_priority {
+ u32 status;
+} __packed;
+
+enum hif_pta_state {
+ PTA_OFF = 0,
+ PTA_ON = 1
+};
+
+struct hif_req_pta_state {
+ u32 pta_state;
+} __packed;
+
+struct hif_cnf_pta_state {
+ u32 status;
+} __packed;
+
+#endif
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
new file mode 100644
index 000000000000..94b789ceb4ff
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_MIB_H
+#define WFX_HIF_API_MIB_H
+
+#include "hif_api_general.h"
+
+#define HIF_API_IPV4_ADDRESS_SIZE 4
+#define HIF_API_IPV6_ADDRESS_SIZE 16
+
+enum hif_mib_ids {
+ HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000,
+ HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001,
+ HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002,
+ HIF_MIB_ID_CCA_CONFIG = 0x2003,
+ HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010,
+ HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011,
+ HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013,
+ HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014,
+ HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016,
+ HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017,
+ HIF_MIB_ID_SET_DATA_FILTERING = 0x2018,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019,
+ HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A,
+ HIF_MIB_ID_RX_FILTER = 0x201B,
+ HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D,
+ HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030,
+ HIF_MIB_ID_TSF_COUNTER = 0x2031,
+ HIF_MIB_ID_STATISTICS_TABLE = 0x2032,
+ HIF_MIB_ID_COUNTERS_TABLE = 0x2033,
+ HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035,
+ HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040,
+ HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041,
+ HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044,
+ HIF_MIB_ID_SLOT_TIME = 0x2045,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046,
+ HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047,
+ HIF_MIB_ID_TEMPLATE_FRAME = 0x2048,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A,
+ HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B,
+ HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050,
+ HIF_MIB_ID_SET_HT_PROTECTION = 0x2051,
+ HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052,
+ HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053,
+ HIF_MIB_ID_INACTIVITY_TIMER = 0x2054,
+ HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055,
+ HIF_MIB_ID_BEACON_STATS = 0x2056,
+};
+
+#define HIF_OP_POWER_MODE_MASK 0xf
+
+enum hif_op_power_mode {
+ HIF_OP_POWER_MODE_ACTIVE = 0x0,
+ HIF_OP_POWER_MODE_DOZE = 0x1,
+ HIF_OP_POWER_MODE_QUIESCENT = 0x2
+};
+
+struct hif_mib_gl_operational_power_mode {
+ u8 power_mode:4;
+ u8 reserved1:3;
+ u8 wup_ind_activation:1;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_gl_block_ack_info {
+ u8 rx_buffer_size;
+ u8 rx_max_num_agreements;
+ u8 tx_buffer_size;
+ u8 tx_max_num_agreements;
+} __packed;
+
+struct hif_mib_gl_set_multi_msg {
+ u8 enable_multi_tx_conf:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+enum hif_cca_thr_mode {
+ HIF_CCA_THR_MODE_RELATIVE = 0x0,
+ HIF_CCA_THR_MODE_ABSOLUTE = 0x1
+};
+
+struct hif_mib_gl_cca_config {
+ u8 cca_thr_mode;
+ u8 reserved[3];
+} __packed;
+
+#define MAX_NUMBER_DATA_FILTERS 0xA
+
+#define MAX_NUMBER_IPV4_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_IPV6_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_MAC_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_UC_MC_BC_CONDITIONS 0x4
+#define MAX_NUMBER_ETHER_TYPE_CONDITIONS 0x4
+#define MAX_NUMBER_PORT_CONDITIONS 0x4
+#define MAX_NUMBER_MAGIC_CONDITIONS 0x4
+#define MAX_NUMBER_ARP_CONDITIONS 0x2
+#define MAX_NUMBER_NS_CONDITIONS 0x2
+
+struct hif_mib_ethertype_data_frame_condition {
+ u8 condition_idx;
+ u8 reserved;
+ u16 ether_type;
+} __packed;
+
+enum hif_udp_tcp_protocol {
+ HIF_PROTOCOL_UDP = 0x0,
+ HIF_PROTOCOL_TCP = 0x1,
+ HIF_PROTOCOL_BOTH_UDP_TCP = 0x2
+};
+
+enum hif_which_port {
+ HIF_PORT_DST = 0x0,
+ HIF_PORT_SRC = 0x1,
+ HIF_PORT_SRC_OR_DST = 0x2
+};
+
+struct hif_mib_ports_data_frame_condition {
+ u8 condition_idx;
+ u8 protocol;
+ u8 which_port;
+ u8 reserved1;
+ u16 port_number;
+ u8 reserved2[2];
+} __packed;
+
+#define HIF_API_MAGIC_PATTERN_SIZE 32
+
+struct hif_mib_magic_data_frame_condition {
+ u8 condition_idx;
+ u8 offset;
+ u8 magic_pattern_length;
+ u8 reserved;
+ u8 magic_pattern[HIF_API_MAGIC_PATTERN_SIZE];
+} __packed;
+
+enum hif_mac_addr_type {
+ HIF_MAC_ADDR_A1 = 0x0,
+ HIF_MAC_ADDR_A2 = 0x1,
+ HIF_MAC_ADDR_A3 = 0x2
+};
+
+struct hif_mib_mac_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_type;
+ u8 mac_address[ETH_ALEN];
+} __packed;
+
+enum hif_ip_addr_mode {
+ HIF_IP_ADDR_SRC = 0x0,
+ HIF_IP_ADDR_DST = 0x1
+};
+
+struct hif_mib_ipv4_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_mode;
+ u8 reserved[2];
+ u8 i_pv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_ipv6_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_mode;
+ u8 reserved[2];
+ u8 i_pv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+} __packed;
+
+union hif_addr_type {
+ u8 value;
+ struct {
+ u8 type_unicast:1;
+ u8 type_multicast:1;
+ u8 type_broadcast:1;
+ u8 reserved:5;
+ } bits;
+};
+
+struct hif_mib_uc_mc_bc_data_frame_condition {
+ u8 condition_idx;
+ union hif_addr_type param;
+ u8 reserved[2];
+} __packed;
+
+struct hif_mib_config_data_filter {
+ u8 filter_idx;
+ u8 enable;
+ u8 reserved1[2];
+ u8 eth_type_cond;
+ u8 port_cond;
+ u8 magic_cond;
+ u8 mac_cond;
+ u8 ipv4_cond;
+ u8 ipv6_cond;
+ u8 uc_mc_bc_cond;
+ u8 reserved2;
+} __packed;
+
+struct hif_mib_set_data_filtering {
+ u8 default_filter;
+ u8 enable;
+ u8 reserved[2];
+} __packed;
+
+enum hif_arp_ns_frame_treatment {
+ HIF_ARP_NS_FILTERING_DISABLE = 0x0,
+ HIF_ARP_NS_FILTERING_ENABLE = 0x1,
+ HIF_ARP_NS_REPLY_ENABLE = 0x2
+};
+
+struct hif_mib_arp_ip_addr_table {
+ u8 condition_idx;
+ u8 arp_enable;
+ u8 reserved[2];
+ u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_ns_ip_addr_table {
+ u8 condition_idx;
+ u8 ns_enable;
+ u8 reserved[2];
+ u8 ipv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_rx_filter {
+ u8 reserved1:1;
+ u8 bssid_filter:1;
+ u8 reserved2:1;
+ u8 fwd_probe_req:1;
+ u8 keep_alive_filter:1;
+ u8 reserved3:3;
+ u8 reserved4[3];
+} __packed;
+
+#define HIF_API_OUI_SIZE 3
+#define HIF_API_MATCH_DATA_SIZE 3
+
+struct hif_ie_table_entry {
+ u8 ie_id;
+ u8 has_changed:1;
+ u8 no_longer:1;
+ u8 has_appeared:1;
+ u8 reserved:1;
+ u8 num_match_data:4;
+ u8 oui[HIF_API_OUI_SIZE];
+ u8 match_data[HIF_API_MATCH_DATA_SIZE];
+} __packed;
+
+struct hif_mib_bcn_filter_table {
+ u32 num_of_info_elmts;
+ struct hif_ie_table_entry ie_table[];
+} __packed;
+
+enum hif_beacon_filter {
+ HIF_BEACON_FILTER_DISABLE = 0x0,
+ HIF_BEACON_FILTER_ENABLE = 0x1,
+ HIF_BEACON_FILTER_AUTO_ERP = 0x2
+};
+
+struct hif_mib_bcn_filter_enable {
+ u32 enable;
+ u32 bcn_count;
+} __packed;
+
+struct hif_mib_group_seq_counter {
+ u32 bits4716;
+ u16 bits1500;
+ u16 reserved;
+} __packed;
+
+struct hif_mib_tsf_counter {
+ u32 tsf_counterlo;
+ u32 tsf_counterhi;
+} __packed;
+
+struct hif_mib_stats_table {
+ s16 latest_snr;
+ u8 latest_rcpi;
+ s8 latest_rssi;
+} __packed;
+
+struct hif_mib_extended_count_table {
+ u32 count_plcp_errors;
+ u32 count_fcs_errors;
+ u32 count_tx_packets;
+ u32 count_rx_packets;
+ u32 count_rx_packet_errors;
+ u32 count_rx_decryption_failures;
+ u32 count_rx_mic_failures;
+ u32 count_rx_no_key_failures;
+ u32 count_tx_multicast_frames;
+ u32 count_tx_frames_success;
+ u32 count_tx_frame_failures;
+ u32 count_tx_frames_retried;
+ u32 count_tx_frames_multi_retried;
+ u32 count_rx_frame_duplicates;
+ u32 count_rts_success;
+ u32 count_rts_failures;
+ u32 count_ack_failures;
+ u32 count_rx_multicast_frames;
+ u32 count_rx_frames_success;
+ u32 count_rx_cmacicv_errors;
+ u32 count_rx_cmac_replays;
+ u32 count_rx_mgmt_ccmp_replays;
+ u32 count_rx_bipmic_errors;
+ u32 count_rx_beacon;
+ u32 count_miss_beacon;
+ u32 reserved[15];
+} __packed;
+
+struct hif_mib_count_table {
+ u32 count_plcp_errors;
+ u32 count_fcs_errors;
+ u32 count_tx_packets;
+ u32 count_rx_packets;
+ u32 count_rx_packet_errors;
+ u32 count_rx_decryption_failures;
+ u32 count_rx_mic_failures;
+ u32 count_rx_no_key_failures;
+ u32 count_tx_multicast_frames;
+ u32 count_tx_frames_success;
+ u32 count_tx_frame_failures;
+ u32 count_tx_frames_retried;
+ u32 count_tx_frames_multi_retried;
+ u32 count_rx_frame_duplicates;
+ u32 count_rts_success;
+ u32 count_rts_failures;
+ u32 count_ack_failures;
+ u32 count_rx_multicast_frames;
+ u32 count_rx_frames_success;
+ u32 count_rx_cmacicv_errors;
+ u32 count_rx_cmac_replays;
+ u32 count_rx_mgmt_ccmp_replays;
+ u32 count_rx_bipmic_errors;
+} __packed;
+
+struct hif_mib_max_tx_power_level {
+ s32 max_tx_power_level_rf_port1;
+ s32 max_tx_power_level_rf_port2;
+} __packed;
+
+struct hif_mib_beacon_stats {
+ s32 latest_tbtt_diff;
+ u32 reserved[4];
+} __packed;
+
+struct hif_mib_mac_address {
+ u8 mac_addr[ETH_ALEN];
+ u16 reserved;
+} __packed;
+
+struct hif_mib_dot11_max_transmit_msdu_lifetime {
+ u32 max_life_time;
+} __packed;
+
+struct hif_mib_dot11_max_receive_lifetime {
+ u32 max_life_time;
+} __packed;
+
+struct hif_mib_wep_default_key_id {
+ u8 wep_default_key_id;
+ u8 reserved[3];
+} __packed;
+
+struct hif_mib_dot11_rts_threshold {
+ u32 threshold;
+} __packed;
+
+struct hif_mib_slot_time {
+ u32 slot_time;
+} __packed;
+
+struct hif_mib_current_tx_power_level {
+ s32 power_level;
+} __packed;
+
+struct hif_mib_non_erp_protection {
+ u8 use_cts_to_self:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+enum hif_tx_mode {
+ HIF_TX_MODE_MIXED = 0x0,
+ HIF_TX_MODE_GREENFIELD = 0x1
+};
+
+enum hif_tmplt {
+ HIF_TMPLT_PRBREQ = 0x0,
+ HIF_TMPLT_BCN = 0x1,
+ HIF_TMPLT_NULL = 0x2,
+ HIF_TMPLT_QOSNUL = 0x3,
+ HIF_TMPLT_PSPOLL = 0x4,
+ HIF_TMPLT_PRBRES = 0x5,
+ HIF_TMPLT_ARP = 0x6,
+ HIF_TMPLT_NA = 0x7
+};
+
+#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700
+
+struct hif_mib_template_frame {
+ u8 frame_type;
+ u8 init_rate:7;
+ u8 mode:1;
+ u16 frame_length;
+ u8 frame[HIF_API_MAX_TEMPLATE_FRAME_SIZE];
+} __packed;
+
+struct hif_mib_beacon_wake_up_period {
+ u8 wakeup_period_min;
+ u8 receive_dtim:1;
+ u8 reserved1:7;
+ u8 wakeup_period_max;
+ u8 reserved2;
+} __packed;
+
+struct hif_mib_rcpi_rssi_threshold {
+ u8 detection:1;
+ u8 rcpi_rssi:1;
+ u8 upperthresh:1;
+ u8 lowerthresh:1;
+ u8 reserved:4;
+ u8 lower_threshold;
+ u8 upper_threshold;
+ u8 rolling_average_count;
+} __packed;
+
+#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16
+
+struct hif_mib_block_ack_policy {
+ u8 block_ack_tx_tid_policy;
+ u8 reserved1;
+ u8 block_ack_rx_tid_policy;
+ u8 block_ack_rx_max_buffer_size;
+} __packed;
+
+struct hif_mib_override_int_rate {
+ u8 internal_tx_rate;
+ u8 non_erp_internal_tx_rate;
+ u8 reserved[2];
+} __packed;
+
+enum hif_mpdu_start_spacing {
+ HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0,
+ HIF_MPDU_START_SPACING_QUARTER = 0x1,
+ HIF_MPDU_START_SPACING_HALF = 0x2,
+ HIF_MPDU_START_SPACING_ONE = 0x3,
+ HIF_MPDU_START_SPACING_TWO = 0x4,
+ HIF_MPDU_START_SPACING_FOUR = 0x5,
+ HIF_MPDU_START_SPACING_EIGHT = 0x6,
+ HIF_MPDU_START_SPACING_SIXTEEN = 0x7
+};
+
+struct hif_mib_set_association_mode {
+ u8 preambtype_use:1;
+ u8 mode:1;
+ u8 rateset:1;
+ u8 spacing:1;
+ u8 reserved:4;
+ u8 preamble_type;
+ u8 mixed_or_greenfield_type;
+ u8 mpdu_start_spacing;
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_mib_set_uapsd_information {
+ u8 trig_bckgrnd:1;
+ u8 trig_be:1;
+ u8 trig_video:1;
+ u8 trig_voice:1;
+ u8 reserved1:4;
+ u8 deliv_bckgrnd:1;
+ u8 deliv_be:1;
+ u8 deliv_video:1;
+ u8 deliv_voice:1;
+ u8 reserved2:4;
+ u16 min_auto_trigger_interval;
+ u16 max_auto_trigger_interval;
+ u16 auto_trigger_step;
+} __packed;
+
+struct hif_mib_tx_rate_retry_policy {
+ u8 policy_index;
+ u8 short_retry_count;
+ u8 long_retry_count;
+ u8 first_rate_sel:2;
+ u8 terminate:1;
+ u8 count_init:1;
+ u8 reserved1:4;
+ u8 rate_recovery_count;
+ u8 reserved2[3];
+ u8 rates[12];
+} __packed;
+
+#define HIF_MIB_NUM_TX_RATE_RETRY_POLICIES 15
+
+struct hif_mib_set_tx_rate_retry_policy {
+ u8 num_tx_rate_policies;
+ u8 reserved[3];
+ struct hif_mib_tx_rate_retry_policy tx_rate_retry_policy[];
+} __packed;
+
+struct hif_mib_protected_mgmt_policy {
+ u8 pmf_enable:1;
+ u8 unpmf_allowed:1;
+ u8 host_enc_auth_frames:1;
+ u8 reserved1:5;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_set_ht_protection {
+ u8 dual_cts_prot:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_keep_alive_period {
+ u16 keep_alive_period;
+ u8 reserved[2];
+} __packed;
+
+struct hif_mib_arp_keep_alive_period {
+ u16 arp_keep_alive_period;
+ u8 encr_type;
+ u8 reserved;
+ u8 sender_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+ u8 target_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_inactivity_timer {
+ u8 min_active_time;
+ u8 max_active_time;
+ u16 reserved;
+} __packed;
+
+struct hif_mib_interface_protection {
+ u8 use_cts_prot:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+#endif
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
new file mode 100644
index 000000000000..820de216be0c
--- /dev/null
+++ b/drivers/staging/wfx/hif_rx.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of chip-to-host event (aka indications) of WFxxx Split Mac
+ * (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+
+#include "hif_rx.h"
+#include "wfx.h"
+#include "scan.h"
+#include "bh.h"
+#include "sta.h"
+#include "data_rx.h"
+#include "secure_link.h"
+#include "hif_api_cmd.h"
+
+static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ // All confirm messages start with status
+ int status = le32_to_cpu(*((__le32 *) buf));
+ int cmd = hif->id;
+ int len = hif->len - 4; // drop header
+
+ WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+
+ if (!wdev->hif_cmd.buf_send) {
+ dev_warn(wdev->dev, "unexpected confirmation: 0x%.2x\n", cmd);
+ return -EINVAL;
+ }
+
+ if (cmd != wdev->hif_cmd.buf_send->id) {
+ dev_warn(wdev->dev,
+ "chip response mismatch request: 0x%.2x vs 0x%.2x\n",
+ cmd, wdev->hif_cmd.buf_send->id);
+ return -EINVAL;
+ }
+
+ if (wdev->hif_cmd.buf_recv) {
+ if (wdev->hif_cmd.len_recv >= len)
+ memcpy(wdev->hif_cmd.buf_recv, buf, len);
+ else
+ status = -ENOMEM;
+ }
+ wdev->hif_cmd.ret = status;
+
+ if (!wdev->hif_cmd.async) {
+ complete(&wdev->hif_cmd.done);
+ } else {
+ wdev->hif_cmd.buf_send = NULL;
+ mutex_unlock(&wdev->hif_cmd.lock);
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_unlock(&wdev->hif_cmd.key_renew_lock);
+ }
+ return status;
+}
+
+static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
+{
+ struct hif_cnf_tx *body = buf;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ if (!wvif)
+ return -EFAULT;
+
+ wfx_tx_confirm_cb(wvif, body);
+ return 0;
+}
+
+static int hif_multi_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_cnf_multi_transmit *body = buf;
+ struct hif_cnf_tx *buf_loc = (struct hif_cnf_tx *) &body->tx_conf_payload;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ int count = body->num_tx_confs;
+ int i;
+
+ WARN(count <= 0, "corrupted message");
+ WARN_ON(!wvif);
+ if (!wvif)
+ return -EFAULT;
+
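+ // Forward each of the num_tx_confs bundled TX confirmations to the callback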
+ for (i = 0; i < count; ++i) {
+ wfx_tx_confirm_cb(wvif, buf_loc);
+ buf_loc++;
+ }
+ return 0;
+}
+
+static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_startup *body = buf;
+
+ if (body->status || body->firmware_type > 4) {
+ dev_err(wdev->dev, "received invalid startup indication");
+ return -EINVAL;
+ }
+ memcpy(&wdev->hw_caps, body, sizeof(struct hif_ind_startup));
+ le32_to_cpus(&wdev->hw_caps.status);
+ le16_to_cpus(&wdev->hw_caps.hardware_id);
+ le16_to_cpus(&wdev->hw_caps.num_inp_ch_bufs);
+ le16_to_cpus(&wdev->hw_caps.size_inp_ch_buf);
+
+ complete(&wdev->firmware_ready);
+ return 0;
+}
+
+static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ if (!wdev->pdata.gpio_wakeup
+ || !gpiod_get_value(wdev->pdata.gpio_wakeup)) {
+ dev_warn(wdev->dev, "unexpected wake-up indication\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int hif_keys_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_sl_exchange_pub_keys *body = buf;
+
+ // Compatibility with legacy secure link
+ if (body->status == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ body->status = 0;
+ if (body->status)
+ dev_warn(wdev->dev, "secure link negociation error\n");
+ wfx_sl_check_pubkey(wdev, body->ncp_pub_key, body->ncp_pub_key_mac);
+ return 0;
+}
+
+static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf, struct sk_buff *skb)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_rx *body = buf;
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "ignore rx data for non-existent vif %d\n",
+ hif->interface);
+ return 0;
+ }
+ skb_pull(skb, sizeof(struct hif_msg) + sizeof(struct hif_ind_rx));
+ wfx_rx_cb(wvif, body, skb);
+
+ return 0;
+}
+
+static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_event *body = buf;
+ struct wfx_hif_event *event;
+ int first;
+
+ WARN_ON(!wvif);
+ if (!wvif)
+ return 0;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ memcpy(&event->evt, body, sizeof(struct hif_ind_event));
+ spin_lock(&wvif->event_queue_lock);
+ first = list_empty(&wvif->event_queue);
+ list_add_tail(&event->link, &wvif->event_queue);
+ spin_unlock(&wvif->event_queue_lock);
+
+ if (first)
+ schedule_work(&wvif->event_handler_work);
+
+ return 0;
+}
+
+static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ complete(&wvif->set_pm_mode_complete);
+
+ return 0;
+}
+
+static int hif_scan_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_scan_cmpl *body = buf;
+
+ WARN_ON(!wvif);
+ wfx_scan_complete_cb(wvif, body);
+
+ return 0;
+}
+
+static int hif_join_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ dev_warn(wdev->dev, "unattended JoinCompleteInd\n");
+
+ return 0;
+}
+
+static int hif_suspend_resume_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_suspend_resume_tx *body = buf;
+
+ WARN_ON(!wvif);
+ wfx_suspend_resume(wvif, body);
+
+ return 0;
+}
+
+static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_error *body = buf;
+ u8 *pRollback = (u8 *) body->data;
+ u32 *pStatus = (u32 *) body->data;
+
+ switch (body->type) {
+ case HIF_ERROR_FIRMWARE_ROLLBACK:
+ dev_err(wdev->dev,
+ "asynchronous error: firmware rollback error %d\n",
+ *pRollback);
+ break;
+ case HIF_ERROR_FIRMWARE_DEBUG_ENABLED:
+ dev_err(wdev->dev, "asynchronous error: firmware debug feature enabled\n");
+ break;
+ case HIF_ERROR_OUTDATED_SESSION_KEY:
+ dev_err(wdev->dev, "asynchronous error: secure link outdated key: %#.8x\n",
+ *pStatus);
+ break;
+ case HIF_ERROR_INVALID_SESSION_KEY:
+ dev_err(wdev->dev, "asynchronous error: invalid session key\n");
+ break;
+ case HIF_ERROR_OOR_VOLTAGE:
+ dev_err(wdev->dev, "asynchronous error: out-of-range overvoltage: %#.8x\n",
+ *pStatus);
+ break;
+ case HIF_ERROR_PDS_VERSION:
+ dev_err(wdev->dev,
+ "asynchronous error: wrong PDS payload or version: %#.8x\n",
+ *pStatus);
+ break;
+ default:
+ dev_err(wdev->dev, "asynchronous error: unknown (%d)\n",
+ body->type);
+ break;
+ }
+ return 0;
+}
+
+static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_generic *body = buf;
+
+ switch (body->indication_type) {
+ case HIF_GENERIC_INDICATION_TYPE_RAW:
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_STRING:
+ dev_info(wdev->dev, "firmware says: %s\n",
+ (char *) body->indication_data.raw_data);
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
+ mutex_lock(&wdev->rx_stats_lock);
+ // Older firmware sends a generic indication alongside RxStats
+ if (!wfx_api_older_than(wdev, 1, 4))
+ dev_info(wdev->dev, "Rx test ongoing. Temperature: %d°C\n",
+ body->indication_data.rx_stats.current_temp);
+ memcpy(&wdev->rx_stats, &body->indication_data.rx_stats,
+ sizeof(wdev->rx_stats));
+ mutex_unlock(&wdev->rx_stats_lock);
+ return 0;
+ default:
+ dev_err(wdev->dev,
+ "generic_indication: unknown indication type: %#.8x\n",
+ body->indication_type);
+ return -EIO;
+ }
+}
+
+static int hif_exception_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ size_t len = hif->len - 4; // drop header
+
+ dev_err(wdev->dev, "firmware exception\n");
+ print_hex_dump_bytes("Dump: ", DUMP_PREFIX_NONE, buf, len);
+ wdev->chip_frozen = 1;
+
+ return -1;
+}
+
+static const struct {
+ int msg_id;
+ int (*handler)(struct wfx_dev *wdev, struct hif_msg *hif, void *buf);
+} hif_handlers[] = {
+ /* Confirmations */
+ { HIF_CNF_ID_TX, hif_tx_confirm },
+ { HIF_CNF_ID_MULTI_TRANSMIT, hif_multi_tx_confirm },
+ /* Indications */
+ { HIF_IND_ID_STARTUP, hif_startup_indication },
+ { HIF_IND_ID_WAKEUP, hif_wakeup_indication },
+ { HIF_IND_ID_JOIN_COMPLETE, hif_join_complete_indication },
+ { HIF_IND_ID_SET_PM_MODE_CMPL, hif_pm_mode_complete_indication },
+ { HIF_IND_ID_SCAN_CMPL, hif_scan_complete_indication },
+ { HIF_IND_ID_SUSPEND_RESUME_TX, hif_suspend_resume_indication },
+ { HIF_IND_ID_SL_EXCHANGE_PUB_KEYS, hif_keys_indication },
+ { HIF_IND_ID_EVENT, hif_event_indication },
+ { HIF_IND_ID_GENERIC, hif_generic_indication },
+ { HIF_IND_ID_ERROR, hif_error_indication },
+ { HIF_IND_ID_EXCEPTION, hif_exception_indication },
+ // FIXME: allocate skb_p from hif_receive_indication and make it generic
+ //{ HIF_IND_ID_RX, hif_receive_indication },
+};
+
+void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ int i;
+ struct hif_msg *hif = (struct hif_msg *) skb->data;
+ int hif_id = hif->id;
+
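+ // Dispatch order: RX data frames first, then command confirmations, then
+ // the handlers from the hif_handlers table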
+ if (hif_id == HIF_IND_ID_RX) {
+ // hif_receive_indication takes care of the skb lifetime
+ hif_receive_indication(wdev, hif, hif->body, skb);
+ return;
+ }
+ // Note: mutex_is_locked() causes an implicit memory barrier that
+ // protects buf_send
+ if (mutex_is_locked(&wdev->hif_cmd.lock)
+ && wdev->hif_cmd.buf_send
+ && wdev->hif_cmd.buf_send->id == hif_id) {
+ hif_generic_confirm(wdev, hif, hif->body);
+ goto free;
+ }
+ for (i = 0; i < ARRAY_SIZE(hif_handlers); i++) {
+ if (hif_handlers[i].msg_id == hif_id) {
+ if (hif_handlers[i].handler)
+ hif_handlers[i].handler(wdev, hif, hif->body);
+ goto free;
+ }
+ }
+ dev_err(wdev->dev, "unsupported HIF ID %02x\n", hif_id);
+free:
+ dev_kfree_skb(skb);
+}
diff --git a/drivers/staging/wfx/hif_rx.h b/drivers/staging/wfx/hif_rx.h
new file mode 100644
index 000000000000..f07c10c8c6bd
--- /dev/null
+++ b/drivers/staging/wfx/hif_rx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of chip-to-host event (aka indications) of WFxxx Split Mac
+ * (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_RX_H
+#define WFX_HIF_RX_H
+
+struct wfx_dev;
+struct sk_buff;
+
+void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
new file mode 100644
index 000000000000..cb7cddcb9815
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of host-to-chip commands (aka request/confirmation) of WFxxx
+ * Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+
+#include "hif_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "hwio.h"
+#include "debug.h"
+#include "sta.h"
+
+void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd)
+{
+ init_completion(&hif_cmd->ready);
+ init_completion(&hif_cmd->done);
+ mutex_init(&hif_cmd->lock);
+ mutex_init(&hif_cmd->key_renew_lock);
+}
+
+static void wfx_fill_header(struct hif_msg *hif, int if_id, unsigned int cmd,
+ size_t size)
+{
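+ // Callers pass -1 when the command is not bound to a particular vif; such
+ // commands are sent on interface 2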
+ if (if_id == -1)
+ if_id = 2;
+
+ WARN(cmd > 0x3f, "invalid WSM command %#.2x", cmd);
+ WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size);
+ WARN(if_id > 0x3, "invalid interface ID %d", if_id);
+
+ hif->len = cpu_to_le16(size + 4);
+ hif->id = cmd;
+ hif->interface = if_id;
+}
+
+static void *wfx_alloc_hif(size_t body_len, struct hif_msg **hif)
+{
+ *hif = kzalloc(sizeof(struct hif_msg) + body_len, GFP_KERNEL);
+ if (*hif)
+ return (*hif)->body;
+ else
+ return NULL;
+}
+
+int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
+ size_t reply_len, bool async)
+{
+ const char *mib_name = "";
+ const char *mib_sep = "";
+ int cmd = request->id;
+ int vif = request->interface;
+ int ret;
+
+ WARN(wdev->hif_cmd.buf_recv && wdev->hif_cmd.async, "API usage error");
+
+ // Do not wait for any reply if chip is frozen
+ if (wdev->chip_frozen)
+ return -ETIMEDOUT;
+
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_lock(&wdev->hif_cmd.key_renew_lock);
+
+ mutex_lock(&wdev->hif_cmd.lock);
+ WARN(wdev->hif_cmd.buf_send, "data locking error");
+
+ // Note: call to complete() below has an implicit memory barrier that
+ // hopefully protects buf_send
+ wdev->hif_cmd.buf_send = request;
+ wdev->hif_cmd.buf_recv = reply;
+ wdev->hif_cmd.len_recv = reply_len;
+ wdev->hif_cmd.async = async;
+ complete(&wdev->hif_cmd.ready);
+
+ wfx_bh_request_tx(wdev);
+
+ // NOTE: no timeout is caught when async is enabled
+ if (async)
+ return 0;
+
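+ // Wait up to 1s for the confirmation; if the chip is slow, complain and
+ // allow 3 more seconds before declaring it frozen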
+ ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ);
+ if (!ret) {
+ dev_err(wdev->dev, "chip is abnormally long to answer\n");
+ reinit_completion(&wdev->hif_cmd.ready);
+ ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 3 * HZ);
+ }
+ if (!ret) {
+ dev_err(wdev->dev, "chip did not answer\n");
+ wfx_pending_dump_old_frames(wdev, 3000);
+ wdev->chip_frozen = 1;
+ reinit_completion(&wdev->hif_cmd.done);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = wdev->hif_cmd.ret;
+ }
+
+ wdev->hif_cmd.buf_send = NULL;
+ mutex_unlock(&wdev->hif_cmd.lock);
+
+ if (ret &&
+ (cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) {
+ mib_name = get_mib_name(((u16 *) request)[2]);
+ mib_sep = "/";
+ }
+ if (ret < 0)
+ dev_err(wdev->dev,
+ "WSM request %s%s%s (%#.2x) on vif %d returned error %d\n",
+ get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
+ if (ret > 0)
+ dev_warn(wdev->dev,
+ "WSM request %s%s%s (%#.2x) on vif %d returned status %d\n",
+ get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
+
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_unlock(&wdev->hif_cmd.key_renew_lock);
+ return ret;
+}
+
+// This function is special. After HIF_REQ_ID_SHUT_DOWN, chip won't reply to any
+// request anymore. We need to slightly hack struct wfx_hif_cmd for that job. Be
+// careful to only call this function during device unregister.
+int hif_shutdown(struct wfx_dev *wdev)
+{
+ int ret;
+ struct hif_msg *hif;
+
+ wfx_alloc_hif(0, &hif);
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, true);
+ // After this command, chip won't reply. Be sure to give enough time to
+ // bh to send buffer:
+ msleep(100);
+ wdev->hif_cmd.buf_send = NULL;
+ if (wdev->pdata.gpio_wakeup)
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
+ else
+ control_reg_write(wdev, 0);
+ mutex_unlock(&wdev->hif_cmd.lock);
+ kfree(hif);
+ return ret;
+}
+
+int hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len)
+{
+ int ret;
+ size_t buf_len = sizeof(struct hif_req_configuration) + len;
+ struct hif_msg *hif;
+ struct hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif);
+
+ body->length = cpu_to_le16(len);
+ memcpy(body->pds_data, conf, len);
+ wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_reset(struct wfx_vif *wvif, bool reset_stat)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ body->reset_flags.reset_stat = reset_stat;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
+ size_t val_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_cnf_read_mib) + val_len;
+ struct hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif);
+ struct hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL);
+
+ body->mib_id = cpu_to_le16(mib_id);
+ wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, reply, buf_len, false);
+
+ if (!ret && mib_id != reply->mib_id) {
+ dev_warn(wdev->dev,
+ "%s: confirmation mismatch request\n", __func__);
+ ret = -EIO;
+ }
+ if (ret == -ENOMEM)
+ dev_err(wdev->dev,
+ "buffer is too small to receive %s (%zu < %d)\n",
+ get_mib_name(mib_id), val_len, reply->length);
+ if (!ret)
+ memcpy(val, &reply->mib_data, reply->length);
+ else
+ memset(val, 0xFF, val_len);
+ kfree(hif);
+ kfree(reply);
+ return ret;
+}
+
+int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
+ size_t val_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_req_write_mib) + val_len;
+ struct hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif);
+
+ body->mib_id = cpu_to_le16(mib_id);
+ body->length = cpu_to_le16(val_len);
+ memcpy(&body->mib_data, val, val_len);
+ wfx_fill_header(hif, vif_id, HIF_REQ_ID_WRITE_MIB, buf_len);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg)
+{
+ int ret, i;
+ struct hif_msg *hif;
+ struct hif_ssid_def *ssids;
+ size_t buf_len = sizeof(struct hif_req_start_scan) +
+ arg->scan_req.num_of_channels * sizeof(u8) +
+ arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
+ struct hif_req_start_scan *body = wfx_alloc_hif(buf_len, &hif);
+ u8 *ptr = (u8 *) body + sizeof(*body);
+
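+ // The fixed part of the request is followed by the SSID definitions and
+ // then by the channel list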
+ WARN(arg->scan_req.num_of_channels > HIF_API_MAX_NB_CHANNELS, "invalid params");
+ WARN(arg->scan_req.num_of_ssi_ds > 2, "invalid params");
+ WARN(arg->scan_req.band > 1, "invalid params");
+
+ // FIXME: This API is unnecessarily complex, fixing NumOfChannels and
+ // adding a member SsidDef at end of struct hif_req_start_scan would
+ // simplify that a lot.
+ memcpy(body, &arg->scan_req, sizeof(*body));
+ cpu_to_le32s(&body->min_channel_time);
+ cpu_to_le32s(&body->max_channel_time);
+ cpu_to_le32s(&body->tx_power_level);
+ memcpy(ptr, arg->ssids,
+ arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def));
+ ssids = (struct hif_ssid_def *) ptr;
+ for (i = 0; i < body->num_of_ssi_ds; ++i)
+ cpu_to_le32s(&ssids[i].ssid_length);
+ ptr += arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
+ memcpy(ptr, arg->ch, arg->scan_req.num_of_channels * sizeof(u8));
+ ptr += arg->scan_req.num_of_channels * sizeof(u8);
+ WARN(buf_len != ptr - (u8 *) body, "allocation size mismatch");
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_stop_scan(struct wfx_vif *wvif)
+{
+ int ret;
+ struct hif_msg *hif;
+ // body associated to HIF_REQ_ID_STOP_SCAN is empty
+ wfx_alloc_hif(0, &hif);
+
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(struct hif_req_join));
+ cpu_to_le16s(&body->channel_number);
+ cpu_to_le16s(&body->atim_window);
+ cpu_to_le32s(&body->ssid_length);
+ cpu_to_le32s(&body->beacon_interval);
+ cpu_to_le32s(&body->basic_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_bss_params(struct wfx_vif *wvif,
+ const struct hif_req_set_bss_params *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->aid);
+ cpu_to_le32s(&body->operational_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ // FIXME: only send necessary bits
+ struct hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ // FIXME: swap bytes as necessary in body
+ memcpy(body, arg, sizeof(*body));
+ if (wfx_api_older_than(wdev, 1, 5))
+ // Legacy firmware expects add_key to be sent on the right
+ // interface.
+ wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY,
+ sizeof(*body));
+ else
+ wfx_fill_header(hif, -1, HIF_REQ_ID_ADD_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_remove_key(struct wfx_dev *wdev, int idx)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ body->entry_index = idx;
+ wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_edca_queue_params(struct wfx_vif *wvif,
+ const struct hif_req_edca_queue_params *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ // NOTE: queue numbering is not the same between WFx and Linux
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->cw_min);
+ cpu_to_le16s(&body->cw_max);
+ cpu_to_le16s(&body->tx_op_limit);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->channel_number);
+ cpu_to_le32s(&body->beacon_interval);
+ cpu_to_le32s(&body->basic_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable_beaconing)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ body->enable_beaconing = enable_beaconing ? 1 : 0;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (mac_addr)
+ ether_addr_copy(body->mac_addr, mac_addr);
+ body->map_link_flags = *(struct hif_map_link_flags *) &flags;
+ body->peer_sta_id = sta_id;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
+ const u8 *ies, size_t ies_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_req_update_ie) + ies_len;
+ struct hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
+
+ memcpy(&body->ie_flags, target_frame, sizeof(struct hif_ie_flags));
+ body->num_i_es = cpu_to_le16(1);
+ memcpy(body->ie, ies, ies_len);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_sl_send_pub_keys(struct wfx_dev *wdev, const uint8_t *pubkey,
+ const uint8_t *pubkey_hmac)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_sl_exchange_pub_keys *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ body->algorithm = HIF_SL_CURVE25519;
+ memcpy(body->host_pub_key, pubkey, sizeof(body->host_pub_key));
+ memcpy(body->host_pub_key_mac, pubkey_hmac,
+ sizeof(body->host_pub_key_mac));
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ // Compatibility with legacy secure link
+ if (ret == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ ret = 0;
+ return ret;
+}
+
+int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_sl_configure *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body->encr_bmp, bitmap, sizeof(body->encr_bmp));
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SL_CONFIGURE, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
+ int destination)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_sl_mac_key *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ memcpy(body->key_value, slk_key, sizeof(body->key_value));
+ body->otp_or_ram = destination;
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SET_SL_MAC_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ // Compatibility with legacy secure link
+ if (ret == SL_MAC_KEY_STATUS_SUCCESS)
+ ret = 0;
+ return ret;
+}
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
new file mode 100644
index 000000000000..f61ae7b0d41c
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of host-to-chip commands (aka request/confirmation) of WFxxx
+ * Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_TX_H
+#define WFX_HIF_TX_H
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
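+/*
+ * Gathers the firmware scan request together with its variable-length tails
+ * (SSID definitions and channel list); hif_scan() serializes the three parts
+ * into a single HIF message.
+ */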
+struct wfx_scan_params {
+ struct hif_req_start_scan scan_req;
+ struct hif_ssid_def *ssids;
+ u8 *ch;
+};
+
+struct wfx_hif_cmd {
+ struct mutex lock;
+ struct mutex key_renew_lock;
+ struct completion ready;
+ struct completion done;
+ bool async;
+ struct hif_msg *buf_send;
+ void *buf_recv;
+ size_t len_recv;
+ int ret;
+};
+
+void wfx_init_hif_cmd(struct wfx_hif_cmd *wfx_hif_cmd);
+int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
+ void *reply, size_t reply_len, bool async);
+
+int hif_shutdown(struct wfx_dev *wdev);
+int hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len);
+int hif_reset(struct wfx_vif *wvif, bool reset_stat);
+int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *buf, size_t buf_size);
+int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *buf, size_t buf_size);
+int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg);
+int hif_stop_scan(struct wfx_vif *wvif);
+int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg);
+int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int hif_set_bss_params(struct wfx_vif *wvif,
+ const struct hif_req_set_bss_params *arg);
+int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg);
+int hif_remove_key(struct wfx_dev *wdev, int idx);
+int hif_set_edca_queue_params(struct wfx_vif *wvif,
+ const struct hif_req_edca_queue_params *arg);
+int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg);
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
+int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
+int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
+ const u8 *ies, size_t ies_len);
+int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
+ int destination);
+int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
+int hif_sl_send_pub_keys(struct wfx_dev *wdev,
+ const u8 *pubkey, const u8 *pubkey_hmac);
+
+#endif
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
new file mode 100644
index 000000000000..bb091e395ff5
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of host-to-chip MIBs of WFxxx Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_TX_MIB_H
+#define WFX_HIF_TX_MIB_H
+
+#include <linux/etherdevice.h>
+
+#include "wfx.h"
+#include "hif_tx.h"
+#include "hif_api_mib.h"
+
+static inline int hif_set_output_power(struct wfx_vif *wvif, int power_level)
+{
+ __le32 val = cpu_to_le32(power_level);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval,
+ unsigned int listen_interval)
+{
+ struct hif_mib_beacon_wake_up_period val = {
+ .wakeup_period_min = dtim_interval,
+ .receive_dtim = 0,
+ .wakeup_period_max = cpu_to_le16(listen_interval),
+ };
+
+ if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
+ return -EINVAL;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
+ struct hif_mib_rcpi_rssi_threshold *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD, arg, sizeof(*arg));
+}
+
+static inline int hif_get_counters_table(struct wfx_dev *wdev,
+ struct hif_mib_extended_count_table *arg)
+{
+ if (wfx_api_older_than(wdev, 1, 3)) {
+ // extended_count_table is wider than count_table
+ memset(arg, 0xFF, sizeof(*arg));
+ return hif_read_mib(wdev, 0, HIF_MIB_ID_COUNTERS_TABLE,
+ arg, sizeof(struct hif_mib_count_table));
+ } else {
+ return hif_read_mib(wdev, 0,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, arg,
+ sizeof(struct hif_mib_extended_count_table));
+ }
+}
+
+static inline int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
+{
+ struct hif_mib_mac_address msg = { };
+
+ if (mac)
+ ether_addr_copy(msg.mac_addr, mac);
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
+ &msg, sizeof(msg));
+}
+
+static inline int hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid,
+ bool fwd_probe_req)
+{
+ struct hif_mib_rx_filter val = { };
+
+ if (filter_bssid)
+ val.bssid_filter = 1;
+ if (fwd_probe_req)
+ val.fwd_probe_req = 1;
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_beacon_filter_table(struct wfx_vif *wvif,
+ int tbl_len,
+ struct hif_ie_table_entry *tbl)
+{
+ int ret;
+ struct hif_mib_bcn_filter_table *val;
+ int buf_len = struct_size(val, ie_table, tbl_len);
+
+ val = kzalloc(buf_len, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+ val->num_of_info_elmts = cpu_to_le32(tbl_len);
+ memcpy(val->ie_table, tbl, tbl_len * sizeof(*tbl));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_TABLE, val, buf_len);
+ kfree(val);
+ return ret;
+}
+
+static inline int hif_beacon_filter_control(struct wfx_vif *wvif,
+ int enable, int beacon_count)
+{
+ struct hif_mib_bcn_filter_enable arg = {
+ .enable = cpu_to_le32(enable),
+ .bcn_count = cpu_to_le32(beacon_count),
+ };
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_operational_mode(struct wfx_dev *wdev,
+ enum hif_op_power_mode mode)
+{
+ struct hif_mib_gl_operational_power_mode val = {
+ .power_mode = mode,
+ .wup_ind_activation = 1,
+ };
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_template_frame(struct wfx_vif *wvif,
+ struct hif_mib_template_frame *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
+{
+ struct hif_mib_protected_mgmt_policy val = { };
+
+ WARN(required && !capable, "incoherent arguments");
+ if (capable) {
+ val.pmf_enable = 1;
+ val.host_enc_auth_frames = 1;
+ }
+ if (!required)
+ val.unpmf_allowed = 1;
+ cpu_to_le32s((u32 *) &val);
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
+ u8 tx_tid_policy, u8 rx_tid_policy)
+{
+ struct hif_mib_block_ack_policy val = {
+ .block_ack_tx_tid_policy = tx_tid_policy,
+ .block_ack_rx_tid_policy = rx_tid_policy,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_association_mode(struct wfx_vif *wvif,
+ struct hif_mib_set_association_mode *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE, arg, sizeof(*arg));
+}
+
+static inline int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
+ struct hif_mib_set_tx_rate_retry_policy *arg)
+{
+ size_t size = struct_size(arg, tx_rate_retry_policy,
+ arg->num_tx_rate_policies);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+}
+
+static inline int hif_set_mac_addr_condition(struct wfx_vif *wvif,
+ struct hif_mib_mac_addr_data_frame_condition *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
+ struct hif_mib_uc_mc_bc_data_frame_condition *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_config_data_filter(struct wfx_vif *wvif,
+ struct hif_mib_config_data_filter *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CONFIG_DATA_FILTER, arg, sizeof(*arg));
+}
+
+static inline int hif_set_data_filtering(struct wfx_vif *wvif,
+ struct hif_mib_set_data_filtering *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_DATA_FILTERING, arg, sizeof(*arg));
+}
+
+static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
+{
+ struct hif_mib_keep_alive_period arg = {
+ .keep_alive_period = cpu_to_le16(period),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif,
+ struct hif_mib_arp_ip_addr_table *fp)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+ fp, sizeof(*fp));
+}
+
+static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev,
+ bool enabled)
+{
+ __le32 arg = enabled ? cpu_to_le32(1) : 0;
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_uapsd_info(struct wfx_vif *wvif,
+ struct hif_mib_set_uapsd_information *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
+{
+ __le32 arg = enable ? cpu_to_le32(1) : 0;
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
+}
+
+static inline int hif_slot_time(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val);
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
+{
+ struct hif_mib_set_ht_protection arg = {
+ .dual_cts_prot = val,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_HT_PROTECTION,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_rts_threshold(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val > 0 ? val : 0xFFFF);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
+}
+
+#endif
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
new file mode 100644
index 000000000000..47e04c59ed93
--- /dev/null
+++ b/drivers/staging/wfx/hwio.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level I/O functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "hwio.h"
+#include "wfx.h"
+#include "bus.h"
+#include "traces.h"
+
+/*
+ * Internal helpers.
+ *
+ * About CONFIG_VMAP_STACK:
+ * When CONFIG_VMAP_STACK is enabled, it is not possible to run DMA on
+ * stack-allocated data. Functions below that work with registers (the ones
+ * ending with "32") automatically reallocate buffers with kmalloc. However,
+ * functions that work with arbitrary-length buffers leave the memory
+ * location up to the caller. If in doubt, enable CONFIG_DEBUG_SG to detect
+ * badly located buffers.
+ */
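+
+/*
+ * For instance (a sketch only), a caller of the arbitrary-length helpers is
+ * expected to provide a DMA-able buffer:
+ *
+ *   u8 *buf = kmalloc(len, GFP_KERNEL);  // heap memory is DMA-able
+ *   sram_buf_read(wdev, addr, buf, len);
+ *
+ * while a stack array (u8 buf[64];) may break with CONFIG_VMAP_STACK.
+ */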
+
+static int read32(struct wfx_dev *wdev, int reg, u32 *val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ *val = ~0; // Never return undefined value
+ if (!tmp)
+ return -ENOMEM;
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp,
+ sizeof(u32));
+ if (ret >= 0)
+ *val = le32_to_cpu(*tmp);
+ kfree(tmp);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static int write32(struct wfx_dev *wdev, int reg, u32 val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ *tmp = cpu_to_le32(val);
+ ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, tmp,
+ sizeof(u32));
+ kfree(tmp);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static int read32_locked(struct wfx_dev *wdev, int reg, u32 *val)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = read32(wdev, reg, val);
+ _trace_io_read32(reg, *val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int write32_locked(struct wfx_dev *wdev, int reg, u32 val)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = write32(wdev, reg, val);
+ _trace_io_write32(reg, val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int write32_bits_locked(struct wfx_dev *wdev, int reg, u32 mask, u32 val)
+{
+ int ret;
+ u32 val_r, val_w;
+
+ WARN_ON(~mask & val);
+ val &= mask;
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = read32(wdev, reg, &val_r);
+ _trace_io_read32(reg, val_r);
+ if (ret < 0)
+ goto err;
+ val_w = (val_r & ~mask) | val;
+ if (val_w != val_r) {
+ ret = write32(wdev, reg, val_w);
+ _trace_io_write32(reg, val_w);
+ }
+err:
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf,
+ size_t len)
+{
+ int ret;
+ int i;
+ u32 cfg;
+ u32 prefetch;
+
+ WARN_ON(len >= 0x2000);
+ WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
+
+ if (reg == WFX_REG_AHB_DPORT)
+ prefetch = CFG_PREFETCH_AHB;
+ else if (reg == WFX_REG_SRAM_DPORT)
+ prefetch = CFG_PREFETCH_SRAM;
+ else
+ return -ENODEV;
+
+ ret = write32(wdev, WFX_REG_BASE_ADDR, addr);
+ if (ret < 0)
+ goto err;
+
+ ret = read32(wdev, WFX_REG_CONFIG, &cfg);
+ if (ret < 0)
+ goto err;
+
+ ret = write32(wdev, WFX_REG_CONFIG, cfg | prefetch);
+ if (ret < 0)
+ goto err;
+
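+ // The device clears the prefetch bit once the requested window is
+ // available through the data port; poll for that before reading.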
+ for (i = 0; i < 20; i++) {
+ ret = read32(wdev, WFX_REG_CONFIG, &cfg);
+ if (ret < 0)
+ goto err;
+ if (!(cfg & prefetch))
+ break;
+ udelay(200);
+ }
+ if (i == 20) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, buf, len);
+
+err:
+ if (ret < 0)
+ memset(buf, 0xFF, len); // Never return undefined value
+ return ret;
+}
+
+static int indirect_write(struct wfx_dev *wdev, int reg, u32 addr,
+ const void *buf, size_t len)
+{
+ int ret;
+
+ WARN_ON(len >= 0x2000);
+ WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
+ ret = write32(wdev, WFX_REG_BASE_ADDR, addr);
+ if (ret < 0)
+ return ret;
+
+ return wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, buf, len);
+}
+
+static int indirect_read_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ void *buf, size_t len)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_read(wdev, reg, addr, buf, len);
+ _trace_io_ind_read(reg, addr, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ const void *buf, size_t len)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_write(wdev, reg, addr, buf, len);
+ _trace_io_ind_write(reg, addr, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ u32 *val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_read(wdev, reg, addr, tmp, sizeof(u32));
+ *val = le32_to_cpu(*tmp);
+ _trace_io_ind_read32(reg, addr, *val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ kfree(tmp);
+ return ret;
+}
+
+static int indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ u32 val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ *tmp = cpu_to_le32(val);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_write(wdev, reg, addr, tmp, sizeof(u32));
+ _trace_io_ind_write32(reg, addr, val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ kfree(tmp);
+ return ret;
+}
+
+int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len)
+{
+ int ret;
+
+ WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv,
+ WFX_REG_IN_OUT_QUEUE, buf, len);
+ _trace_io_read(WFX_REG_IN_OUT_QUEUE, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len)
+{
+ int ret;
+
+ WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv,
+ WFX_REG_IN_OUT_QUEUE, buf, len);
+ _trace_io_write(WFX_REG_IN_OUT_QUEUE, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+int sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
+{
+ return indirect_read_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
+}
+
+int ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
+{
+ return indirect_read_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
+}
+
+int sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
+{
+ return indirect_write_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
+}
+
+int ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
+{
+ return indirect_write_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
+}
+
+int sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
+{
+ return indirect_read32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
+}
+
+int ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
+{
+ return indirect_read32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
+}
+
+int sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
+{
+ return indirect_write32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
+}
+
+int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
+{
+ return indirect_write32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
+}
+
+int config_reg_read(struct wfx_dev *wdev, u32 *val)
+{
+ return read32_locked(wdev, WFX_REG_CONFIG, val);
+}
+
+int config_reg_write(struct wfx_dev *wdev, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_CONFIG, val);
+}
+
+int config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
+{
+ return write32_bits_locked(wdev, WFX_REG_CONFIG, mask, val);
+}
+
+int control_reg_read(struct wfx_dev *wdev, u32 *val)
+{
+ return read32_locked(wdev, WFX_REG_CONTROL, val);
+}
+
+int control_reg_write(struct wfx_dev *wdev, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_CONTROL, val);
+}
+
+int control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
+{
+ return write32_bits_locked(wdev, WFX_REG_CONTROL, mask, val);
+}
+
+int igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val)
+{
+ int ret;
+
+ *val = ~0; // Never return undefined value
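+ // Indirect access: write the register index with the read/write bit set,
+ // then read the same register back to retrieve the 24-bit value.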
+ ret = write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24);
+ if (ret)
+ return ret;
+ ret = read32_locked(wdev, WFX_REG_SET_GEN_R_W, val);
+ if (ret)
+ return ret;
+ *val &= IGPR_VALUE;
+ return ret;
+}
+
+int igpr_reg_write(struct wfx_dev *wdev, int index, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_SET_GEN_R_W, index << 24 | val);
+}
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
new file mode 100644
index 000000000000..b2c1a66de963
--- /dev/null
+++ b/drivers/staging/wfx/hwio.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Low-level API.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_HWIO_H
+#define WFX_HWIO_H
+
+#include <linux/types.h>
+
+struct wfx_dev;
+
+int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len);
+int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t buf_len);
+
+int sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len);
+int sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len);
+
+int ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len);
+int ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len);
+
+int sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
+int sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
+
+int ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
+int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
+
+#define CFG_ERR_SPI_FRAME 0x00000001 // only with SPI
+#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 // only with SDIO
+#define CFG_ERR_BUF_UNDERRUN 0x00000002
+#define CFG_ERR_DATA_IN_TOO_LARGE 0x00000004
+#define CFG_ERR_HOST_NO_OUT_QUEUE 0x00000008
+#define CFG_ERR_BUF_OVERRUN 0x00000010
+#define CFG_ERR_DATA_OUT_TOO_LARGE 0x00000020
+#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040
+#define CFG_ERR_HOST_CRC_MISS 0x00000080 // only with SDIO
+#define CFG_SPI_IGNORE_CS 0x00000080 // only with SPI
+/* Bytes ordering (only writable in SPI): */
+#define CFG_WORD_MODE_MASK 0x00000300
+/*
+ * B1,B0,B3,B2 (In SPI, register address and
+ * CONFIG data always use this mode)
+ */
+#define CFG_WORD_MODE0 0x00000000
+#define CFG_WORD_MODE1 0x00000100 // B3,B2,B1,B0
+#define CFG_WORD_MODE2 0x00000200 // B0,B1,B2,B3 (SDIO)
+#define CFG_DIRECT_ACCESS_MODE 0x00000400 // Direct or queue access mode
+#define CFG_PREFETCH_AHB 0x00000800
+#define CFG_DISABLE_CPU_CLK 0x00001000
+#define CFG_PREFETCH_SRAM 0x00002000
+#define CFG_CPU_RESET 0x00004000
+#define CFG_SDIO_DISABLE_IRQ 0x00008000 // only with SDIO
+#define CFG_IRQ_ENABLE_DATA 0x00010000
+#define CFG_IRQ_ENABLE_WRDY 0x00020000
+#define CFG_CLK_RISE_EDGE 0x00040000
+#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 // only with SDIO
+#define CFG_RESERVED 0x00F00000
+#define CFG_DEVICE_ID_MAJOR 0x07000000
+#define CFG_DEVICE_ID_RESERVED 0x78000000
+#define CFG_DEVICE_ID_TYPE 0x80000000
+int config_reg_read(struct wfx_dev *wdev, u32 *val);
+int config_reg_write(struct wfx_dev *wdev, u32 val);
+int config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
+
+#define CTRL_NEXT_LEN_MASK 0x00000FFF
+#define CTRL_WLAN_WAKEUP 0x00001000
+#define CTRL_WLAN_READY 0x00002000
+int control_reg_read(struct wfx_dev *wdev, u32 *val);
+int control_reg_write(struct wfx_dev *wdev, u32 val);
+int control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
+
+#define IGPR_RW 0x80000000
+#define IGPR_INDEX 0x7F000000
+#define IGPR_VALUE 0x00FFFFFF
+int igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val);
+int igpr_reg_write(struct wfx_dev *wdev, int index, u32 val);
+
+#endif /* WFX_HWIO_H */
diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c
new file mode 100644
index 000000000000..96adfa330604
--- /dev/null
+++ b/drivers/staging/wfx/key.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Key management related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "key.h"
+#include "wfx.h"
+#include "hif_tx_mib.h"
+
+static int wfx_alloc_key(struct wfx_dev *wdev)
+{
+ int idx;
+
+ idx = ffs(~wdev->key_map) - 1;
+ if (idx < 0 || idx >= MAX_KEY_ENTRIES)
+ return -1;
+
+ wdev->key_map |= BIT(idx);
+ wdev->keys[idx].entry_index = idx;
+ return idx;
+}
+
+static void wfx_free_key(struct wfx_dev *wdev, int idx)
+{
+ WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation");
+ memset(&wdev->keys[idx], 0, sizeof(wdev->keys[idx]));
+ wdev->key_map &= ~BIT(idx);
+}
+
+static u8 fill_wep_pair(struct hif_wep_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
+ msg->key_length = key->keylen;
+ memcpy(msg->key_data, key->key, key->keylen);
+ ether_addr_copy(msg->peer_address, peer_addr);
+ return HIF_KEY_TYPE_WEP_PAIRWISE;
+}
+
+static u8 fill_wep_group(struct hif_wep_group_key *msg,
+ struct ieee80211_key_conf *key)
+{
+ WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
+ msg->key_id = key->keyidx;
+ msg->key_length = key->keylen;
+ memcpy(msg->key_data, key->key, key->keylen);
+ return HIF_KEY_TYPE_WEP_DEFAULT;
+}
+
+static u8 fill_tkip_pair(struct hif_tkip_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->tkip_key_data)
+ + sizeof(msg->tx_mic_key)
+ + sizeof(msg->rx_mic_key), "inconsistent data");
+ memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
+ keybuf += sizeof(msg->tkip_key_data);
+ memcpy(msg->tx_mic_key, keybuf, sizeof(msg->tx_mic_key));
+ keybuf += sizeof(msg->tx_mic_key);
+ memcpy(msg->rx_mic_key, keybuf, sizeof(msg->rx_mic_key));
+ ether_addr_copy(msg->peer_address, peer_addr);
+ return HIF_KEY_TYPE_TKIP_PAIRWISE;
+}
+
+static u8 fill_tkip_group(struct hif_tkip_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq,
+ enum nl80211_iftype iftype)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->tkip_key_data)
+ + 2 * sizeof(msg->rx_mic_key), "inconsistent data");
+ msg->key_id = key->keyidx;
+ memcpy(msg->rx_sequence_counter,
+ &seq->tkip.iv16, sizeof(seq->tkip.iv16));
+ memcpy(msg->rx_sequence_counter + sizeof(u16),
+ &seq->tkip.iv32, sizeof(seq->tkip.iv32));
+ memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
+ keybuf += sizeof(msg->tkip_key_data);
+ if (iftype == NL80211_IFTYPE_AP)
+ // Use Tx MIC Key
+ memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key));
+ else
+ // Use Rx MIC Key
+ memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key));
+ return HIF_KEY_TYPE_TKIP_GROUP;
+}
+
+static u8 fill_ccmp_pair(struct hif_aes_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
+ ether_addr_copy(msg->peer_address, peer_addr);
+ memcpy(msg->aes_key_data, key->key, key->keylen);
+ return HIF_KEY_TYPE_AES_PAIRWISE;
+}
+
+static u8 fill_ccmp_group(struct hif_aes_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
+ memcpy(msg->aes_key_data, key->key, key->keylen);
+ memcpy(msg->rx_sequence_counter, seq->ccmp.pn, sizeof(seq->ccmp.pn));
+ memreverse(msg->rx_sequence_counter, sizeof(seq->ccmp.pn));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_AES_GROUP;
+}
+
+static u8 fill_sms4_pair(struct hif_wapi_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->wapi_key_data)
+ + sizeof(msg->mic_key_data), "inconsistent data");
+ ether_addr_copy(msg->peer_address, peer_addr);
+ memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
+ keybuf += sizeof(msg->wapi_key_data);
+ memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_WAPI_PAIRWISE;
+}
+
+static u8 fill_sms4_group(struct hif_wapi_group_key *msg,
+ struct ieee80211_key_conf *key)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->wapi_key_data)
+ + sizeof(msg->mic_key_data), "inconsistent data");
+ memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
+ keybuf += sizeof(msg->wapi_key_data);
+ memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_WAPI_GROUP;
+}
+
+static u8 fill_aes_cmac_group(struct hif_igtk_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data");
+ memcpy(msg->igtk_key_data, key->key, key->keylen);
+ memcpy(msg->ipn, seq->aes_cmac.pn, sizeof(seq->aes_cmac.pn));
+ memreverse(msg->ipn, sizeof(seq->aes_cmac.pn));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_IGTK_GROUP;
+}
+
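+// Translate a mac80211 key into the hif_req_add_key layout expected by the
+// firmware and upload it. The allocated slot is recorded in key->hw_key_idx
+// so the key can later be removed or re-uploaded by wfx_upload_keys().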
+static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret;
+ struct hif_req_add_key *k;
+ struct ieee80211_key_seq seq;
+ struct wfx_dev *wdev = wvif->wdev;
+ int idx = wfx_alloc_key(wvif->wdev);
+ bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+
+ WARN(key->flags & IEEE80211_KEY_FLAG_PAIRWISE && !sta, "inconsistent data");
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+ if (idx < 0)
+ return -EINVAL;
+ k = &wdev->keys[idx];
+ k->int_id = wvif->id;
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ if (pairwise)
+ k->type = fill_wep_pair(&k->key.wep_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_wep_group(&k->key.wep_group_key, key);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ if (pairwise)
+ k->type = fill_tkip_pair(&k->key.tkip_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_tkip_group(&k->key.tkip_group_key, key,
+ &seq, wvif->vif->type);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) {
+ if (pairwise)
+ k->type = fill_ccmp_pair(&k->key.aes_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_ccmp_group(&k->key.aes_group_key, key,
+ &seq);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
+ if (pairwise)
+ k->type = fill_sms4_pair(&k->key.wapi_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_sms4_group(&k->key.wapi_group_key, key);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ k->type = fill_aes_cmac_group(&k->key.igtk_group_key, key,
+ &seq);
+ } else {
+ dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher);
+ wfx_free_key(wdev, idx);
+ return -EOPNOTSUPP;
+ }
+ ret = hif_add_key(wdev, k);
+ if (ret) {
+ wfx_free_key(wdev, idx);
+ return -EOPNOTSUPP;
+ }
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+ key->hw_key_idx = idx;
+ return 0;
+}
+
+static int wfx_remove_key(struct wfx_vif *wvif, struct ieee80211_key_conf *key)
+{
+ WARN(key->hw_key_idx >= MAX_KEY_ENTRIES, "corrupted hw_key_idx");
+ wfx_free_key(wvif->wdev, key->hw_key_idx);
+ return hif_remove_key(wvif->wdev, key->hw_key_idx);
+}
+
+int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret = -EOPNOTSUPP;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ if (cmd == SET_KEY)
+ ret = wfx_add_key(wvif, sta, key);
+ if (cmd == DISABLE_KEY)
+ ret = wfx_remove_key(wvif, key);
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ return ret;
+}
+
+int wfx_upload_keys(struct wfx_vif *wvif)
+{
+ int i;
+ struct hif_req_add_key *key;
+ struct wfx_dev *wdev = wvif->wdev;
+
+ for (i = 0; i < ARRAY_SIZE(wdev->keys); i++) {
+ if (wdev->key_map & BIT(i)) {
+ key = &wdev->keys[i];
+ if (key->int_id == wvif->id)
+ hif_add_key(wdev, key);
+ }
+ }
+ return 0;
+}
+
+void wfx_wep_key_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, wep_key_work);
+
+ wfx_tx_flush(wvif->wdev);
+ hif_wep_default_key_id(wvif, wvif->wep_default_key_id);
+ wfx_pending_requeue(wvif->wdev, wvif->wep_pending_skb);
+ wvif->wep_pending_skb = NULL;
+ wfx_tx_unlock(wvif->wdev);
+}
diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h
new file mode 100644
index 000000000000..9436ccdf4d3b
--- /dev/null
+++ b/drivers/staging/wfx/key.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_KEY_H
+#define WFX_KEY_H
+
+#include <net/mac80211.h>
+
+struct wfx_dev;
+struct wfx_vif;
+
+int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int wfx_upload_keys(struct wfx_vif *wvif);
+void wfx_wep_key_work(struct work_struct *work);
+
+#endif /* WFX_KEY_H */
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
new file mode 100644
index 000000000000..986a2ef678b9
--- /dev/null
+++ b/drivers/staging/wfx/main.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device probe and register.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies).
+ * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+
+#include "main.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "bus.h"
+#include "bh.h"
+#include "sta.h"
+#include "key.h"
+#include "debug.h"
+#include "data_tx.h"
+#include "secure_link.h"
+#include "hif_tx_mib.h"
+#include "hif_api_cmd.h"
+
+#define WFX_PDS_MAX_SIZE 1500
+
+MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WFx");
+MODULE_AUTHOR("Jérôme Pouiller <jerome.pouiller@silabs.com>");
+MODULE_LICENSE("GPL");
+
+static int gpio_wakeup = -2;
+module_param(gpio_wakeup, int, 0644);
+MODULE_PARM_DESC(gpio_wakeup, "gpio number for wakeup. -1 for none.");
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+}
+
+static struct ieee80211_rate wfx_rates[] = {
+ RATETAB_ENT(10, 0, 0),
+ RATETAB_ENT(20, 1, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(55, 2, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(110, 3, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(60, 6, 0),
+ RATETAB_ENT(90, 7, 0),
+ RATETAB_ENT(120, 8, 0),
+ RATETAB_ENT(180, 9, 0),
+ RATETAB_ENT(240, 10, 0),
+ RATETAB_ENT(360, 11, 0),
+ RATETAB_ENT(480, 12, 0),
+ RATETAB_ENT(540, 13, 0),
+};
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_channel wfx_2ghz_chantable[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_supported_band wfx_band_2ghz = {
+ .channels = wfx_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(wfx_2ghz_chantable),
+ .bitrates = wfx_rates,
+ .n_bitrates = ARRAY_SIZE(wfx_rates),
+ .ht_cap = {
+ // Receive caps
+ .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_MAX_AMSDU |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
+ .ht_supported = 1,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+ .mcs = {
+ .rx_mask = { 0xFF }, // MCS0 to MCS7
+ .rx_highest = 65,
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+
+static const struct ieee80211_iface_limit wdev_iface_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination wfx_iface_combinations[] = {
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 2,
+ .limits = wdev_iface_limits,
+ .n_limits = ARRAY_SIZE(wdev_iface_limits),
+ }
+};
+
+static const struct ieee80211_ops wfx_ops = {
+ .start = wfx_start,
+ .stop = wfx_stop,
+ .add_interface = wfx_add_interface,
+ .remove_interface = wfx_remove_interface,
+ .config = wfx_config,
+ .tx = wfx_tx,
+ .conf_tx = wfx_conf_tx,
+ .hw_scan = wfx_hw_scan,
+ .sta_add = wfx_sta_add,
+ .sta_remove = wfx_sta_remove,
+ .sta_notify = wfx_sta_notify,
+ .set_tim = wfx_set_tim,
+ .set_key = wfx_set_key,
+ .set_rts_threshold = wfx_set_rts_threshold,
+ .bss_info_changed = wfx_bss_info_changed,
+ .prepare_multicast = wfx_prepare_multicast,
+ .configure_filter = wfx_configure_filter,
+ .ampdu_action = wfx_ampdu_action,
+ .flush = wfx_flush,
+ .add_chanctx = wfx_add_chanctx,
+ .remove_chanctx = wfx_remove_chanctx,
+ .change_chanctx = wfx_change_chanctx,
+ .assign_vif_chanctx = wfx_assign_vif_chanctx,
+ .unassign_vif_chanctx = wfx_unassign_vif_chanctx,
+};
+
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
+{
+ if (wdev->hw_caps.api_version_major < major)
+ return true;
+ if (wdev->hw_caps.api_version_major > major)
+ return false;
+ if (wdev->hw_caps.api_version_minor < minor)
+ return true;
+ return false;
+}
+
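+/*
+ * Resolve a GPIO from either the legacy module parameter or the device tree:
+ * an override >= 0 requests that legacy GPIO number, -1 disables the pin,
+ * and any other value (such as the -2 default of gpio_wakeup) falls back to
+ * the named GPIO from the device tree.
+ */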
+struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
+ const char *label)
+{
+ struct gpio_desc *ret;
+ char label_buf[256];
+
+ if (override >= 0) {
+ snprintf(label_buf, sizeof(label_buf), "wfx_%s", label);
+ ret = ERR_PTR(devm_gpio_request_one(dev, override,
+ GPIOF_OUT_INIT_LOW,
+ label_buf));
+ if (!ret)
+ ret = gpio_to_desc(override);
+ } else if (override == -1) {
+ ret = NULL;
+ } else {
+ ret = devm_gpiod_get(dev, label, GPIOD_OUT_LOW);
+ }
+ if (IS_ERR(ret) || !ret) {
+ if (!ret || PTR_ERR(ret) == -ENOENT)
+ dev_warn(dev, "gpio %s is not defined\n", label);
+ else
+ dev_warn(dev,
+ "error while requesting gpio %s\n", label);
+ ret = NULL;
+ } else {
+ dev_dbg(dev,
+ "using gpio %d for %s\n", desc_to_gpio(ret), label);
+ }
+ return ret;
+}
+
+/*
+ * NOTE: wfx_send_pds() destroys buf. The PDS is a series of top-level
+ * '{ ... }' fragments, each sent to the chip as one configuration message.
+ */
+int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len)
+{
+ int ret;
+ int start, brace_level, i;
+
+ start = 0;
+ brace_level = 0;
+ if (buf[0] != '{') {
+ dev_err(wdev->dev, "valid PDS start with '{'. Did you forget to compress it?\n");
+ return -EINVAL;
+ }
+ for (i = 1; i < len - 1; i++) {
+ if (buf[i] == '{')
+ brace_level++;
+ if (buf[i] == '}')
+ brace_level--;
+ if (buf[i] == '}' && !brace_level) {
+ i++;
+ if (i - start + 1 > WFX_PDS_MAX_SIZE)
+ return -EFBIG;
+ buf[start] = '{';
+ buf[i] = 0;
+ dev_dbg(wdev->dev, "send PDS '%s}'\n", buf + start);
+ buf[i] = '}';
+ ret = hif_configuration(wdev, buf + start,
+ i - start + 1);
+ if (ret == HIF_STATUS_FAILURE) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: invalid data (unsupported options?)\n", start, i);
+ return -EINVAL;
+ }
+ if (ret == -ETIMEDOUT) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip didn't reply (corrupted file?)\n", start, i);
+ return ret;
+ }
+ if (ret) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip returned an unknown error\n", start, i);
+ return -EIO;
+ }
+ buf[i] = ',';
+ start = i;
+ }
+ }
+ return 0;
+}
+
+static int wfx_send_pdata_pds(struct wfx_dev *wdev)
+{
+ int ret = 0;
+ const struct firmware *pds;
+ unsigned char *tmp_buf;
+
+ ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev);
+ if (ret) {
+ dev_err(wdev->dev, "can't load PDS file %s\n",
+ wdev->pdata.file_pds);
+ return ret;
+ }
+ tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL);
+ ret = wfx_send_pds(wdev, tmp_buf, pds->size);
+ kfree(tmp_buf);
+ release_firmware(pds);
+ return ret;
+}
+
+struct wfx_dev *wfx_init_common(struct device *dev,
+ const struct wfx_platform_data *pdata,
+ const struct hwbus_ops *hwbus_ops,
+ void *hwbus_priv)
+{
+ struct ieee80211_hw *hw;
+ struct wfx_dev *wdev;
+
+ hw = ieee80211_alloc_hw(sizeof(struct wfx_dev), &wfx_ops);
+ if (!hw)
+ return NULL;
+
+ SET_IEEE80211_DEV(hw, dev);
+
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ hw->vif_data_size = sizeof(struct wfx_vif);
+ hw->sta_data_size = sizeof(struct wfx_sta_priv);
+ hw->queues = 4;
+ hw->max_rates = 8;
+ hw->max_rate_tries = 15;
+ hw->extra_tx_headroom = sizeof(struct hif_sl_msg_hdr) +
+ sizeof(struct hif_msg)
+ + sizeof(struct hif_req_tx)
+ + 4 /* alignment */ + 8 /* TKIP IV */;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ hw->wiphy->max_ap_assoc_sta = WFX_MAX_STA_IN_AP_MODE;
+ hw->wiphy->max_scan_ssids = 2;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
+ hw->wiphy->iface_combinations = wfx_iface_combinations;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL);
+ // FIXME: also copy wfx_rates and wfx_2ghz_chantable
+ memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz,
+ sizeof(wfx_band_2ghz));
+
+ wdev = hw->priv;
+ wdev->hw = hw;
+ wdev->dev = dev;
+ wdev->hwbus_ops = hwbus_ops;
+ wdev->hwbus_priv = hwbus_priv;
+ memcpy(&wdev->pdata, pdata, sizeof(*pdata));
+ of_property_read_string(dev->of_node, "config-file",
+ &wdev->pdata.file_pds);
+ wdev->pdata.gpio_wakeup = wfx_get_gpio(dev, gpio_wakeup, "wakeup");
+ wfx_sl_fill_pdata(dev, &wdev->pdata);
+
+ mutex_init(&wdev->conf_mutex);
+ mutex_init(&wdev->rx_stats_lock);
+ init_completion(&wdev->firmware_ready);
+ wfx_init_hif_cmd(&wdev->hif_cmd);
+ wfx_tx_queues_init(wdev);
+
+ return wdev;
+}
+
+void wfx_free_common(struct wfx_dev *wdev)
+{
+ mutex_destroy(&wdev->rx_stats_lock);
+ mutex_destroy(&wdev->conf_mutex);
+ wfx_tx_queues_deinit(wdev);
+ ieee80211_free_hw(wdev->hw);
+}
+
+int wfx_probe(struct wfx_dev *wdev)
+{
+ int i;
+ int err;
+ const void *macaddr;
+ struct gpio_desc *gpio_saved;
+
+ // During the first part of boot, gpio_wakeup cannot be used yet. So
+ // prevent bh() from touching it.
+ gpio_saved = wdev->pdata.gpio_wakeup;
+ wdev->pdata.gpio_wakeup = NULL;
+
+ wfx_bh_register(wdev);
+
+ err = wfx_init_device(wdev);
+ if (err)
+ goto err1;
+
+ err = wait_for_completion_interruptible_timeout(&wdev->firmware_ready,
+ 10 * HZ);
+ if (err <= 0) {
+ if (err == 0) {
+ dev_err(wdev->dev, "timeout while waiting for startup indication. IRQ configuration error?\n");
+ err = -ETIMEDOUT;
+ } else if (err == -ERESTARTSYS) {
+ dev_info(wdev->dev, "probe interrupted by user\n");
+ }
+ goto err1;
+ }
+
+ // FIXME: fill wiphy::hw_version
+ dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n",
+ wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor,
+ wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
+ wdev->hw_caps.api_version_major,
+ wdev->hw_caps.api_version_minor,
+ wdev->keyset, *((u32 *) &wdev->hw_caps.capabilities));
+ snprintf(wdev->hw->wiphy->fw_version,
+ sizeof(wdev->hw->wiphy->fw_version),
+ "%d.%d.%d",
+ wdev->hw_caps.firmware_major,
+ wdev->hw_caps.firmware_minor,
+ wdev->hw_caps.firmware_build);
+
+ if (wfx_api_older_than(wdev, 1, 0)) {
+ dev_err(wdev->dev,
+ "unsupported firmware API version (expect 1 while firmware returns %d)\n",
+ wdev->hw_caps.api_version_major);
+ err = -ENOTSUPP;
+ goto err1;
+ }
+
+ err = wfx_sl_init(wdev);
+ if (err && wdev->hw_caps.capabilities.link_mode == SEC_LINK_ENFORCED) {
+ dev_err(wdev->dev,
+ "chip require secure_link, but can't negociate it\n");
+ goto err1;
+ }
+
+ if (wdev->hw_caps.regul_sel_mode_info.region_sel_mode) {
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |= IEEE80211_CHAN_NO_IR;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |= IEEE80211_CHAN_NO_IR;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ dev_dbg(wdev->dev, "sending configuration file %s\n",
+ wdev->pdata.file_pds);
+ err = wfx_send_pdata_pds(wdev);
+ if (err < 0)
+ goto err1;
+
+ wdev->pdata.gpio_wakeup = gpio_saved;
+ if (wdev->pdata.gpio_wakeup) {
+ dev_dbg(wdev->dev,
+ "enable 'quiescent' power mode with gpio %d and PDS file %s\n",
+ desc_to_gpio(wdev->pdata.gpio_wakeup),
+ wdev->pdata.file_pds);
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 1);
+ control_reg_write(wdev, 0);
+ hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT);
+ } else {
+ hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE);
+ }
+
+ hif_use_multi_tx_conf(wdev, true);
+
+ for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) {
+ eth_zero_addr(wdev->addresses[i].addr);
+ macaddr = of_get_mac_address(wdev->dev->of_node);
+ if (!IS_ERR_OR_NULL(macaddr)) {
+ ether_addr_copy(wdev->addresses[i].addr, macaddr);
+ wdev->addresses[i].addr[ETH_ALEN - 1] += i;
+ } else {
+ ether_addr_copy(wdev->addresses[i].addr,
+ wdev->hw_caps.mac_addr[i]);
+ }
+ if (!is_valid_ether_addr(wdev->addresses[i].addr)) {
+ dev_warn(wdev->dev, "using random MAC address\n");
+ eth_random_addr(wdev->addresses[i].addr);
+ }
+ dev_info(wdev->dev, "MAC address %d: %pM\n", i,
+ wdev->addresses[i].addr);
+ }
+ wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses);
+ wdev->hw->wiphy->addresses = wdev->addresses;
+
+ err = ieee80211_register_hw(wdev->hw);
+ if (err)
+ goto err1;
+
+ err = wfx_debug_init(wdev);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ ieee80211_unregister_hw(wdev->hw);
+ ieee80211_free_hw(wdev->hw);
+err1:
+ wfx_bh_unregister(wdev);
+ return err;
+}
+
+void wfx_release(struct wfx_dev *wdev)
+{
+ ieee80211_unregister_hw(wdev->hw);
+ hif_shutdown(wdev);
+ wfx_bh_unregister(wdev);
+ wfx_sl_deinit(wdev);
+}
+
+static int __init wfx_core_init(void)
+{
+ int ret = 0;
+
+ if (IS_ENABLED(CONFIG_SPI))
+ ret = spi_register_driver(&wfx_spi_driver);
+ if (IS_ENABLED(CONFIG_MMC) && !ret)
+ ret = sdio_register_driver(&wfx_sdio_driver);
+ return ret;
+}
+module_init(wfx_core_init);
+
+static void __exit wfx_core_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MMC))
+ sdio_unregister_driver(&wfx_sdio_driver);
+ if (IS_ENABLED(CONFIG_SPI))
+ spi_unregister_driver(&wfx_spi_driver);
+}
+module_exit(wfx_core_exit);
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
new file mode 100644
index 000000000000..875f8c227803
--- /dev/null
+++ b/drivers/staging/wfx/main.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device probe and register.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#ifndef WFX_MAIN_H
+#define WFX_MAIN_H
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+
+#include "bus.h"
+#include "hif_api_general.h"
+
+struct wfx_dev;
+
+struct wfx_platform_data {
+ /* Keyset and ".sec" extension will be appended to this string */
+ const char *file_fw;
+ const char *file_pds;
+ struct gpio_desc *gpio_wakeup;
+ /*
+ * If true, HIF D_out is sampled on the rising edge of the clock
+ * (intended to be used with 50MHz SDIO).
+ */
+ bool use_rising_clk;
+};
+
+struct wfx_dev *wfx_init_common(struct device *dev,
+ const struct wfx_platform_data *pdata,
+ const struct hwbus_ops *hwbus_ops,
+ void *hwbus_priv);
+void wfx_free_common(struct wfx_dev *wdev);
+
+int wfx_probe(struct wfx_dev *wdev);
+void wfx_release(struct wfx_dev *wdev);
+
+struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
+ const char *label);
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor);
+int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len);
+
+#endif
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
new file mode 100644
index 000000000000..c7ee90888f69
--- /dev/null
+++ b/drivers/staging/wfx/queue.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * O(1) TX queue with built-in allocator.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wfx.h"
+#include "sta.h"
+#include "data_tx.h"
+
+void wfx_tx_lock(struct wfx_dev *wdev)
+{
+ atomic_inc(&wdev->tx_lock);
+}
+
+void wfx_tx_unlock(struct wfx_dev *wdev)
+{
+ int tx_lock = atomic_dec_return(&wdev->tx_lock);
+
+ WARN(tx_lock < 0, "inconsistent tx_lock value");
+ if (!tx_lock)
+ wfx_bh_request_tx(wdev);
+}
+
+void wfx_tx_flush(struct wfx_dev *wdev)
+{
+ int ret;
+
+ WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");
+
+ // Do not wait for any reply if chip is frozen
+ if (wdev->chip_frozen)
+ return;
+
+ mutex_lock(&wdev->hif_cmd.lock);
+ ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
+ !wdev->hif.tx_buffers_used,
+ msecs_to_jiffies(3000));
+ if (!ret) {
+ dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
+ wdev->hif.tx_buffers_used);
+ wfx_pending_dump_old_frames(wdev, 3000);
+ // FIXME: drop pending frames here
+ wdev->chip_frozen = 1;
+ }
+ mutex_unlock(&wdev->hif_cmd.lock);
+}
+
+void wfx_tx_lock_flush(struct wfx_dev *wdev)
+{
+ wfx_tx_lock(wdev);
+ wfx_tx_flush(wdev);
+}
+
+void wfx_tx_queues_lock(struct wfx_dev *wdev)
+{
+ int i;
+ struct wfx_queue *queue;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ if (queue->tx_locked_cnt++ == 0)
+ ieee80211_stop_queue(wdev->hw, queue->queue_id);
+ spin_unlock_bh(&queue->queue.lock);
+ }
+}
+
+void wfx_tx_queues_unlock(struct wfx_dev *wdev)
+{
+ int i;
+ struct wfx_queue *queue;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ WARN(!queue->tx_locked_cnt, "queue already unlocked");
+ if (--queue->tx_locked_cnt == 0)
+ ieee80211_wake_queue(wdev->hw, queue->queue_id);
+ spin_unlock_bh(&queue->queue.lock);
+ }
+}
+
+/* If successful, LOCKS the TX queue! */
+void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
+{
+ int i;
+ bool done;
+ struct wfx_queue *queue;
+ struct sk_buff *item;
+ struct wfx_dev *wdev = wvif->wdev;
+ struct hif_msg *hif;
+
+ if (wvif->wdev->chip_frozen) {
+ wfx_tx_lock_flush(wdev);
+ wfx_tx_queues_clear(wdev);
+ return;
+ }
+
+ do {
+ done = true;
+ wfx_tx_lock_flush(wdev);
+ for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ skb_queue_walk(&queue->queue, item) {
+ hif = (struct hif_msg *) item->data;
+ if (hif->interface == wvif->id)
+ done = false;
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ }
+ if (!done) {
+ wfx_tx_unlock(wdev);
+ msleep(20);
+ }
+ } while (!done);
+}
+
+static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff_head *gc_list)
+{
+ int i;
+ struct sk_buff *item;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&queue->queue.lock);
+ while ((item = __skb_dequeue(&queue->queue)) != NULL)
+ skb_queue_head(gc_list, item);
+ spin_lock_bh(&stats->pending.lock);
+ for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
+ stats->link_map_cache[i] -= queue->link_map_cache[i];
+ queue->link_map_cache[i] = 0;
+ }
+ spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&queue->queue.lock);
+}
+
+void wfx_tx_queues_clear(struct wfx_dev *wdev)
+{
+ int i;
+ struct sk_buff *item;
+ struct sk_buff_head gc_list;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ skb_queue_head_init(&gc_list);
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i)
+ wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
+ wake_up(&stats->wait_link_id_empty);
+ while ((item = skb_dequeue(&gc_list)) != NULL)
+ wfx_skb_dtor(wdev, item);
+}
+
+void wfx_tx_queues_init(struct wfx_dev *wdev)
+{
+ int i;
+
+ memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
+ memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
+ skb_queue_head_init(&wdev->tx_queue_stats.pending);
+ init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ wdev->tx_queue[i].queue_id = i;
+ skb_queue_head_init(&wdev->tx_queue[i].queue);
+ }
+}
+
+void wfx_tx_queues_deinit(struct wfx_dev *wdev)
+{
+ WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
+ wfx_tx_queues_clear(wdev);
+}
+
+size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
+ u32 link_id_map)
+{
+ size_t ret;
+ int i, bit;
+
+ if (!link_id_map)
+ return 0;
+
+ spin_lock_bh(&queue->queue.lock);
+ if (link_id_map == (u32)-1) {
+ ret = skb_queue_len(&queue->queue);
+ } else {
+ ret = 0;
+ for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
+ ++i, bit <<= 1) {
+ if (link_id_map & bit)
+ ret += queue->link_map_cache[i];
+ }
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ return ret;
+}
+
+void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
+ spin_lock_bh(&queue->queue.lock);
+ __skb_queue_tail(&queue->queue, skb);
+
+ ++queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ ++stats->link_map_cache[tx_priv->link_id];
+ spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&queue->queue.lock);
+}
+
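+/* Dequeue the first frame whose link-id matches link_id_map, move it to the
+ * global "pending" queue and timestamp it for latency accounting.
+ */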
+static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
+ struct wfx_queue *queue,
+ u32 link_id_map)
+{
+ struct sk_buff *skb = NULL;
+ struct sk_buff *item;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv;
+ bool wakeup_stats = false;
+
+ spin_lock_bh(&queue->queue.lock);
+ skb_queue_walk(&queue->queue, item) {
+ tx_priv = wfx_skb_tx_priv(item);
+ if (link_id_map & BIT(tx_priv->link_id)) {
+ skb = item;
+ break;
+ }
+ }
+ WARN_ON(!skb);
+ if (skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ __skb_unlink(skb, &queue->queue);
+ --queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ __skb_queue_tail(&stats->pending, skb);
+ if (!--stats->link_map_cache[tx_priv->link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->pending.lock);
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+ return skb;
+}
+
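+/* Move a frame from the global "pending" queue back to the tail of its AC
+ * queue and restore the per-link counters.
+ */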
+int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+ struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ spin_lock_bh(&queue->queue.lock);
+ ++queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ ++stats->link_map_cache[tx_priv->link_id];
+ __skb_unlink(skb, &stats->pending);
+ spin_unlock_bh(&stats->pending.lock);
+ __skb_queue_tail(&queue->queue, skb);
+ spin_unlock_bh(&queue->queue.lock);
+ return 0;
+}
+
+int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&stats->pending.lock);
+ __skb_unlink(skb, &stats->pending);
+ spin_unlock_bh(&stats->pending.lock);
+ wfx_skb_dtor(wdev, skb);
+
+ return 0;
+}
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
+{
+ struct sk_buff *skb;
+ struct hif_req_tx *req;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&stats->pending.lock);
+ skb_queue_walk(&stats->pending, skb) {
+ req = wfx_skb_txreq(skb);
+ if (req->packet_id == packet_id) {
+ spin_unlock_bh(&stats->pending.lock);
+ return skb;
+ }
+ }
+ spin_unlock_bh(&stats->pending.lock);
+ WARN(1, "cannot find packet in pending queue");
+ return NULL;
+}
+
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv;
+ struct hif_req_tx *req;
+ struct sk_buff *skb;
+ bool first = true;
+
+ spin_lock_bh(&stats->pending.lock);
+ skb_queue_walk(&stats->pending, skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ req = wfx_skb_txreq(skb);
+ if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
+ limit_ms))) {
+ if (first) {
+				dev_info(wdev->dev, "frames stuck in firmware for %ums or more:\n",
+ limit_ms);
+ first = false;
+ }
+ dev_info(wdev->dev, " id %08x sent %lldms ago\n",
+ req->packet_id,
+ ktime_ms_delta(now, tx_priv->xmit_timestamp));
+ }
+ }
+ spin_unlock_bh(&stats->pending.lock);
+}
+
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
+ struct sk_buff *skb)
+{
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ return ktime_us_delta(now, tx_priv->xmit_timestamp);
+}
+
+bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
+{
+ int i;
+ struct sk_buff_head *queue;
+ bool ret = true;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ queue = &wdev->tx_queue[i].queue;
+ spin_lock_bh(&queue->lock);
+ if (!skb_queue_empty(queue))
+ ret = false;
+ spin_unlock_bh(&queue->lock);
+ }
+ return ret;
+}
+
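+/* Decide what to do with a frame about to be sent: drop it if the vif state
+ * does not allow transmission, defer it while the WEP default key is changed,
+ * or let it go through. Returns true if the frame was consumed here.
+ */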
+static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
+ struct wfx_queue *queue)
+{
+ bool handled = false;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+ struct hif_req_tx *req = wfx_skb_txreq(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
+
+ enum {
+ do_probe,
+ do_drop,
+ do_wep,
+ do_tx,
+ } action = do_tx;
+
+ switch (wvif->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (wvif->state < WFX_STATE_PRE_STA)
+ action = do_drop;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!wvif->state) {
+ action = do_drop;
+ } else if (!(BIT(tx_priv->raw_link_id) &
+ (BIT(0) | wvif->link_id_map))) {
+ dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
+ action = do_drop;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (wvif->state != WFX_STATE_IBSS)
+ action = do_drop;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ default:
+ action = do_drop;
+ break;
+ }
+
+ if (action == do_tx) {
+ if (ieee80211_is_nullfunc(frame->frame_control)) {
+ mutex_lock(&wvif->bss_loss_lock);
+ if (wvif->bss_loss_state) {
+ wvif->bss_loss_confirm_id = req->packet_id;
+ req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
+ }
+ mutex_unlock(&wvif->bss_loss_lock);
+ } else if (ieee80211_has_protected(frame->frame_control) &&
+ tx_priv->hw_key &&
+ tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
+ (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ action = do_wep;
+ }
+ }
+
+ switch (action) {
+ case do_drop:
+ wfx_pending_remove(wvif->wdev, skb);
+ handled = true;
+ break;
+ case do_wep:
+ wfx_tx_lock(wvif->wdev);
+ wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
+ wvif->wep_pending_skb = skb;
+ if (!schedule_work(&wvif->wep_key_work))
+ wfx_tx_unlock(wvif->wdev);
+ handled = true;
+ break;
+ case do_tx:
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ return handled;
+}
+
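+/* Pick the access category to serve next: among the ACs that have frames
+ * allowed by tx_allowed_mask, compute a randomized score from the EDCA
+ * parameters and prefer the AC with the lowest score. *total is incremented
+ * by the number of eligible frames found, and a queue that is currently
+ * bursting may override the choice.
+ */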
+static int wfx_get_prio_queue(struct wfx_vif *wvif,
+ u32 tx_allowed_mask, int *total)
+{
+ static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
+ BIT(WFX_LINK_ID_UAPSD);
+ struct hif_req_edca_queue_params *edca;
+ unsigned int score, best = -1;
+ int winner = -1;
+ int i;
+
+ /* search for a winner using edca params */
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ int queued;
+
+ edca = &wvif->edca.params[i];
+ queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
+ tx_allowed_mask);
+ if (!queued)
+ continue;
+ *total += queued;
+ score = ((edca->aifsn + edca->cw_min) << 16) +
+ ((edca->cw_max - edca->cw_min) *
+ (get_random_int() & 0xFFFF));
+ if (score < best && (winner < 0 || i != 3)) {
+ best = score;
+ winner = i;
+ }
+ }
+
+ /* override winner if bursting */
+ if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
+ winner != wvif->wdev->tx_burst_idx &&
+ !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
+ tx_allowed_mask & urgent) &&
+ wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
+ winner = wvif->wdev->tx_burst_idx;
+
+ return winner;
+}
+
+static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
+ struct wfx_queue **queue_p,
+ u32 *tx_allowed_mask_p,
+ bool *more)
+{
+ int idx;
+ u32 tx_allowed_mask;
+ int total = 0;
+
+ /* Search for a queue with multicast frames buffered */
+ if (wvif->mcast_tx) {
+ tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
+ idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
+ if (idx >= 0) {
+ *more = total > 1;
+ goto found;
+ }
+ }
+
+ /* Search for unicast traffic */
+ tx_allowed_mask = ~wvif->sta_asleep_mask;
+ tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
+ if (wvif->sta_asleep_mask) {
+ tx_allowed_mask |= wvif->pspoll_mask;
+ tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
+ } else {
+ tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
+ }
+ idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
+ if (idx < 0)
+ return -ENOENT;
+
+found:
+ *queue_p = &wvif->wdev->tx_queue[idx];
+ *tx_allowed_mask_p = tx_allowed_mask;
+ return 0;
+}
+
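+/* Called by the bottom half to get the next frame to send: select the best
+ * queue across all vifs, dequeue a frame from it, handle bursting and the
+ * MoreData flag, and return the HIF message (or NULL if nothing can be sent).
+ */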
+struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
+{
+ struct sk_buff *skb;
+ struct hif_msg *hif = NULL;
+ struct hif_req_tx *req = NULL;
+ struct wfx_queue *queue = NULL;
+ struct wfx_queue *vif_queue = NULL;
+ u32 tx_allowed_mask = 0;
+ u32 vif_tx_allowed_mask = 0;
+ const struct wfx_tx_priv *tx_priv = NULL;
+ struct wfx_vif *wvif;
+ /* More is used only for broadcasts. */
+ bool more = false;
+ bool vif_more = false;
+ int not_found;
+ int burst;
+
+ for (;;) {
+ int ret = -ENOENT;
+ int queue_num;
+ struct ieee80211_hdr *hdr;
+
+ if (atomic_read(&wdev->tx_lock))
+ return NULL;
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ spin_lock_bh(&wvif->ps_state_lock);
+
+ not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
+ &vif_tx_allowed_mask,
+ &vif_more);
+
+ if (wvif->mcast_buffered && (not_found || !vif_more) &&
+ (wvif->mcast_tx ||
+ !wvif->sta_asleep_mask)) {
+ wvif->mcast_buffered = false;
+ if (wvif->mcast_tx) {
+ wvif->mcast_tx = false;
+ schedule_work(&wvif->mcast_stop_work);
+ }
+ }
+
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (vif_more) {
+ more = true;
+ tx_allowed_mask = vif_tx_allowed_mask;
+ queue = vif_queue;
+ ret = 0;
+ break;
+ } else if (!not_found) {
+ if (queue && queue != vif_queue)
+ dev_info(wdev->dev, "vifs disagree about queue priority\n");
+ tx_allowed_mask |= vif_tx_allowed_mask;
+ queue = vif_queue;
+ ret = 0;
+ }
+ }
+
+ if (ret)
+ return NULL;
+
+ queue_num = queue - wdev->tx_queue;
+
+ skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
+ if (!skb)
+ continue;
+ tx_priv = wfx_skb_tx_priv(skb);
+ hif = (struct hif_msg *) skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ WARN_ON(!wvif);
+
+ if (hif_handle_tx_data(wvif, skb, queue))
+			continue; /* frame was dropped or deferred */
+
+ wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);
+
+ /* allow bursting if txop is set */
+ if (wvif->edca.params[queue_num].tx_op_limit)
+ burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
+ else
+ burst = 1;
+
+ /* store index of bursting queue */
+ if (burst > 1)
+ wdev->tx_burst_idx = queue_num;
+ else
+ wdev->tx_burst_idx = -1;
+
+ /* more buffered multicast/broadcast frames
+ * ==> set MoreData flag in IEEE 802.11 header
+ * to inform PS STAs
+ */
+ if (more) {
+ req = (struct hif_req_tx *) hif->body;
+ hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
+ return hif;
+ }
+}
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
new file mode 100644
index 000000000000..21566e48b2c2
--- /dev/null
+++ b/drivers/staging/wfx/queue.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * O(1) TX queue with built-in allocator.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_QUEUE_H
+#define WFX_QUEUE_H
+
+#include <linux/skbuff.h>
+
+#include "hif_api_cmd.h"
+
+#define WFX_MAX_STA_IN_AP_MODE 14
+#define WFX_LINK_ID_AFTER_DTIM (WFX_MAX_STA_IN_AP_MODE + 1)
+#define WFX_LINK_ID_UAPSD (WFX_MAX_STA_IN_AP_MODE + 2)
+#define WFX_LINK_ID_MAX (WFX_MAX_STA_IN_AP_MODE + 3)
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_queue {
+ struct sk_buff_head queue;
+ int tx_locked_cnt;
+ int link_map_cache[WFX_LINK_ID_MAX];
+ u8 queue_id;
+};
+
+struct wfx_queue_stats {
+ int link_map_cache[WFX_LINK_ID_MAX];
+ struct sk_buff_head pending;
+ wait_queue_head_t wait_link_id_empty;
+};
+
+void wfx_tx_lock(struct wfx_dev *wdev);
+void wfx_tx_unlock(struct wfx_dev *wdev);
+void wfx_tx_flush(struct wfx_dev *wdev);
+void wfx_tx_lock_flush(struct wfx_dev *wdev);
+
+void wfx_tx_queues_init(struct wfx_dev *wdev);
+void wfx_tx_queues_deinit(struct wfx_dev *wdev);
+void wfx_tx_queues_lock(struct wfx_dev *wdev);
+void wfx_tx_queues_unlock(struct wfx_dev *wdev);
+void wfx_tx_queues_clear(struct wfx_dev *wdev);
+bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
+void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
+struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
+
+void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff *skb);
+size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
+int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
+int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb);
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
+ struct sk_buff *skb);
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms);
+
+#endif /* WFX_QUEUE_H */
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
new file mode 100644
index 000000000000..35fcf9119f96
--- /dev/null
+++ b/drivers/staging/wfx/scan.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Scan related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "scan.h"
+#include "wfx.h"
+#include "sta.h"
+#include "hif_tx_mib.h"
+
+static void __ieee80211_scan_completed_compat(struct ieee80211_hw *hw,
+ bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted ? 1 : 0,
+ };
+
+ ieee80211_scan_completed(hw, &info);
+}
+
+static void wfx_scan_restart_delayed(struct wfx_vif *wvif)
+{
+ if (wvif->delayed_unjoin) {
+ wvif->delayed_unjoin = false;
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else if (wvif->delayed_link_loss) {
+ wvif->delayed_link_loss = 0;
+ wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+ }
+}
+
+static int wfx_scan_start(struct wfx_vif *wvif, struct wfx_scan_params *scan)
+{
+ int ret;
+ int tmo = 500;
+
+ if (wvif->state == WFX_STATE_PRE_STA)
+ return -EBUSY;
+
+ tmo += scan->scan_req.num_of_channels *
+ ((20 * (scan->scan_req.max_channel_time)) + 10);
+ atomic_set(&wvif->scan.in_progress, 1);
+ atomic_set(&wvif->wdev->scan_in_progress, 1);
+
+ schedule_delayed_work(&wvif->scan.timeout, msecs_to_jiffies(tmo));
+ ret = hif_scan(wvif, scan);
+ if (ret) {
+ wfx_scan_failed_cb(wvif);
+ atomic_set(&wvif->scan.in_progress, 0);
+ atomic_set(&wvif->wdev->scan_in_progress, 0);
+ cancel_delayed_work_sync(&wvif->scan.timeout);
+ wfx_scan_restart_delayed(wvif);
+ }
+ return ret;
+}
+
+int wfx_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct sk_buff *skb;
+ int i, ret;
+ struct hif_mib_template_frame *p;
+
+ if (!wvif)
+ return -EINVAL;
+
+ if (wvif->state == WFX_STATE_AP)
+ return -EOPNOTSUPP;
+
+ if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
+ req->n_ssids = 0;
+
+ if (req->n_ssids > HIF_API_MAX_NB_SSIDS)
+ return -EINVAL;
+
+ skb = ieee80211_probereq_get(hw, wvif->vif->addr, NULL, 0, req->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (req->ie_len)
+ memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
+
+ mutex_lock(&wdev->conf_mutex);
+
+ p = (struct hif_mib_template_frame *)skb_push(skb, 4);
+ p->frame_type = HIF_TMPLT_PRBREQ;
+ p->frame_length = cpu_to_le16(skb->len - 4);
+ ret = hif_set_template_frame(wvif, p);
+ skb_pull(skb, 4);
+
+ if (!ret)
+		/* Host wants to be the probe responder. */
+ ret = wfx_fwd_probe_req(wvif, true);
+ if (ret) {
+ mutex_unlock(&wdev->conf_mutex);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ wfx_tx_lock_flush(wdev);
+
+ WARN(wvif->scan.req, "unexpected concurrent scan");
+ wvif->scan.req = req;
+ wvif->scan.n_ssids = 0;
+ wvif->scan.status = 0;
+ wvif->scan.begin = &req->channels[0];
+ wvif->scan.curr = wvif->scan.begin;
+ wvif->scan.end = &req->channels[req->n_channels];
+ wvif->scan.output_power = wdev->output_power;
+
+ for (i = 0; i < req->n_ssids; ++i) {
+ struct hif_ssid_def *dst = &wvif->scan.ssids[wvif->scan.n_ssids];
+
+ memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
+ dst->ssid_length = req->ssids[i].ssid_len;
+ ++wvif->scan.n_ssids;
+ }
+
+ mutex_unlock(&wdev->conf_mutex);
+
+	dev_kfree_skb(skb);
+ schedule_work(&wvif->scan.work);
+ return 0;
+}
+
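+/* Process the scan request chunk by chunk: group consecutive channels that
+ * share the same band, IR restrictions and max power, start one hardware scan
+ * for the group, then come back here when it completes until all channels
+ * have been scanned.
+ */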
+void wfx_scan_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan.work);
+ struct ieee80211_channel **it;
+ struct wfx_scan_params scan = {
+ .scan_req.scan_type.type = 0, /* Foreground */
+ };
+ struct ieee80211_channel *first;
+ bool first_run = (wvif->scan.begin == wvif->scan.curr &&
+ wvif->scan.begin != wvif->scan.end);
+ int i;
+
+ down(&wvif->scan.lock);
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ if (first_run) {
+ if (wvif->state == WFX_STATE_STA &&
+ !(wvif->powersave_mode.pm_mode.enter_psm)) {
+ struct hif_req_set_pm_mode pm = wvif->powersave_mode;
+
+ pm.pm_mode.enter_psm = 1;
+ wfx_set_pm(wvif, &pm);
+ }
+ }
+
+ if (!wvif->scan.req || wvif->scan.curr == wvif->scan.end) {
+ if (wvif->scan.output_power != wvif->wdev->output_power)
+ hif_set_output_power(wvif,
+ wvif->wdev->output_power * 10);
+
+ if (wvif->scan.status < 0)
+ dev_warn(wvif->wdev->dev, "scan failed\n");
+ else if (wvif->scan.req)
+ dev_dbg(wvif->wdev->dev, "scan completed\n");
+ else
+ dev_dbg(wvif->wdev->dev, "scan canceled\n");
+
+ wvif->scan.req = NULL;
+ wfx_scan_restart_delayed(wvif);
+ wfx_tx_unlock(wvif->wdev);
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ __ieee80211_scan_completed_compat(wvif->wdev->hw,
+ wvif->scan.status ? 1 : 0);
+ up(&wvif->scan.lock);
+ if (wvif->state == WFX_STATE_STA &&
+ !(wvif->powersave_mode.pm_mode.enter_psm))
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ return;
+ }
+ first = *wvif->scan.curr;
+
+ for (it = wvif->scan.curr + 1, i = 1;
+ it != wvif->scan.end && i < HIF_API_MAX_NB_CHANNELS;
+ ++it, ++i) {
+ if ((*it)->band != first->band)
+ break;
+ if (((*it)->flags ^ first->flags) &
+ IEEE80211_CHAN_NO_IR)
+ break;
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
+ (*it)->max_power != first->max_power)
+ break;
+ }
+ scan.scan_req.band = first->band;
+
+ if (wvif->scan.req->no_cck)
+ scan.scan_req.max_transmit_rate = API_RATE_INDEX_G_6MBPS;
+ else
+ scan.scan_req.max_transmit_rate = API_RATE_INDEX_B_1MBPS;
+ scan.scan_req.num_of_probe_requests =
+ (first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
+ scan.scan_req.num_of_ssi_ds = wvif->scan.n_ssids;
+ scan.ssids = &wvif->scan.ssids[0];
+ scan.scan_req.num_of_channels = it - wvif->scan.curr;
+ scan.scan_req.probe_delay = 100;
+ // FIXME: Check if FW can do active scan while joined.
+ if (wvif->state == WFX_STATE_STA) {
+ scan.scan_req.scan_type.type = 1;
+ scan.scan_req.scan_flags.fbg = 1;
+ }
+
+ scan.ch = kcalloc(scan.scan_req.num_of_channels,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!scan.ch) {
+ wvif->scan.status = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < scan.scan_req.num_of_channels; ++i)
+ scan.ch[i] = wvif->scan.curr[i]->hw_value;
+
+ if (wvif->scan.curr[0]->flags & IEEE80211_CHAN_NO_IR) {
+ scan.scan_req.min_channel_time = 50;
+ scan.scan_req.max_channel_time = 150;
+ } else {
+ scan.scan_req.min_channel_time = 10;
+ scan.scan_req.max_channel_time = 50;
+ }
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
+ wvif->scan.output_power != first->max_power) {
+ wvif->scan.output_power = first->max_power;
+ hif_set_output_power(wvif, wvif->scan.output_power * 10);
+ }
+ wvif->scan.status = wfx_scan_start(wvif, &scan);
+ kfree(scan.ch);
+ if (wvif->scan.status)
+ goto fail;
+ wvif->scan.curr = it;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ return;
+
+fail:
+ wvif->scan.curr = wvif->scan.end;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ up(&wvif->scan.lock);
+ schedule_work(&wvif->scan.work);
+}
+
+static void wfx_scan_complete(struct wfx_vif *wvif)
+{
+ up(&wvif->scan.lock);
+ atomic_set(&wvif->wdev->scan_in_progress, 0);
+
+ wfx_scan_work(&wvif->scan.work);
+}
+
+void wfx_scan_failed_cb(struct wfx_vif *wvif)
+{
+ if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
+ wvif->scan.status = -EIO;
+ schedule_work(&wvif->scan.timeout.work);
+ }
+}
+
+void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg)
+{
+ if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
+ wvif->scan.status = 1;
+ schedule_work(&wvif->scan.timeout.work);
+ }
+}
+
+void wfx_scan_timeout(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ scan.timeout.work);
+
+ if (atomic_xchg(&wvif->scan.in_progress, 0)) {
+ if (wvif->scan.status > 0) {
+ wvif->scan.status = 0;
+ } else if (!wvif->scan.status) {
+ dev_warn(wvif->wdev->dev, "timeout waiting for scan complete notification\n");
+ wvif->scan.status = -ETIMEDOUT;
+ wvif->scan.curr = wvif->scan.end;
+ hif_stop_scan(wvif);
+ }
+ wfx_scan_complete(wvif);
+ }
+}
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
new file mode 100644
index 000000000000..b4ddd0771a9b
--- /dev/null
+++ b/drivers/staging/wfx/scan.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Scan related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_SCAN_H
+#define WFX_SCAN_H
+
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_scan {
+ struct semaphore lock;
+ struct work_struct work;
+ struct delayed_work timeout;
+ struct cfg80211_scan_request *req;
+ struct ieee80211_channel **begin;
+ struct ieee80211_channel **curr;
+ struct ieee80211_channel **end;
+ struct hif_ssid_def ssids[HIF_API_MAX_NB_SSIDS];
+ int output_power;
+ int n_ssids;
+ int status;
+ atomic_t in_progress;
+};
+
+int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+void wfx_scan_work(struct work_struct *work);
+void wfx_scan_timeout(struct work_struct *work);
+void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg);
+void wfx_scan_failed_cb(struct wfx_vif *wvif);
+
+#endif /* WFX_SCAN_H */
diff --git a/drivers/staging/wfx/secure_link.h b/drivers/staging/wfx/secure_link.h
new file mode 100644
index 000000000000..666b26e5308d
--- /dev/null
+++ b/drivers/staging/wfx/secure_link.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, Silicon Laboratories, Inc.
+ */
+#ifndef WFX_SECURE_LINK_H
+#define WFX_SECURE_LINK_H
+
+#include <linux/of.h>
+
+#include "hif_api_general.h"
+
+struct wfx_dev;
+
+
+struct sl_context {
+};
+
+static inline bool wfx_is_secure_command(struct wfx_dev *wdev, int cmd_id)
+{
+ return false;
+}
+
+static inline int wfx_sl_decode(struct wfx_dev *wdev, struct hif_sl_msg *m)
+{
+ return -EIO;
+}
+
+static inline int wfx_sl_encode(struct wfx_dev *wdev, struct hif_msg *input,
+ struct hif_sl_msg *output)
+{
+ return -EIO;
+}
+
+static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev, u8 *ncp_pubkey,
+ u8 *ncp_pubmac)
+{
+ return -EIO;
+}
+
+static inline void wfx_sl_fill_pdata(struct device *dev,
+ struct wfx_platform_data *pdata)
+{
+ if (of_find_property(dev->of_node, "slk_key", NULL))
+ dev_err(dev, "secure link is not supported by this driver, ignoring provided key\n");
+}
+
+static inline int wfx_sl_init(struct wfx_dev *wdev)
+{
+ return -EIO;
+}
+
+static inline void wfx_sl_deinit(struct wfx_dev *wdev)
+{
+}
+
+
+#endif
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
new file mode 100644
index 000000000000..29848a202ab4
--- /dev/null
+++ b/drivers/staging/wfx/sta.c
@@ -0,0 +1,1684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "sta.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "bh.h"
+#include "key.h"
+#include "scan.h"
+#include "debug.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
+#define TXOP_UNIT 32
+#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
+
+static u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
+{
+ int i;
+ u32 ret = 0;
+	// WFx only supports 2GHz
+ struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (rates & BIT(i)) {
+ if (i >= sband->n_bitrates)
+ dev_warn(wdev->dev, "unsupported basic rate\n");
+ else
+ ret |= BIT(sband->bitrates[i].hw_value);
+ }
+ }
+ return ret;
+}
+
+static void __wfx_free_event_queue(struct list_head *list)
+{
+ struct wfx_hif_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, list, link) {
+ list_del(&event->link);
+ kfree(event);
+ }
+}
+
+static void wfx_free_event_queue(struct wfx_vif *wvif)
+{
+ LIST_HEAD(list);
+
+ spin_lock(&wvif->event_queue_lock);
+ list_splice_init(&wvif->event_queue, &list);
+ spin_unlock(&wvif->event_queue_lock);
+
+ __wfx_free_event_queue(&list);
+}
+
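+/* Small BSS-loss state machine: on "init", arm the bss_loss_work watchdog and
+ * start probing the AP with NULL data frames; on "good", cancel the watchdog
+ * and refresh the BSS parameters; on "bad", keep probing until the watchdog
+ * expires and reports a connection loss.
+ */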
+void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
+{
+ int tx = 0;
+
+ mutex_lock(&wvif->bss_loss_lock);
+ wvif->delayed_link_loss = 0;
+ cancel_work_sync(&wvif->bss_params_work);
+
+ /* If we have a pending unjoin */
+ if (wvif->delayed_unjoin)
+ goto end;
+
+ if (init) {
+ schedule_delayed_work(&wvif->bss_loss_work, HZ);
+ wvif->bss_loss_state = 0;
+
+ if (!atomic_read(&wvif->wdev->tx_lock))
+ tx = 1;
+ } else if (good) {
+ cancel_delayed_work_sync(&wvif->bss_loss_work);
+ wvif->bss_loss_state = 0;
+ schedule_work(&wvif->bss_params_work);
+ } else if (bad) {
+ /* FIXME Should we just keep going until we time out? */
+ if (wvif->bss_loss_state < 3)
+ tx = 1;
+ } else {
+ cancel_delayed_work_sync(&wvif->bss_loss_work);
+ wvif->bss_loss_state = 0;
+ }
+
+ /* Spit out a NULL packet to our AP if necessary */
+ // FIXME: call ieee80211_beacon_loss/ieee80211_connection_loss instead
+ if (tx) {
+ struct sk_buff *skb;
+
+ wvif->bss_loss_state++;
+
+ skb = ieee80211_nullfunc_get(wvif->wdev->hw, wvif->vif, false);
+ if (!skb)
+ goto end;
+ memset(IEEE80211_SKB_CB(skb), 0,
+ sizeof(*IEEE80211_SKB_CB(skb)));
+ IEEE80211_SKB_CB(skb)->control.vif = wvif->vif;
+ IEEE80211_SKB_CB(skb)->driver_rates[0].idx = 0;
+ IEEE80211_SKB_CB(skb)->driver_rates[0].count = 1;
+ IEEE80211_SKB_CB(skb)->driver_rates[1].idx = -1;
+ wfx_tx(wvif->wdev->hw, NULL, skb);
+ }
+end:
+ mutex_unlock(&wvif->bss_loss_lock);
+}
+
+static int wfx_set_uapsd_param(struct wfx_vif *wvif,
+ const struct wfx_edca_params *arg)
+{
+	/* Here is the mapping of AC to [queue, bit]:
+	 * VO [0, 3], VI [1, 2], BE [2, 1], BK [3, 0]
+ */
+
+ if (arg->uapsd_enable[IEEE80211_AC_VO])
+ wvif->uapsd_info.trig_voice = 1;
+ else
+ wvif->uapsd_info.trig_voice = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_VI])
+ wvif->uapsd_info.trig_video = 1;
+ else
+ wvif->uapsd_info.trig_video = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_BE])
+ wvif->uapsd_info.trig_be = 1;
+ else
+ wvif->uapsd_info.trig_be = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_BK])
+ wvif->uapsd_info.trig_bckgrnd = 1;
+ else
+ wvif->uapsd_info.trig_bckgrnd = 0;
+
+ /* Currently pseudo U-APSD operation is not supported, so setting
+ * MinAutoTriggerInterval, MaxAutoTriggerInterval and
+ * AutoTriggerStep to 0
+ */
+ wvif->uapsd_info.min_auto_trigger_interval = 0;
+ wvif->uapsd_info.max_auto_trigger_interval = 0;
+ wvif->uapsd_info.auto_trigger_step = 0;
+
+ return hif_set_uapsd_info(wvif, &wvif->uapsd_info);
+}
+
+int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
+{
+ wvif->fwd_probe_req = enable;
+ return hif_set_rx_filter(wvif, wvif->filter_bssid,
+ wvif->fwd_probe_req);
+}
+
+static int wfx_set_mcast_filter(struct wfx_vif *wvif,
+ struct wfx_grp_addr_table *fp)
+{
+ int i, ret;
+ struct hif_mib_config_data_filter config = { };
+ struct hif_mib_set_data_filtering filter_data = { };
+ struct hif_mib_mac_addr_data_frame_condition filter_addr_val = { };
+ struct hif_mib_uc_mc_bc_data_frame_condition filter_addr_type = { };
+
+	// Temporary workaround for filters: keep data filtering disabled for
+	// now, which makes the code below unreachable.
+ return hif_set_data_filtering(wvif, &filter_data);
+
+ if (!fp->enable) {
+ filter_data.enable = 0;
+ return hif_set_data_filtering(wvif, &filter_data);
+ }
+
+ // A1 Address match on list
+ for (i = 0; i < fp->num_addresses; i++) {
+ filter_addr_val.condition_idx = i;
+ filter_addr_val.address_type = HIF_MAC_ADDR_A1;
+ ether_addr_copy(filter_addr_val.mac_address,
+ fp->address_list[i]);
+ ret = hif_set_mac_addr_condition(wvif,
+ &filter_addr_val);
+ if (ret)
+ return ret;
+ config.mac_cond |= 1 << i;
+ }
+
+ // Accept unicast and broadcast
+ filter_addr_type.condition_idx = 0;
+ filter_addr_type.param.bits.type_unicast = 1;
+ filter_addr_type.param.bits.type_broadcast = 1;
+ ret = hif_set_uc_mc_bc_condition(wvif, &filter_addr_type);
+ if (ret)
+ return ret;
+
+ config.uc_mc_bc_cond = 1;
+ config.filter_idx = 0; // TODO #define MULTICAST_FILTERING 0
+ config.enable = 1;
+ ret = hif_set_config_data_filter(wvif, &config);
+ if (ret)
+ return ret;
+
+ // discard all data frames except match filter
+ filter_data.enable = 1;
+ filter_data.default_filter = 1; // discard all
+ ret = hif_set_data_filtering(wvif, &filter_data);
+
+ return ret;
+}
+
+void wfx_update_filtering(struct wfx_vif *wvif)
+{
+ int ret;
+	bool is_sta = wvif->vif && wvif->vif->type == NL80211_IFTYPE_STATION;
+ bool filter_bssid = wvif->filter_bssid;
+ bool fwd_probe_req = wvif->fwd_probe_req;
+ struct hif_mib_bcn_filter_enable bf_ctrl;
+ struct hif_ie_table_entry filter_ies[] = {
+ {
+ .ie_id = WLAN_EID_VENDOR_SPECIFIC,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ .oui = { 0x50, 0x6F, 0x9A },
+ }, {
+ .ie_id = WLAN_EID_HT_OPERATION,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }, {
+ .ie_id = WLAN_EID_ERP_INFO,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }
+ };
+ int n_filter_ies;
+
+ if (wvif->state == WFX_STATE_PASSIVE)
+ return;
+
+ if (wvif->disable_beacon_filter) {
+ bf_ctrl.enable = 0;
+ bf_ctrl.bcn_count = 1;
+ n_filter_ies = 0;
+ } else if (!is_sta) {
+ bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE |
+ HIF_BEACON_FILTER_AUTO_ERP;
+ bf_ctrl.bcn_count = 0;
+ n_filter_ies = 2;
+ } else {
+ bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE;
+ bf_ctrl.bcn_count = 0;
+ n_filter_ies = 3;
+ }
+
+ ret = hif_set_rx_filter(wvif, filter_bssid, fwd_probe_req);
+ if (!ret)
+ ret = hif_set_beacon_filter_table(wvif, n_filter_ies,
+ filter_ies);
+ if (!ret)
+ ret = hif_beacon_filter_control(wvif, bf_ctrl.enable,
+ bf_ctrl.bcn_count);
+ if (!ret)
+ ret = wfx_set_mcast_filter(wvif, &wvif->mcast_filter);
+ if (ret)
+ dev_err(wvif->wdev->dev, "update filtering failed: %d\n", ret);
+}
+
+static void wfx_update_filtering_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ update_filtering_work);
+
+ wfx_update_filtering(wvif);
+}
+
+u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ int i;
+ struct netdev_hw_addr *ha;
+ struct wfx_vif *wvif = NULL;
+ struct wfx_dev *wdev = hw->priv;
+ int count = netdev_hw_addr_list_count(mc_list);
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ memset(&wvif->mcast_filter, 0x00, sizeof(wvif->mcast_filter));
+ if (!count ||
+ count > ARRAY_SIZE(wvif->mcast_filter.address_list))
+ continue;
+
+ i = 0;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ ether_addr_copy(wvif->mcast_filter.address_list[i],
+ ha->addr);
+ i++;
+ }
+ wvif->mcast_filter.enable = true;
+ wvif->mcast_filter.num_addresses = count;
+ }
+
+ return 0;
+}
+
+void wfx_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 unused)
+{
+ struct wfx_vif *wvif = NULL;
+ struct wfx_dev *wdev = hw->priv;
+
+ *total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_PROBE_REQ;
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ down(&wvif->scan.lock);
+ wvif->filter_bssid = (*total_flags &
+ (FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 0 : 1;
+ wvif->disable_beacon_filter = !(*total_flags & FIF_PROBE_REQ);
+ wfx_fwd_probe_req(wvif, true);
+ wfx_update_filtering(wvif);
+ up(&wvif->scan.lock);
+ }
+}
+
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ int ret = 0;
+ /* To prevent re-applying PM request OID again and again*/
+ u16 old_uapsd_flags, new_uapsd_flags;
+ struct hif_req_edca_queue_params *edca;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ if (queue < hw->queues) {
+ old_uapsd_flags = *((u16 *) &wvif->uapsd_info);
+ edca = &wvif->edca.params[queue];
+
+ wvif->edca.uapsd_enable[queue] = params->uapsd;
+ edca->aifsn = params->aifs;
+ edca->cw_min = params->cw_min;
+ edca->cw_max = params->cw_max;
+ edca->tx_op_limit = params->txop * TXOP_UNIT;
+ edca->allowed_medium_time = 0;
+ ret = hif_set_edca_queue_params(wvif, edca);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (wvif->vif->type == NL80211_IFTYPE_STATION) {
+ ret = wfx_set_uapsd_param(wvif, &wvif->edca);
+ new_uapsd_flags = *((u16 *) &wvif->uapsd_info);
+ if (!ret && wvif->setbssparams_done &&
+ wvif->state == WFX_STATE_STA &&
+ old_uapsd_flags != new_uapsd_flags)
+ ret = wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&wdev->conf_mutex);
+ return ret;
+}
+
+int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+{
+ struct hif_req_set_pm_mode pm = *arg;
+ u16 uapsd_flags;
+ int ret;
+
+ if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
+ return 0;
+
+ memcpy(&uapsd_flags, &wvif->uapsd_info, sizeof(uapsd_flags));
+
+ if (uapsd_flags != 0)
+ pm.pm_mode.fast_psm = 0;
+
+	// The kernel disables power save when multiple vifs are in use. On the
+	// contrary, it is absolutely necessary to enable power save for the WF200.
+ if (wvif_count(wvif->wdev) > 1) {
+ pm.pm_mode.enter_psm = 1;
+ pm.pm_mode.fast_psm = 0;
+ }
+
+ if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
+ msecs_to_jiffies(300)))
+ dev_warn(wvif->wdev->dev,
+			 "timeout while waiting for set_pm_mode_complete\n");
+ ret = hif_set_pm(wvif, &pm);
+ // FIXME: why ?
+ if (-ETIMEDOUT == wvif->scan.status)
+ wvif->scan.status = 1;
+ return ret;
+}
+
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = NULL;
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ hif_rts_threshold(wvif, value);
+ return 0;
+}
+
+/* If successful, LOCKS the TX queue! */
+static int __wfx_flush(struct wfx_dev *wdev, bool drop)
+{
+ int ret;
+
+ for (;;) {
+ if (drop) {
+ wfx_tx_queues_clear(wdev);
+ } else {
+ ret = wait_event_timeout(
+ wdev->tx_queue_stats.wait_link_id_empty,
+ wfx_tx_queues_is_empty(wdev),
+ 2 * HZ);
+ }
+
+ if (!drop && ret <= 0) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ ret = 0;
+
+ wfx_tx_lock_flush(wdev);
+ if (!wfx_tx_queues_is_empty(wdev)) {
+ /* Highly unlikely: WSM requeued frames. */
+ wfx_tx_unlock(wdev);
+ continue;
+ }
+ break;
+ }
+ return ret;
+}
+
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif;
+
+ if (vif) {
+ wvif = (struct wfx_vif *) vif->drv_priv;
+ if (wvif->vif->type == NL80211_IFTYPE_MONITOR)
+ drop = true;
+ if (wvif->vif->type == NL80211_IFTYPE_AP &&
+ !wvif->enable_beacon)
+ drop = true;
+ }
+
+ // FIXME: only flush requested vif
+ if (!__wfx_flush(wdev, drop))
+ wfx_tx_unlock(wdev);
+}
+
+/* WSM callbacks */
+
+static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
+{
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
+ */
+ int rcpi_rssi;
+ int cqm_evt;
+
+ rcpi_rssi = raw_rcpi_rssi / 2 - 110;
+ if (rcpi_rssi <= wvif->cqm_rssi_thold)
+ cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
+}
+
+static void wfx_event_handler_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, event_handler_work);
+ struct wfx_hif_event *event;
+
+ LIST_HEAD(list);
+
+ spin_lock(&wvif->event_queue_lock);
+ list_splice_init(&wvif->event_queue, &list);
+ spin_unlock(&wvif->event_queue_lock);
+
+ list_for_each_entry(event, &list, link) {
+ switch (event->evt.event_id) {
+ case HIF_EVENT_IND_BSSLOST:
+ cancel_work_sync(&wvif->unjoin_work);
+ if (!down_trylock(&wvif->scan.lock)) {
+ wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+ up(&wvif->scan.lock);
+ } else {
+ /* Scan is in progress. Delay reporting.
+ * Scan complete will trigger bss_loss_work
+ */
+ wvif->delayed_link_loss = 1;
+ /* Also start a watchdog. */
+ schedule_delayed_work(&wvif->bss_loss_work,
+ 5 * HZ);
+ }
+ break;
+ case HIF_EVENT_IND_BSSREGAINED:
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+ break;
+ case HIF_EVENT_IND_RCPI_RSSI:
+ wfx_event_report_rssi(wvif,
+ event->evt.event_data.rcpi_rssi);
+ break;
+ case HIF_EVENT_IND_PS_MODE_ERROR:
+ dev_warn(wvif->wdev->dev,
+ "error while processing power save request\n");
+ break;
+ default:
+ dev_warn(wvif->wdev->dev,
+ "unhandled event indication: %.2x\n",
+ event->evt.event_id);
+ break;
+ }
+ }
+ __wfx_free_event_queue(&list);
+}
+
+static void wfx_bss_loss_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ bss_loss_work.work);
+
+ ieee80211_connection_loss(wvif->vif);
+}
+
+static void wfx_bss_params_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ bss_params_work);
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ wvif->bss_params.bss_flags.lost_count_only = 1;
+ hif_set_bss_params(wvif, &wvif->bss_params);
+ wvif->bss_params.bss_flags.lost_count_only = 0;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+}
+
+static void wfx_set_beacon_wakeup_period_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ set_beacon_wakeup_period_work);
+
+ hif_set_beacon_wakeup_period(wvif, wvif->dtim_period,
+ wvif->dtim_period);
+}
+
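+/* Leave the current BSS: reset the firmware interface, restore the MAC
+ * address and filtering, and clear the cached association state. Deferred if
+ * a scan is in progress.
+ */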
+static void wfx_do_unjoin(struct wfx_vif *wvif)
+{
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ if (atomic_read(&wvif->scan.in_progress)) {
+ if (wvif->delayed_unjoin)
+ dev_dbg(wvif->wdev->dev,
+ "delayed unjoin is already scheduled\n");
+ else
+ wvif->delayed_unjoin = true;
+ goto done;
+ }
+
+ wvif->delayed_link_loss = false;
+
+ if (!wvif->state)
+ goto done;
+
+ if (wvif->state == WFX_STATE_AP)
+ goto done;
+
+ cancel_work_sync(&wvif->update_filtering_work);
+ cancel_work_sync(&wvif->set_beacon_wakeup_period_work);
+ wvif->state = WFX_STATE_PASSIVE;
+
+ /* Unjoin is a reset. */
+ wfx_tx_flush(wvif->wdev);
+ hif_keep_alive_period(wvif, 0);
+ hif_reset(wvif, false);
+ hif_set_output_power(wvif, wvif->wdev->output_power * 10);
+ wvif->dtim_period = 0;
+ hif_set_macaddr(wvif, wvif->vif->addr);
+ wfx_free_event_queue(wvif);
+ cancel_work_sync(&wvif->event_handler_work);
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+
+ /* Disable Block ACKs */
+ hif_set_block_ack_policy(wvif, 0, 0);
+
+ wvif->disable_beacon_filter = false;
+ wfx_update_filtering(wvif);
+ memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
+ wvif->setbssparams_done = false;
+ memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+
+done:
+ mutex_unlock(&wvif->wdev->conf_mutex);
+}
+
+static void wfx_set_mfp(struct wfx_vif *wvif,
+ struct cfg80211_bss *bss)
+{
+ const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
+ const int pairwise_cipher_suite_size = 4 / sizeof(u16);
+ const int akm_suite_size = 4 / sizeof(u16);
+ const u16 *ptr = NULL;
+ bool mfpc = false;
+ bool mfpr = false;
+
+ /* 802.11w protected mgmt frames */
+
+ /* retrieve MFPC and MFPR flags from beacon or PBRSP */
+
+ rcu_read_lock();
+ if (bss)
+ ptr = (const u16 *) ieee80211_bss_get_ie(bss,
+ WLAN_EID_RSN);
+
+ if (ptr) {
+ ptr += pairwise_cipher_suite_count_offset;
+ ptr += 1 + pairwise_cipher_suite_size * *ptr;
+ ptr += 1 + akm_suite_size * *ptr;
+ mfpr = *ptr & BIT(6);
+ mfpc = *ptr & BIT(7);
+ }
+ rcu_read_unlock();
+
+ hif_set_mfp(wvif, mfpc, mfpr);
+}
+
+/* MUST be called with tx_lock held! It will be unlocked for us. */
+static void wfx_do_join(struct wfx_vif *wvif)
+{
+ const u8 *bssid;
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+ struct cfg80211_bss *bss = NULL;
+ struct hif_req_join join = {
+ .mode = conf->ibss_joined ? HIF_MODE_IBSS : HIF_MODE_BSS,
+ .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
+ .probe_for_join = 1,
+ .atim_window = 0,
+ .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
+ conf->basic_rates),
+ };
+
+ if (wvif->channel->flags & IEEE80211_CHAN_NO_IR)
+ join.probe_for_join = 0;
+
+ if (wvif->state)
+ wfx_do_unjoin(wvif);
+
+ bssid = wvif->vif->bss_conf.bssid;
+
+ bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
+ bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+
+ if (!bss && !conf->ibss_joined) {
+ wfx_tx_unlock(wvif->wdev);
+ return;
+ }
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ /* Under the conf lock: check scan status and
+ * bail out if it is in progress.
+ */
+ if (atomic_read(&wvif->scan.in_progress)) {
+ wfx_tx_unlock(wvif->wdev);
+ goto done_put;
+ }
+
+ /* Sanity check basic rates */
+ if (!join.basic_rate_set)
+ join.basic_rate_set = 7;
+
+ /* Sanity check beacon interval */
+ if (!wvif->beacon_int)
+ wvif->beacon_int = 1;
+
+ join.beacon_interval = wvif->beacon_int;
+
+ // DTIM period will be set on first Beacon
+ wvif->dtim_period = 0;
+
+ join.channel_number = wvif->channel->hw_value;
+ memcpy(join.bssid, bssid, sizeof(join.bssid));
+
+ if (!conf->ibss_joined) {
+ const u8 *ssidie;
+
+ rcu_read_lock();
+ ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssidie) {
+ join.ssid_length = ssidie[1];
+ memcpy(join.ssid, &ssidie[2], join.ssid_length);
+ }
+ rcu_read_unlock();
+ }
+
+ wfx_tx_flush(wvif->wdev);
+
+ if (wvif_count(wvif->wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+
+ wfx_set_mfp(wvif, bss);
+
+ /* Perform actual join */
+ wvif->wdev->tx_burst_idx = -1;
+ if (hif_join(wvif, &join)) {
+ ieee80211_connection_loss(wvif->vif);
+ wvif->join_complete_status = -1;
+ /* Tx lock still held, unjoin will clear it. */
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else {
+ wvif->join_complete_status = 0;
+ if (wvif->vif->type == NL80211_IFTYPE_ADHOC)
+ wvif->state = WFX_STATE_IBSS;
+ else
+ wvif->state = WFX_STATE_PRE_STA;
+ wfx_tx_unlock(wvif->wdev);
+
+ /* Upload keys */
+ wfx_upload_keys(wvif);
+
+ /* Due to beacon filtering it is possible that the
+		 * AP's beacon is not known to the mac80211 stack.
+		 * Disable filtering temporarily to make sure the stack
+		 * receives at least one.
+ */
+ wvif->disable_beacon_filter = true;
+ }
+ wfx_update_filtering(wvif);
+
+done_put:
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ if (bss)
+ cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
+}
+
+static void wfx_unjoin_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, unjoin_work);
+
+ wfx_do_unjoin(wvif);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_link_entry *entry;
+ struct sk_buff *skb;
+
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+
+ sta_priv->vif_id = wvif->id;
+ sta_priv->link_id = wfx_find_link_id(wvif, sta->addr);
+ if (!sta_priv->link_id) {
+		dev_warn(wdev->dev, "no more link-id available\n");
+ return -ENOENT;
+ }
+
+ entry = &wvif->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&wvif->ps_state_lock);
+ if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ wvif->sta_asleep_mask |= BIT(sta_priv->link_id);
+ entry->status = WFX_LINK_HARD;
+ while ((skb = skb_dequeue(&entry->rx_queue)))
+ ieee80211_rx_irqsafe(wdev->hw, skb);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return 0;
+}
+
+int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_link_entry *entry;
+
+ if (wvif->vif->type != NL80211_IFTYPE_AP || !sta_priv->link_id)
+ return 0;
+
+ entry = &wvif->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&wvif->ps_state_lock);
+ entry->status = WFX_LINK_RESERVE;
+ entry->timestamp = jiffies;
+ wfx_tx_lock(wdev);
+ if (!schedule_work(&wvif->link_id_work))
+ wfx_tx_unlock(wdev);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ flush_work(&wvif->link_id_work);
+ return 0;
+}
+
+static void wfx_set_cts_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_cts_work);
+ u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
+ struct hif_ie_flags target_frame = {
+ .beacon = 1,
+ };
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ erp_ie[2] = wvif->erp_info;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+
+ hif_erp_use_protection(wvif, erp_ie[2] & WLAN_ERP_USE_PROTECTION);
+
+ if (wvif->vif->type != NL80211_IFTYPE_STATION)
+ hif_update_ie(wvif, &target_frame, erp_ie, sizeof(erp_ie));
+}
+
+static int wfx_start_ap(struct wfx_vif *wvif)
+{
+ int ret;
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+ struct hif_req_start start = {
+ .channel_number = wvif->channel->hw_value,
+ .beacon_interval = conf->beacon_int,
+ .dtim_period = conf->dtim_period,
+ .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
+ .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
+ conf->basic_rates),
+ };
+
+ memset(start.ssid, 0, sizeof(start.ssid));
+ if (!conf->hidden_ssid) {
+ start.ssid_length = conf->ssid_len;
+ memcpy(start.ssid, conf->ssid, start.ssid_length);
+ }
+
+ wvif->beacon_int = conf->beacon_int;
+ wvif->dtim_period = conf->dtim_period;
+
+ memset(&wvif->link_id_db, 0, sizeof(wvif->link_id_db));
+
+ wvif->wdev->tx_burst_idx = -1;
+ ret = hif_start(wvif, &start);
+ if (!ret)
+ ret = wfx_upload_keys(wvif);
+ if (!ret) {
+ if (wvif_count(wvif->wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ wvif->state = WFX_STATE_AP;
+ wfx_update_filtering(wvif);
+ }
+ return ret;
+}
+
+static int wfx_update_beaconing(struct wfx_vif *wvif)
+{
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+
+ if (wvif->vif->type == NL80211_IFTYPE_AP) {
+ /* TODO: check if changed channel, band */
+ if (wvif->state != WFX_STATE_AP ||
+ wvif->beacon_int != conf->beacon_int) {
+ wfx_tx_lock_flush(wvif->wdev);
+ if (wvif->state != WFX_STATE_PASSIVE)
+ hif_reset(wvif, false);
+ wvif->state = WFX_STATE_PASSIVE;
+ wfx_start_ap(wvif);
+ wfx_tx_unlock(wvif->wdev);
+ } else {
+ }
+ }
+ return 0;
+}
+
+static int wfx_upload_beacon(struct wfx_vif *wvif)
+{
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ struct ieee80211_mgmt *mgmt;
+ struct hif_mib_template_frame *p;
+
+ if (wvif->vif->type == NL80211_IFTYPE_STATION ||
+ wvif->vif->type == NL80211_IFTYPE_MONITOR ||
+ wvif->vif->type == NL80211_IFTYPE_UNSPECIFIED)
+ goto done;
+
+ skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
+
+ if (!skb)
+ return -ENOMEM;
+
+ p = (struct hif_mib_template_frame *) skb_push(skb, 4);
+ p->frame_type = HIF_TMPLT_BCN;
+ p->init_rate = API_RATE_INDEX_B_1MBPS; /* 1Mbps DSSS */
+ p->frame_length = cpu_to_le16(skb->len - 4);
+
+ ret = hif_set_template_frame(wvif, p);
+
+ skb_pull(skb, 4);
+
+ if (ret)
+ goto done;
+ /* TODO: Distill probe resp; remove TIM and any other beacon-specific
+ * IEs
+ */
+ mgmt = (void *)skb->data;
+ mgmt->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
+
+ p->frame_type = HIF_TMPLT_PRBRES;
+
+ ret = hif_set_template_frame(wvif, p);
+ wfx_fwd_probe_req(wvif, false);
+
+done:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int wfx_is_ht(const struct wfx_ht_info *ht_info)
+{
+ return ht_info->channel_type != NL80211_CHAN_NO_HT;
+}
+
+static int wfx_ht_greenfield(const struct wfx_ht_info *ht_info)
+{
+ return wfx_is_ht(ht_info) &&
+ (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ht_info->operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+}
+
+static int wfx_ht_ampdu_density(const struct wfx_ht_info *ht_info)
+{
+ if (!wfx_is_ht(ht_info))
+ return 0;
+ return ht_info->ht_cap.ampdu_density;
+}
+
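+/* Complete the association: retrieve HT and rate information about the AP
+ * from mac80211, then push the resulting association mode, BSS parameters and
+ * power-save settings to the firmware.
+ */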
+static void wfx_join_finalize(struct wfx_vif *wvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct hif_mib_set_association_mode association_mode = { };
+
+ if (info->dtim_period)
+ wvif->dtim_period = info->dtim_period;
+ wvif->beacon_int = info->beacon_int;
+
+ rcu_read_lock();
+ if (info->bssid && !info->ibss_joined)
+ sta = ieee80211_find_sta(wvif->vif, info->bssid);
+ if (sta) {
+ wvif->ht_info.ht_cap = sta->ht_cap;
+ wvif->bss_params.operational_rate_set =
+ wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
+ wvif->ht_info.operation_mode = info->ht_operation_mode;
+ } else {
+ memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+ wvif->bss_params.operational_rate_set = -1;
+ }
+ rcu_read_unlock();
+
+ /* Non Greenfield stations present */
+ if (wvif->ht_info.operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+ hif_dual_cts_protection(wvif, true);
+ else
+ hif_dual_cts_protection(wvif, false);
+
+ association_mode.preambtype_use = 1;
+ association_mode.mode = 1;
+ association_mode.rateset = 1;
+ association_mode.spacing = 1;
+ association_mode.preamble_type = info->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG;
+ association_mode.basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates));
+ association_mode.mixed_or_greenfield_type = wfx_ht_greenfield(&wvif->ht_info);
+ association_mode.mpdu_start_spacing = wfx_ht_ampdu_density(&wvif->ht_info);
+
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+
+ wvif->bss_params.beacon_lost_count = 20;
+ wvif->bss_params.aid = info->aid;
+
+ if (wvif->dtim_period < 1)
+ wvif->dtim_period = 1;
+
+ hif_set_association_mode(wvif, &association_mode);
+
+ if (!info->ibss_joined) {
+ hif_keep_alive_period(wvif, 30 /* sec */);
+ hif_set_bss_params(wvif, &wvif->bss_params);
+ wvif->setbssparams_done = true;
+ wfx_set_beacon_wakeup_period_work(&wvif->set_beacon_wakeup_period_work);
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+}
+
+void wfx_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ bool do_join = false;
+ int i;
+ int nb_arp_addr;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ /* TODO: BSS_CHANGED_QOS */
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ struct hif_mib_arp_ip_addr_table filter = { };
+
+ nb_arp_addr = info->arp_addr_cnt;
+ if (nb_arp_addr <= 0 || nb_arp_addr > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+ nb_arp_addr = 0;
+
+ for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
+ filter.condition_idx = i;
+ if (i < nb_arp_addr) {
+ // Caution: type of arp_addr_list[i] is __be32
+ memcpy(filter.ipv4_address,
+ &info->arp_addr_list[i],
+ sizeof(filter.ipv4_address));
+ filter.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+ } else {
+ filter.arp_enable = HIF_ARP_NS_FILTERING_DISABLE;
+ }
+ hif_set_arp_ipv4_filter(wvif, &filter);
+ }
+ }
+
+ if (changed &
+ (BSS_CHANGED_BEACON | BSS_CHANGED_AP_PROBE_RESP |
+ BSS_CHANGED_BSSID | BSS_CHANGED_SSID | BSS_CHANGED_IBSS)) {
+ wvif->beacon_int = info->beacon_int;
+ wfx_update_beaconing(wvif);
+ wfx_upload_beacon(wvif);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED &&
+ wvif->state != WFX_STATE_IBSS) {
+ if (wvif->enable_beacon != info->enable_beacon) {
+ hif_beacon_transmit(wvif, info->enable_beacon);
+ wvif->enable_beacon = info->enable_beacon;
+ }
+ }
+
+ /* assoc/disassoc, or maybe AID changed */
+ if (changed & BSS_CHANGED_ASSOC) {
+ wfx_tx_lock_flush(wdev);
+ wvif->wep_default_key_id = -1;
+ wfx_tx_unlock(wdev);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC && !info->assoc &&
+ (wvif->state == WFX_STATE_STA || wvif->state == WFX_STATE_IBSS)) {
+		/* Schedule unjoin work */
+ wfx_tx_lock(wdev);
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wdev);
+ } else {
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ if (info->ibss_joined)
+ do_join = true;
+ else if (wvif->state == WFX_STATE_AP)
+ wfx_update_beaconing(wvif);
+ }
+
+ if (changed & BSS_CHANGED_BSSID)
+ do_join = true;
+
+ if (changed &
+ (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID |
+ BSS_CHANGED_IBSS | BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_HT)) {
+ if (info->assoc) {
+ if (wvif->state < WFX_STATE_PRE_STA) {
+ ieee80211_connection_loss(vif);
+ mutex_unlock(&wdev->conf_mutex);
+ return;
+ } else if (wvif->state == WFX_STATE_PRE_STA) {
+ wvif->state = WFX_STATE_STA;
+ }
+ } else {
+ do_join = true;
+ }
+
+ if (info->assoc || info->ibss_joined)
+ wfx_join_finalize(wvif, info);
+ else
+ memset(&wvif->bss_params, 0,
+ sizeof(wvif->bss_params));
+ }
+ }
+
+ /* ERP Protection */
+ if (changed & (BSS_CHANGED_ASSOC |
+ BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_ERP_PREAMBLE)) {
+ u32 prev_erp_info = wvif->erp_info;
+
+ if (info->use_cts_prot)
+ wvif->erp_info |= WLAN_ERP_USE_PROTECTION;
+ else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
+ wvif->erp_info &= ~WLAN_ERP_USE_PROTECTION;
+
+ if (info->use_short_preamble)
+ wvif->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
+ else
+ wvif->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
+
+ if (prev_erp_info != wvif->erp_info)
+ schedule_work(&wvif->set_cts_work);
+ }
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT))
+ hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
+ struct hif_mib_rcpi_rssi_threshold th = {
+ .rolling_average_count = 8,
+ .detection = 1,
+ };
+
+ wvif->cqm_rssi_thold = info->cqm_rssi_thold;
+
+ if (!info->cqm_rssi_thold && !info->cqm_rssi_hyst) {
+ th.upperthresh = 1;
+ th.lowerthresh = 1;
+ } else {
+			/* FIXME: this is not the correct way of setting the
+			 * threshold. Upper and lower must be set equal here
+			 * and adjusted in the callback. However, the current
+			 * implementation is much more reliable and stable.
+ */
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
+ */
+ th.upper_threshold = info->cqm_rssi_thold + info->cqm_rssi_hyst;
+ th.upper_threshold = (th.upper_threshold + 110) * 2;
+ th.lower_threshold = info->cqm_rssi_thold;
+ th.lower_threshold = (th.lower_threshold + 110) * 2;
+ }
+ hif_set_rcpi_rssi_threshold(wvif, &th);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER &&
+ info->txpower != wdev->output_power) {
+ wdev->output_power = info->txpower;
+ hif_set_output_power(wvif, wdev->output_power * 10);
+ }
+ mutex_unlock(&wdev->conf_mutex);
+
+ if (do_join) {
+ wfx_tx_lock_flush(wdev);
+ wfx_do_join(wvif); /* Will unlock it for us */
+ }
+}
+
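+/* Illustrative sketch, not part of the driver: the CQM block above programs
+ * the firmware threshold as RCPI (unsigned Q7.1) from an RSSI in dBm using
+ * RSSI = RCPI / 2 - 110, i.e. RCPI = (RSSI + 110) * 2. A hypothetical helper
+ * making the conversion explicit could look like:
+ *
+ *   static u8 wfx_rssi_dbm_to_rcpi(int rssi_dbm)
+ *   {
+ *           return clamp(2 * (rssi_dbm + 110), 0, 255);
+ *   }
+ *
+ * so -110 dBm maps to 0 and -60 dBm maps to 100.
+ */
+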
+static void wfx_ps_notify(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd,
+ int link_id)
+{
+ u32 bit, prev;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ /* Zero link id means "for all link IDs" */
+ if (link_id) {
+ bit = BIT(link_id);
+ } else if (notify_cmd != STA_NOTIFY_AWAKE) {
+ dev_warn(wvif->wdev->dev, "unsupported notify command\n");
+ bit = 0;
+ } else {
+ bit = wvif->link_id_map;
+ }
+ prev = wvif->sta_asleep_mask & bit;
+
+ switch (notify_cmd) {
+ case STA_NOTIFY_SLEEP:
+ if (!prev) {
+ if (wvif->mcast_buffered && !wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_start_work);
+ wvif->sta_asleep_mask |= bit;
+ }
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (prev) {
+ wvif->sta_asleep_mask &= ~bit;
+ wvif->pspoll_mask &= ~bit;
+ if (link_id && !wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_stop_work);
+ wfx_bh_request_tx(wvif->wdev);
+ }
+ break;
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+}
+
+void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+
+ wfx_ps_notify(wvif, notify_cmd, sta_priv->link_id);
+}
+
+static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
+{
+ struct sk_buff *skb;
+ struct hif_ie_flags target_frame = {
+ .beacon = 1,
+ };
+ u16 tim_offset, tim_length;
+ u8 *tim_ptr;
+
+ skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
+ &tim_offset, &tim_length);
+ if (!skb) {
+ if (!__wfx_flush(wvif->wdev, true))
+ wfx_tx_unlock(wvif->wdev);
+ return -ENOENT;
+ }
+ tim_ptr = skb->data + tim_offset;
+
+ if (tim_offset && tim_length >= 6) {
+ /* Ignore DTIM count from mac80211:
+ * firmware handles DTIM internally.
+ */
+ tim_ptr[2] = 0;
+
+ /* Set/reset aid0 bit */
+ if (aid0_bit_set)
+ tim_ptr[4] |= 1;
+ else
+ tim_ptr[4] &= ~1;
+ }
+
+ hif_update_ie(wvif, &target_frame, tim_ptr, tim_length);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
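+/* For reference, the byte layout of a standard 802.11 TIM element, which is
+ * what tim_ptr points at above (not driver-specific):
+ *
+ *   tim_ptr[0]   Element ID (5)
+ *   tim_ptr[1]   Length
+ *   tim_ptr[2]   DTIM Count      (zeroed above, the firmware tracks it)
+ *   tim_ptr[3]   DTIM Period
+ *   tim_ptr[4]   Bitmap Control  (bit 0 is the multicast/AID0 bit)
+ *   tim_ptr[5..] Partial Virtual Bitmap
+ */
+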
+static void wfx_set_tim_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_tim_work);
+
+ wfx_set_tim_impl(wvif, wvif->aid0_bit_set);
+}
+
+int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
+
+ schedule_work(&wvif->set_tim_work);
+ return 0;
+}
+
+static void wfx_mcast_start_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ mcast_start_work);
+ long tmo = wvif->dtim_period * TU_TO_JIFFIES(wvif->beacon_int + 20);
+
+ cancel_work_sync(&wvif->mcast_stop_work);
+ if (!wvif->aid0_bit_set) {
+ wfx_tx_lock_flush(wvif->wdev);
+ wfx_set_tim_impl(wvif, true);
+ wvif->aid0_bit_set = true;
+ mod_timer(&wvif->mcast_timeout, jiffies + tmo);
+ wfx_tx_unlock(wvif->wdev);
+ }
+}
+
+static void wfx_mcast_stop_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ mcast_stop_work);
+
+ if (wvif->aid0_bit_set) {
+ del_timer_sync(&wvif->mcast_timeout);
+ wfx_tx_lock_flush(wvif->wdev);
+ wvif->aid0_bit_set = false;
+ wfx_set_tim_impl(wvif, false);
+ wfx_tx_unlock(wvif->wdev);
+ }
+}
+
+static void wfx_mcast_timeout(struct timer_list *t)
+{
+ struct wfx_vif *wvif = from_timer(wvif, t, mcast_timeout);
+
+ dev_warn(wvif->wdev->dev, "multicast delivery timeout\n");
+ spin_lock_bh(&wvif->ps_state_lock);
+ wvif->mcast_tx = wvif->aid0_bit_set && wvif->mcast_buffered;
+ if (wvif->mcast_tx)
+ wfx_bh_request_tx(wvif->wdev);
+ spin_unlock_bh(&wvif->ps_state_lock);
+}
+
+int wfx_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ /* Aggregation is implemented fully in firmware,
+	 * including block ack negotiation. Do not allow the
+	 * mac80211 stack to do anything: it interferes with
+	 * the firmware.
+ */
+
+ /* Note that we still need this function stubbed. */
+
+ return -ENOTSUPP;
+}
+
+void wfx_suspend_resume(struct wfx_vif *wvif,
+ struct hif_ind_suspend_resume_tx *arg)
+{
+ if (arg->suspend_resume_flags.bc_mc_only) {
+ bool cancel_tmo = false;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ if (!arg->suspend_resume_flags.resume)
+ wvif->mcast_tx = false;
+ else
+ wvif->mcast_tx = wvif->aid0_bit_set &&
+ wvif->mcast_buffered;
+ if (wvif->mcast_tx) {
+ cancel_tmo = true;
+ wfx_bh_request_tx(wvif->wdev);
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (cancel_tmo)
+ del_timer_sync(&wvif->mcast_timeout);
+ } else if (arg->suspend_resume_flags.resume) {
+ // FIXME: should change each station status independently
+ wfx_ps_notify(wvif, STA_NOTIFY_AWAKE, 0);
+ wfx_bh_request_tx(wvif->wdev);
+ } else {
+ // FIXME: should change each station status independently
+ wfx_ps_notify(wvif, STA_NOTIFY_SLEEP, 0);
+ }
+}
+
+int wfx_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ return 0;
+}
+
+void wfx_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+}
+
+void wfx_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ u32 changed)
+{
+}
+
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct ieee80211_channel *ch = conf->def.chan;
+
+ WARN(wvif->channel, "channel overwrite");
+ wvif->channel = ch;
+ wvif->ht_info.channel_type = cfg80211_get_chandef_type(&conf->def);
+
+ return 0;
+}
+
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct ieee80211_channel *ch = conf->def.chan;
+
+ WARN(wvif->channel != ch, "channel mismatch");
+ wvif->channel = NULL;
+}
+
+int wfx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ int ret = 0;
+ struct wfx_dev *wdev = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct wfx_vif *wvif;
+
+	// FIXME: the interface id should not be hardcoded
+ wvif = wdev_to_wvif(wdev, 0);
+ if (!wvif) {
+ WARN(1, "interface 0 does not exist anymore");
+ return 0;
+ }
+
+ down(&wvif->scan.lock);
+ mutex_lock(&wdev->conf_mutex);
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ wdev->output_power = conf->power_level;
+ hif_set_output_power(wvif, wdev->output_power * 10);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ memset(&wvif->powersave_mode, 0,
+ sizeof(wvif->powersave_mode));
+ if (conf->flags & IEEE80211_CONF_PS) {
+ wvif->powersave_mode.pm_mode.enter_psm = 1;
+ if (conf->dynamic_ps_timeout > 0) {
+ wvif->powersave_mode.pm_mode.fast_psm = 1;
+ /*
+ * Firmware does not support more than
+ * 128ms
+ */
+ wvif->powersave_mode.fast_psm_idle_period =
+ min(conf->dynamic_ps_timeout *
+ 2, 255);
+ }
+ }
+ if (wvif->state == WFX_STATE_STA && wvif->bss_params.aid)
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ wvif = wdev_to_wvif(wdev, 0);
+ }
+
+ mutex_unlock(&wdev->conf_mutex);
+ up(&wvif->scan.lock);
+ return ret;
+}
+
+int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ int i;
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+	// FIXME: parameters are set by the kernel just after interface_add.
+ // Keep struct hif_req_edca_queue_params blank?
+ struct hif_req_edca_queue_params default_edca_params[] = {
+ [IEEE80211_AC_VO] = {
+ .queue_id = HIF_QUEUE_ID_VOICE,
+ .aifsn = 2,
+ .cw_min = 3,
+ .cw_max = 7,
+ .tx_op_limit = TXOP_UNIT * 47,
+ },
+ [IEEE80211_AC_VI] = {
+ .queue_id = HIF_QUEUE_ID_VIDEO,
+ .aifsn = 2,
+ .cw_min = 7,
+ .cw_max = 15,
+ .tx_op_limit = TXOP_UNIT * 94,
+ },
+ [IEEE80211_AC_BE] = {
+ .queue_id = HIF_QUEUE_ID_BESTEFFORT,
+ .aifsn = 3,
+ .cw_min = 15,
+ .cw_max = 1023,
+ .tx_op_limit = TXOP_UNIT * 0,
+ },
+ [IEEE80211_AC_BK] = {
+ .queue_id = HIF_QUEUE_ID_BACKGROUND,
+ .aifsn = 7,
+ .cw_min = 15,
+ .cw_max = 1023,
+ .tx_op_limit = TXOP_UNIT * 0,
+ },
+ };
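+	/* For reference: these values match the 802.11 default EDCA parameter
+	 * set for an OFDM PHY, assuming TXOP_UNIT is 32us (so 47 units =
+	 * 1.504ms for voice and 94 units = 3.008ms for video).
+	 */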
+
+ BUILD_BUG_ON(ARRAY_SIZE(default_edca_params) != ARRAY_SIZE(wvif->edca.params));
+ if (wfx_api_older_than(wdev, 2, 0)) {
+ default_edca_params[IEEE80211_AC_BE].queue_id = HIF_QUEUE_ID_BACKGROUND;
+ default_edca_params[IEEE80211_AC_BK].queue_id = HIF_QUEUE_ID_BESTEFFORT;
+ }
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_UAPSD |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ mutex_unlock(&wdev->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ if (!wdev->vif[i]) {
+ wdev->vif[i] = vif;
+ wvif->id = i;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(wdev->vif)) {
+ mutex_unlock(&wdev->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+ // FIXME: prefer use of container_of() to get vif
+ wvif->vif = vif;
+ wvif->wdev = wdev;
+
+ INIT_WORK(&wvif->link_id_work, wfx_link_id_work);
+ INIT_DELAYED_WORK(&wvif->link_id_gc_work, wfx_link_id_gc_work);
+
+ spin_lock_init(&wvif->ps_state_lock);
+ INIT_WORK(&wvif->set_tim_work, wfx_set_tim_work);
+
+ INIT_WORK(&wvif->mcast_start_work, wfx_mcast_start_work);
+ INIT_WORK(&wvif->mcast_stop_work, wfx_mcast_stop_work);
+ timer_setup(&wvif->mcast_timeout, wfx_mcast_timeout, 0);
+
+ wvif->setbssparams_done = false;
+ mutex_init(&wvif->bss_loss_lock);
+ INIT_DELAYED_WORK(&wvif->bss_loss_work, wfx_bss_loss_work);
+
+ wvif->wep_default_key_id = -1;
+ INIT_WORK(&wvif->wep_key_work, wfx_wep_key_work);
+
+ sema_init(&wvif->scan.lock, 1);
+ INIT_WORK(&wvif->scan.work, wfx_scan_work);
+ INIT_DELAYED_WORK(&wvif->scan.timeout, wfx_scan_timeout);
+
+ spin_lock_init(&wvif->event_queue_lock);
+ INIT_LIST_HEAD(&wvif->event_queue);
+ INIT_WORK(&wvif->event_handler_work, wfx_event_handler_work);
+
+ init_completion(&wvif->set_pm_mode_complete);
+ complete(&wvif->set_pm_mode_complete);
+ INIT_WORK(&wvif->set_beacon_wakeup_period_work,
+ wfx_set_beacon_wakeup_period_work);
+ INIT_WORK(&wvif->update_filtering_work, wfx_update_filtering_work);
+ INIT_WORK(&wvif->bss_params_work, wfx_bss_params_work);
+ INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work);
+ INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
+
+ mutex_unlock(&wdev->conf_mutex);
+
+ hif_set_macaddr(wvif, vif->addr);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ memcpy(&wvif->edca.params[i], &default_edca_params[i],
+ sizeof(default_edca_params[i]));
+ wvif->edca.uapsd_enable[i] = false;
+ hif_set_edca_queue_params(wvif, &wvif->edca.params[i]);
+ }
+ wfx_set_uapsd_param(wvif, &wvif->edca);
+
+ wfx_tx_policy_init(wvif);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ // Combo mode does not support Block Acks. We can re-enable them
+ if (wvif_count(wdev) == 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ else
+ hif_set_block_ack_policy(wvif, 0x00, 0x00);
+		// Combo mode forces powersave mode. We can re-enable it now
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ return 0;
+}
+
+void wfx_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ int i;
+
+ // If scan is in progress, stop it
+ while (down_trylock(&wvif->scan.lock))
+ schedule();
+ up(&wvif->scan.lock);
+ wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
+
+ mutex_lock(&wdev->conf_mutex);
+ switch (wvif->state) {
+ case WFX_STATE_PRE_STA:
+ case WFX_STATE_STA:
+ case WFX_STATE_IBSS:
+ wfx_tx_lock_flush(wdev);
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wdev);
+ break;
+ case WFX_STATE_AP:
+ for (i = 0; wvif->link_id_map; ++i) {
+ if (wvif->link_id_map & BIT(i)) {
+ wfx_unmap_link(wvif, i);
+ wvif->link_id_map &= ~BIT(i);
+ }
+ }
+ memset(wvif->link_id_db, 0, sizeof(wvif->link_id_db));
+ wvif->sta_asleep_mask = 0;
+ wvif->enable_beacon = false;
+ wvif->mcast_tx = false;
+ wvif->aid0_bit_set = false;
+ wvif->mcast_buffered = false;
+ wvif->pspoll_mask = 0;
+ /* reset.link_id = 0; */
+ hif_reset(wvif, false);
+ break;
+ default:
+ break;
+ }
+
+ wvif->state = WFX_STATE_PASSIVE;
+ wfx_tx_queues_wait_empty_vif(wvif);
+ wfx_tx_unlock(wdev);
+
+	/* FIXME: in addition to resetting the MAC address, try to reset the interface */
+ hif_set_macaddr(wvif, NULL);
+
+ cancel_delayed_work_sync(&wvif->scan.timeout);
+
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+ cancel_delayed_work_sync(&wvif->link_id_gc_work);
+ del_timer_sync(&wvif->mcast_timeout);
+ wfx_free_event_queue(wvif);
+
+ wdev->vif[wvif->id] = NULL;
+ wvif->vif = NULL;
+
+ mutex_unlock(&wdev->conf_mutex);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ // Combo mode does not support Block Acks. We can re-enable them
+ if (wvif_count(wdev) == 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ else
+ hif_set_block_ack_policy(wvif, 0x00, 0x00);
+		// Combo mode forces powersave mode. We can re-enable it now
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+}
+
+int wfx_start(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+
+void wfx_stop(struct ieee80211_hw *hw)
+{
+ struct wfx_dev *wdev = hw->priv;
+
+ wfx_tx_lock_flush(wdev);
+ mutex_lock(&wdev->conf_mutex);
+ wfx_tx_queues_clear(wdev);
+ mutex_unlock(&wdev->conf_mutex);
+ wfx_tx_unlock(wdev);
+ WARN(atomic_read(&wdev->tx_lock), "tx_lock is locked");
+}
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
new file mode 100644
index 000000000000..4ccf1b17632b
--- /dev/null
+++ b/drivers/staging/wfx/sta.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_STA_H
+#define WFX_STA_H
+
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
+enum wfx_state {
+ WFX_STATE_PASSIVE = 0,
+ WFX_STATE_PRE_STA,
+ WFX_STATE_STA,
+ WFX_STATE_IBSS,
+ WFX_STATE_AP,
+};
+
+struct wfx_ht_info {
+ struct ieee80211_sta_ht_cap ht_cap;
+ enum nl80211_channel_type channel_type;
+ u16 operation_mode;
+};
+
+struct wfx_hif_event {
+ struct list_head link;
+ struct hif_ind_event evt;
+};
+
+struct wfx_edca_params {
+ /* NOTE: index is a linux queue id. */
+ struct hif_req_edca_queue_params params[IEEE80211_NUM_ACS];
+ bool uapsd_enable[IEEE80211_NUM_ACS];
+};
+
+struct wfx_grp_addr_table {
+ bool enable;
+ int num_addresses;
+ u8 address_list[8][ETH_ALEN];
+};
+
+struct wfx_sta_priv {
+ int link_id;
+ int vif_id;
+};
+
+// mac80211 interface
+int wfx_start(struct ieee80211_hw *hw);
+void wfx_stop(struct ieee80211_hw *hw);
+int wfx_config(struct ieee80211_hw *hw, u32 changed);
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 unused);
+
+int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed);
+int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta);
+int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int wfx_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf, u32 changed);
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf);
+
+// WSM Callbacks
+void wfx_suspend_resume(struct wfx_vif *wvif,
+ struct hif_ind_suspend_resume_tx *arg);
+
+// Other Helpers
+void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad);
+void wfx_update_filtering(struct wfx_vif *wvif);
+int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable);
+
+#endif /* WFX_STA_H */
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
new file mode 100644
index 000000000000..3f6198ab2235
--- /dev/null
+++ b/drivers/staging/wfx/traces.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tracepoints definitions.
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wfx
+
+#if !defined(_WFX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _WFX_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <net/mac80211.h>
+
+#include "bus.h"
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+/* The hell below needs some explanation. For each symbolic number, we need to
+ * define it with TRACE_DEFINE_ENUM() and list it for __print_symbolic().
+ *
+ * 1. Define a new macro that calls TRACE_DEFINE_ENUM():
+ *
+ * #define xxx_name(sym) TRACE_DEFINE_ENUM(sym);
+ *
+ * 2. Define list of all symbols:
+ *
+ * #define list_names \
+ * ... \
+ * xxx_name(XXX) \
+ * ...
+ *
+ * 3. Instantiate list_names:
+ *
+ * list_names
+ *
+ * 4. Redefine xxx_name() as an entry of the array for __print_symbolic():
+ *
+ * #undef xxx_name
+ * #define xxx_name(msg) { msg, #msg },
+ *
+ * 5. list_names can now nearly be used with __print_symbolic(), but
+ *    __print_symbolic() dislikes the trailing comma of the list. So we define
+ *    a new list with a dummy element:
+ *
+ * #define list_for_print_symbolic list_names { -1, NULL }
+ */
+
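+/* As a purely hypothetical illustration of steps 1-5 (these symbols are not
+ * used by the driver), a two-symbol list would look like:
+ *
+ *   #define fruit_name(sym) TRACE_DEFINE_ENUM(sym);
+ *   #define fruit_list fruit_name(APPLE) fruit_name(PEAR)
+ *   fruit_list
+ *   #undef fruit_name
+ *   #define fruit_name(sym) { sym, #sym },
+ *   #define fruit_print_list fruit_list { -1, NULL }
+ *
+ * after which __print_symbolic(val, fruit_print_list) prints "APPLE" or
+ * "PEAR" for the matching value.
+ */
+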
+#define _hif_msg_list \
+ hif_cnf_name(ADD_KEY) \
+ hif_cnf_name(BEACON_TRANSMIT) \
+ hif_cnf_name(EDCA_QUEUE_PARAMS) \
+ hif_cnf_name(JOIN) \
+ hif_cnf_name(MAP_LINK) \
+ hif_cnf_name(READ_MIB) \
+ hif_cnf_name(REMOVE_KEY) \
+ hif_cnf_name(RESET) \
+ hif_cnf_name(SET_BSS_PARAMS) \
+ hif_cnf_name(SET_PM_MODE) \
+ hif_cnf_name(START) \
+ hif_cnf_name(START_SCAN) \
+ hif_cnf_name(STOP_SCAN) \
+ hif_cnf_name(TX) \
+ hif_cnf_name(MULTI_TRANSMIT) \
+ hif_cnf_name(UPDATE_IE) \
+ hif_cnf_name(WRITE_MIB) \
+ hif_cnf_name(CONFIGURATION) \
+ hif_cnf_name(CONTROL_GPIO) \
+ hif_cnf_name(PREVENT_ROLLBACK) \
+ hif_cnf_name(SET_SL_MAC_KEY) \
+ hif_cnf_name(SL_CONFIGURE) \
+ hif_cnf_name(SL_EXCHANGE_PUB_KEYS) \
+ hif_cnf_name(SHUT_DOWN) \
+ hif_ind_name(EVENT) \
+ hif_ind_name(JOIN_COMPLETE) \
+ hif_ind_name(RX) \
+ hif_ind_name(SCAN_CMPL) \
+ hif_ind_name(SET_PM_MODE_CMPL) \
+ hif_ind_name(SUSPEND_RESUME_TX) \
+ hif_ind_name(SL_EXCHANGE_PUB_KEYS) \
+ hif_ind_name(ERROR) \
+ hif_ind_name(EXCEPTION) \
+ hif_ind_name(GENERIC) \
+ hif_ind_name(WAKEUP) \
+ hif_ind_name(STARTUP)
+
+#define hif_msg_list_enum _hif_msg_list
+
+#undef hif_cnf_name
+#undef hif_ind_name
+#define hif_cnf_name(msg) TRACE_DEFINE_ENUM(HIF_CNF_ID_##msg);
+#define hif_ind_name(msg) TRACE_DEFINE_ENUM(HIF_IND_ID_##msg);
+hif_msg_list_enum
+#undef hif_cnf_name
+#undef hif_ind_name
+#define hif_cnf_name(msg) { HIF_CNF_ID_##msg, #msg },
+#define hif_ind_name(msg) { HIF_IND_ID_##msg, #msg },
+#define hif_msg_list hif_msg_list_enum { -1, NULL }
+
+#define _hif_mib_list \
+ hif_mib_name(ARP_IP_ADDRESSES_TABLE) \
+ hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \
+ hif_mib_name(BEACON_FILTER_ENABLE) \
+ hif_mib_name(BEACON_FILTER_TABLE) \
+ hif_mib_name(BEACON_WAKEUP_PERIOD) \
+ hif_mib_name(BLOCK_ACK_POLICY) \
+ hif_mib_name(CONFIG_DATA_FILTER) \
+ hif_mib_name(COUNTERS_TABLE) \
+ hif_mib_name(CURRENT_TX_POWER_LEVEL) \
+ hif_mib_name(DOT11_MAC_ADDRESS) \
+ hif_mib_name(DOT11_MAX_RECEIVE_LIFETIME) \
+ hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \
+ hif_mib_name(DOT11_RTS_THRESHOLD) \
+ hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \
+ hif_mib_name(GL_BLOCK_ACK_INFO) \
+ hif_mib_name(GL_OPERATIONAL_POWER_MODE) \
+ hif_mib_name(GL_SET_MULTI_MSG) \
+ hif_mib_name(INACTIVITY_TIMER) \
+ hif_mib_name(INTERFACE_PROTECTION) \
+ hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(KEEP_ALIVE_PERIOD) \
+ hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(NON_ERP_PROTECTION) \
+ hif_mib_name(NS_IP_ADDRESSES_TABLE) \
+ hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \
+ hif_mib_name(PROTECTED_MGMT_POLICY) \
+ hif_mib_name(RX_FILTER) \
+ hif_mib_name(RCPI_RSSI_THRESHOLD) \
+ hif_mib_name(SET_ASSOCIATION_MODE) \
+ hif_mib_name(SET_DATA_FILTERING) \
+ hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \
+ hif_mib_name(SET_HT_PROTECTION) \
+ hif_mib_name(MAGIC_DATAFRAME_CONDITION) \
+ hif_mib_name(SET_TX_RATE_RETRY_POLICY) \
+ hif_mib_name(SET_UAPSD_INFORMATION) \
+ hif_mib_name(PORT_DATAFRAME_CONDITION) \
+ hif_mib_name(SLOT_TIME) \
+ hif_mib_name(STATISTICS_TABLE) \
+ hif_mib_name(TEMPLATE_FRAME) \
+ hif_mib_name(TSF_COUNTER) \
+ hif_mib_name(UC_MC_BC_DATAFRAME_CONDITION)
+
+#define hif_mib_list_enum _hif_mib_list
+
+#undef hif_mib_name
+#define hif_mib_name(mib) TRACE_DEFINE_ENUM(HIF_MIB_ID_##mib);
+hif_mib_list_enum
+#undef hif_mib_name
+#define hif_mib_name(mib) { HIF_MIB_ID_##mib, #mib },
+#define hif_mib_list hif_mib_list_enum { -1, NULL }
+
+DECLARE_EVENT_CLASS(hif_data,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv),
+ TP_STRUCT__entry(
+ __field(int, tx_fill_level)
+ __field(int, msg_id)
+ __field(const char *, msg_type)
+ __field(int, msg_len)
+ __field(int, buf_len)
+ __field(int, if_id)
+ __field(int, mib)
+ __array(u8, buf, 128)
+ ),
+ TP_fast_assign(
+ int header_len;
+
+ __entry->tx_fill_level = tx_fill_level;
+ __entry->msg_len = hif->len;
+ __entry->msg_id = hif->id;
+ __entry->if_id = hif->interface;
+ if (is_recv)
+ __entry->msg_type = __entry->msg_id & 0x80 ? "IND" : "CNF";
+ else
+ __entry->msg_type = "REQ";
+ if (!is_recv &&
+ (__entry->msg_id == HIF_REQ_ID_READ_MIB ||
+ __entry->msg_id == HIF_REQ_ID_WRITE_MIB)) {
+ __entry->mib = le16_to_cpup((u16 *) hif->body);
+ header_len = 4;
+ } else {
+ __entry->mib = -1;
+ header_len = 0;
+ }
+ __entry->buf_len = min_t(int, __entry->msg_len,
+ sizeof(__entry->buf))
+ - sizeof(struct hif_msg) - header_len;
+ memcpy(__entry->buf, hif->body + header_len, __entry->buf_len);
+ ),
+ TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)",
+ __entry->tx_fill_level,
+ __entry->if_id,
+ __print_symbolic(__entry->msg_id, hif_msg_list),
+ __entry->msg_type,
+ __entry->mib != -1 ? "/" : "",
+ __entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "",
+ __print_hex(__entry->buf, __entry->buf_len),
+ __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
+ __entry->msg_len
+ )
+);
+DEFINE_EVENT(hif_data, hif_send,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv));
+#define _trace_hif_send(hif, tx_fill_level)\
+ trace_hif_send(hif, tx_fill_level, false)
+DEFINE_EVENT(hif_data, hif_recv,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv));
+#define _trace_hif_recv(hif, tx_fill_level)\
+ trace_hif_recv(hif, tx_fill_level, true)
+
+#define wfx_reg_list_enum \
+ wfx_reg_name(WFX_REG_CONFIG, "CONFIG") \
+ wfx_reg_name(WFX_REG_CONTROL, "CONTROL") \
+ wfx_reg_name(WFX_REG_IN_OUT_QUEUE, "QUEUE") \
+ wfx_reg_name(WFX_REG_AHB_DPORT, "AHB") \
+ wfx_reg_name(WFX_REG_BASE_ADDR, "BASE_ADDR") \
+ wfx_reg_name(WFX_REG_SRAM_DPORT, "SRAM") \
+ wfx_reg_name(WFX_REG_SET_GEN_R_W, "SET_GEN_R_W") \
+ wfx_reg_name(WFX_REG_FRAME_OUT, "FRAME_OUT")
+
+#undef wfx_reg_name
+#define wfx_reg_name(sym, name) TRACE_DEFINE_ENUM(sym);
+wfx_reg_list_enum
+#undef wfx_reg_name
+#define wfx_reg_name(sym, name) { sym, name },
+#define wfx_reg_list wfx_reg_list_enum { -1, NULL }
+
+DECLARE_EVENT_CLASS(io_data,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len),
+ TP_STRUCT__entry(
+ __field(int, reg)
+ __field(int, addr)
+ __field(int, msg_len)
+ __field(int, buf_len)
+ __array(u8, buf, 32)
+ __array(u8, addr_str, 10)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->addr = addr;
+ __entry->msg_len = len;
+ __entry->buf_len = min_t(int, sizeof(__entry->buf),
+ __entry->msg_len);
+ memcpy(__entry->buf, io_buf, __entry->buf_len);
+ if (addr >= 0)
+ snprintf(__entry->addr_str, 10, "/%08x", addr);
+ else
+ __entry->addr_str[0] = 0;
+ ),
+ TP_printk("%s%s: %s%s (%d bytes)",
+ __print_symbolic(__entry->reg, wfx_reg_list),
+ __entry->addr_str,
+ __print_hex(__entry->buf, __entry->buf_len),
+ __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
+ __entry->msg_len
+ )
+);
+DEFINE_EVENT(io_data, io_write,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len));
+#define _trace_io_ind_write(reg, addr, io_buf, len)\
+ trace_io_write(reg, addr, io_buf, len)
+#define _trace_io_write(reg, io_buf, len) trace_io_write(reg, -1, io_buf, len)
+DEFINE_EVENT(io_data, io_read,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len));
+#define _trace_io_ind_read(reg, addr, io_buf, len)\
+ trace_io_read(reg, addr, io_buf, len)
+#define _trace_io_read(reg, io_buf, len) trace_io_read(reg, -1, io_buf, len)
+
+DECLARE_EVENT_CLASS(io_data32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val),
+ TP_STRUCT__entry(
+ __field(int, reg)
+ __field(int, addr)
+ __field(int, val)
+ __array(u8, addr_str, 10)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->addr = addr;
+ __entry->val = val;
+ if (addr >= 0)
+ snprintf(__entry->addr_str, 10, "/%08x", addr);
+ else
+ __entry->addr_str[0] = 0;
+ ),
+ TP_printk("%s%s: %08x",
+ __print_symbolic(__entry->reg, wfx_reg_list),
+ __entry->addr_str,
+ __entry->val
+ )
+);
+DEFINE_EVENT(io_data32, io_write32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val));
+#define _trace_io_ind_write32(reg, addr, val) trace_io_write32(reg, addr, val)
+#define _trace_io_write32(reg, val) trace_io_write32(reg, -1, val)
+DEFINE_EVENT(io_data32, io_read32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val));
+#define _trace_io_ind_read32(reg, addr, val) trace_io_read32(reg, addr, val)
+#define _trace_io_read32(reg, val) trace_io_read32(reg, -1, val)
+
+DECLARE_EVENT_CLASS(piggyback,
+ TP_PROTO(u32 val, bool ignored),
+ TP_ARGS(val, ignored),
+ TP_STRUCT__entry(
+ __field(int, val)
+ __field(bool, ignored)
+ ),
+ TP_fast_assign(
+ __entry->val = val;
+ __entry->ignored = ignored;
+ ),
+ TP_printk("CONTROL: %08x%s",
+ __entry->val,
+ __entry->ignored ? " (ignored)" : ""
+ )
+);
+DEFINE_EVENT(piggyback, piggyback,
+ TP_PROTO(u32 val, bool ignored),
+ TP_ARGS(val, ignored));
+#define _trace_piggyback(val, ignored) trace_piggyback(val, ignored)
+
+TRACE_EVENT(bh_stats,
+ TP_PROTO(int ind, int req, int cnf, int busy, bool release),
+ TP_ARGS(ind, req, cnf, busy, release),
+ TP_STRUCT__entry(
+ __field(int, ind)
+ __field(int, req)
+ __field(int, cnf)
+ __field(int, busy)
+ __field(bool, release)
+ ),
+ TP_fast_assign(
+ __entry->ind = ind;
+ __entry->req = req;
+ __entry->cnf = cnf;
+ __entry->busy = busy;
+ __entry->release = release;
+ ),
+ TP_printk("IND/REQ/CNF:%3d/%3d/%3d, REQ in progress:%3d, WUP: %s",
+ __entry->ind,
+ __entry->req,
+ __entry->cnf,
+ __entry->busy,
+ __entry->release ? "release" : "keep"
+ )
+);
+#define _trace_bh_stats(ind, req, cnf, busy, release)\
+ trace_bh_stats(ind, req, cnf, busy, release)
+
+TRACE_EVENT(tx_stats,
+ TP_PROTO(struct hif_cnf_tx *tx_cnf, struct sk_buff *skb, int delay),
+ TP_ARGS(tx_cnf, skb, delay),
+ TP_STRUCT__entry(
+ __field(int, pkt_id)
+ __field(int, delay_media)
+ __field(int, delay_queue)
+ __field(int, delay_fw)
+ __field(int, ack_failures)
+ __field(int, flags)
+ __array(int, rate, 4)
+ __array(int, tx_count, 4)
+ ),
+ TP_fast_assign(
+		// Keep in sync with the wfx_rates definition in main.c
+ static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9,
+ 10, 11, 12, 13 };
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rates = tx_info->driver_rates;
+ int i;
+
+ __entry->pkt_id = tx_cnf->packet_id;
+ __entry->delay_media = tx_cnf->media_delay;
+ __entry->delay_queue = tx_cnf->tx_queue_delay;
+ __entry->delay_fw = delay;
+ __entry->ack_failures = tx_cnf->ack_failures;
+ if (!tx_cnf->status || __entry->ack_failures)
+ __entry->ack_failures += 1;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->rate[i] = rates[i].idx;
+ else
+ __entry->rate[i] = hw_rate[rates[i].idx];
+ __entry->tx_count[i] = rates[i].count;
+ }
+ __entry->flags = 0;
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->flags |= 0x01;
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ __entry->flags |= 0x02;
+ if (rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
+ __entry->flags |= 0x04;
+ if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ __entry->flags |= 0x08;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ __entry->flags |= 0x10;
+ if (tx_cnf->status)
+ __entry->flags |= 0x20;
+ if (tx_cnf->status == HIF_REQUEUE)
+ __entry->flags |= 0x40;
+ ),
+ TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus",
+ __entry->pkt_id,
+ __print_flags(__entry->flags, NULL,
+ { 0x01, "M" }, { 0x02, "S" }, { 0x04, "G" },
+ { 0x08, "R" }, { 0x10, "D" }, { 0x20, "F" },
+ { 0x40, "Q" }),
+ __entry->rate[0],
+ __entry->tx_count[0],
+ __entry->rate[1],
+ __entry->tx_count[1],
+ __entry->rate[2],
+ __entry->tx_count[2],
+ __entry->rate[3],
+ __entry->tx_count[3],
+ __entry->ack_failures,
+ __entry->delay_media,
+ __entry->delay_queue,
+ __entry->delay_fw
+ )
+);
+#define _trace_tx_stats(tx_cnf, skb, delay) trace_tx_stats(tx_cnf, skb, delay)
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE traces
+
+#include <trace/define_trace.h>
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
new file mode 100644
index 000000000000..781a8c8ba982
--- /dev/null
+++ b/drivers/staging/wfx/wfx.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common private data for Silicon Labs WFx chips.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#ifndef WFX_H
+#define WFX_H
+
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/nospec.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "data_tx.h"
+#include "main.h"
+#include "queue.h"
+#include "secure_link.h"
+#include "sta.h"
+#include "scan.h"
+#include "hif_tx.h"
+#include "hif_api_general.h"
+
+struct hwbus_ops;
+
+struct wfx_dev {
+ struct wfx_platform_data pdata;
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif[2];
+ struct mac_address addresses[2];
+ const struct hwbus_ops *hwbus_ops;
+ void *hwbus_priv;
+
+ u8 keyset;
+ struct completion firmware_ready;
+ struct hif_ind_startup hw_caps;
+ struct wfx_hif hif;
+ struct sl_context sl;
+ int chip_frozen;
+ struct mutex conf_mutex;
+
+ struct wfx_hif_cmd hif_cmd;
+ struct wfx_queue tx_queue[4];
+ struct wfx_queue_stats tx_queue_stats;
+ int tx_burst_idx;
+ atomic_t tx_lock;
+
+ u32 key_map;
+ struct hif_req_add_key keys[MAX_KEY_ENTRIES];
+
+ struct hif_rx_stats rx_stats;
+ struct mutex rx_stats_lock;
+
+ int output_power;
+ atomic_t scan_in_progress;
+};
+
+struct wfx_vif {
+ struct wfx_dev *wdev;
+ struct ieee80211_vif *vif;
+ struct ieee80211_channel *channel;
+ int id;
+ enum wfx_state state;
+
+ int delayed_link_loss;
+ int bss_loss_state;
+ u32 bss_loss_confirm_id;
+ struct mutex bss_loss_lock;
+ struct delayed_work bss_loss_work;
+
+ u32 link_id_map;
+ struct wfx_link_entry link_id_db[WFX_MAX_STA_IN_AP_MODE];
+ struct delayed_work link_id_gc_work;
+ struct work_struct link_id_work;
+
+ bool aid0_bit_set;
+ bool mcast_tx;
+ bool mcast_buffered;
+ struct wfx_grp_addr_table mcast_filter;
+ struct timer_list mcast_timeout;
+ struct work_struct mcast_start_work;
+ struct work_struct mcast_stop_work;
+
+ s8 wep_default_key_id;
+ struct sk_buff *wep_pending_skb;
+ struct work_struct wep_key_work;
+
+ struct tx_policy_cache tx_policy_cache;
+ struct work_struct tx_policy_upload_work;
+
+ u32 sta_asleep_mask;
+ u32 pspoll_mask;
+ spinlock_t ps_state_lock;
+ struct work_struct set_tim_work;
+
+ int dtim_period;
+ int beacon_int;
+ bool enable_beacon;
+ struct work_struct set_beacon_wakeup_period_work;
+
+ bool filter_bssid;
+ bool fwd_probe_req;
+ bool disable_beacon_filter;
+ struct work_struct update_filtering_work;
+
+ u32 erp_info;
+ int cqm_rssi_thold;
+ bool setbssparams_done;
+ struct wfx_ht_info ht_info;
+ struct wfx_edca_params edca;
+ struct hif_mib_set_uapsd_information uapsd_info;
+ struct hif_req_set_bss_params bss_params;
+ struct work_struct bss_params_work;
+ struct work_struct set_cts_work;
+
+ int join_complete_status;
+ bool delayed_unjoin;
+ struct work_struct unjoin_work;
+
+ struct wfx_scan scan;
+
+ struct hif_req_set_pm_mode powersave_mode;
+ struct completion set_pm_mode_complete;
+
+ struct list_head event_queue;
+ spinlock_t event_queue_lock;
+ struct work_struct event_handler_work;
+};
+
+static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
+{
+ if (vif_id >= ARRAY_SIZE(wdev->vif)) {
+ dev_dbg(wdev->dev, "requesting non-existent vif: %d\n", vif_id);
+ return NULL;
+ }
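+	/* Clamp the index under speculation (Spectre v1 hardening) before it
+	 * is used to dereference wdev->vif[].
+	 */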
+ vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif));
+ if (!wdev->vif[vif_id]) {
+ dev_dbg(wdev->dev, "requesting non-allocated vif: %d\n",
+ vif_id);
+ return NULL;
+ }
+ return (struct wfx_vif *) wdev->vif[vif_id]->drv_priv;
+}
+
+static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev,
+ struct wfx_vif *cur)
+{
+ int i;
+ int mark = 0;
+ struct wfx_vif *tmp;
+
+ if (!cur)
+ mark = 1;
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ tmp = wdev_to_wvif(wdev, i);
+ if (mark && tmp)
+ return tmp;
+ if (tmp == cur)
+ mark = 1;
+ }
+ return NULL;
+}
+
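+/* Typical usage of wvif_iterate(), as in sta.c above: seed the cursor with
+ * NULL and loop until it returns NULL again (handle_vif() is a placeholder):
+ *
+ *   struct wfx_vif *wvif = NULL;
+ *
+ *   while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ *           handle_vif(wvif);
+ */
+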
+static inline int wvif_count(struct wfx_dev *wdev)
+{
+ int i;
+ int ret = 0;
+ struct wfx_vif *wvif;
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ wvif = wdev_to_wvif(wdev, i);
+ if (wvif)
+ ret++;
+ }
+ return ret;
+}
+
+static inline void memreverse(u8 *src, u8 length)
+{
+ u8 *lo = src;
+ u8 *hi = src + length - 1;
+ u8 swap;
+
+ while (lo < hi) {
+ swap = *lo;
+ *lo++ = *hi;
+ *hi-- = swap;
+ }
+}
+
+static inline int memzcmp(void *src, unsigned int size)
+{
+ u8 *buf = src;
+
+ if (!size)
+ return 0;
+ if (*buf)
+ return 1;
+ return memcmp(buf, buf + 1, size - 1);
+}
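+
+/* memzcmp() returns 0 only if the whole buffer is zero: it checks the first
+ * byte, then compares the buffer against itself shifted by one byte, so any
+ * non-zero byte breaks the equality chain. A minimal usage sketch with
+ * hypothetical data:
+ *
+ *   u8 bssid[ETH_ALEN] = { };
+ *
+ *   if (!memzcmp(bssid, sizeof(bssid)))
+ *           pr_debug("bssid not yet set\n");
+ */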
+
+#endif /* WFX_H */
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index a5a8e806b98e..a3305a0a888a 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -4,11 +4,11 @@ obj-$(CONFIG_WILC1000) += wilc1000.o
ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
-DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
-wilc1000-objs := wilc_wfi_cfgoperations.o wilc_netdev.o wilc_mon.o \
- wilc_hif.o wilc_wlan_cfg.o wilc_wlan.o
+wilc1000-objs := cfg80211.o netdev.o mon.o \
+ hif.o wlan_cfg.o wlan.o
obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
-wilc1000-sdio-objs += wilc_sdio.o
+wilc1000-sdio-objs += sdio.o
obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
-wilc1000-spi-objs += wilc_spi.o
+wilc1000-spi-objs += spi.o
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/cfg80211.c
index 22f21831649b..4863e516ff13 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/cfg80211.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_cfgoperations.h"
+#include "cfg80211.h"
#define FRAME_TYPE_ID 0
#define ACTION_CAT_ID 24
@@ -137,6 +137,7 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
struct wilc *wl = vif->wilc;
struct host_if_drv *wfi_drv = priv->hif_drv;
struct wilc_conn_info *conn_info = &wfi_drv->conn_info;
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
vif->connecting = false;
@@ -158,12 +159,16 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
memcpy(priv->associated_bss, conn_info->bssid,
ETH_ALEN);
- cfg80211_connect_result(dev, conn_info->bssid,
- conn_info->req_ies,
- conn_info->req_ies_len,
- conn_info->resp_ies,
- conn_info->resp_ies_len, connect_status,
- GFP_KERNEL);
+ cfg80211_ref_bss(wiphy, vif->bss);
+ cfg80211_connect_bss(dev, conn_info->bssid, vif->bss,
+ conn_info->req_ies,
+ conn_info->req_ies_len,
+ conn_info->resp_ies,
+ conn_info->resp_ies_len,
+ connect_status, GFP_KERNEL,
+ NL80211_TIMEOUT_UNSPECIFIED);
+
+ vif->bss = NULL;
} else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
u16 reason = 0;
@@ -186,15 +191,15 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
}
}
-static struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl)
+struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl)
{
- int i;
+ struct wilc_vif *vif;
- for (i = 0; i < wl->vif_num; i++)
- if (wl->vif[i])
- return wl->vif[i];
+ vif = list_first_or_null_rcu(&wl->vif_list, typeof(*vif), list);
+ if (!vif)
+ return ERR_PTR(-EINVAL);
- return ERR_PTR(-EINVAL);
+ return vif;
}
static int set_channel(struct wiphy *wiphy,
@@ -204,11 +209,12 @@ static int set_channel(struct wiphy *wiphy,
struct wilc_vif *vif;
u32 channelnum;
int result;
+ int srcu_idx;
- mutex_lock(&wl->vif_mutex);
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return PTR_ERR(vif);
}
@@ -219,7 +225,7 @@ static int set_channel(struct wiphy *wiphy,
if (result)
netdev_err(vif->ndev, "Error in setting channel\n");
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return result;
}
@@ -405,6 +411,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
goto out_put_bss;
}
kfree(join_params);
+ vif->bss = bss;
cfg80211_put_bss(wiphy, bss);
return 0;
@@ -450,6 +457,8 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev,
ret = -EINVAL;
}
+ vif->bss = NULL;
+
return ret;
}
@@ -620,29 +629,26 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
bool pairwise,
const u8 *mac_addr)
{
- struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(netdev);
struct wilc_priv *priv = &vif->priv;
- if (netdev == wl->vif[0]->ndev) {
- if (priv->wilc_gtk[key_index]) {
- kfree(priv->wilc_gtk[key_index]->key);
- priv->wilc_gtk[key_index]->key = NULL;
- kfree(priv->wilc_gtk[key_index]->seq);
- priv->wilc_gtk[key_index]->seq = NULL;
+ if (priv->wilc_gtk[key_index]) {
+ kfree(priv->wilc_gtk[key_index]->key);
+ priv->wilc_gtk[key_index]->key = NULL;
+ kfree(priv->wilc_gtk[key_index]->seq);
+ priv->wilc_gtk[key_index]->seq = NULL;
- kfree(priv->wilc_gtk[key_index]);
- priv->wilc_gtk[key_index] = NULL;
- }
+ kfree(priv->wilc_gtk[key_index]);
+ priv->wilc_gtk[key_index] = NULL;
+ }
- if (priv->wilc_ptk[key_index]) {
- kfree(priv->wilc_ptk[key_index]->key);
- priv->wilc_ptk[key_index]->key = NULL;
- kfree(priv->wilc_ptk[key_index]->seq);
- priv->wilc_ptk[key_index]->seq = NULL;
- kfree(priv->wilc_ptk[key_index]);
- priv->wilc_ptk[key_index] = NULL;
- }
+ if (priv->wilc_ptk[key_index]) {
+ kfree(priv->wilc_ptk[key_index]->key);
+ priv->wilc_ptk[key_index]->key = NULL;
+ kfree(priv->wilc_ptk[key_index]->seq);
+ priv->wilc_ptk[key_index]->seq = NULL;
+ kfree(priv->wilc_ptk[key_index]);
+ priv->wilc_ptk[key_index] = NULL;
}
if (key_index <= 3 && priv->wep_key_len[key_index]) {
@@ -752,33 +758,19 @@ static int change_bss(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-struct wilc_vif *wilc_get_interface(struct wilc *wl)
-{
- int i;
- struct wilc_vif *vif = NULL;
-
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- if (wl->vif[i]) {
- vif = wl->vif[i];
- break;
- }
- }
- mutex_unlock(&wl->vif_mutex);
- return vif;
-}
-
static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- int ret;
+ int ret = -EINVAL;
struct cfg_param_attr cfg_param_val;
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
struct wilc_priv *priv;
+ int srcu_idx;
- vif = wilc_get_interface(wl);
- if (!vif)
- return -EINVAL;
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_wl_to_vif(wl);
+ if (IS_ERR(vif))
+ goto out;
priv = &vif->priv;
cfg_param_val.flag = 0;
@@ -808,7 +800,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
} else {
netdev_err(vif->ndev,
"Fragmentation threshold out of range\n");
- return -EINVAL;
+ goto out;
}
}
@@ -821,7 +813,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
cfg_param_val.rts_threshold = wiphy->rts_threshold;
} else {
netdev_err(vif->ndev, "RTS threshold out of range\n");
- return -EINVAL;
+ goto out;
}
}
@@ -829,6 +821,8 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
if (ret)
netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
+out:
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1144,7 +1138,7 @@ static int remain_on_channel(struct wiphy *wiphy,
cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL);
mod_timer(&vif->hif_drv->remain_on_ch_timer,
- jiffies + msecs_to_jiffies(duration));
+ jiffies + msecs_to_jiffies(duration + 1000));
return ret;
}
@@ -1419,8 +1413,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE)
wilc_wfi_deinit_mon_interface(wl, true);
vif->iftype = WILC_STATION_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_STATION_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_STATION_MODE, vif->idx);
memset(priv->assoc_stainfo.sta_associated_bss, 0,
WILC_MAX_NUM_STA * ETH_ALEN);
@@ -1432,8 +1428,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
priv->wdev.iftype = type;
vif->monitor_flag = 0;
vif->iftype = WILC_CLIENT_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_STATION_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_STATION_MODE, vif->idx);
break;
case NL80211_IFTYPE_AP:
@@ -1450,8 +1448,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
dev->ieee80211_ptr->iftype = type;
priv->wdev.iftype = type;
vif->iftype = WILC_GO_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_AP_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_AP_MODE, vif->idx);
break;
default:
@@ -1557,20 +1557,16 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static int wilc_get_vif_from_type(struct wilc *wl, int type)
+static struct wilc_vif *wilc_get_vif_from_type(struct wilc *wl, int type)
{
- int i;
+ struct wilc_vif *vif;
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- if (wl->vif[i]->iftype == type) {
- mutex_unlock(&wl->vif_mutex);
- return i;
- }
+ list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ if (vif->iftype == type)
+ return vif;
}
- mutex_unlock(&wl->vif_mutex);
- return -EINVAL;
+ return NULL;
}
static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
@@ -1583,29 +1579,36 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
struct wilc_vif *vif;
struct wireless_dev *wdev;
int iftype;
- int ret;
if (type == NL80211_IFTYPE_MONITOR) {
struct net_device *ndev;
- int ap_index = wilc_get_vif_from_type(wl, WILC_AP_MODE);
-
- if (ap_index < 0) {
- ap_index = wilc_get_vif_from_type(wl, WILC_GO_MODE);
- if (ap_index < 0)
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_vif_from_type(wl, WILC_AP_MODE);
+ if (!vif) {
+ vif = wilc_get_vif_from_type(wl, WILC_GO_MODE);
+ if (!vif) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
+ }
}
- vif = wl->vif[ap_index];
- if (vif->monitor_flag)
+ if (vif->monitor_flag) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
+ }
ndev = wilc_wfi_init_mon_interface(wl, name, vif->ndev);
- if (ndev)
+ if (ndev) {
vif->monitor_flag = 1;
- else
+ } else {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ERR_PTR(-EINVAL);
+ }
wdev = &vif->priv.wdev;
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return wdev;
}
@@ -1613,9 +1616,10 @@ validate_interface:
mutex_lock(&wl->vif_mutex);
if (wl->vif_num == WILC_NUM_CONCURRENT_IFC) {
pr_err("Reached maximum number of interface\n");
- ret = -EINVAL;
- goto out_err;
+ mutex_unlock(&wl->vif_mutex);
+ return ERR_PTR(-EINVAL);
}
+ mutex_unlock(&wl->vif_mutex);
switch (type) {
case NL80211_IFTYPE_STATION:
@@ -1625,30 +1629,20 @@ validate_interface:
iftype = WILC_AP_MODE;
break;
default:
- ret = -EOPNOTSUPP;
- goto out_err;
+ return ERR_PTR(-EOPNOTSUPP);
}
vif = wilc_netdev_ifc_init(wl, name, iftype, type, true);
- if (IS_ERR(vif)) {
- ret = PTR_ERR(vif);
- goto out_err;
- }
-
- mutex_unlock(&wl->vif_mutex);
+ if (IS_ERR(vif))
+ return ERR_CAST(vif);
return &vif->priv.wdev;
-
-out_err:
- mutex_unlock(&wl->vif_mutex);
- return ERR_PTR(ret);
}
static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
- int i;
if (wdev->iftype == NL80211_IFTYPE_AP ||
wdev->iftype == NL80211_IFTYPE_P2P_GO)
@@ -1658,22 +1652,12 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
- mutex_lock(&wl->vif_mutex);
wilc_set_operation_mode(vif, 0, 0, 0);
- for (i = vif->idx; i < wl->vif_num; i++) {
- if ((i + 1) >= wl->vif_num) {
- wl->vif[i] = NULL;
- } else {
- vif = wl->vif[i + 1];
- vif->idx = i;
- wl->vif[i] = vif;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- vif->iftype, vif->idx);
- }
- }
+ mutex_lock(&wl->vif_mutex);
+ list_del_rcu(&vif->list);
wl->vif_num--;
mutex_unlock(&wl->vif_mutex);
-
+ synchronize_srcu(&wl->srcu);
return 0;
}
@@ -1698,25 +1682,39 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
+ int srcu_idx;
- mutex_lock(&wl->vif_mutex);
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return;
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm)
{
int ret;
+ int srcu_idx;
s32 tx_power = MBM_TO_DBM(mbm);
- struct wilc_vif *vif = netdev_priv(wdev->netdev);
+ struct wilc *wl = wiphy_priv(wiphy);
+ struct wilc_vif *vif;
+
+ if (!wl->initialized)
+ return -EIO;
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_wl_to_vif(wl);
+ if (IS_ERR(vif)) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+ return -EINVAL;
+ }
+
+ netdev_info(vif->ndev, "Setting tx power %d\n", tx_power);
if (tx_power < 0)
tx_power = 0;
else if (tx_power > 18)
@@ -1724,6 +1722,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
ret = wilc_set_tx_power(vif, tx_power);
if (ret)
netdev_err(vif->ndev, "Failed to set tx power\n");
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1803,6 +1802,17 @@ static void wlan_init_locks(struct wilc *wl)
init_completion(&wl->cfg_event);
init_completion(&wl->sync_event);
init_completion(&wl->txq_thread_started);
+ init_srcu_struct(&wl->srcu);
+}
+
+void wlan_deinit_locks(struct wilc *wilc)
+{
+ mutex_destroy(&wilc->hif_cs);
+ mutex_destroy(&wilc->rxq_cs);
+ mutex_destroy(&wilc->cfg_cmd_lock);
+ mutex_destroy(&wilc->txq_add_to_head_cs);
+ mutex_destroy(&wilc->vif_mutex);
+ cleanup_srcu_struct(&wilc->srcu);
}
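+
+/* Reader-side pattern this series converts the vif walks to (a sketch,
+ * simplified from the call sites elsewhere in this patch; use_vif() is a
+ * placeholder):
+ *
+ *   int srcu_idx = srcu_read_lock(&wl->srcu);
+ *   struct wilc_vif *vif;
+ *
+ *   list_for_each_entry_rcu(vif, &wl->vif_list, list)
+ *           use_vif(vif);
+ *   srcu_read_unlock(&wl->srcu, srcu_idx);
+ */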
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
@@ -1816,6 +1826,8 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
if (!wl)
return -EINVAL;
+ wlan_init_locks(wl);
+
ret = wilc_wlan_cfg_init(wl);
if (ret)
goto free_wl;
@@ -1826,6 +1838,7 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
wl->chip_ps_state = WILC_CHIP_WAKEDUP;
INIT_LIST_HEAD(&wl->txq_head.list);
INIT_LIST_HEAD(&wl->rxq_head.list);
+ INIT_LIST_HEAD(&wl->vif_list);
wl->hif_workqueue = create_singlethread_workqueue("WILC_wq");
if (!wl->hif_workqueue) {
@@ -1839,8 +1852,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
goto free_hq;
}
- wlan_init_locks(wl);
-
return 0;
free_hq:
@@ -1850,6 +1861,7 @@ free_cfg:
wilc_wlan_cfg_deinit(wl);
free_wl:
+ wlan_deinit_locks(wl);
wiphy_unregister(wl->wiphy);
wiphy_free(wl->wiphy);
return ret;
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/cfg80211.h
index 234faaabdb82..5e5d63f70df2 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/cfg80211.h
@@ -4,9 +4,9 @@
* All rights reserved.
*/
-#ifndef NM_WFI_CFGOPERATIONS
-#define NM_WFI_CFGOPERATIONS
-#include "wilc_wfi_netdevice.h"
+#ifndef WILC_CFG80211_H
+#define WILC_CFG80211_H
+#include "netdev.h"
struct wiphy *wilc_cfg_alloc(void);
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
@@ -24,4 +24,6 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
u16 frame_type, bool reg);
struct wilc_vif *wilc_get_interface(struct wilc *wl);
+struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl);
+void wlan_deinit_locks(struct wilc *wilc);
#endif
diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/hif.c
index f2b7d5a1be17..349e45d58ec9 100644
--- a/drivers/staging/wilc1000/wilc_hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_netdevice.h"
+#include "netdev.h"
#define WILC_HIF_SCAN_TIMEOUT_MS 5000
#define WILC_HIF_CONNECT_TIMEOUT_MS 9500
@@ -32,7 +32,7 @@ struct wilc_op_mode {
};
struct wilc_reg_frame {
- bool reg;
+ u8 reg;
u8 reg_id;
__le16 frame_type;
} __packed;
@@ -183,11 +183,17 @@ int wilc_get_vif_idx(struct wilc_vif *vif)
static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
{
int index = idx - 1;
+ struct wilc_vif *vif;
if (index < 0 || index >= WILC_NUM_CONCURRENT_IFC)
return NULL;
- return wilc->vif[index];
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->idx == index)
+ return vif;
+ }
+
+ return NULL;
}
static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
@@ -473,20 +479,27 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
if (rates_ie) {
rates_len = rates_ie[1];
+ if (rates_len > WILC_MAX_RATES_SUPPORTED)
+ rates_len = WILC_MAX_RATES_SUPPORTED;
param->supp_rates[0] = rates_len;
memcpy(&param->supp_rates[1], rates_ie + 2, rates_len);
}
- supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies->data,
- ies->len);
- if (supp_rates_ie) {
- if (supp_rates_ie[1] > (WILC_MAX_RATES_SUPPORTED - rates_len))
- param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
- else
- param->supp_rates[0] += supp_rates_ie[1];
+ if (rates_len < WILC_MAX_RATES_SUPPORTED) {
+ supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
+ ies->data, ies->len);
+ if (supp_rates_ie) {
+ u8 ext_rates = supp_rates_ie[1];
- memcpy(&param->supp_rates[rates_len + 1], supp_rates_ie + 2,
- (param->supp_rates[0] - rates_len));
+ if (ext_rates > (WILC_MAX_RATES_SUPPORTED - rates_len))
+ param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
+ else
+ param->supp_rates[0] += ext_rates;
+
+ memcpy(&param->supp_rates[rates_len + 1],
+ supp_rates_ie + 2,
+ (param->supp_rates[0] - rates_len));
+ }
}
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
@@ -544,7 +557,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->mode_802_11i = 2;
param->rsn_found = true;
- //extract RSN capabilities
+ /* extract RSN capabilities */
offset += (rsn_ie[offset] * 4) + 2;
offset += (rsn_ie[offset] * 4) + 2;
memcpy(param->rsn_cap, &rsn_ie[offset], 2);
@@ -1776,7 +1789,9 @@ void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
wid.val = (u8 *)&reg_frame;
memset(&reg_frame, 0x0, sizeof(reg_frame));
- reg_frame.reg = reg;
+
+ if (reg)
+ reg_frame.reg = 1;
switch (frame_type) {
case IEEE80211_STYPE_ACTION:
diff --git a/drivers/staging/wilc1000/wilc_hif.h b/drivers/staging/wilc1000/hif.h
index ac5fe57f872b..22ee6fffd599 100644
--- a/drivers/staging/wilc1000/wilc_hif.h
+++ b/drivers/staging/wilc1000/hif.h
@@ -4,10 +4,10 @@
* All rights reserved.
*/
-#ifndef HOST_INT_H
-#define HOST_INT_H
+#ifndef WILC_HIF_H
+#define WILC_HIF_H
#include <linux/ieee80211.h>
-#include "wilc_wlan_if.h"
+#include "wlan_if.h"
enum {
WILC_IDLE_MODE = 0x0,
diff --git a/drivers/staging/wilc1000/wilc_mon.c b/drivers/staging/wilc1000/mon.c
index d6f14f69ad64..48ac33f06f63 100644
--- a/drivers/staging/wilc1000/wilc_mon.c
+++ b/drivers/staging/wilc1000/mon.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_cfgoperations.h"
+#include "cfg80211.h"
struct wilc_wfi_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
@@ -220,7 +220,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
{
struct wilc_wfi_mon_priv *priv;
- /*If monitor interface is already initialized, return it*/
+ /* If monitor interface is already initialized, return it */
if (wl->monitor_dev)
return wl->monitor_dev;
diff --git a/drivers/staging/wilc1000/wilc_netdev.c b/drivers/staging/wilc1000/netdev.c
index 508acb8bb089..d2c0b0f7cf63 100644
--- a/drivers/staging/wilc1000/wilc_netdev.c
+++ b/drivers/staging/wilc1000/netdev.c
@@ -10,8 +10,8 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
-#include "wilc_wfi_cfgoperations.h"
-#include "wilc_wlan_cfg.h"
+#include "cfg80211.h"
+#include "wlan_cfg.h"
#define WILC_MULTICAST_TABLE_SIZE 8
@@ -97,29 +97,25 @@ void wilc_mac_indicate(struct wilc *wilc)
static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
{
u8 *bssid, *bssid1;
- int i = 0;
struct net_device *ndev = NULL;
+ struct wilc_vif *vif;
bssid = mac_header + 10;
bssid1 = mac_header + 4;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i]->mode == WILC_STATION_MODE)
- if (ether_addr_equal_unaligned(bssid,
- wilc->vif[i]->bssid)) {
- ndev = wilc->vif[i]->ndev;
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->mode == WILC_STATION_MODE)
+ if (ether_addr_equal_unaligned(bssid, vif->bssid)) {
+ ndev = vif->ndev;
goto out;
}
- if (wilc->vif[i]->mode == WILC_AP_MODE)
- if (ether_addr_equal_unaligned(bssid1,
- wilc->vif[i]->bssid)) {
- ndev = wilc->vif[i]->ndev;
+ if (vif->mode == WILC_AP_MODE)
+ if (ether_addr_equal_unaligned(bssid1, vif->bssid)) {
+ ndev = vif->ndev;
goto out;
}
}
out:
- mutex_unlock(&wilc->vif_mutex);
return ndev;
}
@@ -137,13 +133,16 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
{
- u8 i = 0;
+ int srcu_idx;
u8 ret_val = 0;
+ struct wilc_vif *vif;
- for (i = 0; i < wilc->vif_num; i++)
- if (!is_zero_ether_addr(wilc->vif[i]->bssid))
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (!is_zero_ether_addr(vif->bssid))
ret_val++;
-
+ }
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return ret_val;
}
@@ -167,16 +166,16 @@ static int wilc_txq_task(void *vp)
do {
ret = wilc_wlan_handle_txq(wl, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- int i;
+ int srcu_idx;
struct wilc_vif *ifc;
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- ifc = wl->vif[i];
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ list_for_each_entry_rcu(ifc, &wl->vif_list,
+ list) {
if (ifc->mac_opened && ifc->ndev)
netif_wake_queue(ifc->ndev);
}
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
} while (ret == -ENOBUFS && !wl->close);
}
@@ -424,18 +423,6 @@ fail:
return -1;
}
-static void wlan_deinit_locks(struct net_device *dev)
-{
- struct wilc_vif *vif = netdev_priv(dev);
- struct wilc *wilc = vif->wilc;
-
- mutex_destroy(&wilc->hif_cs);
- mutex_destroy(&wilc->rxq_cs);
- mutex_destroy(&wilc->cfg_cmd_lock);
- mutex_destroy(&wilc->txq_add_to_head_cs);
- mutex_destroy(&wilc->vif_mutex);
-}
-
static void wlan_deinitialize_threads(struct net_device *dev)
{
struct wilc_vif *vif = netdev_priv(dev);
@@ -477,7 +464,6 @@ static void wilc_wlan_deinitialize(struct net_device *dev)
wilc_wlan_stop(wl, vif);
wilc_wlan_cleanup(dev);
- wlan_deinit_locks(dev);
wl->initialized = false;
@@ -738,14 +724,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
wilc_tx_complete);
if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
- int i;
+ int srcu_idx;
+ struct wilc_vif *vif;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i]->mac_opened)
- netif_stop_queue(wilc->vif[i]->ndev);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->mac_opened)
+ netif_stop_queue(vif->ndev);
}
- mutex_unlock(&wilc->vif_mutex);
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
return 0;
@@ -823,26 +810,22 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
{
- int i = 0;
+ int srcu_idx;
struct wilc_vif *vif;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
u16 type = le16_to_cpup((__le16 *)buff);
- vif = netdev_priv(wilc->vif[i]->ndev);
- if ((type == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
- (type == vif->frame_reg[1].type && vif->frame_reg[1].reg)) {
+ if (vif->priv.p2p_listen_state &&
+ ((type == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
+ (type == vif->frame_reg[1].type && vif->frame_reg[1].reg)))
wilc_wfi_p2p_rx(vif, buff, size);
- break;
- }
- if (vif->monitor_flag) {
+ if (vif->monitor_flag)
wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
- break;
- }
}
- mutex_unlock(&wilc->vif_mutex);
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
static const struct net_device_ops wilc_netdev_ops = {
@@ -856,7 +839,8 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
- int i;
+ struct wilc_vif *vif;
+ int srcu_idx;
if (!wilc)
return;
@@ -866,21 +850,57 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i] && wilc->vif[i]->ndev)
- unregister_netdev(wilc->vif[i]->ndev);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->ndev)
+ unregister_netdev(vif->ndev);
}
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
flush_workqueue(wilc->hif_workqueue);
destroy_workqueue(wilc->hif_workqueue);
+
+ do {
+ mutex_lock(&wilc->vif_mutex);
+ if (wilc->vif_num <= 0) {
+ mutex_unlock(&wilc->vif_mutex);
+ break;
+ }
+ vif = wilc_get_wl_to_vif(wilc);
+ if (!IS_ERR(vif))
+ list_del_rcu(&vif->list);
+
+ wilc->vif_num--;
+ mutex_unlock(&wilc->vif_mutex);
+ synchronize_srcu(&wilc->srcu);
+ } while (1);
+
wilc_wlan_cfg_deinit(wilc);
+ wlan_deinit_locks(wilc);
kfree(wilc->bus_data);
wiphy_unregister(wilc->wiphy);
wiphy_free(wilc->wiphy);
}
EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
+static u8 wilc_get_available_idx(struct wilc *wl)
+{
+ int idx = 0;
+ struct wilc_vif *vif;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ if (vif->idx == 0)
+ idx = 1;
+ else
+ idx = 0;
+ }
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+ return idx;
+}
+
struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
int vif_type, enum nl80211_iftype type,
bool rtnl_locked)
@@ -921,10 +941,14 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
- vif->wilc->vif[wl->vif_num] = vif;
- vif->idx = wl->vif_num;
- wl->vif_num += 1;
+ vif->idx = wilc_get_available_idx(wl);
vif->mac_opened = 0;
+ mutex_lock(&wl->vif_mutex);
+ list_add_tail_rcu(&vif->list, &wl->vif_list);
+ wl->vif_num += 1;
+ mutex_unlock(&wl->vif_mutex);
+ synchronize_srcu(&wl->srcu);
+
return vif;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/netdev.h
index 978a8bdbfc40..cd8f0d72caaa 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/netdev.h
@@ -4,8 +4,8 @@
* All rights reserved.
*/
-#ifndef WILC_WFI_NETDEVICE
-#define WILC_WFI_NETDEVICE
+#ifndef WILC_NETDEV_H
+#define WILC_NETDEV_H
#include <linux/tcp.h>
#include <linux/ieee80211.h>
@@ -14,9 +14,9 @@
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
-#include "wilc_hif.h"
-#include "wilc_wlan.h"
-#include "wilc_wlan_cfg.h"
+#include "hif.h"
+#include "wlan.h"
+#include "wlan_cfg.h"
#define FLOW_CONTROL_LOWER_THRESHOLD 128
#define FLOW_CONTROL_UPPER_THRESHOLD 256
@@ -60,7 +60,7 @@ struct sta_info {
u8 sta_associated_bss[WILC_MAX_NUM_STA][ETH_ALEN];
};
-/*Parameters needed for host interface for remaining on channel*/
+/* Parameters needed for host interface for remaining on channel */
struct wilc_wfi_p2p_listen_params {
struct ieee80211_channel *listen_ch;
u32 listen_duration;
@@ -145,11 +145,13 @@ struct wilc_priv {
struct wilc_pmkid_attr pmkid_list;
u8 wep_key[4][WLAN_KEY_LEN_WEP104];
u8 wep_key_len[4];
+
/* The real interface that the monitor is on */
struct net_device *real_ndev;
struct wilc_wfi_key *wilc_gtk[WILC_MAX_NUM_STA];
struct wilc_wfi_key *wilc_ptk[WILC_MAX_NUM_STA];
u8 wilc_groupkey;
+
/* mutexes */
struct mutex scan_req_lock;
bool p2p_listen_state;
@@ -208,6 +210,8 @@ struct wilc_vif {
struct tcp_ack_filter ack_filter;
bool connecting;
struct wilc_priv priv;
+ struct list_head list;
+ struct cfg80211_bss *bss;
};
struct wilc {
@@ -221,16 +225,22 @@ struct wilc {
int dev_irq_num;
int close;
u8 vif_num;
- struct wilc_vif *vif[WILC_NUM_CONCURRENT_IFC];
- /*protect vif list*/
+ struct list_head vif_list;
+
+ /* protect vif list */
struct mutex vif_mutex;
+ struct srcu_struct srcu;
u8 open_ifcs;
- /*protect head of transmit queue*/
+
+ /* protect head of transmit queue */
struct mutex txq_add_to_head_cs;
- /*protect txq_entry_t transmit queue*/
+
+ /* protect txq_entry_t transmit queue */
spinlock_t txq_spinlock;
- /*protect rxq_entry_t receiver queue*/
+
+ /* protect rxq_entry_t receiver queue */
struct mutex rxq_cs;
+
/* lock to protect hif access */
struct mutex hif_cs;
@@ -242,6 +252,7 @@ struct wilc {
struct task_struct *txq_thread;
int quit;
+
/* lock to protect issue of wid command to firmware */
struct mutex cfg_cmd_lock;
struct wilc_cfg_frame cfg_frame;
@@ -268,6 +279,7 @@ struct wilc {
struct wilc_cfg cfg;
void *bus_data;
struct net_device *monitor_dev;
+
/* deinit lock */
struct mutex deinit_lock;
u8 sta_ch;
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/sdio.c
index c787c5da8f2b..319e039380b0 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/sdio.c
@@ -8,8 +8,8 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/host.h>
-#include "wilc_wfi_netdevice.h"
-#include "wilc_wfi_cfgoperations.h"
+#include "netdev.h"
+#include "cfg80211.h"
#define SDIO_MODALIAS "wilc1000_sdio"
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/spi.c
index 3c1ae9e9f9aa..55f8757325f0 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/spi.c
@@ -4,10 +4,11 @@
* All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/spi/spi.h>
-#include "wilc_wfi_netdevice.h"
-#include "wilc_wfi_cfgoperations.h"
+#include "netdev.h"
+#include "cfg80211.h"
struct wilc_spi {
int crc_off;
@@ -132,6 +133,12 @@ static int wilc_bus_probe(struct spi_device *spi)
wilc->bus_data = spi_priv;
wilc->gpio_irq = gpio;
+ wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (!IS_ERR(wilc->rtc_clk))
+ clk_prepare_enable(wilc->rtc_clk);
+
return 0;
}
@@ -142,6 +149,10 @@ static int wilc_bus_remove(struct spi_device *spi)
/* free the GPIO in module remove */
if (wilc->gpio_irq)
gpiod_put(wilc->gpio_irq);
+
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_disable_unprepare(wilc->rtc_clk);
+
wilc_netdev_cleanup(wilc);
return 0;
}
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wlan.c
index 771d8cb68dc1..d3de76126b78 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wlan.c
@@ -6,8 +6,8 @@
#include <linux/if_ether.h>
#include <linux/ip.h>
-#include "wilc_wfi_cfgoperations.h"
-#include "wilc_wlan_cfg.h"
+#include "cfg80211.h"
+#include "wlan_cfg.h"
static inline bool is_wilc1000(u32 id)
{
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wlan.h
index 7469fa47d588..1f6957cf2e9c 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wlan.h
@@ -190,7 +190,7 @@
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
-/*time for expiring the completion of cfg packets*/
+/* time for expiring the completion of cfg packets */
#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(2000)
#define IS_MANAGMEMENT 0x100
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wlan_cfg.c
index 3f53807cee0f..6f6b286788d1 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wlan_cfg.c
@@ -4,10 +4,10 @@
* All rights reserved.
*/
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
-#include "wilc_wlan_cfg.h"
-#include "wilc_wfi_netdevice.h"
+#include "wlan_if.h"
+#include "wlan.h"
+#include "wlan_cfg.h"
+#include "netdev.h"
enum cfg_cmd_type {
CFG_BYTE_CMD = 0,
@@ -44,6 +44,11 @@ static const struct wilc_cfg_str g_cfg_str[] = {
{WID_NIL, NULL}
};
+#define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
+#define WILC_RESP_MSG_TYPE_STATUS_INFO 'I'
+#define WILC_RESP_MSG_TYPE_NETWORK_INFO 'N'
+#define WILC_RESP_MSG_TYPE_SCAN_COMPLETE 'S'
+
/********************************************
*
* Configuration Functions
@@ -360,33 +365,26 @@ void wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
size -= 4;
rsp->type = 0;
- /*
- * The valid types of response messages are
- * 'R' (Response),
- * 'I' (Information), and
- * 'N' (Network Information)
- */
-
switch (msg_type) {
- case 'R':
+ case WILC_RESP_MSG_TYPE_CONFIG_REPLY:
wilc_wlan_parse_response_frame(wilc, frame, size);
rsp->type = WILC_CFG_RSP;
rsp->seq_no = msg_id;
break;
- case 'I':
+ case WILC_RESP_MSG_TYPE_STATUS_INFO:
wilc_wlan_parse_info_frame(wilc, frame);
rsp->type = WILC_CFG_RSP_STATUS;
rsp->seq_no = msg_id;
- /*call host interface info parse as well*/
+ /* call host interface info parse as well */
wilc_gnrl_async_info_received(wilc, frame - 4, size + 4);
break;
- case 'N':
+ case WILC_RESP_MSG_TYPE_NETWORK_INFO:
wilc_network_info_received(wilc, frame - 4, size + 4);
break;
- case 'S':
+ case WILC_RESP_MSG_TYPE_SCAN_COMPLETE:
wilc_scan_complete_received(wilc, frame - 4, size + 4);
break;
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wlan_cfg.h
index 614c5673f232..614c5673f232 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.h
+++ b/drivers/staging/wilc1000/wlan_cfg.h
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wlan_if.h
index 70eac586f80c..7c7ee66c35f5 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wlan_if.h
@@ -750,10 +750,10 @@ enum {
WID_REMOVE_KEY = 0x301E,
WID_ASSOC_REQ_INFO = 0x301F,
WID_ASSOC_RES_INFO = 0x3020,
- WID_MANUFACTURER = 0x3026, /*Added for CAPI tool */
- WID_MODEL_NAME = 0x3027, /*Added for CAPI tool */
- WID_MODEL_NUM = 0x3028, /*Added for CAPI tool */
- WID_DEVICE_NAME = 0x3029, /*Added for CAPI tool */
+ WID_MANUFACTURER = 0x3026, /* Added for CAPI tool */
+ WID_MODEL_NAME = 0x3027, /* Added for CAPI tool */
+ WID_MODEL_NUM = 0x3028, /* Added for CAPI tool */
+ WID_DEVICE_NAME = 0x3029, /* Added for CAPI tool */
/* NMAC String WID list */
WID_SET_OPERATION_MODE = 0x3079,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 5ff740a8837d..bdd7f414fdbb 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1181,8 +1181,6 @@ struct hfa384x_usbctlx {
ctlx_cmdcb_t cmdcb; /* Async command callback */
ctlx_usercb_t usercb; /* Async user callback, */
void *usercb_data; /* at CTLX completion */
-
- int variant; /* Identifies cmd variant */
};
struct hfa384x_usbctlxq {
@@ -1337,7 +1335,9 @@ struct hfa384x {
* interface
*/
- struct hfa384x_caplevel cap_act_sta_mfi; /* sta f/w to modem interface */
+ struct hfa384x_caplevel cap_act_sta_mfi; /*
+ * sta f/w to modem interface
+ */
struct hfa384x_caplevel cap_act_ap_cfi; /*
* ap f/w to controller
@@ -1359,7 +1359,9 @@ struct hfa384x {
struct hfa384x_inf_frame *scanresults;
- struct prism2sta_authlist authlist; /* Authenticated station list. */
+ struct prism2sta_authlist authlist; /*
+ * Authenticated station list.
+ */
unsigned int accessmode; /* Access mode. */
struct prism2sta_accesslist allow; /* Allowed station list. */
struct prism2sta_accesslist deny; /* Denied station list. */
@@ -1370,12 +1372,13 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb);
void hfa384x_destroy(struct hfa384x *hw);
int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime,
- int genesis);
+ int genesis);
int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport);
int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport);
int hfa384x_drvr_flashdl_enable(struct hfa384x *hw);
int hfa384x_drvr_flashdl_disable(struct hfa384x *hw);
-int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len);
+int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf,
+ u32 len);
int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len);
int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr);
int hfa384x_drvr_ramdl_disable(struct hfa384x *hw);
@@ -1383,7 +1386,8 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len);
int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len);
int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len);
-static inline int hfa384x_drvr_getconfig16(struct hfa384x *hw, u16 rid, void *val)
+static inline int
+hfa384x_drvr_getconfig16(struct hfa384x *hw, u16 rid, void *val)
{
int result = 0;
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 28d372a0663a..b71756ab0394 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -293,13 +293,11 @@ void dbprint_urb(struct urb *urb)
pr_debug("urb->transfer_buffer_length=0x%08x\n",
urb->transfer_buffer_length);
pr_debug("urb->actual_length=0x%08x\n", urb->actual_length);
- pr_debug("urb->bandwidth=0x%08x\n", urb->bandwidth);
pr_debug("urb->setup_packet(ctl)=0x%08x\n",
(unsigned int)urb->setup_packet);
pr_debug("urb->start_frame(iso/irq)=0x%08x\n", urb->start_frame);
pr_debug("urb->interval(irq)=0x%08x\n", urb->interval);
pr_debug("urb->error_count(iso)=0x%08x\n", urb->error_count);
- pr_debug("urb->timeout=0x%08x\n", urb->timeout);
pr_debug("urb->context=0x%08x\n", (unsigned int)urb->context);
pr_debug("urb->complete=0x%08x\n", (unsigned int)urb->complete);
}
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 8bd92bba0ac1..51d917c8cdc8 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -49,6 +49,7 @@
/*================================================================*/
/* System Includes */
+#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/random.h>
@@ -61,61 +62,6 @@
#define WEP_KEY(x) (((x) & 0xC0) >> 6)
-static const u32 wep_crc32_table[256] = {
- 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
- 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
- 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
- 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
- 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
- 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
- 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
- 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
- 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
- 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
- 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
- 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
- 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
- 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
- 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
- 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
- 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
- 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
- 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
- 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
- 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
- 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
- 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
- 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
- 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
- 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
- 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
- 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
- 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
- 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
- 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
- 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
- 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
- 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
- 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
- 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
- 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
- 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
- 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
- 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
- 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
- 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
- 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
- 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
- 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
- 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
- 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
- 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
- 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
- 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
- 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
- 0x2d02ef8dL
-};
-
/* keylen in bytes! */
int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
@@ -184,7 +130,6 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
}
/* Apply the RC4 to the data, update the CRC32 */
- crc = ~0;
i = 0;
j = 0;
for (k = 0; k < len; k++) {
@@ -192,9 +137,8 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
j = (j + s[i]) & 0xff;
swap(i, j);
buf[k] ^= s[(s[i] + s[j]) & 0xff];
- crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8);
}
- crc = ~crc;
+ crc = ~crc32_le(~0, buf, len);
/* now let's check the crc */
c_crc[0] = crc;
@@ -257,17 +201,15 @@ int wep_encrypt(struct wlandevice *wlandev, u8 *buf,
}
/* Update CRC32 then apply RC4 to the data */
- crc = ~0;
i = 0;
j = 0;
for (k = 0; k < len; k++) {
- crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8);
i = (i + 1) & 0xff;
j = (j + s[i]) & 0xff;
swap(i, j);
dst[k] = buf[k] ^ s[(s[i] + s[j]) & 0xff];
}
- crc = ~crc;
+ crc = ~crc32_le(~0, buf, len);
/* now let's encrypt the crc */
icv[0] = crc;
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index b5ba176004c1..352556f6870a 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -137,7 +137,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
{
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (wlandev) {
LIST_HEAD(cleanlist);
struct hfa384x_usbctlx *ctlx, *temp;
@@ -222,7 +222,7 @@ static int prism2sta_suspend(struct usb_interface *interface,
struct hfa384x *hw = NULL;
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (!wlandev)
return -ENODEV;
@@ -245,7 +245,7 @@ static int prism2sta_resume(struct usb_interface *interface)
struct hfa384x *hw = NULL;
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (!wlandev)
return -ENODEV;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 54bb1ebd8eb5..af35251232eb 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -297,7 +297,6 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct net_device *ndev = cdev->lldi.ports[0];
struct cxgbi_tag_format tformat;
- unsigned int ppmax;
int ret, i;
if (!lldi->vr->iscsi.size) {
@@ -305,8 +304,6 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
return -EACCES;
}
- ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
-
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d19e051f2bc2..7251a87bb576 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1165,7 +1165,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@@ -2002,7 +2004,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -2189,24 +2193,22 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
goto empty_sendtargets;
}
- if (strncmp("SendTargets", text_in, 11) != 0) {
+ if (strncmp("SendTargets=", text_in, 12) != 0) {
pr_err("Received Text Data that is not"
" SendTargets, cannot continue.\n");
goto reject;
}
+ /* '=' confirmed in strncmp */
text_ptr = strchr(text_in, '=');
- if (!text_ptr) {
- pr_err("No \"=\" separator found in Text Data,"
- " cannot continue.\n");
- goto reject;
- }
- if (!strncmp("=All", text_ptr, 4)) {
+ BUG_ON(!text_ptr);
+ if (!strncmp("=All", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
- pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
+ pr_err("Unable to locate valid SendTargets%s value\n",
+ text_ptr);
goto reject;
}
@@ -4232,6 +4234,8 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
+ target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_wait_for_sess_cmds(sess->se_sess);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 51ddca2033e0..0e54627d9aa8 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -18,6 +18,22 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
+static char *chap_get_digest_name(const int digest_type)
+{
+ switch (digest_type) {
+ case CHAP_DIGEST_MD5:
+ return "md5";
+ case CHAP_DIGEST_SHA1:
+ return "sha1";
+ case CHAP_DIGEST_SHA256:
+ return "sha256";
+ case CHAP_DIGEST_SHA3_256:
+ return "sha3-256";
+ default:
+ return NULL;
+ }
+}
+
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -25,16 +41,21 @@ static int chap_gen_challenge(
unsigned int *c_len)
{
int ret;
- unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+ unsigned char *challenge_asciihex;
struct iscsi_chap *chap = conn->auth_protocol;
- memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+ challenge_asciihex = kzalloc(chap->challenge_len * 2 + 1, GFP_KERNEL);
+ if (!challenge_asciihex)
+ return -ENOMEM;
- ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ memset(chap->challenge, 0, MAX_CHAP_CHALLENGE_LEN);
+
+ ret = get_random_bytes_wait(chap->challenge, chap->challenge_len);
if (unlikely(ret))
- return ret;
+ goto out;
+
bin2hex(challenge_asciihex, chap->challenge,
- CHAP_CHALLENGE_LENGTH);
+ chap->challenge_len);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
*/
@@ -43,12 +64,29 @@ static int chap_gen_challenge(
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
+
+out:
+ kfree(challenge_asciihex);
+ return ret;
+}
+
+static int chap_test_algorithm(const char *name)
+{
+ struct crypto_shash *tfm;
+
+ tfm = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(tfm))
+ return -1;
+
+ crypto_free_shash(tfm);
return 0;
}
static int chap_check_algorithm(const char *a_str)
{
- char *tmp, *orig, *token;
+ char *tmp, *orig, *token, *digest_name;
+ long digest_type;
+ int r = CHAP_DIGEST_UNKNOWN;
tmp = kstrdup(a_str, GFP_KERNEL);
if (!tmp) {
@@ -70,15 +108,24 @@ static int chap_check_algorithm(const char *a_str)
if (!token)
goto out;
- if (!strncmp(token, "5", 1)) {
- pr_debug("Selected MD5 Algorithm\n");
- kfree(orig);
- return CHAP_DIGEST_MD5;
+ if (kstrtol(token, 10, &digest_type))
+ continue;
+
+ digest_name = chap_get_digest_name(digest_type);
+ if (!digest_name)
+ continue;
+
+ pr_debug("Selected %s Algorithm\n", digest_name);
+ if (chap_test_algorithm(digest_name) < 0) {
+ pr_err("failed to allocate %s algo\n", digest_name);
+ } else {
+ r = digest_type;
+ goto out;
}
}
out:
kfree(orig);
- return CHAP_DIGEST_UNKNOWN;
+ return r;
}
static void chap_close(struct iscsi_conn *conn)
@@ -94,7 +141,7 @@ static struct iscsi_chap *chap_server_open(
char *aic_str,
unsigned int *aic_len)
{
- int ret;
+ int digest_type;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -109,17 +156,19 @@ static struct iscsi_chap *chap_server_open(
return NULL;
chap = conn->auth_protocol;
- ret = chap_check_algorithm(a_str);
- switch (ret) {
+ digest_type = chap_check_algorithm(a_str);
+ switch (digest_type) {
case CHAP_DIGEST_MD5:
- pr_debug("[server] Got CHAP_A=5\n");
- /*
- * Send back CHAP_A set to MD5.
- */
- *aic_len = sprintf(aic_str, "CHAP_A=5");
- *aic_len += 1;
- chap->digest_type = CHAP_DIGEST_MD5;
- pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ chap->digest_size = MD5_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA1:
+ chap->digest_size = SHA1_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA256:
+ chap->digest_size = SHA256_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA3_256:
+ chap->digest_size = SHA3_256_SIGNATURE_SIZE;
break;
case CHAP_DIGEST_UNKNOWN:
default:
@@ -128,6 +177,16 @@ static struct iscsi_chap *chap_server_open(
return NULL;
}
+ chap->digest_name = chap_get_digest_name(digest_type);
+
+ /* Tie the challenge length to the digest size */
+ chap->challenge_len = chap->digest_size;
+
+ pr_debug("[server] Got CHAP_A=%d\n", digest_type);
+ *aic_len = sprintf(aic_str, "CHAP_A=%d", digest_type);
+ *aic_len += 1;
+ pr_debug("[server] Sending CHAP_A=%d\n", digest_type);
+
/*
* Set Identifier.
*/
@@ -146,7 +205,7 @@ static struct iscsi_chap *chap_server_open(
return chap;
}
-static int chap_server_compute_md5(
+static int chap_server_compute_hash(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
char *nr_in_ptr,
@@ -155,36 +214,57 @@ static int chap_server_compute_md5(
{
unsigned long id;
unsigned char id_as_uchar;
- unsigned char digest[MD5_SIGNATURE_SIZE];
- unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
- unsigned char identifier[10], *challenge = NULL;
- unsigned char *challenge_binhex = NULL;
- unsigned char client_digest[MD5_SIGNATURE_SIZE];
- unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char type;
+ unsigned char identifier[10], *initiatorchg = NULL;
+ unsigned char *initiatorchg_binhex = NULL;
+ unsigned char *digest = NULL;
+ unsigned char *response = NULL;
+ unsigned char *client_digest = NULL;
+ unsigned char *server_digest = NULL;
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_shash *tfm = NULL;
struct shash_desc *desc = NULL;
- int auth_ret = -1, ret, challenge_len;
+ int auth_ret = -1, ret, initiatorchg_len;
+
+ digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!digest) {
+ pr_err("Unable to allocate the digest buffer\n");
+ goto out;
+ }
+
+ response = kzalloc(chap->digest_size * 2 + 2, GFP_KERNEL);
+ if (!response) {
+ pr_err("Unable to allocate the response buffer\n");
+ goto out;
+ }
+
+ client_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!client_digest) {
+ pr_err("Unable to allocate the client_digest buffer\n");
+ goto out;
+ }
+
+ server_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!server_digest) {
+ pr_err("Unable to allocate the server_digest buffer\n");
+ goto out;
+ }
memset(identifier, 0, 10);
memset(chap_n, 0, MAX_CHAP_N_SIZE);
memset(chap_r, 0, MAX_RESPONSE_LENGTH);
- memset(digest, 0, MD5_SIGNATURE_SIZE);
- memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
- memset(client_digest, 0, MD5_SIGNATURE_SIZE);
- memset(server_digest, 0, MD5_SIGNATURE_SIZE);
- challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge) {
+ initiatorchg = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg) {
pr_err("Unable to allocate challenge buffer\n");
goto out;
}
- challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge_binhex) {
- pr_err("Unable to allocate challenge_binhex buffer\n");
+ initiatorchg_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg_binhex) {
+ pr_err("Unable to allocate initiatorchg_binhex buffer\n");
goto out;
}
/*
@@ -219,18 +299,18 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
- if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ if (strlen(chap_r) != chap->digest_size * 2) {
pr_err("Malformed CHAP_R\n");
goto out;
}
- if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
pr_err("Malformed CHAP_R\n");
goto out;
}
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- tfm = crypto_alloc_shash("md5", 0, 0);
+ tfm = crypto_alloc_shash(chap->digest_name, 0, 0);
if (IS_ERR(tfm)) {
tfm = NULL;
pr_err("Unable to allocate struct crypto_shash\n");
@@ -265,21 +345,23 @@ static int chap_server_compute_md5(
}
ret = crypto_shash_finup(desc, chap->challenge,
- CHAP_CHALLENGE_LENGTH, server_digest);
+ chap->challenge_len, server_digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for challenge\n");
goto out;
}
- bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
- pr_debug("[server] MD5 Server Digest: %s\n", response);
+ bin2hex(response, server_digest, chap->digest_size);
+ pr_debug("[server] %s Server Digest: %s\n",
+ chap->digest_name, response);
- if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
- pr_debug("[server] MD5 Digests do not match!\n\n");
+ if (memcmp(server_digest, client_digest, chap->digest_size) != 0) {
+ pr_debug("[server] %s Digests do not match!\n\n",
+ chap->digest_name);
goto out;
} else
- pr_debug("[server] MD5 Digests match, CHAP connection"
- " successful.\n\n");
+ pr_debug("[server] %s Digests match, CHAP connection"
+ " successful.\n\n", chap->digest_name);
/*
* One way authentication has succeeded, return now if mutual
* authentication is not enabled.
@@ -317,7 +399,7 @@ static int chap_server_compute_md5(
* Get CHAP_C.
*/
if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
- challenge, &type) < 0) {
+ initiatorchg, &type) < 0) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
@@ -326,26 +408,28 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_C.\n");
goto out;
}
- challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
- if (!challenge_len) {
+ initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
+ if (!initiatorchg_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
}
- if (challenge_len > 1024) {
+ if (initiatorchg_len > 1024) {
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
- if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ if (hex2bin(initiatorchg_binhex, initiatorchg, initiatorchg_len) < 0) {
pr_err("Malformed CHAP_C\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
+ pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
* the target.
*/
- if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+ if (initiatorchg_len == chap->challenge_len &&
+ !memcmp(initiatorchg_binhex, chap->challenge,
+ initiatorchg_len)) {
pr_err("initiator CHAP_C matches target CHAP_C, failing"
" login attempt\n");
goto out;
@@ -377,7 +461,7 @@ static int chap_server_compute_md5(
/*
* Convert received challenge to binary hex.
*/
- ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
+ ret = crypto_shash_finup(desc, initiatorchg_binhex, initiatorchg_len,
digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for ma challenge\n");
@@ -393,7 +477,7 @@ static int chap_server_compute_md5(
/*
 * Convert response from binary hex to ascii hex.
*/
- bin2hex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, chap->digest_size);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
@@ -403,33 +487,15 @@ out:
kzfree(desc);
if (tfm)
crypto_free_shash(tfm);
- kfree(challenge);
- kfree(challenge_binhex);
+ kfree(initiatorchg);
+ kfree(initiatorchg_binhex);
+ kfree(digest);
+ kfree(response);
+ kfree(server_digest);
+ kfree(client_digest);
return auth_ret;
}
-static int chap_got_response(
- struct iscsi_conn *conn,
- struct iscsi_node_auth *auth,
- char *nr_in_ptr,
- char *nr_out_ptr,
- unsigned int *nr_out_len)
-{
- struct iscsi_chap *chap = conn->auth_protocol;
-
- switch (chap->digest_type) {
- case CHAP_DIGEST_MD5:
- if (chap_server_compute_md5(conn, auth, nr_in_ptr,
- nr_out_ptr, nr_out_len) < 0)
- return -1;
- return 0;
- default:
- pr_err("Unknown CHAP digest type %d!\n",
- chap->digest_type);
- return -1;
- }
-}
-
u32 chap_main_loop(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
@@ -448,7 +514,7 @@ u32 chap_main_loop(
return 0;
} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
convert_null_to_semi(in_text, *in_len);
- if (chap_got_response(conn, auth, in_text, out_text,
+ if (chap_server_compute_hash(conn, auth, in_text, out_text,
out_len) < 0) {
chap_close(conn);
return 2;
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index d5600ac30b53..fc75c1c20e23 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -6,14 +6,19 @@
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
-#define CHAP_DIGEST_SHA 6
+#define CHAP_DIGEST_SHA1 6
+#define CHAP_DIGEST_SHA256 7
+#define CHAP_DIGEST_SHA3_256 8
-#define CHAP_CHALLENGE_LENGTH 16
+#define MAX_CHAP_CHALLENGE_LEN 32
#define CHAP_CHALLENGE_STR_LEN 4096
-#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
+#define MAX_RESPONSE_LENGTH 128 /* sufficient for SHA3 256 */
#define MAX_CHAP_N_SIZE 512
#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
+#define SHA1_SIGNATURE_SIZE 20 /* 20 bytes in a SHA1 message digest */
+#define SHA256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA256 message digest */
+#define SHA3_256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA3 256 message digest */
#define CHAP_STAGE_CLIENT_A 1
#define CHAP_STAGE_SERVER_AIC 2
@@ -28,9 +33,11 @@ extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *,
int *, int *);
struct iscsi_chap {
- unsigned char digest_type;
unsigned char id;
- unsigned char challenge[CHAP_CHALLENGE_LENGTH];
+ unsigned char challenge[MAX_CHAP_CHALLENGE_LEN];
+ unsigned int challenge_len;
+ unsigned char *digest_name;
+ unsigned int digest_size;
unsigned int authenticate_target;
unsigned int chap_state;
} ____cacheline_aligned;
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index daf47f38e081..240c4c4344f6 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -93,9 +93,6 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define OFMARKER "OFMarker"
#define IFMARKINT "IFMarkInt"
#define OFMARKINT "OFMarkInt"
-#define X_EXTENSIONKEY "X-com.sbei.version"
-#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
-#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
 * Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 3c79411c4cd0..6b4b354c88aa 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -118,7 +118,7 @@ static int srp_get_pr_transport_id(
memset(buf + 8, 0, leading_zero_bytes);
rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
if (rc < 0) {
- pr_debug("hex2bin failed for %s: %d\n", __func__, rc);
+ pr_debug("hex2bin failed for %s: %d\n", p, rc);
return rc;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index e5a71addbb06..d24e0a3ba3ff 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -32,9 +32,6 @@
extern struct se_device *g_lun0_dev;
-static DEFINE_SPINLOCK(tpg_lock);
-static LIST_HEAD(tpg_list);
-
/* __core_tpg_get_initiator_node_acl():
*
* mutex_lock(&tpg->acl_node_mutex); must be held when calling
@@ -475,7 +472,6 @@ int core_tpg_register(
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
- INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->session_lock);
mutex_init(&se_tpg->tpg_lun_mutex);
@@ -494,10 +490,6 @@ int core_tpg_register(
}
}
- spin_lock_bh(&tpg_lock);
- list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
- spin_unlock_bh(&tpg_lock);
-
pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
@@ -519,10 +511,6 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
- spin_lock_bh(&tpg_lock);
- list_del(&se_tpg->se_tpg_node);
- spin_unlock_bh(&tpg_lock);
-
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7f06a62f8661..ea482d4b1f00 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -584,6 +584,15 @@ void transport_free_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_free_session);
+static int target_release_res(struct se_device *dev, void *data)
+{
+ struct se_session *sess = data;
+
+ if (dev->reservation_holder == sess)
+ target_release_reservation(dev);
+ return 0;
+}
+
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
@@ -600,6 +609,12 @@ void transport_deregister_session(struct se_session *se_sess)
se_sess->fabric_sess_ptr = NULL;
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+ /*
+ * Since the session is being removed, release SPC-2
+ * reservations held by the session that is disappearing.
+ */
+ target_for_each_device(target_release_res, se_sess);
+
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->fabric_name);
/*
@@ -1243,6 +1258,19 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
return TCM_NO_SENSE;
}
+/**
+ * target_cmd_size_check - Check whether there will be a residual.
+ * @cmd: SCSI command.
+ * @size: Data buffer size derived from CDB. The data buffer size provided by
+ * the SCSI transport driver is available in @cmd->data_length.
+ *
+ * Compare the data buffer size from the CDB with the data buffer limit from the transport
+ * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
+ *
+ * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ *
+ * Return: TCM_NO_SENSE
+ */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 35be1be87d2a..0b9dfa6b17bc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -499,7 +499,7 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
schedule_delayed_work(&tcmu_unmap_work, 0);
/* try to get new page from the mm */
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_NOIO);
if (!page)
goto err_alloc;
@@ -573,7 +573,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
+ tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
if (!tcmu_cmd)
return NULL;
@@ -584,7 +584,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
- GFP_KERNEL);
+ GFP_NOIO);
if (!tcmu_cmd->dbi) {
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
return NULL;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index b9b1e92c6f8d..425c1070de08 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -467,7 +467,6 @@ int target_xcopy_setup_pt(void)
}
memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
- INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 0f16d9ffd8d1..37d22e39fd8d 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -675,7 +675,7 @@ static const struct file_operations tee_fops = {
.open = tee_open,
.release = tee_release,
.unlocked_ioctl = tee_ioctl,
- .compat_ioctl = tee_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static void tee_release_device(struct device *dev)
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index e0575d29023a..b831fc77cf64 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -2,7 +2,7 @@
/*
* gov_bang_bang.c - A simple thermal throttling governor using hysteresis
*
- * Copyright (C) 2014 Peter Feuerer <peter@piie.net>
+ * Copyright (C) 2014 Peter Kaestle <peter@piie.net>
*
* Based on step_wise.c with following Copyrights:
* Copyright (C) 2012 Intel Corp
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 8bf8e031f0bc..fdd77bb4628d 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -33,9 +33,9 @@ static int tb_port_enable_tmu(struct tb_port *port, bool enable)
* Legacy devices need to have TMU access enabled before port
* space can be fully accessed.
*/
- if (tb_switch_is_lr(sw))
+ if (tb_switch_is_light_ridge(sw))
offset = 0x26;
- else if (tb_switch_is_er(sw))
+ else if (tb_switch_is_eagle_ridge(sw))
offset = 0x2a;
else
return 0;
@@ -60,7 +60,7 @@ static void tb_port_dummy_read(struct tb_port *port)
* reading stale data on next read perform one dummy read after
* port capabilities are walked.
*/
- if (tb_switch_is_lr(port->sw)) {
+ if (tb_switch_is_light_ridge(port->sw)) {
u32 dummy;
tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2ec1af8f7968..d97813e80e5f 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -962,8 +962,8 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
- tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
- space, offset);
+ tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
+ route, space, offset);
break;
default:
@@ -988,8 +988,8 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
- tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
- space, offset);
+ tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
+ route, space, offset);
break;
default:
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index ee5196479854..8dd7de0cc826 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -514,17 +514,6 @@ int tb_drom_read(struct tb_switch *sw)
* no entries). Hardcode the configuration here.
*/
tb_drom_read_uid_only(sw, &sw->uid);
-
- sw->ports[1].link_nr = 0;
- sw->ports[2].link_nr = 1;
- sw->ports[1].dual_link_port = &sw->ports[2];
- sw->ports[2].dual_link_port = &sw->ports[1];
-
- sw->ports[3].link_nr = 0;
- sw->ports[4].link_nr = 1;
- sw->ports[3].dual_link_port = &sw->ports[4];
- sw->ports[4].dual_link_port = &sw->ports[3];
-
return 0;
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 245588f691e7..13e88109742e 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
@@ -43,6 +44,10 @@
#define ICM_APPROVE_TIMEOUT 10000 /* ms */
#define ICM_MAX_LINK 4
+static bool start_icm;
+module_param(start_icm, bool, 0444);
+MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
+
/**
* struct icm - Internal connection manager private data
* @request_lock: Makes sure only one message is send to ICM at time
@@ -147,6 +152,17 @@ static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
return NULL;
}
+static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
+{
+ const struct intel_vss *vss;
+
+ vss = parse_intel_vss(ep_name, size);
+ if (vss)
+ return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
+ return false;
+}
+
static inline struct tb *icm_to_tb(struct icm *icm)
{
return ((void *)icm - sizeof(struct tb));
@@ -339,6 +355,14 @@ static void icm_veto_end(struct tb *tb)
}
}
+static bool icm_firmware_running(const struct tb_nhi *nhi)
+{
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ return !!(val & REG_FW_STS_ICM_EN);
+}
+
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
@@ -562,58 +586,42 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
return 0;
}
-static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
- const uuid_t *uuid, const u8 *ep_name,
- size_t ep_name_size, u8 connection_id,
- u8 connection_key, u8 link, u8 depth,
- enum tb_security_level security_level,
- bool authorized, bool boot)
+static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
+ const uuid_t *uuid)
{
- const struct intel_vss *vss;
+ struct tb *tb = parent_sw->tb;
struct tb_switch *sw;
- int ret;
-
- pm_runtime_get_sync(&parent_sw->dev);
- sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
- if (IS_ERR(sw))
- goto out;
+ sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+ if (IS_ERR(sw)) {
+ tb_warn(tb, "failed to allocate switch at %llx\n", route);
+ return sw;
+ }
sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
if (!sw->uuid) {
- tb_sw_warn(sw, "cannot allocate memory for switch\n");
tb_switch_put(sw);
- goto out;
+ return ERR_PTR(-ENOMEM);
}
- sw->connection_id = connection_id;
- sw->connection_key = connection_key;
- sw->link = link;
- sw->depth = depth;
- sw->authorized = authorized;
- sw->security_level = security_level;
- sw->boot = boot;
+
init_completion(&sw->rpm_complete);
+ return sw;
+}
- vss = parse_intel_vss(ep_name, ep_name_size);
- if (vss)
- sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
+{
+ u64 route = tb_route(sw);
+ int ret;
/* Link the two switches now */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
ret = tb_switch_add(sw);
- if (ret) {
+ if (ret)
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
- tb_switch_put(sw);
- sw = ERR_PTR(ret);
- }
-
-out:
- pm_runtime_mark_last_busy(&parent_sw->dev);
- pm_runtime_put_autosuspend(&parent_sw->dev);
- return sw;
+ return ret;
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -697,11 +705,11 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
(const struct icm_fr_event_device_connected *)hdr;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
+ bool boot, dual_lane, speed_gen3;
struct icm *icm = tb_priv(tb);
bool authorized = false;
struct tb_xdomain *xd;
u8 link, depth;
- bool boot;
u64 route;
int ret;
@@ -714,6 +722,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
@@ -811,10 +821,27 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id,
- pkg->connection_key, link, depth, security_level,
- authorized, boot);
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->connection_key = pkg->connection_key;
+ sw->link = link;
+ sw->depth = depth;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
@@ -1142,10 +1169,10 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
{
const struct icm_tr_event_device_connected *pkg =
(const struct icm_tr_event_device_connected *)hdr;
+ bool authorized, boot, dual_lane, speed_gen3;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
struct tb_xdomain *xd;
- bool authorized, boot;
u64 route;
icm_postpone_rescan(tb);
@@ -1163,6 +1190,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
@@ -1205,11 +1234,27 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
return;
}
- sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
- security_level, authorized, boot);
- if (!IS_ERR(sw) && force_rtd3)
- sw->rpm = true;
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = force_rtd3;
+ if (!sw->rpm)
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
+ sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
@@ -1349,9 +1394,12 @@ static bool icm_ar_is_supported(struct tb *tb)
/*
* Starting from Alpine Ridge we can use ICM on Apple machines
* as well. We just need to reset and re-enable it first.
+ * However, only start it if explicitly asked by the user.
*/
- if (!x86_apple_machine)
+ if (icm_firmware_running(tb->nhi))
return true;
+ if (!start_icm)
+ return false;
/*
* Find the upstream PCIe port in case we need to do reset
@@ -1704,8 +1752,7 @@ static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
u32 val;
/* Check if the ICM firmware is already running */
- val = ioread32(nhi->iobase + REG_FW_STS);
- if (val & REG_FW_STS_ICM_EN)
+ if (icm_firmware_running(nhi))
return 0;
dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
@@ -1893,14 +1940,12 @@ static int icm_suspend(struct tb *tb)
*/
static void icm_unplug_children(struct tb_switch *sw)
{
- unsigned int i;
+ struct tb_port *port;
if (tb_route(sw))
sw->is_unplugged = true;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
-
+ tb_switch_for_each_port(sw, port) {
if (port->xdomain)
port->xdomain->is_unplugged = true;
else if (tb_port_has_remote(port))
@@ -1936,11 +1981,9 @@ static void remove_unplugged_switch(struct tb_switch *sw)
static void icm_free_unplugged_children(struct tb_switch *sw)
{
- unsigned int i;
-
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ tb_switch_for_each_port(sw, port) {
if (port->xdomain && port->xdomain->is_unplugged) {
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
@@ -2216,7 +2259,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ICL_NHI0:
case PCI_DEVICE_ID_INTEL_ICL_NHI1:
- icm->is_supported = icm_ar_is_supported;
+ icm->is_supported = icm_fr_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
icm->device_connected = icm_icl_device_connected;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index ae1e92611c3e..bd44d50246d2 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -94,7 +94,7 @@ int tb_lc_configure_link(struct tb_switch *sw)
struct tb_port *up, *down;
int ret;
- if (!sw->config.enabled || !tb_route(sw))
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
return 0;
up = tb_upstream_port(sw);
@@ -124,7 +124,7 @@ void tb_lc_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw))
+ if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw))
return;
up = tb_upstream_port(sw);
@@ -177,3 +177,192 @@ int tb_lc_set_sleep(struct tb_switch *sw)
return 0;
}
+
+/**
+ * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
+ * @sw: Switch to check
+ *
+ * Checks whether the conditions for lane bonding from the parent to
+ * @sw are met.
+ */
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int cap, ret;
+ u32 val;
+
+ if (sw->generation < 2)
+ return false;
+
+ up = tb_upstream_port(sw);
+ cap = find_port_lc_cap(up);
+ if (cap < 0)
+ return false;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TB_LC_PORT_ATTR_BE);
+}
+
+static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
+ struct tb_port *in)
+{
+ struct tb_port *port;
+
+ /* The first DP IN port is sink 0 and the second is sink 1 */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_is_dpin(port))
+ return in != port;
+ }
+
+ return -EINVAL;
+}
+
+static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
+{
+ u32 val, alloc;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Sink is available for CM/SW to use if the allocation value is
+ * either 0 or 1.
+ */
+ if (!sink) {
+ alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
+ return 0;
+ } else {
+ alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
+ * @sw: Switch whose DP sink is queried
+ * @in: DP IN port to check
+ *
+ * Queries through LC SNK_ALLOCATION registers whether DP sink is available
+ * for the given DP IN port or not.
+ */
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
+{
+ int sink;
+
+ /*
+ * For older generations sink is always available as there is no
+ * allocation mechanism.
+ */
+ if (sw->generation < 3)
+ return true;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return false;
+
+ return !tb_lc_dp_sink_available(sw, sink);
+}
+
+/**
+ * tb_lc_dp_sink_alloc() - Allocate DP sink
+ * @sw: Switch whose DP sink is allocated
+ * @in: DP IN port the DP sink is allocated for
+ *
+ * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
+ * resource is available and allocation is successful returns %0. In all
+ * other cases returns a negative errno. In particular %-EBUSY is returned if
+ * the resource was not available.
+ */
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink) {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
+ } else {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ }
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d allocated\n", sink);
+ return 0;
+}
+
+/**
+ * tb_lc_dp_sink_dealloc() - De-allocate DP sink
+ * @sw: Switch whose DP sink is de-allocated
+ * @in: DP IN port whose DP sink is de-allocated
+ *
+ * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
+ */
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ /* Needs to be owned by CM/SW */
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink)
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ else
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d de-allocated\n", sink);
+ return 0;
+}
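
The SNK_ALLOCATION handling above packs both DP sink allocation fields into a single dword: sink 0 lives in bits 3:0 and sink 1 in bits 7:4, and a field value of 0 (unallocated) or 1 (owned by the connection manager) means the sink may be used or claimed by the CM. A minimal standalone sketch of the same bit manipulation, working on a plain register value instead of going through tb_sw_read()/tb_sw_write() (the helper names are illustrative only, not part of the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define SNK0_MASK	0xfu			/* TB_LC_SNK_ALLOCATION_SNK0_MASK */
	#define SNK1_SHIFT	4
	#define SNK1_MASK	(0xfu << SNK1_SHIFT)	/* TB_LC_SNK_ALLOCATION_SNK1_MASK */
	#define SNK_CM		0x1u			/* allocated to the CM */

	/* Return 1 if the given sink (0 or 1) is free or already owned by the CM */
	static int sink_available(uint32_t val, int sink)
	{
		uint32_t alloc = sink ? (val & SNK1_MASK) >> SNK1_SHIFT : val & SNK0_MASK;

		return alloc == 0 || alloc == SNK_CM;
	}

	/* Claim the sink for the CM, mirroring what tb_lc_dp_sink_alloc() writes back */
	static uint32_t sink_alloc(uint32_t val, int sink)
	{
		if (sink) {
			val &= ~SNK1_MASK;
			val |= SNK_CM << SNK1_SHIFT;
		} else {
			val &= ~SNK0_MASK;
			val |= SNK_CM;
		}
		return val;
	}

	int main(void)
	{
		uint32_t reg = 0x20;	/* sink 1 taken by firmware, sink 0 free */

		printf("sink0 available: %d\n", sink_available(reg, 0));	/* 1 */
		printf("sink1 available: %d\n", sink_available(reg, 1));	/* 0 */
		printf("after alloc: %#x\n", sink_alloc(reg, 0));		/* 0x21 */
		return 0;
	}
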
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index afe5f8391ebf..ad58559ea88e 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -220,7 +220,8 @@ err:
* Creates path between two ports starting with given @src_hopid. Reserves
* HopIDs for each port (they can be different from @src_hopid depending on
* how many HopIDs each port already have reserved). If there are dual
- * links on the path, prioritizes using @link_nr.
+ * links on the path, prioritizes using @link_nr but takes into account
+ * that the lanes may be bonded.
*
* Return: Returns a tb_path on success or NULL on failure.
*/
@@ -259,7 +260,9 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
if (!in_port)
goto err;
- if (in_port->dual_link_port && in_port->link_nr != link_nr)
+ /* When lanes are bonded primary link must be used */
+ if (!in_port->bonded && in_port->dual_link_port &&
+ in_port->link_nr != link_nr)
in_port = in_port->dual_link_port;
ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
@@ -271,8 +274,27 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
if (!out_port)
goto err;
- if (out_port->dual_link_port && out_port->link_nr != link_nr)
- out_port = out_port->dual_link_port;
+ /*
+ * Pick up right port when going from non-bonded to
+ * bonded or from bonded to non-bonded.
+ */
+ if (out_port->dual_link_port) {
+ if (!in_port->bonded && out_port->bonded &&
+ out_port->link_nr) {
+ /*
+ * Use primary link when going from
+ * non-bonded to bonded.
+ */
+ out_port = out_port->dual_link_port;
+ } else if (!out_port->bonded &&
+ out_port->link_nr != link_nr) {
+ /*
+ * If out port is not bonded follow
+ * link_nr.
+ */
+ out_port = out_port->dual_link_port;
+ }
+ }
if (i == num_hops - 1)
ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
@@ -535,3 +557,25 @@ bool tb_path_is_invalid(struct tb_path *path)
}
return false;
}
+
+/**
+ * tb_path_switch_on_path() - Does the path go through certain switch
+ * @path: Path to check
+ * @sw: Switch to check
+ *
+ * Goes over all hops on path and checks if @sw is any of them.
+ * Direction does not matter.
+ */
+bool tb_path_switch_on_path(const struct tb_path *path,
+ const struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ if (path->hops[i].in_port->sw == sw ||
+ path->hops[i].out_port->sw == sw)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 5ea8db667e83..ca86a8e09c77 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -168,7 +168,7 @@ static int nvm_validate_and_write(struct tb_switch *sw)
static int nvm_authenticate_host(struct tb_switch *sw)
{
- int ret;
+ int ret = 0;
/*
* Root switch NVM upgrade requires that we disconnect the
@@ -176,6 +176,8 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* already).
*/
if (!sw->safe_mode) {
+ u32 status;
+
ret = tb_domain_disconnect_all_paths(sw->tb);
if (ret)
return ret;
@@ -184,7 +186,16 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* everything goes well so getting timeout is expected.
*/
ret = dma_port_flash_update_auth(sw->dma_port);
- return ret == -ETIMEDOUT ? 0 : ret;
+ if (!ret || ret == -ETIMEDOUT)
+ return 0;
+
+ /*
+ * Any error from update auth operation requires power
+ * cycling of the host router.
+ */
+ tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
+ if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
+ nvm_set_auth_status(sw, status);
}
/*
@@ -192,7 +203,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* switch.
*/
dma_port_power_cycle(sw->dma_port);
- return 0;
+ return ret;
}
static int nvm_authenticate_device(struct tb_switch *sw)
@@ -200,8 +211,16 @@ static int nvm_authenticate_device(struct tb_switch *sw)
int ret, retries = 10;
ret = dma_port_flash_update_auth(sw->dma_port);
- if (ret && ret != -ETIMEDOUT)
+ switch (ret) {
+ case 0:
+ case -ETIMEDOUT:
+ case -EACCES:
+ case -EINVAL:
+ /* Power cycle is required */
+ break;
+ default:
return ret;
+ }
/*
* Poll here for the authentication status. It takes some time
@@ -553,17 +572,17 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
if (credits == 0 || port->sw->is_unplugged)
return 0;
- nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
nfc_credits += credits;
- tb_port_dbg(port, "adding %d NFC credits to %lu",
- credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
+ tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
+ port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
- port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
+ port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
port->config.nfc_credits |= nfc_credits;
return tb_port_write(port, &port->config.nfc_credits,
- TB_CFG_PORT, 4, 1);
+ TB_CFG_PORT, ADP_CS_4, 1);
}
/**
@@ -578,14 +597,14 @@ int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
if (ret)
return ret;
- data &= ~TB_PORT_LCA_MASK;
- data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
+ data &= ~ADP_CS_5_LCA_MASK;
+ data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
- return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
+ return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}
/**
@@ -645,6 +664,7 @@ static int tb_init_port(struct tb_port *port)
ida_init(&port->out_hopids);
}
+ INIT_LIST_HEAD(&port->list);
return 0;
}
@@ -775,6 +795,132 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
return next;
}
+static int tb_port_get_link_speed(struct tb_port *port)
+{
+ u32 val, speed;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
+ LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
+ return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
+}
+
+static int tb_port_get_link_width(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
+ LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
+}
+
+static bool tb_port_is_width_supported(struct tb_port *port, int width)
+{
+ u32 phy, widths;
+ int ret;
+
+ if (!port->cap_phy)
+ return false;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+
+ return !!(widths & width);
+}
+
+static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
+ switch (width) {
+ case 1:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ case 2:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val |= LANE_ADP_CS_1_LB;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_lane_bonding_enable(struct tb_port *port)
+{
+ int ret;
+
+ /*
+ * Enable lane bonding for both links if not already enabled by,
+ * for example, the boot firmware.
+ */
+ ret = tb_port_get_link_width(port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port, 2);
+ if (ret)
+ return ret;
+ }
+
+ ret = tb_port_get_link_width(port->dual_link_port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port->dual_link_port, 2);
+ if (ret) {
+ tb_port_set_link_width(port, 1);
+ return ret;
+ }
+ }
+
+ port->bonded = true;
+ port->dual_link_port->bonded = true;
+
+ return 0;
+}
+
+static void tb_port_lane_bonding_disable(struct tb_port *port)
+{
+ port->dual_link_port->bonded = false;
+ port->bonded = false;
+
+ tb_port_set_link_width(port->dual_link_port, 1);
+ tb_port_set_link_width(port, 1);
+}
+
/**
* tb_port_is_enabled() - Is the adapter port enabled
* @port: Port to check
@@ -803,10 +949,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
{
u32 data;
- if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1))
return false;
- return !!(data & TB_PCI_EN);
+ return !!(data & ADP_PCIE_CS_0_PE);
}
/**
@@ -816,10 +963,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
*/
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
- u32 word = enable ? TB_PCI_EN : 0x0;
+ u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
if (!port->cap_adap)
return -ENXIO;
- return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1);
}
/**
@@ -833,11 +981,12 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
- return !!(data & TB_DP_HDP);
+ return !!(data & ADP_DP_CS_2_HDP);
}
/**
@@ -851,12 +1000,14 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
if (ret)
return ret;
- data |= TB_DP_HPDC;
- return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ data |= ADP_DP_CS_3_HDPC;
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
}
/**
@@ -874,20 +1025,23 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
u32 data[2];
int ret;
- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
- data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
- data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
+ data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
- data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
- data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
- data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
+ data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+ ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+ data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
+ ADP_DP_CS_1_AUX_RX_HOPID_MASK;
- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/**
@@ -898,11 +1052,11 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
{
u32 data[2];
- if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+ if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
ARRAY_SIZE(data)))
return false;
- return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+ return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}
/**
@@ -918,18 +1072,18 @@ int tb_dp_port_enable(struct tb_port *port, bool enable)
u32 data[2];
int ret;
- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
if (enable)
- data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+ data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
else
- data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+ data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/* switch utility functions */
@@ -986,7 +1140,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
u32 data;
int res;
- if (!sw->config.enabled)
+ if (tb_switch_is_icm(sw))
return 0;
sw->config.plug_events_delay = 0xff;
@@ -1114,6 +1268,15 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(device_name);
+static ssize_t
+generation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->generation);
+}
+static DEVICE_ATTR_RO(generation);
+
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1166,6 +1329,36 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
+}
+
+/*
+ * Currently all lanes must run at the same speed, but we expose both
+ * directions here to allow possible asymmetric links in the future.
+ */
+static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
+static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
+
+static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->link_width);
+}
+
+/*
+ * Currently the link has the same number of lanes in both directions (1
+ * or 2) but we expose them separately to allow possible asymmetric links
+ * in the future.
+ */
+static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
+static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
+
static void nvm_authenticate_start(struct tb_switch *sw)
{
struct pci_dev *root_port;
@@ -1246,8 +1439,6 @@ static ssize_t nvm_authenticate_store(struct device *dev,
*/
nvm_authenticate_start(sw);
ret = nvm_authenticate_host(sw);
- if (ret)
- nvm_authenticate_complete(sw);
} else {
ret = nvm_authenticate_device(sw);
}
@@ -1319,9 +1510,14 @@ static struct attribute *switch_attrs[] = {
&dev_attr_boot.attr,
&dev_attr_device.attr,
&dev_attr_device_name.attr,
+ &dev_attr_generation.attr,
&dev_attr_key.attr,
&dev_attr_nvm_authenticate.attr,
&dev_attr_nvm_version.attr,
+ &dev_attr_rx_speed.attr,
+ &dev_attr_rx_lanes.attr,
+ &dev_attr_tx_speed.attr,
+ &dev_attr_tx_lanes.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
&dev_attr_unique_id.attr,
@@ -1352,6 +1548,13 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
sw->security_level == TB_SECURITY_SECURE)
return attr->mode;
return 0;
+ } else if (attr == &dev_attr_rx_speed.attr ||
+ attr == &dev_attr_rx_lanes.attr ||
+ attr == &dev_attr_tx_speed.attr ||
+ attr == &dev_attr_tx_lanes.attr) {
+ if (tb_route(sw))
+ return attr->mode;
+ return 0;
} else if (attr == &dev_attr_nvm_authenticate.attr) {
if (sw->dma_port && !sw->no_nvm_upgrade)
return attr->mode;
@@ -1382,14 +1585,14 @@ static const struct attribute_group *switch_groups[] = {
static void tb_switch_release(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
- int i;
+ struct tb_port *port;
dma_port_free(sw->dma_port);
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (!sw->ports[i].disabled) {
- ida_destroy(&sw->ports[i].in_hopids);
- ida_destroy(&sw->ports[i].out_hopids);
+ tb_switch_for_each_port(sw, port) {
+ if (!port->disabled) {
+ ida_destroy(&port->in_hopids);
+ ida_destroy(&port->out_hopids);
}
}
@@ -1690,13 +1893,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
int ret;
switch (sw->generation) {
- case 3:
- break;
-
case 2:
/* Only root switch can be upgraded */
if (tb_route(sw))
return 0;
+
+ /* fallthrough */
+ case 3:
+ ret = tb_switch_set_uuid(sw);
+ if (ret)
+ return ret;
break;
default:
@@ -1710,7 +1916,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
}
/* Root switch DMA port requires running firmware */
- if (!tb_route(sw) && sw->config.enabled)
+ if (!tb_route(sw) && !tb_switch_is_icm(sw))
return 0;
sw->dma_port = dma_port_alloc(sw);
@@ -1721,6 +1927,19 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
return 0;
/*
+ * If there is a status already set then authentication failed
+ * when dma_port_flash_update_auth() returned. Power cycling is
+ * not needed (it was done already) so the only thing we do here
+ * is unblock runtime PM of the root port.
+ */
+ nvm_get_auth_status(sw, &status);
+ if (status) {
+ if (!tb_route(sw))
+ nvm_authenticate_complete(sw);
+ return 0;
+ }
+
+ /*
* Check status of the previous flash authentication. If there
* is one we need to power cycle the switch in any case to make
* it functional again.
@@ -1735,9 +1954,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
- ret = tb_switch_set_uuid(sw);
- if (ret)
- return ret;
nvm_set_auth_status(sw, status);
}
@@ -1751,6 +1967,153 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
return -ESHUTDOWN;
}
+static void tb_switch_default_link_ports(struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i += 2) {
+ struct tb_port *port = &sw->ports[i];
+ struct tb_port *subordinate;
+
+ if (!tb_port_is_null(port))
+ continue;
+
+ /* Check for the subordinate port */
+ if (i == sw->config.max_port_number ||
+ !tb_port_is_null(&sw->ports[i + 1]))
+ continue;
+
+ /* Link them if not already done so (by DROM) */
+ subordinate = &sw->ports[i + 1];
+ if (!port->dual_link_port && !subordinate->dual_link_port) {
+ port->link_nr = 0;
+ port->dual_link_port = subordinate;
+ subordinate->link_nr = 1;
+ subordinate->dual_link_port = port;
+
+ tb_sw_dbg(sw, "linked ports %d <-> %d\n",
+ port->port, subordinate->port);
+ }
+ }
+}
+
+static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ const struct tb_port *up = tb_upstream_port(sw);
+
+ if (!up->dual_link_port || !up->dual_link_port->remote)
+ return false;
+
+ return tb_lc_lane_bonding_possible(sw);
+}
+
+static int tb_switch_update_link_attributes(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ bool change = false;
+ int ret;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+
+ ret = tb_port_get_link_speed(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_speed != ret)
+ change = true;
+ sw->link_speed = ret;
+
+ ret = tb_port_get_link_width(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_width != ret)
+ change = true;
+ sw->link_width = ret;
+
+ /* Notify userspace of a possible link attribute change */
+ if (device_is_registered(&sw->dev) && change)
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * tb_switch_lane_bonding_enable() - Enable lane bonding
+ * @sw: Switch to enable lane bonding
+ *
+ * The connection manager can call this function to enable lane bonding of
+ * a switch. If the conditions are correct and both switches support the
+ * feature, the lanes are bonded. It is safe to call this for any switch.
+ */
+int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+ u64 route = tb_route(sw);
+ int ret;
+
+ if (!route)
+ return 0;
+
+ if (!tb_switch_lane_bonding_possible(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(route, parent);
+
+ if (!tb_port_is_width_supported(up, 2) ||
+ !tb_port_is_width_supported(down, 2))
+ return 0;
+
+ ret = tb_port_lane_bonding_enable(up);
+ if (ret) {
+ tb_port_warn(up, "failed to enable lane bonding\n");
+ return ret;
+ }
+
+ ret = tb_port_lane_bonding_enable(down);
+ if (ret) {
+ tb_port_warn(down, "failed to enable lane bonding\n");
+ tb_port_lane_bonding_disable(up);
+ return ret;
+ }
+
+ tb_switch_update_link_attributes(sw);
+
+ tb_sw_dbg(sw, "lane bonding enabled\n");
+ return ret;
+}
+
+/**
+ * tb_switch_lane_bonding_disable() - Disable lane bonding
+ * @sw: Switch whose lane bonding to disable
+ *
+ * Disables lane bonding between @sw and parent. This can be called even
+ * if lanes were not bonded originally.
+ */
+void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+
+ if (!tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ if (!up->bonded)
+ return;
+
+ down = tb_port_at(tb_route(sw), parent);
+
+ tb_port_lane_bonding_disable(up);
+ tb_port_lane_bonding_disable(down);
+
+ tb_switch_update_link_attributes(sw);
+ tb_sw_dbg(sw, "lane bonding disabled\n");
+}
+
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -1775,21 +2138,25 @@ int tb_switch_add(struct tb_switch *sw)
* configuration based mailbox.
*/
ret = tb_switch_add_dma_port(sw);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to add DMA port\n");
return ret;
+ }
if (!sw->safe_mode) {
/* read drom */
ret = tb_drom_read(sw);
if (ret) {
- tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+ dev_err(&sw->dev, "reading DROM failed\n");
return ret;
}
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
ret = tb_switch_set_uuid(sw);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to set UUID\n");
return ret;
+ }
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
@@ -1797,14 +2164,24 @@ int tb_switch_add(struct tb_switch *sw)
continue;
}
ret = tb_init_port(&sw->ports[i]);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to initialize port %d\n", i);
return ret;
+ }
}
+
+ tb_switch_default_link_ports(sw);
+
+ ret = tb_switch_update_link_attributes(sw);
+ if (ret)
+ return ret;
}
ret = device_add(&sw->dev);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to add device: %d\n", ret);
return ret;
+ }
if (tb_route(sw)) {
dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
@@ -1816,6 +2193,7 @@ int tb_switch_add(struct tb_switch *sw)
ret = tb_switch_nvm_add(sw);
if (ret) {
+ dev_err(&sw->dev, "failed to add NVM devices\n");
device_del(&sw->dev);
return ret;
}
@@ -1842,7 +2220,7 @@ int tb_switch_add(struct tb_switch *sw)
*/
void tb_switch_remove(struct tb_switch *sw)
{
- int i;
+ struct tb_port *port;
if (sw->rpm) {
pm_runtime_get_sync(&sw->dev);
@@ -1850,13 +2228,13 @@ void tb_switch_remove(struct tb_switch *sw)
}
/* port 0 is the switch itself and never has a remote */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i])) {
- tb_switch_remove(sw->ports[i].remote->sw);
- sw->ports[i].remote = NULL;
- } else if (sw->ports[i].xdomain) {
- tb_xdomain_remove(sw->ports[i].xdomain);
- sw->ports[i].xdomain = NULL;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port)) {
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ } else if (port->xdomain) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
}
}
@@ -1876,7 +2254,8 @@ void tb_switch_remove(struct tb_switch *sw)
*/
void tb_sw_set_unplugged(struct tb_switch *sw)
{
- int i;
+ struct tb_port *port;
+
if (sw == sw->tb->root_switch) {
tb_sw_WARN(sw, "cannot unplug root switch\n");
return;
@@ -1886,17 +2265,19 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
return;
}
sw->is_unplugged = true;
- for (i = 0; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_sw_set_unplugged(sw->ports[i].remote->sw);
- else if (sw->ports[i].xdomain)
- sw->ports[i].xdomain->is_unplugged = true;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_sw_set_unplugged(port->remote->sw);
+ else if (port->xdomain)
+ port->xdomain->is_unplugged = true;
}
}
int tb_switch_resume(struct tb_switch *sw)
{
- int i, err;
+ struct tb_port *port;
+ int err;
+
tb_sw_dbg(sw, "resuming switch\n");
/*
@@ -1944,9 +2325,7 @@ int tb_switch_resume(struct tb_switch *sw)
return err;
/* check for surviving downstream switches */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
-
+ tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
continue;
@@ -1970,19 +2349,64 @@ int tb_switch_resume(struct tb_switch *sw)
void tb_switch_suspend(struct tb_switch *sw)
{
- int i, err;
+ struct tb_port *port;
+ int err;
+
err = tb_plug_events_active(sw, false);
if (err)
return;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_switch_suspend(sw->ports[i].remote->sw);
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_switch_suspend(port->remote->sw);
}
tb_lc_set_sleep(sw);
}
+/**
+ * tb_switch_query_dp_resource() - Query availability of DP resource
+ * @sw: Switch whose DP resource is queried
+ * @in: DP IN port
+ *
+ * Queries availability of DP resource for DP tunneling using switch
+ * specific means. Returns %true if resource is available.
+ */
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ return tb_lc_dp_sink_query(sw, in);
+}
+
+/**
+ * tb_switch_alloc_dp_resource() - Allocate available DP resource
+ * @sw: Switch whose DP resource is allocated
+ * @in: DP IN port
+ *
+ * Allocates DP resource for DP tunneling. The resource must be
+ * available for this to succeed (see tb_switch_query_dp_resource()).
+ * Returns %0 in success and negative errno otherwise.
+ */
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ return tb_lc_dp_sink_alloc(sw, in);
+}
+
+/**
+ * tb_switch_dealloc_dp_resource() - De-allocate DP resource
+ * @sw: Switch whose DP resource is de-allocated
+ * @in: DP IN port
+ *
+ * De-allocates DP resource that was previously allocated for DP
+ * tunneling.
+ */
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ if (tb_lc_dp_sink_dealloc(sw, in)) {
+ tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
+ in->port);
+ }
+}
+
struct tb_sw_lookup {
struct tb *tb;
u8 link;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1f7a9e1cc09c..ea8727f769d6 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/platform_data/x86/apple.h>
#include "tb.h"
#include "tb_regs.h"
@@ -18,6 +17,7 @@
/**
* struct tb_cm - Simple Thunderbolt connection manager
* @tunnel_list: List of active tunnels
+ * @dp_resources: List of available DP resources for DP tunneling
* @hotplug_active: tb_handle_hotplug will stop progressing plug
* events and exit if this is not set (it needs to
* acquire the lock one more time). Used to drain wq
@@ -25,6 +25,7 @@
*/
struct tb_cm {
struct list_head tunnel_list;
+ struct list_head dp_resources;
bool hotplug_active;
};
@@ -56,17 +57,51 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
/* enumeration & hot plug handling */
+static void tb_add_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (!tb_switch_query_dp_resource(sw, port))
+ continue;
+
+ list_add_tail(&port->list, &tcm->dp_resources);
+ tb_port_dbg(port, "DP IN resource available\n");
+ }
+}
+
+static void tb_remove_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port, *tmp;
+
+ /* Clear children resources first */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_remove_dp_resources(port->remote->sw);
+ }
+
+ list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
+ if (port->sw == sw) {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ list_del_init(&port->list);
+ }
+ }
+}
+
static void tb_discover_tunnels(struct tb_switch *sw)
{
struct tb *tb = sw->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port;
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++) {
+ tb_switch_for_each_port(sw, port) {
struct tb_tunnel *tunnel = NULL;
- port = &sw->ports[i];
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port);
@@ -95,9 +130,9 @@ static void tb_discover_tunnels(struct tb_switch *sw)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
}
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_discover_tunnels(sw->ports[i].remote->sw);
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_discover_tunnels(port->remote->sw);
}
}
@@ -130,9 +165,10 @@ static void tb_scan_port(struct tb_port *port);
*/
static void tb_scan_switch(struct tb_switch *sw)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++)
- tb_scan_port(&sw->ports[i]);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port)
+ tb_scan_port(port);
}
/**
@@ -217,11 +253,16 @@ static void tb_scan_port(struct tb_port *port)
upstream_port->dual_link_port->remote = port->dual_link_port;
}
+ /* Enable lane bonding if supported */
+ if (tb_switch_lane_bonding_enable(sw))
+ tb_sw_warn(sw, "failed to enable lane bonding\n");
+
tb_scan_switch(sw);
}
-static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
- struct tb_port *src_port, struct tb_port *dst_port)
+static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
+ struct tb_port *src_port,
+ struct tb_port *dst_port)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
@@ -230,14 +271,32 @@ static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
if (tunnel->type == type &&
((src_port && src_port == tunnel->src_port) ||
(dst_port && dst_port == tunnel->dst_port))) {
- tb_tunnel_deactivate(tunnel);
- list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
- return 0;
+ return tunnel;
}
}
- return -ENODEV;
+ return NULL;
+}
+
+static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
+{
+ if (!tunnel)
+ return;
+
+ tb_tunnel_deactivate(tunnel);
+ list_del(&tunnel->list);
+
+ /*
+ * In case of DP tunnel make sure the DP IN resource is deallocated
+ * properly.
+ */
+ if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+
+ tb_switch_dealloc_dp_resource(in->sw, in);
+ }
+
+ tb_tunnel_free(tunnel);
}
/**
@@ -250,11 +309,8 @@ static void tb_free_invalid_tunnels(struct tb *tb)
struct tb_tunnel *n;
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_invalid(tunnel)) {
- tb_tunnel_deactivate(tunnel);
- list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
- }
+ if (tb_tunnel_is_invalid(tunnel))
+ tb_deactivate_and_free_tunnel(tunnel);
}
}
@@ -263,14 +319,15 @@ static void tb_free_invalid_tunnels(struct tb *tb)
*/
static void tb_free_unplugged_children(struct tb_switch *sw)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
if (port->remote->sw->is_unplugged) {
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
@@ -289,10 +346,13 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
static struct tb_port *tb_find_port(struct tb_switch *sw,
enum tb_port_type type)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++)
- if (sw->ports[i].config.type == type)
- return &sw->ports[i];
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->config.type == type)
+ return port;
+ }
+
return NULL;
}
@@ -304,18 +364,18 @@ static struct tb_port *tb_find_port(struct tb_switch *sw,
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
enum tb_port_type type)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_is_upstream_port(&sw->ports[i]))
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
continue;
- if (sw->ports[i].config.type != type)
+ if (port->config.type != type)
continue;
- if (!sw->ports[i].cap_adap)
+ if (port->cap_adap)
continue;
- if (tb_port_is_enabled(&sw->ports[i]))
+ if (tb_port_is_enabled(port))
continue;
- return &sw->ports[i];
+ return port;
}
return NULL;
}
@@ -336,10 +396,13 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
* Hard-coded Thunderbolt port to PCIe down port mapping
* per controller.
*/
- if (tb_switch_is_cr(sw))
+ if (tb_switch_is_cactus_ridge(sw) ||
+ tb_switch_is_alpine_ridge(sw))
index = !phy_port ? 6 : 7;
- else if (tb_switch_is_fr(sw))
+ else if (tb_switch_is_falcon_ridge(sw))
index = !phy_port ? 6 : 8;
+ else if (tb_switch_is_titan_ridge(sw))
+ index = !phy_port ? 8 : 9;
else
goto out;
@@ -358,42 +421,162 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
+static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
{
- struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw = out->sw;
struct tb_tunnel *tunnel;
- struct tb_port *in;
+ int bw, available_bw = 40000;
- if (tb_port_is_enabled(out))
- return 0;
+ while (sw && sw != in->sw) {
+ bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
+ /* Leave 10% guard band */
+ bw -= bw / 10;
+
+ /*
+ * Check for any active DP tunnels that go through this
+ * switch and subtract their consumed bandwidth from the
+ * available bandwidth.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ int consumed_bw;
+
+ if (!tb_tunnel_switch_on_path(tunnel, sw))
+ continue;
+
+ consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
+ if (consumed_bw < 0)
+ return consumed_bw;
+
+ bw -= consumed_bw;
+ }
- do {
- sw = tb_to_switch(sw->dev.parent);
- if (!sw)
- return 0;
- in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
- } while (!in);
+ if (bw < available_bw)
+ available_bw = bw;
- tunnel = tb_tunnel_alloc_dp(tb, in, out);
+ sw = tb_switch_parent(sw);
+ }
+
+ return available_bw;
+}
+
+static void tb_tunnel_dp(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+ struct tb_tunnel *tunnel;
+ int available_bw;
+
+ /*
+ * Find pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "in use\n");
+ continue;
+ }
+
+ tb_port_dbg(port, "available\n");
+
+ if (!in && tb_port_is_dpin(port))
+ in = port;
+ else if (!out && tb_port_is_dpout(port))
+ out = port;
+ }
+
+ if (!in) {
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+ return;
+ }
+ if (!out) {
+ tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+ return;
+ }
+
+ if (tb_switch_alloc_dp_resource(in->sw, in)) {
+ tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
+ return;
+ }
+
+ /* Calculate available bandwidth between in and out */
+ available_bw = tb_available_bw(tcm, in, out);
+ if (available_bw < 0) {
+ tb_warn(tb, "failed to determine available bandwidth\n");
+ return;
+ }
+
+ tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
+ available_bw);
+
+ tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
if (!tunnel) {
- tb_port_dbg(out, "DP tunnel allocation failed\n");
- return -ENOMEM;
+ tb_port_dbg(out, "could not allocate DP tunnel\n");
+ goto dealloc_dp;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
tb_tunnel_free(tunnel);
- return -EIO;
+ goto dealloc_dp;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- return 0;
+ return;
+
+dealloc_dp:
+ tb_switch_dealloc_dp_resource(in->sw, in);
}
-static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
- tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ if (tb_port_is_dpin(port)) {
+ tb_port_dbg(port, "DP IN resource unavailable\n");
+ in = port;
+ out = NULL;
+ } else {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ in = NULL;
+ out = port;
+ }
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+ tb_deactivate_and_free_tunnel(tunnel);
+ list_del_init(&port->list);
+
+ /*
+ * See if there is another DP OUT port that can be used to
+ * create another tunnel.
+ */
+ tb_tunnel_dp(tb);
+}
+
+static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *p;
+
+ if (tb_port_is_enabled(port))
+ return;
+
+ list_for_each_entry(p, &tcm->dp_resources, list) {
+ if (p == port)
+ return;
+ }
+
+ tb_port_dbg(port, "DP %s resource available\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_add_tail(&port->list, &tcm->dp_resources);
+
+ /* Look for suitable DP IN <-> DP OUT pairs now */
+ tb_tunnel_dp(tb);
}
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
@@ -468,6 +651,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
struct tb_port *dst_port;
+ struct tb_tunnel *tunnel;
struct tb_switch *sw;
sw = tb_to_switch(xd->dev.parent);
@@ -478,7 +662,8 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
* case of cable disconnect) so it is fine if we cannot find it
* here anymore.
*/
- tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tb_deactivate_and_free_tunnel(tunnel);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
@@ -533,10 +718,14 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_port_dbg(port, "switch unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
+ /* Maybe we can create another DP tunnel */
+ tb_tunnel_dp(tb);
} else if (port->xdomain) {
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
@@ -553,8 +742,8 @@ static void tb_handle_hotplug(struct work_struct *work)
port->xdomain = NULL;
__tb_disconnect_xdomain_paths(tb, xd);
tb_xdomain_put(xd);
- } else if (tb_port_is_dpout(port)) {
- tb_teardown_dp(tb, port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_unavailable(tb, port);
} else {
tb_port_dbg(port,
"got unplug event for disconnected port, ignoring\n");
@@ -567,8 +756,8 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_scan_port(port);
if (!port->remote)
tb_port_dbg(port, "hotplug: no switch found\n");
- } else if (tb_port_is_dpout(port)) {
- tb_tunnel_dp(tb, port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_available(tb, port);
}
}
@@ -681,6 +870,8 @@ static int tb_start(struct tb *tb)
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb->root_switch);
+ /* Add DP IN resources for the root switch */
+ tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
@@ -702,6 +893,21 @@ static int tb_suspend_noirq(struct tb *tb)
return 0;
}
+static void tb_restore_children(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+
+ if (tb_switch_lane_bonding_enable(port->remote->sw))
+ dev_warn(&sw->dev, "failed to restore lane bonding\n");
+
+ tb_restore_children(port->remote->sw);
+ }
+}
+
static int tb_resume_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
@@ -715,6 +921,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
+ tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_tunnel_restart(tunnel);
if (!list_empty(&tcm->tunnel_list)) {
@@ -734,11 +941,10 @@ static int tb_resume_noirq(struct tb *tb)
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
- int i, ret = 0;
-
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ int ret = 0;
+ tb_switch_for_each_port(sw, port) {
if (tb_is_upstream_port(port))
continue;
if (port->xdomain && port->xdomain->is_unplugged) {
@@ -783,9 +989,6 @@ struct tb *tb_probe(struct tb_nhi *nhi)
struct tb_cm *tcm;
struct tb *tb;
- if (!x86_apple_machine)
- return NULL;
-
tb = tb_domain_alloc(nhi, sizeof(*tcm));
if (!tb)
return NULL;
@@ -795,6 +998,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
tcm = tb_priv(tb);
INIT_LIST_HEAD(&tcm->tunnel_list);
+ INIT_LIST_HEAD(&tcm->dp_resources);
return tb;
}
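
The available-bandwidth walk in tb_available_bw() above budgets every link between the DP OUT adapter's switch and the DP IN switch at link_speed * link_width * 1000 Mb/s, drops 10% as a guard band, and then subtracts the bandwidth consumed by DP tunnels already running through that switch. A standalone sketch of the per-link arithmetic (the helper name is illustrative, not part of the driver):

	#include <stdio.h>

	/*
	 * Per-link budget as used in tb_available_bw(): raw link bandwidth
	 * in Mb/s with a 10% guard band subtracted.
	 */
	static int link_budget_mbps(int link_speed_gbps, int link_width)
	{
		int bw = link_speed_gbps * link_width * 1000;

		return bw - bw / 10;
	}

	int main(void)
	{
		/* Bonded Gen3 link: 20 Gb/s x 2 lanes */
		printf("%d Mb/s\n", link_budget_mbps(20, 2));	/* 36000 */
		/* Single non-bonded Gen2 lane */
		printf("%d Mb/s\n", link_budget_mbps(10, 1));	/* 9000 */
		return 0;
	}

The smallest such per-link budget along the path, further reduced by existing tunnels, is what tb_tunnel_dp() passes to tb_tunnel_alloc_dp().
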
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 6407d529871d..ec851f20c571 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -61,6 +61,8 @@ struct tb_switch_nvm {
* @device: Device ID of the switch
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the link (1 or 2)
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
* @cap_lc: Offset to the link controller capability (%0 if not found)
@@ -97,6 +99,8 @@ struct tb_switch {
u16 device;
const char *vendor_name;
const char *device_name;
+ unsigned int link_speed;
+ unsigned int link_width;
unsigned int generation;
int cap_plug_events;
int cap_lc;
@@ -127,11 +131,13 @@ struct tb_switch {
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
* @port: Port number on switch
* @disabled: Disabled by eeprom
+ * @bonded: true if the port is bonded (two lanes combined as one)
* @dual_link_port: If the switch is connected using two ports, points
* to the other port.
* @link_nr: Is this primary or secondary port on the dual_link.
* @in_hopids: Currently allocated input HopIDs
* @out_hopids: Currently allocated output HopIDs
+ * @list: Used to link ports to DP resources list
*/
struct tb_port {
struct tb_regs_port_header config;
@@ -142,10 +148,12 @@ struct tb_port {
int cap_adap;
u8 port;
bool disabled;
+ bool bonded;
struct tb_port *dual_link_port;
u8 link_nr:1;
struct ida in_hopids;
struct ida out_hopids;
+ struct list_head list;
};
/**
@@ -399,7 +407,7 @@ static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
length);
}
-static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
if (sw->is_unplugged)
@@ -530,6 +538,17 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
+/**
+ * tb_switch_for_each_port() - Iterate over each switch port
+ * @sw: Switch whose ports to iterate
+ * @p: Port used as iterator
+ *
+ * Iterates over each switch port skipping the control port (port %0).
+ */
+#define tb_switch_for_each_port(sw, p) \
+ for ((p) = &(sw)->ports[1]; \
+ (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
+
static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
{
if (sw)
@@ -559,17 +578,17 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
return tb_to_switch(sw->dev.parent);
}
-static inline bool tb_switch_is_lr(const struct tb_switch *sw)
+static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}
-static inline bool tb_switch_is_er(const struct tb_switch *sw)
+static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
{
return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}
-static inline bool tb_switch_is_cr(const struct tb_switch *sw)
+static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
{
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
@@ -580,7 +599,7 @@ static inline bool tb_switch_is_cr(const struct tb_switch *sw)
}
}
-static inline bool tb_switch_is_fr(const struct tb_switch *sw)
+static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
{
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
@@ -591,6 +610,52 @@ static inline bool tb_switch_is_fr(const struct tb_switch *sw)
}
}
+static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * tb_switch_is_icm() - Is the switch handled by ICM firmware
+ * @sw: Switch to check
+ *
+ * This function can be called when there is a need to differentiate
+ * whether the ICM firmware or the software CM is handling @sw. It is
+ * valid to call this after tb_switch_alloc() and tb_switch_configure()
+ * have been called (the latter only in the SW CM case).
+ */
+static inline bool tb_switch_is_icm(const struct tb_switch *sw)
+{
+ return !sw->config.enabled;
+}
+
+int tb_switch_lane_bonding_enable(struct tb_switch *sw);
+void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
@@ -626,6 +691,8 @@ void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
+bool tb_path_switch_on_path(const struct tb_path *path,
+ const struct tb_switch *sw);
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
@@ -634,6 +701,10 @@ int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_configure_link(struct tb_switch *sw);
void tb_lc_unconfigure_link(struct tb_switch *sw);
int tb_lc_set_sleep(struct tb_switch *sw);
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
static inline int tb_route_length(u64 route)
{
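
The tb_switch_for_each_port() iterator added above walks ports 1 through max_port_number inclusive, deliberately skipping port 0, which represents the switch itself rather than an adapter. A standalone sketch of the same iteration pattern using mocked-up structures (not the driver's real types):

	#include <stdio.h>

	struct port {
		int num;
	};

	struct mock_switch {
		int max_port_number;
		struct port ports[8];	/* ports[0] stands in for the switch itself */
	};

	/* Same shape as tb_switch_for_each_port(): start at 1, inclusive upper bound */
	#define mock_for_each_port(sw, p) \
		for ((p) = &(sw)->ports[1]; \
		     (p) <= &(sw)->ports[(sw)->max_port_number]; (p)++)

	int main(void)
	{
		struct mock_switch sw = { .max_port_number = 4 };
		struct port *p;
		int i;

		for (i = 0; i <= sw.max_port_number; i++)
			sw.ports[i].num = i;

		mock_for_each_port(&sw, p)
			printf("port %d\n", p->num);	/* prints ports 1..4 */
		return 0;
	}
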
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 4b641e4ee0c5..3705057723b6 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -122,6 +122,8 @@ struct icm_pkg_header {
#define ICM_FLAGS_NO_KEY BIT(1)
#define ICM_FLAGS_SLEVEL_SHIFT 3
#define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3)
+#define ICM_FLAGS_DUAL_LANE BIT(5)
+#define ICM_FLAGS_SPEED_GEN3 BIT(7)
#define ICM_FLAGS_WRITE BIT(7)
struct icm_pkg_driver_ready {
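
These two new header flags are what __icm_tr_device_connected() above turns into the switch's link_speed and link_width fields: ICM_FLAGS_SPEED_GEN3 selects 20 Gb/s over 10 Gb/s and ICM_FLAGS_DUAL_LANE selects two bonded lanes over one. A tiny standalone sketch of that decoding (flag values copied from this header):

	#include <stdint.h>
	#include <stdio.h>

	#define ICM_FLAGS_DUAL_LANE	(1u << 5)
	#define ICM_FLAGS_SPEED_GEN3	(1u << 7)

	int main(void)
	{
		uint32_t flags = ICM_FLAGS_DUAL_LANE | ICM_FLAGS_SPEED_GEN3;
		unsigned int link_speed = (flags & ICM_FLAGS_SPEED_GEN3) ? 20 : 10;
		unsigned int link_width = (flags & ICM_FLAGS_DUAL_LANE) ? 2 : 1;

		printf("%u Gb/s, %u lane(s)\n", link_speed, link_width);	/* 20 Gb/s, 2 lane(s) */
		return 0;
	}
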
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index deb9d4a977b9..7ee45b73c7f7 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -211,37 +211,71 @@ struct tb_regs_port_header {
} __packed;
-/* DWORD 4 */
-#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0)
-#define TB_PORT_MAX_CREDITS_SHIFT 20
-#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20)
-/* DWORD 5 */
-#define TB_PORT_LCA_SHIFT 22
-#define TB_PORT_LCA_MASK GENMASK(28, 22)
+/* Basic adapter configuration registers */
+#define ADP_CS_4 0x04
+#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0)
+#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20)
+#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20
+#define ADP_CS_5 0x05
+#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
+#define ADP_CS_5_LCA_SHIFT 22
+
+/* Lane adapter registers */
+#define LANE_ADP_CS_0 0x00
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_1 0x01
+#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
+#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
+#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
+#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_LB BIT(15)
+#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
+#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
+#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
/* Display Port adapter registers */
-
-/* DWORD 0 */
-#define TB_DP_VIDEO_HOPID_SHIFT 16
-#define TB_DP_VIDEO_HOPID_MASK GENMASK(26, 16)
-#define TB_DP_AUX_EN BIT(30)
-#define TB_DP_VIDEO_EN BIT(31)
-/* DWORD 1 */
-#define TB_DP_AUX_TX_HOPID_MASK GENMASK(10, 0)
-#define TB_DP_AUX_RX_HOPID_SHIFT 11
-#define TB_DP_AUX_RX_HOPID_MASK GENMASK(21, 11)
-/* DWORD 2 */
-#define TB_DP_HDP BIT(6)
-/* DWORD 3 */
-#define TB_DP_HPDC BIT(9)
-/* DWORD 4 */
-#define TB_DP_LOCAL_CAP 0x4
-/* DWORD 5 */
-#define TB_DP_REMOTE_CAP 0x5
+#define ADP_DP_CS_0 0x00
+#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16)
+#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT 16
+#define ADP_DP_CS_0_AE BIT(30)
+#define ADP_DP_CS_0_VE BIT(31)
+#define ADP_DP_CS_1_AUX_TX_HOPID_MASK GENMASK(10, 0)
+#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11)
+#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
+#define ADP_DP_CS_2 0x02
+#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_3 0x03
+#define ADP_DP_CS_3_HDPC BIT(9)
+#define DP_LOCAL_CAP 0x04
+#define DP_REMOTE_CAP 0x05
+#define DP_STATUS_CTRL 0x06
+#define DP_STATUS_CTRL_CMHS BIT(25)
+#define DP_STATUS_CTRL_UF BIT(26)
+#define DP_COMMON_CAP 0x07
+/*
+ * DP_COMMON_CAP offsets also work for DP_LOCAL_CAP and DP_REMOTE_CAP
+ * with the exception of DPRX done.
+ */
+#define DP_COMMON_CAP_RATE_MASK GENMASK(11, 8)
+#define DP_COMMON_CAP_RATE_SHIFT 8
+#define DP_COMMON_CAP_RATE_RBR 0x0
+#define DP_COMMON_CAP_RATE_HBR 0x1
+#define DP_COMMON_CAP_RATE_HBR2 0x2
+#define DP_COMMON_CAP_RATE_HBR3 0x3
+#define DP_COMMON_CAP_LANES_MASK GENMASK(14, 12)
+#define DP_COMMON_CAP_LANES_SHIFT 12
+#define DP_COMMON_CAP_1_LANE 0x0
+#define DP_COMMON_CAP_2_LANES 0x1
+#define DP_COMMON_CAP_4_LANES 0x2
+#define DP_COMMON_CAP_DPRX_DONE BIT(31)
/* PCIe adapter registers */
-
-#define TB_PCI_EN BIT(31)
+#define ADP_PCIE_CS_0 0x00
+#define ADP_PCIE_CS_0_PE BIT(31)
/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
@@ -278,8 +312,17 @@ struct tb_regs_hop {
#define TB_LC_DESC_PORT_SIZE_SHIFT 16
#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
#define TB_LC_FUSE 0x03
+#define TB_LC_SNK_ALLOCATION 0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
/* Link controller registers */
+#define TB_LC_PORT_ATTR 0x8d
+#define TB_LC_PORT_ATTR_BE BIT(12)
+
#define TB_LC_SX_CTRL 0x96
#define TB_LC_SX_CTRL_L1C BIT(16)
#define TB_LC_SX_CTRL_L2C BIT(20)
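
The new lane adapter constants above are what tb_port_get_link_speed() and tb_port_get_link_width() in switch.c read out of LANE_ADP_CS_1: a CURRENT_SPEED field value of 0x4 means Gen3 (reported as 20 Gb/s, otherwise 10 Gb/s) and the CURRENT_WIDTH field carries the number of lanes. A standalone sketch of that decoding with the GENMASK values spelled out as plain hex (the example register value is made up):

	#include <stdint.h>
	#include <stdio.h>

	#define CS1_CURRENT_SPEED_MASK	0x000f0000u	/* GENMASK(19, 16) */
	#define CS1_CURRENT_SPEED_SHIFT	16
	#define CS1_CURRENT_SPEED_GEN3	0x4u
	#define CS1_CURRENT_WIDTH_MASK	0x03f00000u	/* GENMASK(25, 20) */
	#define CS1_CURRENT_WIDTH_SHIFT	20

	int main(void)
	{
		uint32_t cs1 = 0x00240000;	/* hypothetical LANE_ADP_CS_1 readout */
		uint32_t speed = (cs1 & CS1_CURRENT_SPEED_MASK) >> CS1_CURRENT_SPEED_SHIFT;
		uint32_t width = (cs1 & CS1_CURRENT_WIDTH_MASK) >> CS1_CURRENT_WIDTH_SHIFT;

		printf("link speed: %u Gb/s\n", speed == CS1_CURRENT_SPEED_GEN3 ? 20 : 10);
		printf("link width: %u lane(s)\n", width);
		return 0;
	}
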
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 5a99234826e7..0d3463c4e24a 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -6,6 +6,7 @@
* Copyright (C) 2019, Intel Corporation
*/
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -90,6 +91,22 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
return 0;
}
+static int tb_initial_credits(const struct tb_switch *sw)
+{
+	/* If the path is complete, sw is not NULL */
+ if (sw) {
+ /* More credits for faster link */
+ switch (sw->link_speed * sw->link_width) {
+ case 40:
+ return 32;
+ case 20:
+ return 24;
+ }
+ }
+
+ return 16;
+}
+
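The multiplier here is only a heuristic keyed off the link: assuming link_speed is stored in Gb/s per lane and link_width counts bonded lanes (as elsewhere in this series), 40 corresponds to a bonded 20 Gb/s x2 link and gets 32 initial credits, 20 covers 20 Gb/s x1 or 10 Gb/s x2 and gets 24, and anything else, including the NULL-switch case of an incomplete path, falls back to the previous default of 16.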
static void tb_pci_init_path(struct tb_path *path)
{
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
@@ -101,7 +118,8 @@ static void tb_pci_init_path(struct tb_path *path)
path->drop_packages = 0;
path->nfc_credits = 0;
path->hops[0].initial_credits = 7;
- path->hops[1].initial_credits = 16;
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
}
/**
@@ -225,11 +243,174 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
}
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+{
+ int timeout = 10;
+ u32 val;
+ int ret;
+
+ /* Both ends need to support this */
+ if (!tb_switch_is_titan_ridge(in->sw) ||
+ !tb_switch_is_titan_ridge(out->sw))
+ return 0;
+
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
+
+ ret = tb_port_write(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ do {
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+ if (!(val & DP_STATUS_CTRL_CMHS))
+ return 0;
+ usleep_range(10, 100);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static inline u32 tb_dp_cap_get_rate(u32 val)
+{
+ u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
+
+ switch (rate) {
+ case DP_COMMON_CAP_RATE_RBR:
+ return 1620;
+ case DP_COMMON_CAP_RATE_HBR:
+ return 2700;
+ case DP_COMMON_CAP_RATE_HBR2:
+ return 5400;
+ case DP_COMMON_CAP_RATE_HBR3:
+ return 8100;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
+{
+ val &= ~DP_COMMON_CAP_RATE_MASK;
+ switch (rate) {
+ default:
+		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
+ /* Fallthrough */
+ case 1620:
+ val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 2700:
+ val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 5400:
+ val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 8100:
+ val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static inline u32 tb_dp_cap_get_lanes(u32 val)
+{
+ u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
+
+ switch (lanes) {
+ case DP_COMMON_CAP_1_LANE:
+ return 1;
+ case DP_COMMON_CAP_2_LANES:
+ return 2;
+ case DP_COMMON_CAP_4_LANES:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
+{
+ val &= ~DP_COMMON_CAP_LANES_MASK;
+ switch (lanes) {
+ default:
+ WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
+ lanes);
+ /* Fallthrough */
+ case 1:
+ val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 2:
+ val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 4:
+ val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
+{
+ /* Tunneling removes the DP 8b/10b encoding */
+ return rate * lanes * 8 / 10;
+}
+
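As a worked example of the 8b/10b adjustment: HBR2 over four lanes is 5400 * 4 * 8 / 10 = 17280 Mb/s, which matches the figure annotated next to the { 5400, 4 } entry in the table below.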
+static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
+ u32 out_rate, u32 out_lanes, u32 *new_rate,
+ u32 *new_lanes)
+{
+ static const u32 dp_bw[][2] = {
+ /* Mb/s, lanes */
+ { 8100, 4 }, /* 25920 Mb/s */
+ { 5400, 4 }, /* 17280 Mb/s */
+ { 8100, 2 }, /* 12960 Mb/s */
+ { 2700, 4 }, /* 8640 Mb/s */
+ { 5400, 2 }, /* 8640 Mb/s */
+ { 8100, 1 }, /* 6480 Mb/s */
+ { 1620, 4 }, /* 5184 Mb/s */
+ { 5400, 1 }, /* 4320 Mb/s */
+ { 2700, 2 }, /* 4320 Mb/s */
+ { 1620, 2 }, /* 2592 Mb/s */
+ { 2700, 1 }, /* 2160 Mb/s */
+ { 1620, 1 }, /* 1296 Mb/s */
+ };
+ unsigned int i;
+
+ /*
+ * Find a combination that can fit into max_bw and does not
+ * exceed the maximum rate and lanes supported by the DP OUT and
+ * DP IN adapters.
+ */
+ for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
+ if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
+ continue;
+
+ if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
+ continue;
+
+ if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
+ *new_rate = dp_bw[i][0];
+ *new_lanes = dp_bw[i][1];
+ return 0;
+ }
+ }
+
+ return -ENOSR;
+}
+
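A rough illustration of how the table above is walked, with hypothetical numbers rather than anything taken from the patch: if both adapters advertise HBR3 x4 but the tunnel is capped at 16000 Mb/s, the first entry that fits both the adapter caps and the limit is 8100 Mb/s on two lanes:

	u32 rate, lanes;

	/* Hypothetical: both ends support HBR3 x4, tunnel capped at 16000 Mb/s */
	if (!tb_dp_reduce_bandwidth(16000, 8100, 4, 8100, 4, &rate, &lanes)) {
		/* rate == 8100, lanes == 2, i.e. tb_dp_bandwidth() == 12960 Mb/s */
	}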
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
+ u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- u32 in_dp_cap, out_dp_cap;
int ret;
/*
@@ -239,25 +420,71 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
if (in->sw->generation < 2 || out->sw->generation < 2)
return 0;
+ /*
+ * Perform connection manager handshake between IN and OUT ports
+ * before capabilities exchange can take place.
+ */
+ ret = tb_dp_cm_handshake(in, out);
+ if (ret)
+ return ret;
+
/* Read both DP_LOCAL_CAP registers */
ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
- in->cap_adap + TB_DP_LOCAL_CAP, 1);
+ in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
- out->cap_adap + TB_DP_LOCAL_CAP, 1);
+ out->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
/* Write IN local caps to OUT remote caps */
ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
- out->cap_adap + TB_DP_REMOTE_CAP, 1);
+ out->cap_adap + DP_REMOTE_CAP, 1);
if (ret)
return ret;
+ in_rate = tb_dp_cap_get_rate(in_dp_cap);
+ in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+ tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+
+ /*
+ * If the tunnel bandwidth is limited (max_bw is set) then see
+ * if we need to reduce bandwidth to fit there.
+ */
+ out_rate = tb_dp_cap_get_rate(out_dp_cap);
+ out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+ bw = tb_dp_bandwidth(out_rate, out_lanes);
+ tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ out_rate, out_lanes, bw);
+
+ if (tunnel->max_bw && bw > tunnel->max_bw) {
+ u32 new_rate, new_lanes, new_bw;
+
+ ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
+ out_rate, out_lanes, &new_rate,
+ &new_lanes);
+ if (ret) {
+ tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+ return ret;
+ }
+
+ new_bw = tb_dp_bandwidth(new_rate, new_lanes);
+ tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+ new_rate, new_lanes, new_bw);
+
+ /*
+ * Set new rate and number of lanes before writing it to
+ * the IN port remote caps.
+ */
+ out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
+ out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
+ }
+
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
- in->cap_adap + TB_DP_REMOTE_CAP, 1);
+ in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
@@ -297,6 +524,56 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
+static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in = tunnel->src_port;
+ const struct tb_switch *sw = in->sw;
+ u32 val, rate = 0, lanes = 0;
+ int ret;
+
+ if (tb_switch_is_titan_ridge(sw)) {
+ int timeout = 10;
+
+ /*
+		 * Wait for DPRX done. Normally it should already be set
+		 * for an active tunnel.
+ */
+ do {
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE) {
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ break;
+ }
+ msleep(250);
+ } while (timeout--);
+
+		if (!(val & DP_COMMON_CAP_DPRX_DONE))
+ return -ETIMEDOUT;
+ } else if (sw->generation >= 2) {
+ /*
+ * Read from the copied remote cap so that we take into
+ * account if capabilities were reduced during exchange.
+ */
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_REMOTE_CAP, 1);
+ if (ret)
+ return ret;
+
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ } else {
+ /* No bandwidth management for legacy devices */
+ return 0;
+ }
+
+ return tb_dp_bandwidth(rate, lanes);
+}
+
static void tb_dp_init_aux_path(struct tb_path *path)
{
int i;
@@ -324,12 +601,12 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover)
path->weight = 1;
if (discover) {
- path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
} else {
u32 max_credits;
- max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
- TB_PORT_MAX_CREDITS_SHIFT;
+ max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
/* Leave some credits for AUX path */
path->nfc_credits = min(max_credits - 2, 12U);
}
@@ -361,6 +638,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
tunnel->init = tb_dp_xchg_caps;
tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
@@ -419,6 +697,7 @@ err_free:
* @tb: Pointer to the domain structure
* @in: DP in adapter port
* @out: DP out adapter port
+ * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
*
* Allocates a tunnel between @in and @out that is capable of tunneling
* Display Port traffic.
@@ -426,7 +705,7 @@ err_free:
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out)
+ struct tb_port *out, int max_bw)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -441,8 +720,10 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->init = tb_dp_xchg_caps;
tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
tunnel->dst_port = out;
+ tunnel->max_bw = max_bw;
paths = tunnel->paths;
@@ -478,8 +759,8 @@ static u32 tb_dma_credits(struct tb_port *nhi)
{
u32 max_credits;
- max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
- TB_PORT_MAX_CREDITS_SHIFT;
+ max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
return min(max_credits, 13U);
}
@@ -689,3 +970,62 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
tb_path_deactivate(tunnel->paths[i]);
}
}
+
+/**
+ * tb_tunnel_switch_on_path() - Does the tunnel go through switch
+ * @tunnel: Tunnel to check
+ * @sw: Switch to check
+ *
+ * Returns true if @tunnel goes through @sw (direction does not matter),
+ * false otherwise.
+ */
+bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ continue;
+ if (tb_path_switch_on_path(tunnel->paths[i], sw))
+ return true;
+ }
+
+ return false;
+}
+
+static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ return false;
+ if (!tunnel->paths[i]->activated)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
+ * @tunnel: Tunnel to check
+ *
+ * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel
+ * is not active or does not consume bandwidth.
+ */
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return 0;
+
+ if (tunnel->consumed_bandwidth) {
+ int ret = tunnel->consumed_bandwidth(tunnel);
+
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
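The two helpers above are meant to be used together by a connection manager that needs to know how much bandwidth is already spoken for on a given switch. A minimal sketch, assuming the caller keeps its tunnels on a list (the list itself and its locking are not part of this patch):

	static int tb_consumed_bandwidth_through_sw(struct list_head *tunnel_list,
						    const struct tb_switch *sw)
	{
		struct tb_tunnel *tunnel;
		int consumed = 0;

		list_for_each_entry(tunnel, tunnel_list, list) {
			int ret;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel);
			if (ret > 0)
				consumed += ret;
		}

		return consumed;
	}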
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index c68bbcd3a62c..ba888da005f5 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -27,8 +27,11 @@ enum tb_tunnel_type {
* @npaths: Number of paths in @paths
* @init: Optional tunnel specific initialization
* @activate: Optional tunnel specific activation/deactivation
+ * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
* @list: Tunnels are linked using this field
* @type: Type of the tunnel
+ * @max_bw: Maximum bandwidth (Mb/s) available for the tunnel (only for DP).
+ * Only set if the bandwidth needs to be limited.
*/
struct tb_tunnel {
struct tb *tb;
@@ -38,8 +41,10 @@ struct tb_tunnel {
size_t npaths;
int (*init)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ int (*consumed_bandwidth)(struct tb_tunnel *tunnel);
struct list_head list;
enum tb_tunnel_type type;
+ unsigned int max_bw;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
@@ -47,7 +52,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out);
+ struct tb_port *out, int max_bw);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
@@ -58,6 +63,9 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel);
int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
+bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_switch *sw);
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel);
static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
{
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 4e17a7c7bf0a..880d784398a3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1404,10 +1404,9 @@ struct tb_xdomain_lookup {
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
const struct tb_xdomain_lookup *lookup)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ tb_switch_for_each_port(sw, port) {
struct tb_xdomain *xd;
if (port->xdomain) {
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index c7623f99ac0f..a312cb33a99b 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -82,20 +82,20 @@ config HW_CONSOLE
default y
config VT_HW_CONSOLE_BINDING
- bool "Support for binding and unbinding console drivers"
- depends on HW_CONSOLE
- ---help---
- The virtual terminal is the device that interacts with the physical
- terminal through console drivers. On these systems, at least one
- console driver is loaded. In other configurations, additional console
- drivers may be enabled, such as the framebuffer console. If more than
- 1 console driver is enabled, setting this to 'y' will allow you to
- select the console driver that will serve as the backend for the
- virtual terminals.
-
- See <file:Documentation/driver-api/console.rst> for more
- information. For framebuffer console users, please refer to
- <file:Documentation/fb/fbcon.rst>.
+ bool "Support for binding and unbinding console drivers"
+ depends on HW_CONSOLE
+ ---help---
+ The virtual terminal is the device that interacts with the physical
+ terminal through console drivers. On these systems, at least one
+ console driver is loaded. In other configurations, additional console
+ drivers may be enabled, such as the framebuffer console. If more than
+ 1 console driver is enabled, setting this to 'y' will allow you to
+ select the console driver that will serve as the backend for the
+ virtual terminals.
+
+ See <file:Documentation/driver-api/console.rst> for more
+ information. For framebuffer console users, please refer to
+ <file:Documentation/fb/fbcon.rst>.
config UNIX98_PTYS
bool "Unix98 PTY support" if EXPERT
@@ -173,15 +173,15 @@ config ROCKETPORT
depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
help
This driver supports Comtrol RocketPort and RocketModem PCI boards.
- These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
- modems. For information about the RocketPort/RocketModem boards
- and this driver read <file:Documentation/driver-api/serial/rocket.rst>.
+ These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
+ modems. For information about the RocketPort/RocketModem boards
+ and this driver read <file:Documentation/driver-api/serial/rocket.rst>.
To compile this driver as a module, choose M here: the
module will be called rocket.
If you want to compile this driver into the kernel, say Y here. If
- you don't have a Comtrol RocketPort/RocketModem card installed, say N.
+ you don't have a Comtrol RocketPort/RocketModem card installed, say N.
config CYCLADES
tristate "Cyclades async mux support"
@@ -437,8 +437,8 @@ config MIPS_EJTAG_FDC_KGDB
depends on MIPS_EJTAG_FDC_TTY && KGDB
default y
help
- This enables the use of KGDB over an FDC channel, allowing KGDB to be
- used remotely or when a serial port isn't available.
+ This enables the use of KGDB over an FDC channel, allowing KGDB to be
+ used remotely or when a serial port isn't available.
config MIPS_EJTAG_FDC_KGDB_CHAN
int "KGDB FDC channel"
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 8330fd809a05..13f63c01c589 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -22,18 +22,8 @@
*
*/
-/*
- * Serial driver configuration section. Here are the various options:
- *
- * SERIAL_PARANOIA_CHECK
- * Check the magic number for the async_structure where
- * ever possible.
- */
-
#include <linux/delay.h>
-#undef SERIAL_PARANOIA_CHECK
-
/* Set of debugging defines */
#undef SERIAL_DEBUG_INTR
@@ -132,28 +122,6 @@ static struct serial_state rs_table[1];
#define serial_isroot() (capable(CAP_SYS_ADMIN))
-
-static inline int serial_paranoia_check(struct serial_state *info,
- char *name, const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
- static const char *badmagic =
- "Warning: bad magic number for serial struct (%s) in %s\n";
- static const char *badinfo =
- "Warning: null async_struct for (%s) in %s\n";
-
- if (!info) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (info->magic != SERIAL_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
/* some serial hardware definitions */
#define SDR_OVRUN (1<<15)
#define SDR_RBF (1<<14)
@@ -189,9 +157,6 @@ static void rs_stop(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_stop"))
- return;
-
local_irq_save(flags);
if (info->IER & UART_IER_THRI) {
info->IER &= ~UART_IER_THRI;
@@ -209,9 +174,6 @@ static void rs_start(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_start"))
- return;
-
local_irq_save(flags);
if (info->xmit.head != info->xmit.tail
&& info->xmit.buf
@@ -783,9 +745,6 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch)
info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_put_char"))
- return 0;
-
if (!info->xmit.buf)
return 0;
@@ -808,9 +767,6 @@ static void rs_flush_chars(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
- return;
-
if (info->xmit.head == info->xmit.tail
|| tty->stopped
|| tty->hw_stopped
@@ -833,9 +789,6 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_write"))
- return 0;
-
if (!info->xmit.buf)
return 0;
@@ -878,8 +831,6 @@ static int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_write_room"))
- return 0;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
@@ -887,8 +838,6 @@ static int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
- return 0;
return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
@@ -897,8 +846,6 @@ static void rs_flush_buffer(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
- return;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
@@ -914,9 +861,6 @@ static void rs_send_xchar(struct tty_struct *tty, char ch)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_send_xchar"))
- return;
-
info->x_char = ch;
if (ch) {
/* Make sure transmit interrupts are on */
@@ -952,9 +896,6 @@ static void rs_throttle(struct tty_struct * tty)
printk("throttle %s ....\n", tty_name(tty));
#endif
- if (serial_paranoia_check(info, tty->name, "rs_throttle"))
- return;
-
if (I_IXOFF(tty))
rs_send_xchar(tty, STOP_CHAR(tty));
@@ -974,9 +915,6 @@ static void rs_unthrottle(struct tty_struct * tty)
printk("unthrottle %s ....\n", tty_name(tty));
#endif
- if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
- return;
-
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
@@ -1109,8 +1047,6 @@ static int rs_tiocmget(struct tty_struct *tty)
unsigned char control, status;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
if (tty_io_error(tty))
return -EIO;
@@ -1131,8 +1067,6 @@ static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
if (tty_io_error(tty))
return -EIO;
@@ -1155,12 +1089,8 @@ static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
*/
static int rs_break(struct tty_struct *tty, int break_state)
{
- struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_break"))
- return -EINVAL;
-
local_irq_save(flags);
if (break_state == -1)
custom.adkcon = AC_SETCLR | AC_UARTBRK;
@@ -1212,9 +1142,6 @@ static int rs_ioctl(struct tty_struct *tty,
DEFINE_WAIT(wait);
int ret;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
-
if ((cmd != TIOCSERCONFIG) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
if (tty_io_error(tty))
@@ -1333,9 +1260,6 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
struct serial_state *state = tty->driver_data;
struct tty_port *port = &state->tport;
- if (serial_paranoia_check(state, tty->name, "rs_close"))
- return;
-
if (tty_port_close_start(port, tty, filp) == 0)
return;
@@ -1379,9 +1303,6 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
unsigned long orig_jiffies, char_time;
int lsr;
- if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
- return;
-
if (info->xmit_fifo_size == 0)
return; /* Just in case.... */
@@ -1440,9 +1361,6 @@ static void rs_hangup(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_hangup"))
- return;
-
rs_flush_buffer(tty);
shutdown(tty, info);
info->tport.count = 0;
@@ -1467,8 +1385,6 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
port->tty = tty;
tty->driver_data = info;
tty->port = port;
- if (serial_paranoia_check(info, tty->name, "rs_open"))
- return -ENODEV;
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4d22b911111f..6a3c97d345a0 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -70,26 +70,26 @@ config HVC_XEN_FRONTEND
Xen driver for secondary virtual consoles
config HVC_UDBG
- bool "udbg based fake hypervisor console"
- depends on PPC
- select HVC_DRIVER
- help
- This is meant to be used during HW bring up or debugging when
- no other console mechanism exist but udbg, to get you a quick
- console for userspace. Do NOT enable in production kernels.
+ bool "udbg based fake hypervisor console"
+ depends on PPC
+ select HVC_DRIVER
+ help
+ This is meant to be used during HW bring up or debugging when
+	  no other console mechanism exists but udbg, to get you a quick
+ console for userspace. Do NOT enable in production kernels.
config HVC_DCC
- bool "ARM JTAG DCC console"
- depends on ARM || ARM64
- select HVC_DRIVER
- help
- This console uses the JTAG DCC on ARM to create a console under the HVC
- driver. This console is used through a JTAG only on ARM. If you don't have
- a JTAG then you probably don't want this option.
+ bool "ARM JTAG DCC console"
+ depends on ARM || ARM64
+ select HVC_DRIVER
+ help
+ This console uses the JTAG DCC on ARM to create a console under the HVC
+ driver. This console is used through a JTAG only on ARM. If you don't have
+ a JTAG then you probably don't want this option.
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV
+ depends on RISCV_SBI
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 02629a1f193d..8e0edb7d93fd 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. */
+#include <linux/console.h>
#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
#include <asm/dcc.h>
#include <asm/processor.h>
@@ -12,6 +15,31 @@
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
+static void dcc_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (__dcc_getstatus() & DCC_STATUS_TX)
+ cpu_relax();
+
+ __dcc_putchar(ch);
+}
+
+static void dcc_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, dcc_uart_console_putchar);
+}
+
+static int __init dcc_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->con->write = dcc_early_write;
+
+ return 0;
+}
+
+EARLYCON_DECLARE(dcc, dcc_early_console_setup);
+
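With the earlycon registration in place, the DCC channel can be used as an early console by passing earlycon=dcc on the kernel command line; unlike MMIO-based earlycons it needs no address, since the channel is reached through the __dcc_putchar()/__dcc_getstatus() co-processor accessors used above.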
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
{
int i;
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 5ba6816ebf81..fbaa4ec85560 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1222,22 +1222,28 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
*/
static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
{
- struct rocket_ports tmp;
- int board;
+ struct rocket_ports *tmp;
+ int board, ret = 0;
- memset(&tmp, 0, sizeof (tmp));
- tmp.tty_major = rocket_driver->major;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->tty_major = rocket_driver->major;
for (board = 0; board < 4; board++) {
- tmp.rocketModel[board].model = rocketModel[board].model;
- strcpy(tmp.rocketModel[board].modelString, rocketModel[board].modelString);
- tmp.rocketModel[board].numPorts = rocketModel[board].numPorts;
- tmp.rocketModel[board].loadrm2 = rocketModel[board].loadrm2;
- tmp.rocketModel[board].startingPortNumber = rocketModel[board].startingPortNumber;
- }
- if (copy_to_user(retports, &tmp, sizeof (*retports)))
- return -EFAULT;
- return 0;
+ tmp->rocketModel[board].model = rocketModel[board].model;
+ strcpy(tmp->rocketModel[board].modelString,
+ rocketModel[board].modelString);
+ tmp->rocketModel[board].numPorts = rocketModel[board].numPorts;
+ tmp->rocketModel[board].loadrm2 = rocketModel[board].loadrm2;
+ tmp->rocketModel[board].startingPortNumber =
+ rocketModel[board].startingPortNumber;
+ }
+ if (copy_to_user(retports, tmp, sizeof(*retports)))
+ ret = -EFAULT;
+ kfree(tmp);
+ return ret;
}
static int reset_rm2(struct r_port *info, void __user *arg)
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index a0ac16ee6575..226adeec2aed 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -552,16 +552,97 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
}
#ifdef CONFIG_ACPI
+
+#define SERDEV_ACPI_MAX_SCAN_DEPTH 32
+
+struct acpi_serdev_lookup {
+ acpi_handle device_handle;
+ acpi_handle controller_handle;
+ int n;
+ int index;
+};
+
+static int acpi_serdev_parse_resource(struct acpi_resource *ares, void *data)
+{
+ struct acpi_serdev_lookup *lookup = data;
+ struct acpi_resource_uart_serialbus *sb;
+ acpi_status status;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return 1;
+
+ if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+ return 1;
+
+ if (lookup->index != -1 && lookup->n++ != lookup->index)
+ return 1;
+
+ sb = &ares->data.uart_serial_bus;
+
+ status = acpi_get_handle(lookup->device_handle,
+ sb->resource_source.string_ptr,
+ &lookup->controller_handle);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ /*
+	 * NOTE: Ideally, we would also want to retrieve other properties here,
+ * once setting them before opening the device is supported by serdev.
+ */
+
+ return 1;
+}
+
+static int acpi_serdev_do_lookup(struct acpi_device *adev,
+ struct acpi_serdev_lookup *lookup)
+{
+ struct list_head resource_list;
+ int ret;
+
+ lookup->device_handle = acpi_device_handle(adev);
+ lookup->controller_handle = NULL;
+ lookup->n = 0;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_serdev_parse_resource, lookup);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
+ struct acpi_device *adev)
+{
+ struct acpi_serdev_lookup lookup;
+ int ret;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return -EINVAL;
+
+ /* Look for UARTSerialBusV2 resource */
+ lookup.index = -1; // we only care for the last device
+
+ ret = acpi_serdev_do_lookup(adev, &lookup);
+ if (ret)
+ return ret;
+
+ /* Make sure controller and ResourceSource handle match */
+ if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
+ return -ENODEV;
+
+ return 0;
+}
+
static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
- struct acpi_device *adev)
+ struct acpi_device *adev)
{
- struct serdev_device *serdev = NULL;
+ struct serdev_device *serdev;
int err;
- if (acpi_bus_get_status(adev) || !adev->status.present ||
- acpi_device_enumerated(adev))
- return AE_OK;
-
serdev = serdev_device_alloc(ctrl);
if (!serdev) {
dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
@@ -583,7 +664,7 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
}
static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
+ void *data, void **return_value)
{
struct serdev_controller *ctrl = data;
struct acpi_device *adev;
@@ -591,22 +672,28 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
if (acpi_bus_get_device(handle, &adev))
return AE_OK;
+ if (acpi_device_enumerated(adev))
+ return AE_OK;
+
+ if (acpi_serdev_check_resources(ctrl, adev))
+ return AE_OK;
+
return acpi_serdev_register_device(ctrl, adev);
}
+
static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
{
acpi_status status;
- acpi_handle handle;
- handle = ACPI_HANDLE(ctrl->dev.parent);
- if (!handle)
+ if (!has_acpi_companion(ctrl->dev.parent))
return -ENODEV;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ SERDEV_ACPI_MAX_SCAN_DEPTH,
acpi_serdev_add_device, NULL, ctrl, NULL);
if (ACPI_FAILURE(status))
- dev_dbg(&ctrl->dev, "failed to enumerate serdev slaves\n");
+ dev_warn(&ctrl->dev, "failed to enumerate serdev slaves\n");
if (!ctrl->serdev)
return -ENODEV;
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 0438d9a905ce..6e67fd89445a 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -14,6 +14,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/clk.h>
@@ -22,6 +24,7 @@
#define ASPEED_VUART_GCRA 0x20
#define ASPEED_VUART_GCRA_VUART_EN BIT(0)
+#define ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY BIT(1)
#define ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD BIT(5)
#define ASPEED_VUART_GCRB 0x24
#define ASPEED_VUART_GCRB_HOST_SIRQ_MASK GENMASK(7, 4)
@@ -131,8 +134,53 @@ static ssize_t sirq_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(sirq);
+static ssize_t sirq_polarity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+ u8 reg;
+
+ reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+ reg &= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%u\n", reg ? 1 : 0);
+}
+
+static void aspeed_vuart_set_sirq_polarity(struct aspeed_vuart *vuart,
+ bool polarity)
+{
+ u8 reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+
+ if (polarity)
+ reg |= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+ else
+ reg &= ~ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+
+ writeb(reg, vuart->regs + ASPEED_VUART_GCRA);
+}
+
+static ssize_t sirq_polarity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ aspeed_vuart_set_sirq_polarity(vuart, val != 0);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(sirq_polarity);
+
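Since the new attribute is added to the aspeed_vuart_attrs group below, it shows up as a sirq_polarity file next to sirq and lpc_address under the VUART platform device in sysfs: reading it reports whether ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY is currently set, and writing 0 or a non-zero value clears or sets that bit.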
static struct attribute *aspeed_vuart_attrs[] = {
&dev_attr_sirq.attr,
+ &dev_attr_sirq_polarity.attr,
&dev_attr_lpc_address.attr,
NULL,
};
@@ -302,8 +350,30 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
return 1;
}
+static void aspeed_vuart_auto_configure_sirq_polarity(
+ struct aspeed_vuart *vuart, struct device_node *syscon_np,
+ u32 reg_offset, u32 reg_mask)
+{
+ struct regmap *regmap;
+ u32 value;
+
+ regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(regmap)) {
+ dev_warn(vuart->dev,
+ "could not get regmap for aspeed,sirq-polarity-sense\n");
+ return;
+ }
+ if (regmap_read(regmap, reg_offset, &value)) {
+ dev_warn(vuart->dev, "could not read hw strap table\n");
+ return;
+ }
+
+ aspeed_vuart_set_sirq_polarity(vuart, (value & reg_mask) == 0);
+}
+
static int aspeed_vuart_probe(struct platform_device *pdev)
{
+ struct of_phandle_args sirq_polarity_sense_args;
struct uart_8250_port port;
struct aspeed_vuart *vuart;
struct device_node *np;
@@ -402,6 +472,20 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
vuart->line = rc;
+ rc = of_parse_phandle_with_fixed_args(
+ np, "aspeed,sirq-polarity-sense", 2, 0,
+ &sirq_polarity_sense_args);
+ if (rc < 0) {
+ dev_dbg(&pdev->dev,
+ "aspeed,sirq-polarity-sense property not found\n");
+ } else {
+ aspeed_vuart_auto_configure_sirq_polarity(
+ vuart, sirq_polarity_sense_args.np,
+ sirq_polarity_sense_args.args[0],
+ BIT(sirq_polarity_sense_args.args[1]));
+ of_node_put(sirq_polarity_sense_args.np);
+ }
+
aspeed_vuart_set_enabled(vuart, true);
aspeed_vuart_set_host_tx_discard(vuart, true);
platform_set_drvdata(pdev, vuart);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 1c72fdc2dd37..aab3cccc6789 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -280,9 +280,6 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
- if (IS_ERR(d->clk))
- goto out;
-
clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, baud * 16);
if (rate < 0)
@@ -293,8 +290,10 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, rate);
clk_prepare_enable(d->clk);
- if (!ret)
- p->uartclk = rate;
+ if (ret)
+ goto out;
+
+ p->uartclk = rate;
out:
p->status &= ~UPSTAT_AUTOCTS;
@@ -386,10 +385,10 @@ static int dw8250_probe(struct platform_device *pdev)
{
struct uart_8250_port uart = {}, *up = &uart;
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int irq = platform_get_irq(pdev, 0);
struct uart_port *p = &up->port;
struct device *dev = &pdev->dev;
struct dw8250_data *data;
+ int irq;
int err;
u32 val;
@@ -398,11 +397,9 @@ static int dw8250_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "cannot get irq\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
return irq;
- }
spin_lock_init(&p->lock);
p->mapbase = regs->start;
@@ -472,19 +469,18 @@ static int dw8250_probe(struct platform_device *pdev)
device_property_read_u32(dev, "clock-frequency", &p->uartclk);
/* If there is separate baudclk, get the rate from it. */
- data->clk = devm_clk_get(dev, "baudclk");
- if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER)
- data->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR_OR_NULL(data->clk)) {
- err = clk_prepare_enable(data->clk);
- if (err)
- dev_warn(dev, "could not enable optional baudclk: %d\n",
- err);
- else
- p->uartclk = clk_get_rate(data->clk);
- }
+ data->clk = devm_clk_get_optional(dev, "baudclk");
+ if (data->clk == NULL)
+ data->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ err = clk_prepare_enable(data->clk);
+ if (err)
+ dev_warn(dev, "could not enable optional baudclk: %d\n", err);
+
+ if (data->clk)
+ p->uartclk = clk_get_rate(data->clk);
/* If no clock rate is defined, fail. */
if (!p->uartclk) {
@@ -493,17 +489,16 @@ static int dw8250_probe(struct platform_device *pdev)
goto err_clk;
}
- data->pclk = devm_clk_get(dev, "apb_pclk");
- if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
+ data->pclk = devm_clk_get_optional(dev, "apb_pclk");
+ if (IS_ERR(data->pclk)) {
+ err = PTR_ERR(data->pclk);
goto err_clk;
}
- if (!IS_ERR(data->pclk)) {
- err = clk_prepare_enable(data->pclk);
- if (err) {
- dev_err(dev, "could not enable apb_pclk\n");
- goto err_clk;
- }
+
+ err = clk_prepare_enable(data->pclk);
+ if (err) {
+ dev_err(dev, "could not enable apb_pclk\n");
+ goto err_clk;
}
data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
@@ -546,12 +541,10 @@ err_reset:
reset_control_assert(data->rst);
err_pclk:
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
err_clk:
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
return err;
}
@@ -567,11 +560,9 @@ static int dw8250_remove(struct platform_device *pdev)
reset_control_assert(data->rst);
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
@@ -604,11 +595,9 @@ static int dw8250_runtime_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
return 0;
}
@@ -617,11 +606,9 @@ static int dw8250_runtime_resume(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
- if (!IS_ERR(data->pclk))
- clk_prepare_enable(data->pclk);
+ clk_prepare_enable(data->pclk);
- if (!IS_ERR(data->clk))
- clk_prepare_enable(data->clk);
+ clk_prepare_enable(data->clk);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 597eb9d16f21..108cd55f9c4d 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -166,6 +166,23 @@ static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
serial_port_out(p, 0x2, quot_frac);
}
+static int xr17v35x_startup(struct uart_port *port)
+{
+ /*
+ * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
+ * MCR [7:5] and MSR [7:0]
+ */
+ serial_port_out(port, UART_XR_EFR, UART_EFR_ECB);
+
+ /*
+	 * Make sure all interrupts are masked until initialization is
+ * complete and the FIFOs are cleared
+ */
+ serial_port_out(port, UART_IER, 0);
+
+ return serial8250_do_startup(port);
+}
+
static void exar_shutdown(struct uart_port *port)
{
unsigned char lsr;
@@ -212,6 +229,8 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.get_divisor = xr17v35x_get_divisor;
port->port.set_divisor = xr17v35x_set_divisor;
+
+ port->port.startup = xr17v35x_startup;
} else {
port->port.type = PORT_XR17D15X;
}
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 5f72ef3ea574..60eff3240c8a 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -221,17 +221,6 @@ static void qrk_serial_exit_dma(struct lpss8250 *lpss) {}
static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
{
- struct pci_dev *pdev = to_pci_dev(port->dev);
- int ret;
-
- pci_set_master(pdev);
-
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
- if (ret < 0)
- return ret;
-
- port->irq = pci_irq_vector(pdev, 0);
-
qrk_serial_setup_dma(lpss, port);
return 0;
}
@@ -293,16 +282,22 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ pci_set_master(pdev);
+
lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL);
if (!lpss)
return -ENOMEM;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
lpss->board = (struct lpss8250_board *)id->driver_data;
memset(&uart, 0, sizeof(struct uart_8250_port));
uart.port.dev = &pdev->dev;
- uart.port.irq = pdev->irq;
+ uart.port.irq = pci_irq_vector(pdev, 0);
uart.port.private_data = &lpss->data;
uart.port.type = PORT_16550A;
uart.port.iotype = UPIO_MEM;
@@ -337,6 +332,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_exit:
if (lpss->board->exit)
lpss->board->exit(lpss);
+ pci_free_irq_vectors(pdev);
return ret;
}
@@ -348,6 +344,7 @@ static void lpss8250_remove(struct pci_dev *pdev)
if (lpss->board->exit)
lpss->board->exit(lpss);
+ pci_free_irq_vectors(pdev);
}
static const struct lpss8250_board byt_board = {
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index 8df89e9cd254..e985f344b2dd 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -174,3 +174,4 @@ MODULE_AUTHOR("Michael Moese <michael.moese@men.de");
MODULE_ALIAS("mcb:16z125");
MODULE_ALIAS("mcb:16z025");
MODULE_ALIAS("mcb:16z057");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index b411ba4eb5e9..4d067f515f74 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -544,7 +544,7 @@ static int mtk8250_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- data->rx_wakeup_irq = platform_get_irq(pdev, 1);
+ data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 0826cfdbd406..92fbf46ce3bd 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -48,6 +48,36 @@ static inline void tegra_serial_handle_break(struct uart_port *port)
}
#endif
+static int of_8250_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ /* Clamp the delays to [0, 100ms] */
+ rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+ rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
+ port->rs485 = *rs485;
+
+ /*
+ * Both serial8250_em485_init and serial8250_em485_destroy
+ * are idempotent
+ */
+ if (rs485->flags & SER_RS485_ENABLED) {
+ int ret = serial8250_em485_init(up);
+
+ if (ret) {
+ rs485->flags &= ~SER_RS485_ENABLED;
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
+ return ret;
+ }
+
+ serial8250_em485_destroy(up);
+
+ return 0;
+}
+
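From userspace the usual way into this callback is the TIOCSRS485 ioctl on the serial device. A minimal sketch, with the device path purely illustrative:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	static int enable_rs485(const char *dev)	/* e.g. "/dev/ttyS1" */
	{
		struct serial_rs485 rs485 = {
			.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
			.delay_rts_before_send = 5,	/* ms; clamped to 100 above */
		};
		int fd = open(dev, O_RDWR | O_NOCTTY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, TIOCSRS485, &rs485);
		close(fd);
		return ret;
	}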
/*
* Fill a struct uart_port for a given device node
*/
@@ -178,6 +208,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->flags |= UPF_SKIP_TEST;
port->dev = &ofdev->dev;
+ port->rs485_config = of_8250_rs485_config;
switch (type) {
case PORT_TEGRA:
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 6adbadd6a56a..022924d5ad54 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -48,8 +48,6 @@ struct f815xxa_data {
int idx;
};
-#define PCI_NUM_BAR_RESOURCES 6
-
struct serial_private {
struct pci_dev *dev;
unsigned int nr;
@@ -89,7 +87,7 @@ setup_port(struct serial_private *priv, struct uart_8250_port *port,
{
struct pci_dev *dev = priv->dev;
- if (bar >= PCI_NUM_BAR_RESOURCES)
+ if (bar >= PCI_STD_NUM_BARS)
return -EINVAL;
if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
@@ -745,16 +743,8 @@ static int pci_ni8430_init(struct pci_dev *dev)
}
/* UART Port Control Register */
-#define NI16550_PCR_OFFSET 0x0f
-#define NI16550_PCR_RS422 0x00
-#define NI16550_PCR_ECHO_RS485 0x01
-#define NI16550_PCR_DTR_RS485 0x02
-#define NI16550_PCR_AUTO_RS485 0x03
-#define NI16550_PCR_WIRE_MODE_MASK 0x03
-#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3)
-#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6)
-#define NI16550_ACR_DTR_AUTO_DTR (0x2 << 3)
-#define NI16550_ACR_DTR_MANUAL_DTR (0x0 << 3)
+#define NI8430_PORTCON 0x0f
+#define NI8430_PORTCON_TXVR_ENABLE (1 << 3)
static int
pci_ni8430_setup(struct serial_private *priv,
@@ -776,117 +766,14 @@ pci_ni8430_setup(struct serial_private *priv,
return -ENOMEM;
/* enable the transceiver */
- writeb(readb(p + offset + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
- p + offset + NI16550_PCR_OFFSET);
+ writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
+ p + offset + NI8430_PORTCON);
iounmap(p);
return setup_port(priv, port, bar, offset, board->reg_shift);
}
-static int pci_ni8431_config_rs485(struct uart_port *port,
- struct serial_rs485 *rs485)
-{
- u8 pcr, acr;
- struct uart_8250_port *up;
-
- up = container_of(port, struct uart_8250_port, port);
- acr = up->acr;
- pcr = port->serial_in(port, NI16550_PCR_OFFSET);
- pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
-
- if (rs485->flags & SER_RS485_ENABLED) {
- /* RS-485 */
- if ((rs485->flags & SER_RS485_RX_DURING_TX) &&
- (rs485->flags & SER_RS485_RTS_ON_SEND)) {
- dev_dbg(port->dev, "Invalid 2-wire mode\n");
- return -EINVAL;
- }
-
- if (rs485->flags & SER_RS485_RX_DURING_TX) {
- /* Echo */
- dev_vdbg(port->dev, "2-wire DTR with echo\n");
- pcr |= NI16550_PCR_ECHO_RS485;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- } else {
- /* Auto or DTR */
- if (rs485->flags & SER_RS485_RTS_ON_SEND) {
- /* Auto */
- dev_vdbg(port->dev, "2-wire Auto\n");
- pcr |= NI16550_PCR_AUTO_RS485;
- acr |= NI16550_ACR_DTR_AUTO_DTR;
- } else {
- /* DTR-controlled */
- /* No Echo */
- dev_vdbg(port->dev, "2-wire DTR no echo\n");
- pcr |= NI16550_PCR_DTR_RS485;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- }
- }
- } else {
- /* RS-422 */
- dev_vdbg(port->dev, "4-wire\n");
- pcr |= NI16550_PCR_RS422;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- }
-
- dev_dbg(port->dev, "write pcr: 0x%08x\n", pcr);
- port->serial_out(port, NI16550_PCR_OFFSET, pcr);
-
- up->acr = acr;
- port->serial_out(port, UART_SCR, UART_ACR);
- port->serial_out(port, UART_ICR, up->acr);
-
- /* Update the cache. */
- port->rs485 = *rs485;
-
- return 0;
-}
-
-static int pci_ni8431_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *uart, int idx)
-{
- u8 pcr, acr;
- struct pci_dev *dev = priv->dev;
- void __iomem *addr;
- unsigned int bar, offset = board->first_offset;
-
- if (idx >= board->num_ports)
- return 1;
-
- bar = FL_GET_BASE(board->flags);
- offset += idx * board->uart_offset;
-
- addr = pci_ioremap_bar(dev, bar);
- if (!addr)
- return -ENOMEM;
-
- /* enable the transceiver */
- writeb(readb(addr + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
- addr + NI16550_PCR_OFFSET);
-
- pcr = readb(addr + NI16550_PCR_OFFSET);
- pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
-
- /* set wire mode to default RS-422 */
- pcr |= NI16550_PCR_RS422;
- acr = NI16550_ACR_DTR_MANUAL_DTR;
-
- /* write port configuration to register */
- writeb(pcr, addr + NI16550_PCR_OFFSET);
-
- /* access and write to UART acr register */
- writeb(UART_ACR, addr + UART_SCR);
- writeb(acr, addr + UART_ICR);
-
- uart->port.rs485_config = &pci_ni8431_config_rs485;
-
- iounmap(addr);
-
- return setup_port(priv, uart, bar, offset, board->reg_shift);
-}
-
static int pci_netmos_9900_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -2023,15 +1910,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
-#define PCIE_DEVICE_ID_NI_PXIE8430_2328 0x74C2
-#define PCIE_DEVICE_ID_NI_PXIE8430_23216 0x74C1
-#define PCI_DEVICE_ID_NI_PXI8431_4852 0x7081
-#define PCI_DEVICE_ID_NI_PXI8431_4854 0x70DE
-#define PCI_DEVICE_ID_NI_PXI8431_4858 0x70E3
-#define PCI_DEVICE_ID_NI_PXI8433_4852 0x70E9
-#define PCI_DEVICE_ID_NI_PXI8433_4854 0x70ED
-#define PCIE_DEVICE_ID_NI_PXIE8431_4858 0x74C4
-#define PCIE_DEVICE_ID_NI_PXIE8431_48516 0x74C3
#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
@@ -2269,87 +2147,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_ni8430_setup,
.exit = pci_ni8430_exit,
},
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8430_2328,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8430_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8430_23216,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8430_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4852,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4854,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4858,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8433_4852,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8433_4854,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8431_4858,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8431_48516,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
/* Quatech */
{
.vendor = PCI_VENDOR_ID_QUATECH,
@@ -3106,13 +2903,6 @@ enum pci_board_num_t {
pbn_ni8430_4,
pbn_ni8430_8,
pbn_ni8430_16,
- pbn_ni8430_pxie_8,
- pbn_ni8430_pxie_16,
- pbn_ni8431_2,
- pbn_ni8431_4,
- pbn_ni8431_8,
- pbn_ni8431_pxie_8,
- pbn_ni8431_pxie_16,
pbn_ADDIDATA_PCIe_1_3906250,
pbn_ADDIDATA_PCIe_2_3906250,
pbn_ADDIDATA_PCIe_4_3906250,
@@ -3765,55 +3555,6 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 0x10,
.first_offset = 0x800,
},
- [pbn_ni8430_pxie_16] = {
- .flags = FL_BASE0,
- .num_ports = 16,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8430_pxie_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_4] = {
- .flags = FL_BASE0,
- .num_ports = 4,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_2] = {
- .flags = FL_BASE0,
- .num_ports = 2,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_pxie_16] = {
- .flags = FL_BASE0,
- .num_ports = 16,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_pxie_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
/*
* ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com>
*/
@@ -4060,7 +3801,7 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
return -ENODEV;
num_iomem = num_port = 0;
- for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
num_port++;
if (first_port == -1)
@@ -4088,7 +3829,7 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
*/
first_port = -1;
num_port = 0;
- for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_flags(dev, i) & IORESOURCE_IO &&
pci_resource_len(dev, i) == 8 &&
(first_port == -1 || (first_port + num_port) == i)) {
@@ -5567,33 +5308,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_ni8430_4 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_2328,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8430_pxie_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_23216,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8430_pxie_16 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4852,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_2 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4854,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_4 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4858,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_4858,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_pxie_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_48516,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_pxie_16 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4852,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_2 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4854,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_4 },
/*
* MOXA
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8407166610ce..90655910b0c7 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2114,20 +2114,6 @@ int serial8250_do_startup(struct uart_port *port)
enable_rsa(up);
#endif
- if (port->type == PORT_XR17V35X) {
- /*
- * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
- * MCR [7:5] and MSR [7:0]
- */
- serial_port_out(port, UART_XR_EFR, UART_EFR_ECB);
-
- /*
- * Make sure all interrups are masked until initialization is
- * complete and the FIFOs are cleared
- */
- serial_port_out(port, UART_IER, 0);
- }
-
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 7ef60f8b6e2c..fab3d4f20667 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -243,6 +243,7 @@ config SERIAL_8250_ASPEED_VUART
tristate "Aspeed Virtual UART"
depends on SERIAL_8250
depends on OF
+ depends on REGMAP && MFD_SYSCON
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
@@ -334,7 +335,7 @@ config SERIAL_8250_BCM2835AUX
Features and limitations of the UART are
Registers are similar to 16650 registers,
- set bits in the control registers that are unsupported
+ set bits in the control registers that are unsupported
are ignored and read back as 0
7/8 bit operation with 1 start and 1 stop bit
8 symbols deep fifo for rx and tx
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 67a9eb3f94ce..99f5da3bf913 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -88,7 +88,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
config SERIAL_EARLYCON_RISCV_SBI
bool "Early console using RISC-V SBI"
- depends on RISCV
+ depends on RISCV_SBI
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
@@ -287,26 +287,26 @@ config SERIAL_SAMSUNG_CONSOLE
boot time.)
config SERIAL_SIRFSOC
- tristate "SiRF SoC Platform Serial port support"
- depends on ARCH_SIRF
- select SERIAL_CORE
- help
- Support for the on-chip UART on the CSR SiRFprimaII series,
- providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
- provide all of these ports, depending on how the serial port
- pins are configured).
+ tristate "SiRF SoC Platform Serial port support"
+ depends on ARCH_SIRF
+ select SERIAL_CORE
+ help
+ Support for the on-chip UART on the CSR SiRFprimaII series,
+ providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
+ provide all of these ports, depending on how the serial port
+ pins are configured).
config SERIAL_SIRFSOC_CONSOLE
- bool "Support for console on SiRF SoC serial port"
- depends on SERIAL_SIRFSOC=y
- select SERIAL_CORE_CONSOLE
- help
- Even if you say Y here, the currently visible virtual console
- (/dev/tty0) will still be used as the system console by default, but
- you can alter that using a kernel command line option such as
- "console=ttySiRFx". (Try "man bootparam" or see the documentation of
- your boot loader about how to pass options to the kernel at
- boot time.)
+ bool "Support for console on SiRF SoC serial port"
+ depends on SERIAL_SIRFSOC=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySiRFx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
config SERIAL_TEGRA
tristate "NVIDIA Tegra20/30 SoC serial controller"
@@ -1078,41 +1078,41 @@ config SERIAL_SCCNXP_CONSOLE
Support for console on SCCNXP serial ports.
config SERIAL_SC16IS7XX_CORE
- tristate
+ tristate
config SERIAL_SC16IS7XX
- tristate "SC16IS7xx serial support"
- select SERIAL_CORE
- depends on (SPI_MASTER && !I2C) || I2C
- help
- This selects support for SC16IS7xx serial ports.
- Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
- SC16IS760 and SC16IS762. Select supported buses using options below.
+ tristate "SC16IS7xx serial support"
+ select SERIAL_CORE
+ depends on (SPI_MASTER && !I2C) || I2C
+ help
+ This selects support for SC16IS7xx serial ports.
+ Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
+ SC16IS760 and SC16IS762. Select supported buses using options below.
config SERIAL_SC16IS7XX_I2C
- bool "SC16IS7xx for I2C interface"
- depends on SERIAL_SC16IS7XX
- depends on I2C
- select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
- select REGMAP_I2C if I2C
- default y
- help
- Enable SC16IS7xx driver on I2C bus,
- If required say y, and say n to i2c if not required,
- Enabled by default to support oldconfig.
- You must select at least one bus for the driver to be built.
+ bool "SC16IS7xx for I2C interface"
+ depends on SERIAL_SC16IS7XX
+ depends on I2C
+ select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
+ select REGMAP_I2C if I2C
+ default y
+ help
+ Enable SC16IS7xx driver on I2C bus,
+ If required say y, and say n to i2c if not required,
+ Enabled by default to support oldconfig.
+ You must select at least one bus for the driver to be built.
config SERIAL_SC16IS7XX_SPI
- bool "SC16IS7xx for spi interface"
- depends on SERIAL_SC16IS7XX
- depends on SPI_MASTER
- select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
- select REGMAP_SPI if SPI_MASTER
- help
- Enable SC16IS7xx driver on SPI bus,
- If required say y, and say n to spi if not required,
- This is additional support to exsisting driver.
- You must select at least one bus for the driver to be built.
+ bool "SC16IS7xx for spi interface"
+ depends on SERIAL_SC16IS7XX
+ depends on SPI_MASTER
+ select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
+ select REGMAP_SPI if SPI_MASTER
+ help
+ Enable SC16IS7xx driver on SPI bus,
+ If required say y, and say n to spi if not required,
+	  This is additional support to the existing driver.
+ You must select at least one bus for the driver to be built.
config SERIAL_TIMBERDALE
tristate "Support for timberdale UART"
@@ -1212,7 +1212,7 @@ config SERIAL_ALTERA_UART_CONSOLE
Enable a Altera UART port to be the system console.
config SERIAL_IFX6X60
- tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
+ tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
depends on GPIOLIB || COMPILE_TEST
depends on SPI && HAS_DMA
help
@@ -1392,19 +1392,19 @@ config SERIAL_FSL_LPUART_CONSOLE
you can make it the console by answering Y to this option.
config SERIAL_FSL_LINFLEXUART
- tristate "Freescale linflexuart serial port support"
+ tristate "Freescale LINFlexD UART serial port support"
depends on PRINTK
select SERIAL_CORE
help
- Support for the on-chip linflexuart on some Freescale SOCs.
+ Support for the on-chip LINFlexD UART on some Freescale SOCs.
config SERIAL_FSL_LINFLEXUART_CONSOLE
- bool "Console on Freescale linflexuart serial port"
+ bool "Console on Freescale LINFlexD UART serial port"
depends on SERIAL_FSL_LINFLEXUART=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
- If you have enabled the linflexuart serial port on the Freescale
+ If you have enabled the LINFlexD UART serial port on the Freescale
SoCs, you can make it the console by answering Y to this option.
config SERIAL_CONEXANT_DIGICOLOR
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 863f47056539..d056ee6cca33 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o
obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
-obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
+obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX310X) += max310x.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 3a7d1a66f79c..4b28134d596a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -414,7 +414,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
dma_cap_mask_t mask;
uap->dma_probed = true;
- chan = dma_request_slave_channel_reason(dev, "tx");
+ chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
if (PTR_ERR(chan) == -EPROBE_DEFER) {
uap->dma_probed = false;
@@ -813,10 +813,8 @@ __acquires(&uap->port.lock)
if (!uap->using_tx_dma)
return;
- /* Avoid deadlock with the DMA engine callback */
- spin_unlock(&uap->port.lock);
- dmaengine_terminate_all(uap->dmatx.chan);
- spin_lock(&uap->port.lock);
+ dmaengine_terminate_async(uap->dmatx.chan);
+
if (uap->dmatx.queued) {
dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
DMA_TO_DEVICE);
@@ -1236,10 +1234,6 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#else
/* Blank functions if the DMA engine is not available */
-static inline void pl011_dma_probe(struct uart_amba_port *uap)
-{
-}
-
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}
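dma_request_slave_channel_reason() was only an alias for dma_request_chan(), so the pl011 hunks above call the latter directly; the TX shutdown hunk also drops the unlock/lock dance because dmaengine_terminate_async() does not block and can be called with the port lock held. A hedged sketch of the usual channel-request pattern at probe time (the "tx" name matches this driver, the PIO fallback is illustrative):

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* DMA provider not ready yet, retry probe */
		chan = NULL;			/* no channel described: fall back to PIO */
	}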
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index a32f0d2afd59..205c31a61684 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Freescale linflexuart serial port driver
+ * Freescale LINFlexD UART serial port driver
*
* Copyright 2012-2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
@@ -940,5 +940,5 @@ static void __exit linflex_serial_exit(void)
module_init(linflex_serial_init);
module_exit(linflex_serial_exit);
-MODULE_DESCRIPTION("Freescale linflex serial port driver");
+MODULE_DESCRIPTION("Freescale LINFlexD serial port driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 537896c4d887..4e128d19e0ad 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -437,8 +437,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
}
sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
- sport->dma_tx_nents,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ ret, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
if (!sport->dma_tx_desc) {
dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
dev_err(dev, "Cannot prepare TX slave DMA!\n");
@@ -1280,6 +1280,57 @@ static int lpuart_config_rs485(struct uart_port *port,
return 0;
}
+static int lpuart32_config_rs485(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+
+ unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
+ & ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
+ lpuart32_write(&sport->port, modem, UARTMODIR);
+
+ /* clear unsupported configurations */
+ rs485->delay_rts_before_send = 0;
+ rs485->delay_rts_after_send = 0;
+ rs485->flags &= ~SER_RS485_RX_DURING_TX;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ /* Enable auto RS-485 RTS mode */
+ modem |= UARTMODEM_TXRTSE;
+
+ /*
+		 * RTS needs to be logic HIGH either during transfer _or_ after
+ * transfer, other variants are not supported by the hardware.
+ */
+
+ if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND)))
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+
+ if (rs485->flags & SER_RS485_RTS_ON_SEND &&
+ rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+
+ /*
+		 * The hardware defaults to RTS logic HIGH during transfer.
+ * Switch polarity in case RTS shall be logic HIGH
+ * after transfer.
+ * Note: UART is assumed to be active high.
+ */
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem |= UARTMODEM_TXRTSPOL;
+ }
+
+ /* Store the new configuration */
+ sport->port.rs485 = *rs485;
+
+ lpuart32_write(&sport->port, modem, UARTMODIR);
+ return 0;
+}
+
static unsigned int lpuart_get_mctrl(struct uart_port *port)
{
unsigned int temp = 0;
@@ -1333,18 +1384,7 @@ static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- unsigned long temp;
-
- temp = lpuart32_read(port, UARTMODIR) &
- ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
-
- if (mctrl & TIOCM_RTS)
- temp |= UARTMODIR_RXRTSE;
-
- if (mctrl & TIOCM_CTS)
- temp |= UARTMODIR_TXCTSE;
- lpuart32_write(port, temp, UARTMODIR);
}
static void lpuart_break_ctl(struct uart_port *port, int break_state)
@@ -1889,11 +1929,18 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
ctrl |= UARTCTRL_M;
}
+ /*
+ * When auto RS-485 RTS mode is enabled,
+	 * hardware flow control needs to be disabled.
+ */
+ if (sport->port.rs485.flags & SER_RS485_ENABLED)
+ termios->c_cflag &= ~CRTSCTS;
+
if (termios->c_cflag & CRTSCTS) {
- modem |= UARTMODEM_RXRTSE | UARTMODEM_TXCTSE;
+ modem |= (UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
} else {
termios->c_cflag &= ~CRTSCTS;
- modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
+ modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
}
if (termios->c_cflag & CSTOPB)
@@ -2416,7 +2463,10 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.ops = &lpuart_pops;
sport->port.flags = UPF_BOOT_AUTOCONF;
- sport->port.rs485_config = lpuart_config_rs485;
+ if (lpuart_is_32(sport))
+ sport->port.rs485_config = lpuart32_config_rs485;
+ else
+ sport->port.rs485_config = lpuart_config_rs485;
sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->ipg_clk)) {
@@ -2470,7 +2520,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485.delay_rts_after_send)
dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
- lpuart_config_rs485(&sport->port, &sport->port.rs485);
+ sport->port.rs485_config(&sport->port, &sport->port.rs485);
sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
if (!sport->dma_tx_chan)
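With lpuart32_config_rs485() added above and selected at probe time for 32-bit LPUART instances, those ports can be put into auto RS-485 RTS mode through the standard serial_rs485 interface. A minimal user-space sketch of driving that path; the device path is a placeholder and error handling is trimmed:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

static int enable_rs485(const char *path)
{
	struct serial_rs485 rs485 = { 0 };
	int fd = open(path, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;

	/* TIOCSRS485 ends up in the port's rs485_config() callback */
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	return ioctl(fd, TIOCSRS485, &rs485);
}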
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index ffefd218761e..31033d517e82 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1230,6 +1230,9 @@ static int ifx_spi_spi_remove(struct spi_device *spi)
struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
/* stop activity */
tasklet_kill(&ifx_dev->io_work_tasklet);
+
+ pm_runtime_disable(&spi->dev);
+
/* free irq */
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 5e08f2657b90..a9e20e6c63ad 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -619,7 +619,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
dev_err(dev, "DMA mapping error for TX.\n");
return;
}
- desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
+ desc = dmaengine_prep_slave_sg(chan, sgl, ret,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!desc) {
dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
@@ -1034,8 +1034,6 @@ static void imx_uart_timeout(struct timer_list *t)
}
}
-#define RX_BUF_SIZE (PAGE_SIZE)
-
/*
* There are two kinds of RX DMA interrupts(such as in the MX6Q):
* [1] the RX DMA buffer is full.
@@ -1118,7 +1116,8 @@ static void imx_uart_dma_rx_callback(void *data)
}
/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 4
+#define RX_DMA_PERIODS 16
+#define RX_BUF_SIZE (RX_DMA_PERIODS * PAGE_SIZE / 4)
static int imx_uart_start_rx_dma(struct imx_port *sport)
{
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index e5d3ebab6dae..4f53a4caabf6 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -930,3 +930,4 @@ MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MEN 16z135 High Speed UART");
MODULE_ALIAS("mcb:16z135");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 3657a24913fc..1cbae0768b1f 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -301,7 +301,7 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
dma = &msm_port->tx_dma;
/* allocate DMA resources, if available */
- dma->chan = dma_request_slave_channel_reason(dev, "tx");
+ dma->chan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan))
goto no_tx;
@@ -344,7 +344,7 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
dma = &msm_port->rx_dma;
/* allocate DMA resources, if available */
- dma->chan = dma_request_slave_channel_reason(dev, "rx");
+ dma->chan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->chan))
goto no_rx;
@@ -980,6 +980,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int mr;
/* reset everything */
msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
@@ -987,7 +988,10 @@ static void msm_reset(struct uart_port *port)
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ mr = msm_read(port, UART_MR1);
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 6157213a8359..c16234bca78f 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -233,6 +233,7 @@ struct eg20t_port {
struct dma_chan *chan_rx;
struct scatterlist *sg_tx_p;
int nent;
+ int orig_nent;
struct scatterlist sg_rx;
int tx_dma_use;
void *rx_buf_virt;
@@ -787,9 +788,10 @@ static void pch_dma_tx_complete(void *arg)
}
xmit->tail &= UART_XMIT_SIZE - 1;
async_tx_ack(priv->desc_tx);
- dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);
+ dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
priv->tx_dma_use = 0;
priv->nent = 0;
+ priv->orig_nent = 0;
kfree(priv->sg_tx_p);
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
}
@@ -1010,6 +1012,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__);
return 0;
}
+ priv->orig_nent = num;
priv->nent = nent;
for (i = 0; i < nent; i++, sg++) {
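The orig_nent change above follows the DMA-API contract: dma_map_sg() may merge entries and return fewer segments than were passed in, and while the hardware must be programmed with the returned count, dma_unmap_sg() must always be called with the original one. A short sketch of the rule (variable names are illustrative):

	int mapped;

	mapped = dma_map_sg(dev, sgl, orig_nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	/* program the engine with the possibly smaller mapped count */
	desc = dmaengine_prep_slave_sg(chan, sgl, mapped,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);

	/* later, on completion: unmap with the count handed to dma_map_sg() */
	dma_unmap_sg(dev, sgl, orig_nents, DMA_TO_DEVICE);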
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 14c6306bc462..ff63728a95f4 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -9,10 +9,12 @@
#include <linux/console.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/qcom-geni-se.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
@@ -115,6 +117,7 @@ struct qcom_geni_serial_port {
bool brk;
unsigned int tx_remaining;
+ int wakeup_irq;
};
static const struct uart_ops qcom_geni_console_pops;
@@ -754,6 +757,15 @@ out_write_wakeup:
uart_write_wakeup(uport);
}
+static irqreturn_t qcom_geni_serial_wakeup_isr(int isr, void *dev)
+{
+ struct uart_port *uport = dev;
+
+ pm_wakeup_event(uport->dev, 2000);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
{
u32 m_irq_en;
@@ -830,7 +842,7 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport)
if (uart_console(uport))
console_stop(uport->cons);
- free_irq(uport->irq, uport);
+ disable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
qcom_geni_serial_stop_tx(uport);
qcom_geni_serial_stop_rx(uport);
@@ -890,21 +902,14 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
int ret;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- scnprintf(port->name, sizeof(port->name),
- "qcom_serial_%s%d",
- (uart_console(uport) ? "console" : "uart"), uport->line);
-
if (!port->setup) {
ret = qcom_geni_serial_port_setup(uport);
if (ret)
return ret;
}
+ enable_irq(uport->irq);
- ret = request_irq(uport->irq, qcom_geni_serial_isr, IRQF_TRIGGER_HIGH,
- port->name, uport);
- if (ret)
- dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
- return ret;
+ return 0;
}
static unsigned long get_clk_cfg(unsigned long clk_freq)
@@ -1297,11 +1302,44 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
+ scnprintf(port->name, sizeof(port->name), "qcom_geni_serial_%s%d",
+ (uart_console(uport) ? "console" : "uart"), uport->line);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
uport->irq = irq;
+ irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
+ IRQF_TRIGGER_HIGH, port->name, uport);
+ if (ret) {
+ dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+ return ret;
+ }
+
+ if (!console) {
+ port->wakeup_irq = platform_get_irq(pdev, 1);
+ if (port->wakeup_irq < 0) {
+ dev_err(&pdev->dev, "Failed to get wakeup IRQ %d\n",
+ port->wakeup_irq);
+ } else {
+ irq_set_status_flags(port->wakeup_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(uport->dev, port->wakeup_irq,
+ qcom_geni_serial_wakeup_isr,
+ IRQF_TRIGGER_FALLING, "uart_wakeup", uport);
+ if (ret) {
+ dev_err(uport->dev, "Failed to register wakeup IRQ ret %d\n",
+ ret);
+ return ret;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, port->wakeup_irq);
+ if (unlikely(ret))
+ dev_err(uport->dev, "%s:Failed to set IRQ wake:%d\n",
+ __func__, ret);
+ }
+ }
uport->private_data = drv;
platform_set_drvdata(pdev, port);
port->handle_rx = console ? handle_rx_console : handle_rx_uart;
@@ -1324,7 +1362,12 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- return uart_suspend_port(uport->private_data, uport);
+ uart_suspend_port(uport->private_data, uport);
+
+ if (port->wakeup_irq > 0)
+ enable_irq(port->wakeup_irq);
+
+ return 0;
}
static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
@@ -1332,6 +1375,9 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
+ if (port->wakeup_irq > 0)
+ disable_irq(port->wakeup_irq);
+
return uart_resume_port(uport->private_data, uport);
}
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung_tty.c
index 83fd51607741..83fd51607741 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung_tty.c
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 2f599515c133..b6ace6290e23 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1122,8 +1122,7 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
int ret;
struct dma_slave_config dma_sconfig;
- dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
- dma_to_memory ? "rx" : "tx");
+ dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
dev_err(tup->uport.dev,
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index c4a414a46c7f..b0a6eb106edb 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1111,7 +1111,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
if (!uport)
goto out;
- if (uport->type != PORT_UNKNOWN)
+ if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
uport->ops->break_ctl(uport, break_state);
ret = 0;
out:
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 22e5d4e13714..58bf9d496ba5 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -54,6 +54,7 @@
#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
+#include <asm/platform_early.h>
#endif
#include "serial_mctrl_gpio.h"
@@ -3090,6 +3091,7 @@ static struct console serial_console = {
.data = &sci_uart_driver,
};
+#ifdef CONFIG_SUPERH
static struct console early_serial_console = {
.name = "early_ttySC",
.write = serial_console_write,
@@ -3118,6 +3120,7 @@ static int sci_probe_earlyprintk(struct platform_device *pdev)
register_console(&early_serial_console);
return 0;
}
+#endif
#define SCI_CONSOLE (&serial_console)
@@ -3318,8 +3321,10 @@ static int sci_probe(struct platform_device *dev)
* the special early probe. We don't have sufficient device state
* to make it beyond this yet.
*/
- if (is_early_platform_device(dev))
+#ifdef CONFIG_SUPERH
+ if (is_sh_early_platform_device(dev))
return sci_probe_earlyprintk(dev);
+#endif
if (dev->dev.of_node) {
p = sci_parse_dt(dev, &dev_id);
@@ -3414,8 +3419,8 @@ static void __exit sci_exit(void)
uart_unregister_driver(&sci_uart_driver);
}
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
-early_platform_init_buffer("earlyprintk", &sci_driver,
+#if defined(CONFIG_SUPERH) && defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
+sh_early_platform_init_buffer("earlyprintk", &sci_driver,
early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 004ca684d3ae..637b09d3fe79 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -120,7 +120,8 @@ static u32 uart_usp_ff_empty_mask(struct uart_port *port)
empty_bit = ilog2(port->fifosize) + 1;
return (1 << empty_bit);
}
-struct sirfsoc_uart_register sirfsoc_usp = {
+
+static struct sirfsoc_uart_register sirfsoc_usp = {
.uart_reg = {
.sirfsoc_mode1 = 0x0000,
.sirfsoc_mode2 = 0x0004,
@@ -186,7 +187,7 @@ struct sirfsoc_uart_register sirfsoc_usp = {
},
};
-struct sirfsoc_uart_register sirfsoc_uart = {
+static struct sirfsoc_uart_register sirfsoc_uart = {
.uart_reg = {
.sirfsoc_line_ctrl = 0x0040,
.sirfsoc_tx_rx_en = 0x004c,
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 771d11196523..31df23502562 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -919,6 +919,34 @@ static void sprd_pm(struct uart_port *port, unsigned int state,
}
}
+#ifdef CONFIG_CONSOLE_POLL
+static int sprd_poll_init(struct uart_port *port)
+{
+ if (port->state->pm_state != UART_PM_STATE_ON) {
+ sprd_pm(port, UART_PM_STATE_ON, 0);
+ port->state->pm_state = UART_PM_STATE_ON;
+ }
+
+ return 0;
+}
+
+static int sprd_poll_get_char(struct uart_port *port)
+{
+ while (!(serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK))
+ cpu_relax();
+
+ return serial_in(port, SPRD_RXD);
+}
+
+static void sprd_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ while (serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK)
+ cpu_relax();
+
+ serial_out(port, SPRD_TXD, ch);
+}
+#endif
+
static const struct uart_ops serial_sprd_ops = {
.tx_empty = sprd_tx_empty,
.get_mctrl = sprd_get_mctrl,
@@ -936,6 +964,11 @@ static const struct uart_ops serial_sprd_ops = {
.config_port = sprd_config_port,
.verify_port = sprd_verify_port,
.pm = sprd_pm,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = sprd_poll_init,
+ .poll_get_char = sprd_poll_get_char,
+ .poll_put_char = sprd_poll_put_char,
+#endif
};
#ifdef CONFIG_SERIAL_SPRD_CONSOLE
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index df90747ee3a8..2f72514d63ed 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -240,8 +240,8 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
* cleared by the sequence [read SR - read DR].
*/
if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
- stm32_clr_bits(port, ofs->icr, USART_ICR_ORECF |
- USART_ICR_PECF | USART_ICR_FECF);
+ writel_relaxed(sr & USART_SR_ERR_MASK,
+ port->membase + ofs->icr);
c = stm32_get_char(port, &sr, &stm32_port->last_res);
port->icount.rx++;
@@ -435,7 +435,7 @@ static void stm32_transmit_chars(struct uart_port *port)
if (ofs->icr == UNDEF_REG)
stm32_clr_bits(port, ofs->isr, USART_SR_TC);
else
- stm32_set_bits(port, ofs->icr, USART_ICR_TCCF);
+ writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
if (stm32_port->tx_ch)
stm32_transmit_chars_dma(port);
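On STM32 parts that have it, USART_ICR is a write-one-to-clear register, so the hunks above write the flag bits back instead of doing a read-modify-write through stm32_clr_bits()/stm32_set_bits(). A minimal sketch of acknowledging the error flags, reusing the driver's register offsets:

	u32 sr = readl_relaxed(port->membase + ofs->isr);

	if (sr & USART_SR_ERR_MASK)
		/* write 1s only to the flags being acknowledged */
		writel_relaxed(sr & USART_SR_ERR_MASK, port->membase + ofs->icr);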
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 06e79c11141d..7dbd0c471d92 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -22,7 +22,6 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
@@ -55,7 +54,6 @@
#define ULITE_CONTROL_RST_TX 0x01
#define ULITE_CONTROL_RST_RX 0x02
#define ULITE_CONTROL_IE 0x10
-#define UART_AUTOSUSPEND_TIMEOUT 3000
/* Static pointer to console port */
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
@@ -65,7 +63,6 @@ static struct uart_port *console_port;
struct uartlite_data {
const struct uartlite_reg_ops *reg_ops;
struct clk *clk;
- struct uart_driver *ulite_uart_driver;
};
struct uartlite_reg_ops {
@@ -393,12 +390,12 @@ static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
static void ulite_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- if (!state) {
- pm_runtime_get_sync(port->dev);
- } else {
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
- }
+ struct uartlite_data *pdata = port->private_data;
+
+ if (!state)
+ clk_enable(pdata->clk);
+ else
+ clk_disable(pdata->clk);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -697,9 +694,7 @@ static int ulite_release(struct device *dev)
int rc = 0;
if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- rc = uart_remove_one_port(pdata->ulite_uart_driver, port);
+ rc = uart_remove_one_port(&ulite_uart_driver, port);
dev_set_drvdata(dev, NULL);
port->mapbase = 0;
}
@@ -717,11 +712,8 @@ static int __maybe_unused ulite_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
- if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- uart_suspend_port(pdata->ulite_uart_driver, port);
- }
+ if (port)
+ uart_suspend_port(&ulite_uart_driver, port);
return 0;
}
@@ -736,41 +728,17 @@ static int __maybe_unused ulite_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
- if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- uart_resume_port(pdata->ulite_uart_driver, port);
- }
+ if (port)
+ uart_resume_port(&ulite_uart_driver, port);
return 0;
}
-static int __maybe_unused ulite_runtime_suspend(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct uartlite_data *pdata = port->private_data;
-
- clk_disable(pdata->clk);
- return 0;
-};
-
-static int __maybe_unused ulite_runtime_resume(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct uartlite_data *pdata = port->private_data;
-
- clk_enable(pdata->clk);
- return 0;
-}
/* ---------------------------------------------------------------------
* Platform bus binding
*/
-static const struct dev_pm_ops ulite_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ulite_suspend, ulite_resume)
- SET_RUNTIME_PM_OPS(ulite_runtime_suspend,
- ulite_runtime_resume, NULL)
-};
+static SIMPLE_DEV_PM_OPS(ulite_pm_ops, ulite_suspend, ulite_resume);
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
@@ -795,22 +763,6 @@ static int ulite_probe(struct platform_device *pdev)
if (prop)
id = be32_to_cpup(prop);
#endif
- if (id < 0) {
- /* Look for a serialN alias */
- id = of_alias_get_id(pdev->dev.of_node, "serial");
- if (id < 0)
- id = 0;
- }
-
- if (!ulite_uart_driver.state) {
- dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
- ret = uart_register_driver(&ulite_uart_driver);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register driver\n");
- return ret;
- }
- }
-
pdata = devm_kzalloc(&pdev->dev, sizeof(struct uartlite_data),
GFP_KERNEL);
if (!pdata)
@@ -836,22 +788,24 @@ static int ulite_probe(struct platform_device *pdev)
pdata->clk = NULL;
}
- pdata->ulite_uart_driver = &ulite_uart_driver;
ret = clk_prepare_enable(pdata->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare clock\n");
return ret;
}
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ if (!ulite_uart_driver.state) {
+ dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
+ ret = uart_register_driver(&ulite_uart_driver);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register driver\n");
+ return ret;
+ }
+ }
ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ clk_disable(pdata->clk);
return ret;
}
@@ -860,14 +814,9 @@ static int ulite_remove(struct platform_device *pdev)
{
struct uart_port *port = dev_get_drvdata(&pdev->dev);
struct uartlite_data *pdata = port->private_data;
- int rc;
- clk_unprepare(pdata->clk);
- rc = ulite_release(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- return rc;
+ clk_disable_unprepare(pdata->clk);
+ return ulite_release(&pdev->dev);
}
/* work with hotplug and coldplug */
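With runtime PM removed, uartlite only needs system-sleep callbacks, which SIMPLE_DEV_PM_OPS() declares in one line. A sketch of how such an ops table is typically wired into a platform driver (all names here are placeholders):

static SIMPLE_DEV_PM_OPS(my_uart_pm_ops, my_uart_suspend, my_uart_resume);

static struct platform_driver my_uart_driver = {
	.probe	= my_uart_probe,
	.remove	= my_uart_remove,
	.driver	= {
		.name	= "my-uart",
		.pm	= &my_uart_pm_ops,	/* callbacks only referenced under CONFIG_PM_SLEEP */
	},
};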
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 802c1210558f..d9f54c7d94f2 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -87,6 +87,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/poll.h>
+#include <linux/ppp-ioctl.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -1344,9 +1345,12 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
if (!tty->port)
tty->port = driver->ports[idx];
- WARN_RATELIMIT(!tty->port,
- "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
- __func__, tty->driver->name);
+ if (WARN_RATELIMIT(!tty->port,
+ "%s: %s driver does not set tty->port. This would crash the kernel. Fix the driver!\n",
+ __func__, tty->driver->name)) {
+ retval = -EINVAL;
+ goto err_release_lock;
+ }
retval = tty_ldisc_lock(tty, 5 * HZ);
if (retval)
@@ -1924,7 +1928,6 @@ EXPORT_SYMBOL_GPL(tty_kopen);
/**
* tty_open_by_driver - open a tty device
* @device: dev_t of device to open
- * @inode: inode of device file
* @filp: file pointer to tty
*
* Performs the driver lookup, checks for a reopen, or otherwise
@@ -1937,7 +1940,7 @@ EXPORT_SYMBOL_GPL(tty_kopen);
* - concurrent tty driver removal w/ lookup
* - concurrent tty removal from driver table
*/
-static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
+static struct tty_struct *tty_open_by_driver(dev_t device,
struct file *filp)
{
struct tty_struct *tty;
@@ -2029,7 +2032,7 @@ retry_open:
tty = tty_open_current_tty(device, filp);
if (!tty)
- tty = tty_open_by_driver(device, inode, filp);
+ tty = tty_open_by_driver(device, filp);
if (IS_ERR(tty)) {
tty_free_file(filp);
@@ -2755,6 +2758,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
int retval = -ENOIOCTLCMD;
switch (cmd) {
+ case TIOCOUTQ:
case TIOCSTI:
case TIOCGWINSZ:
case TIOCSWINSZ:
@@ -2810,6 +2814,9 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
#endif
case TIOCGSOFTCAR:
case TIOCSSOFTCAR:
+
+ case PPPIOCGCHAN:
+ case PPPIOCGUNIT:
return tty_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
case TIOCCONS:
case TIOCEXCL:
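WARN_RATELIMIT() evaluates to the truth value of its condition, so the tty_init_dev() hunk above can warn and refuse the open in one step instead of letting a NULL tty->port crash later. The idiom, in a hedged generic form:

	if (WARN_RATELIMIT(!res, "missing resource, refusing\n"))
		return -EINVAL;	/* the warning fired; fail gracefully instead of crashing */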
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4c49f53afa3e..ec1f6a48121e 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -156,12 +156,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
* takes tty_ldiscs_lock to guard against ldisc races
*/
-#if defined(CONFIG_LDISC_AUTOLOAD)
- #define INITIAL_AUTOLOAD_STATE 1
-#else
- #define INITIAL_AUTOLOAD_STATE 0
-#endif
-static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
+static int tty_ldisc_autoload = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD);
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 515fc095e3b4..15d33fa0c925 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1491,7 +1491,7 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type,
if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
kbd_rawcode(value);
- if (event_type == EV_KEY)
+ if (event_type == EV_KEY && event_code <= KEY_MAX)
kbd_keycode(event_code, value, HW_RAW(handle->dev));
spin_unlock(&kbd_event_lock);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 1f042346e722..778f83ea2249 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -456,6 +456,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
size_t ret;
char *con_buf;
+ if (use_unicode(inode))
+ return -EOPNOTSUPP;
+
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
return -ENOMEM;
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index ebcf1434e296..81c88f7bbbcb 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -151,8 +151,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
int i;
if (pdev->dev.of_node) {
- int irq;
-
/* alloc uioinfo for one device */
uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
if (!uioinfo) {
@@ -163,13 +161,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
pdev->dev.of_node);
uioinfo->version = "devicetree";
-
- /* Multiple IRQs are not supported */
- irq = platform_get_irq(pdev, 0);
- if (irq == -ENXIO)
- uioinfo->irq = UIO_IRQ_NONE;
- else
- uioinfo->irq = irq;
}
if (!uioinfo || !uioinfo->name || !uioinfo->version) {
@@ -199,8 +190,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
mutex_init(&priv->alloc_lock);
if (!uioinfo->irq) {
+ /* Multiple IRQs are not supported */
ret = platform_get_irq(pdev, 0);
- if (ret < 0)
+ if (ret == -ENXIO && pdev->dev.of_node)
+ ret = UIO_IRQ_NONE;
+ else if (ret < 0)
goto bad1;
uioinfo->irq = ret;
}
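The uio_dmem_genirq hunks above defer the interrupt lookup to a single platform_get_irq() call and treat a device-tree node without an interrupt (-ENXIO) as "no IRQ" rather than a probe failure. A short sketch of that decision; error unwinding is simplified to a plain return:

	ret = platform_get_irq(pdev, 0);
	if (ret == -ENXIO && pdev->dev.of_node)
		uioinfo->irq = UIO_IRQ_NONE;	/* DT node simply has no interrupt */
	else if (ret < 0)
		return ret;			/* real error, including -EPROBE_DEFER */
	else
		uioinfo->irq = ret;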
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index d0331613a355..2a1e89d12ed9 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -43,4 +43,14 @@ config USB_CDNS3_PCI_WRAP
If you choose to build this driver as module it will
be dynamically linked and module will be called cdns3-pci.ko
+config USB_CDNS3_TI
+ tristate "Cadence USB3 support on TI platforms"
+ depends on ARCH_K3 || COMPILE_TEST
+ default USB_CDNS3
+ help
+ Say 'Y' or 'M' here if you are building for Texas Instruments
+	  platforms that contain the Cadence USB3 controller core.
+
+ e.g. J721e.
+
endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index a703547350bb..948e6b88d1a9 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -14,3 +14,4 @@ endif
cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
new file mode 100644
index 000000000000..c6a79ca15858
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * cdns3-ti.c - TI specific Glue layer for Cadence USB Controller
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+/* USB Wrapper register offsets */
+#define USBSS_PID 0x0
+#define USBSS_W1 0x4
+#define USBSS_STATIC_CONFIG 0x8
+#define USBSS_PHY_TEST 0xc
+#define USBSS_DEBUG_CTRL 0x10
+#define USBSS_DEBUG_INFO 0x14
+#define USBSS_DEBUG_LINK_STATE 0x18
+#define USBSS_DEVICE_CTRL 0x1c
+
+/* Wrapper 1 register bits */
+#define USBSS_W1_PWRUP_RST BIT(0)
+#define USBSS_W1_OVERCURRENT_SEL BIT(8)
+#define USBSS_W1_MODESTRAP_SEL BIT(9)
+#define USBSS_W1_OVERCURRENT BIT(16)
+#define USBSS_W1_MODESTRAP_MASK GENMASK(18, 17)
+#define USBSS_W1_MODESTRAP_SHIFT 17
+#define USBSS_W1_USB2_ONLY BIT(19)
+
+/* Static config register bits */
+#define USBSS1_STATIC_PLL_REF_SEL_MASK GENMASK(8, 5)
+#define USBSS1_STATIC_PLL_REF_SEL_SHIFT 5
+#define USBSS1_STATIC_LOOPBACK_MODE_MASK GENMASK(4, 3)
+#define USBSS1_STATIC_LOOPBACK_MODE_SHIFT 3
+#define USBSS1_STATIC_VBUS_SEL_MASK GENMASK(2, 1)
+#define USBSS1_STATIC_VBUS_SEL_SHIFT 1
+#define USBSS1_STATIC_LANE_REVERSE BIT(0)
+
+/* Modestrap modes */
+enum modestrap_mode { USBSS_MODESTRAP_MODE_NONE,
+ USBSS_MODESTRAP_MODE_HOST,
+ USBSS_MODESTRAP_MODE_PERIPHERAL};
+
+struct cdns_ti {
+ struct device *dev;
+ void __iomem *usbss;
+ int usb2_only:1;
+ int vbus_divider:1;
+ struct clk *usb2_refclk;
+ struct clk *lpm_clk;
+};
+
+static const int cdns_ti_rate_table[] = { /* in KHZ */
+ 9600,
+ 10000,
+ 12000,
+ 19200,
+ 20000,
+ 24000,
+ 25000,
+ 26000,
+ 38400,
+ 40000,
+ 58000,
+ 50000,
+ 52000,
+};
+
+static inline u32 cdns_ti_readl(struct cdns_ti *data, u32 offset)
+{
+ return readl(data->usbss + offset);
+}
+
+static inline void cdns_ti_writel(struct cdns_ti *data, u32 offset, u32 value)
+{
+ writel(value, data->usbss + offset);
+}
+
+static int cdns_ti_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct cdns_ti *data;
+ int error;
+ u32 reg;
+ int rate_code, i;
+ unsigned long rate;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ data->dev = dev;
+
+ data->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->usbss)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->usbss);
+ }
+
+ data->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(data->usb2_refclk)) {
+ dev_err(dev, "can't get usb2_refclk\n");
+ return PTR_ERR(data->usb2_refclk);
+ }
+
+ data->lpm_clk = devm_clk_get(dev, "lpm");
+ if (IS_ERR(data->lpm_clk)) {
+ dev_err(dev, "can't get lpm_clk\n");
+ return PTR_ERR(data->lpm_clk);
+ }
+
+ rate = clk_get_rate(data->usb2_refclk);
+ rate /= 1000; /* To KHz */
+ for (i = 0; i < ARRAY_SIZE(cdns_ti_rate_table); i++) {
+ if (cdns_ti_rate_table[i] == rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cdns_ti_rate_table)) {
+ dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
+ return -EINVAL;
+ }
+
+ rate_code = i;
+
+ pm_runtime_enable(dev);
+ error = pm_runtime_get_sync(dev);
+ if (error < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed: %d\n", error);
+ goto err_get;
+ }
+
+ /* assert RESET */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ reg &= ~USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* set static config */
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+ reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
+ reg |= rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
+
+ reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ if (data->vbus_divider)
+ reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
+
+ cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+
+ /* set USB2_ONLY mode if requested */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
+ if (data->usb2_only)
+ reg |= USBSS_W1_USB2_ONLY;
+
+ /* set default modestrap */
+ reg |= USBSS_W1_MODESTRAP_SEL;
+ reg &= ~USBSS_W1_MODESTRAP_MASK;
+ reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* de-assert RESET */
+ reg |= USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ error = of_platform_populate(node, NULL, NULL, dev);
+ if (error) {
+ dev_err(dev, "failed to create children: %d\n", error);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pm_runtime_put_sync(data->dev);
+err_get:
+ pm_runtime_disable(data->dev);
+
+ return error;
+}
+
+static int cdns_ti_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int cdns_ti_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ device_for_each_child(dev, NULL, cdns_ti_remove_core);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_ti_of_match[] = {
+ { .compatible = "ti,j721e-usb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
+
+static struct platform_driver cdns_ti_driver = {
+ .probe = cdns_ti_probe,
+ .remove = cdns_ti_remove,
+ .driver = {
+ .name = "cdns3-ti",
+ .of_match_table = cdns_ti_of_match,
+ },
+};
+
+module_platform_driver(cdns_ti_driver);
+
+MODULE_ALIAS("platform:cdns3-ti");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 TI Glue Layer");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index df8812c30640..d8e7eb2f97b9 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -274,11 +274,14 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
switch (event) {
case CI_HDRC_IMX_HSIC_ACTIVE_EVENT:
- ret = pinctrl_select_state(data->pinctrl,
- data->pinctrl_hsic_active);
- if (ret)
- dev_err(dev, "hsic_active select failed, err=%d\n",
- ret);
+ if (data->pinctrl) {
+ ret = pinctrl_select_state(data->pinctrl,
+ data->pinctrl_hsic_active);
+ if (ret)
+ dev_err(dev,
+ "hsic_active select failed, err=%d\n",
+ ret);
+ }
break;
case CI_HDRC_IMX_HSIC_SUSPEND_EVENT:
ret = imx_usbmisc_hsic_set_connect(data->usbmisc_data);
@@ -306,7 +309,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct pinctrl_state *pinctrl_hsic_idle;
of_id = of_match_device(ci_hdrc_imx_dt_ids, dev);
if (!of_id)
@@ -330,12 +332,42 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
pdata.flags |= CI_HDRC_IMX_IS_HSIC;
data->usbmisc_data->hsic = 1;
data->pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR(data->pinctrl)) {
- dev_err(dev, "pinctrl get failed, err=%ld\n",
+ if (PTR_ERR(data->pinctrl) == -ENODEV)
+ data->pinctrl = NULL;
+ else if (IS_ERR(data->pinctrl)) {
+ if (PTR_ERR(data->pinctrl) != -EPROBE_DEFER)
+ dev_err(dev, "pinctrl get failed, err=%ld\n",
PTR_ERR(data->pinctrl));
return PTR_ERR(data->pinctrl);
}
+ data->hsic_pad_regulator =
+ devm_regulator_get_optional(dev, "hsic");
+ if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
+			/* no pad regulator is needed */
+ data->hsic_pad_regulator = NULL;
+ } else if (IS_ERR(data->hsic_pad_regulator)) {
+ if (PTR_ERR(data->hsic_pad_regulator) != -EPROBE_DEFER)
+ dev_err(dev,
+ "Get HSIC pad regulator error: %ld\n",
+ PTR_ERR(data->hsic_pad_regulator));
+ return PTR_ERR(data->hsic_pad_regulator);
+ }
+
+ if (data->hsic_pad_regulator) {
+ ret = regulator_enable(data->hsic_pad_regulator);
+ if (ret) {
+ dev_err(dev,
+ "Failed to enable HSIC pad regulator\n");
+ return ret;
+ }
+ }
+ }
+
+ /* HSIC pinctrl handling */
+ if (data->pinctrl) {
+ struct pinctrl_state *pinctrl_hsic_idle;
+
pinctrl_hsic_idle = pinctrl_lookup_state(data->pinctrl, "idle");
if (IS_ERR(pinctrl_hsic_idle)) {
dev_err(dev,
@@ -358,27 +390,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
PTR_ERR(data->pinctrl_hsic_active));
return PTR_ERR(data->pinctrl_hsic_active);
}
-
- data->hsic_pad_regulator = devm_regulator_get(dev, "hsic");
- if (PTR_ERR(data->hsic_pad_regulator) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
- /* no pad regualator is needed */
- data->hsic_pad_regulator = NULL;
- } else if (IS_ERR(data->hsic_pad_regulator)) {
- dev_err(dev, "Get HSIC pad regulator error: %ld\n",
- PTR_ERR(data->hsic_pad_regulator));
- return PTR_ERR(data->hsic_pad_regulator);
- }
-
- if (data->hsic_pad_regulator) {
- ret = regulator_enable(data->hsic_pad_regulator);
- if (ret) {
- dev_err(dev,
- "Failed to enable HSIC pad regulator\n");
- return ret;
- }
- }
}
if (pdata.flags & CI_HDRC_PMQOS)
@@ -433,6 +444,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
goto err_clk;
}
+ if (data->usbmisc_data) {
+ if (!IS_ERR(pdata.id_extcon.edev) ||
+ of_property_read_bool(np, "usb-role-switch"))
+ data->usbmisc_data->ext_id = 1;
+
+ if (!IS_ERR(pdata.vbus_extcon.edev) ||
+ of_property_read_bool(np, "usb-role-switch"))
+ data->usbmisc_data->ext_vbus = 1;
+ }
+
ret = imx_usbmisc_init_post(data->usbmisc_data);
if (ret) {
dev_err(dev, "usbmisc post failed, ret=%d\n", ret);
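The HSIC pad supply is now requested with devm_regulator_get_optional(), so a board that simply does not wire one up yields -ENODEV instead of a dummy regulator. A minimal sketch of the resulting three-way handling (the "hsic" supply name is the one this driver uses):

	reg = devm_regulator_get_optional(dev, "hsic");
	if (PTR_ERR(reg) == -ENODEV) {
		reg = NULL;			/* supply not described: that is fine */
	} else if (IS_ERR(reg)) {
		return PTR_ERR(reg);		/* includes -EPROBE_DEFER */
	} else {
		ret = regulator_enable(reg);	/* supply exists, switch it on */
		if (ret)
			return ret;
	}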
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index c842e03f8767..de2aac9a2868 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -22,6 +22,8 @@ struct imx_usbmisc_data {
unsigned int evdo:1; /* set external vbus divider option */
unsigned int ulpi:1; /* connected to an ULPI phy */
unsigned int hsic:1; /* HSIC controlller */
+	unsigned int ext_id:1;			/* ID from external event */
+	unsigned int ext_vbus:1;		/* Vbus from external event */
};
int imx_usbmisc_init(struct imx_usbmisc_data *data);
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 12025358bb3c..0c9911d44ee5 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -24,35 +24,23 @@ struct tegra_udc_soc_info {
unsigned long flags;
};
-static const struct tegra_udc_soc_info tegra20_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra30_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra114_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra124_udc_soc_info = {
+static const struct tegra_udc_soc_info tegra_udc_soc_info = {
.flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
};
static const struct of_device_id tegra_udc_of_match[] = {
{
.compatible = "nvidia,tegra20-udc",
- .data = &tegra20_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra30-udc",
- .data = &tegra30_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra114-udc",
- .data = &tegra114_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra124-udc",
- .data = &tegra124_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
/* sentinel */
}
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 98ee575ee500..dce5db41501c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -683,7 +683,7 @@ static int ci_get_platdata(struct device *dev,
if (platdata->dr_mode != USB_DR_MODE_PERIPHERAL) {
/* Get the vbus regulator */
- platdata->reg_vbus = devm_regulator_get(dev, "vbus");
+ platdata->reg_vbus = devm_regulator_get_optional(dev, "vbus");
if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index fcc91a338875..e0376ee646ad 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -342,7 +342,7 @@ DEFINE_SHOW_ATTRIBUTE(ci_registers);
*/
void dbg_create_files(struct ci_hdrc *ci)
{
- ci->debugfs = debugfs_create_dir(dev_name(ci->dev), NULL);
+ ci->debugfs = debugfs_create_dir(dev_name(ci->dev), usb_debug_root);
debugfs_create_file("device", S_IRUGO, ci->debugfs, ci,
&ci_device_fops);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8f18e7b6cadf..ffaf46f5d062 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1524,42 +1524,53 @@ static const struct usb_ep_ops usb_ep_ops = {
/******************************************************************************
* GADGET block
*****************************************************************************/
+/**
+ * ci_hdrc_gadget_connect: caller makes sure gadget driver is bound
+ */
+static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
+{
+ struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
+ unsigned long flags;
+
+ if (is_active) {
+ pm_runtime_get_sync(&_gadget->dev);
+ hw_device_reset(ci);
+ spin_lock_irqsave(&ci->lock, flags);
+ if (ci->driver) {
+ hw_device_state(ci, ci->ep0out->qh.dma);
+ usb_gadget_set_state(_gadget, USB_STATE_POWERED);
+ usb_udc_vbus_handler(_gadget, true);
+ }
+ spin_unlock_irqrestore(&ci->lock, flags);
+ } else {
+ usb_udc_vbus_handler(_gadget, false);
+ if (ci->driver)
+ ci->driver->disconnect(&ci->gadget);
+ hw_device_state(ci, 0);
+ if (ci->platdata->notify_event)
+ ci->platdata->notify_event(ci,
+ CI_HDRC_CONTROLLER_STOPPED_EVENT);
+ _gadget_stop_activity(&ci->gadget);
+ pm_runtime_put_sync(&_gadget->dev);
+ usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
+ }
+}
+
static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
unsigned long flags;
- int gadget_ready = 0;
spin_lock_irqsave(&ci->lock, flags);
ci->vbus_active = is_active;
- if (ci->driver)
- gadget_ready = 1;
spin_unlock_irqrestore(&ci->lock, flags);
if (ci->usb_phy)
usb_phy_set_charger_state(ci->usb_phy, is_active ?
USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
- if (gadget_ready) {
- if (is_active) {
- pm_runtime_get_sync(&_gadget->dev);
- hw_device_reset(ci);
- hw_device_state(ci, ci->ep0out->qh.dma);
- usb_gadget_set_state(_gadget, USB_STATE_POWERED);
- usb_udc_vbus_handler(_gadget, true);
- } else {
- usb_udc_vbus_handler(_gadget, false);
- if (ci->driver)
- ci->driver->disconnect(&ci->gadget);
- hw_device_state(ci, 0);
- if (ci->platdata->notify_event)
- ci->platdata->notify_event(ci,
- CI_HDRC_CONTROLLER_STOPPED_EVENT);
- _gadget_stop_activity(&ci->gadget);
- pm_runtime_put_sync(&_gadget->dev);
- usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
- }
- }
+ if (ci->driver)
+ ci_hdrc_gadget_connect(_gadget, is_active);
return 0;
}
@@ -1612,7 +1623,7 @@ static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
}
/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
+ * this func is used by usb_gadget_connect/disconnect
*/
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
@@ -1785,18 +1796,10 @@ static int ci_udc_start(struct usb_gadget *gadget,
return retval;
}
- pm_runtime_get_sync(&ci->gadget.dev);
- if (ci->vbus_active) {
- hw_device_reset(ci);
- } else {
+ if (ci->vbus_active)
+ ci_hdrc_gadget_connect(gadget, 1);
+ else
usb_udc_vbus_handler(&ci->gadget, false);
- pm_runtime_put_sync(&ci->gadget.dev);
- return retval;
- }
-
- retval = hw_device_state(ci, ci->ep0out->qh.dma);
- if (retval)
- pm_runtime_put_sync(&ci->gadget.dev);
return retval;
}
@@ -1826,6 +1829,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
unsigned long flags;
spin_lock_irqsave(&ci->lock, flags);
+ ci->driver = NULL;
if (ci->vbus_active) {
hw_device_state(ci, 0);
@@ -1838,7 +1842,6 @@ static int ci_udc_stop(struct usb_gadget *gadget)
pm_runtime_put(&ci->gadget.dev);
}
- ci->driver = NULL;
spin_unlock_irqrestore(&ci->lock, flags);
ci_udc_stop_for_otg_fsm(ci);
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 078c1fdce493..e81e33c26e6c 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -100,6 +100,9 @@
#define MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID MX7D_USB_VBUS_WAKEUP_SOURCE(2)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_SESS_END MX7D_USB_VBUS_WAKEUP_SOURCE(3)
+#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
+ MX6_BM_ID_WAKEUP)
+
struct usbmisc_ops {
/* It's called once when probe a usb device */
int (*init)(struct imx_usbmisc_data *data);
@@ -330,14 +333,25 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
return 0;
}
+static u32 usbmisc_wakeup_setting(struct imx_usbmisc_data *data)
+{
+ u32 wakeup_setting = MX6_USB_OTG_WAKEUP_BITS;
+
+ if (data->ext_id)
+ wakeup_setting &= ~MX6_BM_ID_WAKEUP;
+
+ if (data->ext_vbus)
+ wakeup_setting &= ~MX6_BM_VBUS_WAKEUP;
+
+ return wakeup_setting;
+}
+
static int usbmisc_imx6q_set_wakeup
(struct imx_usbmisc_data *data, bool enabled)
{
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
unsigned long flags;
u32 val;
- u32 wakeup_setting = (MX6_BM_WAKEUP_ENABLE |
- MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP);
int ret = 0;
if (data->index > 3)
@@ -346,11 +360,12 @@ static int usbmisc_imx6q_set_wakeup
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base + data->index * 4);
if (enabled) {
- val |= wakeup_setting;
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
+ val |= usbmisc_wakeup_setting(data);
} else {
if (val & MX6_BM_WAKEUP_INTR)
pr_debug("wakeup int at ci_hdrc.%d\n", data->index);
- val &= ~wakeup_setting;
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
}
writel(val, usbmisc->base + data->index * 4);
spin_unlock_irqrestore(&usbmisc->lock, flags);
@@ -547,17 +562,17 @@ static int usbmisc_imx7d_set_wakeup
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
unsigned long flags;
u32 val;
- u32 wakeup_setting = (MX6_BM_WAKEUP_ENABLE |
- MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP);
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base);
if (enabled) {
- writel(val | wakeup_setting, usbmisc->base);
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
+ val |= usbmisc_wakeup_setting(data);
+ writel(val, usbmisc->base);
} else {
if (val & MX6_BM_WAKEUP_INTR)
dev_dbg(data->dev, "wakeup int\n");
- writel(val & ~wakeup_setting, usbmisc->base);
+ writel(val & ~MX6_USB_OTG_WAKEUP_BITS, usbmisc->base);
}
spin_unlock_irqrestore(&usbmisc->lock, flags);
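Both set_wakeup hunks above now clear the full MX6_USB_OTG_WAKEUP_BITS mask before OR-ing in the per-instance value from usbmisc_wakeup_setting(), so an ID or VBUS wakeup source left over from an earlier configuration cannot linger. Reduced to its generic shape, with hypothetical register and mask names, the read-modify-write looks roughly like this:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#define WAKEUP_ALL_BITS         (BIT(0) | BIT(1) | BIT(2))

static void example_set_wakeup(void __iomem *reg, spinlock_t *lock,
                               u32 wanted, bool enable)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(lock, flags);
        val = readl(reg);
        val &= ~WAKEUP_ALL_BITS;                /* drop any stale sources first */
        if (enable)
                val |= wanted & WAKEUP_ALL_BITS;
        writel(val, reg);
        spin_unlock_irqrestore(lock, flags);
}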
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 70afb2ca1eab..e3db6fbeadef 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -734,7 +734,7 @@ static const struct file_operations wdm_fops = {
.release = wdm_release,
.poll = wdm_poll,
.unlocked_ioctl = wdm_ioctl,
- .compat_ioctl = wdm_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
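The cdc-wdm change above, and the usbtmc, usbfs and f_fs hunks that follow, all drop a hand-rolled compat_ioctl wrapper in favour of the generic compat_ptr_ioctl() helper. A minimal sketch of the resulting pattern, with a hypothetical "foo" driver, looks like this:

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        /* for 32-bit callers, arg has already been run through compat_ptr() */
        void __user *argp = (void __user *)arg;

        (void)argp;
        return -ENOTTY;
}

static const struct file_operations foo_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = foo_ioctl,
        /* converts the pointer argument, then falls through to ->unlocked_ioctl() */
        .compat_ioctl   = compat_ptr_ioctl,
        .llseek         = noop_llseek,
};

This shortcut only fits drivers whose ioctl argument is always a user pointer (or unused); commands that pass scalar values in arg still need a dedicated compat handler.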
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index dcd7066ffba2..ffc9c6fdd7e1 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -2217,9 +2217,7 @@ static const struct file_operations fops = {
.release = usbtmc_release,
.flush = usbtmc_flush,
.unlocked_ioctl = usbtmc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = usbtmc_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.fasync = usbtmc_fasync,
.poll = usbtmc_poll,
.llseek = default_llseek,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 1ac1095bfeac..5f40117e68e7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -805,10 +805,10 @@ int usb_get_configuration(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
int ncfg = dev->descriptor.bNumConfigurations;
- int result = -ENOMEM;
unsigned int cfgno, length;
unsigned char *bigbuffer;
struct usb_config_descriptor *desc;
+ int result;
if (ncfg > USB_MAXCONFIG) {
dev_warn(ddev, "too many configurations: %d, "
@@ -824,16 +824,16 @@ int usb_get_configuration(struct usb_device *dev)
length = ncfg * sizeof(struct usb_host_config);
dev->config = kzalloc(length, GFP_KERNEL);
if (!dev->config)
- goto err2;
+ return -ENOMEM;
length = ncfg * sizeof(char *);
dev->rawdescriptors = kzalloc(length, GFP_KERNEL);
if (!dev->rawdescriptors)
- goto err2;
+ return -ENOMEM;
desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
if (!desc)
- goto err2;
+ return -ENOMEM;
for (cfgno = 0; cfgno < ncfg; cfgno++) {
/* We grab just the first descriptor so we know how long
@@ -895,9 +895,7 @@ int usb_get_configuration(struct usb_device *dev)
err:
kfree(desc);
dev->descriptor.bNumConfigurations = cfgno;
-err2:
- if (result == -ENOMEM)
- dev_err(ddev, "out of memory\n");
+
return result;
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 3f899552f6e3..12bb5722b420 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -764,8 +764,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
- else
+ else {
+ unsigned int old_suppress;
+
+ /* suppress uevents while claiming interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
+ }
if (err == 0)
set_bit(ifnum, &ps->ifclaimed);
return err;
@@ -785,7 +792,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
if (!intf)
err = -ENOENT;
else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
+ unsigned int old_suppress;
+
+ /* suppress uevents while releasing interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
usb_driver_release_interface(&usbfs_driver, intf);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
err = 0;
}
return err;
@@ -1550,10 +1563,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
uurb->buffer_length = le16_to_cpu(dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
- is_in = 1;
+ is_in = true;
uurb->endpoint |= USB_DIR_IN;
} else {
- is_in = 0;
+ is_in = false;
uurb->endpoint &= ~USB_DIR_IN;
}
if (is_in)
@@ -2685,18 +2698,6 @@ static long usbdev_ioctl(struct file *file, unsigned int cmd,
return ret;
}
-#ifdef CONFIG_COMPAT
-static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
-
- ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg));
-
- return ret;
-}
-#endif
-
/* No kernel lock - fine */
static __poll_t usbdev_poll(struct file *file,
struct poll_table_struct *wait)
@@ -2720,9 +2721,7 @@ const struct file_operations usbdev_file_operations = {
.read = usbdev_read,
.poll = usbdev_poll,
.unlocked_ioctl = usbdev_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = usbdev_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = usbdev_mmap,
.open = usbdev_open,
.release = usbdev_release,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 9e26b0143a59..9ae2a7a93df2 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -234,7 +234,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* UHCI */
int region;
- for (region = 0; region < PCI_ROM_RESOURCE; region++) {
+ for (region = 0; region < PCI_STD_NUM_BARS; region++) {
if (!(pci_resource_flags(dev, region) &
IORESOURCE_IO))
continue;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index f225eaa98ff8..281568d464f9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1410,10 +1410,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (hcd->self.uses_pio_for_control)
return ret;
if (hcd_uses_dma(hcd)) {
- if (is_vmalloc_addr(urb->setup_packet)) {
- WARN_ONCE(1, "setup packet is not dma capable\n");
- return -EAGAIN;
- } else if (object_is_on_stack(urb->setup_packet)) {
+ if (object_is_on_stack(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is on stack\n");
return -EAGAIN;
}
@@ -1479,9 +1476,6 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_PAGE;
- } else if (is_vmalloc_addr(urb->transfer_buffer)) {
- WARN_ONCE(1, "transfer buffer not dma capable\n");
- ret = -EAGAIN;
} else if (object_is_on_stack(urb->transfer_buffer)) {
WARN_ONCE(1, "transfer buffer is on stack\n");
ret = -EAGAIN;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 236313f41f4a..1709895387b9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4930,6 +4930,91 @@ hub_power_remaining(struct usb_hub *hub)
return remaining;
}
+
+static int descriptors_changed(struct usb_device *udev,
+ struct usb_device_descriptor *old_device_descriptor,
+ struct usb_host_bos *old_bos)
+{
+ int changed = 0;
+ unsigned index;
+ unsigned serial_len = 0;
+ unsigned len;
+ unsigned old_length;
+ int length;
+ char *buf;
+
+ if (memcmp(&udev->descriptor, old_device_descriptor,
+ sizeof(*old_device_descriptor)) != 0)
+ return 1;
+
+ if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
+ return 1;
+ if (udev->bos) {
+ len = le16_to_cpu(udev->bos->desc->wTotalLength);
+ if (len != le16_to_cpu(old_bos->desc->wTotalLength))
+ return 1;
+ if (memcmp(udev->bos->desc, old_bos->desc, len))
+ return 1;
+ }
+
+ /* Since the idVendor, idProduct, and bcdDevice values in the
+ * device descriptor haven't changed, we will assume the
+ * Manufacturer and Product strings haven't changed either.
+ * But the SerialNumber string could be different (e.g., a
+ * different flash card of the same brand).
+ */
+ if (udev->serial)
+ serial_len = strlen(udev->serial) + 1;
+
+ len = serial_len;
+ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
+ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
+ len = max(len, old_length);
+ }
+
+ buf = kmalloc(len, GFP_NOIO);
+ if (!buf)
+ /* assume the worst */
+ return 1;
+
+ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
+ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
+ length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
+ old_length);
+ if (length != old_length) {
+ dev_dbg(&udev->dev, "config index %d, error %d\n",
+ index, length);
+ changed = 1;
+ break;
+ }
+ if (memcmp(buf, udev->rawdescriptors[index], old_length)
+ != 0) {
+ dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
+ index,
+ ((struct usb_config_descriptor *) buf)->
+ bConfigurationValue);
+ changed = 1;
+ break;
+ }
+ }
+
+ if (!changed && serial_len) {
+ length = usb_string(udev, udev->descriptor.iSerialNumber,
+ buf, serial_len);
+ if (length + 1 != serial_len) {
+ dev_dbg(&udev->dev, "serial string error %d\n",
+ length);
+ changed = 1;
+ } else if (memcmp(buf, udev->serial, length) != 0) {
+ dev_dbg(&udev->dev, "serial string changed\n");
+ changed = 1;
+ }
+ }
+
+ kfree(buf);
+ return changed;
+}
+
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
@@ -5167,7 +5252,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
+ struct usb_device_descriptor descriptor;
int status = -ENODEV;
+ int retval;
dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
portchange, portspeed(hub, portstatus));
@@ -5188,7 +5275,30 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
udev->state != USB_STATE_NOTATTACHED) {
if (portstatus & USB_PORT_STAT_ENABLE) {
- status = 0; /* Nothing to do */
+ /*
+ * USB-3 connections are initialized automatically by
+			 * the host controller hardware. Therefore check for
+ * changed device descriptors before resuscitating the
+ * device.
+ */
+ descriptor = udev->descriptor;
+ retval = usb_get_device_descriptor(udev,
+ sizeof(udev->descriptor));
+ if (retval < 0) {
+ dev_dbg(&udev->dev,
+ "can't read device descriptor %d\n",
+ retval);
+ } else {
+ if (descriptors_changed(udev, &descriptor,
+ udev->bos)) {
+ dev_dbg(&udev->dev,
+ "device descriptor has changed\n");
+ /* for disconnect() calls */
+ udev->descriptor = descriptor;
+ } else {
+ status = 0; /* Nothing to do */
+ }
+ }
#ifdef CONFIG_PM
} else if (udev->state == USB_STATE_SUSPENDED &&
udev->persist_enabled) {
@@ -5550,90 +5660,6 @@ void usb_hub_cleanup(void)
usb_deregister(&hub_driver);
} /* usb_hub_cleanup() */
-static int descriptors_changed(struct usb_device *udev,
- struct usb_device_descriptor *old_device_descriptor,
- struct usb_host_bos *old_bos)
-{
- int changed = 0;
- unsigned index;
- unsigned serial_len = 0;
- unsigned len;
- unsigned old_length;
- int length;
- char *buf;
-
- if (memcmp(&udev->descriptor, old_device_descriptor,
- sizeof(*old_device_descriptor)) != 0)
- return 1;
-
- if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
- return 1;
- if (udev->bos) {
- len = le16_to_cpu(udev->bos->desc->wTotalLength);
- if (len != le16_to_cpu(old_bos->desc->wTotalLength))
- return 1;
- if (memcmp(udev->bos->desc, old_bos->desc, len))
- return 1;
- }
-
- /* Since the idVendor, idProduct, and bcdDevice values in the
- * device descriptor haven't changed, we will assume the
- * Manufacturer and Product strings haven't changed either.
- * But the SerialNumber string could be different (e.g., a
- * different flash card of the same brand).
- */
- if (udev->serial)
- serial_len = strlen(udev->serial) + 1;
-
- len = serial_len;
- for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
- old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
- len = max(len, old_length);
- }
-
- buf = kmalloc(len, GFP_NOIO);
- if (!buf)
- /* assume the worst */
- return 1;
-
- for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
- old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
- length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
- old_length);
- if (length != old_length) {
- dev_dbg(&udev->dev, "config index %d, error %d\n",
- index, length);
- changed = 1;
- break;
- }
- if (memcmp(buf, udev->rawdescriptors[index], old_length)
- != 0) {
- dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
- index,
- ((struct usb_config_descriptor *) buf)->
- bConfigurationValue);
- changed = 1;
- break;
- }
- }
-
- if (!changed && serial_len) {
- length = usb_string(udev, udev->descriptor.iSerialNumber,
- buf, serial_len);
- if (length + 1 != serial_len) {
- dev_dbg(&udev->dev, "serial string error %d\n",
- length);
- changed = 1;
- } else if (memcmp(buf, udev->serial, length) != 0) {
- dev_dbg(&udev->dev, "serial string changed\n");
- changed = 1;
- }
- }
-
- kfree(buf);
- return changed;
-}
-
/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
@@ -5814,7 +5840,7 @@ re_enumerate_no_bos:
/**
* usb_reset_device - warn interface drivers and perform a USB port reset
- * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
+ * @udev: device to reset (not in NOTATTACHED state)
*
* Warns all drivers bound to registered interfaces (using their pre_reset
* method), performs the port reset, and then lets the drivers know that
@@ -5842,8 +5868,7 @@ int usb_reset_device(struct usb_device *udev)
struct usb_host_config *config = udev->actconfig;
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
- if (udev->state == USB_STATE_NOTATTACHED ||
- udev->state == USB_STATE_SUSPENDED) {
+ if (udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8e41d70fd298..78a4925aa118 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -524,7 +524,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
greset |= GRSTCTL_CSFTRST;
dwc2_writel(hsotg, greset, GRSTCTL);
- if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) {
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) {
dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n",
__func__);
return -EBUSY;
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index d08d070a0fb6..968e03b89d04 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -134,7 +134,7 @@ struct dwc2_hsotg_req;
* @target_frame: Targeted frame num to setup next ISOC transfer
* @frame_overrun: Indicates SOF number overrun in DSTS
*
- * This is the driver's state for each registered enpoint, allowing it
+ * This is the driver's state for each registered endpoint, allowing it
* to keep track of transactions that need doing. Each endpoint has a
* lock to protect the state, to try and avoid using an overall lock
* for the host controller as much as possible.
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 7f62f4cdc265..b8f2790abf91 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -770,7 +770,7 @@ int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
int ret;
struct dentry *root;
- root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
+ root = debugfs_create_dir(dev_name(hsotg->dev), usb_debug_root);
hsotg->debug_root = root;
debugfs_create_file("params", 0444, root, hsotg, &params_fops);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 556a876c7896..206caa0ea1c6 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -97,24 +97,24 @@ config USB_DWC3_KEYSTONE
Say 'Y' or 'M' here if you have one such device
config USB_DWC3_MESON_G12A
- tristate "Amlogic Meson G12A Platforms"
- depends on OF && COMMON_CLK
- depends on ARCH_MESON || COMPILE_TEST
- default USB_DWC3
- select USB_ROLE_SWITCH
+ tristate "Amlogic Meson G12A Platforms"
+ depends on OF && COMMON_CLK
+ depends on ARCH_MESON || COMPILE_TEST
+ default USB_DWC3
+ select USB_ROLE_SWITCH
select REGMAP_MMIO
- help
- Support USB2/3 functionality in Amlogic G12A platforms.
- Say 'Y' or 'M' if you have one such device.
+ help
+ Support USB2/3 functionality in Amlogic G12A platforms.
+ Say 'Y' or 'M' if you have one such device.
config USB_DWC3_OF_SIMPLE
- tristate "Generic OF Simple Glue Layer"
- depends on OF && COMMON_CLK
- default USB_DWC3
- help
- Support USB2/3 functionality in simple SoC integrations.
- Currently supports Xilinx and Qualcomm DWC USB3 IP.
- Say 'Y' or 'M' if you have one such device.
+ tristate "Generic OF Simple Glue Layer"
+ depends on OF && COMMON_CLK
+ default USB_DWC3
+ help
+ Support USB2/3 functionality in simple SoC integrations.
+ Currently supports Xilinx and Qualcomm DWC USB3 IP.
+ Say 'Y' or 'M' if you have one such device.
config USB_DWC3_ST
tristate "STMicroelectronics Platforms"
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 97d6ae3c4df2..f561c6c9e8a9 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -566,8 +566,11 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc)
*/
static int dwc3_phy_setup(struct dwc3 *dwc)
{
+ unsigned int hw_mode;
u32 reg;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
/*
@@ -585,6 +588,14 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->revision > DWC3_REVISION_194A)
reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
if (dwc->u2ss_inp3_quirk)
reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
@@ -668,6 +679,14 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->revision > DWC3_REVISION_194A)
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ /*
+ * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
if (dwc->dis_u2_susphy_quirk)
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
@@ -902,9 +921,12 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
*/
static int dwc3_core_init(struct dwc3 *dwc)
{
+ unsigned int hw_mode;
u32 reg;
int ret;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
/*
* Write Linux Version Code to our GUID register so it's easy to figure
* out which kernel version a bug was found.
@@ -940,6 +962,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err0a;
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
+ dwc->revision > DWC3_REVISION_194A) {
+ if (!dwc->dis_u3_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ }
+
+ if (!dwc->dis_u2_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+ }
+
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 9baabed87d61..e56beb9d1e36 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -112,7 +112,7 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
case DWC3_LINK_STATE_RESUME:
return "Resume";
default:
- return "UNKNOWN link state\n";
+ return "UNKNOWN link state";
}
}
@@ -141,7 +141,7 @@ dwc3_gadget_hs_link_string(enum dwc3_link_state link_state)
case DWC3_LINK_STATE_RESUME:
return "Resume";
default:
- return "UNKNOWN link state\n";
+ return "UNKNOWN link state";
}
}
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 1c792710348f..4fe8b1e1485c 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -916,7 +916,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
- root = debugfs_create_dir(dev_name(dwc->dev), NULL);
+ root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
dwc->root = root;
debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index bdac3e7d7b18..e64754be47b4 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -110,12 +110,9 @@ err_resetc_put:
return ret;
}
-static int dwc3_of_simple_remove(struct platform_device *pdev)
+static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
{
- struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
-
- of_platform_depopulate(dev);
+ of_platform_depopulate(simple->dev);
clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
clk_bulk_put_all(simple->num_clocks, simple->clks);
@@ -126,13 +123,27 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
reset_control_put(simple->resets);
- pm_runtime_disable(dev);
- pm_runtime_put_noidle(dev);
- pm_runtime_set_suspended(dev);
+ pm_runtime_disable(simple->dev);
+ pm_runtime_put_noidle(simple->dev);
+ pm_runtime_set_suspended(simple->dev);
+}
+
+static int dwc3_of_simple_remove(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
return 0;
}
+static void dwc3_of_simple_shutdown(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
+}
+
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
@@ -190,6 +201,7 @@ MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
static struct platform_driver dwc3_of_simple_driver = {
.probe = dwc3_of_simple_probe,
.remove = dwc3_of_simple_remove,
+ .shutdown = dwc3_of_simple_shutdown,
.driver = {
.name = "dwc3-of-simple",
.of_match_table = of_dwc3_simple_match,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5ec54b69c29c..3b4f67000315 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -794,9 +794,9 @@ static int set_config(struct usb_composite_dev *cdev,
result = 0;
}
- INFO(cdev, "%s config #%d: %s\n",
- usb_speed_string(gadget->speed),
- number, c ? c->label : "unconfigured");
+ DBG(cdev, "%s config #%d: %s\n",
+ usb_speed_string(gadget->speed),
+ number, c ? c->label : "unconfigured");
if (!c)
goto done;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 33852c2b29d1..ab9ac48a751a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1544,6 +1544,7 @@ static struct config_group *gadgets_make(
gi->composite.resume = NULL;
gi->composite.max_speed = USB_SPEED_SUPER;
+ spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
INIT_LIST_HEAD(&gi->string_list);
INIT_LIST_HEAD(&gi->available_func);
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 9fc98de83624..7c152c28b26c 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -771,6 +771,24 @@ static struct configfs_item_operations acm_item_ops = {
.release = acm_attr_release,
};
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+static ssize_t f_acm_console_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return gserial_set_console(to_f_serial_opts(item)->port_num,
+ page, count);
+}
+
+static ssize_t f_acm_console_show(struct config_item *item, char *page)
+{
+ return gserial_get_console(to_f_serial_opts(item)->port_num, page);
+}
+
+CONFIGFS_ATTR(f_acm_, console);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
@@ -779,6 +797,9 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_acm_, port_num);
static struct configfs_attribute *acm_attrs[] = {
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ &f_acm_attr_console,
+#endif
&f_acm_attr_port_num,
NULL,
};
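The console attribute added here (and the matching one in f_serial.c below) relies on the CONFIGFS_ATTR() naming convention: CONFIGFS_ATTR(foo_, bar) expects foo_bar_show()/foo_bar_store() and emits a struct configfs_attribute named foo_attr_bar for the attrs[] table. A stand-alone sketch with hypothetical names:

#include <linux/configfs.h>
#include <linux/kernel.h>

static ssize_t foo_bar_show(struct config_item *item, char *page)
{
        return sprintf(page, "%d\n", 1);        /* report a fixed value for illustration */
}

static ssize_t foo_bar_store(struct config_item *item,
                             const char *page, size_t count)
{
        return count;                           /* accept and ignore the input */
}

CONFIGFS_ATTR(foo_, bar);

static struct configfs_attribute *foo_attrs[] = {
        &foo_attr_bar,
        NULL,
};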
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 59d9d512dcda..ce1d0235969c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1352,14 +1352,6 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
return ret;
}
-#ifdef CONFIG_COMPAT
-static long ffs_epfile_compat_ioctl(struct file *file, unsigned code,
- unsigned long value)
-{
- return ffs_epfile_ioctl(file, code, value);
-}
-#endif
-
static const struct file_operations ffs_epfile_operations = {
.llseek = no_llseek,
@@ -1368,9 +1360,7 @@ static const struct file_operations ffs_epfile_operations = {
.read_iter = ffs_epfile_read_iter,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ffs_epfile_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index 55b7f57d2dc7..ab26d84ed95e 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -432,7 +432,7 @@ static struct usb_function_instance *obex_alloc_inst(void)
return ERR_PTR(-ENOMEM);
opts->func_inst.free_func_inst = obex_free_inst;
- ret = gserial_alloc_line(&opts->port_num);
+ ret = gserial_alloc_line_no_console(&opts->port_num);
if (ret) {
kfree(opts);
return ERR_PTR(ret);
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index c860f30a0ea2..1406255d0865 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -266,6 +266,24 @@ static struct configfs_item_operations serial_item_ops = {
.release = serial_attr_release,
};
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+static ssize_t f_serial_console_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return gserial_set_console(to_f_serial_opts(item)->port_num,
+ page, count);
+}
+
+static ssize_t f_serial_console_show(struct config_item *item, char *page)
+{
+ return gserial_get_console(to_f_serial_opts(item)->port_num, page);
+}
+
+CONFIGFS_ATTR(f_serial_, console);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
@@ -274,6 +292,9 @@ static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_serial_, port_num);
static struct configfs_attribute *acm_attrs[] = {
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ &f_serial_attr_console,
+#endif
&f_serial_attr_port_num,
NULL,
};
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 7f01f78b1d23..36504931b2d1 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -846,7 +846,7 @@ static void uasp_set_alt(struct f_uas *fu)
fu->flags = USBG_IS_UAS;
- if (gadget->speed == USB_SPEED_SUPER)
+ if (gadget->speed >= USB_SPEED_SUPER)
fu->flags |= USBG_USE_STREAMS;
config_ep_by_speed(gadget, f, fu->ep_in);
@@ -2093,6 +2093,16 @@ static void tcm_delayed_set_alt(struct work_struct *wq)
usb_composite_setup_continue(fu->function.config->cdev);
}
+static int tcm_get_alt(struct usb_function *f, unsigned intf)
+{
+ if (intf == bot_intf_desc.bInterfaceNumber)
+ return USB_G_ALT_INT_BBB;
+ if (intf == uasp_intf_desc.bInterfaceNumber)
+ return USB_G_ALT_INT_UAS;
+
+ return -EOPNOTSUPP;
+}
+
static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_uas *fu = to_f_uas(f);
@@ -2300,6 +2310,7 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
fu->function.bind = tcm_bind;
fu->function.unbind = tcm_unbind;
fu->function.set_alt = tcm_set_alt;
+ fu->function.get_alt = tcm_get_alt;
fu->function.setup = tcm_setup;
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 56906d15fb55..7ec6a996af26 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -585,7 +585,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
sprintf(card->longname, "%s %i", card_name, card->dev->id);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
+ NULL, 0, BUFF_SIZE_MAX);
err = snd_card_register(card);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 65f634ec7fc2..f986e5c55974 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -82,14 +82,13 @@
#define GS_CONSOLE_BUF_SIZE 8192
/* console info */
-struct gscons_info {
- struct gs_port *port;
- struct task_struct *console_thread;
- struct kfifo con_buf;
- /* protect the buf and busy flag */
- spinlock_t con_lock;
- int req_busy;
- struct usb_request *console_req;
+struct gs_console {
+ struct console console;
+ struct work_struct work;
+ spinlock_t lock;
+ struct usb_request *req;
+ struct kfifo buf;
+ size_t missed;
};
/*
@@ -101,8 +100,10 @@ struct gs_port {
spinlock_t port_lock; /* guard port_* access */
struct gserial *port_usb;
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ struct gs_console *console;
+#endif
- bool openclose; /* open/close in progress */
u8 port_num;
struct list_head read_pool;
@@ -586,82 +587,45 @@ static int gs_open(struct tty_struct *tty, struct file *file)
{
int port_num = tty->index;
struct gs_port *port;
- int status;
-
- do {
- mutex_lock(&ports[port_num].lock);
- port = ports[port_num].port;
- if (!port)
- status = -ENODEV;
- else {
- spin_lock_irq(&port->port_lock);
-
- /* already open? Great. */
- if (port->port.count) {
- status = 0;
- port->port.count++;
-
- /* currently opening/closing? wait ... */
- } else if (port->openclose) {
- status = -EBUSY;
-
- /* ... else we do the work */
- } else {
- status = -EAGAIN;
- port->openclose = true;
- }
- spin_unlock_irq(&port->port_lock);
- }
- mutex_unlock(&ports[port_num].lock);
+ int status = 0;
- switch (status) {
- default:
- /* fully handled */
- return status;
- case -EAGAIN:
- /* must do the work */
- break;
- case -EBUSY:
- /* wait for EAGAIN task to finish */
- msleep(1);
- /* REVISIT could have a waitchannel here, if
- * concurrent open performance is important
- */
- break;
- }
- } while (status != -EAGAIN);
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+ if (!port) {
+ status = -ENODEV;
+ goto out;
+ }
- /* Do the "real open" */
spin_lock_irq(&port->port_lock);
/* allocate circular buffer on first open */
if (!kfifo_initialized(&port->port_write_buf)) {
spin_unlock_irq(&port->port_lock);
+
+ /*
+ * portmaster's mutex still protects from simultaneous open(),
+ * and close() can't happen, yet.
+ */
+
status = kfifo_alloc(&port->port_write_buf,
WRITE_BUF_SIZE, GFP_KERNEL);
- spin_lock_irq(&port->port_lock);
-
if (status) {
pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
- port->port_num, tty, file);
- port->openclose = false;
- goto exit_unlock_port;
+ port_num, tty, file);
+ goto out;
}
- }
- /* REVISIT if REMOVED (ports[].port NULL), abort the open
- * to let rmmod work faster (but this way isn't wrong).
- */
+ spin_lock_irq(&port->port_lock);
+ }
- /* REVISIT maybe wait for "carrier detect" */
+ /* already open? Great. */
+ if (port->port.count++)
+ goto exit_unlock_port;
tty->driver_data = port;
port->port.tty = tty;
- port->port.count = 1;
- port->openclose = false;
-
/* if connected, start the I/O stream */
if (port->port_usb) {
struct gserial *gser = port->port_usb;
@@ -675,20 +639,21 @@ static int gs_open(struct tty_struct *tty, struct file *file)
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
- status = 0;
-
exit_unlock_port:
spin_unlock_irq(&port->port_lock);
+out:
+ mutex_unlock(&ports[port_num].lock);
return status;
}
-static int gs_writes_finished(struct gs_port *p)
+static int gs_close_flush_done(struct gs_port *p)
{
int cond;
- /* return true on disconnect or empty buffer */
+ /* return true on disconnect or empty buffer or if raced with open() */
spin_lock_irq(&p->port_lock);
- cond = (p->port_usb == NULL) || !kfifo_len(&p->port_write_buf);
+ cond = p->port_usb == NULL || !kfifo_len(&p->port_write_buf) ||
+ p->port.count > 1;
spin_unlock_irq(&p->port_lock);
return cond;
@@ -702,6 +667,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
spin_lock_irq(&port->port_lock);
if (port->port.count != 1) {
+raced_with_open:
if (port->port.count == 0)
WARN_ON(1);
else
@@ -711,12 +677,6 @@ static void gs_close(struct tty_struct *tty, struct file *file)
pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
- /* mark port as closing but in use; we can drop port lock
- * and sleep if necessary
- */
- port->openclose = true;
- port->port.count = 0;
-
gser = port->port_usb;
if (gser && gser->disconnect)
gser->disconnect(gser);
@@ -727,9 +687,13 @@ static void gs_close(struct tty_struct *tty, struct file *file)
if (kfifo_len(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
- gs_writes_finished(port),
+ gs_close_flush_done(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
+
+ if (port->port.count != 1)
+ goto raced_with_open;
+
gser = port->port_usb;
}
@@ -742,10 +706,9 @@ static void gs_close(struct tty_struct *tty, struct file *file)
else
kfifo_reset(&port->port_write_buf);
+ port->port.count = 0;
port->port.tty = NULL;
- port->openclose = false;
-
pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
port->port_num, tty, file);
@@ -889,36 +852,9 @@ static struct tty_driver *gs_tty_driver;
#ifdef CONFIG_U_SERIAL_CONSOLE
-static struct gscons_info gscons_info;
-static struct console gserial_cons;
-
-static struct usb_request *gs_request_new(struct usb_ep *ep)
-{
- struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (!req)
- return NULL;
-
- req->buf = kmalloc(ep->maxpacket, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- return NULL;
- }
-
- return req;
-}
-
-static void gs_request_free(struct usb_request *req, struct usb_ep *ep)
+static void gs_console_complete_out(struct usb_ep *ep, struct usb_request *req)
{
- if (!req)
- return;
-
- kfree(req->buf);
- usb_ep_free_request(ep, req);
-}
-
-static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
-{
- struct gscons_info *info = &gscons_info;
+ struct gs_console *cons = req->context;
switch (req->status) {
default:
@@ -927,12 +863,12 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
/* fall through */
case 0:
/* normal completion */
- spin_lock(&info->con_lock);
- info->req_busy = 0;
- spin_unlock(&info->con_lock);
-
- wake_up_process(info->console_thread);
+ spin_lock(&cons->lock);
+ req->length = 0;
+ schedule_work(&cons->work);
+ spin_unlock(&cons->lock);
break;
+ case -ECONNRESET:
case -ESHUTDOWN:
/* disconnect */
pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
@@ -940,190 +876,250 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
}
}
-static int gs_console_connect(int port_num)
+static void __gs_console_push(struct gs_console *cons)
{
- struct gscons_info *info = &gscons_info;
- struct gs_port *port;
+ struct usb_request *req = cons->req;
struct usb_ep *ep;
+ size_t size;
- if (port_num != gserial_cons.index) {
- pr_err("%s: port num [%d] is not support console\n",
- __func__, port_num);
- return -ENXIO;
- }
+ if (!req)
+ return; /* disconnected */
- port = ports[port_num].port;
- ep = port->port_usb->in;
- if (!info->console_req) {
- info->console_req = gs_request_new(ep);
- if (!info->console_req)
- return -ENOMEM;
- info->console_req->complete = gs_complete_out;
+ if (req->length)
+ return; /* busy */
+
+ ep = cons->console.data;
+ size = kfifo_out(&cons->buf, req->buf, ep->maxpacket);
+ if (!size)
+ return;
+
+ if (cons->missed && ep->maxpacket >= 64) {
+ char buf[64];
+ size_t len;
+
+ len = sprintf(buf, "\n[missed %zu bytes]\n", cons->missed);
+ kfifo_in(&cons->buf, buf, len);
+ cons->missed = 0;
}
- info->port = port;
- spin_lock(&info->con_lock);
- info->req_busy = 0;
- spin_unlock(&info->con_lock);
- pr_vdebug("port[%d] console connect!\n", port_num);
- return 0;
+ req->length = size;
+ if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ req->length = 0;
}
-static void gs_console_disconnect(struct usb_ep *ep)
+static void gs_console_work(struct work_struct *work)
{
- struct gscons_info *info = &gscons_info;
- struct usb_request *req = info->console_req;
+ struct gs_console *cons = container_of(work, struct gs_console, work);
+
+ spin_lock_irq(&cons->lock);
- gs_request_free(req, ep);
- info->console_req = NULL;
+ __gs_console_push(cons);
+
+ spin_unlock_irq(&cons->lock);
}
-static int gs_console_thread(void *data)
+static void gs_console_write(struct console *co,
+ const char *buf, unsigned count)
{
- struct gscons_info *info = &gscons_info;
- struct gs_port *port;
+ struct gs_console *cons = container_of(co, struct gs_console, console);
+ unsigned long flags;
+ size_t n;
+
+ spin_lock_irqsave(&cons->lock, flags);
+
+ n = kfifo_in(&cons->buf, buf, count);
+ if (n < count)
+ cons->missed += count - n;
+
+ if (cons->req && !cons->req->length)
+ schedule_work(&cons->work);
+
+ spin_unlock_irqrestore(&cons->lock, flags);
+}
+
+static struct tty_driver *gs_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return gs_tty_driver;
+}
+
+static int gs_console_connect(struct gs_port *port)
+{
+ struct gs_console *cons = port->console;
struct usb_request *req;
struct usb_ep *ep;
- int xfer, ret, count, size;
-
- do {
- port = info->port;
- set_current_state(TASK_INTERRUPTIBLE);
- if (!port || !port->port_usb
- || !port->port_usb->in || !info->console_req)
- goto sched;
-
- req = info->console_req;
- ep = port->port_usb->in;
-
- spin_lock_irq(&info->con_lock);
- count = kfifo_len(&info->con_buf);
- size = ep->maxpacket;
-
- if (count > 0 && !info->req_busy) {
- set_current_state(TASK_RUNNING);
- if (count < size)
- size = count;
-
- xfer = kfifo_out(&info->con_buf, req->buf, size);
- req->length = xfer;
-
- spin_unlock(&info->con_lock);
- ret = usb_ep_queue(ep, req, GFP_ATOMIC);
- spin_lock(&info->con_lock);
- if (ret < 0)
- info->req_busy = 0;
- else
- info->req_busy = 1;
-
- spin_unlock_irq(&info->con_lock);
- } else {
- spin_unlock_irq(&info->con_lock);
-sched:
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- schedule();
- }
- } while (1);
+
+ if (!cons)
+ return 0;
+
+ ep = port->port_usb->in;
+ req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+ req->complete = gs_console_complete_out;
+ req->context = cons;
+ req->length = 0;
+
+ spin_lock(&cons->lock);
+ cons->req = req;
+ cons->console.data = ep;
+ spin_unlock(&cons->lock);
+
+ pr_debug("ttyGS%d: console connected!\n", port->port_num);
+
+ schedule_work(&cons->work);
return 0;
}
-static int gs_console_setup(struct console *co, char *options)
+static void gs_console_disconnect(struct gs_port *port)
{
- struct gscons_info *info = &gscons_info;
- int status;
+ struct gs_console *cons = port->console;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ if (!cons)
+ return;
- info->port = NULL;
- info->console_req = NULL;
- info->req_busy = 0;
- spin_lock_init(&info->con_lock);
+ spin_lock(&cons->lock);
- status = kfifo_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
- if (status) {
- pr_err("%s: allocate console buffer failed\n", __func__);
- return status;
- }
+ req = cons->req;
+ ep = cons->console.data;
+ cons->req = NULL;
+
+ spin_unlock(&cons->lock);
- info->console_thread = kthread_create(gs_console_thread,
- co, "gs_console");
- if (IS_ERR(info->console_thread)) {
- pr_err("%s: cannot create console thread\n", __func__);
- kfifo_free(&info->con_buf);
- return PTR_ERR(info->console_thread);
+ if (!req)
+ return;
+
+ usb_ep_dequeue(ep, req);
+ gs_free_req(ep, req);
+}
+
+static int gs_console_init(struct gs_port *port)
+{
+ struct gs_console *cons;
+ int err;
+
+ if (port->console)
+ return 0;
+
+ cons = kzalloc(sizeof(*port->console), GFP_KERNEL);
+ if (!cons)
+ return -ENOMEM;
+
+ strcpy(cons->console.name, "ttyGS");
+ cons->console.write = gs_console_write;
+ cons->console.device = gs_console_device;
+ cons->console.flags = CON_PRINTBUFFER;
+ cons->console.index = port->port_num;
+
+ INIT_WORK(&cons->work, gs_console_work);
+ spin_lock_init(&cons->lock);
+
+ err = kfifo_alloc(&cons->buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
+ if (err) {
+ pr_err("ttyGS%d: allocate console buffer failed\n", port->port_num);
+ kfree(cons);
+ return err;
}
- wake_up_process(info->console_thread);
+
+ port->console = cons;
+ register_console(&cons->console);
+
+ spin_lock_irq(&port->port_lock);
+ if (port->port_usb)
+ gs_console_connect(port);
+ spin_unlock_irq(&port->port_lock);
return 0;
}
-static void gs_console_write(struct console *co,
- const char *buf, unsigned count)
+static void gs_console_exit(struct gs_port *port)
{
- struct gscons_info *info = &gscons_info;
- unsigned long flags;
+ struct gs_console *cons = port->console;
+
+ if (!cons)
+ return;
+
+ unregister_console(&cons->console);
- spin_lock_irqsave(&info->con_lock, flags);
- kfifo_in(&info->con_buf, buf, count);
- spin_unlock_irqrestore(&info->con_lock, flags);
+ spin_lock_irq(&port->port_lock);
+ if (cons->req)
+ gs_console_disconnect(port);
+ spin_unlock_irq(&port->port_lock);
- wake_up_process(info->console_thread);
+ cancel_work_sync(&cons->work);
+ kfifo_free(&cons->buf);
+ kfree(cons);
+ port->console = NULL;
}
-static struct tty_driver *gs_console_device(struct console *co, int *index)
+ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count)
{
- struct tty_driver **p = (struct tty_driver **)co->data;
+ struct gs_port *port;
+ bool enable;
+ int ret;
- if (!*p)
- return NULL;
+ ret = strtobool(page, &enable);
+ if (ret)
+ return ret;
- *index = co->index;
- return *p;
-}
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
-static struct console gserial_cons = {
- .name = "ttyGS",
- .write = gs_console_write,
- .device = gs_console_device,
- .setup = gs_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &gs_tty_driver,
-};
+ if (WARN_ON(port == NULL)) {
+ ret = -ENXIO;
+ goto out;
+ }
-static void gserial_console_init(void)
-{
- register_console(&gserial_cons);
+ if (enable)
+ ret = gs_console_init(port);
+ else
+ gs_console_exit(port);
+out:
+ mutex_unlock(&ports[port_num].lock);
+
+ return ret < 0 ? ret : count;
}
+EXPORT_SYMBOL_GPL(gserial_set_console);
-static void gserial_console_exit(void)
+ssize_t gserial_get_console(unsigned char port_num, char *page)
{
- struct gscons_info *info = &gscons_info;
+ struct gs_port *port;
+ ssize_t ret;
- unregister_console(&gserial_cons);
- if (!IS_ERR_OR_NULL(info->console_thread))
- kthread_stop(info->console_thread);
- kfifo_free(&info->con_buf);
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+
+ if (WARN_ON(port == NULL))
+ ret = -ENXIO;
+ else
+ ret = sprintf(page, "%u\n", !!port->console);
+
+ mutex_unlock(&ports[port_num].lock);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(gserial_get_console);
#else
-static int gs_console_connect(int port_num)
+static int gs_console_connect(struct gs_port *port)
{
return 0;
}
-static void gs_console_disconnect(struct usb_ep *ep)
+static void gs_console_disconnect(struct gs_port *port)
{
}
-static void gserial_console_init(void)
+static int gs_console_init(struct gs_port *port)
{
+ return -ENOSYS;
}
-static void gserial_console_exit(void)
+static void gs_console_exit(struct gs_port *port)
{
}
@@ -1172,8 +1168,9 @@ static int gs_closed(struct gs_port *port)
int cond;
spin_lock_irq(&port->port_lock);
- cond = (port->port.count == 0) && !port->openclose;
+ cond = port->port.count == 0;
spin_unlock_irq(&port->port_lock);
+
return cond;
}
@@ -1197,18 +1194,19 @@ void gserial_free_line(unsigned char port_num)
return;
}
port = ports[port_num].port;
+ gs_console_exit(port);
ports[port_num].port = NULL;
mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
tty_unregister_device(gs_tty_driver, port_num);
- gserial_console_exit();
}
EXPORT_SYMBOL_GPL(gserial_free_line);
-int gserial_alloc_line(unsigned char *line_num)
+int gserial_alloc_line_no_console(unsigned char *line_num)
{
struct usb_cdc_line_coding coding;
+ struct gs_port *port;
struct device *tty_dev;
int ret;
int port_num;
@@ -1231,24 +1229,35 @@ int gserial_alloc_line(unsigned char *line_num)
/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
- tty_dev = tty_port_register_device(&ports[port_num].port->port,
+ port = ports[port_num].port;
+ tty_dev = tty_port_register_device(&port->port,
gs_tty_driver, port_num, NULL);
if (IS_ERR(tty_dev)) {
- struct gs_port *port;
pr_err("%s: failed to register tty for port %d, err %ld\n",
__func__, port_num, PTR_ERR(tty_dev));
ret = PTR_ERR(tty_dev);
- port = ports[port_num].port;
+ mutex_lock(&ports[port_num].lock);
ports[port_num].port = NULL;
+ mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
goto err;
}
*line_num = port_num;
- gserial_console_init();
err:
return ret;
}
+EXPORT_SYMBOL_GPL(gserial_alloc_line_no_console);
+
+int gserial_alloc_line(unsigned char *line_num)
+{
+ int ret = gserial_alloc_line_no_console(line_num);
+
+ if (!ret && !*line_num)
+ gs_console_init(ports[*line_num].port);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(gserial_alloc_line);
/**
@@ -1327,7 +1336,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
gser->disconnect(gser);
}
- status = gs_console_connect(port_num);
+ status = gs_console_connect(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
@@ -1359,12 +1368,14 @@ void gserial_disconnect(struct gserial *gser)
/* tell the TTY glue not to do I/O here any more */
spin_lock_irqsave(&port->port_lock, flags);
+ gs_console_disconnect(port);
+
/* REVISIT as above: how best to track this? */
port->port_line_coding = gser->port_line_coding;
port->port_usb = NULL;
gser->ioport = NULL;
- if (port->port.count > 0 || port->openclose) {
+ if (port->port.count > 0) {
wake_up_interruptible(&port->drain_wait);
if (port->port.tty)
tty_hangup(port->port.tty);
@@ -1377,7 +1388,7 @@ void gserial_disconnect(struct gserial *gser)
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
- if (port->port.count == 0 && !port->openclose)
+ if (port->port.count == 0)
kfifo_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool, NULL);
gs_free_requests(gser->out, &port->read_queue, NULL);
@@ -1386,7 +1397,6 @@ void gserial_disconnect(struct gserial *gser)
port->read_allocated = port->read_started =
port->write_allocated = port->write_started = 0;
- gs_console_disconnect(gser->in);
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
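The rewritten gadget console above swaps the dedicated kthread for a kfifo drained from a work item: the console ->write() hook only copies bytes into the fifo under a spinlock and kicks the worker, while the work item (and the USB request completion) pushes the next chunk out. With the USB request plumbing stubbed away and all names hypothetical, the scheme is roughly:

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct con_ctx {
        struct work_struct work;
        spinlock_t lock;
        struct kfifo buf;
        bool busy;                      /* stands in for "req->length != 0" above */
};

/* worker: runs where sleeping is allowed, drains one chunk per pass */
static void con_work(struct work_struct *work)
{
        struct con_ctx *ctx = container_of(work, struct con_ctx, work);
        char chunk[64];
        unsigned int n;

        spin_lock_irq(&ctx->lock);
        n = kfifo_out(&ctx->buf, chunk, sizeof(chunk));
        ctx->busy = n > 0;              /* a real driver would queue 'chunk' as a USB request here */
        spin_unlock_irq(&ctx->lock);
}

/* console ->write(): may be called in atomic context, so only copy and schedule */
static void con_write(struct con_ctx *ctx, const char *s, unsigned int count)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->lock, flags);
        kfifo_in(&ctx->buf, s, count);
        if (!ctx->busy)
                schedule_work(&ctx->work);
        spin_unlock_irqrestore(&ctx->lock, flags);
}

static int con_init(struct con_ctx *ctx)
{
        spin_lock_init(&ctx->lock);
        INIT_WORK(&ctx->work, con_work);
        ctx->busy = false;
        return kfifo_alloc(&ctx->buf, 8192, GFP_KERNEL);
}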
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index 9acaac1cbb75..e5b08ab8cf7a 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -54,9 +54,17 @@ struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
void gs_free_req(struct usb_ep *, struct usb_request *req);
/* management of individual TTY ports */
+int gserial_alloc_line_no_console(unsigned char *port_line);
int gserial_alloc_line(unsigned char *port_line);
void gserial_free_line(unsigned char port_line);
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count);
+ssize_t gserial_get_console(unsigned char port_num, char *page);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
/* connect/disconnect is handled by individual functions */
int gserial_connect(struct gserial *, u8 port_num);
void gserial_disconnect(struct gserial *);
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 69ff7f8c86f5..119a4e47681f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -149,21 +149,21 @@ config USB_ETH_RNDIS
is given in comments found in that info file.
config USB_ETH_EEM
- bool "Ethernet Emulation Model (EEM) support"
- depends on USB_ETH
+ bool "Ethernet Emulation Model (EEM) support"
+ depends on USB_ETH
select USB_LIBCOMPOSITE
select USB_F_EEM
- help
- CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
- and therefore can be supported by more hardware. Technically ECM and
- EEM are designed for different applications. The ECM model extends
- the network interface to the target (e.g. a USB cable modem), and the
- EEM model is for mobile devices to communicate with hosts using
- ethernet over USB. For Linux gadgets, however, the interface with
- the host is the same (a usbX device), so the differences are minimal.
-
- If you say "y" here, the Ethernet gadget driver will use the EEM
- protocol rather than ECM. If unsure, say "n".
+ help
+ CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
+ and therefore can be supported by more hardware. Technically ECM and
+ EEM are designed for different applications. The ECM model extends
+ the network interface to the target (e.g. a USB cable modem), and the
+ EEM model is for mobile devices to communicate with hosts using
+ ethernet over USB. For Linux gadgets, however, the interface with
+ the host is the same (a usbX device), so the differences are minimal.
+
+ If you say "y" here, the Ethernet gadget driver will use the EEM
+ protocol rather than ECM. If unsure, say "n".
config USB_G_NCM
tristate "Network Control Model (NCM) support"
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index af16672d5118..59be2d8417c9 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -105,7 +105,6 @@ static struct usb_function *f_msg;
*/
static int acm_ms_do_config(struct usb_configuration *c)
{
- struct fsg_opts *opts;
int status;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -113,8 +112,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- opts = fsg_opts_from_func_inst(fi_msg);
-
f_acm = usb_get_function(f_acm_inst);
if (IS_ERR(f_acm))
return PTR_ERR(f_acm);
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index fd5595ac5bf7..f18f77584fc2 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -105,7 +105,6 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
static int msg_do_config(struct usb_configuration *c)
{
- struct fsg_opts *opts;
int ret;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -113,8 +112,6 @@ static int msg_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- opts = fsg_opts_from_func_inst(fi_msg);
-
f_msg = usb_get_function(fi_msg);
if (IS_ERR(f_msg))
return PTR_ERR(f_msg);
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index de30d7628eef..da44f89f5e73 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -97,6 +97,36 @@ static unsigned n_ports = 1;
module_param(n_ports, uint, 0);
MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
+static bool enable = true;
+
+static int switch_gserial_enable(bool do_enable);
+
+static int enable_set(const char *s, const struct kernel_param *kp)
+{
+ bool do_enable;
+ int ret;
+
+ if (!s) /* called for no-arg enable == default */
+ return 0;
+
+ ret = strtobool(s, &do_enable);
+ if (ret || enable == do_enable)
+ return ret;
+
+ ret = switch_gserial_enable(do_enable);
+ if (!ret)
+ enable = do_enable;
+
+ return ret;
+}
+
+static const struct kernel_param_ops enable_ops = {
+ .set = enable_set,
+ .get = param_get_bool,
+};
+
+module_param_cb(enable, &enable_ops, &enable, 0644);
+
/*-------------------------------------------------------------------------*/
static struct usb_configuration serial_config_driver = {
@@ -240,6 +270,19 @@ static struct usb_composite_driver gserial_driver = {
.unbind = gs_unbind,
};
+static int switch_gserial_enable(bool do_enable)
+{
+ if (!serial_config_driver.label)
+ /* init() was not called, yet */
+ return 0;
+
+ if (do_enable)
+ return usb_composite_probe(&gserial_driver);
+
+ usb_composite_unregister(&gserial_driver);
+ return 0;
+}
+
static int __init init(void)
{
/* We *could* export two configs; that'd be much cleaner...
@@ -266,12 +309,16 @@ static int __init init(void)
}
strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label;
+ if (!enable)
+ return 0;
+
return usb_composite_probe(&gserial_driver);
}
module_init(init);
static void __exit cleanup(void)
{
- usb_composite_unregister(&gserial_driver);
+ if (enable)
+ usb_composite_unregister(&gserial_driver);
}
module_exit(cleanup);
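The new "enable" parameter above uses module_param_cb() so a write through /sys/module/.../parameters/enable can register or unregister the composite driver at runtime instead of only at module load. The bare pattern, with a hypothetical "feature" flag and no side effects, looks like:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static bool feature = true;

static int feature_set(const char *s, const struct kernel_param *kp)
{
        bool want;
        int ret;

        if (!s)                 /* bare "feature" on the command line keeps the default */
                return 0;

        ret = strtobool(s, &want);
        if (ret)
                return ret;

        feature = want;         /* a real driver would apply the change here */
        return 0;
}

static const struct kernel_param_ops feature_ops = {
        .set = feature_set,
        .get = param_get_bool,
};

module_param_cb(feature, &feature_ops, &feature, 0644);
MODULE_PARM_DESC(feature, "illustrative runtime-togglable flag");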
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index d354036ff6c8..ae70ce29d5e4 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -120,10 +120,10 @@ config USB_FOTG210_UDC
dynamically linked module called "fotg210_udc".
config USB_GR_UDC
- tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
- depends on HAS_DMA
- help
- Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
+ tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
+ depends on HAS_DMA
+ help
+ Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
VHDL IP core library.
config USB_OMAP
@@ -441,6 +441,17 @@ config USB_GADGET_XILINX
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.
+config USB_TEGRA_XUDC
+ tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on PHY_TEGRA_XUSB
+ help
+ Enables NVIDIA Tegra USB 3.0 device mode controller driver.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "tegra_xudc" and force all
+ gadget drivers to also be dynamically linked.
+
source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig"
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 897f648f3cf1..f6777e654a8e 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
+obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_RENESAS_USB3) += renesas_usb3.o
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 194ffb1ed462..1b2b548c59a0 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1808,7 +1808,6 @@ static int at91udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct at91_udc *udc;
int retval;
- struct resource *res;
struct at91_ep *ep;
int i;
@@ -1839,8 +1838,7 @@ static int at91udc_probe(struct platform_device *pdev)
ep->is_pingpong = 1;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->udp_baseaddr = devm_ioremap_resource(dev, res);
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr))
return PTR_ERR(udc->udp_baseaddr);
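This at91_udc hunk, like the bcm63xx_udc and bdc_core conversions further down, folds the platform_get_resource() + devm_ioremap_resource() pair into a single devm_platform_ioremap_resource() call. In isolation, with a hypothetical "foo" probe:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        void __iomem *regs;

        /* wraps platform_get_resource(pdev, IORESOURCE_MEM, 0) + devm_ioremap_resource() */
        regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        /* mapping done; a real driver would now register itself using 'regs' */
        return 0;
}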
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 1d0d8952a74b..8a42768e3213 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/ctype.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/delay.h>
@@ -226,7 +227,7 @@ static void usba_init_debugfs(struct usba_udc *udc)
struct dentry *root;
struct resource *regs_resource;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 97b16463f3ef..54501814dc3f 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2248,7 +2248,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
return;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
@@ -2282,7 +2282,6 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
struct bcm63xx_udc *udc;
- struct resource *res;
int rc = -ENOMEM, i, irq;
udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
@@ -2298,13 +2297,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->usbd_regs = devm_ioremap_resource(dev, res);
+ udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->usbd_regs))
return PTR_ERR(udc->usbd_regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- udc->iudma_regs = devm_ioremap_resource(dev, res);
+ udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(udc->iudma_regs))
return PTR_ERR(udc->iudma_regs);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index cc4a16e253ac..02a3a774670b 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -480,7 +480,6 @@ static void bdc_phy_exit(struct bdc *bdc)
static int bdc_probe(struct platform_device *pdev)
{
struct bdc *bdc;
- struct resource *res;
int ret = -ENOMEM;
int irq;
u32 temp;
@@ -508,8 +507,7 @@ static int bdc_probe(struct platform_device *pdev)
bdc->clk = clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdc->regs = devm_ioremap_resource(dev, res);
+ bdc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdc->regs)) {
dev_err(dev, "ioremap error\n");
return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 7bfd58c846f7..248426a3e88a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -195,7 +195,7 @@ static void handle_link_state_change(struct bdc *bdc, u32 uspc)
break;
case BDC_LINK_STATE_U0:
if (bdc->devstatus & REMOTE_WAKEUP_ISSUED) {
- bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
+ bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
if (bdc->gadget.speed == USB_SPEED_SUPER) {
bdc_function_wake_fh(bdc, 0);
bdc->devstatus |= FUNC_WAKE_ISSUED;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 3d499d93c083..4c9d1e49d5ed 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1321,7 +1321,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
u32 this_sg;
bool next_sg;
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
rbuf = req->req.buf + req->req.actual;
if (!urb->num_sgs) {
@@ -1409,7 +1409,7 @@ top:
/* FIXME update emulated data toggle too */
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
if (unlikely(len == 0))
is_short = 1;
else {
@@ -1830,7 +1830,7 @@ restart:
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
- if (usb_pipein(urb->pipe))
+ if (usb_urb_dir_in(urb))
address |= USB_DIR_IN;
ep = find_endpoint(dum, address);
if (!ep) {
@@ -2385,7 +2385,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
s = "?";
break;
} s; }),
- ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
+ ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype(urb->pipe)) { \
case PIPE_CONTROL: \
@@ -2725,7 +2725,7 @@ static struct platform_driver dummy_hcd_driver = {
};
/*-------------------------------------------------------------------------*/
-#define MAX_NUM_UDC 2
+#define MAX_NUM_UDC 32
static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
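The direction checks swapped in the dummy_hcd hunks above differ only in where the direction comes from: usb_pipein() decodes the legacy pipe value, while usb_urb_dir_in() reads the URB's transfer flags, which for control transfers reflect the direction of the data stage. Roughly, the two helpers from include/linux/usb.h look like this:

	/* Pipe-based check: direction bit encoded in the pipe value. */
	#define usb_pipein(pipe)	((pipe) & USB_DIR_IN)

	/* URB-based check: direction recorded in the URB when it was submitted. */
	static inline int usb_urb_dir_in(struct urb *urb)
	{
		return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN;
	}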
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.h b/drivers/usb/gadget/udc/fsl_qe_udc.h
index 2c537a904ee7..53ca0ff7c2cb 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.h
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.h
@@ -333,8 +333,8 @@ struct qe_udc {
u32 resume_state; /* USB state to resume*/
u32 usb_state; /* USB current state */
u32 usb_next_state; /* USB next state */
- u32 ep0_state; /* Enpoint zero state */
- u32 ep0_dir; /* Enpoint zero direction: can be
+ u32 ep0_state; /* Endpoint zero state */
+ u32 ep0_dir; /* Endpoint zero direction: can be
USB_DIR_IN or USB_DIR_OUT*/
u32 usb_sof_count; /* SOF count */
u32 errors; /* USB ERRORs count */
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 9a05863b2876..ec6eda426223 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1052,10 +1052,11 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
u32 bitmask;
struct ep_queue_head *qh;
- ep = container_of(_ep, struct fsl_ep, ep);
- if (!_ep || (!ep->ep.desc && ep_index(ep) != 0))
+ if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF))
return -ENODEV;
+ ep = container_of(_ep, struct fsl_ep, ep);
+
udc = (struct fsl_udc *)ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
@@ -1208,7 +1209,7 @@ static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
}
/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
+ * this func is used by usb_gadget_connect/disconnect
*/
static int fsl_pullup(struct usb_gadget *gadget, int is_on)
{
@@ -1595,14 +1596,13 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
struct fsl_req *curr_req)
{
struct ep_td_struct *curr_td;
- int td_complete, actual, remaining_length, j, tmp;
+ int actual, remaining_length, j, tmp;
int status = 0;
int errors = 0;
struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
int direction = pipe % 2;
curr_td = curr_req->head;
- td_complete = 0;
actual = curr_req->req.length;
for (j = 0; j < curr_req->dtd_count; j++) {
@@ -1647,11 +1647,9 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
status = -EPROTO;
break;
} else {
- td_complete++;
break;
}
} else {
- td_complete++;
VDBG("dTD transmitted successful");
}
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 7a0e9a58c2d8..64d80c65bb96 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -29,6 +29,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
@@ -208,7 +209,7 @@ static void gr_dfs_create(struct gr_udc *dev)
{
const char *name = "gr_udc_state";
- dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
+ dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
}
@@ -2118,7 +2119,6 @@ static int gr_request_irq(struct gr_udc *dev, int irq)
static int gr_probe(struct platform_device *pdev)
{
struct gr_udc *dev;
- struct resource *res;
struct gr_regs __iomem *regs;
int retval;
u32 status;
@@ -2128,8 +2128,7 @@ static int gr_probe(struct platform_device *pdev)
return -ENOMEM;
dev->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index bf6c81e2f8cc..d14b2bb3f67c 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3000,7 +3000,6 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct lpc32xx_udc *udc;
int retval, i;
- struct resource *res;
dma_addr_t dma_handle;
struct device_node *isp1301_node;
@@ -3048,9 +3047,6 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
* IORESOURCE_IRQ, USB device interrupt number
* IORESOURCE_IRQ, USB transceiver interrupt number
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
spin_lock_init(&udc->lock);
@@ -3061,7 +3057,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
return udc->udp_irq[i];
}
- udc->udp_baseaddr = devm_ioremap_resource(dev, res);
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr)) {
dev_err(udc->dev, "IO map failure\n");
return PTR_ERR(udc->udp_baseaddr);
diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h
index 982625b7197a..66b84f792f64 100644
--- a/drivers/usb/gadget/udc/mv_u3d.h
+++ b/drivers/usb/gadget/udc/mv_u3d.h
@@ -138,7 +138,7 @@ struct mv_u3d_op_regs {
u32 doorbell; /* doorbell register */
};
-/* control enpoint enable registers */
+/* control endpoint enable registers */
struct epxcr {
u32 epxoutcr0; /* ep out control 0 register */
u32 epxoutcr1; /* ep out control 1 register */
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 265dab2bbfac..3344fb8c4181 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1519,7 +1519,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
td = phys_to_virt(addr);
addr2 = (dma_addr_t)td->next;
dma_pool_free(dev->data_requests, td, addr);
- td->next = 0x00;
addr = addr2;
}
req->chain_len = 1;
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index d4be53559f2e..cfafdd92c2a8 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2321,7 +2321,6 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
struct pxa25x_udc *dev = &memory;
int retval, irq;
u32 chiprev;
- struct resource *res;
pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
@@ -2367,8 +2366,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
return PTR_ERR(dev->regs);
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 014233252299..78902d13fc27 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -207,7 +207,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
{
struct dentry *root;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
@@ -2356,7 +2356,6 @@ MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
*/
static int pxa_udc_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct pxa_udc *udc = &memory;
int retval = 0, gpio;
struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
@@ -2378,8 +2377,7 @@ static int pxa_udc_probe(struct platform_device *pdev)
udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ udc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->regs))
return PTR_ERR(udc->regs);
udc->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 11e25a3f4f1f..582a16165ea9 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1838,7 +1838,7 @@ static int r8a66597_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
char clk_name[8];
- struct resource *res, *ires;
+ struct resource *ires;
int irq;
void __iomem *reg = NULL;
struct r8a66597 *r8a66597 = NULL;
@@ -1846,8 +1846,7 @@ static int r8a66597_probe(struct platform_device *pdev)
int i;
unsigned long irq_trigger;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 33703140233a..c5c3c14df67a 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -775,6 +775,18 @@ static void usb3_irq_epc_int_1_resume(struct renesas_usb3 *usb3)
usb3_transition_to_default_state(usb3, false);
}
+static void usb3_irq_epc_int_1_suspend(struct renesas_usb3 *usb3)
+{
+ usb3_disable_irq_1(usb3, USB_INT_1_B2_SPND);
+
+ if (usb3->gadget.speed != USB_SPEED_UNKNOWN &&
+ usb3->gadget.state != USB_STATE_NOTATTACHED) {
+ if (usb3->driver && usb3->driver->suspend)
+ usb3->driver->suspend(&usb3->gadget);
+ usb_gadget_set_state(&usb3->gadget, USB_STATE_SUSPENDED);
+ }
+}
+
static void usb3_irq_epc_int_1_disable(struct renesas_usb3 *usb3)
{
usb3_stop_usb3_connection(usb3);
@@ -860,6 +872,9 @@ static void usb3_irq_epc_int_1(struct renesas_usb3 *usb3, u32 int_sta_1)
if (int_sta_1 & USB_INT_1_B2_RSUM)
usb3_irq_epc_int_1_resume(usb3);
+ if (int_sta_1 & USB_INT_1_B2_SPND)
+ usb3_irq_epc_int_1_suspend(usb3);
+
if (int_sta_1 & USB_INT_1_SPEED)
usb3_irq_epc_int_1_speed(usb3);
@@ -2536,7 +2551,7 @@ static const struct file_operations renesas_usb3_b_device_fops = {
static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
struct device *dev)
{
- usb3->dentry = debugfs_create_dir(dev_name(dev), NULL);
+ usb3->dentry = debugfs_create_dir(dev_name(dev), usb_debug_root);
debugfs_create_file("b_device", 0644, usb3->dentry, usb3,
&renesas_usb3_b_device_fops);
@@ -2733,7 +2748,6 @@ static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
- struct resource *res;
int irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
@@ -2752,8 +2766,7 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (!usb3)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- usb3->reg = devm_ioremap_resource(&pdev->dev, res);
+ usb3->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(usb3->reg))
return PTR_ERR(usb3->reg);
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 858993c73442..21252fbc0319 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1263,7 +1263,6 @@ static const struct usb_gadget_ops s3c_hsudc_gadget_ops = {
static int s3c_hsudc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct s3c_hsudc *hsudc;
struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev);
int ret, i;
@@ -1290,9 +1289,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
goto err_supplies;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- hsudc->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsudc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsudc->regs)) {
ret = PTR_ERR(hsudc->regs);
goto err_res;
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index f82208fbc249..0507a2ca0f55 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1978,7 +1978,8 @@ static int __init udc_init(void)
dprintk(DEBUG_NORMAL, "%s\n", gadget_name);
- s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
+ s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name,
+ usb_debug_root);
retval = platform_driver_register(&udc_driver_24x0);
if (retval)
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
new file mode 100644
index 000000000000..634c2c19a176
--- /dev/null
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -0,0 +1,3810 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra XUSB device mode controller
+ *
+ * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015, Google Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
+#include <linux/pm_domain.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/role.h>
+#include <linux/workqueue.h>
+
+/* XUSB_DEV registers */
+#define SPARAM 0x000
+#define SPARAM_ERSTMAX_MASK GENMASK(20, 16)
+#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
+#define DB 0x004
+#define DB_TARGET_MASK GENMASK(15, 8)
+#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
+#define DB_STREAMID_MASK GENMASK(31, 16)
+#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
+#define ERSTSZ 0x008
+#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
+#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
+#define ERSTXBALO(x) (0x010 + 8 * (x))
+#define ERSTXBAHI(x) (0x014 + 8 * (x))
+#define ERDPLO 0x020
+#define ERDPLO_EHB BIT(3)
+#define ERDPHI 0x024
+#define EREPLO 0x028
+#define EREPLO_ECS BIT(0)
+#define EREPLO_SEGI BIT(1)
+#define EREPHI 0x02c
+#define CTRL 0x030
+#define CTRL_RUN BIT(0)
+#define CTRL_LSE BIT(1)
+#define CTRL_IE BIT(4)
+#define CTRL_SMI_EVT BIT(5)
+#define CTRL_SMI_DSE BIT(6)
+#define CTRL_EWE BIT(7)
+#define CTRL_DEVADDR_MASK GENMASK(30, 24)
+#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
+#define CTRL_ENABLE BIT(31)
+#define ST 0x034
+#define ST_RC BIT(0)
+#define ST_IP BIT(4)
+#define RT_IMOD 0x038
+#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
+#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
+#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
+#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
+#define PORTSC 0x03c
+#define PORTSC_CCS BIT(0)
+#define PORTSC_PED BIT(1)
+#define PORTSC_PR BIT(4)
+#define PORTSC_PLS_SHIFT 5
+#define PORTSC_PLS_MASK GENMASK(8, 5)
+#define PORTSC_PLS_U0 0x0
+#define PORTSC_PLS_U2 0x2
+#define PORTSC_PLS_U3 0x3
+#define PORTSC_PLS_DISABLED 0x4
+#define PORTSC_PLS_RXDETECT 0x5
+#define PORTSC_PLS_INACTIVE 0x6
+#define PORTSC_PLS_RESUME 0xf
+#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
+#define PORTSC_PS_SHIFT 10
+#define PORTSC_PS_MASK GENMASK(13, 10)
+#define PORTSC_PS_UNDEFINED 0x0
+#define PORTSC_PS_FS 0x1
+#define PORTSC_PS_LS 0x2
+#define PORTSC_PS_HS 0x3
+#define PORTSC_PS_SS 0x4
+#define PORTSC_LWS BIT(16)
+#define PORTSC_CSC BIT(17)
+#define PORTSC_WRC BIT(19)
+#define PORTSC_PRC BIT(21)
+#define PORTSC_PLC BIT(22)
+#define PORTSC_CEC BIT(23)
+#define PORTSC_WPR BIT(30)
+#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
+ PORTSC_PLC | PORTSC_CEC)
+#define ECPLO 0x040
+#define ECPHI 0x044
+#define MFINDEX 0x048
+#define MFINDEX_FRAME_SHIFT 3
+#define MFINDEX_FRAME_MASK GENMASK(13, 3)
+#define PORTPM 0x04c
+#define PORTPM_L1S_MASK GENMASK(1, 0)
+#define PORTPM_L1S_DROP 0x0
+#define PORTPM_L1S_ACCEPT 0x1
+#define PORTPM_L1S_NYET 0x2
+#define PORTPM_L1S_STALL 0x3
+#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
+#define PORTPM_RWE BIT(3)
+#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
+#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
+#define PORTPM_FLA BIT(24)
+#define PORTPM_VBA BIT(25)
+#define PORTPM_WOC BIT(26)
+#define PORTPM_WOD BIT(27)
+#define PORTPM_U1E BIT(28)
+#define PORTPM_U2E BIT(29)
+#define PORTPM_FRWE BIT(30)
+#define PORTPM_PNG_CYA BIT(31)
+#define EP_HALT 0x050
+#define EP_PAUSE 0x054
+#define EP_RELOAD 0x058
+#define EP_STCHG 0x05c
+#define DEVNOTIF_LO 0x064
+#define DEVNOTIF_LO_TRIG BIT(0)
+#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
+#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
+#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
+#define DEVNOTIF_HI 0x068
+#define PORTHALT 0x06c
+#define PORTHALT_HALT_LTSSM BIT(0)
+#define PORTHALT_HALT_REJECT BIT(1)
+#define PORTHALT_STCHG_REQ BIT(20)
+#define PORTHALT_STCHG_INTR_EN BIT(24)
+#define PORT_TM 0x070
+#define EP_THREAD_ACTIVE 0x074
+#define EP_STOPPED 0x078
+#define HSFSPI_COUNT0 0x100
+#define HSFSPI_COUNT13 0x134
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
+ HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
+#define BLCG 0x840
+#define SSPX_CORE_CNT0 0x610
+#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
+#define SSPX_CORE_CNT30 0x688
+#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
+ SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
+#define SSPX_CORE_CNT32 0x690
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
+ SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
+#define SSPX_CORE_PADCTL4 0x750
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
+ SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
+#define BLCG_DFPCI BIT(0)
+#define BLCG_UFPCI BIT(1)
+#define BLCG_FE BIT(2)
+#define BLCG_COREPLL_PWRDN BIT(8)
+#define BLCG_IOPLL_0_PWRDN BIT(9)
+#define BLCG_IOPLL_1_PWRDN BIT(10)
+#define BLCG_IOPLL_2_PWRDN BIT(11)
+#define BLCG_ALL 0x1ff
+#define CFG_DEV_SSPI_XFER 0x858
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
+ CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
+#define CFG_DEV_FE 0x85c
+#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
+#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
+#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
+#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
+#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)
+
+/* FPCI registers */
+#define XUSB_DEV_CFG_1 0x004
+#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
+#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
+#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
+#define XUSB_DEV_CFG_4 0x010
+#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
+#define XUSB_DEV_CFG_5 0x014
+
+/* IPFS registers */
+#define XUSB_DEV_CONFIGURATION_0 0x180
+#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
+#define XUSB_DEV_INTR_MASK_0 0x188
+#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
+
+struct tegra_xudc_ep_context {
+ __le32 info0;
+ __le32 info1;
+ __le32 deq_lo;
+ __le32 deq_hi;
+ __le32 tx_info;
+ __le32 rsvd[11];
+};
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+
+#define EP_TYPE_INVALID 0
+#define EP_TYPE_ISOCH_OUT 1
+#define EP_TYPE_BULK_OUT 2
+#define EP_TYPE_INTERRUPT_OUT 3
+#define EP_TYPE_CONTROL 4
+#define EP_TYPE_ISCOH_IN 5
+#define EP_TYPE_BULK_IN 6
+#define EP_TYPE_INTERRUPT_IN 7
+
+#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
+static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
+{ \
+ return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
+} \
+static inline void \
+ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ ctx->member = cpu_to_le32(tmp); \
+}
+
+BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
+BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
+BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
+BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
+BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
+BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
+BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
+BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
+BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
+BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
+BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
+BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
+BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
+BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
+BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
+BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
+BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
+BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
+BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
+BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
+
+static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
+{
+ return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
+ (ep_ctx_read_deq_lo(ctx) << 4);
+}
+
+static inline void
+ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
+{
+ ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
+ ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
+}
+
+struct tegra_xudc_trb {
+ __le32 data_lo;
+ __le32 data_hi;
+ __le32 status;
+ __le32 control;
+};
+
+#define TRB_TYPE_RSVD 0
+#define TRB_TYPE_NORMAL 1
+#define TRB_TYPE_SETUP_STAGE 2
+#define TRB_TYPE_DATA_STAGE 3
+#define TRB_TYPE_STATUS_STAGE 4
+#define TRB_TYPE_ISOCH 5
+#define TRB_TYPE_LINK 6
+#define TRB_TYPE_TRANSFER_EVENT 32
+#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
+#define TRB_TYPE_STREAM 48
+#define TRB_TYPE_SETUP_PACKET_EVENT 63
+
+#define TRB_CMPL_CODE_INVALID 0
+#define TRB_CMPL_CODE_SUCCESS 1
+#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
+#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
+#define TRB_CMPL_CODE_USB_TRANS_ERR 4
+#define TRB_CMPL_CODE_TRB_ERR 5
+#define TRB_CMPL_CODE_STALL 6
+#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
+#define TRB_CMPL_CODE_SHORT_PACKET 13
+#define TRB_CMPL_CODE_RING_UNDERRUN 14
+#define TRB_CMPL_CODE_RING_OVERRUN 15
+#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
+#define TRB_CMPL_CODE_STOPPED 26
+#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
+#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
+#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
+#define TRB_CMPL_CODE_HOST_REJECTED 221
+#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
+#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
+
+#define BUILD_TRB_RW(name, member, shift, mask) \
+static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
+{ \
+ return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
+} \
+static inline void \
+trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ trb->member = cpu_to_le32(tmp); \
+}
+
+BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
+BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
+BUILD_TRB_RW(seq_num, status, 0, 0xffff)
+BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
+BUILD_TRB_RW(td_size, status, 17, 0x1f)
+BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
+BUILD_TRB_RW(cycle, control, 0, 0x1)
+BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
+BUILD_TRB_RW(isp, control, 2, 0x1)
+BUILD_TRB_RW(chain, control, 4, 0x1)
+BUILD_TRB_RW(ioc, control, 5, 0x1)
+BUILD_TRB_RW(type, control, 10, 0x3f)
+BUILD_TRB_RW(stream_id, control, 16, 0xffff)
+BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
+BUILD_TRB_RW(tlbpc, control, 16, 0xf)
+BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
+BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
+BUILD_TRB_RW(sia, control, 31, 0x1)
+
+static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
+{
+ return ((u64)trb_read_data_hi(trb) << 32) |
+ trb_read_data_lo(trb);
+}
+
+static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
+{
+ trb_write_data_lo(trb, lower_32_bits(addr));
+ trb_write_data_hi(trb, upper_32_bits(addr));
+}
+
+struct tegra_xudc_request {
+ struct usb_request usb_req;
+
+ size_t buf_queued;
+ unsigned int trbs_queued;
+ unsigned int trbs_needed;
+ bool need_zlp;
+
+ struct tegra_xudc_trb *first_trb;
+ struct tegra_xudc_trb *last_trb;
+
+ struct list_head list;
+};
+
+struct tegra_xudc_ep {
+ struct tegra_xudc *xudc;
+ struct usb_ep usb_ep;
+ unsigned int index;
+ char name[8];
+
+ struct tegra_xudc_ep_context *context;
+
+#define XUDC_TRANSFER_RING_SIZE 64
+ struct tegra_xudc_trb *transfer_ring;
+ dma_addr_t transfer_ring_phys;
+
+ unsigned int enq_ptr;
+ unsigned int deq_ptr;
+ bool pcs;
+ bool ring_full;
+ bool stream_rejected;
+
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+};
+
+struct tegra_xudc_sel_timing {
+ __u8 u1sel;
+ __u8 u1pel;
+ __le16 u2sel;
+ __le16 u2pel;
+};
+
+enum tegra_xudc_setup_state {
+ WAIT_FOR_SETUP,
+ DATA_STAGE_XFER,
+ DATA_STAGE_RECV,
+ STATUS_STAGE_XFER,
+ STATUS_STAGE_RECV,
+};
+
+struct tegra_xudc_setup_packet {
+ struct usb_ctrlrequest ctrl_req;
+ unsigned int seq_num;
+};
+
+struct tegra_xudc_save_regs {
+ u32 ctrl;
+ u32 portpm;
+};
+
+struct tegra_xudc {
+ struct device *dev;
+ const struct tegra_xudc_soc *soc;
+ struct tegra_xusb_padctl *padctl;
+
+ spinlock_t lock;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+#define XUDC_NR_EVENT_RINGS 2
+#define XUDC_EVENT_RING_SIZE 4096
+ struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
+ dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
+ unsigned int event_ring_index;
+ unsigned int event_ring_deq_ptr;
+ bool ccs;
+
+#define XUDC_NR_EPS 32
+ struct tegra_xudc_ep ep[XUDC_NR_EPS];
+ struct tegra_xudc_ep_context *ep_context;
+ dma_addr_t ep_context_phys;
+
+ struct device *genpd_dev_device;
+ struct device *genpd_dev_ss;
+ struct device_link *genpd_dl_device;
+ struct device_link *genpd_dl_ss;
+
+ struct dma_pool *transfer_ring_pool;
+
+ bool queued_setup_packet;
+ struct tegra_xudc_setup_packet setup_packet;
+ enum tegra_xudc_setup_state setup_state;
+ u16 setup_seq_num;
+
+ u16 dev_addr;
+ u16 isoch_delay;
+ struct tegra_xudc_sel_timing sel_timing;
+ u8 test_mode_pattern;
+ u16 status_buf;
+ struct tegra_xudc_request *ep0_req;
+
+ bool pullup;
+
+ unsigned int nr_enabled_eps;
+ unsigned int nr_isoch_eps;
+
+ unsigned int device_state;
+ unsigned int resume_state;
+
+ int irq;
+
+ void __iomem *base;
+ resource_size_t phys_base;
+ void __iomem *ipfs;
+ void __iomem *fpci;
+
+ struct regulator_bulk_data *supplies;
+
+ struct clk_bulk_data *clks;
+
+ enum usb_role device_mode;
+ struct usb_role_switch *usb_role_sw;
+ struct work_struct usb_role_sw_work;
+
+ struct phy *usb3_phy;
+ struct phy *utmi_phy;
+
+ struct tegra_xudc_save_regs saved_regs;
+ bool suspended;
+ bool powergated;
+
+ struct completion disconnect_complete;
+
+ bool selfpowered;
+
+#define TOGGLE_VBUS_WAIT_MS 100
+ struct delayed_work plc_reset_work;
+ bool wait_csc;
+
+ struct delayed_work port_reset_war_work;
+ bool wait_for_sec_prc;
+};
+
+#define XUDC_TRB_MAX_BUFFER_SIZE 65536
+#define XUDC_MAX_ISOCH_EPS 4
+#define XUDC_INTERRUPT_MODERATION_US 0
+
+static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+struct tegra_xudc_soc {
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ const char * const *clock_names;
+ unsigned int num_clks;
+ bool u1_enable;
+ bool u2_enable;
+ bool lpm_enable;
+ bool invalid_seq_num;
+ bool pls_quirk;
+ bool port_reset_quirk;
+ bool has_ipfs;
+};
+
+static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->fpci + offset);
+}
+
+static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->fpci + offset);
+}
+
+static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->ipfs + offset);
+}
+
+static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->ipfs + offset);
+}
+
+static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->base + offset);
+}
+
+static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->base + offset);
+}
+
+static inline int xudc_readl_poll(struct tegra_xudc *xudc,
+ unsigned int offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ return readl_poll_timeout_atomic(xudc->base + offset, regval,
+ (regval & mask) == val, 1, 100);
+}
+
+static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct tegra_xudc, gadget);
+}
+
+static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct tegra_xudc_ep, usb_ep);
+}
+
+static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
+{
+ return container_of(req, struct tegra_xudc_request, usb_req);
+}
+
+static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(xudc->dev,
+ "%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
+ type, trb, trb->data_lo, trb->data_hi, trb->status,
+ trb->control);
+}
+
+static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
+{
+ int err;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ err = phy_power_on(xudc->utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "utmi power on failed %d\n", err);
+
+ err = phy_power_on(xudc->usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);
+
+ dev_dbg(xudc->dev, "device mode on\n");
+
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+
+ xudc->device_mode = USB_ROLE_DEVICE;
+}
+
+static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
+{
+ bool connected = false;
+ u32 pls, val;
+ int err;
+
+ dev_dbg(xudc->dev, "device mode off\n");
+
+ connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);
+
+ reinit_completion(&xudc->disconnect_complete);
+
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, false);
+
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ /* Direct link to U0 if disconnected in RESUME or U2. */
+ if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
+ (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
+ val = xudc_readl(xudc, PORTPM);
+ val |= PORTPM_FRWE;
+ xudc_writel(xudc, val, PORTPM);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ xudc->device_mode = USB_ROLE_NONE;
+
+ /* Wait for disconnect event. */
+ if (connected)
+ wait_for_completion(&xudc->disconnect_complete);
+
+ /* Make sure interrupt handler has completed before powergating. */
+ synchronize_irq(xudc->irq);
+
+ err = phy_power_off(xudc->utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);
+
+ err = phy_power_off(xudc->usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);
+
+ pm_runtime_put(xudc->dev);
+}
+
+static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
+{
+ struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
+ usb_role_sw_work);
+
+ if (!xudc->usb_role_sw ||
+ usb_role_switch_get_role(xudc->usb_role_sw) == USB_ROLE_DEVICE)
+ tegra_xudc_device_mode_on(xudc);
+ else
+ tegra_xudc_device_mode_off(xudc);
+
+}
+
+static int tegra_xudc_usb_role_sw_set(struct device *dev, enum usb_role role)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ dev_dbg(dev, "%s role is %d\n", __func__, role);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (!xudc->suspended)
+ schedule_work(&xudc->usb_role_sw_work);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return 0;
+}
+
+static void tegra_xudc_plc_reset_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
+ plc_reset_work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->wait_csc) {
+ u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ if (pls == PORTSC_PLS_INACTIVE) {
+ dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl,
+ false);
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+ xudc->wait_csc = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static void tegra_xudc_port_reset_war_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc =
+ container_of(dwork, struct tegra_xudc, port_reset_war_work);
+ unsigned long flags;
+ u32 pls;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if ((xudc->device_mode == USB_ROLE_DEVICE)
+ && xudc->wait_for_sec_prc) {
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+ dev_dbg(xudc->dev, "pls = %x\n", pls);
+
+ if (pls == PORTSC_PLS_DISABLED) {
+ dev_dbg(xudc->dev, "toggle vbus\n");
+ /* PRC doesn't complete in 100ms, toggle the vbus */
+ ret = tegra_phy_xusb_utmi_port_reset(xudc->utmi_phy);
+ if (ret == 1)
+ xudc->wait_for_sec_prc = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ unsigned int index;
+
+ index = trb - ep->transfer_ring;
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return 0;
+
+ return (ep->transfer_ring_phys + index * sizeof(*trb));
+}
+
+static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
+ dma_addr_t addr)
+{
+ struct tegra_xudc_trb *trb;
+ unsigned int index;
+
+ index = (addr - ep->transfer_ring_phys) / sizeof(*trb);
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return NULL;
+
+ trb = &ep->transfer_ring[index];
+
+ return trb;
+}
+
+static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_writel(xudc, BIT(ep), EP_RELOAD);
+ xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
+}
+
+static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+
+ xudc_writel(xudc, 0, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!val)
+ return;
+ xudc_writel(xudc, 0, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
+ xudc_writel(xudc, BIT(ep), EP_STOPPED);
+}
+
+static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
+}
+
+static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req, int status)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
+ req, ep->index, status);
+
+ if (likely(req->usb_req.status == -EINPROGRESS))
+ req->usb_req.status = status;
+
+ list_del_init(&req->list);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ spin_unlock(&xudc->lock);
+ usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
+ spin_lock(&xudc->lock);
+}
+
+static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
+{
+ struct tegra_xudc_request *req;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ tegra_xudc_req_done(ep, req, status);
+ }
+}
+
+static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
+{
+ if (ep->ring_full)
+ return 0;
+
+ if (ep->deq_ptr > ep->enq_ptr)
+ return ep->deq_ptr - ep->enq_ptr - 1;
+
+ return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
+}
+
+static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb,
+ bool ioc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ dma_addr_t buf_addr;
+ size_t len;
+
+ len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
+ req->buf_queued);
+ if (len > 0)
+ buf_addr = req->usb_req.dma + req->buf_queued;
+ else
+ buf_addr = 0;
+
+ trb_write_data_ptr(trb, buf_addr);
+
+ trb_write_transfer_len(trb, len);
+ trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);
+
+ if (req->trbs_queued == req->trbs_needed - 1 ||
+ (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
+ trb_write_chain(trb, 0);
+ else
+ trb_write_chain(trb, 1);
+
+ trb_write_ioc(trb, ioc);
+
+ if (usb_endpoint_dir_out(ep->desc) ||
+ (usb_endpoint_xfer_control(ep->desc) &&
+ (xudc->setup_state == DATA_STAGE_RECV)))
+ trb_write_isp(trb, 1);
+ else
+ trb_write_isp(trb, 0);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == DATA_STAGE_RECV)
+ trb_write_type(trb, TRB_TYPE_DATA_STAGE);
+ else
+ trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
+
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == STATUS_STAGE_XFER)
+ trb_write_data_stage_dir(trb, 1);
+ else
+ trb_write_data_stage_dir(trb, 0);
+ } else if (usb_endpoint_xfer_isoc(ep->desc)) {
+ trb_write_type(trb, TRB_TYPE_ISOCH);
+ trb_write_sia(trb, 1);
+ trb_write_frame_id(trb, 0);
+ trb_write_tlbpc(trb, 0);
+ } else if (usb_ss_max_streams(ep->comp_desc)) {
+ trb_write_type(trb, TRB_TYPE_STREAM);
+ trb_write_stream_id(trb, req->usb_req.stream_id);
+ } else {
+ trb_write_type(trb, TRB_TYPE_NORMAL);
+ trb_write_stream_id(trb, 0);
+ }
+
+ trb_write_cycle(trb, ep->pcs);
+
+ req->trbs_queued++;
+ req->buf_queued += len;
+
+ dump_trb(xudc, "TRANSFER", trb);
+}
+
+static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ unsigned int i, count, available;
+ bool wait_td = false;
+
+ available = ep_available_trbs(ep);
+ count = req->trbs_needed - req->trbs_queued;
+ if (available < count) {
+ count = available;
+ ep->ring_full = true;
+ }
+
+ /*
+ * To generate a zero-length packet on the USB bus, SW needs to schedule
+ * a standalone zero-length TD. According to the HW's behavior, SW needs
+ * to schedule TDs in different ways for different endpoint types.
+ *
+ * For control endpoint:
+ * - Data stage TD (IOC = 1, CH = 0)
+ * - Ring doorbell and wait transfer event
+ * - Data stage TD for ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ *
+ * For bulk and interrupt endpoints:
+ * - Normal transfer TD (IOC = 0, CH = 0)
+ * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ */
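As a worked example of the bulk/interrupt case (hypothetical numbers): a 1024-byte bulk request with req->zero set on a 512-byte maxpacket endpoint is queued by the code below as two NORMAL TRBs, a 1024-byte TRB with IOC = 0 and CH = 0 followed by a zero-length TRB with IOC = 1 and CH = 0, after which the doorbell is rung once.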
+
+ if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
+ wait_td = true;
+
+ if (!req->first_trb)
+ req->first_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ for (i = 0; i < count; i++) {
+ struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
+ bool ioc = false;
+
+ if ((i == count - 1) || (wait_td && i == count - 2))
+ ioc = true;
+
+ tegra_xudc_queue_one_trb(ep, req, trb, ioc);
+ req->last_trb = trb;
+
+ ep->enq_ptr++;
+ if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
+ trb = &ep->transfer_ring[ep->enq_ptr];
+ trb_write_cycle(trb, ep->pcs);
+ ep->pcs = !ep->pcs;
+ ep->enq_ptr = 0;
+ }
+
+ if (ioc)
+ break;
+ }
+
+ return count;
+}
+
+static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ u32 val;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ val = DB_TARGET(ep->index);
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ val |= DB_STREAMID(xudc->setup_seq_num);
+ } else if (usb_ss_max_streams(ep->comp_desc) > 0) {
+ struct tegra_xudc_request *req;
+
+ /* Don't ring doorbell if the stream has been rejected. */
+ if (ep->stream_rejected)
+ return;
+
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ val |= DB_STREAMID(req->usb_req.stream_id);
+ }
+
+ dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
+ xudc_writel(xudc, val, DB);
+}
+
+static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc_request *req;
+ bool trbs_queued = false;
+
+ list_for_each_entry(req, &ep->queue, list) {
+ if (ep->ring_full)
+ break;
+
+ if (tegra_xudc_queue_trbs(ep, req) > 0)
+ trbs_queued = true;
+ }
+
+ if (trbs_queued)
+ tegra_xudc_ep_ring_doorbell(ep);
+}
+
+static int
+__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ int err;
+
+ if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "control EP has pending transfers\n");
+ return -EINVAL;
+ }
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to map request: %d\n", err);
+ return err;
+ }
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ req->need_zlp = false;
+ req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
+ XUDC_TRB_MAX_BUFFER_SIZE);
+ if (req->usb_req.length == 0)
+ req->trbs_needed++;
+
+ if (!usb_endpoint_xfer_isoc(ep->desc) &&
+ req->usb_req.zero && req->usb_req.length &&
+ ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
+ req->trbs_needed++;
+ req->need_zlp = true;
+ }
+
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ list_add_tail(&req->list, &ep->queue);
+
+ tegra_xudc_ep_kick_queue(ep);
+
+ return 0;
+}
+
+static int
+tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
+ gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_queue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc_trb *trb = req->first_trb;
+ bool pcs_enq = trb_read_cycle(trb);
+ bool pcs;
+
+ /*
+ * Clear out all the TRBs that are part of or after the cancelled request,
+ * and correct the TRB cycle bit back to the last un-enqueued state.
+ */
+ while (trb != &ep->transfer_ring[ep->enq_ptr]) {
+ pcs = trb_read_cycle(trb);
+ memset(trb, 0, sizeof(*trb));
+ trb_write_cycle(trb, !pcs);
+ trb++;
+
+ if (trb_read_type(trb) == TRB_TYPE_LINK)
+ trb = ep->transfer_ring;
+ }
+
+ /* Requests will be re-queued at the start of the cancelled request. */
+ ep->enq_ptr = req->first_trb - ep->transfer_ring;
+ /*
+ * Retrieve the correct cycle bit state from the first trb of
+ * the cancelled request.
+ */
+ ep->pcs = pcs_enq;
+ ep->ring_full = false;
+ list_for_each_entry_continue(req, &ep->queue, list) {
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ }
+}
+
+/*
+ * Determine if the given TRB is in the range [first trb, last trb] for the
+ * given request.
+ */
+static bool trb_in_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
+ req->first_trb, req->last_trb, trb);
+
+ if (trb >= req->first_trb && (trb <= req->last_trb ||
+ req->last_trb < req->first_trb))
+ return true;
+
+ if (trb < req->first_trb && trb <= req->last_trb &&
+ req->last_trb < req->first_trb)
+ return true;
+
+ return false;
+}
+
+/*
+ * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
+ * for the given endpoint and request.
+ */
+static bool trb_before_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
+ __func__, req->first_trb, req->last_trb, enq_trb, trb);
+
+ if (trb < req->first_trb && (enq_trb <= trb ||
+ req->first_trb < enq_trb))
+ return true;
+
+ if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
+ return true;
+
+ return false;
+}
+
+static int
+__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ struct tegra_xudc_request *r;
+ struct tegra_xudc_trb *deq_trb;
+ bool busy, kick_queue = false;
+ int ret = 0;
+
+ /* Make sure the request is actually queued to this endpoint. */
+ list_for_each_entry(r, &ep->queue, list) {
+ if (r == req)
+ break;
+ }
+
+ if (r != req)
+ return -EINVAL;
+
+ /* Request hasn't been queued in the transfer ring yet. */
+ if (!req->trbs_queued) {
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ return 0;
+ }
+
+ /* Halt DMA for this endpoint. */
+ if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
+ ep_pause(xudc, ep->index);
+ ep_wait_for_inactive(xudc, ep->index);
+ }
+
+ deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
+ /* Is the hardware processing the TRB at the dequeue pointer? */
+ busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));
+
+ if (trb_in_request(ep, req, deq_trb) && busy) {
+ /*
+ * Request has been partially completed or it hasn't
+ * started processing yet.
+ */
+ dma_addr_t deq_ptr;
+
+ squeeze_transfer_ring(ep, req);
+
+ req->usb_req.actual = ep_ctx_read_edtla(ep->context);
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+
+ /* EDTLA is > 0: request has been partially completed */
+ if (req->usb_req.actual > 0) {
+ /*
+ * Abort the pending transfer and update the dequeue
+ * pointer
+ */
+ ep_ctx_write_edtla(ep->context, 0);
+ ep_ctx_write_partial_td(ep->context, 0);
+ ep_ctx_write_data_offset(ep->context, 0);
+
+ deq_ptr = trb_virt_to_phys(ep,
+ &ep->transfer_ring[ep->enq_ptr]);
+
+ if (dma_mapping_error(xudc->dev, deq_ptr)) {
+ ret = -EINVAL;
+ } else {
+ ep_ctx_write_deq_ptr(ep->context, deq_ptr);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+ ep_reload(xudc, ep->index);
+ }
+ }
+ } else if (trb_before_request(ep, req, deq_trb) && busy) {
+ /* Request hasn't started processing yet. */
+ squeeze_transfer_ring(ep, req);
+
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+ } else {
+ /*
+ * Request has completed, but we haven't processed the
+ * completion event yet.
+ */
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ ret = -EINVAL;
+ }
+
+ /* Resume the endpoint. */
+ ep_unpause(xudc, ep->index);
+
+ if (kick_queue)
+ tegra_xudc_ep_kick_queue(ep);
+
+ return ret;
+}
+
+static int
+tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_dequeue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (!ep->desc)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
+ dev_err(xudc->dev, "can't halt isoc EP\n");
+ return -ENOTSUPP;
+ }
+
+ if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
+ dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
+ halt ? "halted" : "not halted");
+ return 0;
+ }
+
+ if (halt) {
+ ep_halt(xudc, ep->index);
+ } else {
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_seq_num(ep->context, 0);
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ tegra_xudc_ep_ring_doorbell(ep);
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ if (value && usb_endpoint_dir_in(ep->desc) &&
+ !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "can't halt EP with requests pending\n");
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_set_halt(ep, value);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
+{
+ const struct usb_endpoint_descriptor *desc = ep->desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+ struct tegra_xudc *xudc = ep->xudc;
+ u16 maxpacket, maxburst = 0, esit = 0;
+ u32 val;
+
+ maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (!usb_endpoint_xfer_control(desc))
+ maxburst = comp_desc->bMaxBurst;
+
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
+ esit = le16_to_cpu(comp_desc->wBytesPerInterval);
+ } else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc))) {
+ if (xudc->gadget.speed == USB_SPEED_HIGH) {
+ maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
+ if (maxburst == 0x3) {
+ dev_warn(xudc->dev,
+ "invalid endpoint maxburst\n");
+ maxburst = 0x2;
+ }
+ }
+ esit = maxpacket * (maxburst + 1);
+ }
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_interval(ep->context, desc->bInterval);
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (usb_endpoint_xfer_isoc(desc)) {
+ ep_ctx_write_mult(ep->context,
+ comp_desc->bmAttributes & 0x3);
+ }
+
+ if (usb_endpoint_xfer_bulk(desc)) {
+ ep_ctx_write_max_pstreams(ep->context,
+ comp_desc->bmAttributes &
+ 0x1f);
+ ep_ctx_write_lsa(ep->context, 1);
+ }
+ }
+
+ if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
+ val = usb_endpoint_type(desc);
+ else
+ val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;
+
+ ep_ctx_write_type(ep->context, val);
+ ep_ctx_write_cerr(ep->context, 0x3);
+ ep_ctx_write_max_packet_size(ep->context, maxpacket);
+ ep_ctx_write_max_burst_size(ep->context, maxburst);
+
+ ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+
+ /* Select a reasonable average TRB length based on endpoint type. */
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ val = 8;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ val = 1024;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_ISOC:
+ default:
+ val = 3072;
+ break;
+ }
+
+ ep_ctx_write_avg_trb_len(ep->context, val);
+ ep_ctx_write_max_esit_payload(ep->context, esit);
+
+ ep_ctx_write_cerrcnt(ep->context, 0x3);
+}
+
+static void setup_link_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ trb_write_data_ptr(trb, ep->transfer_ring_phys);
+ trb_write_type(trb, TRB_TYPE_LINK);
+ trb_write_toggle_cycle(trb, 1);
+}
+
+static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_err(xudc->dev, "endpoint %u already disabled\n",
+ ep->index);
+ return -EINVAL;
+ }
+
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ tegra_xudc_ep_nuke(ep, -ESHUTDOWN);
+
+ xudc->nr_enabled_eps--;
+ if (usb_endpoint_xfer_isoc(ep->desc))
+ xudc->nr_isoch_eps--;
+
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+ if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
+ xudc_writel(xudc, BIT(ep->index), EP_STOPPED);
+
+ /*
+ * If this is the last endpoint disabled in a de-configure request,
+ * switch back to address state.
+ */
+ if ((xudc->device_state == USB_STATE_CONFIGURED) &&
+ (xudc->nr_enabled_eps == 1)) {
+ u32 val;
+
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ dev_info(xudc->dev, "ep %u disabled\n", ep->index);
+
+ return 0;
+}
+
+static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_disable(ep);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ unsigned int i;
+ u32 val;
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER &&
+ !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
+ return -EINVAL;
+
+ /* Disable the endpoint if it is not already disabled. */
+ if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
+ __tegra_xudc_ep_disable(ep);
+
+ ep->desc = desc;
+ ep->comp_desc = ep->usb_ep.comp_desc;
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
+ dev_err(xudc->dev, "too many isoch endpoints\n");
+ return -EBUSY;
+ }
+ xudc->nr_isoch_eps++;
+ }
+
+ memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
+ sizeof(*ep->transfer_ring));
+ setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
+
+ ep->enq_ptr = 0;
+ ep->deq_ptr = 0;
+ ep->pcs = true;
+ ep->ring_full = false;
+ xudc->nr_enabled_eps++;
+
+ tegra_xudc_ep_context_setup(ep);
+
+ /*
+ * No need to reload and un-halt EP0. This will be done automatically
+ * once a valid SETUP packet is received.
+ */
+ if (usb_endpoint_xfer_control(desc))
+ goto out;
+
+ /*
+ * Transition to configured state once the first non-control
+ * endpoint is enabled.
+ */
+ if (xudc->device_state == USB_STATE_ADDRESS) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+
+ xudc->device_state = USB_STATE_CONFIGURED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ /*
+ * Pause all bulk endpoints when enabling an isoch endpoint
+ * to ensure the isoch endpoint is allocated enough bandwidth.
+ */
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_pause(xudc, i);
+ }
+ }
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_unpause(xudc, i);
+ }
+ }
+
+out:
+ dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
+ usb_ep_type_string(usb_endpoint_type(ep->desc)),
+ usb_endpoint_dir_in(ep->desc) ? "in" : "out");
+
+ return 0;
+}
+
+static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_enable(ep, desc);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *
+tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+
+ req = kzalloc(sizeof(*req), gfp);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->list);
+
+ return &req->usb_req;
+}
+
+static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
+ struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req = to_xudc_req(usb_req);
+
+ kfree(req);
+}
+
+static struct usb_ep_ops tegra_xudc_ep_ops = {
+ .enable = tegra_xudc_ep_enable,
+ .disable = tegra_xudc_ep_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
+static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EBUSY;
+}
+
+static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
+{
+ return -EBUSY;
+}
+
+static struct usb_ep_ops tegra_xudc_ep0_ops = {
+ .enable = tegra_xudc_ep0_enable,
+ .disable = tegra_xudc_ep0_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
+static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
+ MFINDEX_FRAME_SHIFT;
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ ep_unpause_all(xudc);
+
+ /* Direct link to U0. */
+ val = xudc_readl(xudc, PORTSC);
+ if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ if (xudc->device_state == USB_STATE_SUSPENDED) {
+ xudc->device_state = xudc->resume_state;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ xudc->resume_state = 0;
+ }
+
+ /*
+ * Doorbells may be dropped if they are sent too soon (< ~200ns)
+ * after unpausing the endpoint. Wait for 500ns just to be safe.
+ */
+ ndelay(500);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
+}
+
+static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+ val = xudc_readl(xudc, PORTPM);
+ dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
+ val, gadget->speed);
+
+ if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
+ (val & PORTPM_RWE)) ||
+ ((xudc->gadget.speed == USB_SPEED_SUPER) &&
+ (val & PORTPM_FRWE))) {
+ tegra_xudc_resume_device_state(xudc);
+
+ /* Send Device Notification packet. */
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
+ | DEVNOTIF_LO_TRIG;
+ xudc_writel(xudc, 0, DEVNOTIF_HI);
+ xudc_writel(xudc, val, DEVNOTIF_LO);
+ }
+ }
+
+unlock:
+ dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (is_on != xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ if (is_on)
+ val |= CTRL_ENABLE;
+ else
+ val &= ~CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->pullup = is_on;
+ dev_dbg(xudc->dev, "%s: pullup:%d", __func__, is_on);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (!driver)
+ return -EINVAL;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->driver) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
+ if (ret < 0)
+ goto unlock;
+
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_IE | CTRL_LSE;
+ xudc_writel(xudc, val, CTRL);
+
+ val = xudc_readl(xudc, PORTHALT);
+ val |= PORTHALT_STCHG_INTR_EN;
+ xudc_writel(xudc, val, PORTHALT);
+
+ if (xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->driver = driver;
+unlock:
+ dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_IE | CTRL_ENABLE);
+ xudc_writel(xudc, val, CTRL);
+
+ __tegra_xudc_ep_disable(&xudc->ep[0]);
+
+ xudc->driver = NULL;
+ dev_dbg(xudc->dev, "Gadget stopped");
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+
+ dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
+ xudc->selfpowered = !!is_on;
+
+ return 0;
+}
+
+static struct usb_gadget_ops tegra_xudc_gadget_ops = {
+ .get_frame = tegra_xudc_gadget_get_frame,
+ .wakeup = tegra_xudc_gadget_wakeup,
+ .pullup = tegra_xudc_gadget_pullup,
+ .udc_start = tegra_xudc_gadget_start,
+ .udc_stop = tegra_xudc_gadget_stop,
+ .set_selfpowered = tegra_xudc_set_selfpowered,
+};
+
+static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
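+ /*
+ * Queue a zero-length (status stage) transfer on EP0 using the
+ * pre-allocated ep0 request.
+ */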
+static int
+tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = NULL;
+ xudc->ep0_req->usb_req.dma = 0;
+ xudc->ep0_req->usb_req.length = 0;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static int
+tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = buf;
+ xudc->ep0_req->usb_req.length = len;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
+{
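+ /*
+ * Advance the control transfer state machine: a completed data
+ * stage is followed by a status stage in the opposite direction,
+ * otherwise go back to waiting for the next SETUP packet.
+ */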
+ switch (xudc->setup_state) {
+ case DATA_STAGE_XFER:
+ xudc->setup_state = STATUS_STAGE_RECV;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ case DATA_STAGE_RECV:
+ xudc->setup_state = STATUS_STAGE_XFER;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ default:
+ xudc->setup_state = WAIT_FOR_SETUP;
+ break;
+ }
+}
+
+static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&xudc->lock);
+ ret = xudc->driver->setup(&xudc->gadget, ctrl);
+ spin_lock(&xudc->lock);
+
+ return ret;
+}
+
+static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if (xudc->test_mode_pattern) {
+ xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
+ xudc->test_mode_pattern = 0;
+ }
+}
+
+static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+ u32 feature = le16_to_cpu(ctrl->wValue);
+ u32 index = le16_to_cpu(ctrl->wIndex);
+ u32 val, ep;
+ int ret;
+
+ if (le16_to_cpu(ctrl->wLength) != 0)
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (feature) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
+ (xudc->device_state == USB_STATE_DEFAULT))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if (set)
+ val |= PORTPM_RWE;
+ else
+ val &= ~PORTPM_RWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ case USB_DEVICE_U2_ENABLE:
+ if ((xudc->device_state != USB_STATE_CONFIGURED) ||
+ (xudc->gadget.speed != USB_SPEED_SUPER))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if ((feature == USB_DEVICE_U1_ENABLE) &&
+ xudc->soc->u1_enable) {
+ if (set)
+ val |= PORTPM_U1E;
+ else
+ val &= ~PORTPM_U1E;
+ }
+
+ if ((feature == USB_DEVICE_U2_ENABLE) &&
+ xudc->soc->u2_enable) {
+ if (set)
+ val |= PORTPM_U2E;
+ else
+ val &= ~PORTPM_U2E;
+ }
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (xudc->gadget.speed != USB_SPEED_HIGH)
+ return -EINVAL;
+
+ if (!set)
+ return -EINVAL;
+
+ xudc->test_mode_pattern = index >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->device_state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ switch (feature) {
+ case USB_INTRF_FUNC_SUSPEND:
+ if (set) {
+ val = xudc_readl(xudc, PORTPM);
+
+ if (index & USB_INTRF_FUNC_SUSPEND_RW)
+ val |= PORTPM_FRWE;
+ else
+ val &= ~PORTPM_FRWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ return tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_ENDPOINT:
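+ /* Endpoint contexts use even indices for OUT and odd for IN. */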
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) ||
+ ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (index != 0)))
+ return -EINVAL;
+
+ ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
+}
+
+static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep_context *ep_ctx;
+ u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
+ u16 status = 0;
+
+ if (!(ctrl->bRequestType & USB_DIR_IN))
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 2))
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ val = xudc_readl(xudc, PORTPM);
+
+ if (xudc->selfpowered)
+ status |= BIT(USB_DEVICE_SELF_POWERED);
+
+ if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (val & PORTPM_RWE))
+ status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (val & PORTPM_U1E)
+ status |= BIT(USB_DEV_STAT_U1_ENABLED);
+ if (val & PORTPM_U2E)
+ status |= BIT(USB_DEV_STAT_U2_ENABLED);
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ status |= USB_INTRF_STAT_FUNC_RW_CAP;
+ val = xudc_readl(xudc, PORTPM);
+ if (val & PORTPM_FRWE)
+ status |= USB_INTRF_STAT_FUNC_RW;
+ }
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+ ep_ctx = &xudc->ep_context[ep];
+
+ if ((xudc->device_state != USB_STATE_CONFIGURED) &&
+ ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
+ return -EINVAL;
+
+ if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
+ return -EINVAL;
+
+ if (xudc_readl(xudc, EP_HALT) & BIT(ep))
+ status |= BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xudc->status_buf = cpu_to_le16(status);
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
+ sizeof(xudc->status_buf),
+ no_op_complete);
+}
+
+static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* Nothing to do with SEL values */
+}
+
+static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 6))
+ return -EINVAL;
+
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
+ sizeof(xudc->sel_timing),
+ set_sel_complete);
+}
+
+static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* Nothing to do with isoch delay */
+}
+
+static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ u32 delay = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ xudc->isoch_delay = delay;
+
+ return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
+}
+
+static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) &&
+ (xudc->dev_addr != 0)) {
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ } else if ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (xudc->dev_addr == 0)) {
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+}
+
+static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u32 val, addr = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ dev_dbg(xudc->dev, "set address: %u\n", addr);
+
+ xudc->dev_addr = addr;
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_DEVADDR_MASK);
+ val |= CTRL_DEVADDR(addr);
+ xudc_writel(xudc, val, CTRL);
+
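+ /*
+ * Mirror the new address into the EP0 context. The device state
+ * transition happens in set_address_complete() once the status
+ * stage finishes.
+ */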
+ ep_ctx_write_devaddr(ep0->context, addr);
+
+ return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
+}
+
+static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
+ ret = tegra_xudc_ep0_get_status(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = tegra_xudc_ep0_set_address(xudc, ctrl);
+ break;
+ case USB_REQ_SET_SEL:
+ dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
+ ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
+ ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
+ ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ /*
+ * In theory, the RUN bit should be cleared before the status stage
+ * of a deconfigure request is sent, but doing so seems to cause
+ * problems. Clear RUN once all endpoints are disabled instead.
+ */
+ fallthrough;
+ default:
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl,
+ u16 seq_num)
+{
+ int ret;
+
+ xudc->setup_seq_num = seq_num;
+
+ /* Ensure EP0 is unhalted. */
+ ep_unhalt(xudc, 0);
+
+ /*
+ * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
+ * are invalid. Halt EP0 until we get a valid packet.
+ */
+ if (xudc->soc->invalid_seq_num &&
+ (seq_num == 0xfffe || seq_num == 0xffff)) {
+ dev_warn(xudc->dev, "invalid sequence number detected\n");
+ ep_halt(xudc, 0);
+ return;
+ }
+
+ if (ctrl->wLength)
+ xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
+ DATA_STAGE_XFER : DATA_STAGE_RECV;
+ else
+ xudc->setup_state = STATUS_STAGE_XFER;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
+ else
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+
+ if (ret < 0) {
+ dev_warn(xudc->dev, "setup request failed: %d\n", ret);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ ep_halt(xudc, 0);
+ }
+}
+
+static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
+ u16 seq_num = trb_read_seq_num(event);
+
+ if (xudc->setup_state != WAIT_FOR_SETUP) {
+ /*
+ * The controller is in the process of handling another
+ * setup request. Queue subsequent requests and handle
+ * the last one once the controller reports a sequence
+ * number error.
+ */
+ memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
+ xudc->setup_packet.seq_num = seq_num;
+ xudc->queued_setup_packet = true;
+ } else {
+ tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
+ }
+}
+
+static struct tegra_xudc_request *
+trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_request *req;
+
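+ /*
+ * Requests complete in queue order; a request with no TRBs on
+ * the ring (and anything queued after it) cannot own the
+ * completed TRB.
+ */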
+ list_for_each_entry(req, &ep->queue, list) {
+ if (!req->trbs_queued)
+ break;
+
+ if (trb_in_request(ep, req, trb))
+ return req;
+ }
+
+ return NULL;
+}
+
+static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
+ struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *event)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_trb *trb;
+ bool short_packet;
+
+ short_packet = (trb_read_cmpl_code(event) ==
+ TRB_CMPL_CODE_SHORT_PACKET);
+
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ req = trb_to_request(ep, trb);
+
+ /*
+ * TDs are complete on short packet or when the completed TRB is the
+ * last TRB in the TD (the CHAIN bit is unset).
+ */
+ if (req && (short_packet || (!trb_read_chain(trb) &&
+ (req->trbs_needed == req->trbs_queued)))) {
+ struct tegra_xudc_trb *last = req->last_trb;
+ unsigned int residual;
+
+ residual = trb_read_transfer_len(event);
+ req->usb_req.actual = req->usb_req.length - residual;
+
+ dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
+ req->usb_req.actual, req->usb_req.length);
+
+ tegra_xudc_req_done(ep, req, 0);
+
+ if (ep->desc && usb_endpoint_xfer_control(ep->desc))
+ tegra_xudc_ep0_req_done(xudc);
+
+ /*
+ * Advance the dequeue pointer past the end of the current TD
+ * on short packet completion.
+ */
+ if (short_packet) {
+ ep->deq_ptr = (last - ep->transfer_ring) + 1;
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ }
+ } else if (!req) {
+ dev_warn(xudc->dev, "transfer event on dequeued request\n");
+ }
+
+ if (ep->desc)
+ tegra_xudc_ep_kick_queue(ep);
+}
+
+static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ unsigned int ep_index = trb_read_endpoint_id(event);
+ struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
+ struct tegra_xudc_trb *trb;
+ u16 comp_code;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
+ ep_index);
+ return;
+ }
+
+ /* Update transfer ring dequeue pointer. */
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ comp_code = trb_read_cmpl_code(event);
+ if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
+ ep->deq_ptr = (trb - ep->transfer_ring) + 1;
+
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ ep->ring_full = false;
+ }
+
+ switch (comp_code) {
+ case TRB_CMPL_CODE_SUCCESS:
+ case TRB_CMPL_CODE_SHORT_PACKET:
+ tegra_xudc_handle_transfer_completion(xudc, ep, event);
+ break;
+ case TRB_CMPL_CODE_HOST_REJECTED:
+ dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);
+
+ ep->stream_rejected = true;
+ break;
+ case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
+ dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);
+
+ if (ep->stream_rejected) {
+ ep->stream_rejected = false;
+ /*
+ * An EP is stopped when a stream is rejected. Wait
+ * for the EP to report that it is stopped and then
+ * un-stop it.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ }
+ tegra_xudc_ep_ring_doorbell(ep);
+ break;
+ case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
+ /*
+ * Wait for the EP to be stopped so the controller stops
+ * processing doorbells.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ ep->enq_ptr = ep->deq_ptr;
+ tegra_xudc_ep_nuke(ep, -EIO);
+ fallthrough;
+ case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
+ case TRB_CMPL_CODE_CTRL_DIR_ERR:
+ case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
+ case TRB_CMPL_CODE_RING_UNDERRUN:
+ case TRB_CMPL_CODE_RING_OVERRUN:
+ case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
+ case TRB_CMPL_CODE_USB_TRANS_ERR:
+ case TRB_CMPL_CODE_TRB_ERR:
+ dev_err(xudc->dev, "completion error %#x on EP %u\n",
+ comp_code, ep_index);
+
+ ep_halt(xudc, ep_index);
+ break;
+ case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
+ dev_info(xudc->dev, "sequence number error\n");
+
+ /*
+ * Kill any queued control request and skip to the last
+ * setup packet we received.
+ */
+ tegra_xudc_ep_nuke(ep, -EINVAL);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ if (!xudc->queued_setup_packet)
+ break;
+
+ tegra_xudc_handle_ep0_setup_packet(xudc,
+ &xudc->setup_packet.ctrl_req,
+ xudc->setup_packet.seq_num);
+ xudc->queued_setup_packet = false;
+ break;
+ case TRB_CMPL_CODE_STOPPED:
+ dev_dbg(xudc->dev, "stop completion code on EP %u\n",
+ ep_index);
+
+ /* Disconnected. */
+ tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
+ break;
+ default:
+ dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
+ comp_code, ep_index);
+ break;
+ }
+}
+
+static void tegra_xudc_reset(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ dma_addr_t deq_ptr;
+ unsigned int i;
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ep_unpause_all(xudc);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);
+
+ /*
+ * Reset sequence number and dequeue pointer to flush the transfer
+ * ring.
+ */
+ ep0->deq_ptr = ep0->enq_ptr;
+ ep0->ring_full = false;
+
+ xudc->setup_seq_num = 0;
+ xudc->queued_setup_packet = false;
+
+ ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);
+
+ deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
+
+ if (!dma_mapping_error(xudc->dev, deq_ptr)) {
+ ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
+ ep_ctx_write_dcs(ep0->context, ep0->pcs);
+ }
+
+ ep_unhalt_all(xudc);
+ ep_reload(xudc, 0);
+ ep_unpause(xudc, 0);
+}
+
+static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u16 maxpacket;
+ u32 val;
+
+ val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
+ switch (val) {
+ case PORTSC_PS_LS:
+ xudc->gadget.speed = USB_SPEED_LOW;
+ break;
+ case PORTSC_PS_FS:
+ xudc->gadget.speed = USB_SPEED_FULL;
+ break;
+ case PORTSC_PS_HS:
+ xudc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case PORTSC_PS_SS:
+ xudc->gadget.speed = USB_SPEED_SUPER;
+ break;
+ default:
+ xudc->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ maxpacket = 512;
+ else
+ maxpacket = 64;
+
+ ep_ctx_write_max_packet_size(ep0->context, maxpacket);
+ tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
+ usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);
+
+ if (!xudc->soc->u1_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U1TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (!xudc->soc->u2_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U2TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (xudc->gadget.speed <= USB_SPEED_HIGH) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_L1S_MASK);
+ if (xudc->soc->lpm_enable)
+ val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
+ else
+ val |= PORTPM_L1S(PORTPM_L1S_NYET);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ val = xudc_readl(xudc, ST);
+ if (val & ST_RC)
+ xudc_writel(xudc, ST_RC, ST);
+}
+
+static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver && xudc->driver->disconnect) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->disconnect(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+
+ xudc->device_state = USB_STATE_NOTATTACHED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ complete(&xudc->disconnect_complete);
+}
+
+static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver) {
+ spin_unlock(&xudc->lock);
+ usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
+ spin_lock(&xudc->lock);
+ }
+
+ tegra_xudc_port_connect(xudc);
+}
+
+static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port suspend\n");
+
+ xudc->resume_state = xudc->device_state;
+ xudc->device_state = USB_STATE_SUSPENDED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ if (xudc->driver->suspend) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->suspend(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port resume\n");
+
+ tegra_xudc_resume_device_state(xudc);
+
+ if (xudc->driver->resume) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->resume(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~PORTSC_CHANGE_MASK;
+ val |= flag;
+ xudc_writel(xudc, val, PORTSC);
+}
+
+static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ u32 portsc, porthalt;
+
+ porthalt = xudc_readl(xudc, PORTHALT);
+ if ((porthalt & PORTHALT_STCHG_REQ) &&
+ (porthalt & PORTHALT_HALT_LTSSM)) {
+ dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
+ porthalt &= ~PORTHALT_HALT_LTSSM;
+ xudc_writel(xudc, porthalt, PORTHALT);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+#define TOGGLE_VBUS_WAIT_MS 100
+ if (xudc->soc->port_reset_quirk) {
+ schedule_delayed_work(&xudc->port_reset_war_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_for_sec_prc = 1;
+ }
+ }
+
+ if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+ tegra_xudc_port_reset(xudc);
+ cancel_delayed_work(&xudc->port_reset_war_work);
+ xudc->wait_for_sec_prc = 0;
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_WRC) {
+ dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
+ if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
+ tegra_xudc_port_reset(xudc);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_CSC) {
+ dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CSC);
+
+ if (portsc & PORTSC_CCS)
+ tegra_xudc_port_connect(xudc);
+ else
+ tegra_xudc_port_disconnect(xudc);
+
+ if (xudc->wait_csc) {
+ cancel_delayed_work(&xudc->plc_reset_work);
+ xudc->wait_csc = false;
+ }
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_PLC) {
+ u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;
+
+ dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PLC);
+ switch (pls) {
+ case PORTSC_PLS_U3:
+ tegra_xudc_port_suspend(xudc);
+ break;
+ case PORTSC_PLS_U0:
+ if (xudc->gadget.speed < USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_RESUME:
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_INACTIVE:
+ schedule_delayed_work(&xudc->plc_reset_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_csc = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (portsc & PORTSC_CEC) {
+ dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CEC);
+ }
+
+ dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
+}
+
+static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
+ (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
+ __tegra_xudc_handle_port_status(xudc);
+}
+
+static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ u32 type = trb_read_type(event);
+
+ dump_trb(xudc, "EVENT", event);
+
+ switch (type) {
+ case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
+ tegra_xudc_handle_port_status(xudc);
+ break;
+ case TRB_TYPE_TRANSFER_EVENT:
+ tegra_xudc_handle_transfer_event(xudc, event);
+ break;
+ case TRB_TYPE_SETUP_PACKET_EVENT:
+ tegra_xudc_handle_ep0_event(xudc, event);
+ break;
+ default:
+ dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
+ break;
+ }
+}
+
+static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_trb *event;
+ dma_addr_t erdp;
+
+ while (true) {
+ event = xudc->event_ring[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr;
+
+ if (trb_read_cycle(event) != xudc->ccs)
+ break;
+
+ tegra_xudc_handle_event(xudc, event);
+
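+ /*
+ * Move to the next ring segment once the current one is
+ * exhausted, and toggle the consumer cycle state after
+ * wrapping from the last segment back to the first.
+ */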
+ xudc->event_ring_deq_ptr++;
+ if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
+ xudc->event_ring_deq_ptr = 0;
+ xudc->event_ring_index++;
+ }
+
+ if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
+ xudc->event_ring_index = 0;
+ xudc->ccs = !xudc->ccs;
+ }
+ }
+
+ erdp = xudc->event_ring_phys[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr * sizeof(*event);
+
+ xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
+ xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
+}
+
+static irqreturn_t tegra_xudc_irq(int irq, void *data)
+{
+ struct tegra_xudc *xudc = data;
+ unsigned long flags;
+ u32 val;
+
+ val = xudc_readl(xudc, ST);
+ if (!(val & ST_IP))
+ return IRQ_NONE;
+ xudc_writel(xudc, ST_IP, ST);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ tegra_xudc_process_event_ring(xudc);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ ep->xudc = xudc;
+ ep->index = index;
+ ep->context = &xudc->ep_context[index];
+ INIT_LIST_HEAD(&ep->queue);
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return 0;
+
+ ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
+ GFP_KERNEL,
+ &ep->transfer_ring_phys);
+ if (!ep->transfer_ring)
+ return -ENOMEM;
+
+ if (index) {
+ snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
+ (index % 2 == 0) ? "out" : "in");
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.max_streams = 16;
+ ep->usb_ep.ops = &tegra_xudc_ep_ops;
+ ep->usb_ep.caps.type_bulk = true;
+ ep->usb_ep.caps.type_int = true;
+ if (index & 1)
+ ep->usb_ep.caps.dir_in = true;
+ else
+ ep->usb_ep.caps.dir_out = true;
+ list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
+ } else {
+ strscpy(ep->name, "ep0", 3);
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
+ ep->usb_ep.ops = &tegra_xudc_ep0_ops;
+ ep->usb_ep.caps.type_control = true;
+ ep->usb_ep.caps.dir_in = true;
+ ep->usb_ep.caps.dir_out = true;
+ }
+
+ return 0;
+}
+
+static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return;
+
+ dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
+ ep->transfer_ring_phys);
+}
+
+static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
+{
+ struct usb_request *req;
+ unsigned int i;
+ int err;
+
+ xudc->ep_context =
+ dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
+ sizeof(*xudc->ep_context),
+ &xudc->ep_context_phys, GFP_KERNEL);
+ if (!xudc->ep_context)
+ return -ENOMEM;
+
+ xudc->transfer_ring_pool =
+ dmam_pool_create(dev_name(xudc->dev), xudc->dev,
+ XUDC_TRANSFER_RING_SIZE *
+ sizeof(struct tegra_xudc_trb),
+ sizeof(struct tegra_xudc_trb), 0);
+ if (!xudc->transfer_ring_pool) {
+ err = -ENOMEM;
+ goto free_ep_context;
+ }
+
+ INIT_LIST_HEAD(&xudc->gadget.ep_list);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ err = tegra_xudc_alloc_ep(xudc, i);
+ if (err < 0)
+ goto free_eps;
+ }
+
+ req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ goto free_eps;
+ }
+ xudc->ep0_req = to_xudc_req(req);
+
+ return 0;
+
+free_eps:
+ for (; i > 0; i--)
+ tegra_xudc_free_ep(xudc, i - 1);
+free_ep_context:
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+ return err;
+}
+
+static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
+{
+ xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
+ xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
+}
+
+static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
+ &xudc->ep0_req->usb_req);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_free_ep(xudc, i);
+
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+}
+
+static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ xudc->event_ring[i] =
+ dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ &xudc->event_ring_phys[i],
+ GFP_KERNEL);
+ if (!xudc->event_ring[i])
+ goto free_dma;
+ }
+
+ return 0;
+
+free_dma:
+ for (; i > 0; i--) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i - 1]),
+ xudc->event_ring[i - 1],
+ xudc->event_ring_phys[i - 1]);
+ }
+ return -ENOMEM;
+}
+
+static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ val = xudc_readl(xudc, SPARAM);
+ val &= ~(SPARAM_ERSTMAX_MASK);
+ val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
+ xudc_writel(xudc, val, SPARAM);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]));
+
+ val = xudc_readl(xudc, ERSTSZ);
+ val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
+ val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
+ xudc_writel(xudc, val, ERSTSZ);
+
+ xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBALO(i));
+ xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBAHI(i));
+ }
+
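+ /*
+ * Point the enqueue (EREP) and dequeue (ERDP) pointers at the
+ * start of the first ring segment; the producer cycle state
+ * starts out set.
+ */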
+ val = lower_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPLO);
+ val |= EREPLO_ECS;
+ xudc_writel(xudc, val, EREPLO);
+
+ val = upper_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPHI);
+ xudc_writel(xudc, val, EREPHI);
+
+ xudc->ccs = true;
+ xudc->event_ring_index = 0;
+ xudc->event_ring_deq_ptr = 0;
+}
+
+static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ xudc->event_ring[i],
+ xudc->event_ring_phys[i]);
+ }
+}
+
+static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ if (xudc->soc->has_ipfs) {
+ val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
+ val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
+ ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
+ usleep_range(10, 15);
+ }
+
+ /* Enable bus master */
+ val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
+ XUSB_DEV_CFG_1_BUS_MASTER_EN;
+ fpci_writel(xudc, val, XUSB_DEV_CFG_1);
+
+ /* Program BAR0 space */
+ val = fpci_readl(xudc, XUSB_DEV_CFG_4);
+ val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+ val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+
+ fpci_writel(xudc, val, XUSB_DEV_CFG_4);
+ fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);
+
+ usleep_range(100, 200);
+
+ if (xudc->soc->has_ipfs) {
+ /* Enable interrupt assertion */
+ val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
+ val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
+ ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
+ }
+}
+
+static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
+{
+ u32 val, imod;
+
+ if (xudc->soc->has_ipfs) {
+ val = xudc_readl(xudc, BLCG);
+ val |= BLCG_ALL;
+ val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
+ BLCG_COREPLL_PWRDN);
+ val |= BLCG_IOPLL_0_PWRDN;
+ val |= BLCG_IOPLL_1_PWRDN;
+ val |= BLCG_IOPLL_2_PWRDN;
+
+ xudc_writel(xudc, val, BLCG);
+ }
+
+ /* Set a reasonable U3 exit timer value. */
+ val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
+ val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
+ val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
+ xudc_writel(xudc, val, SSPX_CORE_PADCTL4);
+
+ /* Default ping LFPS tBurst is too large. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT0);
+ val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
+ val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
+ xudc_writel(xudc, val, SSPX_CORE_CNT0);
+
+ /* Default tPortConfiguration timeout is too small. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT30);
+ val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
+ val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
+ xudc_writel(xudc, val, SSPX_CORE_CNT30);
+
+ if (xudc->soc->lpm_enable) {
+ /* Set L1 resume duration to 95 us. */
+ val = xudc_readl(xudc, HSFSPI_COUNT13);
+ val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
+ val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
+ xudc_writel(xudc, val, HSFSPI_COUNT13);
+ }
+
+ /*
+ * The compliance suite appears to violate the polling LFPS tBurst
+ * maximum of 1.4us. Send 1.45us instead.
+ */
+ val = xudc_readl(xudc, SSPX_CORE_CNT32);
+ val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
+ val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT32);
+
+ /* Direct HS/FS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Direct SS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Restore port instance. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /*
+ * Enable INFINITE_SS_RETRY to prevent device from entering
+ * Disabled.Error when attached to buggy SuperSpeed hubs.
+ */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val |= CFG_DEV_FE_INFINITE_SS_RETRY;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /* Set interrupt moderation. */
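+ /* The counters appear to be in 250 ns units, hence the factor of four. */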
+ imod = XUDC_INTERRUPT_MODERATION_US * 4;
+ val = xudc_readl(xudc, RT_IMOD);
+ val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
+ val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
+ xudc_writel(xudc, val, RT_IMOD);
+
+ /* increase SSPI transaction timeout from 32us to 512us */
+ val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
+ val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
+ val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
+ xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
+}
+
+static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
+{
+ int err;
+
+ err = phy_init(xudc->utmi_phy);
+ if (err < 0) {
+ dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
+ return err;
+ }
+
+ err = phy_init(xudc->usb3_phy);
+ if (err < 0) {
+ dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
+ goto exit_utmi_phy;
+ }
+
+ return 0;
+
+exit_utmi_phy:
+ phy_exit(xudc->utmi_phy);
+ return err;
+}
+
+static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
+{
+ phy_exit(xudc->usb3_phy);
+ phy_exit(xudc->utmi_phy);
+}
+
+static const char * const tegra210_xudc_supply_names[] = {
+ "hvdd-usb",
+ "avddio-usb",
+};
+
+static const char * const tegra210_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "hs_src",
+ "fs_src",
+};
+
+static const char * const tegra186_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "fs_src",
+};
+
+static struct tegra_xudc_soc tegra210_xudc_soc_data = {
+ .supply_names = tegra210_xudc_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
+ .clock_names = tegra210_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
+ .u1_enable = false,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = true,
+ .pls_quirk = true,
+ .port_reset_quirk = true,
+ .has_ipfs = true,
+};
+
+static struct tegra_xudc_soc tegra186_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .has_ipfs = false,
+};
+
+static const struct of_device_id tegra_xudc_of_match[] = {
+ {
+ .compatible = "nvidia,tegra210-xudc",
+ .data = &tegra210_xudc_soc_data
+ },
+ {
+ .compatible = "nvidia,tegra186-xudc",
+ .data = &tegra186_xudc_soc_data
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
+
+static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
+{
+ if (xudc->genpd_dl_ss)
+ device_link_del(xudc->genpd_dl_ss);
+ if (xudc->genpd_dl_device)
+ device_link_del(xudc->genpd_dl_device);
+ if (xudc->genpd_dev_ss)
+ dev_pm_domain_detach(xudc->genpd_dev_ss, true);
+ if (xudc->genpd_dev_device)
+ dev_pm_domain_detach(xudc->genpd_dev_device, true);
+}
+
+static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
+{
+ struct device *dev = xudc->dev;
+ int err;
+
+ xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev,
+ "dev");
+ if (IS_ERR(xudc->genpd_dev_device)) {
+ err = PTR_ERR(xudc->genpd_dev_device);
+ dev_err(dev, "failed to get dev pm-domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
+ if (IS_ERR(xudc->genpd_dev_ss)) {
+ err = PTR_ERR(xudc->genpd_dev_ss);
+ dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_device) {
+ dev_err(dev, "adding usb device device link failed!\n");
+ return -ENODEV;
+ }
+
+ xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_ss) {
+ dev_err(dev, "adding superspeed device link failed!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_probe(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc;
+ struct resource *res;
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+ unsigned int i;
+ int err;
+
+ xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
+ if (!xudc)
+ return -ENOMEM;
+
+ xudc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xudc);
+
+ xudc->soc = of_device_get_match_data(&pdev->dev);
+ if (!xudc->soc)
+ return -ENODEV;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ xudc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->base))
+ return PTR_ERR(xudc->base);
+ xudc->phys_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fpci");
+ xudc->fpci = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->fpci))
+ return PTR_ERR(xudc->fpci);
+
+ if (xudc->soc->has_ipfs) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ipfs");
+ xudc->ipfs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->ipfs))
+ return PTR_ERR(xudc->ipfs);
+ }
+
+ xudc->irq = platform_get_irq(pdev, 0);
+ if (xudc->irq < 0) {
+ dev_err(xudc->dev, "failed to get IRQ: %d\n",
+ xudc->irq);
+ return xudc->irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
+ dev_name(&pdev->dev), xudc);
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
+ err);
+ return err;
+ }
+
+ xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks,
+ sizeof(*xudc->clks), GFP_KERNEL);
+ if (!xudc->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_clks; i++)
+ xudc->clks[i].id = xudc->soc->clock_names[i];
+
+ err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks,
+ xudc->clks);
+ if (err) {
+ dev_err(xudc->dev, "failed to request clks %d\n", err);
+ return err;
+ }
+
+ xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
+ sizeof(*xudc->supplies), GFP_KERNEL);
+ if (!xudc->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_supplies; i++)
+ xudc->supplies[i].supply = xudc->soc->supply_names[i];
+
+ err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err) {
+ dev_err(xudc->dev, "failed to request regulators %d\n", err);
+ return err;
+ }
+
+ xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
+ if (IS_ERR(xudc->padctl))
+ return PTR_ERR(xudc->padctl);
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
+ if (err) {
+ dev_err(xudc->dev, "failed to enable regulators %d\n", err);
+ goto put_padctl;
+ }
+
+ xudc->usb3_phy = devm_phy_optional_get(&pdev->dev, "usb3");
+ if (IS_ERR(xudc->usb3_phy)) {
+ err = PTR_ERR(xudc->usb3_phy);
+ dev_err(xudc->dev, "failed to get usb3 phy: %d\n", err);
+ goto disable_regulator;
+ }
+
+ xudc->utmi_phy = devm_phy_optional_get(&pdev->dev, "usb2");
+ if (IS_ERR(xudc->utmi_phy)) {
+ err = PTR_ERR(xudc->utmi_phy);
+ dev_err(xudc->dev, "failed to get usb2 phy: %d\n", err);
+ goto disable_regulator;
+ }
+
+ err = tegra_xudc_powerdomain_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_phy_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_alloc_event_ring(xudc);
+ if (err)
+ goto disable_phy;
+
+ err = tegra_xudc_alloc_eps(xudc);
+ if (err)
+ goto free_event_ring;
+
+ spin_lock_init(&xudc->lock);
+
+ init_completion(&xudc->disconnect_complete);
+
+ INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
+
+ INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
+
+ INIT_DELAYED_WORK(&xudc->port_reset_war_work,
+ tegra_xudc_port_reset_war_work);
+
+ if (of_property_read_bool(xudc->dev->of_node, "usb-role-switch")) {
+ role_sx_desc.set = tegra_xudc_usb_role_sw_set;
+ role_sx_desc.fwnode = dev_fwnode(xudc->dev);
+
+ xudc->usb_role_sw = usb_role_switch_register(xudc->dev,
+ &role_sx_desc);
+ if (IS_ERR(xudc->usb_role_sw)) {
+ err = PTR_ERR(xudc->usb_role_sw);
+ dev_err(xudc->dev, "Failed to register USB role SW: %d",
+ err);
+ goto free_eps;
+ }
+ } else {
+ /* Without a role switch, default to device mode; this keeps the PHY always on. */
+ dev_info(xudc->dev, "defaulting to device mode\n");
+ schedule_work(&xudc->usb_role_sw_work);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ xudc->gadget.ops = &tegra_xudc_gadget_ops;
+ xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
+ xudc->gadget.name = "tegra-xudc";
+ xudc->gadget.max_speed = USB_SPEED_SUPER;
+
+ err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
+ goto free_eps;
+ }
+
+ return 0;
+
+free_eps:
+ tegra_xudc_free_eps(xudc);
+free_event_ring:
+ tegra_xudc_free_event_ring(xudc);
+disable_phy:
+ tegra_xudc_phy_exit(xudc);
+put_powerdomains:
+ tegra_xudc_powerdomain_remove(xudc);
+disable_regulator:
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+put_padctl:
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return err;
+}
+
+static int tegra_xudc_remove(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(xudc->dev);
+
+ cancel_delayed_work(&xudc->plc_reset_work);
+
+ if (xudc->usb_role_sw) {
+ usb_role_switch_unregister(xudc->usb_role_sw);
+ cancel_work_sync(&xudc->usb_role_sw_work);
+ }
+
+ usb_del_gadget_udc(&xudc->gadget);
+
+ tegra_xudc_free_eps(xudc);
+ tegra_xudc_free_event_ring(xudc);
+
+ tegra_xudc_powerdomain_remove(xudc);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ phy_power_off(xudc->utmi_phy);
+ phy_power_off(xudc->usb3_phy);
+
+ tegra_xudc_phy_exit(xudc);
+
+ pm_runtime_disable(xudc->dev);
+ pm_runtime_put(xudc->dev);
+
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+
+ dev_dbg(xudc->dev, "entering ELPG\n");
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ xudc->powergated = true;
+ xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
+ xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
+ xudc_writel(xudc, 0, CTRL);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ dev_dbg(xudc->dev, "entering ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+ int err;
+
+ dev_dbg(xudc->dev, "exiting ELPG\n");
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err < 0)
+ return err;
+
+ err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
+ if (err < 0)
+ return err;
+
+ tegra_xudc_fpci_ipfs_init(xudc);
+
+ tegra_xudc_device_params_init(xudc);
+
+ tegra_xudc_init_event_ring(xudc);
+
+ tegra_xudc_init_eps(xudc);
+
+ xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
+ xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->powergated = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ dev_dbg(xudc->dev, "exiting ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = true;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ flush_work(&xudc->usb_role_sw_work);
+
+ /* Forcibly disconnect before powergating. */
+ tegra_xudc_device_mode_off(xudc);
+
+ if (!pm_runtime_status_suspended(dev))
+ tegra_xudc_powergate(xudc);
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+
+ err = tegra_xudc_unpowergate(xudc);
+ if (err < 0)
+ return err;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ schedule_work(&xudc->usb_role_sw_work);
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_powergate(xudc);
+}
+
+static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_unpowergate(xudc);
+}
+
+static const struct dev_pm_ops tegra_xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
+ SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
+ tegra_xudc_runtime_resume, NULL)
+};
+
+static struct platform_driver tegra_xudc_driver = {
+ .probe = tegra_xudc_probe,
+ .remove = tegra_xudc_remove,
+ .driver = {
+ .name = "tegra-xudc",
+ .pm = &tegra_xudc_pm_ops,
+ .of_match_table = tegra_xudc_of_match,
+ },
+};
+module_platform_driver(tegra_xudc_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
+MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 79b2e79dddd0..8d730180db06 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -38,9 +38,9 @@ config USB_XHCI_DBGCAP
before enabling this option. If unsure, say 'N'.
config USB_XHCI_PCI
- tristate
- depends on USB_PCI
- default y
+ tristate
+ depends on USB_PCI
+ default y
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
@@ -220,12 +220,12 @@ config USB_EHCI_HCD_ORION
Marvell PXA/MMP USB controller" for those.
config USB_EHCI_HCD_SPEAR
- tristate "Support for ST SPEAr on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && PLAT_SPEAR
- default y
- ---help---
- Enables support for the on-chip EHCI controller on
- ST SPEAr chips.
+ tristate "Support for ST SPEAr on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ ST SPEAr chips.
config USB_EHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip EHCI USB controller"
@@ -237,21 +237,21 @@ config USB_EHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_EHCI_HCD_AT91
- tristate "Support for Atmel on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && ARCH_AT91
- default y
- ---help---
- Enables support for the on-chip EHCI controller on
- Atmel chips.
+ tristate "Support for Atmel on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ Atmel chips.
config USB_EHCI_TEGRA
- tristate "NVIDIA Tegra HCD support"
- depends on ARCH_TEGRA
- select USB_EHCI_ROOT_HUB_TT
- select USB_TEGRA_PHY
- help
- This driver enables support for the internal USB Host Controllers
- found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
+ tristate "NVIDIA Tegra HCD support"
+ depends on ARCH_TEGRA
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_TEGRA_PHY
+ help
+ This driver enables support for the internal USB Host Controllers
+ found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
@@ -269,10 +269,10 @@ config USB_EHCI_SH
If you use the PCI EHCI controller, this option is not necessary.
config USB_EHCI_EXYNOS
- tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
- help
- Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
+ tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
tristate "EHCI support for Marvell PXA/MMP USB controller"
@@ -409,12 +409,12 @@ config USB_OHCI_HCD_OMAP1
Enables support for the OHCI controller on OMAP1/2 chips.
config USB_OHCI_HCD_SPEAR
- tristate "Support for ST SPEAr on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && PLAT_SPEAR
- default y
- ---help---
- Enables support for the on-chip OHCI controller on
- ST SPEAr chips.
+ tristate "Support for ST SPEAr on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ ST SPEAr chips.
config USB_OHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip OHCI USB controller"
@@ -426,12 +426,12 @@ config USB_OHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_OHCI_HCD_S3C2410
- tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
- depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
- default y
- ---help---
- Enables support for the on-chip OHCI controller on
- S3C24xx/S3C64xx chips.
+ tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ S3C24xx/S3C64xx chips.
config USB_OHCI_HCD_LPC32XX
tristate "Support for LPC on-chip OHCI USB controller"
@@ -440,8 +440,8 @@ config USB_OHCI_HCD_LPC32XX
depends on USB_ISP1301
default y
---help---
- Enables support for the on-chip OHCI controller on
- NXP chips.
+ Enables support for the on-chip OHCI controller on
+ NXP chips.
config USB_OHCI_HCD_PXA27X
tristate "Support for PXA27X/PXA3XX on-chip OHCI USB controller"
@@ -456,8 +456,8 @@ config USB_OHCI_HCD_AT91
depends on USB_OHCI_HCD && ARCH_AT91 && OF
default y
---help---
- Enables support for the on-chip OHCI controller on
- Atmel chips.
+ Enables support for the on-chip OHCI controller on
+ Atmel chips.
config USB_OHCI_HCD_OMAP3
tristate "OHCI support for OMAP3 and later chips"
@@ -545,7 +545,7 @@ config USB_OHCI_EXYNOS
tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
depends on ARCH_S5PV210 || ARCH_EXYNOS
help
- Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
@@ -609,8 +609,8 @@ config USB_UHCI_PLATFORM
default y if (ARCH_VT8500 || ARCH_ASPEED)
config USB_UHCI_ASPEED
- bool
- default y if ARCH_ASPEED
+ bool
+ default y if ARCH_ASPEED
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
@@ -713,14 +713,14 @@ config USB_RENESAS_USBHS_HCD
module will be called renesas-usbhs.
config USB_IMX21_HCD
- tristate "i.MX21 HCD support"
- depends on ARM && ARCH_MXC
- help
- This driver enables support for the on-chip USB host in the
- i.MX21 processor.
-
- To compile this driver as a module, choose M here: the
- module will be called "imx21-hcd".
+ tristate "i.MX21 HCD support"
+ depends on ARM && ARCH_MXC
+ help
+ This driver enables support for the on-chip USB host in the
+ i.MX21 processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called "imx21-hcd".
config USB_HCD_BCMA
tristate "BCMA usb host driver"
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 2400a826397a..652fa29beb27 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -406,9 +406,12 @@ static int bcma_hcd_probe(struct bcma_device *core)
return -ENOMEM;
usb_dev->core = core;
- if (core->dev.of_node)
+ if (core->dev.of_node) {
usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
GPIOD_OUT_HIGH);
+ if (IS_ERR(usb_dev->gpio_desc))
+ return PTR_ERR(usb_dev->gpio_desc);
+ }
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 9e0c98d6bdb0..f967adf2d8df 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5646,8 +5646,10 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
return retval;
failed_dis_clk:
- if (!IS_ERR(fotg210->pclk))
+ if (!IS_ERR(fotg210->pclk)) {
clk_disable_unprepare(fotg210->pclk);
+ clk_put(fotg210->pclk);
+ }
failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
@@ -5665,8 +5667,10 @@ static int fotg210_hcd_remove(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- if (!IS_ERR(fotg210->pclk))
+ if (!IS_ERR(fotg210->pclk)) {
clk_disable_unprepare(fotg210->pclk);
+ clk_put(fotg210->pclk);
+ }
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
index 7fcf1d9dd7f3..02a1344fbd6a 100644
--- a/drivers/usb/host/imx21-dbg.c
+++ b/drivers/usb/host/imx21-dbg.c
@@ -419,7 +419,7 @@ static void create_debug_files(struct imx21 *imx21)
{
struct dentry *root;
- root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+ root = debugfs_create_dir(dev_name(imx21->dev), usb_debug_root);
imx21->debug_root = root;
debugfs_create_file("status", S_IRUGO, root, imx21, &debug_status_fops);
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 96f8daa11f25..4a3a2852523f 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2627,7 +2627,7 @@ static int isp1362_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp1362_hcd *isp1362_hcd;
- struct resource *addr, *data, *irq_res;
+ struct resource *data, *irq_res;
void __iomem *addr_reg;
void __iomem *data_reg;
int irq;
@@ -2651,8 +2651,7 @@ static int isp1362_probe(struct platform_device *pdev)
irq = irq_res->start;
- addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- addr_reg = devm_ioremap_resource(&pdev->dev, addr);
+ addr_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(addr_reg))
return PTR_ERR(addr_reg);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index fc35a7993b7b..b635c6a1b1a9 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -115,7 +115,6 @@ static void at91_start_hc(struct platform_device *pdev)
static void at91_stop_hc(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_regs __iomem *regs = hcd->regs;
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
dev_dbg(&pdev->dev, "stop\n");
@@ -123,7 +122,7 @@ static void at91_stop_hc(struct platform_device *pdev)
/*
* Put the USB host controller into reset.
*/
- writel(0, &regs->control);
+ usb_hcd_platform_shutdown(pdev);
/*
* Stop the USB clocks.
@@ -628,6 +627,7 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
+ msleep(1);
at91_stop_clock(ohci_at91);
}
@@ -642,8 +642,8 @@ ohci_hcd_at91_drv_resume(struct device *dev)
if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
-
- at91_start_clock(ohci_at91);
+ else
+ at91_start_clock(ohci_at91);
ohci_resume(hcd, false);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index c561881d0e79..85878e8ad331 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -150,7 +150,7 @@ static void ohci_nxp_stop_hc(void)
static int ohci_hcd_nxp_probe(struct platform_device *pdev)
{
- struct usb_hcd *hcd = 0;
+ struct usb_hcd *hcd = NULL;
const struct hc_driver *driver = &ohci_nxp_hc_driver;
struct resource *res;
int ret = 0, irq;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e67242e437ed..fe09b8626329 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -676,12 +676,12 @@ static int oxu_hub_control(struct usb_hcd *hcd,
*/
/* Low level read/write registers functions */
-static inline u32 oxu_readl(void *base, u32 reg)
+static inline u32 oxu_readl(void __iomem *base, u32 reg)
{
return readl(base + reg);
}
-static inline void oxu_writel(void *base, u32 reg, u32 val)
+static inline void oxu_writel(void __iomem *base, u32 reg, u32 val)
{
writel(val, base + reg);
}
@@ -4063,7 +4063,7 @@ static const struct hc_driver oxu_hc_driver = {
* Module stuff
*/
-static void oxu_configuration(struct platform_device *pdev, void *base)
+static void oxu_configuration(struct platform_device *pdev, void __iomem *base)
{
u32 tmp;
@@ -4093,7 +4093,7 @@ static void oxu_configuration(struct platform_device *pdev, void *base)
oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
-static int oxu_verify_id(struct platform_device *pdev, void *base)
+static int oxu_verify_id(struct platform_device *pdev, void __iomem *base)
{
u32 id;
static const char * const bo[] = {
@@ -4121,7 +4121,7 @@ static int oxu_verify_id(struct platform_device *pdev, void *base)
static const struct hc_driver oxu_hc_driver;
static struct usb_hcd *oxu_create(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
- void *base, int irq, int otg)
+ void __iomem *base, int irq, int otg)
{
struct device *dev = &pdev->dev;
@@ -4158,7 +4158,7 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
static int oxu_init(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
- void *base, int irq)
+ void __iomem *base, int irq)
{
struct oxu_info *info = platform_get_drvdata(pdev);
struct usb_hcd *hcd;
@@ -4207,7 +4207,7 @@ error_create_otg:
static int oxu_drv_probe(struct platform_device *pdev)
{
struct resource *res;
- void *base;
+ void __iomem *base;
unsigned long memstart, memlen;
int irq, ret;
struct oxu_info *info;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f6d04491df60..6c7f0a876b96 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -728,7 +728,7 @@ static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
if (!pio_enabled(pdev))
return;
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
base = pci_resource_start(pdev, i);
break;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 4efee34f154f..e9209e3e6248 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -71,7 +71,7 @@ INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent*/
static bool distrust_firmware = true;
module_param(distrust_firmware, bool, 0);
-MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
-"t setup");
+MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurrent"
+" setup");
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1e0236e90687..a0025d23b257 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
+#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -212,7 +213,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e7aab31fd9a5..6475c3d3b43b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -280,6 +280,9 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
return;
xhci_dbg(xhci, "// Ding dong!\n");
+
+ trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
+
writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
readl(&xhci->dba->doorbell[0]);
@@ -401,6 +404,9 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
return;
+
+ trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
+
writel(DB_VALUE(ep_index, stream_id), db_addr);
/* The CPU has better things to do at this point than wait for a
* write-posting flush. It'll get there soon enough.
@@ -651,10 +657,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock(&xhci->lock);
trace_xhci_urb_giveback(urb);
usb_hcd_giveback_urb(hcd, urb, status);
- spin_lock(&xhci->lock);
}
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
@@ -2741,6 +2745,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
}
/*
+ * Update Event Ring Dequeue Pointer:
+ * - When all events have finished
+ * - To avoid "Event Ring Full Error" condition
+ */
+static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ union xhci_trb *event_ring_deq)
+{
+ u64 temp_64;
+ dma_addr_t deq;
+
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ /* If necessary, update the HW's version of the event ring deq ptr. */
+ if (event_ring_deq != xhci->event_ring->dequeue) {
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall
+ * always advance the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
+ ((u64) deq & (u64) ~ERST_PTR_MASK))
+ return;
+
+ /* Update HC event ring dequeue pointer */
+ temp_64 &= ERST_PTR_MASK;
+ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp_64 |= ERST_EHB;
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+}
+
+/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
@@ -2751,9 +2791,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
union xhci_trb *event_ring_deq;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
- dma_addr_t deq;
u64 temp_64;
u32 status;
+ int event_loop = 0;
spin_lock_irqsave(&xhci->lock, flags);
/* Check if the xHC generated the interrupt, or the irq is shared */
@@ -2807,24 +2847,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
- while (xhci_handle_event(xhci) > 0) {}
-
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != xhci->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event "
- "ring dequeue ptr.\n");
- /* Update HC event ring dequeue pointer */
- temp_64 &= ERST_PTR_MASK;
- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ while (xhci_handle_event(xhci) > 0) {
+ if (event_loop++ < TRBS_PER_SEGMENT / 2)
+ continue;
+ xhci_update_erst_dequeue(xhci, event_ring_deq);
+ event_loop = 0;
}
- /* Clear the event handler busy flag (RW1C); event ring is empty. */
- temp_64 |= ERST_EHB;
- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_update_erst_dequeue(xhci, event_ring_deq);
ret = IRQ_HANDLED;
out:
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 2ff7c911fbd0..bf9065438320 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -42,19 +42,18 @@
#define XUSB_CFG_CSB_BASE_ADDR 0x800
/* FPCI mailbox registers */
-#define XUSB_CFG_ARU_MBOX_CMD 0x0e4
+/* XUSB_CFG_ARU_MBOX_CMD */
#define MBOX_DEST_FALC BIT(27)
#define MBOX_DEST_PME BIT(28)
#define MBOX_DEST_SMI BIT(29)
#define MBOX_DEST_XHCI BIT(30)
#define MBOX_INT_EN BIT(31)
-#define XUSB_CFG_ARU_MBOX_DATA_IN 0x0e8
+/* XUSB_CFG_ARU_MBOX_DATA_IN and XUSB_CFG_ARU_MBOX_DATA_OUT */
#define CMD_DATA_SHIFT 0
#define CMD_DATA_MASK 0xffffff
#define CMD_TYPE_SHIFT 24
#define CMD_TYPE_MASK 0xff
-#define XUSB_CFG_ARU_MBOX_DATA_OUT 0x0ec
-#define XUSB_CFG_ARU_MBOX_OWNER 0x0f0
+/* XUSB_CFG_ARU_MBOX_OWNER */
#define MBOX_OWNER_NONE 0
#define MBOX_OWNER_FW 1
#define MBOX_OWNER_SW 2
@@ -146,6 +145,13 @@ struct tegra_xusb_phy_type {
unsigned int num;
};
+struct tega_xusb_mbox_regs {
+ u16 cmd;
+ u16 data_in;
+ u16 data_out;
+ u16 owner;
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
@@ -160,6 +166,8 @@ struct tegra_xusb_soc {
} usb2, ulpi, hsic, usb3;
} ports;
+ struct tega_xusb_mbox_regs mbox;
+
bool scale_ss_clock;
bool has_ipfs;
};
@@ -395,15 +403,15 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
* ACK/NAK messages.
*/
if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE) {
dev_err(tegra->dev, "mailbox is busy\n");
return -EBUSY;
}
- fpci_writel(tegra, MBOX_OWNER_SW, XUSB_CFG_ARU_MBOX_OWNER);
+ fpci_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_SW) {
dev_err(tegra->dev, "failed to acquire mailbox\n");
return -EBUSY;
@@ -413,17 +421,17 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
value = tegra_xusb_mbox_pack(msg);
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_DATA_IN);
+ fpci_writel(tegra, value, tegra->soc->mbox.data_in);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value = fpci_readl(tegra, tegra->soc->mbox.cmd);
value |= MBOX_INT_EN | MBOX_DEST_FALC;
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+ fpci_writel(tegra, value, tegra->soc->mbox.cmd);
if (wait_for_idle) {
unsigned long timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value == MBOX_OWNER_NONE)
break;
@@ -431,7 +439,7 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
if (time_after(jiffies, timeout))
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE)
return -ETIMEDOUT;
@@ -598,16 +606,16 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
mutex_lock(&tegra->lock);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_DATA_OUT);
+ value = fpci_readl(tegra, tegra->soc->mbox.data_out);
tegra_xusb_mbox_unpack(&msg, value);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value = fpci_readl(tegra, tegra->soc->mbox.cmd);
value &= ~MBOX_DEST_SMI;
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+ fpci_writel(tegra, value, tegra->soc->mbox.cmd);
/* clear mailbox owner if no ACK/NAK is required */
if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
- fpci_writel(tegra, MBOX_OWNER_NONE, XUSB_CFG_ARU_MBOX_OWNER);
+ fpci_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
tegra_xusb_mbox_handle(tegra, &msg);
@@ -755,7 +763,6 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- tegra_xusb_phy_disable(tegra);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
tegra_xusb_clk_disable(tegra);
@@ -779,16 +786,8 @@ static int tegra_xusb_runtime_resume(struct device *dev)
goto disable_clk;
}
- err = tegra_xusb_phy_enable(tegra);
- if (err < 0) {
- dev_err(dev, "failed to enable PHYs: %d\n", err);
- goto disable_regulator;
- }
-
return 0;
-disable_regulator:
- regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
disable_clk:
tegra_xusb_clk_disable(tegra);
return err;
@@ -970,7 +969,7 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb_mbox_msg msg;
- struct resource *res, *regs;
+ struct resource *regs;
struct tegra_xusb *tegra;
struct xhci_hcd *xhci;
unsigned int i, j, k;
@@ -992,14 +991,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tegra->fpci_base = devm_ioremap_resource(&pdev->dev, res);
+ tegra->fpci_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(tegra->fpci_base))
return PTR_ERR(tegra->fpci_base);
if (tegra->soc->has_ipfs) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
+ tegra->ipfs_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(tegra->ipfs_base))
return PTR_ERR(tegra->ipfs_base);
}
@@ -1128,8 +1125,9 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
- for (i = 0; i < tegra->soc->num_supplies; i++)
- tegra->supplies[i].supply = tegra->soc->supply_names[i];
+ regulator_bulk_set_supply_names(tegra->supplies,
+ tegra->soc->supply_names,
+ tegra->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, tegra->soc->num_supplies,
tegra->supplies);
@@ -1181,6 +1179,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
*/
platform_set_drvdata(pdev, tegra);
+ err = tegra_xusb_phy_enable(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
+ goto put_hcd;
+ }
+
pm_runtime_enable(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
err = pm_runtime_get_sync(&pdev->dev);
@@ -1189,7 +1193,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to enable device: %d\n", err);
- goto disable_rpm;
+ goto disable_phy;
}
tegra_xusb_config(tegra, regs);
@@ -1275,9 +1279,11 @@ remove_usb2:
put_rpm:
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_xusb_runtime_suspend(&pdev->dev);
-disable_rpm:
- pm_runtime_disable(&pdev->dev);
+put_hcd:
usb_put_hcd(tegra->hcd);
+disable_phy:
+ tegra_xusb_phy_disable(tegra);
+ pm_runtime_disable(&pdev->dev);
put_powerdomains:
if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
@@ -1314,6 +1320,8 @@ static int tegra_xusb_remove(struct platform_device *pdev)
tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
}
+ tegra_xusb_phy_disable(tegra);
+
tegra_xusb_padctl_put(tegra->padctl);
return 0;
@@ -1375,6 +1383,12 @@ static const struct tegra_xusb_soc tegra124_soc = {
},
.scale_ss_clock = true,
.has_ipfs = true,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
};
MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
@@ -1407,6 +1421,12 @@ static const struct tegra_xusb_soc tegra210_soc = {
},
.scale_ss_clock = false,
.has_ipfs = true,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
};
MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
@@ -1432,12 +1452,48 @@ static const struct tegra_xusb_soc tegra186_soc = {
},
.scale_ss_clock = false,
.has_ipfs = false,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
+};
+
+static const char * const tegra194_supply_names[] = {
+};
+
+static const struct tegra_xusb_phy_type tegra194_phy_types[] = {
+ { .name = "usb3", .num = 4, },
+ { .name = "usb2", .num = 4, },
+};
+
+static const struct tegra_xusb_soc tegra194_soc = {
+ .firmware = "nvidia/tegra194/xusb.bin",
+ .supply_names = tegra194_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_supply_names),
+ .phy_types = tegra194_phy_types,
+ .num_types = ARRAY_SIZE(tegra194_phy_types),
+ .ports = {
+ .usb3 = { .offset = 0, .count = 4, },
+ .usb2 = { .offset = 4, .count = 4, },
+ },
+ .scale_ss_clock = false,
+ .has_ipfs = false,
+ .mbox = {
+ .cmd = 0x68,
+ .data_in = 0x6c,
+ .data_out = 0x70,
+ .owner = 0x74,
+ },
};
+MODULE_FIRMWARE("nvidia/tegra194/xusb.bin");
static const struct of_device_id tegra_xusb_of_match[] = {
{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
{ .compatible = "nvidia,tegra186-xusb", .data = &tegra186_soc },
+ { .compatible = "nvidia,tegra194-xusb", .data = &tegra194_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 052a269d86f2..56eb867803a6 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -560,6 +560,32 @@ DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
TP_ARGS(portnum, portsc)
);
+DECLARE_EVENT_CLASS(xhci_log_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell),
+ TP_STRUCT__entry(
+ __field(u32, slot)
+ __field(u32, doorbell)
+ ),
+ TP_fast_assign(
+ __entry->slot = slot;
+ __entry->doorbell = doorbell;
+ ),
+ TP_printk("Ring doorbell for %s",
+ xhci_decode_doorbell(__entry->slot, __entry->doorbell)
+ )
+);
+
+DEFINE_EVENT(xhci_log_doorbell, xhci_ring_ep_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell)
+);
+
+DEFINE_EVENT(xhci_log_doorbell, xhci_ring_host_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell)
+);
+
DECLARE_EVENT_CLASS(xhci_dbc_log_request,
TP_PROTO(struct dbc_request *req),
TP_ARGS(req),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6c17e3fe181a..6721d059f58a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -5301,7 +5301,8 @@ static const struct hc_driver xhci_hc_driver = {
* generic hardware linkage
*/
.irq = xhci_irq,
- .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
+ HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f9f88626a57a..dc6f62a4b197 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2580,6 +2580,35 @@ static inline const char *xhci_decode_portsc(u32 portsc)
return str;
}
+static inline const char *xhci_decode_doorbell(u32 slot, u32 doorbell)
+{
+ static char str[256];
+ u8 ep;
+ u16 stream;
+ int ret;
+
+ ep = (doorbell & 0xff);
+ stream = doorbell >> 16;
+
+ if (slot == 0) {
+ sprintf(str, "Command Ring %d", doorbell);
+ return str;
+ }
+ ret = sprintf(str, "Slot %d ", slot);
+ if (ep > 0 && ep < 32)
+ ret = sprintf(str + ret, "ep%d%s",
+ ep / 2,
+ ep % 2 ? "in" : "out");
+ else if (ep == 0 || ep < 248)
+ ret = sprintf(str + ret, "Reserved %d", ep);
+ else
+ ret = sprintf(str + ret, "Vendor Defined %d", ep);
+ if (stream)
+ ret = sprintf(str + ret, " Stream %d", stream);
+
+ return str;
+}
+
static inline const char *xhci_ep_state_string(u8 state)
{
switch (state) {
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 7a6b122c833f..360416680e82 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -566,7 +566,6 @@ static int
mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
- int err = 0;
int res;
MTS_DEBUG_GOT_HERE();
@@ -613,7 +612,7 @@ mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback
}
out:
- return err;
+ return 0;
}
static DEF_SCSI_QCMD(mts_scsi_queuecommand)
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 320fc4739835..579a21bd70ad 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1032,8 +1032,6 @@ static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
urb->status = -EOVERFLOW;
else if (FROM_DW3_CERR(ptd->dw3))
urb->status = -EPIPE; /* Stall */
- else if (ptd->dw3 & DW3_ERROR_BIT)
- urb->status = -EPROTO; /* XactErr */
else
urb->status = -EPROTO; /* Unknown */
/*
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 9bce583aada3..834b2494da73 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -181,8 +181,8 @@ config USB_TEST
including sample test device firmware and "how to use it".
config USB_EHSET_TEST_FIXTURE
- tristate "USB EHSET Test Fixture driver"
- help
+ tristate "USB EHSET Test Fixture driver"
+ help
Say Y here if you want to support the special test fixture device
used for the USB-IF Embedded Host High-Speed Electrical Test procedure.
@@ -233,17 +233,17 @@ config USB_HUB_USB251XB
Say Y or M here if you need to configure such a device via SMBus.
config USB_HSIC_USB3503
- tristate "USB3503 HSIC to USB20 Driver"
- depends on I2C
- select REGMAP_I2C
- help
- This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
+ tristate "USB3503 HSIC to USB20 Driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
config USB_HSIC_USB4604
- tristate "USB4604 HSIC to USB20 Driver"
- depends on I2C
- help
- This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
+ tristate "USB4604 HSIC to USB20 Driver"
+ depends on I2C
+ help
+ This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
config USB_LINK_LAYER_TEST
tristate "USB Link Layer Test driver"
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index ac92725458b5..ba1eaabc7796 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -164,7 +164,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
0,
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
- brightness = pdata->msgdata[1];
+ if (retval < 2) {
+ if (retval >= 0)
+ retval = -EMSGSIZE;
+ } else {
+ brightness = pdata->msgdata[1];
+ }
mutex_unlock(&pdata->sysfslock);
if (retval < 0)
@@ -299,6 +304,7 @@ error:
if (pdata) {
if (pdata->urb) {
usb_kill_urb(pdata->urb);
+ cancel_delayed_work_sync(&pdata->work);
if (pdata->urbdata)
usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 34e6cd6f40d3..87067c3d6109 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
!dev->reading,
(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
- if (result < 0)
+ if (result < 0) {
+ usb_kill_urb(dev->urb);
goto out;
+ }
- if (result == 0)
+ if (result == 0) {
result = -ETIMEDOUT;
- else
+ usb_kill_urb(dev->urb);
+ } else {
result = dev->valid;
+ }
out:
/* Let the device go back to sleep eventually */
usb_autopm_put_interface(dev->interface);
@@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
static int chaoskey_resume(struct usb_interface *interface)
{
+ struct chaoskey *dev;
+ struct usb_device *udev = interface_to_usbdev(interface);
+
usb_dbg(interface, "resume");
+ dev = usb_get_intfdata(interface);
+
+ /*
+ * We may have lost power.
+ * In that case the device that needs a long time
+ * for the first requests needs an extended timeout
+ * again
+ */
+ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
+ dev->reads_started = false;
+
return 0;
}
#else
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index cdee3af33ad7..8a3d9c0c8d8b 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -333,7 +333,8 @@ static void ftdi_elan_abandon_completions(struct usb_ftdi *ftdi)
*respond->result = -ESHUTDOWN;
*respond->value = 0;
complete(&respond->wait_completion);
- } mutex_unlock(&ftdi->u132_lock);
+ }
+ mutex_unlock(&ftdi->u132_lock);
}
static void ftdi_elan_abandon_targets(struct usb_ftdi *ftdi)
@@ -763,7 +764,8 @@ static int ftdi_elan_total_command_size(struct usb_ftdi *ftdi, int command_size)
struct u132_command *command = &ftdi->command[COMMAND_MASK &
i++];
total_size += 5 + command->follows;
- } return total_size;
+ }
+ return total_size;
}
static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 20b0f91a5d9b..4afb5ddfd361 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -56,11 +56,10 @@ static const struct usb_device_id idmouse_table[] = {
#define FTIP_SCROLL 0x24
#define ftip_command(dev, command, value, index) \
- usb_control_msg (dev->udev, usb_sndctrlpipe (dev->udev, 0), command, \
+ usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), command, \
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000)
MODULE_DEVICE_TABLE(usb, idmouse_table);
-static DEFINE_MUTEX(open_disc_mutex);
/* structure to hold all of our device specific stuff */
struct usb_idmouse {
@@ -158,8 +157,8 @@ static int idmouse_create_image(struct usb_idmouse *dev)
/* loop over a blocking bulk read to get data from the device */
while (bytes_read < IMGSIZE) {
- result = usb_bulk_msg (dev->udev,
- usb_rcvbulkpipe (dev->udev, dev->bulk_in_endpointAddr),
+ result = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
dev->bulk_in_buffer + bytes_read,
dev->bulk_in_size, &bulk_read, 5000);
if (result < 0) {
@@ -223,21 +222,17 @@ static int idmouse_open(struct inode *inode, struct file *file)
int result;
/* get the interface from minor number and driver information */
- interface = usb_find_interface (&idmouse_driver, iminor (inode));
+ interface = usb_find_interface(&idmouse_driver, iminor(inode));
if (!interface)
return -ENODEV;
- mutex_lock(&open_disc_mutex);
/* get the device information block from the interface */
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&open_disc_mutex);
+ if (!dev)
return -ENODEV;
- }
/* lock this device */
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* check if already open */
if (dev->open) {
@@ -251,7 +246,7 @@ static int idmouse_open(struct inode *inode, struct file *file)
result = usb_autopm_get_interface(interface);
if (result)
goto error;
- result = idmouse_create_image (dev);
+ result = idmouse_create_image(dev);
usb_autopm_put_interface(interface);
if (result)
goto error;
@@ -280,27 +275,17 @@ static int idmouse_release(struct inode *inode, struct file *file)
if (dev == NULL)
return -ENODEV;
- mutex_lock(&open_disc_mutex);
/* lock our device */
mutex_lock(&dev->lock);
- /* are we really open? */
- if (dev->open <= 0) {
- mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
- return -ENODEV;
- }
-
--dev->open;
if (!dev->present) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
idmouse_delete(dev);
} else {
mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
}
return 0;
}
@@ -379,7 +364,6 @@ static int idmouse_probe(struct usb_interface *interface,
if (result) {
/* something prevented us from registering this device */
dev_err(&interface->dev, "Unable to allocate minor number.\n");
- usb_set_intfdata(interface, NULL);
idmouse_delete(dev);
return result;
}
@@ -392,19 +376,13 @@ static int idmouse_probe(struct usb_interface *interface,
static void idmouse_disconnect(struct usb_interface *interface)
{
- struct usb_idmouse *dev;
-
- /* get device structure */
- dev = usb_get_intfdata(interface);
+ struct usb_idmouse *dev = usb_get_intfdata(interface);
/* give back our minor */
usb_deregister_dev(interface, &idmouse_class);
- mutex_lock(&open_disc_mutex);
- usb_set_intfdata(interface, NULL);
/* lock the device */
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* prevent device read, write and ioctl */
dev->present = 0;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 23061f1526b4..ab4b98b04115 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -157,19 +157,19 @@ MODULE_PARM_DESC(interrupt_out_interval, "Interrupt out interval in ms");
#define LEGO_USB_TOWER_REQUEST_GET_VERSION 0xFD
struct tower_reset_reply {
- __le16 size; /* little-endian */
+ __le16 size;
__u8 err_code;
__u8 spare;
-} __attribute__ ((packed));
+};
struct tower_get_version_reply {
- __le16 size; /* little-endian */
+ __le16 size;
__u8 err_code;
__u8 spare;
__u8 major;
__u8 minor;
- __le16 build_no; /* little-endian */
-} __attribute__ ((packed));
+ __le16 build_no;
+};
/* table of devices that work with this driver */
@@ -178,7 +178,7 @@ static const struct usb_device_id tower_table[] = {
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, tower_table);
+MODULE_DEVICE_TABLE(usb, tower_table);
#define LEGO_USB_TOWER_MINOR_BASE 160
@@ -186,13 +186,13 @@ MODULE_DEVICE_TABLE (usb, tower_table);
/* Structure to hold all of our device specific stuff */
struct lego_usb_tower {
struct mutex lock; /* locks this structure */
- struct usb_device* udev; /* save off the usb device pointer */
+ struct usb_device *udev; /* save off the usb device pointer */
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
unsigned long disconnected:1;
- char* read_buffer;
+ char *read_buffer;
size_t read_buffer_length; /* this much came in */
size_t read_packet_length; /* this much will be returned on read */
spinlock_t read_buffer_lock;
@@ -202,16 +202,15 @@ struct lego_usb_tower {
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
- char* interrupt_in_buffer;
- struct usb_endpoint_descriptor* interrupt_in_endpoint;
- struct urb* interrupt_in_urb;
+ char *interrupt_in_buffer;
+ struct usb_endpoint_descriptor *interrupt_in_endpoint;
+ struct urb *interrupt_in_urb;
int interrupt_in_interval;
- int interrupt_in_running;
int interrupt_in_done;
- char* interrupt_out_buffer;
- struct usb_endpoint_descriptor* interrupt_out_endpoint;
- struct urb* interrupt_out_urb;
+ char *interrupt_out_buffer;
+ struct usb_endpoint_descriptor *interrupt_out_endpoint;
+ struct urb *interrupt_out_urb;
int interrupt_out_interval;
int interrupt_out_busy;
@@ -219,21 +218,20 @@ struct lego_usb_tower {
/* local function prototypes */
-static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos);
-static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
-static inline void tower_delete (struct lego_usb_tower *dev);
-static int tower_open (struct inode *inode, struct file *file);
-static int tower_release (struct inode *inode, struct file *file);
-static __poll_t tower_poll (struct file *file, poll_table *wait);
-static loff_t tower_llseek (struct file *file, loff_t off, int whence);
+static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos);
+static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
+static inline void tower_delete(struct lego_usb_tower *dev);
+static int tower_open(struct inode *inode, struct file *file);
+static int tower_release(struct inode *inode, struct file *file);
+static __poll_t tower_poll(struct file *file, poll_table *wait);
+static loff_t tower_llseek(struct file *file, loff_t off, int whence);
-static void tower_abort_transfers (struct lego_usb_tower *dev);
-static void tower_check_for_read_packet (struct lego_usb_tower *dev);
-static void tower_interrupt_in_callback (struct urb *urb);
-static void tower_interrupt_out_callback (struct urb *urb);
+static void tower_check_for_read_packet(struct lego_usb_tower *dev);
+static void tower_interrupt_in_callback(struct urb *urb);
+static void tower_interrupt_out_callback(struct urb *urb);
-static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id);
-static void tower_disconnect (struct usb_interface *interface);
+static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id);
+static void tower_disconnect(struct usb_interface *interface);
/* file operations needed when we register this driver */
@@ -288,23 +286,23 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
/**
* tower_delete
*/
-static inline void tower_delete (struct lego_usb_tower *dev)
+static inline void tower_delete(struct lego_usb_tower *dev)
{
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
- kfree (dev->read_buffer);
- kfree (dev->interrupt_in_buffer);
- kfree (dev->interrupt_out_buffer);
+ kfree(dev->read_buffer);
+ kfree(dev->interrupt_in_buffer);
+ kfree(dev->interrupt_out_buffer);
usb_put_dev(dev->udev);
- kfree (dev);
+ kfree(dev);
}
/**
* tower_open
*/
-static int tower_open (struct inode *inode, struct file *file)
+static int tower_open(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev = NULL;
int subminor;
@@ -314,7 +312,6 @@ static int tower_open (struct inode *inode, struct file *file)
int result;
reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
-
if (!reset_reply) {
retval = -ENOMEM;
goto exit;
@@ -323,8 +320,7 @@ static int tower_open (struct inode *inode, struct file *file)
nonseekable_open(inode, file);
subminor = iminor(inode);
- interface = usb_find_interface (&tower_driver, subminor);
-
+ interface = usb_find_interface(&tower_driver, subminor);
if (!interface) {
pr_err("error, can't find device for minor %d\n", subminor);
retval = -ENODEV;
@@ -351,15 +347,15 @@ static int tower_open (struct inode *inode, struct file *file)
}
/* reset the tower */
- result = usb_control_msg (dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- LEGO_USB_TOWER_REQUEST_RESET,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- reset_reply,
- sizeof(*reset_reply),
- 1000);
+ result = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ LEGO_USB_TOWER_REQUEST_RESET,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ reset_reply,
+ sizeof(*reset_reply),
+ 1000);
if (result < 0) {
dev_err(&dev->udev->dev,
"LEGO USB Tower reset control request failed\n");
@@ -370,24 +366,22 @@ static int tower_open (struct inode *inode, struct file *file)
/* initialize in direction */
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
- usb_fill_int_urb (dev->interrupt_in_urb,
- dev->udev,
- usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
- dev->interrupt_in_buffer,
- usb_endpoint_maxp(dev->interrupt_in_endpoint),
- tower_interrupt_in_callback,
- dev,
- dev->interrupt_in_interval);
-
- dev->interrupt_in_running = 1;
+ usb_fill_int_urb(dev->interrupt_in_urb,
+ dev->udev,
+ usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
+ dev->interrupt_in_buffer,
+ usb_endpoint_maxp(dev->interrupt_in_endpoint),
+ tower_interrupt_in_callback,
+ dev,
+ dev->interrupt_in_interval);
+
dev->interrupt_in_done = 0;
mb();
- retval = usb_submit_urb (dev->interrupt_in_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
- dev->interrupt_in_running = 0;
goto unlock_exit;
}
@@ -407,13 +401,12 @@ exit:
/**
* tower_release
*/
-static int tower_release (struct inode *inode, struct file *file)
+static int tower_release(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev;
int retval = 0;
dev = file->private_data;
-
if (dev == NULL) {
retval = -ENODEV;
goto exit;
@@ -421,56 +414,32 @@ static int tower_release (struct inode *inode, struct file *file)
mutex_lock(&dev->lock);
- if (dev->open_count != 1) {
- dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
- __func__);
- retval = -ENODEV;
- goto unlock_exit;
- }
-
if (dev->disconnected) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
mutex_unlock(&dev->lock);
- tower_delete (dev);
+ tower_delete(dev);
goto exit;
}
/* wait until write transfer is finished */
if (dev->interrupt_out_busy) {
- wait_event_interruptible_timeout (dev->write_wait, !dev->interrupt_out_busy, 2 * HZ);
+ wait_event_interruptible_timeout(dev->write_wait, !dev->interrupt_out_busy,
+ 2 * HZ);
}
- tower_abort_transfers (dev);
+
+ /* shutdown transfers */
+ usb_kill_urb(dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_out_urb);
+
dev->open_count = 0;
-unlock_exit:
mutex_unlock(&dev->lock);
exit:
return retval;
}
-
-/**
- * tower_abort_transfers
- * aborts transfers and frees associated data structures
- */
-static void tower_abort_transfers (struct lego_usb_tower *dev)
-{
- if (dev == NULL)
- return;
-
- /* shutdown transfer */
- if (dev->interrupt_in_running) {
- dev->interrupt_in_running = 0;
- mb();
- usb_kill_urb(dev->interrupt_in_urb);
- }
- if (dev->interrupt_out_busy)
- usb_kill_urb(dev->interrupt_out_urb);
-}
-
-
/**
* tower_check_for_read_packet
*
@@ -479,23 +448,23 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
* until it has been there unchanged for at least
* dev->packet_timeout_jiffies, or until the buffer is full.
*/
-static void tower_check_for_read_packet (struct lego_usb_tower *dev)
+static void tower_check_for_read_packet(struct lego_usb_tower *dev)
{
- spin_lock_irq (&dev->read_buffer_lock);
+ spin_lock_irq(&dev->read_buffer_lock);
if (!packet_timeout
|| time_after(jiffies, dev->read_last_arrival + dev->packet_timeout_jiffies)
|| dev->read_buffer_length == read_buffer_size) {
dev->read_packet_length = dev->read_buffer_length;
}
dev->interrupt_in_done = 0;
- spin_unlock_irq (&dev->read_buffer_lock);
+ spin_unlock_irq(&dev->read_buffer_lock);
}
/**
* tower_poll
*/
-static __poll_t tower_poll (struct file *file, poll_table *wait)
+static __poll_t tower_poll(struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
__poll_t mask = 0;
@@ -509,12 +478,10 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
poll_wait(file, &dev->write_wait, wait);
tower_check_for_read_packet(dev);
- if (dev->read_packet_length > 0) {
+ if (dev->read_packet_length > 0)
mask |= EPOLLIN | EPOLLRDNORM;
- }
- if (!dev->interrupt_out_busy) {
+ if (!dev->interrupt_out_busy)
mask |= EPOLLOUT | EPOLLWRNORM;
- }
return mask;
}
@@ -523,7 +490,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
/**
* tower_llseek
*/
-static loff_t tower_llseek (struct file *file, loff_t off, int whence)
+static loff_t tower_llseek(struct file *file, loff_t off, int whence)
{
return -ESPIPE; /* unseekable */
}
@@ -532,7 +499,7 @@ static loff_t tower_llseek (struct file *file, loff_t off, int whence)
/**
* tower_read
*/
-static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_read;
@@ -551,7 +518,6 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
- pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
@@ -561,21 +527,19 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
goto unlock_exit;
}
- if (read_timeout) {
+ if (read_timeout)
timeout = jiffies + msecs_to_jiffies(read_timeout);
- }
/* wait for data */
- tower_check_for_read_packet (dev);
+ tower_check_for_read_packet(dev);
while (dev->read_packet_length == 0) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
retval = wait_event_interruptible_timeout(dev->read_wait, dev->interrupt_in_done, dev->packet_timeout_jiffies);
- if (retval < 0) {
+ if (retval < 0)
goto unlock_exit;
- }
/* reset read timeout during read or write activity */
if (read_timeout
@@ -583,28 +547,27 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
timeout = jiffies + msecs_to_jiffies(read_timeout);
}
/* check for read timeout */
- if (read_timeout && time_after (jiffies, timeout)) {
+ if (read_timeout && time_after(jiffies, timeout)) {
retval = -ETIMEDOUT;
goto unlock_exit;
}
- tower_check_for_read_packet (dev);
+ tower_check_for_read_packet(dev);
}
/* copy the data from read_buffer into userspace */
bytes_to_read = min(count, dev->read_packet_length);
- if (copy_to_user (buffer, dev->read_buffer, bytes_to_read)) {
+ if (copy_to_user(buffer, dev->read_buffer, bytes_to_read)) {
retval = -EFAULT;
goto unlock_exit;
}
- spin_lock_irq (&dev->read_buffer_lock);
+ spin_lock_irq(&dev->read_buffer_lock);
dev->read_buffer_length -= bytes_to_read;
dev->read_packet_length -= bytes_to_read;
- for (i=0; i<dev->read_buffer_length; i++) {
+ for (i = 0; i < dev->read_buffer_length; i++)
dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read];
- }
- spin_unlock_irq (&dev->read_buffer_lock);
+ spin_unlock_irq(&dev->read_buffer_lock);
retval = bytes_to_read;
@@ -620,7 +583,7 @@ exit:
/**
* tower_write
*/
-static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_write;
@@ -637,7 +600,6 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
- pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
@@ -653,10 +615,10 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
retval = -EAGAIN;
goto unlock_exit;
}
- retval = wait_event_interruptible (dev->write_wait, !dev->interrupt_out_busy);
- if (retval) {
+ retval = wait_event_interruptible(dev->write_wait,
+ !dev->interrupt_out_busy);
+ if (retval)
goto unlock_exit;
- }
}
/* write the data into interrupt_out_buffer from userspace */
@@ -664,7 +626,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dev_dbg(&dev->udev->dev, "%s: count = %zd, bytes_to_write = %zd\n",
__func__, count, bytes_to_write);
- if (copy_from_user (dev->interrupt_out_buffer, buffer, bytes_to_write)) {
+ if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
retval = -EFAULT;
goto unlock_exit;
}
@@ -682,7 +644,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dev->interrupt_out_busy = 1;
wmb();
- retval = usb_submit_urb (dev->interrupt_out_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
if (retval) {
dev->interrupt_out_busy = 0;
dev_err(&dev->udev->dev,
@@ -703,7 +665,7 @@ exit:
/**
* tower_interrupt_in_callback
*/
-static void tower_interrupt_in_callback (struct urb *urb)
+static void tower_interrupt_in_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
@@ -729,9 +691,9 @@ static void tower_interrupt_in_callback (struct urb *urb)
if (urb->actual_length > 0) {
spin_lock_irqsave(&dev->read_buffer_lock, flags);
if (dev->read_buffer_length + urb->actual_length < read_buffer_size) {
- memcpy (dev->read_buffer + dev->read_buffer_length,
- dev->interrupt_in_buffer,
- urb->actual_length);
+ memcpy(dev->read_buffer + dev->read_buffer_length,
+ dev->interrupt_in_buffer,
+ urb->actual_length);
dev->read_buffer_length += urb->actual_length;
dev->read_last_arrival = jiffies;
dev_dbg(&dev->udev->dev, "%s: received %d bytes\n",
@@ -744,25 +706,21 @@ static void tower_interrupt_in_callback (struct urb *urb)
}
resubmit:
- /* resubmit if we're still running */
- if (dev->interrupt_in_running) {
- retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
- if (retval)
- dev_err(&dev->udev->dev,
- "%s: usb_submit_urb failed (%d)\n",
- __func__, retval);
+ retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
+ if (retval) {
+ dev_err(&dev->udev->dev, "%s: usb_submit_urb failed (%d)\n",
+ __func__, retval);
}
-
exit:
dev->interrupt_in_done = 1;
- wake_up_interruptible (&dev->read_wait);
+ wake_up_interruptible(&dev->read_wait);
}
/**
* tower_interrupt_out_callback
*/
-static void tower_interrupt_out_callback (struct urb *urb)
+static void tower_interrupt_out_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
@@ -790,48 +748,27 @@ static void tower_interrupt_out_callback (struct urb *urb)
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
-static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id)
+static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
- struct lego_usb_tower *dev = NULL;
+ struct lego_usb_tower *dev;
struct tower_get_version_reply *get_version_reply = NULL;
int retval = -ENOMEM;
int result;
/* allocate memory for our device state and initialize it */
-
- dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
-
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->lock);
-
dev->udev = usb_get_dev(udev);
- dev->open_count = 0;
- dev->disconnected = 0;
-
- dev->read_buffer = NULL;
- dev->read_buffer_length = 0;
- dev->read_packet_length = 0;
- spin_lock_init (&dev->read_buffer_lock);
+ spin_lock_init(&dev->read_buffer_lock);
dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
dev->read_last_arrival = jiffies;
-
- init_waitqueue_head (&dev->read_wait);
- init_waitqueue_head (&dev->write_wait);
-
- dev->interrupt_in_buffer = NULL;
- dev->interrupt_in_endpoint = NULL;
- dev->interrupt_in_urb = NULL;
- dev->interrupt_in_running = 0;
- dev->interrupt_in_done = 0;
-
- dev->interrupt_out_buffer = NULL;
- dev->interrupt_out_endpoint = NULL;
- dev->interrupt_out_urb = NULL;
- dev->interrupt_out_busy = 0;
+ init_waitqueue_head(&dev->read_wait);
+ init_waitqueue_head(&dev->write_wait);
result = usb_find_common_endpoints_reverse(interface->cur_altsetting,
NULL, NULL,
@@ -843,16 +780,16 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
goto error;
}
- dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
+ dev->read_buffer = kmalloc(read_buffer_size, GFP_KERNEL);
if (!dev->read_buffer)
goto error;
- dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
+ dev->interrupt_in_buffer = kmalloc(usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
- dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
+ dev->interrupt_out_buffer = kmalloc(write_buffer_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -862,22 +799,21 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
-
if (!get_version_reply) {
retval = -ENOMEM;
goto error;
}
/* get the firmware version and log it */
- result = usb_control_msg (udev,
- usb_rcvctrlpipe(udev, 0),
- LEGO_USB_TOWER_REQUEST_GET_VERSION,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- get_version_reply,
- sizeof(*get_version_reply),
- 1000);
+ result = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ LEGO_USB_TOWER_REQUEST_GET_VERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ get_version_reply,
+ sizeof(*get_version_reply),
+ 1000);
if (result != sizeof(*get_version_reply)) {
if (result >= 0)
result = -EIO;
@@ -892,10 +828,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
le16_to_cpu(get_version_reply->build_no));
/* we can register the device now, as it is ready */
- usb_set_intfdata (interface, dev);
-
- retval = usb_register_dev (interface, &tower_class);
+ usb_set_intfdata(interface, dev);
+ retval = usb_register_dev(interface, &tower_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
@@ -924,17 +859,17 @@ error:
*
* Called by the usb core when the device is removed from the system.
*/
-static void tower_disconnect (struct usb_interface *interface)
+static void tower_disconnect(struct usb_interface *interface)
{
struct lego_usb_tower *dev;
int minor;
- dev = usb_get_intfdata (interface);
+ dev = usb_get_intfdata(interface);
minor = dev->minor;
/* give back our minor and prevent further open() */
- usb_deregister_dev (interface, &tower_class);
+ usb_deregister_dev(interface, &tower_class);
/* stop I/O */
usb_poison_urb(dev->interrupt_in_urb);
@@ -945,7 +880,7 @@ static void tower_disconnect (struct usb_interface *interface)
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
- tower_delete (dev);
+ tower_delete(dev);
} else {
dev->disconnected = 1;
/* wake up pollers */
@@ -962,6 +897,4 @@ module_usb_driver(tower_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 9b632ab24f03..c16121276a21 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -4,7 +4,7 @@ config USB_SISUSBVGA
tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
depends on (USB_MUSB_HDRC || USB_EHCI_HCD)
select FONT_SUPPORT if USB_SISUSBVGA_CON
- ---help---
+ ---help---
Say Y here if you intend to attach a USB2VGA dongle based on a
Net2280 and a SiS315 chip.
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 6ca9111d150a..10c9e7f6273e 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* Internal Register Set Addresses & Default Values acc. to DS00001692C */
@@ -26,10 +27,6 @@
#define USB251XB_ADDR_PRODUCT_ID_LSB 0x02
#define USB251XB_ADDR_PRODUCT_ID_MSB 0x03
-#define USB251XB_DEF_PRODUCT_ID_12 0x2512 /* USB2512B/12Bi */
-#define USB251XB_DEF_PRODUCT_ID_13 0x2513 /* USB2513B/13Bi */
-#define USB251XB_DEF_PRODUCT_ID_14 0x2514 /* USB2514B/14Bi */
-#define USB251XB_DEF_PRODUCT_ID_17 0x2517 /* USB2517/17i */
#define USB251XB_ADDR_DEVICE_ID_LSB 0x04
#define USB251XB_ADDR_DEVICE_ID_MSB 0x05
@@ -74,7 +71,6 @@
#define USB251XB_ADDR_PRODUCT_STRING_LEN 0x14
#define USB251XB_ADDR_PRODUCT_STRING 0x54
-#define USB251XB_DEF_PRODUCT_STRING "USB251xB/xBi/7i"
#define USB251XB_ADDR_SERIAL_STRING_LEN 0x15
#define USB251XB_ADDR_SERIAL_STRING 0x92
@@ -116,6 +112,7 @@
struct usb251xb {
struct device *dev;
struct i2c_client *i2c;
+ struct regulator *vdd;
u8 skip_config;
struct gpio_desc *gpio_reset;
u16 vendor_id;
@@ -159,6 +156,14 @@ struct usb251xb_data {
char product_str[USB251XB_STRING_BUFSIZE / 2]; /* ASCII string */
};
+static const struct usb251xb_data usb2422_data = {
+ .product_id = 0x2422,
+ .port_cnt = 2,
+ .led_support = false,
+ .bat_support = true,
+ .product_str = "USB2422",
+};
+
static const struct usb251xb_data usb2512b_data = {
.product_id = 0x2512,
.port_cnt = 2,
@@ -261,20 +266,19 @@ static int usb251x_check_gpio_chip(struct usb251xb *hub)
}
#endif
-static void usb251xb_reset(struct usb251xb *hub, int state)
+static void usb251xb_reset(struct usb251xb *hub)
{
if (!hub->gpio_reset)
return;
i2c_lock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
- gpiod_set_value_cansleep(hub->gpio_reset, state);
+ gpiod_set_value_cansleep(hub->gpio_reset, 1);
+ usleep_range(1, 10); /* >=1us RESET_N asserted */
+ gpiod_set_value_cansleep(hub->gpio_reset, 0);
/* wait for hub recovery/stabilization */
- if (!state)
- usleep_range(500, 750); /* >=500us at power on */
- else
- usleep_range(1, 10); /* >=1us at power down */
+ usleep_range(500, 750); /* >=500us after RESET_N deasserted */
i2c_unlock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
}
@@ -292,7 +296,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[0] = 0x01;
i2c_wb[1] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 0);
+ usb251xb_reset(hub);
err = i2c_smbus_write_i2c_block_data(hub->i2c,
USB251XB_ADDR_STATUS_COMMAND, 2, i2c_wb);
@@ -342,7 +346,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[USB251XB_ADDR_PORT_MAP_7] = hub->port_map7;
i2c_wb[USB251XB_ADDR_STATUS_COMMAND] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 0);
+ usb251xb_reset(hub);
/* write registers */
for (i = 0; i < (USB251XB_I2C_REG_SZ / USB251XB_I2C_WRITE_SZ); i++) {
@@ -420,6 +424,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
return err;
}
+ hub->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(hub->vdd))
+ return PTR_ERR(hub->vdd);
+
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
@@ -593,6 +601,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
static const struct of_device_id usb251xb_of_match[] = {
{
+ .compatible = "microchip,usb2422",
+ .data = &usb2422_data,
+ }, {
.compatible = "microchip,usb2512b",
.data = &usb2512b_data,
}, {
@@ -665,6 +676,10 @@ static int usb251xb_probe(struct usb251xb *hub)
if (err)
return err;
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
err = usb251xb_connect(hub);
if (err) {
dev_err(dev, "Failed to connect hub (%d)\n", err);
@@ -692,7 +707,31 @@ static int usb251xb_i2c_probe(struct i2c_client *i2c,
return usb251xb_probe(hub);
}
+static int __maybe_unused usb251xb_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb251xb *hub = i2c_get_clientdata(client);
+
+ return regulator_disable(hub->vdd);
+}
+
+static int __maybe_unused usb251xb_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb251xb *hub = i2c_get_clientdata(client);
+ int err;
+
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
+ return usb251xb_connect(hub);
+}
+
+static SIMPLE_DEV_PM_OPS(usb251xb_pm_ops, usb251xb_suspend, usb251xb_resume);
+
static const struct i2c_device_id usb251xb_id[] = {
+ { "usb2422", 0 },
{ "usb2512b", 0 },
{ "usb2512bi", 0 },
{ "usb2513b", 0 },
@@ -709,6 +748,7 @@ static struct i2c_driver usb251xb_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(usb251xb_of_match),
+ .pm = &usb251xb_pm_ops,
},
.probe = usb251xb_i2c_probe,
.id_table = usb251xb_id,
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 4da216c99726..2be182bd793a 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -153,6 +153,15 @@ static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
}
+static void ep0_do_status_stage(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 value;
+
+ value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+ mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
+}
+
static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);
static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
@@ -297,8 +306,7 @@ static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
ep0_load_test_packet(mtu);
/* send status before entering test mode. */
- value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
- mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
+ ep0_do_status_stage(mtu);
/* wait for ACK status sent by host */
readl_poll_timeout_atomic(mbase + U3D_EP0CSR, value,
@@ -632,7 +640,6 @@ __acquires(mtu->lock)
{
struct usb_ctrlrequest setup;
struct mtu3_request *mreq;
- void __iomem *mbase = mtu->mac_base;
int handled = 0;
ep0_read_setup(mtu, &setup);
@@ -664,14 +671,19 @@ finish:
if (mtu->test_mode) {
; /* nothing to do */
} else if (handled == USB_GADGET_DELAYED_STATUS) {
- /* handle the delay STATUS phase till receive ep_queue on ep0 */
- mtu->delayed_status = true;
- } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
- mtu3_writel(mbase, U3D_EP0CSR,
- (mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS)
- | EP0_SETUPPKTRDY | EP0_DATAEND);
+ mreq = next_ep0_request(mtu);
+ if (mreq) {
+ /* the gadget driver already queued the status-stage request */
+ ep0_do_status_stage(mtu);
+ ep0_req_giveback(mtu, &mreq->request);
+ } else {
+ /* delay the STATUS stage until ep0_queue() is called */
+ mtu->delayed_status = true;
+ }
+ } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
+ ep0_do_status_stage(mtu);
/* complete zlp request directly */
mreq = next_ep0_request(mtu);
if (mreq && !mreq->request.length)
@@ -802,12 +814,9 @@ static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
}
if (mtu->delayed_status) {
- u32 csr;
mtu->delayed_status = false;
- csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
- csr |= EP0_SETUPPKTRDY | EP0_DATAEND;
- mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+ ep0_do_status_stage(mtu);
/* needn't giveback the request for handling delay STATUS */
return 0;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index bd63450af76a..15cca912c53e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2431,14 +2431,12 @@ static int musb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq_byname(pdev, "mc");
- struct resource *iomem;
void __iomem *base;
if (irq <= 0)
return -ENODEV;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, iomem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index f42858e2b54c..7b6281ab62ed 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -325,7 +325,7 @@ void musb_init_debugfs(struct musb *musb)
{
struct dentry *root;
- root = debugfs_create_dir(dev_name(musb->controller), NULL);
+ root = debugfs_create_dir(dev_name(musb->controller), usb_debug_root);
musb->debugfs_root = root;
debugfs_create_file("regdump", S_IRUGO, root, musb, &musb_regdump_fops);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 327d4f7baaf7..88923175f71e 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -411,7 +411,7 @@ static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
char buf[128];
sprintf(buf, "%s.dsps", dev_name(musb->controller));
- root = debugfs_create_dir(buf, NULL);
+ root = debugfs_create_dir(buf, usb_debug_root);
glue->dbgfs_root = root;
glue->regset.regs = dsps_musb_regs;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ffe462a657b1..f62ffaede1ab 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1085,7 +1085,6 @@ static int musb_gadget_disable(struct usb_ep *ep)
u8 epnum;
struct musb_ep *musb_ep;
void __iomem *epio;
- int status = 0;
musb_ep = to_musb_ep(ep);
musb = musb_ep->musb;
@@ -1118,7 +1117,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
musb_dbg(musb, "%s", musb_ep->end_point.name);
- return status;
+ return 0;
}
/*
@@ -1316,7 +1315,7 @@ done:
}
/*
- * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
* data but will queue requests.
*
* exported to ep0 code
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 19871266312d..110e6e9ad621 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -66,15 +66,13 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keystone_usbphy *k_phy;
- struct resource *res;
int ret;
k_phy = devm_kzalloc(dev, sizeof(*k_phy), GFP_KERNEL);
if (!k_phy)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- k_phy->phy_ctrl = devm_ioremap_resource(dev, res);
+ k_phy->phy_ctrl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 70b8c8248caf..67b39dc62b37 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -710,7 +710,6 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
static int mxs_phy_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *base;
struct clk *clk;
struct mxs_phy *mxs_phy;
@@ -723,8 +722,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
if (!of_id)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index a3c30b609433..d438b7871446 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -590,7 +590,7 @@ static int usbhs_probe(struct platform_device *pdev)
{
const struct renesas_usbhs_platform_info *info;
struct usbhs_priv *priv;
- struct resource *res, *irq_res;
+ struct resource *irq_res;
struct device *dev = &pdev->dev;
int ret, gpio;
u32 tmp;
@@ -619,8 +619,7 @@ static int usbhs_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 0824099b905e..ef1735d014da 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -161,11 +161,12 @@ struct usbhs_priv;
#define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */
#define VALID (1 << 3) /* USB Request Receive */
-#define DVSQ_MASK (0x3 << 4) /* Device State */
+#define DVSQ_MASK (0x7 << 4) /* Device State */
#define POWER_STATE (0 << 4)
#define DEFAULT_STATE (1 << 4)
#define ADDRESS_STATE (2 << 4)
#define CONFIGURATION_STATE (3 << 4)
+#define SUSPENDED_STATE (4 << 4)
#define CTSQ_MASK (0x7) /* Control Transfer Stage */
#define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 86637cd066cf..01c6a48c41bc 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1273,11 +1273,11 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
*/
snprintf(name, sizeof(name), "ch%d", channel);
if (channel & 1) {
- fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+ fifo->tx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->tx_chan))
fifo->tx_chan = NULL;
} else {
- fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+ fifo->rx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->rx_chan))
fifo->rx_chan = NULL;
}
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 10fc65596014..b98112cefaa4 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -169,17 +169,7 @@ void usbhs_mod_remove(struct usbhs_priv *priv)
*/
int usbhs_status_get_device_state(struct usbhs_irq_state *irq_state)
{
- int state = irq_state->intsts0 & DVSQ_MASK;
-
- switch (state) {
- case POWER_STATE:
- case DEFAULT_STATE:
- case ADDRESS_STATE:
- case CONFIGURATION_STATE:
- return state;
- }
-
- return -EIO;
+ return (int)irq_state->intsts0 & DVSQ_MASK;
}
int usbhs_status_get_ctrl_stage(struct usbhs_irq_state *irq_state)
@@ -348,10 +338,6 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
* usbhs_interrupt
*/
- /*
- * it don't enable DVSE (intenb0) here
- * but "mod->irq_dev_state" will be called.
- */
if (info->irq_vbus)
intenb0 |= VBSE;
@@ -362,6 +348,9 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_ctrl_stage)
intenb0 |= CTRE;
+ if (mod->irq_dev_state)
+ intenb0 |= DVSE;
+
if (mod->irq_empty && mod->irq_bempsts) {
usbhs_write(priv, BEMPENB, mod->irq_bempsts);
intenb0 |= BEMPE;
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index cd38d74b3223..53489cafecc1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -457,12 +457,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ int state = usbhs_status_get_device_state(irq_state);
gpriv->gadget.speed = usbhs_bus_get_speed(priv);
- dev_dbg(dev, "state = %x : speed : %d\n",
- usbhs_status_get_device_state(irq_state),
- gpriv->gadget.speed);
+ dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
+
+ if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
+ (state & SUSPENDED_STATE)) {
+ if (gpriv->driver && gpriv->driver->suspend)
+ gpriv->driver->suspend(&gpriv->gadget);
+ usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
+ }
return 0;
}
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 94b4e7db2b94..8273126ffdf4 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -175,6 +175,27 @@ void usb_role_switch_put(struct usb_role_switch *sw)
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
+/**
+ * usb_role_switch_find_by_fwnode - Find USB role switch with its fwnode
+ * @fwnode: fwnode of the USB Role Switch
+ *
+ * Finds and returns the role switch with @fwnode. The reference count for the
+ * found switch is incremented.
+ */
+struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = class_find_device_by_fwnode(role_class, fwnode);
+
+ return dev ? to_role_switch(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
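To illustrate how a consumer might use the new helper, here is a minimal editor's sketch (not part of the patch; the fwnode argument, the chosen role and the error handling are placeholders, and usb_role_switch_set_role() merely stands in for whatever the caller actually does with the switch). The lookup takes a reference, so it is paired with usb_role_switch_put():

	/* sketch only; declarations come from <linux/usb/role.h> */
	static int example_bind_role_switch(struct fwnode_handle *fwnode)
	{
		struct usb_role_switch *sw;
		int ret;

		sw = usb_role_switch_find_by_fwnode(fwnode);	/* takes a reference */
		if (!sw)
			return -ENODEV;

		/* placeholder for the caller's real use of the switch */
		ret = usb_role_switch_set_role(sw, USB_ROLE_HOST);

		usb_role_switch_put(sw);			/* drop the reference */
		return ret;
	}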
+
static umode_t
usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 67279c6bce33..ed4a18b435a0 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -269,19 +269,19 @@ config USB_SERIAL_F8153X
config USB_SERIAL_GARMIN
- tristate "USB Garmin GPS driver"
- help
- Say Y here if you want to connect to your Garmin GPS.
- Should work with most Garmin GPS devices which have a native USB port.
+ tristate "USB Garmin GPS driver"
+ help
+ Say Y here if you want to connect to your Garmin GPS.
+ Should work with most Garmin GPS devices which have a native USB port.
- See <http://sourceforge.net/projects/garmin-gps> for the latest
- version of the driver.
+ See <http://sourceforge.net/projects/garmin-gps> for the latest
+ version of the driver.
- To compile this driver as a module, choose M here: the
- module will be called garmin_gps.
+ To compile this driver as a module, choose M here: the
+ module will be called garmin_gps.
config USB_SERIAL_IPW
- tristate "USB IPWireless (3G UMTS TDD) Driver"
+ tristate "USB IPWireless (3G UMTS TDD) Driver"
select USB_SERIAL_WWAN
help
Say Y here if you want to use a IPWireless USB modem such as
@@ -341,20 +341,20 @@ config USB_SERIAL_KLSI
module will be called kl5kusb105.
config USB_SERIAL_KOBIL_SCT
- tristate "USB KOBIL chipcard reader"
- ---help---
- Say Y here if you want to use one of the following KOBIL USB chipcard
- readers:
-
- - USB TWIN
- - KAAN Standard Plus
- - KAAN SIM
- - SecOVID Reader Plus
- - B1 Professional
- - KAAN Professional
-
- Note that you need a current CT-API.
- To compile this driver as a module, choose M here: the
+ tristate "USB KOBIL chipcard reader"
+ ---help---
+ Say Y here if you want to use one of the following KOBIL USB chipcard
+ readers:
+
+ - USB TWIN
+ - KAAN Standard Plus
+ - KAAN SIM
+ - SecOVID Reader Plus
+ - B1 Professional
+ - KAAN Professional
+
+ Note that you need a current CT-API.
+ To compile this driver as a module, choose M here: the
module will be called kobil_sct.
config USB_SERIAL_MCT_U232
@@ -458,7 +458,7 @@ config USB_SERIAL_OTI6858
tristate "USB Ours Technology Inc. OTi-6858 USB To RS232 Bridge Controller"
help
Say Y here if you want to use the OTi-6858 single port USB to serial
- converter device.
+ converter device.
To compile this driver as a module, choose M here: the
module will be called oti6858.
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 3bb1fff02bed..df582fe855f0 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -48,12 +48,6 @@
#define CH341_BIT_DCD 0x08
#define CH341_BITS_MODEM_STAT 0x0f /* all bits */
-/*******************************/
-/* baudrate calculation factor */
-/*******************************/
-#define CH341_BAUDBASE_FACTOR 1532620800
-#define CH341_BAUDBASE_DIVMAX 3
-
/* Break support - the information used to implement this was gleaned from
* the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
*/
@@ -144,37 +138,96 @@ static int ch341_control_in(struct usb_device *dev,
return 0;
}
+#define CH341_CLKRATE 48000000
+#define CH341_CLK_DIV(ps, fact) (1 << (12 - 3 * (ps) - (fact)))
+#define CH341_MIN_RATE(ps) (CH341_CLKRATE / (CH341_CLK_DIV((ps), 1) * 512))
+
+static const speed_t ch341_min_rates[] = {
+ CH341_MIN_RATE(0),
+ CH341_MIN_RATE(1),
+ CH341_MIN_RATE(2),
+ CH341_MIN_RATE(3),
+};
+
+/*
+ * The device line speed is given by the following equation:
+ *
+ * baudrate = 48000000 / (2^(12 - 3 * ps - fact) * div), where
+ *
+ * 0 <= ps <= 3,
+ * 0 <= fact <= 1,
+ * 2 <= div <= 256 if fact = 0, or
+ * 9 <= div <= 256 if fact = 1
+ */
+static int ch341_get_divisor(speed_t speed)
+{
+ unsigned int fact, div, clk_div;
+ int ps;
+
+ /*
+ * Clamp to the supported range; this makes the (ps < 0) and (div < 2)
+ * sanity checks below redundant.
+ */
+ speed = clamp(speed, 46U, 3000000U);
+
+ /*
+ * Start with highest possible base clock (fact = 1) that will give a
+ * divisor strictly less than 512.
+ */
+ fact = 1;
+ for (ps = 3; ps >= 0; ps--) {
+ if (speed > ch341_min_rates[ps])
+ break;
+ }
+
+ if (ps < 0)
+ return -EINVAL;
+
+ /* Determine corresponding divisor, rounding down. */
+ clk_div = CH341_CLK_DIV(ps, fact);
+ div = CH341_CLKRATE / (clk_div * speed);
+
+ /* Halve base clock (fact = 0) if required. */
+ if (div < 9 || div > 255) {
+ div /= 2;
+ clk_div *= 2;
+ fact = 0;
+ }
+
+ if (div < 2)
+ return -EINVAL;
+
+ /*
+ * Pick the next divisor if the resulting rate is closer to the requested
+ * one; scale up to avoid rounding errors at low rates.
+ */
+ if (16 * CH341_CLKRATE / (clk_div * div) - 16 * speed >=
+ 16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1)))
+ div++;
+
+ return (0x100 - div) << 8 | fact << 2 | ps;
+}
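As a worked example of the equation above (illustrative arithmetic, not taken from the patch): a requested rate of 115200 baud selects ps = 3 and fact = 1, so the base-clock divider is 2^(12 - 3*3 - 1) = 4 and div = 48000000 / (4 * 115200) rounds down to 104. That lies within the 9..255 range allowed for fact = 1 and is not improved by div + 1, so the function returns (0x100 - 104) << 8 | 1 << 2 | 3 = 0x9807, giving an actual line rate of 48000000 / (4 * 104), roughly 115385 baud.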
+
static int ch341_set_baudrate_lcr(struct usb_device *dev,
struct ch341_private *priv, u8 lcr)
{
- short a;
+ int val;
int r;
- unsigned long factor;
- short divisor;
if (!priv->baud_rate)
return -EINVAL;
- factor = (CH341_BAUDBASE_FACTOR / priv->baud_rate);
- divisor = CH341_BAUDBASE_DIVMAX;
-
- while ((factor > 0xfff0) && divisor) {
- factor >>= 3;
- divisor--;
- }
- if (factor > 0xfff0)
+ val = ch341_get_divisor(priv->baud_rate);
+ if (val < 0)
return -EINVAL;
- factor = 0x10000 - factor;
- a = (factor & 0xff00) | divisor;
-
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
*/
- a |= BIT(7);
+ val |= BIT(7);
- r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, val);
if (r)
return r;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 979bef9bfb6b..f5143eedbc48 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 25e81faf4c24..9ad44a96dfe3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1033,6 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
/* Sienna devices */
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+ /* U-Blox devices */
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 22d66217cb41..e8373528264c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1558,3 +1558,10 @@
*/
#define UNJO_VID 0x22B7
#define UNJO_ISODEBUG_V1_PID 0x150D
+
+/*
+ * U-Blox products (http://www.u-blox.com).
+ */
+#define UBLOX_VID 0x1546
+#define UBLOX_C099F9P_ZED_PID 0x0502
+#define UBLOX_C099F9P_ODIN_PID 0x0503
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 18110225d506..2ec4eeacebc7 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1833,10 +1833,6 @@ static int mos7720_startup(struct usb_serial *serial)
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
-
if (product == MOSCHIP_DEVICE_ID_7715) {
struct urb *urb = serial->port[0]->interrupt_in_urb;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a698d46ba773..23f91d658cb4 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -89,17 +89,10 @@
/* For higher baud Rates use TIOCEXBAUD */
#define TIOCEXBAUD 0x5462
-/* vendor id and device id defines */
-
-/* The native mos7840/7820 component */
-#define USB_VENDOR_ID_MOSCHIP 0x9710
-#define MOSCHIP_DEVICE_ID_7840 0x7840
-#define MOSCHIP_DEVICE_ID_7843 0x7843
-#define MOSCHIP_DEVICE_ID_7820 0x7820
-#define MOSCHIP_DEVICE_ID_7810 0x7810
-/* The native component can have its vendor/device id's overridden
- * in vendor-specific implementations. Such devices can be handled
- * by making a change here, in id_table.
+/*
+ * Vendor id and device id defines
+ *
+ * NOTE: Do not add new defines; add entries directly to the id_table instead.
*/
#define USB_VENDOR_ID_BANDB 0x0856
#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
@@ -116,14 +109,6 @@
#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
-/* This driver also supports
- * ATEN UC2324 device using Moschip MCS7840
- * ATEN UC2322 device using Moschip MCS7820
- */
-#define USB_VENDOR_ID_ATENINTL 0x0557
-#define ATENINTL_DEVICE_ID_UC2324 0x2011
-#define ATENINTL_DEVICE_ID_UC2322 0x7820
-
/* Interrupt Routine Defines */
#define SERIAL_IIR_RLS 0x06
@@ -171,30 +156,37 @@
#define LED_OFF_MS 500
enum mos7840_flag {
- MOS7840_FLAG_CTRL_BUSY,
MOS7840_FLAG_LED_BUSY,
};
+#define MCS_PORT_MASK GENMASK(2, 0)
+#define MCS_PORTS(nr) ((nr) & MCS_PORT_MASK)
+#define MCS_LED BIT(3)
+
+#define MCS_DEVICE(vid, pid, flags) \
+ USB_DEVICE((vid), (pid)), .driver_info = (flags)
+
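For instance (illustrative arithmetic, not from the patch), MCS_PORTS(1) | MCS_LED expands to (1 & GENMASK(2, 0)) | BIT(3) = 0x09: the port count sits in bits 2..0 and bit 3 flags LED support, which is how the updated mos7840_calc_num_ports() recovers the number of ports further down.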
static const struct usb_device_id id_table[] = {
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7843)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
- {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
- {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
+ { MCS_DEVICE(0x0557, 0x2011, MCS_PORTS(4)) }, /* ATEN UC2324 */
+ { MCS_DEVICE(0x0557, 0x7820, MCS_PORTS(2)) }, /* ATEN UC2322 */
+ { MCS_DEVICE(0x110a, 0x2210, MCS_PORTS(2)) }, /* Moxa UPort 2210 */
+ { MCS_DEVICE(0x9710, 0x7810, MCS_PORTS(1) | MCS_LED) }, /* ASIX MCS7810 */
+ { MCS_DEVICE(0x9710, 0x7820, MCS_PORTS(2)) }, /* MosChip MCS7820 */
+ { MCS_DEVICE(0x9710, 0x7840, MCS_PORTS(4)) }, /* MosChip MCS7840 */
+ { MCS_DEVICE(0x9710, 0x7843, MCS_PORTS(3)) }, /* ASIX MCS7840 3 port */
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -206,19 +198,12 @@ struct moschip_port {
struct urb *read_urb; /* read URB for this port */
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
- char open;
- char open_ports;
struct usb_serial_port *port; /* loop back to the owner of this object */
/* Offsets */
__u8 SpRegOffset;
__u8 ControlRegOffset;
__u8 DcrRegOffset;
- /* for processing control URBS in interrupt context */
- struct urb *control_urb;
- struct usb_ctrlrequest *dr;
- char *ctrl_buf;
- int MsrLsr;
spinlock_t pool_lock;
struct urb *write_urb_pool[NUM_URBS];
@@ -360,150 +345,11 @@ static void mos7840_dump_serial_port(struct usb_serial_port *port,
/************************************************************************/
/************************************************************************/
-/* I N T E R F A C E F U N C T I O N S */
-/* I N T E R F A C E F U N C T I O N S */
-/************************************************************************/
-/************************************************************************/
-
-static inline void mos7840_set_port_private(struct usb_serial_port *port,
- struct moschip_port *data)
-{
- usb_set_serial_port_data(port, (void *)data);
-}
-
-static inline struct moschip_port *mos7840_get_port_private(struct
- usb_serial_port
- *port)
-{
- return (struct moschip_port *)usb_get_serial_port_data(port);
-}
-
-static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
-{
- struct moschip_port *mos7840_port;
- struct async_icount *icount;
- mos7840_port = port;
- if (new_msr &
- (MOS_MSR_DELTA_CTS | MOS_MSR_DELTA_DSR | MOS_MSR_DELTA_RI |
- MOS_MSR_DELTA_CD)) {
- icount = &mos7840_port->port->icount;
-
- /* update input line counters */
- if (new_msr & MOS_MSR_DELTA_CTS)
- icount->cts++;
- if (new_msr & MOS_MSR_DELTA_DSR)
- icount->dsr++;
- if (new_msr & MOS_MSR_DELTA_CD)
- icount->dcd++;
- if (new_msr & MOS_MSR_DELTA_RI)
- icount->rng++;
-
- wake_up_interruptible(&port->port->port.delta_msr_wait);
- }
-}
-
-static void mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
-{
- struct async_icount *icount;
-
- if (new_lsr & SERIAL_LSR_BI) {
- /*
- * Parity and Framing errors only count if they
- * occur exclusive of a break being
- * received.
- */
- new_lsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI);
- }
-
- /* update input line counters */
- icount = &port->port->icount;
- if (new_lsr & SERIAL_LSR_BI)
- icount->brk++;
- if (new_lsr & SERIAL_LSR_OE)
- icount->overrun++;
- if (new_lsr & SERIAL_LSR_PE)
- icount->parity++;
- if (new_lsr & SERIAL_LSR_FE)
- icount->frame++;
-}
-
-/************************************************************************/
-/************************************************************************/
/* U S B C A L L B A C K F U N C T I O N S */
/* U S B C A L L B A C K F U N C T I O N S */
/************************************************************************/
/************************************************************************/
-static void mos7840_control_callback(struct urb *urb)
-{
- unsigned char *data;
- struct moschip_port *mos7840_port;
- struct device *dev = &urb->dev->dev;
- __u8 regval = 0x0;
- int status = urb->status;
-
- mos7840_port = urb->context;
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
- goto out;
- default:
- dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
- goto out;
- }
-
- dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
- if (urb->actual_length < 1)
- goto out;
-
- dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
- mos7840_port->MsrLsr, mos7840_port->port_num);
- data = urb->transfer_buffer;
- regval = (__u8) data[0];
- dev_dbg(dev, "%s data is %x\n", __func__, regval);
- if (mos7840_port->MsrLsr == 0)
- mos7840_handle_new_msr(mos7840_port, regval);
- else if (mos7840_port->MsrLsr == 1)
- mos7840_handle_new_lsr(mos7840_port, regval);
-out:
- clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
-}
-
-static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
- __u16 *val)
-{
- struct usb_device *dev = mcs->port->serial->dev;
- struct usb_ctrlrequest *dr = mcs->dr;
- unsigned char *buffer = mcs->ctrl_buf;
- int ret;
-
- if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
- return -EBUSY;
-
- dr->bRequestType = MCS_RD_RTYPE;
- dr->bRequest = MCS_RDREQ;
- dr->wValue = cpu_to_le16(Wval); /* 0 */
- dr->wIndex = cpu_to_le16(reg);
- dr->wLength = cpu_to_le16(2);
-
- usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0),
- (unsigned char *)dr, buffer, 2,
- mos7840_control_callback, mcs);
- mcs->control_urb->transfer_buffer_length = 2;
- ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
- if (ret)
- clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
-
- return ret;
-}
-
static void mos7840_set_led_callback(struct urb *urb)
{
switch (urb->status) {
@@ -580,146 +426,6 @@ static void mos7840_led_activity(struct usb_serial_port *port)
}
/*****************************************************************************
- * mos7840_interrupt_callback
- * this is the callback function for when we have received data on the
- * interrupt endpoint.
- *****************************************************************************/
-
-static void mos7840_interrupt_callback(struct urb *urb)
-{
- int result;
- int length;
- struct moschip_port *mos7840_port;
- struct usb_serial *serial;
- __u16 Data;
- unsigned char *data;
- __u8 sp[5];
- int i, rv = 0;
- __u16 wval, wreg = 0;
- int status = urb->status;
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
- __func__, status);
- return;
- default:
- dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n",
- __func__, status);
- goto exit;
- }
-
- length = urb->actual_length;
- data = urb->transfer_buffer;
-
- serial = urb->context;
-
- /* Moschip get 5 bytes
- * Byte 1 IIR Port 1 (port.number is 0)
- * Byte 2 IIR Port 2 (port.number is 1)
- * Byte 3 IIR Port 3 (port.number is 2)
- * Byte 4 IIR Port 4 (port.number is 3)
- * Byte 5 FIFO status for both */
-
- if (length > 5) {
- dev_dbg(&urb->dev->dev, "%s", "Wrong data !!!\n");
- return;
- }
-
- sp[0] = (__u8) data[0];
- sp[1] = (__u8) data[1];
- sp[2] = (__u8) data[2];
- sp[3] = (__u8) data[3];
-
- for (i = 0; i < serial->num_ports; i++) {
- mos7840_port = mos7840_get_port_private(serial->port[i]);
- wval = ((__u16)serial->port[i]->port_number + 1) << 8;
- if (mos7840_port->open) {
- if (sp[i] & 0x01) {
- dev_dbg(&urb->dev->dev, "SP%d No Interrupt !!!\n", i);
- } else {
- switch (sp[i] & 0x0f) {
- case SERIAL_IIR_RLS:
- dev_dbg(&urb->dev->dev, "Serial Port %d: Receiver status error or \n", i);
- dev_dbg(&urb->dev->dev, "address bit detected in 9-bit mode\n");
- mos7840_port->MsrLsr = 1;
- wreg = LINE_STATUS_REGISTER;
- break;
- case SERIAL_IIR_MS:
- dev_dbg(&urb->dev->dev, "Serial Port %d: Modem status change\n", i);
- mos7840_port->MsrLsr = 0;
- wreg = MODEM_STATUS_REGISTER;
- break;
- }
- rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
- }
- }
- }
- if (!(rv < 0))
- /* the completion handler for the control urb will resubmit */
- return;
-exit:
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result) {
- dev_err(&urb->dev->dev,
- "%s - Error %d submitting interrupt urb\n",
- __func__, result);
- }
-}
-
-static int mos7840_port_paranoia_check(struct usb_serial_port *port,
- const char *function)
-{
- if (!port) {
- pr_debug("%s - port == NULL\n", function);
- return -1;
- }
- if (!port->serial) {
- pr_debug("%s - port->serial == NULL\n", function);
- return -1;
- }
-
- return 0;
-}
-
-/* Inline functions to check the sanity of a pointer that is passed to us */
-static int mos7840_serial_paranoia_check(struct usb_serial *serial,
- const char *function)
-{
- if (!serial) {
- pr_debug("%s - serial == NULL\n", function);
- return -1;
- }
- if (!serial->type) {
- pr_debug("%s - serial->type == NULL!\n", function);
- return -1;
- }
-
- return 0;
-}
-
-static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
- const char *function)
-{
- /* if no port was specified, or it fails a paranoia check */
- if (!port ||
- mos7840_port_paranoia_check(port, function) ||
- mos7840_serial_paranoia_check(port->serial, function)) {
- /* then say that we don't have a valid usb_serial thing,
- * which will end up genrating -ENODEV return values */
- return NULL;
- }
-
- return port->serial;
-}
-
-/*****************************************************************************
* mos7840_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
@@ -727,35 +433,18 @@ static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
static void mos7840_bulk_in_callback(struct urb *urb)
{
+ struct moschip_port *mos7840_port = urb->context;
+ struct usb_serial_port *port = mos7840_port->port;
int retval;
unsigned char *data;
- struct usb_serial *serial;
- struct usb_serial_port *port;
- struct moschip_port *mos7840_port;
int status = urb->status;
- mos7840_port = urb->context;
- if (!mos7840_port)
- return;
-
if (status) {
dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status);
mos7840_port->read_urb_busy = false;
return;
}
- port = mos7840_port->port;
- if (mos7840_port_paranoia_check(port, __func__)) {
- mos7840_port->read_urb_busy = false;
- return;
- }
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial) {
- mos7840_port->read_urb_busy = false;
- return;
- }
-
data = urb->transfer_buffer;
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
@@ -767,12 +456,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
dev_dbg(&port->dev, "icount.rx is %d:\n", port->icount.rx);
}
- if (!mos7840_port->read_urb) {
- dev_dbg(&port->dev, "%s", "URB KILLED !!!\n");
- mos7840_port->read_urb_busy = false;
- return;
- }
-
if (mos7840_port->has_led)
mos7840_led_activity(port);
@@ -793,14 +476,12 @@ static void mos7840_bulk_in_callback(struct urb *urb)
static void mos7840_bulk_out_data_callback(struct urb *urb)
{
- struct moschip_port *mos7840_port;
- struct usb_serial_port *port;
+ struct moschip_port *mos7840_port = urb->context;
+ struct usb_serial_port *port = mos7840_port->port;
int status = urb->status;
unsigned long flags;
int i;
- mos7840_port = urb->context;
- port = mos7840_port->port;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; i++) {
if (urb == mos7840_port->write_urb_pool[i]) {
@@ -815,11 +496,7 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
return;
}
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_port->open)
- tty_port_tty_wakeup(&port->port);
+ tty_port_tty_wakeup(&port->port);
}
@@ -836,32 +513,16 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
int response;
int j;
- struct usb_serial *serial;
struct urb *urb;
__u16 Data;
int status;
- struct moschip_port *mos7840_port;
- struct moschip_port *port0;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -ENODEV;
-
- serial = port->serial;
-
- if (mos7840_serial_paranoia_check(serial, __func__))
- return -ENODEV;
-
- mos7840_port = mos7840_get_port_private(port);
- port0 = mos7840_get_port_private(serial->port[0]);
-
- if (mos7840_port == NULL || port0 == NULL)
- return -ENODEV;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
- port0->open_ports++;
/* Initialising the write urb pool */
for (j = 0; j < NUM_URBS; ++j) {
@@ -1012,41 +673,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
Data);
- /* Check to see if we've set up our endpoint info yet *
- * (can't set it up in mos7840_startup as the structures *
- * were not set up at that time.) */
- if (port0->open_ports == 1) {
- /* FIXME: Buffer never NULL, so URB is not submitted. */
- if (serial->port[0]->interrupt_in_buffer == NULL) {
- /* set up interrupt urb */
- usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
- serial->dev,
- usb_rcvintpipe(serial->dev,
- serial->port[0]->interrupt_in_endpointAddress),
- serial->port[0]->interrupt_in_buffer,
- serial->port[0]->interrupt_in_urb->
- transfer_buffer_length,
- mos7840_interrupt_callback,
- serial,
- serial->port[0]->interrupt_in_urb->interval);
-
- /* start interrupt read for mos7840 */
- response =
- usb_submit_urb(serial->port[0]->interrupt_in_urb,
- GFP_KERNEL);
- if (response) {
- dev_err(&port->dev, "%s - Error %d submitting "
- "interrupt urb\n", __func__, response);
- }
-
- }
-
- }
-
- /* see if we've set up our endpoint info yet *
- * (can't set it up in mos7840_startup as the *
- * structures were not set up at that time.) */
-
dev_dbg(&port->dev, "port number is %d\n", port->port_number);
dev_dbg(&port->dev, "minor number is %d\n", port->minor);
dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
@@ -1086,9 +712,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
/* initialize our port settings */
/* Must set to enable ints! */
mos7840_port->shadowMCR = MCR_MASTER_IE;
- /* send a open port command */
- mos7840_port->open = 1;
- /* mos7840_change_port_settings(mos7840_port,old_termios); */
return 0;
err:
@@ -1115,17 +738,10 @@ err:
static int mos7840_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
int chars = 0;
unsigned long flags;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return 0;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return 0;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
@@ -1147,25 +763,10 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
static void mos7840_close(struct usb_serial_port *port)
{
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
- struct moschip_port *port0;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int j;
__u16 Data;
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial)
- return;
-
- mos7840_port = mos7840_get_port_private(port);
- port0 = mos7840_get_port_private(serial->port[0]);
-
- if (mos7840_port == NULL || port0 == NULL)
- return;
-
for (j = 0; j < NUM_URBS; ++j)
usb_kill_urb(mos7840_port->write_urb_pool[j]);
@@ -1180,22 +781,11 @@ static void mos7840_close(struct usb_serial_port *port)
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
- port0->open_ports--;
- dev_dbg(&port->dev, "%s in close%d\n", __func__, port0->open_ports);
- if (port0->open_ports == 0) {
- if (serial->port[0]->interrupt_in_urb) {
- dev_dbg(&port->dev, "Shutdown interrupt_in_urb\n");
- usb_kill_urb(serial->port[0]->interrupt_in_urb);
- }
- }
-
Data = 0x0;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
Data = 0x00;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
-
- mos7840_port->open = 0;
}
/*****************************************************************************
@@ -1205,21 +795,8 @@ static void mos7840_close(struct usb_serial_port *port)
static void mos7840_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned char data;
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial)
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
if (break_state == -1)
data = mos7840_port->shadowLCR | LCR_SET_BREAK;
@@ -1244,17 +821,10 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
static int mos7840_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
int room = 0;
unsigned long flags;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return -1;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
@@ -1280,29 +850,17 @@ static int mos7840_write_room(struct tty_struct *tty)
static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
int status;
int i;
int bytes_sent = 0;
int transfer_size;
unsigned long flags;
-
- struct moschip_port *mos7840_port;
- struct usb_serial *serial;
struct urb *urb;
/* __u16 Data; */
const unsigned char *current_position = data;
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- serial = port->serial;
- if (mos7840_serial_paranoia_check(serial, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return -1;
-
/* try to find a free urb in the list */
urb = NULL;
@@ -1383,22 +941,9 @@ exit:
static void mos7840_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s", "port not opened\n");
- return;
- }
-
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
@@ -1425,19 +970,8 @@ static void mos7840_throttle(struct tty_struct *tty)
static void mos7840_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- struct moschip_port *mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
@@ -1460,15 +994,10 @@ static void mos7840_unthrottle(struct tty_struct *tty)
static int mos7840_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
unsigned int result;
__u16 msr;
__u16 mcr;
int status;
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
if (status < 0)
@@ -1493,15 +1022,10 @@ static int mos7840_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned int mcr;
int status;
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -ENODEV;
-
/* FIXME: What locks the port registers ? */
mcr = mos7840_port->shadowMCR;
if (clear & TIOCM_RTS)
@@ -1578,21 +1102,11 @@ static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port,
static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
int baudRate)
{
+ struct usb_serial_port *port = mos7840_port->port;
int divisor = 0;
int status;
__u16 Data;
__u16 clk_sel_val;
- struct usb_serial_port *port;
-
- if (mos7840_port == NULL)
- return -1;
-
- port = mos7840_port->port;
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- if (mos7840_serial_paranoia_check(port->serial, __func__))
- return -1;
dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudRate);
/* reset clk_uart_sel in spregOffset */
@@ -1681,6 +1195,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
static void mos7840_change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7840_port, struct ktermios *old_termios)
{
+ struct usb_serial_port *port = mos7840_port->port;
int baud;
unsigned cflag;
__u8 lData;
@@ -1688,23 +1203,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
__u8 lStop;
int status;
__u16 Data;
- struct usb_serial_port *port;
-
- if (mos7840_port == NULL)
- return;
-
- port = mos7840_port->port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_serial_paranoia_check(port->serial, __func__))
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
lData = LCR_BITS_8;
lStop = LCR_STOP_1;
@@ -1839,37 +1337,13 @@ static void mos7840_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = port->serial;
-
- if (mos7840_serial_paranoia_check(serial, __func__))
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
/* change the port settings to the new ones specified */
mos7840_change_port_settings(tty, mos7840_port, old_termios);
- if (!mos7840_port->read_urb) {
- dev_dbg(&port->dev, "%s", "URB KILLED !!!!!\n");
- return;
- }
-
if (!mos7840_port->read_urb_busy) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
@@ -1916,7 +1390,7 @@ static int mos7840_get_serial_info(struct tty_struct *tty,
struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port = mos7840_get_port_private(port);
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
ss->type = PORT_16550A;
ss->line = mos7840_port->port->minor;
@@ -1939,15 +1413,6 @@ static int mos7840_ioctl(struct tty_struct *tty,
{
struct usb_serial_port *port = tty->driver_data;
void __user *argp = (void __user *)arg;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -1;
switch (cmd) {
/* return number of bytes available */
@@ -1962,6 +1427,13 @@ static int mos7840_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
+/*
+ * Check if GPO (pin 42) is connected to GPI (pin 33) as recommended by ASIX
+ * for MCS7810 by bit-banging a 16-bit word.
+ *
+ * Note that GPO is really RTS of the third port so this will toggle RTS of
+ * port two or three on two- and four-port devices.
+ */
static int mos7810_check(struct usb_serial *serial)
{
int i, pass_count = 0;
@@ -2019,16 +1491,12 @@ static int mos7810_check(struct usb_serial *serial)
static int mos7840_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ unsigned long device_flags = id->driver_info;
u8 *buf;
- int device_type;
- if (product == MOSCHIP_DEVICE_ID_7810 ||
- product == MOSCHIP_DEVICE_ID_7820 ||
- product == MOSCHIP_DEVICE_ID_7843) {
- device_type = product;
+ /* Skip device-type detection if we already have device flags. */
+ if (device_flags)
goto out;
- }
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
@@ -2040,15 +1508,15 @@ static int mos7840_probe(struct usb_serial *serial,
/* For a MCS7840 device GPIO0 must be set to 1 */
if (buf[0] & 0x01)
- device_type = MOSCHIP_DEVICE_ID_7840;
+ device_flags = MCS_PORTS(4);
else if (mos7810_check(serial))
- device_type = MOSCHIP_DEVICE_ID_7810;
+ device_flags = MCS_PORTS(1) | MCS_LED;
else
- device_type = MOSCHIP_DEVICE_ID_7820;
+ device_flags = MCS_PORTS(2);
kfree(buf);
out:
- usb_set_serial_data(serial, (void *)(unsigned long)device_type);
+ usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
@@ -2056,19 +1524,10 @@ out:
static int mos7840_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
- int device_type = (unsigned long)usb_get_serial_data(serial);
- int num_ports;
+ unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
+ int num_ports = MCS_PORTS(device_flags);
- if (device_type == MOSCHIP_DEVICE_ID_7843)
- num_ports = 3;
- else
- num_ports = (device_type >> 4) & 0x000F;
-
- /*
- * num_ports is currently never zero as device_type is one of
- * MOSCHIP_DEVICE_ID_78{1,2,4}0.
- */
- if (num_ports == 0)
+ if (num_ports == 0 || num_ports > 4)
return -ENODEV;
if (epds->num_bulk_in < num_ports || epds->num_bulk_out < num_ports) {
@@ -2079,10 +1538,27 @@ static int mos7840_calc_num_ports(struct usb_serial *serial,
return num_ports;
}
+static int mos7840_attach(struct usb_serial *serial)
+{
+ struct device *dev = &serial->interface->dev;
+ int status;
+ u16 val;
+
+ /* Zero Length flag enable */
+ val = 0x0f;
+ status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, val);
+ if (status < 0)
+ dev_dbg(dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
+ else
+ dev_dbg(dev, "ZLP_REG5 Writing success status%d\n", status);
+
+ return status;
+}
+
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- int device_type = (unsigned long)usb_get_serial_data(serial);
+ unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
struct moschip_port *mos7840_port;
int status;
int pnum;
@@ -2103,7 +1579,6 @@ static int mos7840_port_probe(struct usb_serial_port *port)
* common to all port */
mos7840_port->port = port;
- mos7840_set_port_private(port, mos7840_port);
spin_lock_init(&mos7840_port->pool_lock);
/* minor is not initialised until later by
@@ -2129,14 +1604,14 @@ static int mos7840_port_probe(struct usb_serial_port *port)
mos7840_port->DcrRegOffset = 0x16 + 3 * (phy_num - 2);
}
mos7840_dump_serial_port(port, mos7840_port);
- mos7840_set_port_private(port, mos7840_port);
+ usb_set_serial_port_data(port, mos7840_port);
/* enable rx_disable bit in control register */
status = mos7840_get_reg_sync(port,
mos7840_port->ControlRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
Data |= 0x08; /* setting driver done bit */
@@ -2148,7 +1623,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
@@ -2159,7 +1634,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 0), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status);
@@ -2168,7 +1643,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 1), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status);
@@ -2177,7 +1652,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 2), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status);
@@ -2186,7 +1661,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
@@ -2203,7 +1678,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
@@ -2217,7 +1692,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num)));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status);
} else {
@@ -2229,27 +1704,16 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status);
}
- mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
- mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
- mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest),
- GFP_KERNEL);
- if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf ||
- !mos7840_port->dr) {
- status = -ENOMEM;
- goto error;
- }
- mos7840_port->has_led = false;
+ mos7840_port->has_led = device_flags & MCS_LED;
/* Initialize LED timers */
- if (device_type == MOSCHIP_DEVICE_ID_7810) {
- mos7840_port->has_led = true;
-
+ if (mos7840_port->has_led) {
mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
GFP_KERNEL);
@@ -2269,29 +1733,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
-out:
- if (pnum == serial->num_ports - 1) {
- /* Zero Length flag enable */
- Data = 0x0f;
- status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
- if (status < 0) {
- dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
- goto error;
- } else
- dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
- MOS_WDR_TIMEOUT);
- }
return 0;
error:
kfree(mos7840_port->led_dr);
usb_free_urb(mos7840_port->led_urb);
- kfree(mos7840_port->dr);
- kfree(mos7840_port->ctrl_buf);
- usb_free_urb(mos7840_port->control_urb);
kfree(mos7840_port);
return status;
@@ -2299,9 +1745,7 @@ error:
static int mos7840_port_remove(struct usb_serial_port *port)
{
- struct moschip_port *mos7840_port;
-
- mos7840_port = mos7840_get_port_private(port);
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
if (mos7840_port->has_led) {
/* Turn off LED */
@@ -2314,10 +1758,7 @@ static int mos7840_port_remove(struct usb_serial_port *port)
usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->led_dr);
}
- usb_kill_urb(mos7840_port->control_urb);
- usb_free_urb(mos7840_port->control_urb);
- kfree(mos7840_port->ctrl_buf);
- kfree(mos7840_port->dr);
+
kfree(mos7840_port);
return 0;
@@ -2340,18 +1781,17 @@ static struct usb_serial_driver moschip7840_4port_device = {
.unthrottle = mos7840_unthrottle,
.calc_num_ports = mos7840_calc_num_ports,
.probe = mos7840_probe,
+ .attach = mos7840_attach,
.ioctl = mos7840_ioctl,
.get_serial = mos7840_get_serial_info,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
.tiocmget = mos7840_tiocmget,
.tiocmset = mos7840_tiocmset,
- .tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
- .read_int_callback = mos7840_interrupt_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 06ab016be0b6..e9491d400a24 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
#define DELL_PRODUCT_5821E 0x81d7
+#define DELL_PRODUCT_5821E_ESIM 0x81e0
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -1044,6 +1045,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1990,6 +1993,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9d27b76c5c6e..aab737e1e7b6 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -47,6 +47,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GC) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GB) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GT) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GL) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GE) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GS) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
@@ -130,9 +136,11 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define VENDOR_WRITE_REQUEST_TYPE 0x40
#define VENDOR_WRITE_REQUEST 0x01
+#define VENDOR_WRITE_NREQUEST 0x80
#define VENDOR_READ_REQUEST_TYPE 0xc0
#define VENDOR_READ_REQUEST 0x01
+#define VENDOR_READ_NREQUEST 0x81
#define UART_STATE_INDEX 8
#define UART_STATE_MSR_MASK 0x8b
@@ -148,11 +156,24 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define PL2303_FLOWCTRL_MASK 0xf0
+#define PL2303_READ_TYPE_HX_STATUS 0x8080
+
+#define PL2303_HXN_RESET_REG 0x07
+#define PL2303_HXN_RESET_UPSTREAM_PIPE 0x02
+#define PL2303_HXN_RESET_DOWNSTREAM_PIPE 0x01
+
+#define PL2303_HXN_FLOWCTRL_REG 0x0a
+#define PL2303_HXN_FLOWCTRL_MASK 0x1c
+#define PL2303_HXN_FLOWCTRL_NONE 0x1c
+#define PL2303_HXN_FLOWCTRL_RTS_CTS 0x18
+#define PL2303_HXN_FLOWCTRL_XON_XOFF 0x0c
+
static void pl2303_set_break(struct usb_serial_port *port, bool enable);
enum pl2303_type {
TYPE_01, /* Type 0 and 1 (difference unknown) */
TYPE_HX, /* HX version of the pl2303 chip */
+ TYPE_HXN, /* HXN version of the pl2303 chip */
TYPE_COUNT
};
@@ -184,16 +205,26 @@ static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
[TYPE_HX] = {
.max_baud_rate = 12000000,
},
+ [TYPE_HXN] = {
+ .max_baud_rate = 12000000,
+ },
};
static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
unsigned char buf[1])
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
+ u8 request;
int res;
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ request = VENDOR_READ_NREQUEST;
+ else
+ request = VENDOR_READ_REQUEST;
+
res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
+ request, VENDOR_READ_REQUEST_TYPE,
value, 0, buf, 1, 100);
if (res != 1) {
dev_err(dev, "%s - failed to read [%04x]: %d\n", __func__,
@@ -211,13 +242,20 @@ static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
+ u8 request;
int res;
dev_dbg(dev, "%s - [%04x] = %02x\n", __func__, value, index);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ request = VENDOR_WRITE_NREQUEST;
+ else
+ request = VENDOR_WRITE_REQUEST;
+
res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- VENDOR_WRITE_REQUEST, VENDOR_WRITE_REQUEST_TYPE,
+ request, VENDOR_WRITE_REQUEST_TYPE,
value, index, NULL, 0, 100);
if (res) {
dev_err(dev, "%s - failed to write [%04x]: %d\n", __func__,
@@ -230,6 +268,7 @@ static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int ret = 0;
u8 *buf;
@@ -237,7 +276,11 @@ static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
if (!buf)
return -ENOMEM;
- ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ ret = pl2303_vendor_read(serial, reg, buf);
+ else
+ ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+
if (ret)
goto out_free;
@@ -320,6 +363,7 @@ static int pl2303_startup(struct usb_serial *serial)
struct pl2303_serial_private *spriv;
enum pl2303_type type = TYPE_01;
unsigned char *buf;
+ int res;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
@@ -341,26 +385,37 @@ static int pl2303_startup(struct usb_serial *serial)
type = TYPE_01; /* type 1 */
dev_dbg(&serial->interface->dev, "device type: %d\n", type);
+ if (type == TYPE_HX) {
+ res = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
+ PL2303_READ_TYPE_HX_STATUS, 0, buf, 1, 100);
+ if (res != 1)
+ type = TYPE_HXN;
+ }
+
spriv->type = &pl2303_type_data[type];
spriv->quirks = (unsigned long)usb_get_serial_data(serial);
spriv->quirks |= spriv->type->quirks;
usb_set_serial_data(serial, spriv);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_write(serial, 0x0404, 0);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_read(serial, 0x8383, buf);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_write(serial, 0x0404, 1);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_read(serial, 0x8383, buf);
- pl2303_vendor_write(serial, 0, 1);
- pl2303_vendor_write(serial, 1, 0);
- if (spriv->quirks & PL2303_QUIRK_LEGACY)
- pl2303_vendor_write(serial, 2, 0x24);
- else
- pl2303_vendor_write(serial, 2, 0x44);
+ if (type != TYPE_HXN) {
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 0);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 1);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_write(serial, 0, 1);
+ pl2303_vendor_write(serial, 1, 0);
+ if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ pl2303_vendor_write(serial, 2, 0x24);
+ else
+ pl2303_vendor_write(serial, 2, 0x44);
+ }
kfree(buf);
@@ -719,14 +774,31 @@ static void pl2303_set_termios(struct tty_struct *tty,
}
if (C_CRTSCTS(tty)) {
- if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ if (spriv->quirks & PL2303_QUIRK_LEGACY) {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x40);
- else
+ } else if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_RTS_CTS);
+ } else {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x60);
+ }
} else if (pl2303_enable_xonxoff(tty, spriv->type)) {
- pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_XON_XOFF);
+ } else {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
+ }
} else {
- pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_NONE);
+ } else {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
+ }
}
kfree(buf);
@@ -767,8 +839,14 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
/* reset upstream data pipes */
- pl2303_vendor_write(serial, 8, 0);
- pl2303_vendor_write(serial, 9, 0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_vendor_write(serial, PL2303_HXN_RESET_REG,
+ PL2303_HXN_RESET_UPSTREAM_PIPE |
+ PL2303_HXN_RESET_DOWNSTREAM_PIPE);
+ } else {
+ pl2303_vendor_write(serial, 8, 0);
+ pl2303_vendor_write(serial, 9, 0);
+ }
}
/* Setup termios */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index b0175f17d1a2..a019ea7e6e0e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -9,6 +9,12 @@
#define PL2303_VENDOR_ID 0x067b
#define PL2303_PRODUCT_ID 0x2303
#define PL2303_PRODUCT_ID_TB 0x2304
+#define PL2303_PRODUCT_ID_GC 0x23a3
+#define PL2303_PRODUCT_ID_GB 0x23b3
+#define PL2303_PRODUCT_ID_GT 0x23c3
+#define PL2303_PRODUCT_ID_GL 0x23d3
+#define PL2303_PRODUCT_ID_GE 0x23e3
+#define PL2303_PRODUCT_ID_GS 0x23f3
#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 8b1b73065421..98c1aa594e6c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -561,7 +561,7 @@ static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
residue = min(residue, transfer_length);
if (us->srb != NULL)
scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
- (int)residue));
+ residue));
}
if (bcs->Status != US_BULK_STAT_OK)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 54a3c8195c96..66a4dcbbb1fc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -369,8 +369,8 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
/* check for state-transition errors */
if (us->srb != NULL) {
- printk(KERN_ERR "usb-storage: Error in %s: us->srb = %p\n",
- __func__, us->srb);
+ dev_err(&us->pusb_intf->dev,
+ "Error in %s: us->srb = %p\n", __func__, us->srb);
return SCSI_MLQUEUE_HOST_BUSY;
}
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 96cb0409dd89..238a8088e17f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1284,8 +1284,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
} else {
residue = min(residue, transfer_length);
- scsi_set_resid(srb, max(scsi_get_resid(srb),
- (int) residue));
+ scsi_set_resid(srb, max(scsi_get_resid(srb), residue));
}
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 34538253f12c..95bba3ba6ac6 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -825,6 +825,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->wce_default_on = 1;
}
+ /* Some disks cannot handle READ_CAPACITY_16 */
+ if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
+ sdev->no_read_capacity_16 = 1;
+
/*
* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
@@ -834,6 +838,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->fix_capacity = 1;
/*
+ * In some cases we have to guess the capacity
+ */
+ if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
+ sdev->guess_capacity = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
@@ -859,7 +869,6 @@ static struct scsi_host_template uas_host_template = {
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
- .sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
.dma_boundary = PAGE_SIZE - 1,
};
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d0bdebd87ce3..1b23741036ee 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -87,12 +87,15 @@ UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
-/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+/*
+ * Initially Reported-by: Takeo Nakayama <javhera@gmx.com>
+ * UAS Ignore Reported by Steven Ellis <sellis@redhat.com>
+ */
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
"JMicron",
"JMS566",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_REPORT_OPCODES),
+ US_FL_NO_REPORT_OPCODES | US_FL_IGNORE_UAS),
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 895e2418de53..b4f2aac7ae8a 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -50,6 +50,17 @@ source "drivers/usb/typec/tcpm/Kconfig"
source "drivers/usb/typec/ucsi/Kconfig"
+config TYPEC_HD3SS3220
+ tristate "TI HD3SS3220 Type-C DRP Port controller driver"
+ depends on I2C
+ depends on USB_ROLE_SWITCH
+ help
+ Say Y or M here if your system has a TI HD3SS3220 Type-C DRP Port
+ controller.
+
+ If you choose to build this driver as a dynamically linked module, the
+ module will be called hd3ss3220.ko.
+
config TYPEC_TPS6598X
tristate "TI TPS6598x USB Power Delivery controller driver"
depends on I2C
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 6696b7263d61..7753a5c3cd46 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -4,5 +4,6 @@ typec-y := class.o mux.o bus.o
obj-$(CONFIG_TYPEC) += altmodes/
obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
+obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
obj-$(CONFIG_TYPEC) += mux/
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 94a3eda62add..7ece6ca6e690 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -53,6 +53,7 @@ struct typec_port {
struct typec_mux *mux;
const struct typec_capability *cap;
+ const struct typec_operations *ops;
};
#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
@@ -955,7 +956,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr,
return -EOPNOTSUPP;
}
- if (!port->cap->try_role) {
+ if (!port->ops || !port->ops->try_role) {
dev_dbg(dev, "Setting preferred role not supported\n");
return -EOPNOTSUPP;
}
@@ -968,7 +969,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- ret = port->cap->try_role(port->cap, role);
+ ret = port->ops->try_role(port, role);
if (ret)
return ret;
@@ -999,7 +1000,7 @@ static ssize_t data_role_store(struct device *dev,
struct typec_port *port = to_typec_port(dev);
int ret;
- if (!port->cap->dr_set) {
+ if (!port->ops || !port->ops->dr_set) {
dev_dbg(dev, "data role swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1014,7 +1015,7 @@ static ssize_t data_role_store(struct device *dev,
goto unlock_and_ret;
}
- ret = port->cap->dr_set(port->cap, ret);
+ ret = port->ops->dr_set(port, ret);
if (ret)
goto unlock_and_ret;
@@ -1049,7 +1050,7 @@ static ssize_t power_role_store(struct device *dev,
return -EOPNOTSUPP;
}
- if (!port->cap->pr_set) {
+ if (!port->ops || !port->ops->pr_set) {
dev_dbg(dev, "power role swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1071,7 +1072,7 @@ static ssize_t power_role_store(struct device *dev,
goto unlock_and_ret;
}
- ret = port->cap->pr_set(port->cap, ret);
+ ret = port->ops->pr_set(port, ret);
if (ret)
goto unlock_and_ret;
@@ -1102,7 +1103,8 @@ port_type_store(struct device *dev, struct device_attribute *attr,
int ret;
enum typec_port_type type;
- if (!port->cap->port_type_set || port->cap->type != TYPEC_PORT_DRP) {
+ if (port->cap->type != TYPEC_PORT_DRP ||
+ !port->ops || !port->ops->port_type_set) {
dev_dbg(dev, "changing port type not supported\n");
return -EOPNOTSUPP;
}
@@ -1119,7 +1121,7 @@ port_type_store(struct device *dev, struct device_attribute *attr,
goto unlock_and_ret;
}
- ret = port->cap->port_type_set(port->cap, type);
+ ret = port->ops->port_type_set(port, type);
if (ret)
goto unlock_and_ret;
@@ -1175,7 +1177,7 @@ static ssize_t vconn_source_store(struct device *dev,
return -EOPNOTSUPP;
}
- if (!port->cap->vconn_set) {
+ if (!port->ops || !port->ops->vconn_set) {
dev_dbg(dev, "VCONN swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1184,7 +1186,7 @@ static ssize_t vconn_source_store(struct device *dev,
if (ret)
return ret;
- ret = port->cap->vconn_set(port->cap, (enum typec_role)source);
+ ret = port->ops->vconn_set(port, (enum typec_role)source);
if (ret)
return ret;
@@ -1278,6 +1280,7 @@ static void typec_release(struct device *dev)
ida_destroy(&port->mode_ids);
typec_switch_put(port->sw);
typec_mux_put(port->mux);
+ kfree(port->cap);
kfree(port);
}
@@ -1487,6 +1490,16 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
/* --------------------------------------- */
/**
+ * typec_get_drvdata - Return private driver data pointer
+ * @port: USB Type-C port
+ */
+void *typec_get_drvdata(struct typec_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+EXPORT_SYMBOL_GPL(typec_get_drvdata);
+
+/**
* typec_port_register_altmode - Register USB Type-C Port Alternate Mode
* @port: USB Type-C Port that supports the alternate mode
* @desc: Description of the alternate mode
@@ -1579,7 +1592,7 @@ struct typec_port *typec_register_port(struct device *parent,
mutex_init(&port->port_type_lock);
port->id = id;
- port->cap = cap;
+ port->ops = cap->ops;
port->port_type = cap->type;
port->prefer_role = cap->prefer_role;
@@ -1589,6 +1602,13 @@ struct typec_port *typec_register_port(struct device *parent,
port->dev.fwnode = cap->fwnode;
port->dev.type = &typec_port_dev_type;
dev_set_name(&port->dev, "port%d", id);
+ dev_set_drvdata(&port->dev, cap->driver_data);
+
+ port->cap = kmemdup(cap, sizeof(*cap), GFP_KERNEL);
+ if (!port->cap) {
+ put_device(&port->dev);
+ return ERR_PTR(-ENOMEM);
+ }
port->sw = typec_switch_get(&port->dev);
if (IS_ERR(port->sw)) {
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
new file mode 100644
index 000000000000..323dfa8160ab
--- /dev/null
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * TI HD3SS3220 Type-C DRP Port Controller Driver
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/usb/role.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/usb/typec.h>
+#include <linux/delay.h>
+
+#define HD3SS3220_REG_CN_STAT_CTRL 0x09
+#define HD3SS3220_REG_GEN_CTRL 0x0A
+#define HD3SS3220_REG_DEV_REV 0xA0
+
+/* Register HD3SS3220_REG_CN_STAT_CTRL */
+#define HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CTRL_AS_DFP BIT(6)
+#define HD3SS3220_REG_CN_STAT_CTRL_AS_UFP BIT(7)
+#define HD3SS3220_REG_CN_STAT_CTRL_TO_ACCESSORY (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS BIT(4)
+
+/* Register HD3SS3220_REG_GEN_CTRL */
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK (BIT(2) | BIT(1))
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT 0x00
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK BIT(1)
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC (BIT(2) | BIT(1))
+
+struct hd3ss3220 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct usb_role_switch *role_sw;
+ struct typec_port *port;
+};
+
+static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
+{
+ return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK,
+ src_pref);
+}
+
+static enum usb_role hd3ss3220_get_attached_state(struct hd3ss3220 *hd3ss3220)
+{
+ unsigned int reg_val;
+ enum usb_role attached_state;
+ int ret;
+
+ ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL,
+ &reg_val);
+ if (ret < 0)
+ return ret;
+
+ switch (reg_val & HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK) {
+ case HD3SS3220_REG_CN_STAT_CTRL_AS_DFP:
+ attached_state = USB_ROLE_HOST;
+ break;
+ case HD3SS3220_REG_CN_STAT_CTRL_AS_UFP:
+ attached_state = USB_ROLE_DEVICE;
+ break;
+ default:
+ attached_state = USB_ROLE_NONE;
+ break;
+ }
+
+ return attached_state;
+}
+
+static int hd3ss3220_dr_set(struct typec_port *port, enum typec_data_role role)
+{
+ struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
+ enum usb_role role_val;
+ int pref, ret = 0;
+
+ if (role == TYPEC_HOST) {
+ role_val = USB_ROLE_HOST;
+ pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
+ } else {
+ role_val = USB_ROLE_DEVICE;
+ pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
+ }
+
+ ret = hd3ss3220_set_source_pref(hd3ss3220, pref);
+ usleep_range(10, 100);
+
+ usb_role_switch_set_role(hd3ss3220->role_sw, role_val);
+ typec_set_data_role(hd3ss3220->port, role);
+
+ return ret;
+}
+
+static const struct typec_operations hd3ss3220_ops = {
+ .dr_set = hd3ss3220_dr_set
+};
+
+static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
+{
+ enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
+
+ usb_role_switch_set_role(hd3ss3220->role_sw, role_state);
+ if (role_state == USB_ROLE_NONE)
+ hd3ss3220_set_source_pref(hd3ss3220,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+
+ switch (role_state) {
+ case USB_ROLE_HOST:
+ typec_set_data_role(hd3ss3220->port, TYPEC_HOST);
+ break;
+ case USB_ROLE_DEVICE:
+ typec_set_data_role(hd3ss3220->port, TYPEC_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static irqreturn_t hd3ss3220_irq(struct hd3ss3220 *hd3ss3220)
+{
+ int err;
+
+ hd3ss3220_set_role(hd3ss3220);
+ err = regmap_update_bits_base(hd3ss3220->regmap,
+ HD3SS3220_REG_CN_STAT_CTRL,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ NULL, false, true);
+ if (err < 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hd3ss3220_irq_handler(int irq, void *data)
+{
+ struct i2c_client *client = to_i2c_client(data);
+ struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+
+ return hd3ss3220_irq(hd3ss3220);
+}
+
+static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0A,
+};
+
+static int hd3ss3220_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct typec_capability typec_cap = { };
+ struct hd3ss3220 *hd3ss3220;
+ struct fwnode_handle *connector;
+ int ret;
+ unsigned int data;
+
+ hd3ss3220 = devm_kzalloc(&client->dev, sizeof(struct hd3ss3220),
+ GFP_KERNEL);
+ if (!hd3ss3220)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, hd3ss3220);
+
+ hd3ss3220->dev = &client->dev;
+ hd3ss3220->regmap = devm_regmap_init_i2c(client, &config);
+ if (IS_ERR(hd3ss3220->regmap))
+ return PTR_ERR(hd3ss3220->regmap);
+
+ hd3ss3220_set_source_pref(hd3ss3220,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+ connector = device_get_named_child_node(hd3ss3220->dev, "connector");
+ if (!connector)
+ return -ENODEV;
+
+ hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector);
+ if (IS_ERR(hd3ss3220->role_sw)) {
+ ret = PTR_ERR(hd3ss3220->role_sw);
+ goto err_put_fwnode;
+ }
+
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = hd3ss3220;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
+ typec_cap.ops = &hd3ss3220_ops;
+ typec_cap.fwnode = connector;
+
+ hd3ss3220->port = typec_register_port(&client->dev, &typec_cap);
+ if (IS_ERR(hd3ss3220->port)) {
+ ret = PTR_ERR(hd3ss3220->port);
+ goto err_put_role;
+ }
+
+ hd3ss3220_set_role(hd3ss3220);
+ ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL, &data);
+ if (ret < 0)
+ goto err_unreg_port;
+
+ if (data & HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS) {
+ ret = regmap_write(hd3ss3220->regmap,
+ HD3SS3220_REG_CN_STAT_CTRL,
+ data | HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS);
+ if (ret < 0)
+ goto err_unreg_port;
+ }
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ hd3ss3220_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hd3ss3220", &client->dev);
+ if (ret)
+ goto err_unreg_port;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, HD3SS3220_REG_DEV_REV);
+ if (ret < 0)
+ goto err_unreg_port;
+
+ fwnode_handle_put(connector);
+
+ dev_info(&client->dev, "probed revision=0x%x\n", ret);
+
+ return 0;
+err_unreg_port:
+ typec_unregister_port(hd3ss3220->port);
+err_put_role:
+ usb_role_switch_put(hd3ss3220->role_sw);
+err_put_fwnode:
+ fwnode_handle_put(connector);
+
+ return ret;
+}
+
+static int hd3ss3220_remove(struct i2c_client *client)
+{
+ struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+
+ typec_unregister_port(hd3ss3220->port);
+ usb_role_switch_put(hd3ss3220->role_sw);
+
+ return 0;
+}
+
+static const struct of_device_id dev_ids[] = {
+ { .compatible = "ti,hd3ss3220"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver hd3ss3220_driver = {
+ .driver = {
+ .name = "hd3ss3220",
+ .of_match_table = of_match_ptr(dev_ids),
+ },
+ .probe = hd3ss3220_probe,
+ .remove = hd3ss3220_remove,
+};
+
+module_i2c_driver(hd3ss3220_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das@bp.renesas.com>");
+MODULE_DESCRIPTION("TI HD3SS3220 DRP Port Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 5f61d9977a15..56fc356bc55c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -380,9 +380,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SNK_UNATTACHED;
else if (port->try_role == TYPEC_SOURCE)
return SRC_UNATTACHED;
- else if (port->tcpc->config &&
- port->tcpc->config->default_role == TYPEC_SINK)
- return SNK_UNATTACHED;
/* Fall through to return SRC_UNATTACHED */
} else if (port->port_type == TYPEC_PORT_SNK) {
return SNK_UNATTACHED;
@@ -390,12 +387,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SRC_UNATTACHED;
}
-static inline
-struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
-{
- return container_of(cap, struct tcpm_port, typec_caps);
-}
-
static bool tcpm_port_is_disconnected(struct tcpm_port *port)
{
return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
@@ -3970,10 +3961,9 @@ void tcpm_pd_hard_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
-static int tcpm_dr_set(const struct typec_capability *cap,
- enum typec_data_role data)
+static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4038,10 +4028,9 @@ swap_unlock:
return ret;
}
-static int tcpm_pr_set(const struct typec_capability *cap,
- enum typec_role role)
+static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4082,10 +4071,9 @@ swap_unlock:
return ret;
}
-static int tcpm_vconn_set(const struct typec_capability *cap,
- enum typec_role role)
+static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4122,16 +4110,16 @@ swap_unlock:
return ret;
}
-static int tcpm_try_role(const struct typec_capability *cap, int role)
+static int tcpm_try_role(struct typec_port *p, int role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
struct tcpc_dev *tcpc = port->tcpc;
int ret = 0;
mutex_lock(&port->lock);
if (tcpc->try_role)
ret = tcpc->try_role(tcpc, role);
- if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
+ if (!ret)
port->try_role = role;
port->try_src_count = 0;
port->try_snk_count = 0;
@@ -4331,10 +4319,9 @@ static void tcpm_init(struct tcpm_port *port)
tcpm_set_state(port, PORT_RESET, 0);
}
-static int tcpm_port_type_set(const struct typec_capability *cap,
- enum typec_port_type type)
+static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
mutex_lock(&port->lock);
if (type == port->port_type)
@@ -4359,6 +4346,14 @@ port_unlock:
return 0;
}
+static const struct typec_operations tcpm_ops = {
+ .try_role = tcpm_try_role,
+ .dr_set = tcpm_dr_set,
+ .pr_set = tcpm_pr_set,
+ .vconn_set = tcpm_vconn_set,
+ .port_type_set = tcpm_port_type_set
+};
+
void tcpm_tcpc_reset(struct tcpm_port *port)
{
mutex_lock(&port->lock);
@@ -4368,34 +4363,6 @@ void tcpm_tcpc_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
-static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
- unsigned int nr_pdo)
-{
- unsigned int i;
-
- if (nr_pdo > PDO_MAX_OBJECTS)
- nr_pdo = PDO_MAX_OBJECTS;
-
- for (i = 0; i < nr_pdo; i++)
- dest_pdo[i] = src_pdo[i];
-
- return nr_pdo;
-}
-
-static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
- unsigned int nr_vdo)
-{
- unsigned int i;
-
- if (nr_vdo > VDO_MAX_OBJECTS)
- nr_vdo = VDO_MAX_OBJECTS;
-
- for (i = 0; i < nr_vdo; i++)
- dest_vdo[i] = src_vdo[i];
-
- return nr_vdo;
-}
-
static int tcpm_fw_get_caps(struct tcpm_port *port,
struct fwnode_handle *fwnode)
{
@@ -4698,35 +4665,10 @@ static int devm_tcpm_psy_register(struct tcpm_port *port)
return PTR_ERR_OR_ZERO(port->psy);
}
-static int tcpm_copy_caps(struct tcpm_port *port,
- const struct tcpc_config *tcfg)
-{
- if (tcpm_validate_caps(port, tcfg->src_pdo, tcfg->nr_src_pdo) ||
- tcpm_validate_caps(port, tcfg->snk_pdo, tcfg->nr_snk_pdo))
- return -EINVAL;
-
- port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcfg->src_pdo,
- tcfg->nr_src_pdo);
- port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcfg->snk_pdo,
- tcfg->nr_snk_pdo);
-
- port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcfg->snk_vdo,
- tcfg->nr_snk_vdo);
-
- port->operating_snk_mw = tcfg->operating_snk_mw;
-
- port->typec_caps.prefer_role = tcfg->default_role;
- port->typec_caps.type = tcfg->type;
- port->typec_caps.data = tcfg->data;
- port->self_powered = tcfg->self_powered;
-
- return 0;
-}
-
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
- int i, err;
+ int err;
if (!dev || !tcpc ||
!tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
@@ -4759,24 +4701,16 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
tcpm_debugfs_init(port);
err = tcpm_fw_get_caps(port, tcpc->fwnode);
- if ((err < 0) && tcpc->config)
- err = tcpm_copy_caps(port, tcpc->config);
if (err < 0)
goto out_destroy_wq;
- if (!tcpc->config || !tcpc->config->try_role_hw)
- port->try_role = port->typec_caps.prefer_role;
- else
- port->try_role = TYPEC_NO_PREFERRED_ROLE;
+ port->try_role = port->typec_caps.prefer_role;
port->typec_caps.fwnode = tcpc->fwnode;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
- port->typec_caps.dr_set = tcpm_dr_set;
- port->typec_caps.pr_set = tcpm_pr_set;
- port->typec_caps.vconn_set = tcpm_vconn_set;
- port->typec_caps.try_role = tcpm_try_role;
- port->typec_caps.port_type_set = tcpm_port_type_set;
+ port->typec_caps.driver_data = port;
+ port->typec_caps.ops = &tcpm_ops;
port->partner_desc.identity = &port->partner_ident;
port->port_type = port->typec_caps.type;
@@ -4797,29 +4731,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_role_sw_put;
}
- if (tcpc->config && tcpc->config->alt_modes) {
- const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
-
- i = 0;
- while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
- struct typec_altmode *alt;
-
- alt = typec_port_register_altmode(port->typec_port,
- paltmode);
- if (IS_ERR(alt)) {
- tcpm_log(port,
- "%s: failed to register port alternate mode 0x%x",
- dev_name(dev), paltmode->svid);
- break;
- }
- typec_altmode_set_drvdata(alt, port);
- alt->ops = &tcpm_altmode_ops;
- port->port_altmode[i] = alt;
- i++;
- paltmode++;
- }
- }
-
mutex_lock(&port->lock);
tcpm_init(port);
mutex_unlock(&port->lock);
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index a38d1409f15b..0698addd1185 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -94,7 +94,6 @@ struct tps6598x {
struct typec_port *port;
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
- struct typec_capability typec_cap;
};
/*
@@ -307,11 +306,10 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
return 0;
}
-static int
-tps6598x_dr_set(const struct typec_capability *cap, enum typec_data_role role)
+static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role)
{
- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
const char *cmd = (role == TYPEC_DEVICE) ? "SWUF" : "SWDF";
+ struct tps6598x *tps = typec_get_drvdata(port);
u32 status;
int ret;
@@ -338,11 +336,10 @@ out_unlock:
return ret;
}
-static int
-tps6598x_pr_set(const struct typec_capability *cap, enum typec_role role)
+static int tps6598x_pr_set(struct typec_port *port, enum typec_role role)
{
- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
const char *cmd = (role == TYPEC_SINK) ? "SWSk" : "SWSr";
+ struct tps6598x *tps = typec_get_drvdata(port);
u32 status;
int ret;
@@ -369,6 +366,11 @@ out_unlock:
return ret;
}
+static const struct typec_operations tps6598x_ops = {
+ .dr_set = tps6598x_dr_set,
+ .pr_set = tps6598x_pr_set,
+};
+
static irqreturn_t tps6598x_interrupt(int irq, void *data)
{
struct tps6598x *tps = data;
@@ -448,6 +450,7 @@ static const struct regmap_config tps6598x_regmap_config = {
static int tps6598x_probe(struct i2c_client *client)
{
+ struct typec_capability typec_cap = { };
struct tps6598x *tps;
u32 status;
u32 conf;
@@ -492,40 +495,40 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- tps->typec_cap.revision = USB_TYPEC_REV_1_2;
- tps->typec_cap.pd_revision = 0x200;
- tps->typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
- tps->typec_cap.pr_set = tps6598x_pr_set;
- tps->typec_cap.dr_set = tps6598x_dr_set;
+ typec_cap.revision = USB_TYPEC_REV_1_2;
+ typec_cap.pd_revision = 0x200;
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = tps;
+ typec_cap.ops = &tps6598x_ops;
switch (TPS_SYSCONF_PORTINFO(conf)) {
case TPS_PORTINFO_SINK_ACCESSORY:
case TPS_PORTINFO_SINK:
- tps->typec_cap.type = TYPEC_PORT_SNK;
- tps->typec_cap.data = TYPEC_PORT_UFP;
+ typec_cap.type = TYPEC_PORT_SNK;
+ typec_cap.data = TYPEC_PORT_UFP;
break;
case TPS_PORTINFO_DRP_UFP_DRD:
case TPS_PORTINFO_DRP_DFP_DRD:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_DRD;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
break;
case TPS_PORTINFO_DRP_UFP:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_UFP;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_UFP;
break;
case TPS_PORTINFO_DRP_DFP:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_DFP;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DFP;
break;
case TPS_PORTINFO_SOURCE:
- tps->typec_cap.type = TYPEC_PORT_SRC;
- tps->typec_cap.data = TYPEC_PORT_DFP;
+ typec_cap.type = TYPEC_PORT_SRC;
+ typec_cap.data = TYPEC_PORT_DFP;
break;
default:
return -ENODEV;
}
- tps->port = typec_register_port(&client->dev, &tps->typec_cap);
+ tps->port = typec_register_port(&client->dev, &typec_cap);
if (IS_ERR(tps->port))
return PTR_ERR(tps->port);
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index d99700cb4dca..d4d5189edfb8 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -48,7 +48,8 @@ struct ucsi_dp {
static int ucsi_displayport_enter(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
- struct ucsi_control ctrl;
+ struct ucsi *ucsi = dp->con->ucsi;
+ u64 command;
u8 cur = 0;
int ret;
@@ -59,25 +60,21 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
dev_warn(&p->dev,
"firmware doesn't support alternate mode overriding\n");
- mutex_unlock(&dp->con->lock);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto err_unlock;
}
- UCSI_CMD_GET_CURRENT_CAM(ctrl, dp->con->num);
- ret = ucsi_send_command(dp->con->ucsi, &ctrl, &cur, sizeof(cur));
+ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(dp->con->num);
+ ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
- if (dp->con->ucsi->ppm->data->version > 0x0100) {
- mutex_unlock(&dp->con->lock);
- return ret;
- }
+ if (ucsi->version > 0x0100)
+ goto err_unlock;
cur = 0xff;
}
if (cur != 0xff) {
- mutex_unlock(&dp->con->lock);
- if (dp->con->port_altmode[cur] == alt)
- return 0;
- return -EBUSY;
+ ret = dp->con->port_altmode[cur] == alt ? 0 : -EBUSY;
+ goto err_unlock;
}
/*
@@ -94,16 +91,17 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
dp->vdo_size = 1;
schedule_work(&dp->work);
-
+ ret = 0;
+err_unlock:
mutex_unlock(&dp->con->lock);
- return 0;
+ return ret;
}
static int ucsi_displayport_exit(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
- struct ucsi_control ctrl;
+ u64 command;
int ret = 0;
mutex_lock(&dp->con->lock);
@@ -117,8 +115,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
goto out_unlock;
}
- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
- ret = ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
+ ret = ucsi_send_command(dp->con->ucsi, command, NULL, 0);
if (ret < 0)
goto out_unlock;
@@ -172,14 +170,14 @@ static int ucsi_displayport_status_update(struct ucsi_dp *dp)
static int ucsi_displayport_configure(struct ucsi_dp *dp)
{
u32 pins = DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
- struct ucsi_control ctrl;
+ u64 command;
if (!dp->override)
return 0;
- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
+ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
- return ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(dp->con->ucsi, command, NULL, 0);
}
static int ucsi_displayport_vdm(struct typec_altmode *alt,
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index 1dabafb74320..48ad1dc1b1b2 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -33,17 +33,6 @@ const char *ucsi_cmd_str(u64 raw_cmd)
return ucsi_cmd_strs[(cmd >= ARRAY_SIZE(ucsi_cmd_strs)) ? 0 : cmd];
}
-static const char * const ucsi_ack_strs[] = {
- [0] = "",
- [UCSI_ACK_EVENT] = "event",
- [UCSI_ACK_CMD] = "command",
-};
-
-const char *ucsi_ack_str(u8 ack)
-{
- return ucsi_ack_strs[(ack >= ARRAY_SIZE(ucsi_ack_strs)) ? 0 : ack];
-}
-
const char *ucsi_cci_str(u32 cci)
{
if (cci & GENMASK(7, 0)) {
diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h
index 783ec9c72055..a0d3a934d3d9 100644
--- a/drivers/usb/typec/ucsi/trace.h
+++ b/drivers/usb/typec/ucsi/trace.h
@@ -10,54 +10,18 @@
#include <linux/usb/typec_altmode.h>
const char *ucsi_cmd_str(u64 raw_cmd);
-const char *ucsi_ack_str(u8 ack);
const char *ucsi_cci_str(u32 cci);
const char *ucsi_recipient_str(u8 recipient);
-DECLARE_EVENT_CLASS(ucsi_log_ack,
- TP_PROTO(u8 ack),
- TP_ARGS(ack),
- TP_STRUCT__entry(
- __field(u8, ack)
- ),
- TP_fast_assign(
- __entry->ack = ack;
- ),
- TP_printk("ACK %s", ucsi_ack_str(__entry->ack))
-);
-
-DEFINE_EVENT(ucsi_log_ack, ucsi_ack,
- TP_PROTO(u8 ack),
- TP_ARGS(ack)
-);
-
-DECLARE_EVENT_CLASS(ucsi_log_control,
- TP_PROTO(struct ucsi_control *ctrl),
- TP_ARGS(ctrl),
- TP_STRUCT__entry(
- __field(u64, ctrl)
- ),
- TP_fast_assign(
- __entry->ctrl = ctrl->raw_cmd;
- ),
- TP_printk("control=%08llx (%s)", __entry->ctrl,
- ucsi_cmd_str(__entry->ctrl))
-);
-
-DEFINE_EVENT(ucsi_log_control, ucsi_command,
- TP_PROTO(struct ucsi_control *ctrl),
- TP_ARGS(ctrl)
-);
-
DECLARE_EVENT_CLASS(ucsi_log_command,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret),
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret),
TP_STRUCT__entry(
__field(u64, ctrl)
__field(int, ret)
),
TP_fast_assign(
- __entry->ctrl = ctrl->raw_cmd;
+ __entry->ctrl = command;
__entry->ret = ret;
),
TP_printk("%s -> %s (err=%d)", ucsi_cmd_str(__entry->ctrl),
@@ -66,30 +30,13 @@ DECLARE_EVENT_CLASS(ucsi_log_command,
);
DEFINE_EVENT(ucsi_log_command, ucsi_run_command,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret)
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret)
);
DEFINE_EVENT(ucsi_log_command, ucsi_reset_ppm,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret)
-);
-
-DECLARE_EVENT_CLASS(ucsi_log_cci,
- TP_PROTO(u32 cci),
- TP_ARGS(cci),
- TP_STRUCT__entry(
- __field(u32, cci)
- ),
- TP_fast_assign(
- __entry->cci = cci;
- ),
- TP_printk("CCI=%08x %s", __entry->cci, ucsi_cci_str(__entry->cci))
-);
-
-DEFINE_EVENT(ucsi_log_cci, ucsi_notify,
- TP_PROTO(u32 cci),
- TP_ARGS(cci)
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret)
);
DECLARE_EVENT_CLASS(ucsi_log_connector_status,
@@ -109,13 +56,13 @@ DECLARE_EVENT_CLASS(ucsi_log_connector_status,
TP_fast_assign(
__entry->port = port - 1;
__entry->change = status->change;
- __entry->opmode = status->pwr_op_mode;
- __entry->connected = status->connected;
- __entry->pwr_dir = status->pwr_dir;
- __entry->partner_flags = status->partner_flags;
- __entry->partner_type = status->partner_type;
+ __entry->opmode = UCSI_CONSTAT_PWR_OPMODE(status->flags);
+ __entry->connected = !!(status->flags & UCSI_CONSTAT_CONNECTED);
+ __entry->pwr_dir = !!(status->flags & UCSI_CONSTAT_PWR_DIR);
+ __entry->partner_flags = UCSI_CONSTAT_PARTNER_FLAGS(status->flags);
+ __entry->partner_type = UCSI_CONSTAT_PARTNER_TYPE(status->flags);
__entry->request_data_obj = status->request_data_obj;
- __entry->bc_status = status->bc_status;
+ __entry->bc_status = UCSI_CONSTAT_BC_STATUS(status->pwr_status);
),
TP_printk("port%d status: change=%04x, opmode=%x, connected=%d, "
"sourcing=%d, partner_flags=%x, partner_type=%x, "
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index ba288b964dc8..4459bc68aa33 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -17,9 +17,6 @@
#include "ucsi.h"
#include "trace.h"
-#define to_ucsi_connector(_cap_) container_of(_cap_, struct ucsi_connector, \
- typec_cap)
-
/*
* UCSI_TIMEOUT_MS - PPM communication timeout
*
@@ -39,169 +36,148 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
-static inline int ucsi_sync(struct ucsi *ucsi)
+static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
- if (ucsi->ppm && ucsi->ppm->sync)
- return ucsi->ppm->sync(ucsi->ppm);
- return 0;
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_COMMAND_COMPLETE;
+
+ return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+}
+
+static int ucsi_acknowledge_connector_change(struct ucsi *ucsi)
+{
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
+
+ return ucsi->ops->async_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
}
-static int ucsi_command(struct ucsi *ucsi, struct ucsi_control *ctrl)
+static int ucsi_exec_command(struct ucsi *ucsi, u64 command);
+
+static int ucsi_read_error(struct ucsi *ucsi)
{
+ u16 error;
int ret;
- trace_ucsi_command(ctrl);
+ /* Acknowledge the command that failed */
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
- set_bit(COMMAND_PENDING, &ucsi->flags);
+ ret = ucsi_exec_command(ucsi, UCSI_GET_ERROR_STATUS);
+ if (ret < 0)
+ return ret;
- ret = ucsi->ppm->cmd(ucsi->ppm, ctrl);
+ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
if (ret)
- goto err_clear_flag;
+ return ret;
- if (!wait_for_completion_timeout(&ucsi->complete,
- msecs_to_jiffies(UCSI_TIMEOUT_MS))) {
- dev_warn(ucsi->dev, "PPM NOT RESPONDING\n");
- ret = -ETIMEDOUT;
+ switch (error) {
+ case UCSI_ERROR_INCOMPATIBLE_PARTNER:
+ return -EOPNOTSUPP;
+ case UCSI_ERROR_CC_COMMUNICATION_ERR:
+ return -ECOMM;
+ case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
+ return -EPROTO;
+ case UCSI_ERROR_DEAD_BATTERY:
+ dev_warn(ucsi->dev, "Dead battery condition!\n");
+ return -EPERM;
+ case UCSI_ERROR_INVALID_CON_NUM:
+ case UCSI_ERROR_UNREGONIZED_CMD:
+ case UCSI_ERROR_INVALID_CMD_ARGUMENT:
+ dev_err(ucsi->dev, "possible UCSI driver bug %u\n", error);
+ return -EINVAL;
+ case UCSI_ERROR_OVERCURRENT:
+ dev_warn(ucsi->dev, "Overcurrent condition\n");
+ break;
+ case UCSI_ERROR_PARTNER_REJECTED_SWAP:
+ dev_warn(ucsi->dev, "Partner rejected swap\n");
+ break;
+ case UCSI_ERROR_HARD_RESET:
+ dev_warn(ucsi->dev, "Hard reset occurred\n");
+ break;
+ case UCSI_ERROR_PPM_POLICY_CONFLICT:
+ dev_warn(ucsi->dev, "PPM Policy conflict\n");
+ break;
+ case UCSI_ERROR_SWAP_REJECTED:
+ dev_warn(ucsi->dev, "Swap rejected\n");
+ break;
+ case UCSI_ERROR_UNDEFINED:
+ default:
+ dev_err(ucsi->dev, "unknown error %u\n", error);
+ break;
}
-err_clear_flag:
- clear_bit(COMMAND_PENDING, &ucsi->flags);
-
- return ret;
+ return -EIO;
}
-static int ucsi_ack(struct ucsi *ucsi, u8 ack)
+static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
{
- struct ucsi_control ctrl;
+ u32 cci;
int ret;
- trace_ucsi_ack(ack);
-
- set_bit(ACK_PENDING, &ucsi->flags);
+ ret = ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- UCSI_CMD_ACK(ctrl, ack);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
if (ret)
- goto out_clear_bit;
+ return ret;
- /* Waiting for ACK with ACK CMD, but not with EVENT for now */
- if (ack == UCSI_ACK_EVENT)
- goto out_clear_bit;
+ if (cci & UCSI_CCI_BUSY)
+ return -EBUSY;
- if (!wait_for_completion_timeout(&ucsi->complete,
- msecs_to_jiffies(UCSI_TIMEOUT_MS)))
- ret = -ETIMEDOUT;
+ if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
+ return -EIO;
-out_clear_bit:
- clear_bit(ACK_PENDING, &ucsi->flags);
+ if (cci & UCSI_CCI_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
- if (ret)
- dev_err(ucsi->dev, "%s: failed\n", __func__);
+ if (cci & UCSI_CCI_ERROR) {
+ if (cmd == UCSI_GET_ERROR_STATUS)
+ return -EIO;
+ return ucsi_read_error(ucsi);
+ }
- return ret;
+ return UCSI_CCI_LENGTH(cci);
}
-static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+static int ucsi_run_command(struct ucsi *ucsi, u64 command,
void *data, size_t size)
{
- struct ucsi_control _ctrl;
- u8 data_length;
- u16 error;
+ u8 length;
int ret;
- ret = ucsi_command(ucsi, ctrl);
- if (ret)
- goto err;
-
- switch (ucsi->status) {
- case UCSI_IDLE:
- ret = ucsi_sync(ucsi);
- if (ret)
- dev_warn(ucsi->dev, "%s: sync failed\n", __func__);
-
- if (data)
- memcpy(data, ucsi->ppm->data->message_in, size);
-
- data_length = ucsi->ppm->data->cci.data_length;
-
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
- if (!ret)
- ret = data_length;
- break;
- case UCSI_BUSY:
- /* The caller decides whether to cancel or not */
- ret = -EBUSY;
- break;
- case UCSI_ERROR:
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
- if (ret)
- break;
-
- _ctrl.raw_cmd = 0;
- _ctrl.cmd.cmd = UCSI_GET_ERROR_STATUS;
- ret = ucsi_command(ucsi, &_ctrl);
- if (ret) {
- dev_err(ucsi->dev, "reading error failed!\n");
- break;
- }
+ ret = ucsi_exec_command(ucsi, command);
+ if (ret < 0)
+ return ret;
- memcpy(&error, ucsi->ppm->data->message_in, sizeof(error));
+ length = ret;
- /* Something has really gone wrong */
- if (WARN_ON(ucsi->status == UCSI_ERROR)) {
- ret = -ENODEV;
- break;
- }
-
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+ if (data) {
+ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
if (ret)
- break;
-
- switch (error) {
- case UCSI_ERROR_INCOMPATIBLE_PARTNER:
- ret = -EOPNOTSUPP;
- break;
- case UCSI_ERROR_CC_COMMUNICATION_ERR:
- ret = -ECOMM;
- break;
- case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
- ret = -EPROTO;
- break;
- case UCSI_ERROR_DEAD_BATTERY:
- dev_warn(ucsi->dev, "Dead battery condition!\n");
- ret = -EPERM;
- break;
- /* The following mean a bug in this driver */
- case UCSI_ERROR_INVALID_CON_NUM:
- case UCSI_ERROR_UNREGONIZED_CMD:
- case UCSI_ERROR_INVALID_CMD_ARGUMENT:
- dev_warn(ucsi->dev,
- "%s: possible UCSI driver bug - error 0x%x\n",
- __func__, error);
- ret = -EINVAL;
- break;
- default:
- dev_warn(ucsi->dev,
- "%s: error without status\n", __func__);
- ret = -EIO;
- break;
- }
- break;
+ return ret;
}
-err:
- trace_ucsi_run_command(ctrl, ret);
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
- return ret;
+ return length;
}
-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
void *retval, size_t size)
{
int ret;
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi_run_command(ucsi, ctrl, retval, size);
+ ret = ucsi_run_command(ucsi, command, retval, size);
mutex_unlock(&ucsi->ppm_lock);
return ret;
@@ -210,11 +186,12 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
int ucsi_resume(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 command;
/* Restore UCSI notification enable mask after system resume */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
- return ucsi_send_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+
+ return ucsi_send_command(ucsi, command, NULL, 0);
}
EXPORT_SYMBOL_GPL(ucsi_resume);
/* -------------------------------------------------------------------------- */
@@ -222,15 +199,15 @@ EXPORT_SYMBOL_GPL(ucsi_resume);
void ucsi_altmode_update_active(struct ucsi_connector *con)
{
const struct typec_altmode *altmode = NULL;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
u8 cur;
int i;
- UCSI_CMD_GET_CURRENT_CAM(ctrl, con->num);
- ret = ucsi_run_command(con->ucsi, &ctrl, &cur, sizeof(cur));
+ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
- if (con->ucsi->ppm->data->version > 0x0100) {
+ if (con->ucsi->version > 0x0100) {
dev_err(con->ucsi->dev,
"GET_CURRENT_CAM command failed\n");
return;
@@ -346,7 +323,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
int max_altmodes = UCSI_MAX_ALTMODES;
struct typec_altmode_desc desc;
struct ucsi_altmode alt[2];
- struct ucsi_control ctrl;
+ u64 command;
int num = 1;
int ret;
int len;
@@ -364,8 +341,11 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
for (i = 0; i < max_altmodes;) {
memset(alt, 0, sizeof(alt));
- UCSI_CMD_GET_ALTERNATE_MODES(ctrl, recipient, con->num, i, 1);
- len = ucsi_run_command(con->ucsi, &ctrl, alt, sizeof(alt));
+ command = UCSI_GET_ALTERNATE_MODES;
+ command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
+ command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_ALTMODE_OFFSET(i);
+ len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt));
if (len <= 0)
return len;
@@ -427,7 +407,7 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
- switch (con->status.pwr_op_mode) {
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
case UCSI_CONSTAT_PWR_OPMODE_PD:
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
break;
@@ -445,6 +425,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
static int ucsi_register_partner(struct ucsi_connector *con)
{
+ u8 pwr_opmode = UCSI_CONSTAT_PWR_OPMODE(con->status.flags);
struct typec_partner_desc desc;
struct typec_partner *partner;
@@ -453,7 +434,7 @@ static int ucsi_register_partner(struct ucsi_connector *con)
memset(&desc, 0, sizeof(desc));
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_DEBUG:
desc.accessory = TYPEC_ACCESSORY_DEBUG;
break;
@@ -464,7 +445,7 @@ static int ucsi_register_partner(struct ucsi_connector *con)
break;
}
- desc.usb_pd = con->status.pwr_op_mode == UCSI_CONSTAT_PWR_OPMODE_PD;
+ desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
partner = typec_register_partner(con->port, &desc);
if (IS_ERR(partner)) {
@@ -496,7 +477,7 @@ static void ucsi_partner_change(struct ucsi_connector *con)
if (!con->partner)
return;
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -521,29 +502,33 @@ static void ucsi_partner_change(struct ucsi_connector *con)
ucsi_altmode_update_active(con);
}
-static void ucsi_connector_change(struct work_struct *work)
+static void ucsi_handle_connector_change(struct work_struct *work)
{
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
work);
struct ucsi *ucsi = con->ucsi;
- struct ucsi_control ctrl;
+ enum typec_role role;
+ u64 command;
int ret;
mutex_lock(&con->lock);
- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_send_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(ucsi, command, &con->status,
+ sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
__func__, ret);
goto out_unlock;
}
+ role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE)
ucsi_pwr_opmode_change(con);
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
- typec_set_pwr_role(con->port, con->status.pwr_dir);
+ typec_set_pwr_role(con->port, role);
/* Complete pending power role swap */
if (!completion_done(&con->complete))
@@ -551,9 +536,9 @@ static void ucsi_connector_change(struct work_struct *work)
}
if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
- typec_set_pwr_role(con->port, con->status.pwr_dir);
+ typec_set_pwr_role(con->port, role);
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -564,7 +549,7 @@ static void ucsi_connector_change(struct work_struct *work)
break;
}
- if (con->status.connected)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED)
ucsi_register_partner(con);
else
ucsi_unregister_partner(con);
@@ -576,14 +561,15 @@ static void ucsi_connector_change(struct work_struct *work)
* Running GET_CAM_SUPPORTED command just to make sure the PPM
* does not get stuck in case it assumes we do so.
*/
- UCSI_CMD_GET_CAM_SUPPORTED(ctrl, con->num);
- ucsi_run_command(con->ucsi, &ctrl, NULL, 0);
+ command = UCSI_GET_CAM_SUPPORTED;
+ command |= UCSI_CONNECTOR_NUMBER(con->num);
+ ucsi_run_command(con->ucsi, command, NULL, 0);
}
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
ucsi_partner_change(con);
- ret = ucsi_ack(ucsi, UCSI_ACK_EVENT);
+ ret = ucsi_acknowledge_connector_change(ucsi);
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
@@ -595,117 +581,83 @@ out_unlock:
}
/**
- * ucsi_notify - PPM notification handler
- * @ucsi: Source UCSI Interface for the notifications
- *
- * Handle notifications from PPM of @ucsi.
+ * ucsi_connector_change - Process Connector Change Event
+ * @ucsi: UCSI Interface
+ * @num: Connector number
*/
-void ucsi_notify(struct ucsi *ucsi)
+void ucsi_connector_change(struct ucsi *ucsi, u8 num)
{
- struct ucsi_cci *cci;
-
- /* There is no requirement to sync here, but no harm either. */
- ucsi_sync(ucsi);
-
- cci = &ucsi->ppm->data->cci;
-
- if (cci->error)
- ucsi->status = UCSI_ERROR;
- else if (cci->busy)
- ucsi->status = UCSI_BUSY;
- else
- ucsi->status = UCSI_IDLE;
+ struct ucsi_connector *con = &ucsi->connector[num - 1];
- if (cci->cmd_complete && test_bit(COMMAND_PENDING, &ucsi->flags)) {
- complete(&ucsi->complete);
- } else if (cci->ack_complete && test_bit(ACK_PENDING, &ucsi->flags)) {
- complete(&ucsi->complete);
- } else if (cci->connector_change) {
- struct ucsi_connector *con;
-
- con = &ucsi->connector[cci->connector_change - 1];
-
- if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
- schedule_work(&con->work);
- }
-
- trace_ucsi_notify(ucsi->ppm->data->raw_cci);
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+ schedule_work(&con->work);
}
-EXPORT_SYMBOL_GPL(ucsi_notify);
+EXPORT_SYMBOL_GPL(ucsi_connector_change);
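With ucsi_notify() gone, a glue driver is now expected to decode the CCI value itself and forward only connector change events to the core. A minimal sketch of such a handler, assuming a hypothetical foo_ucsi glue driver with its own completion and flags (placeholder names; the real examples are ucsi_acpi_notify() and ccg_irq_handler() further down in this patch):

static void foo_handle_notification(struct foo_ucsi *foo)
{
	u32 cci;

	/* Read the CCI register through the driver's ucsi_operations */
	if (foo->ucsi->ops->read(foo->ucsi, UCSI_CCI, &cci, sizeof(cci)))
		return;

	/* Command/ACK completion wakes up the pending sync_write... */
	if (test_bit(COMMAND_PENDING, &foo->flags) &&
	    cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
		complete(&foo->complete);
	/* ...everything else of interest is a connector change */
	else if (UCSI_CCI_CONNECTOR(cci))
		ucsi_connector_change(foo->ucsi, UCSI_CCI_CONNECTOR(cci));
}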
/* -------------------------------------------------------------------------- */
static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
{
- struct ucsi_control ctrl;
+ u64 command;
- UCSI_CMD_CONNECTOR_RESET(ctrl, con, hard);
+ command = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= hard ? UCSI_CONNECTOR_RESET_HARD : 0;
- return ucsi_send_command(con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(con->ucsi, command, NULL, 0);
}
static int ucsi_reset_ppm(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 command = UCSI_PPM_RESET;
unsigned long tmo;
+ u32 cci;
int ret;
- ctrl.raw_cmd = 0;
- ctrl.cmd.cmd = UCSI_PPM_RESET;
- trace_ucsi_command(&ctrl);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
- if (ret)
- goto err;
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ sizeof(command));
+ if (ret < 0)
+ return ret;
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- /* Here sync is critical. */
- ret = ucsi_sync(ucsi);
- if (ret)
- goto err;
+ if (time_is_before_jiffies(tmo))
+ return -ETIMEDOUT;
- if (ucsi->ppm->data->cci.reset_complete)
- break;
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return ret;
/* If the PPM is still doing something else, reset it again. */
- if (ucsi->ppm->data->raw_cci) {
- dev_warn_ratelimited(ucsi->dev,
- "Failed to reset PPM! Trying again..\n");
-
- trace_ucsi_command(&ctrl);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
- if (ret)
- goto err;
+ if (cci & ~UCSI_CCI_RESET_COMPLETE) {
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL,
+ &command,
+ sizeof(command));
+ if (ret < 0)
+ return ret;
}
- /* Letting the PPM settle down. */
msleep(20);
+ } while (!(cci & UCSI_CCI_RESET_COMPLETE));
- ret = -ETIMEDOUT;
- } while (time_is_after_jiffies(tmo));
-
-err:
- trace_ucsi_reset_ppm(&ctrl, ret);
-
- return ret;
+ return 0;
}
-static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
+static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
{
int ret;
- ret = ucsi_send_command(con->ucsi, ctrl, NULL, 0);
+ ret = ucsi_send_command(con->ucsi, command, NULL, 0);
if (ret == -ETIMEDOUT) {
- struct ucsi_control c;
+ u64 c;
/* PPM most likely stopped responding. Resetting everything. */
mutex_lock(&con->ucsi->ppm_lock);
ucsi_reset_ppm(con->ucsi);
mutex_unlock(&con->ucsi->ppm_lock);
- UCSI_CMD_SET_NTFY_ENABLE(c, UCSI_ENABLE_NTFY_ALL);
- ucsi_send_command(con->ucsi, &c, NULL, 0);
+ c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ucsi_send_command(con->ucsi, c, NULL, 0);
ucsi_reset_connector(con, true);
}
@@ -713,11 +665,11 @@ static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
return ret;
}
-static int
-ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
+static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
{
- struct ucsi_connector *con = to_ucsi_connector(cap);
- struct ucsi_control ctrl;
+ struct ucsi_connector *con = typec_get_drvdata(port);
+ u8 partner_type;
+ u64 command;
int ret = 0;
mutex_lock(&con->lock);
@@ -727,14 +679,17 @@ ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
goto out_unlock;
}
- if ((con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
+ partner_type = UCSI_CONSTAT_PARTNER_TYPE(con->status.flags);
+ if ((partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
role == TYPEC_DEVICE) ||
- (con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
+ (partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
role == TYPEC_HOST))
goto out_unlock;
- UCSI_CMD_SET_UOR(ctrl, con, role);
- ret = ucsi_role_cmd(con, &ctrl);
+ command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_SET_UOR_ROLE(role);
+ command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
+ ret = ucsi_role_cmd(con, command);
if (ret < 0)
goto out_unlock;
@@ -748,11 +703,11 @@ out_unlock:
return ret < 0 ? ret : 0;
}
-static int
-ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
+static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
{
- struct ucsi_connector *con = to_ucsi_connector(cap);
- struct ucsi_control ctrl;
+ struct ucsi_connector *con = typec_get_drvdata(port);
+ enum typec_role cur_role;
+ u64 command;
int ret = 0;
mutex_lock(&con->lock);
@@ -762,11 +717,15 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
goto out_unlock;
}
- if (con->status.pwr_dir == role)
+ cur_role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
+ if (cur_role == role)
goto out_unlock;
- UCSI_CMD_SET_PDR(ctrl, con, role);
- ret = ucsi_role_cmd(con, &ctrl);
+ command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_SET_PDR_ROLE(role);
+ command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
+ ret = ucsi_role_cmd(con, command);
if (ret < 0)
goto out_unlock;
@@ -777,7 +736,8 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
}
/* Something has gone wrong while swapping the role */
- if (con->status.pwr_op_mode != UCSI_CONSTAT_PWR_OPMODE_PD) {
+ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
+ UCSI_CONSTAT_PWR_OPMODE_PD) {
ucsi_reset_connector(con, true);
ret = -EPROTO;
}
@@ -788,6 +748,11 @@ out_unlock:
return ret;
}
+static const struct typec_operations ucsi_ops = {
+ .dr_set = ucsi_dr_swap,
+ .pr_set = ucsi_pr_swap
+};
+
static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
{
struct fwnode_handle *fwnode;
@@ -804,18 +769,19 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
- INIT_WORK(&con->work, ucsi_connector_change);
+ INIT_WORK(&con->work, ucsi_handle_connector_change);
init_completion(&con->complete);
mutex_init(&con->lock);
con->num = index + 1;
con->ucsi = ucsi;
/* Get connector capability */
- UCSI_CMD_GET_CONNECTOR_CAPABILITY(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->cap, sizeof(con->cap));
+ command = UCSI_GET_CONNECTOR_CAPABILITY;
+ command |= UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
return ret;
@@ -826,11 +792,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_UFP)
cap->data = TYPEC_PORT_UFP;
- if (con->cap.provider && con->cap.consumer)
+ if ((con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER) &&
+ (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER))
cap->type = TYPEC_PORT_DRP;
- else if (con->cap.provider)
+ else if (con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER)
cap->type = TYPEC_PORT_SRC;
- else if (con->cap.consumer)
+ else if (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER)
cap->type = TYPEC_PORT_SNK;
cap->revision = ucsi->cap.typec_version;
@@ -843,8 +810,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
*accessory = TYPEC_ACCESSORY_DEBUG;
cap->fwnode = ucsi_find_fwnode(con);
- cap->dr_set = ucsi_dr_swap;
- cap->pr_set = ucsi_pr_swap;
+ cap->driver_data = con;
+ cap->ops = &ucsi_ops;
/* Register the connector */
con->port = typec_register_port(ucsi->dev, cap);
@@ -858,17 +825,15 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
con->num);
/* Get the status */
- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(ucsi, command, &con->status,
+ sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
return 0;
}
- ucsi_pwr_opmode_change(con);
- typec_set_pwr_role(con->port, con->status.pwr_dir);
-
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -880,8 +845,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
}
/* Check if there is already something connected */
- if (con->status.connected)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
+ typec_set_pwr_role(con->port,
+ !!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
+ ucsi_pwr_opmode_change(con);
ucsi_register_partner(con);
+ }
if (con->partner) {
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
@@ -898,11 +867,16 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
return 0;
}
-static void ucsi_init(struct work_struct *work)
+/**
+ * ucsi_init - Initialize UCSI interface
+ * @ucsi: UCSI to be initialized
+ *
+ * Registers all ports @ucsi has and enables all notification events.
+ */
+int ucsi_init(struct ucsi *ucsi)
{
- struct ucsi *ucsi = container_of(work, struct ucsi, work);
struct ucsi_connector *con;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
int i;
@@ -916,15 +890,15 @@ static void ucsi_init(struct work_struct *work)
}
/* Enable basic notifications */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE |
- UCSI_ENABLE_NTFY_ERROR);
- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE;
+ command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
/* Get PPM capabilities */
- UCSI_CMD_GET_CAPABILITY(ctrl);
- ret = ucsi_run_command(ucsi, &ctrl, &ucsi->cap, sizeof(ucsi->cap));
+ command = UCSI_GET_CAPABILITY;
+ ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
if (ret < 0)
goto err_reset;
@@ -949,14 +923,14 @@ static void ucsi_init(struct work_struct *work)
}
/* Enable all notifications */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
mutex_unlock(&ucsi->ppm_lock);
- return;
+ return 0;
err_unregister:
for (con = ucsi->connector; con->port; con++) {
@@ -970,59 +944,115 @@ err_reset:
ucsi_reset_ppm(ucsi);
err:
mutex_unlock(&ucsi->ppm_lock);
- dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ucsi_init);
+
+static void ucsi_init_work(struct work_struct *work)
+{
+ struct ucsi *ucsi = container_of(work, struct ucsi, work);
+ int ret;
+
+ ret = ucsi_init(ucsi);
+ if (ret)
+ dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
}
/**
- * ucsi_register_ppm - Register UCSI PPM Interface
- * @dev: Device interface to the PPM
- * @ppm: The PPM interface
- *
- * Allocates UCSI instance, associates it with @ppm and returns it to the
- * caller, and schedules initialization of the interface.
+ * ucsi_get_drvdata - Return private driver data pointer
+ * @ucsi: UCSI interface
+ */
+void *ucsi_get_drvdata(struct ucsi *ucsi)
+{
+ return ucsi->driver_data;
+}
+EXPORT_SYMBOL_GPL(ucsi_get_drvdata);
+
+/**
+ * ucsi_set_drvdata - Assign private driver data pointer
+ * @ucsi: UCSI interface
+ * @data: Private data pointer
*/
-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm)
+void ucsi_set_drvdata(struct ucsi *ucsi, void *data)
+{
+ ucsi->driver_data = data;
+}
+EXPORT_SYMBOL_GPL(ucsi_set_drvdata);
+
+/**
+ * ucsi_create - Allocate UCSI instance
+ * @dev: Device interface to the PPM (Platform Policy Manager)
+ * @ops: I/O routines
+ */
+struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
{
struct ucsi *ucsi;
+ if (!ops || !ops->read || !ops->sync_write || !ops->async_write)
+ return ERR_PTR(-EINVAL);
+
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
if (!ucsi)
return ERR_PTR(-ENOMEM);
- INIT_WORK(&ucsi->work, ucsi_init);
- init_completion(&ucsi->complete);
+ INIT_WORK(&ucsi->work, ucsi_init_work);
mutex_init(&ucsi->ppm_lock);
-
ucsi->dev = dev;
- ucsi->ppm = ppm;
+ ucsi->ops = ops;
+
+ return ucsi;
+}
+EXPORT_SYMBOL_GPL(ucsi_create);
+
+/**
+ * ucsi_destroy - Free UCSI instance
+ * @ucsi: UCSI instance to be freed
+ */
+void ucsi_destroy(struct ucsi *ucsi)
+{
+ kfree(ucsi);
+}
+EXPORT_SYMBOL_GPL(ucsi_destroy);
+
+/**
+ * ucsi_register - Register UCSI interface
+ * @ucsi: UCSI instance
+ */
+int ucsi_register(struct ucsi *ucsi)
+{
+ int ret;
+
+ ret = ucsi->ops->read(ucsi, UCSI_VERSION, &ucsi->version,
+ sizeof(ucsi->version));
+ if (ret)
+ return ret;
+
+ if (!ucsi->version)
+ return -ENODEV;
- /*
- * Communication with the PPM takes a lot of time. It is not reasonable
- * to initialize the driver here. Using a work for now.
- */
queue_work(system_long_wq, &ucsi->work);
- return ucsi;
+ return 0;
}
-EXPORT_SYMBOL_GPL(ucsi_register_ppm);
+EXPORT_SYMBOL_GPL(ucsi_register);
/**
- * ucsi_unregister_ppm - Unregister UCSI PPM Interface
- * @ucsi: struct ucsi associated with the PPM
+ * ucsi_unregister - Unregister UCSI interface
+ * @ucsi: UCSI interface to be unregistered
*
- * Unregister UCSI PPM that was created with ucsi_register().
+ * Unregister UCSI interface that was registered with ucsi_register().
*/
-void ucsi_unregister_ppm(struct ucsi *ucsi)
+void ucsi_unregister(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
int i;
/* Make sure that we are not in the middle of driver initialization */
cancel_work_sync(&ucsi->work);
- /* Disable everything except command complete notification */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE)
- ucsi_send_command(ucsi, &ctrl, NULL, 0);
+ /* Disable notifications */
+ ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
@@ -1032,12 +1062,9 @@ void ucsi_unregister_ppm(struct ucsi *ucsi)
typec_unregister_port(ucsi->connector[i].port);
}
- ucsi_reset_ppm(ucsi);
-
kfree(ucsi->connector);
- kfree(ucsi);
}
-EXPORT_SYMBOL_GPL(ucsi_unregister_ppm);
+EXPORT_SYMBOL_GPL(ucsi_unregister);
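Put together, a glue driver's life cycle under the new API is ucsi_create() → ucsi_set_drvdata() → ucsi_register(), mirrored by ucsi_unregister() → ucsi_destroy() on removal. A rough sketch with placeholder foo_* names (the ucsi_acpi and ucsi_ccg conversions below follow this exact pattern):

static int foo_probe(struct platform_device *pdev)
{
	struct foo_ucsi *foo;
	int ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	init_completion(&foo->complete);

	foo->ucsi = ucsi_create(&pdev->dev, &foo_ucsi_ops);
	if (IS_ERR(foo->ucsi))
		return PTR_ERR(foo->ucsi);

	ucsi_set_drvdata(foo->ucsi, foo);

	ret = ucsi_register(foo->ucsi);
	if (ret) {
		ucsi_destroy(foo->ucsi);
		return ret;
	}

	platform_set_drvdata(pdev, foo);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_ucsi *foo = platform_get_drvdata(pdev);

	ucsi_unregister(foo->ucsi);
	ucsi_destroy(foo->ucsi);
	return 0;
}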
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index de87d0b8319d..8569bbd3762f 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -10,177 +10,55 @@
/* -------------------------------------------------------------------------- */
-/* Command Status and Connector Change Indication (CCI) data structure */
-struct ucsi_cci {
- u8:1; /* reserved */
- u8 connector_change:7;
- u8 data_length;
- u16:9; /* reserved */
- u16 not_supported:1;
- u16 cancel_complete:1;
- u16 reset_complete:1;
- u16 busy:1;
- u16 ack_complete:1;
- u16 error:1;
- u16 cmd_complete:1;
-} __packed;
-
-/* Default fields in CONTROL data structure */
-struct ucsi_command {
- u8 cmd;
- u8 length;
- u64 data:48;
-} __packed;
-
-/* ACK Command structure */
-struct ucsi_ack_cmd {
- u8 cmd;
- u8 length;
- u8 cci_ack:1;
- u8 cmd_ack:1;
- u8:6; /* reserved */
-} __packed;
-
-/* Connector Reset Command structure */
-struct ucsi_con_rst {
- u8 cmd;
- u8 length;
- u8 con_num:7;
- u8 hard_reset:1;
-} __packed;
-
-/* Set USB Operation Mode Command structure */
-struct ucsi_uor_cmd {
- u8 cmd;
- u8 length;
- u16 con_num:7;
- u16 role:3;
-#define UCSI_UOR_ROLE_DFP BIT(0)
-#define UCSI_UOR_ROLE_UFP BIT(1)
-#define UCSI_UOR_ROLE_DRP BIT(2)
- u16:6; /* reserved */
-} __packed;
-
-/* Get Alternate Modes Command structure */
-struct ucsi_altmode_cmd {
- u8 cmd;
- u8 length;
- u8 recipient;
-#define UCSI_RECIPIENT_CON 0
-#define UCSI_RECIPIENT_SOP 1
-#define UCSI_RECIPIENT_SOP_P 2
-#define UCSI_RECIPIENT_SOP_PP 3
- u8 con_num;
- u8 offset;
- u8 num_altmodes;
-} __packed;
+struct ucsi;
-struct ucsi_control {
- union {
- u64 raw_cmd;
- struct ucsi_command cmd;
- struct ucsi_uor_cmd uor;
- struct ucsi_ack_cmd ack;
- struct ucsi_con_rst con_rst;
- struct ucsi_altmode_cmd alt;
- };
+/* UCSI offsets (Bytes) */
+#define UCSI_VERSION 0
+#define UCSI_CCI 4
+#define UCSI_CONTROL 8
+#define UCSI_MESSAGE_IN 16
+#define UCSI_MESSAGE_OUT 32
+
+/* Command Status and Connector Change Indication (CCI) bits */
+#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 0)) >> 1)
+#define UCSI_CCI_LENGTH(_c_) (((_c_) & GENMASK(15, 8)) >> 8)
+#define UCSI_CCI_NOT_SUPPORTED BIT(25)
+#define UCSI_CCI_CANCEL_COMPLETE BIT(26)
+#define UCSI_CCI_RESET_COMPLETE BIT(27)
+#define UCSI_CCI_BUSY BIT(28)
+#define UCSI_CCI_ACK_COMPLETE BIT(29)
+#define UCSI_CCI_ERROR BIT(30)
+#define UCSI_CCI_COMMAND_COMPLETE BIT(31)
+
+/**
+ * struct ucsi_operations - UCSI I/O operations
+ * @read: Read operation
+ * @sync_write: Blocking write operation
+ * @async_write: Non-blocking write operation
+ *
+ * Read and write routines for UCSI interface. @sync_write must wait for the
+ * Command Completion Event from the PPM before returning, and @async_write must
+ * return immediately after sending the data to the PPM.
+ */
+struct ucsi_operations {
+ int (*read)(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len);
+ int (*sync_write)(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len);
+ int (*async_write)(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len);
};
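The @sync_write contract is what lets the core drop its own completion tracking: a typical implementation wraps @async_write and blocks on a driver-private completion that the driver's notification handler signals on Command Complete. A sketch under that assumption, with placeholder foo_* names (compare ucsi_acpi_sync_write() and ucsi_ccg_sync_write() below):

static int foo_sync_write(struct ucsi *ucsi, unsigned int offset,
			  const void *val, size_t val_len)
{
	struct foo_ucsi *foo = ucsi_get_drvdata(ucsi);
	int ret;

	set_bit(COMMAND_PENDING, &foo->flags);

	ret = foo_async_write(ucsi, offset, val, val_len);
	if (ret)
		goto out_clear_bit;

	/* Completed by the notification handler on Command/ACK Complete */
	if (!wait_for_completion_timeout(&foo->complete, msecs_to_jiffies(5000)))
		ret = -ETIMEDOUT;

out_clear_bit:
	clear_bit(COMMAND_PENDING, &foo->flags);

	return ret;
}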
-#define __UCSI_CMD(_ctrl_, _cmd_) \
-{ \
- (_ctrl_).raw_cmd = 0; \
- (_ctrl_).cmd.cmd = _cmd_; \
-}
-
-/* Helper for preparing ucsi_control for CONNECTOR_RESET command. */
-#define UCSI_CMD_CONNECTOR_RESET(_ctrl_, _con_, _hard_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_CONNECTOR_RESET) \
- (_ctrl_).con_rst.con_num = (_con_)->num; \
- (_ctrl_).con_rst.hard_reset = _hard_; \
-}
-
-/* Helper for preparing ucsi_control for ACK_CC_CI command. */
-#define UCSI_CMD_ACK(_ctrl_, _ack_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_ACK_CC_CI) \
- (_ctrl_).ack.cci_ack = ((_ack_) == UCSI_ACK_EVENT); \
- (_ctrl_).ack.cmd_ack = ((_ack_) == UCSI_ACK_CMD); \
-}
-
-/* Helper for preparing ucsi_control for SET_NOTIFY_ENABLE command. */
-#define UCSI_CMD_SET_NTFY_ENABLE(_ctrl_, _ntfys_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_SET_NOTIFICATION_ENABLE) \
- (_ctrl_).cmd.data = _ntfys_; \
-}
-
-/* Helper for preparing ucsi_control for GET_CAPABILITY command. */
-#define UCSI_CMD_GET_CAPABILITY(_ctrl_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CAPABILITY) \
-}
-
-/* Helper for preparing ucsi_control for GET_CONNECTOR_CAPABILITY command. */
-#define UCSI_CMD_GET_CONNECTOR_CAPABILITY(_ctrl_, _con_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_CAPABILITY) \
- (_ctrl_).cmd.data = _con_; \
-}
+struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
+void ucsi_destroy(struct ucsi *ucsi);
+int ucsi_register(struct ucsi *ucsi);
+void ucsi_unregister(struct ucsi *ucsi);
+void *ucsi_get_drvdata(struct ucsi *ucsi);
+void ucsi_set_drvdata(struct ucsi *ucsi, void *data);
-/* Helper for preparing ucsi_control for GET_ALTERNATE_MODES command. */
-#define UCSI_CMD_GET_ALTERNATE_MODES(_ctrl_, _r_, _con_num_, _o_, _num_)\
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_ALTERNATE_MODES) \
- _ctrl_.alt.recipient = (_r_); \
- _ctrl_.alt.con_num = (_con_num_); \
- _ctrl_.alt.offset = (_o_); \
- _ctrl_.alt.num_altmodes = (_num_) - 1; \
-}
+void ucsi_connector_change(struct ucsi *ucsi, u8 num);
-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
-#define UCSI_CMD_GET_CAM_SUPPORTED(_ctrl_, _con_) \
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_CAM_SUPPORTED) \
- _ctrl_.cmd.data = (_con_); \
-}
-
-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
-#define UCSI_CMD_GET_CURRENT_CAM(_ctrl_, _con_) \
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_CURRENT_CAM) \
- _ctrl_.cmd.data = (_con_); \
-}
-
-/* Helper for preparing ucsi_control for GET_CONNECTOR_STATUS command. */
-#define UCSI_CMD_GET_CONNECTOR_STATUS(_ctrl_, _con_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_STATUS) \
- (_ctrl_).cmd.data = _con_; \
-}
-
-#define __UCSI_ROLE(_ctrl_, _cmd_, _con_num_) \
-{ \
- __UCSI_CMD(_ctrl_, _cmd_) \
- (_ctrl_).uor.con_num = _con_num_; \
- (_ctrl_).uor.role = UCSI_UOR_ROLE_DRP; \
-}
-
-/* Helper for preparing ucsi_control for SET_UOR command. */
-#define UCSI_CMD_SET_UOR(_ctrl_, _con_, _role_) \
-{ \
- __UCSI_ROLE(_ctrl_, UCSI_SET_UOR, (_con_)->num) \
- (_ctrl_).uor.role |= (_role_) == TYPEC_HOST ? UCSI_UOR_ROLE_DFP : \
- UCSI_UOR_ROLE_UFP; \
-}
-
-/* Helper for preparing ucsi_control for SET_PDR command. */
-#define UCSI_CMD_SET_PDR(_ctrl_, _con_, _role_) \
-{ \
- __UCSI_ROLE(_ctrl_, UCSI_SET_PDR, (_con_)->num) \
- (_ctrl_).uor.role |= (_role_) == TYPEC_SOURCE ? UCSI_UOR_ROLE_DFP : \
- UCSI_UOR_ROLE_UFP; \
-}
+/* -------------------------------------------------------------------------- */
/* Commands */
#define UCSI_PPM_RESET 0x01
@@ -203,24 +81,49 @@ struct ucsi_control {
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_ERROR_STATUS 0x13
-/* ACK_CC_CI commands */
-#define UCSI_ACK_EVENT 1
-#define UCSI_ACK_CMD 2
-
-/* Bits for SET_NOTIFICATION_ENABLE command */
-#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(0)
-#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(1)
-#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(2)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(5)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(6)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(7)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(8)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(9)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(11)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(12)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(14)
-#define UCSI_ENABLE_NTFY_ERROR BIT(15)
-#define UCSI_ENABLE_NTFY_ALL 0xdbe7
+#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
+
+/* CONNECTOR_RESET command bits */
+#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
+
+/* ACK_CC_CI bits */
+#define UCSI_ACK_CONNECTOR_CHANGE BIT(16)
+#define UCSI_ACK_COMMAND_COMPLETE BIT(17)
+
+/* SET_NOTIFICATION_ENABLE command bits */
+#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16)
+#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17)
+#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26)
+#define UCSI_ENABLE_NTFY_ERROR BIT(27)
+#define UCSI_ENABLE_NTFY_ALL 0xdbe70000
+
+/* SET_UOR command bits */
+#define UCSI_SET_UOR_ROLE(_r_) (((_r_) == TYPEC_HOST ? 1 : 2) << 23)
+#define UCSI_SET_UOR_ACCEPT_ROLE_SWAPS BIT(25)
+
+/* SET_PDR command bits */
+#define UCSI_SET_PDR_ROLE(_r_) (((_r_) == TYPEC_SOURCE ? 1 : 2) << 23)
+#define UCSI_SET_PDR_ACCEPT_ROLE_SWAPS BIT(25)
+
+/* GET_ALTERNATE_MODES command bits */
+#define UCSI_GET_ALTMODE_RECIPIENT(_r_) ((u64)(_r_) << 16)
+#define UCSI_RECIPIENT_CON 0
+#define UCSI_RECIPIENT_SOP 1
+#define UCSI_RECIPIENT_SOP_P 2
+#define UCSI_RECIPIENT_SOP_PP 3
+#define UCSI_GET_ALTMODE_CONNECTOR_NUMBER(_r_) ((u64)(_r_) << 24)
+#define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32)
+#define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40)
+
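With the ucsi_control helper structs gone, every command is a plain 64-bit value: the command code occupies the low byte and per-command fields are OR'd in at their spec-defined offsets (connector number at bit 16, and so on). A small worked example, assuming a hypothetical foo_* helper (the same composition is used throughout ucsi.c above):

static int foo_get_connector_status(struct ucsi *ucsi, u8 connector,
				    struct ucsi_connector_status *status)
{
	u64 command = UCSI_GET_CONNECTOR_STATUS;	/* command code, bits 0-7 */

	command |= UCSI_CONNECTOR_NUMBER(connector);	/* connector field at bit 16 */

	return ucsi_send_command(ucsi, command, status, sizeof(*status));
}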
+/* -------------------------------------------------------------------------- */
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
#define UCSI_ERROR_UNREGONIZED_CMD BIT(0)
@@ -230,6 +133,12 @@ struct ucsi_control {
#define UCSI_ERROR_CC_COMMUNICATION_ERR BIT(4)
#define UCSI_ERROR_DEAD_BATTERY BIT(5)
#define UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL BIT(6)
+#define UCSI_ERROR_OVERCURRENT BIT(7)
+#define UCSI_ERROR_UNDEFINED BIT(8)
+#define UCSI_ERROR_PARTNER_REJECTED_SWAP BIT(9)
+#define UCSI_ERROR_HARD_RESET BIT(10)
+#define UCSI_ERROR_PPM_POLICY_CONFLICT BIT(11)
+#define UCSI_ERROR_SWAP_REJECTED BIT(12)
/* Data structure filled by PPM in response to GET_CAPABILITY command. */
struct ucsi_capability {
@@ -241,8 +150,8 @@ struct ucsi_capability {
#define UCSI_CAP_ATTR_POWER_AC_SUPPLY BIT(8)
#define UCSI_CAP_ATTR_POWER_OTHER BIT(10)
#define UCSI_CAP_ATTR_POWER_VBUS BIT(14)
- u32 num_connectors:8;
- u32 features:24;
+ u8 num_connectors;
+ u8 features;
#define UCSI_CAP_SET_UOM BIT(0)
#define UCSI_CAP_SET_PDM BIT(1)
#define UCSI_CAP_ALT_MODE_DETAILS BIT(2)
@@ -251,8 +160,9 @@ struct ucsi_capability {
#define UCSI_CAP_CABLE_DETAILS BIT(5)
#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS BIT(6)
#define UCSI_CAP_PD_RESET BIT(7)
+ u16 reserved_1;
u8 num_alt_modes;
- u8 reserved;
+ u8 reserved_2;
u16 bc_version;
u16 pd_version;
u16 typec_version;
@@ -269,9 +179,9 @@ struct ucsi_connector_capability {
#define UCSI_CONCAP_OPMODE_USB2 BIT(5)
#define UCSI_CONCAP_OPMODE_USB3 BIT(6)
#define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7)
- u8 provider:1;
- u8 consumer:1;
- u8:6; /* reserved */
+ u8 flags;
+#define UCSI_CONCAP_FLAG_PROVIDER BIT(0)
+#define UCSI_CONCAP_FLAG_CONSUMER BIT(1)
} __packed;
struct ucsi_altmode {
@@ -283,18 +193,17 @@ struct ucsi_altmode {
struct ucsi_cable_property {
u16 speed_supported;
u8 current_capability;
- u8 vbus_in_cable:1;
- u8 active_cable:1;
- u8 directionality:1;
- u8 plug_type:2;
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
-#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
- u8 mode_support:1;
- u8:2; /* reserved */
- u8 latency:4;
- u8:4; /* reserved */
+ u8 flags;
+#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
+#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
+#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
+#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
+#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
+#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
+ u8 latency;
} __packed;
/* Data structure filled by PPM in response to GET_CONNECTOR_STATUS command. */
@@ -311,83 +220,47 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_POWER_DIR_CHANGE BIT(12)
#define UCSI_CONSTAT_CONNECT_CHANGE BIT(14)
#define UCSI_CONSTAT_ERROR BIT(15)
- u16 pwr_op_mode:3;
-#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
-#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
-#define UCSI_CONSTAT_PWR_OPMODE_BC 2
-#define UCSI_CONSTAT_PWR_OPMODE_PD 3
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
- u16 connected:1;
- u16 pwr_dir:1;
- u16 partner_flags:8;
-#define UCSI_CONSTAT_PARTNER_FLAG_USB BIT(0)
-#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE BIT(1)
- u16 partner_type:3;
-#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
-#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
-#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
+ u16 flags;
+#define UCSI_CONSTAT_PWR_OPMODE(_f_) ((_f_) & GENMASK(2, 0))
+#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
+#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
+#define UCSI_CONSTAT_PWR_OPMODE_BC 2
+#define UCSI_CONSTAT_PWR_OPMODE_PD 3
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
+#define UCSI_CONSTAT_CONNECTED BIT(3)
+#define UCSI_CONSTAT_PWR_DIR BIT(4)
+#define UCSI_CONSTAT_PARTNER_FLAGS(_f_) (((_f_) & GENMASK(12, 5)) >> 5)
+#define UCSI_CONSTAT_PARTNER_FLAG_USB 1
+#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE 2
+#define UCSI_CONSTAT_PARTNER_TYPE(_f_) (((_f_) & GENMASK(15, 13)) >> 13)
+#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
+#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
+#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 bc_status:2;
-#define UCSI_CONSTAT_BC_NOT_CHARGING 0
-#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
-#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
-#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
- u8 provider_cap_limit_reason:4;
-#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
-#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
- u8:2; /* reserved */
+ u8 pwr_status;
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(2, 0))
+#define UCSI_CONSTAT_BC_NOT_CHARGING 0
+#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
+#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
+#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
+#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_) & GENMASK(6, 3)) >> 3)
+#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
+#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
} __packed;
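Callers now extract fields from the packed status words with these accessor macros instead of C bitfields; for instance (a sketch only, with a hypothetical foo_* helper mirroring what ucsi_pwr_opmode_change() and ucsi_register_partner() do in ucsi.c):

static bool foo_partner_uses_pd(struct ucsi_connector *con)
{
	if (!(con->status.flags & UCSI_CONSTAT_CONNECTED))
		return false;

	return UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
	       UCSI_CONSTAT_PWR_OPMODE_PD;
}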
/* -------------------------------------------------------------------------- */
-struct ucsi;
-
-struct ucsi_data {
- u16 version;
- u16 reserved;
- union {
- u32 raw_cci;
- struct ucsi_cci cci;
- };
- struct ucsi_control ctrl;
- u32 message_in[4];
- u32 message_out[4];
-} __packed;
-
-/*
- * struct ucsi_ppm - Interface to UCSI Platform Policy Manager
- * @data: memory location to the UCSI data structures
- * @cmd: UCSI command execution routine
- * @sync: Refresh UCSI mailbox (the data structures)
- */
-struct ucsi_ppm {
- struct ucsi_data *data;
- int (*cmd)(struct ucsi_ppm *, struct ucsi_control *);
- int (*sync)(struct ucsi_ppm *);
-};
-
-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm);
-void ucsi_unregister_ppm(struct ucsi *ucsi);
-void ucsi_notify(struct ucsi *ucsi);
-
-/* -------------------------------------------------------------------------- */
-
-enum ucsi_status {
- UCSI_IDLE = 0,
- UCSI_BUSY,
- UCSI_ERROR,
-};
-
struct ucsi {
+ u16 version;
struct device *dev;
- struct ucsi_ppm *ppm;
+ struct driver_data *driver_data;
+
+ const struct ucsi_operations *ops;
- enum ucsi_status status;
- struct completion complete;
struct ucsi_capability cap;
struct ucsi_connector *connector;
@@ -426,7 +299,7 @@ struct ucsi_connector {
struct ucsi_connector_capability cap;
};
-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
void *retval, size_t size);
void ucsi_altmode_update_active(struct ucsi_connector *con);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index a18112a83fae..3f1786170098 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -19,7 +19,9 @@
struct ucsi_acpi {
struct device *dev;
struct ucsi *ucsi;
- struct ucsi_ppm ppm;
+ void __iomem *base;
+ struct completion complete;
+ unsigned long flags;
guid_t guid;
};
@@ -39,27 +41,73 @@ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
return 0;
}
-static int ucsi_acpi_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
{
- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+
+ memcpy(val, (const void __force *)(ua->base + offset), val_len);
+
+ return 0;
+}
+
+static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+ memcpy((void __force *)(ua->base + offset), val, val_len);
return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
}
-static int ucsi_acpi_sync(struct ucsi_ppm *ppm)
+static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ set_bit(COMMAND_PENDING, &ua->flags);
+
+ ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
+ if (ret)
+ goto out_clear_bit;
- return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (!wait_for_completion_timeout(&ua->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
+
+out_clear_bit:
+ clear_bit(COMMAND_PENDING, &ua->flags);
+
+ return ret;
}
+static const struct ucsi_operations ucsi_acpi_ops = {
+ .read = ucsi_acpi_read,
+ .sync_write = ucsi_acpi_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
{
struct ucsi_acpi *ua = data;
+ u32 cci;
+ int ret;
+
+ ret = ucsi_acpi_read(ua->ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return;
- ucsi_notify(ua->ucsi);
+ if (test_bit(COMMAND_PENDING, &ua->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&ua->complete);
+ else if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
}
static int ucsi_acpi_probe(struct platform_device *pdev)
@@ -90,35 +138,39 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
* it can not be requested here, and we can not use
* devm_ioremap_resource().
*/
- ua->ppm.data = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ua->ppm.data)
+ ua->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ua->base)
return -ENOMEM;
- if (!ua->ppm.data->version)
- return -ENODEV;
-
ret = guid_parse(UCSI_DSM_UUID, &ua->guid);
if (ret)
return ret;
- ua->ppm.cmd = ucsi_acpi_cmd;
- ua->ppm.sync = ucsi_acpi_sync;
+ init_completion(&ua->complete);
ua->dev = &pdev->dev;
+ ua->ucsi = ucsi_create(&pdev->dev, &ucsi_acpi_ops);
+ if (IS_ERR(ua->ucsi))
+ return PTR_ERR(ua->ucsi);
+
+ ucsi_set_drvdata(ua->ucsi, ua);
+
status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify, ua);
if (ACPI_FAILURE(status)) {
dev_err(&pdev->dev, "failed to install notify handler\n");
+ ucsi_destroy(ua->ucsi);
return -ENODEV;
}
- ua->ucsi = ucsi_register_ppm(&pdev->dev, &ua->ppm);
- if (IS_ERR(ua->ucsi)) {
+ ret = ucsi_register(ua->ucsi);
+ if (ret) {
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
- return PTR_ERR(ua->ucsi);
+ ucsi_destroy(ua->ucsi);
+ return ret;
}
platform_set_drvdata(pdev, ua);
@@ -130,7 +182,8 @@ static int ucsi_acpi_remove(struct platform_device *pdev)
{
struct ucsi_acpi *ua = platform_get_drvdata(pdev);
- ucsi_unregister_ppm(ua->ucsi);
+ ucsi_unregister(ua->ucsi);
+ ucsi_destroy(ua->ucsi);
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index d772fce51905..3370b3fc37b1 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -176,8 +176,8 @@ struct ccg_resp {
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
- struct ucsi_ppm ppm;
struct i2c_client *client;
+
struct ccg_dev_info info;
/* version info for boot, primary and secondary */
struct version_info version[FW2 + 1];
@@ -196,6 +196,8 @@ struct ucsi_ccg {
/* fw build with vendor information */
u16 fw_build;
struct work_struct pm_work;
+
+ struct completion complete;
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -243,7 +245,7 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
return 0;
}
-static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
{
struct i2c_client *client = uc->client;
unsigned char *buf;
@@ -317,88 +319,85 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
return -ETIMEDOUT;
}
-static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
{
- u8 *ppm = (u8 *)uc->ppm.data;
- int status;
- u16 rab;
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
- status = ccg_write(uc, rab, ppm +
- offsetof(struct ucsi_data, message_out),
- sizeof(uc->ppm.data->message_out));
- if (status < 0)
- return status;
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
- return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
- sizeof(uc->ppm.data->ctrl));
+ return ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len);
}
-static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- u8 *ppm = (u8 *)uc->ppm.data;
- int status;
- u16 rab;
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
- status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
- sizeof(uc->ppm.data->cci));
- if (status < 0)
- return status;
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
- return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
- sizeof(uc->ppm.data->message_in));
+ return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
}
-static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- int status;
- unsigned char data;
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ int ret;
- status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
- if (status < 0)
- return status;
+ mutex_lock(&uc->lock);
+ pm_runtime_get_sync(uc->dev);
+ set_bit(DEV_CMD_PENDING, &uc->flags);
- return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
-}
+ ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
+ if (ret)
+ goto err_clear_bit;
-static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
-{
- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
- int status;
+ if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
- status = ucsi_ccg_recv_data(uc);
- if (status < 0)
- return status;
+err_clear_bit:
+ clear_bit(DEV_CMD_PENDING, &uc->flags);
+ pm_runtime_put_sync(uc->dev);
+ mutex_unlock(&uc->lock);
- /* ack interrupt to allow next command to run */
- return ucsi_ccg_ack_interrupt(uc);
+ return ret;
}
-static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
-{
- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
-
- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
- return ucsi_ccg_send_data(uc);
-}
+static const struct ucsi_operations ucsi_ccg_ops = {
+ .read = ucsi_ccg_read,
+ .sync_write = ucsi_ccg_sync_write,
+ .async_write = ucsi_ccg_async_write
+};
static irqreturn_t ccg_irq_handler(int irq, void *data)
{
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
struct ucsi_ccg *uc = data;
+ u8 intr_reg;
+ u32 cci;
+ int ret;
+
+ ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
+ if (ret)
+ return ret;
+
+ ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
+ if (ret)
+ goto err_clear_irq;
+
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
+
+ if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&uc->complete);
- ucsi_notify(uc->ucsi);
+err_clear_irq:
+ ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
return IRQ_HANDLED;
}
static void ccg_pm_workaround_work(struct work_struct *pm_work)
{
- struct ucsi_ccg *uc = container_of(pm_work, struct ucsi_ccg, pm_work);
-
- ucsi_notify(uc->ucsi);
+ ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
}
static int get_fw_info(struct ucsi_ccg *uc)
@@ -1027,10 +1026,10 @@ static int ccg_restart(struct ucsi_ccg *uc)
return status;
}
- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
- if (IS_ERR(uc->ucsi)) {
- dev_err(uc->dev, "ucsi_register_ppm failed\n");
- return PTR_ERR(uc->ucsi);
+ status = ucsi_register(uc->ucsi);
+ if (status) {
+ dev_err(uc->dev, "failed to register the interface\n");
+ return status;
}
return 0;
@@ -1047,7 +1046,7 @@ static void ccg_update_firmware(struct work_struct *work)
return;
if (flash_mode != FLASH_NOT_NEEDED) {
- ucsi_unregister_ppm(uc->ucsi);
+ ucsi_unregister(uc->ucsi);
free_irq(uc->irq, uc);
ccg_fw_update(uc, flash_mode);
@@ -1091,21 +1090,15 @@ static int ucsi_ccg_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct ucsi_ccg *uc;
int status;
- u16 rab;
uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
if (!uc)
return -ENOMEM;
- uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
- if (!uc->ppm.data)
- return -ENOMEM;
-
- uc->ppm.cmd = ucsi_ccg_cmd;
- uc->ppm.sync = ucsi_ccg_sync;
uc->dev = dev;
uc->client = client;
mutex_init(&uc->lock);
+ init_completion(&uc->complete);
INIT_WORK(&uc->work, ccg_update_firmware);
INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
@@ -1133,30 +1126,25 @@ static int ucsi_ccg_probe(struct i2c_client *client,
if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
uc->port_num++;
+ uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
+ if (IS_ERR(uc->ucsi))
+ return PTR_ERR(uc->ucsi);
+
+ ucsi_set_drvdata(uc->ucsi, uc);
+
status = request_threaded_irq(client->irq, NULL, ccg_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
dev_name(dev), uc);
if (status < 0) {
dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
- return status;
+ goto out_ucsi_destroy;
}
uc->irq = client->irq;
- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
- if (IS_ERR(uc->ucsi)) {
- dev_err(uc->dev, "ucsi_register_ppm failed\n");
- return PTR_ERR(uc->ucsi);
- }
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
- status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
- offsetof(struct ucsi_data, version),
- sizeof(uc->ppm.data->version));
- if (status < 0) {
- ucsi_unregister_ppm(uc->ucsi);
- return status;
- }
+ status = ucsi_register(uc->ucsi);
+ if (status)
+ goto out_free_irq;
i2c_set_clientdata(client, uc);
@@ -1167,6 +1155,13 @@ static int ucsi_ccg_probe(struct i2c_client *client,
pm_runtime_idle(uc->dev);
return 0;
+
+out_free_irq:
+ free_irq(uc->irq, uc);
+out_ucsi_destroy:
+ ucsi_destroy(uc->ucsi);
+
+ return status;
}
static int ucsi_ccg_remove(struct i2c_client *client)
@@ -1175,8 +1170,9 @@ static int ucsi_ccg_remove(struct i2c_client *client)
cancel_work_sync(&uc->pm_work);
cancel_work_sync(&uc->work);
- ucsi_unregister_ppm(uc->ucsi);
pm_runtime_disable(uc->dev);
+ ucsi_unregister(uc->ucsi);
+ ucsi_destroy(uc->ucsi);
free_irq(uc->irq, uc);
return 0;
diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
index 2f86b28fa3da..7bbae7a08642 100644
--- a/drivers/usb/usbip/Kconfig
+++ b/drivers/usb/usbip/Kconfig
@@ -4,6 +4,7 @@ config USBIP_CORE
tristate "USB/IP support"
depends on NET
select USB_COMMON
+ select SGL_ALLOC
---help---
This enables pushing USB packets over IP to allow remote
machines direct access to USB devices. It provides the
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 66edfeea68fe..e2b019532234 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
if (pipe == -1)
return;
+ /*
+ * Smatch reported the error case where use_sg is true and buf_len is 0.
+	 * In this case, SDEV_EVENT_ERROR_MALLOC is raised and stub_priv will be
+	 * released by the stub event handler and the connection will be shut down.
+ */
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
return;
buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
+ if (use_sg && !buf_len) {
+ dev_err(&udev->dev, "sg buffer with zero length\n");
+ goto err_malloc;
+ }
+
/* allocate urb transfer buffer, if needed */
if (buf_len) {
if (use_sg) {
sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
if (!sgl)
goto err_malloc;
+
+ /* Check if the server's HCD supports SG */
+ if (!udev->bus->sg_tablesize) {
+ /*
+ * If the server's HCD doesn't support SG, break
+ * a single SG request into several URBs and map
+				 * each SG list entry to the corresponding URB
+				 * buffer. The previously allocated SG list is
+				 * stored in priv->sgl (if the server's HCD
+				 * supports SG, the SG list is stored only in
+				 * urb->sg) and it is used as an indicator that
+				 * the server split a single SG request into
+				 * several URBs. Later, priv->sgl is used by
+				 * stub_complete() and stub_send_ret_submit() to
+				 * reassemble the divided URBs.
+ */
+ support_sg = 0;
+ num_urbs = nents;
+ priv->completed_urbs = 0;
+ pdu->u.cmd_submit.transfer_flags &=
+ ~URB_DMA_MAP_SG;
+ }
} else {
buffer = kzalloc(buf_len, GFP_KERNEL);
if (!buffer)
@@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
}
}
- /* Check if the server's HCD supports SG */
- if (use_sg && !udev->bus->sg_tablesize) {
- /*
- * If the server's HCD doesn't support SG, break a single SG
- * request into several URBs and map each SG list entry to
- * corresponding URB buffer. The previously allocated SG
- * list is stored in priv->sgl (If the server's HCD support SG,
- * SG list is stored only in urb->sg) and it is used as an
- * indicator that the server split single SG request into
- * several URBs. Later, priv->sgl is used by stub_complete() and
- * stub_send_ret_submit() to reassemble the divied URBs.
- */
- support_sg = 0;
- num_urbs = nents;
- priv->completed_urbs = 0;
- pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
- }
-
/* allocate urb array */
priv->num_urbs = num_urbs;
priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 36010a82b359..b1c2f6781cb3 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -291,7 +291,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
kfree(iov);
usbip_event_add(&sdev->ud,
SDEV_EVENT_ERROR_TCP);
- return -1;
+ return -1;
}
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 02206162eaa9..379a02c36e37 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -110,13 +110,15 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
struct resource *res;
- int bar;
+ int i;
struct vfio_pci_dummy_resource *dummy_res;
INIT_LIST_HEAD(&vdev->dummy_resources_list);
- for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
- res = vdev->pdev->resource + bar;
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ res = &vdev->pdev->resource[bar];
if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
goto no_mmap;
@@ -399,7 +401,8 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
vfio_config_free(vdev);
- for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ bar = i + PCI_STD_RESOURCES;
if (!vdev->barmap[bar])
continue;
pci_iounmap(pdev, vdev->barmap[bar]);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index f0891bd8444c..90c0b80f8acf 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -450,30 +450,32 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
int i;
- __le32 *bar;
+ __le32 *vbar;
u64 mask;
- bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
+ vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
- for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) {
- if (!pci_resource_start(pdev, i)) {
- *bar = 0; /* Unmapped by host = unimplemented to user */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ if (!pci_resource_start(pdev, bar)) {
+ *vbar = 0; /* Unmapped by host = unimplemented to user */
continue;
}
- mask = ~(pci_resource_len(pdev, i) - 1);
+ mask = ~(pci_resource_len(pdev, bar) - 1);
- *bar &= cpu_to_le32((u32)mask);
- *bar |= vfio_generate_bar_flags(pdev, i);
+ *vbar &= cpu_to_le32((u32)mask);
+ *vbar |= vfio_generate_bar_flags(pdev, bar);
- if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
- bar++;
- *bar &= cpu_to_le32((u32)(mask >> 32));
+ if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ vbar++;
+ *vbar &= cpu_to_le32((u32)(mask >> 32));
i++;
}
}
- bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
+ vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
/*
* NB. REGION_INFO will have reported zero size if we weren't able
@@ -483,14 +485,14 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
- *bar &= cpu_to_le32((u32)mask);
+ *vbar &= cpu_to_le32((u32)mask);
} else if (pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW) {
mask = ~(0x20000 - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
- *bar &= cpu_to_le32((u32)mask);
+ *vbar &= cpu_to_le32((u32)mask);
} else
- *bar = 0;
+ *vbar = 0;
vdev->bardirty = false;
}
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index ee6ee91718a4..8a2c7607d513 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -86,8 +86,8 @@ struct vfio_pci_reflck {
struct vfio_pci_device {
struct pci_dev *pdev;
- void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
- bool bar_mmap_supported[PCI_STD_RESOURCE_END + 1];
+ void __iomem *barmap[PCI_STD_NUM_BARS];
+ bool bar_mmap_supported[PCI_STD_NUM_BARS];
u8 *pci_config_map;
u8 *vconfig;
struct perm_bits *msi_perm;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 388597930b64..c8482624ca34 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1184,15 +1184,6 @@ static long vfio_fops_unl_ioctl(struct file *filep,
return ret;
}
-#ifdef CONFIG_COMPAT
-static long vfio_fops_compat_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
-{
- arg = (unsigned long)compat_ptr(arg);
- return vfio_fops_unl_ioctl(filep, cmd, arg);
-}
-#endif /* CONFIG_COMPAT */
-
static int vfio_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_container *container;
@@ -1275,9 +1266,7 @@ static const struct file_operations vfio_fops = {
.read = vfio_fops_read,
.write = vfio_fops_write,
.unlocked_ioctl = vfio_fops_unl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vfio_fops_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = vfio_fops_mmap,
};
@@ -1556,15 +1545,6 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
return ret;
}
-#ifdef CONFIG_COMPAT
-static long vfio_group_fops_compat_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
-{
- arg = (unsigned long)compat_ptr(arg);
- return vfio_group_fops_unl_ioctl(filep, cmd, arg);
-}
-#endif /* CONFIG_COMPAT */
-
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_group *group;
@@ -1620,9 +1600,7 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
static const struct file_operations vfio_group_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = vfio_group_fops_unl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vfio_group_fops_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vfio_group_fops_open,
.release = vfio_group_fops_release,
};
@@ -1687,24 +1665,13 @@ static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
return device->ops->mmap(device->device_data, vma);
}
-#ifdef CONFIG_COMPAT
-static long vfio_device_fops_compat_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
-{
- arg = (unsigned long)compat_ptr(arg);
- return vfio_device_fops_unl_ioctl(filep, cmd, arg);
-}
-#endif /* CONFIG_COMPAT */
-
static const struct file_operations vfio_device_fops = {
.owner = THIS_MODULE,
.release = vfio_device_fops_release,
.read = vfio_device_fops_read,
.write = vfio_device_fops_write,
.unlocked_ioctl = vfio_device_fops_unl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vfio_device_fops_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = vfio_device_fops_mmap,
};
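
Each of the removed wrappers only converted the compat pointer argument and forwarded to the native handler, which is exactly what the generic compat_ptr_ioctl() helper does. Its behaviour is roughly the sketch below; the real helper is defined once in the VFS code rather than per driver:

```c
/*
 * Rough equivalent of the generic helper.  It is only suitable when the
 * ioctl argument is a pointer and the pointed-to structures have the
 * same layout for native and compat user space.
 */
long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}
```

Drivers whose ioctl structures differ in layout between 32-bit and 64-bit callers still need a hand-written .compat_ioctl; the conversions here only make sense where the plain pointer conversion is sufficient.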
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 1a2dd53caade..e158159671fa 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1751,14 +1751,6 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
@@ -1794,9 +1786,7 @@ static const struct file_operations vhost_net_fops = {
.write_iter = vhost_net_chr_write_iter,
.poll = vhost_net_chr_poll,
.unlocked_ioctl = vhost_net_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_net_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_net_open,
.llseek = noop_llseek,
};
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index a9caf1bc3c3e..0b949a14bce3 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1727,21 +1727,11 @@ vhost_scsi_ioctl(struct file *f,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vhost_scsi_fops = {
.owner = THIS_MODULE,
.release = vhost_scsi_release,
.unlocked_ioctl = vhost_scsi_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_scsi_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_scsi_open,
.llseek = noop_llseek,
};
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 056308008288..e37c92d4d7ad 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -304,21 +304,11 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vhost_test_fops = {
.owner = THIS_MODULE,
.release = vhost_test_release,
.unlocked_ioctl = vhost_test_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_test_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_test_open,
.llseek = noop_llseek,
};
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index dde392b91bb3..50de0642dea6 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -810,23 +810,13 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
.release = vhost_vsock_dev_release,
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_vsock_dev_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice vhost_vsock_misc = {
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 40676be2e46a..403707a3e503 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -99,7 +99,7 @@ config LCD_TOSA
config LCD_HP700
tristate "HP Jornada 700 series LCD Driver"
- depends on SA1100_JORNADA720_SSP && !PREEMPT
+ depends on SA1100_JORNADA720_SSP && !PREEMPTION
default y
help
If you have an HP Jornada 700 series handheld (710/720/728)
@@ -228,7 +228,7 @@ config BACKLIGHT_HP680
config BACKLIGHT_HP700
tristate "HP Jornada 700 series Backlight Driver"
- depends on SA1100_JORNADA720_SSP && !PREEMPT
+ depends on SA1100_JORNADA720_SSP && !PREEMPTION
default y
help
If you have an HP Jornada 700 series,
@@ -282,12 +282,12 @@ config BACKLIGHT_TOSA
If you have a Sharp SL-6000 Zaurus, say Y to enable a driver
for its backlight
-config BACKLIGHT_PM8941_WLED
- tristate "Qualcomm PM8941 WLED Driver"
+config BACKLIGHT_QCOM_WLED
+ tristate "Qualcomm PMIC WLED Driver"
select REGMAP
help
- If you have the Qualcomm PM8941, say Y to enable a driver for the
- WLED block.
+ If you have a Qualcomm PMIC, say Y to enable a driver for the
+ WLED block. Currently it supports PM8941 and PMI8998.
config BACKLIGHT_SAHARA
tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 63c507c07437..6f8777037c37 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -48,8 +48,8 @@ obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
-obj-$(CONFIG_BACKLIGHT_PM8941_WLED) += pm8941-wled.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
+obj-$(CONFIG_BACKLIGHT_QCOM_WLED) += qcom-wled.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_SKY81452) += sky81452-backlight.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 18e053e4716c..75409ddfba3e 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -6,29 +6,23 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/fb.h>
-#include <linux/gpio.h> /* Only for legacy support */
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/gpio_backlight.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
struct gpio_backlight {
- struct device *dev;
struct device *fbdev;
-
struct gpio_desc *gpiod;
- int def_value;
};
-static int gpio_backlight_update_status(struct backlight_device *bl)
+static int gpio_backlight_get_next_brightness(struct backlight_device *bl)
{
- struct gpio_backlight *gbl = bl_get_data(bl);
int brightness = bl->props.brightness;
if (bl->props.power != FB_BLANK_UNBLANK ||
@@ -36,6 +30,14 @@ static int gpio_backlight_update_status(struct backlight_device *bl)
bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
brightness = 0;
+ return brightness;
+}
+
+static int gpio_backlight_update_status(struct backlight_device *bl)
+{
+ struct gpio_backlight *gbl = bl_get_data(bl);
+ int brightness = gpio_backlight_get_next_brightness(bl);
+
gpiod_set_value_cansleep(gbl->gpiod, brightness);
return 0;
@@ -55,105 +57,63 @@ static const struct backlight_ops gpio_backlight_ops = {
.check_fb = gpio_backlight_check_fb,
};
-static int gpio_backlight_probe_dt(struct platform_device *pdev,
- struct gpio_backlight *gbl)
-{
- struct device *dev = &pdev->dev;
- int ret;
-
- gbl->def_value = device_property_read_bool(dev, "default-on");
-
- gbl->gpiod = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
- if (IS_ERR(gbl->gpiod)) {
- ret = PTR_ERR(gbl->gpiod);
-
- if (ret != -EPROBE_DEFER) {
- dev_err(dev,
- "Error: The gpios parameter is missing or invalid.\n");
- }
- return ret;
- }
-
- return 0;
-}
-
-static int gpio_backlight_initial_power_state(struct gpio_backlight *gbl)
-{
- struct device_node *node = gbl->dev->of_node;
-
- /* Not booted with device tree or no phandle link to the node */
- if (!node || !node->phandle)
- return gbl->def_value ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
-
- /* if the enable GPIO is disabled, do not enable the backlight */
- if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
- return FB_BLANK_POWERDOWN;
-
- return FB_BLANK_UNBLANK;
-}
-
-
static int gpio_backlight_probe(struct platform_device *pdev)
{
- struct gpio_backlight_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct gpio_backlight_platform_data *pdata = dev_get_platdata(dev);
+ struct device_node *of_node = dev->of_node;
struct backlight_properties props;
struct backlight_device *bl;
struct gpio_backlight *gbl;
- int ret;
+ int ret, init_brightness, def_value;
- gbl = devm_kzalloc(&pdev->dev, sizeof(*gbl), GFP_KERNEL);
+ gbl = devm_kzalloc(dev, sizeof(*gbl), GFP_KERNEL);
if (gbl == NULL)
return -ENOMEM;
- gbl->dev = &pdev->dev;
+ if (pdata)
+ gbl->fbdev = pdata->fbdev;
- if (pdev->dev.fwnode) {
- ret = gpio_backlight_probe_dt(pdev, gbl);
- if (ret)
- return ret;
- } else if (pdata) {
- /*
- * Legacy platform data GPIO retrieval. Do not expand
- * the use of this code path, currently only used by one
- * SH board.
- */
- unsigned long flags = GPIOF_DIR_OUT;
+ def_value = device_property_read_bool(dev, "default-on");
- gbl->fbdev = pdata->fbdev;
- gbl->def_value = pdata->def_value;
- flags |= gbl->def_value ? GPIOF_INIT_HIGH : GPIOF_INIT_LOW;
-
- ret = devm_gpio_request_one(gbl->dev, pdata->gpio, flags,
- pdata ? pdata->name : "backlight");
- if (ret < 0) {
- dev_err(&pdev->dev, "unable to request GPIO\n");
- return ret;
- }
- gbl->gpiod = gpio_to_desc(pdata->gpio);
- if (!gbl->gpiod)
- return -EINVAL;
- } else {
- dev_err(&pdev->dev,
- "failed to find platform data or device tree node.\n");
- return -ENODEV;
+ gbl->gpiod = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
+ if (IS_ERR(gbl->gpiod)) {
+ ret = PTR_ERR(gbl->gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "Error: The gpios parameter is missing or invalid.\n");
+ return ret;
}
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = 1;
- bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
- &pdev->dev, gbl, &gpio_backlight_ops,
- &props);
+ bl = devm_backlight_device_register(dev, dev_name(dev), dev, gbl,
+ &gpio_backlight_ops, &props);
if (IS_ERR(bl)) {
- dev_err(&pdev->dev, "failed to register backlight\n");
+ dev_err(dev, "failed to register backlight\n");
return PTR_ERR(bl);
}
- bl->props.power = gpio_backlight_initial_power_state(gbl);
+ /* Set the initial power state */
+ if (!of_node || !of_node->phandle)
+ /* Not booted with device tree or no phandle link to the node */
+ bl->props.power = def_value ? FB_BLANK_UNBLANK
+ : FB_BLANK_POWERDOWN;
+ else if (gpiod_get_direction(gbl->gpiod) == 0 &&
+ gpiod_get_value_cansleep(gbl->gpiod) == 0)
+ bl->props.power = FB_BLANK_POWERDOWN;
+ else
+ bl->props.power = FB_BLANK_UNBLANK;
+
bl->props.brightness = 1;
- backlight_update_status(bl);
+ init_brightness = gpio_backlight_get_next_brightness(bl);
+ ret = gpiod_direction_output(gbl->gpiod, init_brightness);
+ if (ret) {
+ dev_err(dev, "failed to set initial brightness\n");
+ return ret;
+ }
platform_set_drvdata(pdev, bl);
return 0;
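
The rewritten probe requests the line with GPIOD_ASIS so the bootloader state is left untouched, derives the initial power state, and only claims the output direction together with the first brightness value via gpiod_direction_output(). A condensed sketch of that ordering, with error handling dropped and the DT/non-DT split reduced to a boolean parameter:

```c
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/gpio/consumer.h>

/* Sketch: decide the initial state before driving the GPIO. */
static int backlight_claim_gpio(struct backlight_device *bl,
				struct gpio_desc *gpiod,
				bool use_firmware_state, bool default_on)
{
	int init;

	if (!use_firmware_state)
		/* no DT phandle: honour the "default-on" property */
		bl->props.power = default_on ? FB_BLANK_UNBLANK
					     : FB_BLANK_POWERDOWN;
	else if (gpiod_get_direction(gpiod) == 0 &&	/* already an output */
		 gpiod_get_value_cansleep(gpiod) == 0)
		/* enable line already driven low: keep the panel dark */
		bl->props.power = FB_BLANK_POWERDOWN;
	else
		bl->props.power = FB_BLANK_UNBLANK;

	bl->props.brightness = 1;

	/* The first write also takes over the line direction. */
	init = (bl->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
	return gpiod_direction_output(gpiod, init);
}
```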
diff --git a/drivers/video/backlight/ipaq_micro_bl.c b/drivers/video/backlight/ipaq_micro_bl.c
index 1123f67c12b3..85b16cc82878 100644
--- a/drivers/video/backlight/ipaq_micro_bl.c
+++ b/drivers/video/backlight/ipaq_micro_bl.c
@@ -44,7 +44,7 @@ static const struct backlight_ops micro_bl_ops = {
.update_status = micro_bl_update_status,
};
-static struct backlight_properties micro_bl_props = {
+static const struct backlight_properties micro_bl_props = {
.type = BACKLIGHT_RAW,
.max_brightness = 255,
.power = FB_BLANK_UNBLANK,
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 2d8e8192e4e2..ee320883b710 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -12,6 +12,7 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
#include <linux/pwm.h>
#include <linux/platform_data/lm3630a_bl.h>
@@ -48,6 +49,7 @@ struct lm3630a_chip {
struct lm3630a_platform_data *pdata;
struct backlight_device *bleda;
struct backlight_device *bledb;
+ struct gpio_desc *enable_gpio;
struct regmap *regmap;
struct pwm_device *pwmd;
};
@@ -534,6 +536,13 @@ static int lm3630a_probe(struct i2c_client *client,
}
pchip->pdata = pdata;
+ pchip->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pchip->enable_gpio)) {
+ rval = PTR_ERR(pchip->enable_gpio);
+ return rval;
+ }
+
/* chip initialize */
rval = lm3630a_chip_init(pchip);
if (rval < 0) {
@@ -598,12 +607,14 @@ static const struct i2c_device_id lm3630a_id[] = {
{}
};
+MODULE_DEVICE_TABLE(i2c, lm3630a_id);
+
static const struct of_device_id lm3630a_match_table[] = {
{ .compatible = "ti,lm3630a", },
{ },
};
-MODULE_DEVICE_TABLE(i2c, lm3630a_id);
+MODULE_DEVICE_TABLE(of, lm3630a_match_table);
static struct i2c_driver lm3630a_i2c_driver = {
.driver = {
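
The new enable line is requested with devm_gpiod_get_optional() and driven high at probe time, so boards whose firmware does not describe one keep working: the call returns NULL when the property is absent and an ERR_PTR only on a real failure (including probe deferral). A minimal sketch of that pattern, using the same "enable" consumer name as the driver:

```c
#include <linux/gpio/consumer.h>

/* Sketch: optional "enable" GPIO, asserted at probe time. */
static int claim_enable_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);	/* real error, e.g. -EPROBE_DEFER */

	*out = gpiod;			/* NULL when the property is absent */
	return 0;
}
```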
diff --git a/drivers/video/backlight/pm8941-wled.c b/drivers/video/backlight/pm8941-wled.c
deleted file mode 100644
index 82b85725a22a..000000000000
--- a/drivers/video/backlight/pm8941-wled.c
+++ /dev/null
@@ -1,424 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015, Sony Mobile Communications, AB.
- */
-
-#include <linux/kernel.h>
-#include <linux/backlight.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-/* From DT binding */
-#define PM8941_WLED_DEFAULT_BRIGHTNESS 2048
-
-#define PM8941_WLED_REG_VAL_BASE 0x40
-#define PM8941_WLED_REG_VAL_MAX 0xFFF
-
-#define PM8941_WLED_REG_MOD_EN 0x46
-#define PM8941_WLED_REG_MOD_EN_BIT BIT(7)
-#define PM8941_WLED_REG_MOD_EN_MASK BIT(7)
-
-#define PM8941_WLED_REG_SYNC 0x47
-#define PM8941_WLED_REG_SYNC_MASK 0x07
-#define PM8941_WLED_REG_SYNC_LED1 BIT(0)
-#define PM8941_WLED_REG_SYNC_LED2 BIT(1)
-#define PM8941_WLED_REG_SYNC_LED3 BIT(2)
-#define PM8941_WLED_REG_SYNC_ALL 0x07
-#define PM8941_WLED_REG_SYNC_CLEAR 0x00
-
-#define PM8941_WLED_REG_FREQ 0x4c
-#define PM8941_WLED_REG_FREQ_MASK 0x0f
-
-#define PM8941_WLED_REG_OVP 0x4d
-#define PM8941_WLED_REG_OVP_MASK 0x03
-
-#define PM8941_WLED_REG_BOOST 0x4e
-#define PM8941_WLED_REG_BOOST_MASK 0x07
-
-#define PM8941_WLED_REG_SINK 0x4f
-#define PM8941_WLED_REG_SINK_MASK 0xe0
-#define PM8941_WLED_REG_SINK_SHFT 0x05
-
-/* Per-'string' registers below */
-#define PM8941_WLED_REG_STR_OFFSET 0x10
-
-#define PM8941_WLED_REG_STR_MOD_EN_BASE 0x60
-#define PM8941_WLED_REG_STR_MOD_MASK BIT(7)
-#define PM8941_WLED_REG_STR_MOD_EN BIT(7)
-
-#define PM8941_WLED_REG_STR_SCALE_BASE 0x62
-#define PM8941_WLED_REG_STR_SCALE_MASK 0x1f
-
-#define PM8941_WLED_REG_STR_MOD_SRC_BASE 0x63
-#define PM8941_WLED_REG_STR_MOD_SRC_MASK 0x01
-#define PM8941_WLED_REG_STR_MOD_SRC_INT 0x00
-#define PM8941_WLED_REG_STR_MOD_SRC_EXT 0x01
-
-#define PM8941_WLED_REG_STR_CABC_BASE 0x66
-#define PM8941_WLED_REG_STR_CABC_MASK BIT(7)
-#define PM8941_WLED_REG_STR_CABC_EN BIT(7)
-
-struct pm8941_wled_config {
- u32 i_boost_limit;
- u32 ovp;
- u32 switch_freq;
- u32 num_strings;
- u32 i_limit;
- bool cs_out_en;
- bool ext_gen;
- bool cabc_en;
-};
-
-struct pm8941_wled {
- const char *name;
- struct regmap *regmap;
- u16 addr;
-
- struct pm8941_wled_config cfg;
-};
-
-static int pm8941_wled_update_status(struct backlight_device *bl)
-{
- struct pm8941_wled *wled = bl_get_data(bl);
- u16 val = bl->props.brightness;
- u8 ctrl = 0;
- int rc;
- int i;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- val = 0;
-
- if (val != 0)
- ctrl = PM8941_WLED_REG_MOD_EN_BIT;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_MOD_EN,
- PM8941_WLED_REG_MOD_EN_MASK, ctrl);
- if (rc)
- return rc;
-
- for (i = 0; i < wled->cfg.num_strings; ++i) {
- u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
-
- rc = regmap_bulk_write(wled->regmap,
- wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
- v, 2);
- if (rc)
- return rc;
- }
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_SYNC,
- PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
- if (rc)
- return rc;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_SYNC,
- PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
- return rc;
-}
-
-static int pm8941_wled_setup(struct pm8941_wled *wled)
-{
- int rc;
- int i;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_OVP,
- PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
- if (rc)
- return rc;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_BOOST,
- PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
- if (rc)
- return rc;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_FREQ,
- PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
- if (rc)
- return rc;
-
- if (wled->cfg.cs_out_en) {
- u8 all = (BIT(wled->cfg.num_strings) - 1)
- << PM8941_WLED_REG_SINK_SHFT;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_SINK,
- PM8941_WLED_REG_SINK_MASK, all);
- if (rc)
- return rc;
- }
-
- for (i = 0; i < wled->cfg.num_strings; ++i) {
- u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
-
- rc = regmap_update_bits(wled->regmap,
- addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
- PM8941_WLED_REG_STR_MOD_MASK,
- PM8941_WLED_REG_STR_MOD_EN);
- if (rc)
- return rc;
-
- if (wled->cfg.ext_gen) {
- rc = regmap_update_bits(wled->regmap,
- addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
- PM8941_WLED_REG_STR_MOD_SRC_MASK,
- PM8941_WLED_REG_STR_MOD_SRC_EXT);
- if (rc)
- return rc;
- }
-
- rc = regmap_update_bits(wled->regmap,
- addr + PM8941_WLED_REG_STR_SCALE_BASE,
- PM8941_WLED_REG_STR_SCALE_MASK,
- wled->cfg.i_limit);
- if (rc)
- return rc;
-
- rc = regmap_update_bits(wled->regmap,
- addr + PM8941_WLED_REG_STR_CABC_BASE,
- PM8941_WLED_REG_STR_CABC_MASK,
- wled->cfg.cabc_en ?
- PM8941_WLED_REG_STR_CABC_EN : 0);
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
-static const struct pm8941_wled_config pm8941_wled_config_defaults = {
- .i_boost_limit = 3,
- .i_limit = 20,
- .ovp = 2,
- .switch_freq = 5,
- .num_strings = 0,
- .cs_out_en = false,
- .ext_gen = false,
- .cabc_en = false,
-};
-
-struct pm8941_wled_var_cfg {
- const u32 *values;
- u32 (*fn)(u32);
- int size;
-};
-
-static const u32 pm8941_wled_i_boost_limit_values[] = {
- 105, 385, 525, 805, 980, 1260, 1400, 1680,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
- .values = pm8941_wled_i_boost_limit_values,
- .size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
-};
-
-static const u32 pm8941_wled_ovp_values[] = {
- 35, 32, 29, 27,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
- .values = pm8941_wled_ovp_values,
- .size = ARRAY_SIZE(pm8941_wled_ovp_values),
-};
-
-static u32 pm8941_wled_num_strings_values_fn(u32 idx)
-{
- return idx + 1;
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
- .fn = pm8941_wled_num_strings_values_fn,
- .size = 3,
-};
-
-static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
-{
- return 19200 / (2 * (1 + idx));
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
- .fn = pm8941_wled_switch_freq_values_fn,
- .size = 16,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
- .size = 26,
-};
-
-static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
-{
- if (idx >= cfg->size)
- return UINT_MAX;
- if (cfg->fn)
- return cfg->fn(idx);
- if (cfg->values)
- return cfg->values[idx];
- return idx;
-}
-
-static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
-{
- struct pm8941_wled_config *cfg = &wled->cfg;
- u32 val;
- int rc;
- u32 c;
- int i;
- int j;
-
- const struct {
- const char *name;
- u32 *val_ptr;
- const struct pm8941_wled_var_cfg *cfg;
- } u32_opts[] = {
- {
- "qcom,current-boost-limit",
- &cfg->i_boost_limit,
- .cfg = &pm8941_wled_i_boost_limit_cfg,
- },
- {
- "qcom,current-limit",
- &cfg->i_limit,
- .cfg = &pm8941_wled_i_limit_cfg,
- },
- {
- "qcom,ovp",
- &cfg->ovp,
- .cfg = &pm8941_wled_ovp_cfg,
- },
- {
- "qcom,switching-freq",
- &cfg->switch_freq,
- .cfg = &pm8941_wled_switch_freq_cfg,
- },
- {
- "qcom,num-strings",
- &cfg->num_strings,
- .cfg = &pm8941_wled_num_strings_cfg,
- },
- };
- const struct {
- const char *name;
- bool *val_ptr;
- } bool_opts[] = {
- { "qcom,cs-out", &cfg->cs_out_en, },
- { "qcom,ext-gen", &cfg->ext_gen, },
- { "qcom,cabc", &cfg->cabc_en, },
- };
-
- rc = of_property_read_u32(dev->of_node, "reg", &val);
- if (rc || val > 0xffff) {
- dev_err(dev, "invalid IO resources\n");
- return rc ? rc : -EINVAL;
- }
- wled->addr = val;
-
- rc = of_property_read_string(dev->of_node, "label", &wled->name);
- if (rc)
- wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
-
- *cfg = pm8941_wled_config_defaults;
- for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
- rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
- if (rc == -EINVAL) {
- continue;
- } else if (rc) {
- dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
- return rc;
- }
-
- c = UINT_MAX;
- for (j = 0; c != val; j++) {
- c = pm8941_wled_values(u32_opts[i].cfg, j);
- if (c == UINT_MAX) {
- dev_err(dev, "invalid value for '%s'\n",
- u32_opts[i].name);
- return -EINVAL;
- }
- }
-
- dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
- *u32_opts[i].val_ptr = j;
- }
-
- for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
- if (of_property_read_bool(dev->of_node, bool_opts[i].name))
- *bool_opts[i].val_ptr = true;
- }
-
- cfg->num_strings = cfg->num_strings + 1;
-
- return 0;
-}
-
-static const struct backlight_ops pm8941_wled_ops = {
- .update_status = pm8941_wled_update_status,
-};
-
-static int pm8941_wled_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- struct backlight_device *bl;
- struct pm8941_wled *wled;
- struct regmap *regmap;
- u32 val;
- int rc;
-
- regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!regmap) {
- dev_err(&pdev->dev, "Unable to get regmap\n");
- return -EINVAL;
- }
-
- wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
- if (!wled)
- return -ENOMEM;
-
- wled->regmap = regmap;
-
- rc = pm8941_wled_configure(wled, &pdev->dev);
- if (rc)
- return rc;
-
- rc = pm8941_wled_setup(wled);
- if (rc)
- return rc;
-
- val = PM8941_WLED_DEFAULT_BRIGHTNESS;
- of_property_read_u32(pdev->dev.of_node, "default-brightness", &val);
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.brightness = val;
- props.max_brightness = PM8941_WLED_REG_VAL_MAX;
- bl = devm_backlight_device_register(&pdev->dev, wled->name,
- &pdev->dev, wled,
- &pm8941_wled_ops, &props);
- return PTR_ERR_OR_ZERO(bl);
-};
-
-static const struct of_device_id pm8941_wled_match_table[] = {
- { .compatible = "qcom,pm8941-wled" },
- {}
-};
-MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
-
-static struct platform_driver pm8941_wled_driver = {
- .probe = pm8941_wled_probe,
- .driver = {
- .name = "pm8941-wled",
- .of_match_table = pm8941_wled_match_table,
- },
-};
-
-module_platform_driver(pm8941_wled_driver);
-
-MODULE_DESCRIPTION("pm8941 wled driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 746eebc411df..efb4efc2a13d 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -125,8 +125,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
state.duty_cycle = compute_duty_cycle(pb, brightness);
pwm_apply_state(pb->pwm, &state);
pwm_backlight_power_on(pb);
- } else
+ } else {
pwm_backlight_power_off(pb);
+ }
if (pb->notify_after)
pb->notify_after(pb->dev, brightness);
@@ -148,15 +149,16 @@ static const struct backlight_ops pwm_backlight_ops = {
};
#ifdef CONFIG_OF
-#define PWM_LUMINANCE_SCALE 10000 /* luminance scale */
+#define PWM_LUMINANCE_SHIFT 16
+#define PWM_LUMINANCE_SCALE (1 << PWM_LUMINANCE_SHIFT) /* luminance scale */
/*
* CIE lightness to PWM conversion.
*
* The CIE 1931 lightness formula is what actually describes how we perceive
* light:
- * Y = (L* / 902.3) if L* ≤ 0.08856
- * Y = ((L* + 16) / 116)^3 if L* > 0.08856
+ * Y = (L* / 903.3) if L* ≤ 8
+ * Y = ((L* + 16) / 116)^3 if L* > 8
*
* Where Y is the luminance, the amount of light coming out of the screen, and
* is a number between 0.0 and 1.0; and L* is the lightness, how bright a human
@@ -165,16 +167,25 @@ static const struct backlight_ops pwm_backlight_ops = {
* The following function does the fixed point maths needed to implement the
* above formula.
*/
-static u64 cie1931(unsigned int lightness, unsigned int scale)
+static u64 cie1931(unsigned int lightness)
{
u64 retval;
+ /*
+ * @lightness is given as a number between 0 and 1, expressed
+ * as a fixed-point number in scale
+ * PWM_LUMINANCE_SCALE. Convert to a percentage, still
+ * expressed as a fixed-point number, so the above formulas
+ * can be applied.
+ */
lightness *= 100;
- if (lightness <= (8 * scale)) {
- retval = DIV_ROUND_CLOSEST_ULL(lightness * 10, 9023);
+ if (lightness <= (8 * PWM_LUMINANCE_SCALE)) {
+ retval = DIV_ROUND_CLOSEST(lightness * 10, 9033);
} else {
- retval = int_pow((lightness + (16 * scale)) / 116, 3);
- retval = DIV_ROUND_CLOSEST_ULL(retval, (scale * scale));
+ retval = (lightness + (16 * PWM_LUMINANCE_SCALE)) / 116;
+ retval *= retval * retval;
+ retval += 1ULL << (2*PWM_LUMINANCE_SHIFT - 1);
+ retval >>= 2*PWM_LUMINANCE_SHIFT;
}
return retval;
@@ -208,8 +219,7 @@ int pwm_backlight_brightness_default(struct device *dev,
/* Fill the table using the cie1931 algorithm */
for (i = 0; i < data->max_brightness; i++) {
retval = cie1931((i * PWM_LUMINANCE_SCALE) /
- data->max_brightness, PWM_LUMINANCE_SCALE) *
- period;
+ data->max_brightness) * period;
retval = DIV_ROUND_CLOSEST_ULL(retval, PWM_LUMINANCE_SCALE);
if (retval > UINT_MAX)
return -EINVAL;
@@ -564,18 +574,17 @@ static int pwm_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
if (data->levels) {
+ pb->levels = data->levels;
+
/*
* For the DT case, only when brightness levels is defined
* data->levels is filled. For the non-DT case, data->levels
* can come from platform data, however is not usual.
*/
- for (i = 0; i <= data->max_brightness; i++) {
+ for (i = 0; i <= data->max_brightness; i++)
if (data->levels[i] > pb->scale)
pb->scale = data->levels[i];
- pb->levels = data->levels;
- }
-
if (pwm_backlight_is_linear(data))
props.scale = BACKLIGHT_SCALE_LINEAR;
else
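
Switching PWM_LUMINANCE_SCALE from 10000 to 1 << 16 keeps the CIE 1931 formula from the comment but lets the cube and the final division become shifts, with half of the divisor added in for round-to-nearest. A small user-space check of the same arithmetic, assuming the constants above (this is a verification sketch, not the kernel code):

```c
#include <stdint.h>
#include <stdio.h>

#define SHIFT 16
#define SCALE (1u << SHIFT)

/* Fixed-point CIE 1931 lightness->luminance mapping, as in the patch.
 * Input and output are fractions of 1.0 scaled by SCALE.
 */
static uint64_t cie1931(unsigned int lightness)
{
	uint64_t ret;

	lightness *= 100;	/* L* as a percentage, still fixed point */
	if (lightness <= 8 * SCALE) {
		/* L* / 903.3, rounded to nearest */
		ret = (lightness * 10 + 9033 / 2) / 9033;
	} else {
		ret = (lightness + 16 * SCALE) / 116;
		ret = ret * ret * ret;		/* scale is now SCALE^3 */
		ret += 1ULL << (2 * SHIFT - 1);	/* round to nearest */
		ret >>= 2 * SHIFT;		/* back to scale SCALE */
	}
	return ret;
}

int main(void)
{
	/* 50 % lightness should land near 18 % of full-scale luminance. */
	printf("%llu\n", (unsigned long long)cie1931(SCALE / 2));
	return 0;
}
```

Feeding it SCALE / 2 should print a value around 0.18 * SCALE, the familiar "middle grey" result, which is what the brightness-table generator above relies on.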
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
new file mode 100644
index 000000000000..d46052d8ff41
--- /dev/null
+++ b/drivers/video/backlight/qcom-wled.c
@@ -0,0 +1,1296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015, Sony Mobile Communications, AB.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/kernel.h>
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+/* From DT binding */
+#define WLED_MAX_STRINGS 4
+
+#define WLED_DEFAULT_BRIGHTNESS 2048
+#define WLED_SOFT_START_DLY_US 10000
+#define WLED3_SINK_REG_BRIGHT_MAX 0xFFF
+
+/* WLED3/WLED4 control registers */
+#define WLED3_CTRL_REG_FAULT_STATUS 0x08
+#define WLED3_CTRL_REG_ILIM_FAULT_BIT BIT(0)
+#define WLED3_CTRL_REG_OVP_FAULT_BIT BIT(1)
+#define WLED4_CTRL_REG_SC_FAULT_BIT BIT(2)
+
+#define WLED3_CTRL_REG_INT_RT_STS 0x10
+#define WLED3_CTRL_REG_OVP_FAULT_STATUS BIT(1)
+
+#define WLED3_CTRL_REG_MOD_EN 0x46
+#define WLED3_CTRL_REG_MOD_EN_MASK BIT(7)
+#define WLED3_CTRL_REG_MOD_EN_SHIFT 7
+
+#define WLED3_CTRL_REG_FEEDBACK_CONTROL 0x48
+
+#define WLED3_CTRL_REG_FREQ 0x4c
+#define WLED3_CTRL_REG_FREQ_MASK GENMASK(3, 0)
+
+#define WLED3_CTRL_REG_OVP 0x4d
+#define WLED3_CTRL_REG_OVP_MASK GENMASK(1, 0)
+
+#define WLED3_CTRL_REG_ILIMIT 0x4e
+#define WLED3_CTRL_REG_ILIMIT_MASK GENMASK(2, 0)
+
+/* WLED3/WLED4 sink registers */
+#define WLED3_SINK_REG_SYNC 0x47
+#define WLED3_SINK_REG_SYNC_CLEAR 0x00
+
+#define WLED3_SINK_REG_CURR_SINK 0x4f
+#define WLED3_SINK_REG_CURR_SINK_MASK GENMASK(7, 5)
+#define WLED3_SINK_REG_CURR_SINK_SHFT 5
+
+/* WLED3 specific per-'string' registers below */
+#define WLED3_SINK_REG_BRIGHT(n) (0x40 + n)
+
+#define WLED3_SINK_REG_STR_MOD_EN(n) (0x60 + (n * 0x10))
+#define WLED3_SINK_REG_STR_MOD_MASK BIT(7)
+
+#define WLED3_SINK_REG_STR_FULL_SCALE_CURR(n) (0x62 + (n * 0x10))
+#define WLED3_SINK_REG_STR_FULL_SCALE_CURR_MASK GENMASK(4, 0)
+
+#define WLED3_SINK_REG_STR_MOD_SRC(n) (0x63 + (n * 0x10))
+#define WLED3_SINK_REG_STR_MOD_SRC_MASK BIT(0)
+#define WLED3_SINK_REG_STR_MOD_SRC_INT 0x00
+#define WLED3_SINK_REG_STR_MOD_SRC_EXT 0x01
+
+#define WLED3_SINK_REG_STR_CABC(n) (0x66 + (n * 0x10))
+#define WLED3_SINK_REG_STR_CABC_MASK BIT(7)
+
+/* WLED4 specific control registers */
+#define WLED4_CTRL_REG_SHORT_PROTECT 0x5e
+#define WLED4_CTRL_REG_SHORT_EN_MASK BIT(7)
+
+#define WLED4_CTRL_REG_SEC_ACCESS 0xd0
+#define WLED4_CTRL_REG_SEC_UNLOCK 0xa5
+
+#define WLED4_CTRL_REG_TEST1 0xe2
+#define WLED4_CTRL_REG_TEST1_EXT_FET_DTEST2 0x09
+
+/* WLED4 specific sink registers */
+#define WLED4_SINK_REG_CURR_SINK 0x46
+#define WLED4_SINK_REG_CURR_SINK_MASK GENMASK(7, 4)
+#define WLED4_SINK_REG_CURR_SINK_SHFT 4
+
+/* WLED4 specific per-'string' registers below */
+#define WLED4_SINK_REG_STR_MOD_EN(n) (0x50 + (n * 0x10))
+#define WLED4_SINK_REG_STR_MOD_MASK BIT(7)
+
+#define WLED4_SINK_REG_STR_FULL_SCALE_CURR(n) (0x52 + (n * 0x10))
+#define WLED4_SINK_REG_STR_FULL_SCALE_CURR_MASK GENMASK(3, 0)
+
+#define WLED4_SINK_REG_STR_MOD_SRC(n) (0x53 + (n * 0x10))
+#define WLED4_SINK_REG_STR_MOD_SRC_MASK BIT(0)
+#define WLED4_SINK_REG_STR_MOD_SRC_INT 0x00
+#define WLED4_SINK_REG_STR_MOD_SRC_EXT 0x01
+
+#define WLED4_SINK_REG_STR_CABC(n) (0x56 + (n * 0x10))
+#define WLED4_SINK_REG_STR_CABC_MASK BIT(7)
+
+#define WLED4_SINK_REG_BRIGHT(n) (0x57 + (n * 0x10))
+
+struct wled_var_cfg {
+ const u32 *values;
+ u32 (*fn)(u32);
+ int size;
+};
+
+struct wled_u32_opts {
+ const char *name;
+ u32 *val_ptr;
+ const struct wled_var_cfg *cfg;
+};
+
+struct wled_bool_opts {
+ const char *name;
+ bool *val_ptr;
+};
+
+struct wled_config {
+ u32 boost_i_limit;
+ u32 ovp;
+ u32 switch_freq;
+ u32 num_strings;
+ u32 string_i_limit;
+ u32 enabled_strings[WLED_MAX_STRINGS];
+ bool cs_out_en;
+ bool ext_gen;
+ bool cabc;
+ bool external_pfet;
+ bool auto_detection_enabled;
+};
+
+struct wled {
+ const char *name;
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock; /* Lock to avoid race from thread irq handler */
+ ktime_t last_short_event;
+ ktime_t start_ovp_fault_time;
+ u16 ctrl_addr;
+ u16 sink_addr;
+ u16 max_string_count;
+ u16 auto_detection_ovp_count;
+ u32 brightness;
+ u32 max_brightness;
+ u32 short_count;
+ u32 auto_detect_count;
+ bool disabled_by_short;
+ bool has_short_detect;
+ int short_irq;
+ int ovp_irq;
+
+ struct wled_config cfg;
+ struct delayed_work ovp_work;
+ int (*wled_set_brightness)(struct wled *wled, u16 brightness);
+};
+
+static int wled3_set_brightness(struct wled *wled, u16 brightness)
+{
+ int rc, i;
+ u8 v[2];
+
+ v[0] = brightness & 0xff;
+ v[1] = (brightness >> 8) & 0xf;
+
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ rc = regmap_bulk_write(wled->regmap, wled->ctrl_addr +
+ WLED3_SINK_REG_BRIGHT(i), v, 2);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int wled4_set_brightness(struct wled *wled, u16 brightness)
+{
+ int rc, i;
+ u16 low_limit = wled->max_brightness * 4 / 1000;
+ u8 v[2];
+
+ /* WLED4's lower limit of operation is 0.4% */
+ if (brightness > 0 && brightness < low_limit)
+ brightness = low_limit;
+
+ v[0] = brightness & 0xff;
+ v[1] = (brightness >> 8) & 0xf;
+
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ rc = regmap_bulk_write(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_BRIGHT(i), v, 2);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
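
Brightness is a 12-bit value split into a low byte plus the upper four bits before the bulk register write, and WLED4 additionally clamps non-zero requests to a 0.4 % floor. With the 0xFFF maximum used by this driver that floor works out to 4095 * 4 / 1000 = 16. A standalone sketch of the clamp and split, assuming those values:

```c
#include <stdint.h>

#define BRIGHT_MAX	0xFFF	/* WLED3_SINK_REG_BRIGHT_MAX */

/* Sketch: build the two-byte payload the way wled4_set_brightness() does. */
static void wled4_payload(uint16_t brightness, uint8_t v[2])
{
	uint16_t low_limit = BRIGHT_MAX * 4 / 1000;	/* 16: 0.4 % floor */

	if (brightness > 0 && brightness < low_limit)
		brightness = low_limit;

	v[0] = brightness & 0xff;	/* LSB register */
	v[1] = (brightness >> 8) & 0xf;	/* MSB register, upper 4 bits */
}
```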
+
+static void wled_ovp_work(struct work_struct *work)
+{
+ struct wled *wled = container_of(work,
+ struct wled, ovp_work.work);
+ enable_irq(wled->ovp_irq);
+}
+
+static int wled_module_enable(struct wled *wled, int val)
+{
+ int rc;
+
+ if (wled->disabled_by_short)
+ return -ENXIO;
+
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK,
+ val << WLED3_CTRL_REG_MOD_EN_SHIFT);
+ if (rc < 0)
+ return rc;
+
+ if (wled->ovp_irq > 0) {
+ if (val) {
+ /*
+ * The hardware generates a storm of spurious OVP
+ * interrupts during soft start operations. So defer
+ * enabling the IRQ for 10ms to ensure that the
+ * soft start is complete.
+ */
+ schedule_delayed_work(&wled->ovp_work, HZ / 100);
+ } else {
+ if (!cancel_delayed_work_sync(&wled->ovp_work))
+ disable_irq(wled->ovp_irq);
+ }
+ }
+
+ return 0;
+}
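
Enabling the module defers re-arming the OVP interrupt by HZ / 100 (about 10 ms) to ride out the spurious faults generated during soft start; on disable, the return value of cancel_delayed_work_sync() tells us whether that work ever ran, so disable_irq() is only called when enable_irq() actually happened. A stripped-down sketch of that bookkeeping, reusing the struct wled fields declared above and omitting the register write:

```c
/* Sketch of the OVP IRQ enable/disable balancing used above. */
static void module_set_enabled(struct wled *wled, bool enable)
{
	if (wled->ovp_irq <= 0)
		return;

	if (enable) {
		/* IRQ re-armed ~10 ms later, once soft start is done. */
		schedule_delayed_work(&wled->ovp_work, HZ / 100);
	} else {
		/*
		 * If the delayed work was still pending it never called
		 * enable_irq(), so there is nothing to undo; otherwise
		 * balance the earlier enable_irq() here.
		 */
		if (!cancel_delayed_work_sync(&wled->ovp_work))
			disable_irq(wled->ovp_irq);
	}
}
```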
+
+static int wled_sync_toggle(struct wled *wled)
+{
+ int rc;
+ unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_SINK_REG_SYNC,
+ mask, mask);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_SINK_REG_SYNC,
+ mask, WLED3_SINK_REG_SYNC_CLEAR);
+
+ return rc;
+}
+
+static int wled_update_status(struct backlight_device *bl)
+{
+ struct wled *wled = bl_get_data(bl);
+ u16 brightness = bl->props.brightness;
+ int rc = 0;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK)
+ brightness = 0;
+
+ mutex_lock(&wled->lock);
+ if (brightness) {
+ rc = wled->wled_set_brightness(wled, brightness);
+ if (rc < 0) {
+ dev_err(wled->dev, "wled failed to set brightness rc:%d\n",
+ rc);
+ goto unlock_mutex;
+ }
+
+ rc = wled_sync_toggle(wled);
+ if (rc < 0) {
+ dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
+ goto unlock_mutex;
+ }
+ }
+
+ if (!!brightness != !!wled->brightness) {
+ rc = wled_module_enable(wled, !!brightness);
+ if (rc < 0) {
+ dev_err(wled->dev, "wled enable failed rc:%d\n", rc);
+ goto unlock_mutex;
+ }
+ }
+
+ wled->brightness = brightness;
+
+unlock_mutex:
+ mutex_unlock(&wled->lock);
+
+ return rc;
+}
+
+#define WLED_SHORT_DLY_MS 20
+#define WLED_SHORT_CNT_MAX 5
+#define WLED_SHORT_RESET_CNT_DLY_US USEC_PER_SEC
+
+static irqreturn_t wled_short_irq_handler(int irq, void *_wled)
+{
+ struct wled *wled = _wled;
+ int rc;
+ s64 elapsed_time;
+
+ wled->short_count++;
+ mutex_lock(&wled->lock);
+ rc = wled_module_enable(wled, false);
+ if (rc < 0) {
+ dev_err(wled->dev, "wled disable failed rc:%d\n", rc);
+ goto unlock_mutex;
+ }
+
+ elapsed_time = ktime_us_delta(ktime_get(),
+ wled->last_short_event);
+ if (elapsed_time > WLED_SHORT_RESET_CNT_DLY_US)
+ wled->short_count = 1;
+
+ if (wled->short_count > WLED_SHORT_CNT_MAX) {
+ dev_err(wled->dev, "Short triggered %d times, disabling WLED forever!\n",
+ wled->short_count);
+ wled->disabled_by_short = true;
+ goto unlock_mutex;
+ }
+
+ wled->last_short_event = ktime_get();
+
+ msleep(WLED_SHORT_DLY_MS);
+ rc = wled_module_enable(wled, true);
+ if (rc < 0)
+ dev_err(wled->dev, "wled enable failed rc:%d\n", rc);
+
+unlock_mutex:
+ mutex_unlock(&wled->lock);
+
+ return IRQ_HANDLED;
+}
+
+#define AUTO_DETECT_BRIGHTNESS 200
+
+static void wled_auto_string_detection(struct wled *wled)
+{
+ int rc = 0, i;
+ u32 sink_config = 0, int_sts;
+ u8 sink_test = 0, sink_valid = 0, val;
+
+ /* Read configured sink configuration */
+ rc = regmap_read(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_CURR_SINK, &sink_config);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read SINK configuration rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ /* Disable the module before starting detection */
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK, 0);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to disable WLED module rc=%d\n", rc);
+ goto failed_detect;
+ }
+
+ /* Set low brightness across all sinks */
+ rc = wled4_set_brightness(wled, AUTO_DETECT_BRIGHTNESS);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to set brightness for auto detection rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ if (wled->cfg.cabc) {
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ rc = regmap_update_bits(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_CABC(i),
+ WLED4_SINK_REG_STR_CABC_MASK,
+ 0);
+ if (rc < 0)
+ goto failed_detect;
+ }
+ }
+
+ /* Disable all sinks */
+ rc = regmap_write(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK, 0);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to disable all sinks rc=%d\n", rc);
+ goto failed_detect;
+ }
+
+ /* Iterate through the strings one by one */
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ sink_test = BIT((WLED4_SINK_REG_CURR_SINK_SHFT + i));
+
+ /* Enable feedback control */
+ rc = regmap_write(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_FEEDBACK_CONTROL, i + 1);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to enable feedback for SINK %d rc = %d\n",
+ i + 1, rc);
+ goto failed_detect;
+ }
+
+ /* Enable the sink */
+ rc = regmap_write(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_CURR_SINK, sink_test);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to configure SINK %d rc=%d\n",
+ i + 1, rc);
+ goto failed_detect;
+ }
+
+ /* Enable the module */
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK,
+ WLED3_CTRL_REG_MOD_EN_MASK);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to enable WLED module rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ usleep_range(WLED_SOFT_START_DLY_US,
+ WLED_SOFT_START_DLY_US + 1000);
+
+ rc = regmap_read(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_INT_RT_STS, &int_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Error in reading WLED3_CTRL_INT_RT_STS rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ if (int_sts & WLED3_CTRL_REG_OVP_FAULT_STATUS)
+ dev_dbg(wled->dev, "WLED OVP fault detected with SINK %d\n",
+ i + 1);
+ else
+ sink_valid |= sink_test;
+
+ /* Disable the module */
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK, 0);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to disable WLED module rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+ }
+
+ if (!sink_valid) {
+ dev_err(wled->dev, "No valid WLED sinks found\n");
+ wled->disabled_by_short = true;
+ goto failed_detect;
+ }
+
+ if (sink_valid != sink_config) {
+ dev_warn(wled->dev, "%x is not a valid sink configuration - using %x instead\n",
+ sink_config, sink_valid);
+ sink_config = sink_valid;
+ }
+
+ /* Write the new sink configuration */
+ rc = regmap_write(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK,
+ sink_config);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to reconfigure the default sink rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ /* Enable valid sinks */
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ if (wled->cfg.cabc) {
+ rc = regmap_update_bits(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_CABC(i),
+ WLED4_SINK_REG_STR_CABC_MASK,
+ WLED4_SINK_REG_STR_CABC_MASK);
+ if (rc < 0)
+ goto failed_detect;
+ }
+
+ if (sink_config & BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i))
+ val = WLED4_SINK_REG_STR_MOD_MASK;
+ else
+ val = 0x0; /* Disable modulator_en for unused sink */
+
+ rc = regmap_write(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_MOD_EN(i), val);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+ }
+
+ /* Restore the feedback setting */
+ rc = regmap_write(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FEEDBACK_CONTROL, 0);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to restore feedback setting rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ /* Restore brightness */
+ rc = wled4_set_brightness(wled, wled->brightness);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to set brightness after auto detection rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK,
+ WLED3_CTRL_REG_MOD_EN_MASK);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to enable WLED module rc=%d\n", rc);
+ goto failed_detect;
+ }
+
+failed_detect:
+ return;
+}
+
+#define WLED_AUTO_DETECT_OVP_COUNT 5
+#define WLED_AUTO_DETECT_CNT_DLY_US USEC_PER_SEC
+static bool wled_auto_detection_required(struct wled *wled)
+{
+ s64 elapsed_time_us;
+
+ if (!wled->cfg.auto_detection_enabled)
+ return false;
+
+ /*
+ * Check if the OVP fault was an occasional one
+ * or if it's firing continuously, the latter qualifies
+ * for an auto-detection check.
+ */
+ if (!wled->auto_detection_ovp_count) {
+ wled->start_ovp_fault_time = ktime_get();
+ wled->auto_detection_ovp_count++;
+ } else {
+ elapsed_time_us = ktime_us_delta(ktime_get(),
+ wled->start_ovp_fault_time);
+ if (elapsed_time_us > WLED_AUTO_DETECT_CNT_DLY_US)
+ wled->auto_detection_ovp_count = 0;
+ else
+ wled->auto_detection_ovp_count++;
+
+ if (wled->auto_detection_ovp_count >=
+ WLED_AUTO_DETECT_OVP_COUNT) {
+ wled->auto_detection_ovp_count = 0;
+ return true;
+ }
+ }
+
+ return false;
+}
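
The handler only escalates to a full string re-detection when OVP faults arrive in a burst: the first fault opens a one-second window (WLED_AUTO_DETECT_CNT_DLY_US) and only the fifth fault inside it (WLED_AUTO_DETECT_OVP_COUNT) triggers detection, while an isolated fault just resets the counter. A compact variant of that windowed counter, reusing struct wled and the constants defined above; it merges the "window expired" and "first fault" paths that the function above keeps separate:

```c
/* Sketch: "N faults within T" debounce, as in wled_auto_detection_required(). */
static bool ovp_burst_detected(struct wled *wled)
{
	s64 us = ktime_us_delta(ktime_get(), wled->start_ovp_fault_time);

	if (!wled->auto_detection_ovp_count ||
	    us > WLED_AUTO_DETECT_CNT_DLY_US) {
		/* first fault, or the previous window expired: restart it */
		wled->start_ovp_fault_time = ktime_get();
		wled->auto_detection_ovp_count = 1;
		return false;
	}

	if (++wled->auto_detection_ovp_count >= WLED_AUTO_DETECT_OVP_COUNT) {
		wled->auto_detection_ovp_count = 0;
		return true;
	}

	return false;
}
```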
+
+static int wled_auto_detection_at_init(struct wled *wled)
+{
+ int rc;
+ u32 fault_status, rt_status;
+
+ if (!wled->cfg.auto_detection_enabled)
+ return 0;
+
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_INT_RT_STS,
+ &rt_status);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read RT status rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FAULT_STATUS,
+ &fault_status);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read fault status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((rt_status & WLED3_CTRL_REG_OVP_FAULT_STATUS) ||
+ (fault_status & WLED3_CTRL_REG_OVP_FAULT_BIT)) {
+ mutex_lock(&wled->lock);
+ wled_auto_string_detection(wled);
+ mutex_unlock(&wled->lock);
+ }
+
+ return rc;
+}
+
+static irqreturn_t wled_ovp_irq_handler(int irq, void *_wled)
+{
+ struct wled *wled = _wled;
+ int rc;
+ u32 int_sts, fault_sts;
+
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_INT_RT_STS, &int_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Error in reading WLED3_INT_RT_STS rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ rc = regmap_read(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_FAULT_STATUS, &fault_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Error in reading WLED_FAULT_STATUS rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ if (fault_sts & (WLED3_CTRL_REG_OVP_FAULT_BIT |
+ WLED3_CTRL_REG_ILIM_FAULT_BIT))
+ dev_dbg(wled->dev, "WLED OVP fault detected, int_sts=%x fault_sts= %x\n",
+ int_sts, fault_sts);
+
+ if (fault_sts & WLED3_CTRL_REG_OVP_FAULT_BIT) {
+ if (wled_auto_detection_required(wled)) {
+ mutex_lock(&wled->lock);
+ wled_auto_string_detection(wled);
+ mutex_unlock(&wled->lock);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int wled3_setup(struct wled *wled)
+{
+ u16 addr;
+ u8 sink_en = 0;
+ int rc, i, j;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_OVP,
+ WLED3_CTRL_REG_OVP_MASK, wled->cfg.ovp);
+ if (rc)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_ILIMIT,
+ WLED3_CTRL_REG_ILIMIT_MASK,
+ wled->cfg.boost_i_limit);
+ if (rc)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FREQ,
+ WLED3_CTRL_REG_FREQ_MASK,
+ wled->cfg.switch_freq);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ j = wled->cfg.enabled_strings[i];
+ addr = wled->ctrl_addr + WLED3_SINK_REG_STR_MOD_EN(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED3_SINK_REG_STR_MOD_MASK,
+ WLED3_SINK_REG_STR_MOD_MASK);
+ if (rc)
+ return rc;
+
+ if (wled->cfg.ext_gen) {
+ addr = wled->ctrl_addr + WLED3_SINK_REG_STR_MOD_SRC(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED3_SINK_REG_STR_MOD_SRC_MASK,
+ WLED3_SINK_REG_STR_MOD_SRC_EXT);
+ if (rc)
+ return rc;
+ }
+
+ addr = wled->ctrl_addr + WLED3_SINK_REG_STR_FULL_SCALE_CURR(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED3_SINK_REG_STR_FULL_SCALE_CURR_MASK,
+ wled->cfg.string_i_limit);
+ if (rc)
+ return rc;
+
+ addr = wled->ctrl_addr + WLED3_SINK_REG_STR_CABC(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED3_SINK_REG_STR_CABC_MASK,
+ wled->cfg.cabc ?
+ WLED3_SINK_REG_STR_CABC_MASK : 0);
+ if (rc)
+ return rc;
+
+ sink_en |= BIT(j + WLED3_SINK_REG_CURR_SINK_SHFT);
+ }
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_SINK_REG_CURR_SINK,
+ WLED3_SINK_REG_CURR_SINK_MASK, sink_en);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static const struct wled_config wled3_config_defaults = {
+ .boost_i_limit = 3,
+ .string_i_limit = 20,
+ .ovp = 2,
+ .num_strings = 3,
+ .switch_freq = 5,
+ .cs_out_en = false,
+ .ext_gen = false,
+ .cabc = false,
+ .enabled_strings = {0, 1, 2, 3},
+};
+
+static int wled4_setup(struct wled *wled)
+{
+ int rc, temp, i, j;
+ u16 addr;
+ u8 sink_en = 0;
+ u32 sink_cfg;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_OVP,
+ WLED3_CTRL_REG_OVP_MASK, wled->cfg.ovp);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_ILIMIT,
+ WLED3_CTRL_REG_ILIMIT_MASK,
+ wled->cfg.boost_i_limit);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FREQ,
+ WLED3_CTRL_REG_FREQ_MASK,
+ wled->cfg.switch_freq);
+ if (rc < 0)
+ return rc;
+
+ if (wled->cfg.external_pfet) {
+ /* Unlock the secure register access */
+ rc = regmap_write(wled->regmap, wled->ctrl_addr +
+ WLED4_CTRL_REG_SEC_ACCESS,
+ WLED4_CTRL_REG_SEC_UNLOCK);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_write(wled->regmap,
+ wled->ctrl_addr + WLED4_CTRL_REG_TEST1,
+ WLED4_CTRL_REG_TEST1_EXT_FET_DTEST2);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = regmap_read(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_CURR_SINK, &sink_cfg);
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ j = wled->cfg.enabled_strings[i];
+ temp = j + WLED4_SINK_REG_CURR_SINK_SHFT;
+ sink_en |= 1 << temp;
+ }
+
+ if (sink_cfg == sink_en) {
+ rc = wled_auto_detection_at_init(wled);
+ return rc;
+ }
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK,
+ WLED4_SINK_REG_CURR_SINK_MASK, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK, 0);
+ if (rc < 0)
+ return rc;
+
+ /* Per sink/string configuration */
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ j = wled->cfg.enabled_strings[i];
+
+ addr = wled->sink_addr +
+ WLED4_SINK_REG_STR_MOD_EN(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED4_SINK_REG_STR_MOD_MASK,
+ WLED4_SINK_REG_STR_MOD_MASK);
+ if (rc < 0)
+ return rc;
+
+ addr = wled->sink_addr +
+ WLED4_SINK_REG_STR_FULL_SCALE_CURR(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED4_SINK_REG_STR_FULL_SCALE_CURR_MASK,
+ wled->cfg.string_i_limit);
+ if (rc < 0)
+ return rc;
+
+ addr = wled->sink_addr +
+ WLED4_SINK_REG_STR_CABC(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED4_SINK_REG_STR_CABC_MASK,
+ wled->cfg.cabc ?
+ WLED4_SINK_REG_STR_CABC_MASK : 0);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK,
+ WLED3_CTRL_REG_MOD_EN_MASK);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK,
+ WLED4_SINK_REG_CURR_SINK_MASK, sink_en);
+ if (rc < 0)
+ return rc;
+
+ rc = wled_sync_toggle(wled);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to toggle sync reg rc:%d\n", rc);
+ return rc;
+ }
+
+ rc = wled_auto_detection_at_init(wled);
+
+ return rc;
+}
+
+static const struct wled_config wled4_config_defaults = {
+ .boost_i_limit = 4,
+ .string_i_limit = 10,
+ .ovp = 1,
+ .num_strings = 4,
+ .switch_freq = 11,
+ .cabc = false,
+ .external_pfet = false,
+ .auto_detection_enabled = false,
+};
+
+static const u32 wled3_boost_i_limit_values[] = {
+ 105, 385, 525, 805, 980, 1260, 1400, 1680,
+};
+
+static const struct wled_var_cfg wled3_boost_i_limit_cfg = {
+ .values = wled3_boost_i_limit_values,
+ .size = ARRAY_SIZE(wled3_boost_i_limit_values),
+};
+
+static const u32 wled4_boost_i_limit_values[] = {
+ 105, 280, 450, 620, 970, 1150, 1300, 1500,
+};
+
+static const struct wled_var_cfg wled4_boost_i_limit_cfg = {
+ .values = wled4_boost_i_limit_values,
+ .size = ARRAY_SIZE(wled4_boost_i_limit_values),
+};
+
+static const u32 wled3_ovp_values[] = {
+ 35, 32, 29, 27,
+};
+
+static const struct wled_var_cfg wled3_ovp_cfg = {
+ .values = wled3_ovp_values,
+ .size = ARRAY_SIZE(wled3_ovp_values),
+};
+
+static const u32 wled4_ovp_values[] = {
+ 31100, 29600, 19600, 18100,
+};
+
+static const struct wled_var_cfg wled4_ovp_cfg = {
+ .values = wled4_ovp_values,
+ .size = ARRAY_SIZE(wled4_ovp_values),
+};
+
+static u32 wled3_num_strings_values_fn(u32 idx)
+{
+ return idx + 1;
+}
+
+static const struct wled_var_cfg wled3_num_strings_cfg = {
+ .fn = wled3_num_strings_values_fn,
+ .size = 3,
+};
+
+static const struct wled_var_cfg wled4_num_strings_cfg = {
+ .fn = wled3_num_strings_values_fn,
+ .size = 4,
+};
+
+static u32 wled3_switch_freq_values_fn(u32 idx)
+{
+ return 19200 / (2 * (1 + idx));
+}
+
+static const struct wled_var_cfg wled3_switch_freq_cfg = {
+ .fn = wled3_switch_freq_values_fn,
+ .size = 16,
+};
+
+static const struct wled_var_cfg wled3_string_i_limit_cfg = {
+ .size = 26,
+};
+
+static const u32 wled4_string_i_limit_values[] = {
+ 0, 2500, 5000, 7500, 10000, 12500, 15000, 17500, 20000,
+ 22500, 25000, 27500, 30000,
+};
+
+static const struct wled_var_cfg wled4_string_i_limit_cfg = {
+ .values = wled4_string_i_limit_values,
+ .size = ARRAY_SIZE(wled4_string_i_limit_values),
+};
+
+static const struct wled_var_cfg wled3_string_cfg = {
+ .size = 8,
+};
+
+static const struct wled_var_cfg wled4_string_cfg = {
+ .size = 16,
+};
+
+static u32 wled_values(const struct wled_var_cfg *cfg, u32 idx)
+{
+ if (idx >= cfg->size)
+ return UINT_MAX;
+ if (cfg->fn)
+ return cfg->fn(idx);
+ if (cfg->values)
+ return cfg->values[idx];
+ return idx;
+}
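
wled_values() turns a register index back into the physical value a DT property describes, and wled_configure() below walks the index space until the two match. With wled3_boost_i_limit_values above, for example, a DT entry of qcom,current-boost-limit = <805> resolves to index 3, which is what ends up in the register field. A tiny sketch of that reverse lookup, using the helper and types defined above:

```c
/* Sketch: map a requested physical value back to its register index,
 * the same way the lookup loop in wled_configure() does.
 */
static int wled_value_to_index(const struct wled_var_cfg *cfg, u32 want)
{
	u32 j, c;

	for (j = 0; j < cfg->size; j++) {
		c = wled_values(cfg, j);
		if (c == want)
			return j;
	}

	return -EINVAL;	/* value not representable on this hardware */
}
```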
+
+static int wled_configure(struct wled *wled, int version)
+{
+ struct wled_config *cfg = &wled->cfg;
+ struct device *dev = wled->dev;
+ const __be32 *prop_addr;
+ u32 size, val, c, string_len;
+ int rc, i, j;
+
+ const struct wled_u32_opts *u32_opts = NULL;
+ const struct wled_u32_opts wled3_opts[] = {
+ {
+ .name = "qcom,current-boost-limit",
+ .val_ptr = &cfg->boost_i_limit,
+ .cfg = &wled3_boost_i_limit_cfg,
+ },
+ {
+ .name = "qcom,current-limit",
+ .val_ptr = &cfg->string_i_limit,
+ .cfg = &wled3_string_i_limit_cfg,
+ },
+ {
+ .name = "qcom,ovp",
+ .val_ptr = &cfg->ovp,
+ .cfg = &wled3_ovp_cfg,
+ },
+ {
+ .name = "qcom,switching-freq",
+ .val_ptr = &cfg->switch_freq,
+ .cfg = &wled3_switch_freq_cfg,
+ },
+ {
+ .name = "qcom,num-strings",
+ .val_ptr = &cfg->num_strings,
+ .cfg = &wled3_num_strings_cfg,
+ },
+ };
+
+ const struct wled_u32_opts wled4_opts[] = {
+ {
+ .name = "qcom,current-boost-limit",
+ .val_ptr = &cfg->boost_i_limit,
+ .cfg = &wled4_boost_i_limit_cfg,
+ },
+ {
+ .name = "qcom,current-limit-microamp",
+ .val_ptr = &cfg->string_i_limit,
+ .cfg = &wled4_string_i_limit_cfg,
+ },
+ {
+ .name = "qcom,ovp-millivolt",
+ .val_ptr = &cfg->ovp,
+ .cfg = &wled4_ovp_cfg,
+ },
+ {
+ .name = "qcom,switching-freq",
+ .val_ptr = &cfg->switch_freq,
+ .cfg = &wled3_switch_freq_cfg,
+ },
+ {
+ .name = "qcom,num-strings",
+ .val_ptr = &cfg->num_strings,
+ .cfg = &wled4_num_strings_cfg,
+ },
+ };
+
+ const struct wled_bool_opts bool_opts[] = {
+ { "qcom,cs-out", &cfg->cs_out_en, },
+ { "qcom,ext-gen", &cfg->ext_gen, },
+ { "qcom,cabc", &cfg->cabc, },
+ { "qcom,external-pfet", &cfg->external_pfet, },
+ { "qcom,auto-string-detection", &cfg->auto_detection_enabled, },
+ };
+
+ prop_addr = of_get_address(dev->of_node, 0, NULL, NULL);
+ if (!prop_addr) {
+ dev_err(wled->dev, "invalid IO resources\n");
+ return -EINVAL;
+ }
+ wled->ctrl_addr = be32_to_cpu(*prop_addr);
+
+ rc = of_property_read_string(dev->of_node, "label", &wled->name);
+ if (rc)
+ wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+
+ switch (version) {
+ case 3:
+ u32_opts = wled3_opts;
+ size = ARRAY_SIZE(wled3_opts);
+ *cfg = wled3_config_defaults;
+ wled->wled_set_brightness = wled3_set_brightness;
+ wled->max_string_count = 3;
+ wled->sink_addr = wled->ctrl_addr;
+ break;
+
+ case 4:
+ u32_opts = wled4_opts;
+ size = ARRAY_SIZE(wled4_opts);
+ *cfg = wled4_config_defaults;
+ wled->wled_set_brightness = wled4_set_brightness;
+ wled->max_string_count = 4;
+
+ prop_addr = of_get_address(dev->of_node, 1, NULL, NULL);
+ if (!prop_addr) {
+ dev_err(wled->dev, "invalid IO resources\n");
+ return -EINVAL;
+ }
+ wled->sink_addr = be32_to_cpu(*prop_addr);
+ break;
+
+ default:
+ dev_err(wled->dev, "Invalid WLED version\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < size; ++i) {
+ rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
+ if (rc == -EINVAL) {
+ continue;
+ } else if (rc) {
+ dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
+ return rc;
+ }
+
+ c = UINT_MAX;
+ for (j = 0; c != val; j++) {
+ c = wled_values(u32_opts[i].cfg, j);
+ if (c == UINT_MAX) {
+ dev_err(dev, "invalid value for '%s'\n",
+ u32_opts[i].name);
+ return -EINVAL;
+ }
+
+ if (c == val)
+ break;
+ }
+
+ dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
+ *u32_opts[i].val_ptr = j;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
+ if (of_property_read_bool(dev->of_node, bool_opts[i].name))
+ *bool_opts[i].val_ptr = true;
+ }
+
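+ /*
+ * num_strings now holds a table index from the lookup above; convert
+ * it back to the actual string count.
+ */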
+ cfg->num_strings = cfg->num_strings + 1;
+
+ string_len = of_property_count_elems_of_size(dev->of_node,
+ "qcom,enabled-strings",
+ sizeof(u32));
+ if (string_len > 0)
+ of_property_read_u32_array(dev->of_node,
+ "qcom,enabled-strings",
+ wled->cfg.enabled_strings,
+ string_len);
+
+ return 0;
+}
+
+static int wled_configure_short_irq(struct wled *wled,
+ struct platform_device *pdev)
+{
+ int rc;
+
+ if (!wled->has_short_detect)
+ return 0;
+
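+ /* Enable short-circuit protection in hardware before requesting its IRQ */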
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED4_CTRL_REG_SHORT_PROTECT,
+ WLED4_CTRL_REG_SHORT_EN_MASK,
+ WLED4_CTRL_REG_SHORT_EN_MASK);
+ if (rc < 0)
+ return rc;
+
+ wled->short_irq = platform_get_irq_byname(pdev, "short");
+ if (wled->short_irq < 0) {
+ dev_dbg(&pdev->dev, "short irq is not used\n");
+ return 0;
+ }
+
+ rc = devm_request_threaded_irq(wled->dev, wled->short_irq,
+ NULL, wled_short_irq_handler,
+ IRQF_ONESHOT,
+ "wled_short_irq", wled);
+ if (rc < 0)
+ dev_err(wled->dev, "Unable to request short_irq (err:%d)\n",
+ rc);
+
+ return rc;
+}
+
+static int wled_configure_ovp_irq(struct wled *wled,
+ struct platform_device *pdev)
+{
+ int rc;
+ u32 val;
+
+ wled->ovp_irq = platform_get_irq_byname(pdev, "ovp");
+ if (wled->ovp_irq < 0) {
+ dev_dbg(&pdev->dev, "OVP IRQ not found - disabling automatic string detection\n");
+ return 0;
+ }
+
+ rc = devm_request_threaded_irq(wled->dev, wled->ovp_irq, NULL,
+ wled_ovp_irq_handler, IRQF_ONESHOT,
+ "wled_ovp_irq", wled);
+ if (rc < 0) {
+ dev_err(wled->dev, "Unable to request ovp_irq (err:%d)\n",
+ rc);
+ wled->ovp_irq = 0;
+ return 0;
+ }
+
+ rc = regmap_read(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN, &val);
+ if (rc < 0)
+ return rc;
+
+ /* Keep OVP irq disabled until module is enabled */
+ if (!(val & WLED3_CTRL_REG_MOD_EN_MASK))
+ disable_irq(wled->ovp_irq);
+
+ return 0;
+}
+
+static const struct backlight_ops wled_ops = {
+ .update_status = wled_update_status,
+};
+
+static int wled_probe(struct platform_device *pdev)
+{
+ struct backlight_properties props;
+ struct backlight_device *bl;
+ struct wled *wled;
+ struct regmap *regmap;
+ int version;
+ u32 val;
+ int rc;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "Unable to get regmap\n");
+ return -EINVAL;
+ }
+
+ wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
+ if (!wled)
+ return -ENOMEM;
+
+ wled->regmap = regmap;
+ wled->dev = &pdev->dev;
+
+ version = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (!version) {
+ dev_err(&pdev->dev, "Unknown device version\n");
+ return -ENODEV;
+ }
+
+ mutex_init(&wled->lock);
+ rc = wled_configure(wled, version);
+ if (rc)
+ return rc;
+
+ switch (version) {
+ case 3:
+ wled->cfg.auto_detection_enabled = false;
+ rc = wled3_setup(wled);
+ if (rc) {
+ dev_err(&pdev->dev, "wled3_setup failed\n");
+ return rc;
+ }
+ break;
+
+ case 4:
+ wled->has_short_detect = true;
+ rc = wled4_setup(wled);
+ if (rc) {
+ dev_err(&pdev->dev, "wled4_setup failed\n");
+ return rc;
+ }
+ break;
+
+ default:
+ dev_err(wled->dev, "Invalid WLED version\n");
+ break;
+ }
+
+ INIT_DELAYED_WORK(&wled->ovp_work, wled_ovp_work);
+
+ rc = wled_configure_short_irq(wled, pdev);
+ if (rc < 0)
+ return rc;
+
+ rc = wled_configure_ovp_irq(wled, pdev);
+ if (rc < 0)
+ return rc;
+
+ /* wled_remove() looks this up via dev_get_drvdata() */
+ platform_set_drvdata(pdev, wled);
+
+ val = WLED_DEFAULT_BRIGHTNESS;
+ of_property_read_u32(pdev->dev.of_node, "default-brightness", &val);
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.brightness = val;
+ props.max_brightness = WLED3_SINK_REG_BRIGHT_MAX;
+ bl = devm_backlight_device_register(&pdev->dev, wled->name,
+ &pdev->dev, wled,
+ &wled_ops, &props);
+ return PTR_ERR_OR_ZERO(bl);
+}
+
+static int wled_remove(struct platform_device *pdev)
+{
+ struct wled *wled = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&wled->lock);
+ cancel_delayed_work_sync(&wled->ovp_work);
+ disable_irq(wled->short_irq);
+ disable_irq(wled->ovp_irq);
+
+ return 0;
+}
+
+static const struct of_device_id wled_match_table[] = {
+ { .compatible = "qcom,pm8941-wled", .data = (void *)3 },
+ { .compatible = "qcom,pmi8998-wled", .data = (void *)4 },
+ { .compatible = "qcom,pm660l-wled", .data = (void *)4 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wled_match_table);
+
+static struct platform_driver wled_driver = {
+ .probe = wled_probe,
+ .remove = wled_remove,
+ .driver = {
+ .name = "qcom,wled",
+ .of_match_table = wled_match_table,
+ },
+};
+
+module_platform_driver(wled_driver);
+
+MODULE_DESCRIPTION("Qualcomm WLED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 1275e815bd86..cff5e96fd988 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -18,7 +18,7 @@
#include <asm/mach/sharpsl_param.h>
-#include <mach/tosa.h>
+#include "tosa_bl.h"
#define COMADJ_DEFAULT 97
@@ -28,6 +28,7 @@
struct tosa_bl_data {
struct i2c_client *i2c;
struct backlight_device *bl;
+ struct gpio_desc *gpio;
int comadj;
};
@@ -42,7 +43,7 @@ static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
i2c_smbus_write_byte_data(data->i2c, DAC_CH2, (u8)(brightness & 0xff));
/* SetBacklightVR */
- gpio_set_value(TOSA_GPIO_BL_C20MA, brightness & 0x100);
+ gpiod_set_value(data->gpio, brightness & 0x100);
tosa_bl_enable(spi, brightness);
}
@@ -87,9 +88,8 @@ static int tosa_bl_probe(struct i2c_client *client,
return -ENOMEM;
data->comadj = sharpsl_param.comadj == -1 ? COMADJ_DEFAULT : sharpsl_param.comadj;
-
- ret = devm_gpio_request_one(&client->dev, TOSA_GPIO_BL_C20MA,
- GPIOF_OUT_INIT_LOW, "backlight");
+ data->gpio = devm_gpiod_get(&client->dev, "backlight", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(data->gpio);
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
return ret;
diff --git a/drivers/video/backlight/tosa_bl.h b/drivers/video/backlight/tosa_bl.h
new file mode 100644
index 000000000000..589e17e6fdb2
--- /dev/null
+++ b/drivers/video/backlight/tosa_bl.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _TOSA_BL_H
+#define _TOSA_BL_H
+
+struct spi_device;
+extern int tosa_bl_enable(struct spi_device *spi, int enable);
+
+#endif
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 29af8e27b6e5..e8ab583e5098 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -19,7 +19,7 @@
#include <asm/mach/sharpsl_param.h>
-#include <mach/tosa.h>
+#include "tosa_bl.h"
#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
@@ -28,12 +28,26 @@
#define TG_REG0_UD 0x0004
#define TG_REG0_LR 0x0008
+/*
+ * Timing Generator
+ */
+#define TG_PNLCTL 0x00
+#define TG_TPOSCTL 0x01
+#define TG_DUTYCTL 0x02
+#define TG_GPOSR 0x03
+#define TG_GPODR1 0x04
+#define TG_GPODR2 0x05
+#define TG_PINICTL 0x06
+#define TG_HPOSCTL 0x07
+
+
#define DAC_BASE 0x4e
struct tosa_lcd_data {
struct spi_device *spi;
struct lcd_device *lcd;
struct i2c_client *i2c;
+ struct gpio_desc *gpiod_tg;
int lcd_power;
bool is_vga;
@@ -66,7 +80,7 @@ EXPORT_SYMBOL(tosa_bl_enable);
static void tosa_lcd_tg_init(struct tosa_lcd_data *data)
{
/* TG on */
- gpio_set_value(TOSA_GPIO_TG_ON, 0);
+ gpiod_set_value(data->gpiod_tg, 0);
mdelay(60);
@@ -100,6 +114,7 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
*/
struct i2c_adapter *adap = i2c_get_adapter(0);
struct i2c_board_info info = {
+ .dev_name = "tosa-bl",
.type = "tosa-bl",
.addr = DAC_BASE,
.platform_data = data->spi,
@@ -121,7 +136,7 @@ static void tosa_lcd_tg_off(struct tosa_lcd_data *data)
mdelay(50);
/* TG Off */
- gpio_set_value(TOSA_GPIO_TG_ON, 1);
+ gpiod_set_value(data->gpiod_tg, 1);
mdelay(100);
}
@@ -191,10 +206,9 @@ static int tosa_lcd_probe(struct spi_device *spi)
data->spi = spi;
spi_set_drvdata(spi, data);
- ret = devm_gpio_request_one(&spi->dev, TOSA_GPIO_TG_ON,
- GPIOF_OUT_INIT_LOW, "tg #pwr");
- if (ret < 0)
- return ret;
+ data->gpiod_tg = devm_gpiod_get(&spi->dev, "tg #pwr", GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpiod_tg))
+ return PTR_ERR(data->gpiod_tg);
mdelay(60);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 1e70e838530e..aa9541bf964b 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2214,6 +2214,7 @@ config FB_HYPERV
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_DEFERRED_IO
help
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 6dda5d885a03..79d548746efd 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -48,7 +48,7 @@
******************************************************************************/
-
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -235,6 +235,13 @@ static int atyfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
static int atyfb_blank(int blank, struct fb_info *info);
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg);
+#ifdef CONFIG_COMPAT
+static int atyfb_compat_ioctl(struct fb_info *info, u_int cmd, u_long arg)
+{
+ return atyfb_ioctl(info, cmd, (u_long)compat_ptr(arg));
+}
+#endif
+
#ifdef __sparc__
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
#endif
@@ -290,6 +297,9 @@ static struct fb_ops atyfb_ops = {
.fb_pan_display = atyfb_pan_display,
.fb_blank = atyfb_blank,
.fb_ioctl = atyfb_ioctl,
+#ifdef CONFIG_COMPAT
+ .fb_compat_ioctl = atyfb_compat_ioctl,
+#endif
.fb_fillrect = atyfb_fillrect,
.fb_copyarea = atyfb_copyarea,
.fb_imageblit = atyfb_imageblit,
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index 2dc5703eac51..7c4483c7f313 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -2593,7 +2593,7 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
* calling pci_set_power_state()
*/
radeonfb_whack_power_state(rinfo, PCI_D2);
- __pci_complete_power_transition(rinfo->pdev, PCI_D2);
+ pci_platform_power_transition(rinfo->pdev, PCI_D2);
} else {
printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n",
pci_name(rinfo->pdev));
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index e6a1c805064f..6f6fc785b545 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1758,23 +1758,21 @@ EXPORT_SYMBOL(remove_conflicting_framebuffers);
/**
* remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
* @pdev: PCI device
- * @res_id: index of PCI BAR configuring framebuffer memory
* @name: requesting driver name
*
* This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for @pdev's BAR @res_id.
+ * using memory range configured for any of @pdev's memory bars.
*
* The function assumes that PCI device with shadowed ROM drives a primary
* display and so kicks out vga16fb.
*/
-int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const char *name)
+int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name)
{
struct apertures_struct *ap;
bool primary = false;
int err, idx, bar;
- bool res_id_found = false;
- for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
continue;
idx++;
@@ -1784,21 +1782,16 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const
if (!ap)
return -ENOMEM;
- for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
continue;
ap->ranges[idx].base = pci_resource_start(pdev, bar);
ap->ranges[idx].size = pci_resource_len(pdev, bar);
- pci_info(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
- (unsigned long)pci_resource_start(pdev, bar),
- (unsigned long)pci_resource_end(pdev, bar));
+ pci_dbg(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
+ (unsigned long)pci_resource_start(pdev, bar),
+ (unsigned long)pci_resource_end(pdev, bar));
idx++;
- if (res_id == bar)
- res_id_found = true;
}
- if (!res_id_found)
- pci_warn(pdev, "%s: passed res_id (%d) is not a memory bar\n",
- __func__, res_id);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags &
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 51d97ec4f58f..1caa3726cb45 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -653,7 +653,7 @@ static void efifb_fixup_resources(struct pci_dev *dev)
if (!base)
return;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *res = &dev->resource[i];
if (!(res->flags & IORESOURCE_MEM))
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 2dcb7c58b31e..4cd27e5172a1 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -23,6 +23,14 @@
*
* Portrait orientation is also supported:
* For example: video=hyperv_fb:864x1152
+ *
+ * When a Windows 10 RS5+ host is used, the virtual machine screen
+ * resolution is obtained from the host. The "video=hyperv_fb" option is
+ * not needed, but can still be used to override what the host specifies.
+ * The VM resolution on the host can be set with the PowerShell
+ * "set-vmvideo" command, for example:
+ * set-vmvideo -vmname name -horizontalresolution:1920 \
+ * -verticalresolution:1200 -resolutiontype single
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -34,6 +42,7 @@
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/efi.h>
+#include <linux/console.h>
#include <linux/hyperv.h>
@@ -44,6 +53,10 @@
#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
+#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
+
+#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
+#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
#define SYNTHVID_DEPTH_WIN7 16
#define SYNTHVID_DEPTH_WIN8 32
@@ -82,16 +95,25 @@ enum synthvid_msg_type {
SYNTHVID_POINTER_SHAPE = 8,
SYNTHVID_FEATURE_CHANGE = 9,
SYNTHVID_DIRT = 10,
+ SYNTHVID_RESOLUTION_REQUEST = 13,
+ SYNTHVID_RESOLUTION_RESPONSE = 14,
- SYNTHVID_MAX = 11
+ SYNTHVID_MAX = 15
};
+#define SYNTHVID_EDID_BLOCK_SIZE 128
+#define SYNTHVID_MAX_RESOLUTION_COUNT 64
+
+struct hvd_screen_info {
+ u16 width;
+ u16 height;
+} __packed;
+
struct synthvid_msg_hdr {
u32 type;
u32 size; /* size of this header + payload after this field*/
} __packed;
-
struct synthvid_version_req {
u32 version;
} __packed;
@@ -102,6 +124,19 @@ struct synthvid_version_resp {
u8 max_video_outputs;
} __packed;
+struct synthvid_supported_resolution_req {
+ u8 maximum_resolution_count;
+} __packed;
+
+struct synthvid_supported_resolution_resp {
+ u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
+ u8 resolution_count;
+ u8 default_resolution_index;
+ u8 is_standard;
+ struct hvd_screen_info
+ supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
+} __packed;
+
struct synthvid_vram_location {
u64 user_ctx;
u8 is_vram_gpa_specified;
@@ -187,6 +222,8 @@ struct synthvid_msg {
struct synthvid_pointer_shape ptr_shape;
struct synthvid_feature_change feature_chg;
struct synthvid_dirt dirt;
+ struct synthvid_supported_resolution_req resolution_req;
+ struct synthvid_supported_resolution_resp resolution_resp;
};
} __packed;
@@ -201,6 +238,7 @@ struct synthvid_msg {
#define RING_BUFSIZE (256 * 1024)
#define VSP_TIMEOUT (10 * HZ)
#define HVFB_UPDATE_DELAY (HZ / 20)
+#define HVFB_ONDEMAND_THROTTLE (HZ / 20)
struct hvfb_par {
struct fb_info *info;
@@ -211,6 +249,7 @@ struct hvfb_par {
struct delayed_work dwork;
bool update;
+ bool update_saved; /* The value of 'update' before hibernation */
u32 pseudo_palette[16];
u8 init_buf[MAX_VMBUS_PKT_SIZE];
@@ -220,12 +259,25 @@ struct hvfb_par {
bool synchronous_fb;
struct notifier_block hvfb_panic_nb;
+
+ /* Memory for deferred IO and frame buffer itself */
+ unsigned char *dio_vp;
+ unsigned char *mmio_vp;
+ unsigned long mmio_pp;
+
+ /* Dirty rectangle, protected by delayed_refresh_lock */
+ int x1, y1, x2, y2;
+ bool delayed_refresh;
+ spinlock_t delayed_refresh_lock;
};
static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
+static uint screen_width_max = HVFB_WIDTH;
+static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth;
static uint screen_fb_size;
+static uint dio_fb_size; /* FB size for deferred IO */
/* Send message to Hyper-V host */
static inline int synthvid_send(struct hv_device *hdev,
@@ -312,28 +364,88 @@ static int synthvid_send_ptr(struct hv_device *hdev)
}
/* Send updated screen area (dirty rectangle) location to host */
-static int synthvid_update(struct fb_info *info)
+static int
+synthvid_update(struct fb_info *info, int x1, int y1, int x2, int y2)
{
struct hv_device *hdev = device_to_hv_device(info->device);
struct synthvid_msg msg;
memset(&msg, 0, sizeof(struct synthvid_msg));
+ if (x2 == INT_MAX)
+ x2 = info->var.xres;
+ if (y2 == INT_MAX)
+ y2 = info->var.yres;
msg.vid_hdr.type = SYNTHVID_DIRT;
msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_dirt);
msg.dirt.video_output = 0;
msg.dirt.dirt_count = 1;
- msg.dirt.rect[0].x1 = 0;
- msg.dirt.rect[0].y1 = 0;
- msg.dirt.rect[0].x2 = info->var.xres;
- msg.dirt.rect[0].y2 = info->var.yres;
+ msg.dirt.rect[0].x1 = (x1 > x2) ? 0 : x1;
+ msg.dirt.rect[0].y1 = (y1 > y2) ? 0 : y1;
+ msg.dirt.rect[0].x2 =
+ (x2 < x1 || x2 > info->var.xres) ? info->var.xres : x2;
+ msg.dirt.rect[0].y2 =
+ (y2 < y1 || y2 > info->var.yres) ? info->var.yres : y2;
synthvid_send(hdev, &msg);
return 0;
}
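+/*
+ * Copy a region of the deferred-IO shadow buffer (dio_vp) into the mapped
+ * VRAM (mmio_vp); the range is clamped to the shadow buffer size.
+ */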
+static void hvfb_docopy(struct hvfb_par *par,
+ unsigned long offset,
+ unsigned long size)
+{
+ if (!par || !par->mmio_vp || !par->dio_vp || !par->fb_ready ||
+ size == 0 || offset >= dio_fb_size)
+ return;
+
+ if (offset + size > dio_fb_size)
+ size = dio_fb_size - offset;
+
+ memcpy(par->mmio_vp + offset, par->dio_vp + offset, size);
+}
+
+/* Deferred IO callback */
+static void synthvid_deferred_io(struct fb_info *p,
+ struct list_head *pagelist)
+{
+ struct hvfb_par *par = p->par;
+ struct page *page;
+ unsigned long start, end;
+ int y1, y2, miny, maxy;
+
+ miny = INT_MAX;
+ maxy = 0;
+
+ /*
+ * Merge dirty pages. It is possible that the last page crosses
+ * over the end of the frame buffer row (yres); this is handled in
+ * synthvid_update() by clamping the y2 value to yres.
+ */
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ y1 = start / p->fix.line_length;
+ y2 = end / p->fix.line_length;
+ miny = min_t(int, miny, y1);
+ maxy = max_t(int, maxy, y2);
+
+ /* Copy from dio space to mmio address */
+ if (par->fb_ready)
+ hvfb_docopy(par, start, PAGE_SIZE);
+ }
+
+ if (par->fb_ready && par->update)
+ synthvid_update(p, 0, miny, p->var.xres, maxy + 1);
+}
+
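+/*
+ * Deferred-IO parameters: dirty pages are coalesced for HZ/20 (50 ms)
+ * before synthvid_deferred_io() flushes them to the host.
+ */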
+static struct fb_deferred_io synthvid_defio = {
+ .delay = HZ / 20,
+ .deferred_io = synthvid_deferred_io,
+};
/*
* Actions on received messages from host:
@@ -354,6 +466,7 @@ static void synthvid_recv_sub(struct hv_device *hdev)
/* Complete the wait event */
if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
+ msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE);
complete(&par->wait);
@@ -400,6 +513,17 @@ static void synthvid_receive(void *ctx)
} while (bytes_recvd > 0 && ret == 0);
}
+/* Check whether ver1 is equal to or greater than ver2 */
+static inline bool synthvid_ver_ge(u32 ver1, u32 ver2)
+{
+ if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
+ (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
+ SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
+ return true;
+
+ return false;
+}
+
/* Check synthetic video protocol version with the host */
static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
{
@@ -428,6 +552,64 @@ static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
}
par->synthvid_version = ver;
+ pr_info("Synthvid Version major %d, minor %d\n",
+ SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
+
+out:
+ return ret;
+}
+
+/* Get current resolution from the host */
+static int synthvid_get_supported_resolution(struct hv_device *hdev)
+{
+ struct fb_info *info = hv_get_drvdata(hdev);
+ struct hvfb_par *par = info->par;
+ struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
+ int ret = 0;
+ unsigned long t;
+ u8 index;
+ int i;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
+ msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_supported_resolution_req);
+
+ msg->resolution_req.maximum_resolution_count =
+ SYNTHVID_MAX_RESOLUTION_COUNT;
+ synthvid_send(hdev, msg);
+
+ t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
+ if (!t) {
+ pr_err("Time out on waiting resolution response\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (msg->resolution_resp.resolution_count == 0) {
+ pr_err("No supported resolutions\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ index = msg->resolution_resp.default_resolution_index;
+ if (index >= msg->resolution_resp.resolution_count) {
+ pr_err("Invalid resolution index: %d\n", index);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
+ screen_width_max = max_t(unsigned int, screen_width_max,
+ msg->resolution_resp.supported_resolution[i].width);
+ screen_height_max = max_t(unsigned int, screen_height_max,
+ msg->resolution_resp.supported_resolution[i].height);
+ }
+
+ screen_width =
+ msg->resolution_resp.supported_resolution[index].width;
+ screen_height =
+ msg->resolution_resp.supported_resolution[index].height;
out:
return ret;
@@ -448,11 +630,27 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
}
/* Negotiate the protocol version with host */
- if (vmbus_proto_version == VERSION_WS2008 ||
- vmbus_proto_version == VERSION_WIN7)
- ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
- else
+ switch (vmbus_proto_version) {
+ case VERSION_WIN10:
+ case VERSION_WIN10_V5:
+ ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
+ if (!ret)
+ break;
+ /* Fallthrough */
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
+ if (!ret)
+ break;
+ /* Fallthrough */
+ case VERSION_WS2008:
+ case VERSION_WIN7:
+ ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
+ break;
+ default:
+ ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
+ break;
+ }
if (ret) {
pr_err("Synthetic video device version not accepted\n");
@@ -464,6 +662,12 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
else
screen_depth = SYNTHVID_DEPTH_WIN8;
+ if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) {
+ ret = synthvid_get_supported_resolution(hdev);
+ if (ret)
+ pr_info("Failed to get supported resolution from host, use default\n");
+ }
+
screen_fb_size = hdev->channel->offermsg.offer.
mmio_megabytes * 1024 * 1024;
@@ -488,7 +692,7 @@ static int synthvid_send_config(struct hv_device *hdev)
msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_vram_location);
- msg->vram.user_ctx = msg->vram.vram_gpa = info->fix.smem_start;
+ msg->vram.user_ctx = msg->vram.vram_gpa = par->mmio_pp;
msg->vram.is_vram_gpa_specified = 1;
synthvid_send(hdev, msg);
@@ -498,7 +702,7 @@ static int synthvid_send_config(struct hv_device *hdev)
ret = -ETIMEDOUT;
goto out;
}
- if (msg->vram_ack.user_ctx != info->fix.smem_start) {
+ if (msg->vram_ack.user_ctx != par->mmio_pp) {
pr_err("Unable to set VRAM location\n");
ret = -ENODEV;
goto out;
@@ -515,19 +719,77 @@ out:
/*
* Delayed work callback:
- * It is called at HVFB_UPDATE_DELAY or longer time interval to process
- * screen updates. It is re-scheduled if further update is necessary.
+ * It is scheduled whenever an update request is received and it has not
+ * run within the last HVFB_ONDEMAND_THROTTLE time interval.
*/
static void hvfb_update_work(struct work_struct *w)
{
struct hvfb_par *par = container_of(w, struct hvfb_par, dwork.work);
struct fb_info *info = par->info;
+ unsigned long flags;
+ int x1, x2, y1, y2;
+ int j;
+
+ spin_lock_irqsave(&par->delayed_refresh_lock, flags);
+ /* Reset the request flag */
+ par->delayed_refresh = false;
+
+ /* Store the dirty rectangle to local variables */
+ x1 = par->x1;
+ x2 = par->x2;
+ y1 = par->y1;
+ y2 = par->y2;
+
+ /* Clear dirty rectangle */
+ par->x1 = par->y1 = INT_MAX;
+ par->x2 = par->y2 = 0;
+
+ spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
+
+ if (x1 > info->var.xres || x2 > info->var.xres ||
+ y1 > info->var.yres || y2 > info->var.yres || x2 <= x1)
+ return;
+
+ /* Copy the dirty rectangle to frame buffer memory */
+ for (j = y1; j < y2; j++) {
+ hvfb_docopy(par,
+ j * info->fix.line_length +
+ (x1 * screen_depth / 8),
+ (x2 - x1) * screen_depth / 8);
+ }
+
+ /* Refresh */
+ if (par->fb_ready && par->update)
+ synthvid_update(info, x1, y1, x2, y2);
+}
- if (par->fb_ready)
- synthvid_update(info);
+/*
+ * Control the on-demand refresh frequency. It schedules a delayed
+ * screen update if it has not yet.
+ */
+static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
+ int x1, int y1, int w, int h)
+{
+ unsigned long flags;
+ int x2 = x1 + w;
+ int y2 = y1 + h;
+
+ spin_lock_irqsave(&par->delayed_refresh_lock, flags);
+
+ /* Merge dirty rectangle */
+ par->x1 = min_t(int, par->x1, x1);
+ par->y1 = min_t(int, par->y1, y1);
+ par->x2 = max_t(int, par->x2, x2);
+ par->y2 = max_t(int, par->y2, y2);
+
+ /* Schedule a delayed screen update if not yet */
+ if (par->delayed_refresh == false) {
+ schedule_delayed_work(&par->dwork,
+ HVFB_ONDEMAND_THROTTLE);
+ par->delayed_refresh = true;
+ }
- if (par->update)
- schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
+ spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
}
static int hvfb_on_panic(struct notifier_block *nb,
@@ -539,7 +801,8 @@ static int hvfb_on_panic(struct notifier_block *nb,
par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
par->synchronous_fb = true;
info = par->info;
- synthvid_update(info);
+ hvfb_docopy(par, 0, dio_fb_size);
+ synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
return NOTIFY_DONE;
}
@@ -600,7 +863,10 @@ static void hvfb_cfb_fillrect(struct fb_info *p,
cfb_fillrect(p, rect);
if (par->synchronous_fb)
- synthvid_update(p);
+ synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
+ else
+ hvfb_ondemand_refresh_throttle(par, rect->dx, rect->dy,
+ rect->width, rect->height);
}
static void hvfb_cfb_copyarea(struct fb_info *p,
@@ -610,7 +876,10 @@ static void hvfb_cfb_copyarea(struct fb_info *p,
cfb_copyarea(p, area);
if (par->synchronous_fb)
- synthvid_update(p);
+ synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
+ else
+ hvfb_ondemand_refresh_throttle(par, area->dx, area->dy,
+ area->width, area->height);
}
static void hvfb_cfb_imageblit(struct fb_info *p,
@@ -620,7 +889,10 @@ static void hvfb_cfb_imageblit(struct fb_info *p,
cfb_imageblit(p, image);
if (par->synchronous_fb)
- synthvid_update(p);
+ synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
+ else
+ hvfb_ondemand_refresh_throttle(par, image->dx, image->dy,
+ image->width, image->height);
}
static struct fb_ops hvfb_ops = {
@@ -653,6 +925,8 @@ static void hvfb_get_option(struct fb_info *info)
}
if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
+ (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
+ (x > screen_width_max || y > screen_height_max)) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
@@ -677,6 +951,9 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
resource_size_t pot_start, pot_end;
int ret;
+ dio_fb_size =
+ screen_width * screen_height * screen_depth / 8;
+
if (gen2vm) {
pot_start = 0;
pot_end = -1;
@@ -689,8 +966,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) < screen_fb_size)
+ pci_resource_len(pdev, 0) < screen_fb_size) {
+ pr_err("Resource not available or (0x%lx < 0x%lx)\n",
+ (unsigned long) pci_resource_len(pdev, 0),
+ (unsigned long) screen_fb_size);
goto err1;
+ }
pot_end = pci_resource_end(pdev, 0);
pot_start = pot_end - screen_fb_size + 1;
@@ -707,9 +988,14 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
if (!fb_virt)
goto err2;
+ /* Allocate memory for deferred IO */
+ par->dio_vp = vzalloc(round_up(dio_fb_size, PAGE_SIZE));
+ if (par->dio_vp == NULL)
+ goto err3;
+
info->apertures = alloc_apertures(1);
if (!info->apertures)
- goto err3;
+ goto err4;
if (gen2vm) {
info->apertures->ranges[0].base = screen_info.lfb_base;
@@ -721,16 +1007,23 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
}
+ /* Physical address of FB device */
+ par->mmio_pp = par->mem->start;
+ /* Virtual address of FB device */
+ par->mmio_vp = (unsigned char *) fb_virt;
+
info->fix.smem_start = par->mem->start;
- info->fix.smem_len = screen_fb_size;
- info->screen_base = fb_virt;
- info->screen_size = screen_fb_size;
+ info->fix.smem_len = dio_fb_size;
+ info->screen_base = par->dio_vp;
+ info->screen_size = dio_fb_size;
if (!gen2vm)
pci_dev_put(pdev);
return 0;
+err4:
+ vfree(par->dio_vp);
err3:
iounmap(fb_virt);
err2:
@@ -748,6 +1041,7 @@ static void hvfb_putmem(struct fb_info *info)
{
struct hvfb_par *par = info->par;
+ vfree(par->dio_vp);
iounmap(info->screen_base);
vmbus_free_mmio(par->mem->start, screen_fb_size);
par->mem = NULL;
@@ -771,6 +1065,11 @@ static int hvfb_probe(struct hv_device *hdev,
init_completion(&par->wait);
INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
+ par->delayed_refresh = false;
+ spin_lock_init(&par->delayed_refresh_lock);
+ par->x1 = par->y1 = INT_MAX;
+ par->x2 = par->y2 = 0;
+
/* Connect to VSP */
hv_set_drvdata(hdev, info);
ret = synthvid_connect_vsp(hdev);
@@ -779,17 +1078,16 @@ static int hvfb_probe(struct hv_device *hdev,
goto error1;
}
+ hvfb_get_option(info);
+ pr_info("Screen resolution: %dx%d, Color depth: %d\n",
+ screen_width, screen_height, screen_depth);
+
ret = hvfb_getmem(hdev, info);
if (ret) {
pr_err("No memory for framebuffer\n");
goto error2;
}
- hvfb_get_option(info);
- pr_info("Screen resolution: %dx%d, Color depth: %d\n",
- screen_width, screen_height, screen_depth);
-
-
/* Set up fb_info */
info->flags = FBINFO_DEFAULT;
@@ -823,6 +1121,10 @@ static int hvfb_probe(struct hv_device *hdev,
info->fbops = &hvfb_ops;
info->pseudo_palette = par->pseudo_palette;
+ /* Initialize deferred IO */
+ info->fbdefio = &synthvid_defio;
+ fb_deferred_io_init(info);
+
/* Send config to host */
ret = synthvid_send_config(hdev);
if (ret)
@@ -844,6 +1146,7 @@ static int hvfb_probe(struct hv_device *hdev,
return 0;
error:
+ fb_deferred_io_cleanup(info);
hvfb_putmem(info);
error2:
vmbus_close(hdev->channel);
@@ -866,6 +1169,8 @@ static int hvfb_remove(struct hv_device *hdev)
par->update = false;
par->fb_ready = false;
+ fb_deferred_io_cleanup(info);
+
unregister_framebuffer(info);
cancel_delayed_work_sync(&par->dwork);
@@ -878,6 +1183,61 @@ static int hvfb_remove(struct hv_device *hdev)
return 0;
}
+static int hvfb_suspend(struct hv_device *hdev)
+{
+ struct fb_info *info = hv_get_drvdata(hdev);
+ struct hvfb_par *par = info->par;
+
+ console_lock();
+
+ /* 1 means do suspend */
+ fb_set_suspend(info, 1);
+
+ cancel_delayed_work_sync(&par->dwork);
+
+ par->update_saved = par->update;
+ par->update = false;
+ par->fb_ready = false;
+
+ vmbus_close(hdev->channel);
+
+ console_unlock();
+
+ return 0;
+}
+
+static int hvfb_resume(struct hv_device *hdev)
+{
+ struct fb_info *info = hv_get_drvdata(hdev);
+ struct hvfb_par *par = info->par;
+ int ret;
+
+ console_lock();
+
+ ret = synthvid_connect_vsp(hdev);
+ if (ret != 0)
+ goto out;
+
+ ret = synthvid_send_config(hdev);
+ if (ret != 0) {
+ vmbus_close(hdev->channel);
+ goto out;
+ }
+
+ par->fb_ready = true;
+ par->update = par->update_saved;
+
+ schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
+
+ /* 0 means do resume */
+ fb_set_suspend(info, 0);
+
+out:
+ console_unlock();
+
+ return ret;
+}
+
static const struct pci_device_id pci_stub_id_table[] = {
{
@@ -901,6 +1261,8 @@ static struct hv_driver hvfb_drv = {
.id_table = id_table,
.probe = hvfb_probe,
.remove = hvfb_remove,
+ .suspend = hvfb_suspend,
+ .resume = hvfb_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/video/fbdev/matrox/i2c-matroxfb.c b/drivers/video/fbdev/matrox/i2c-matroxfb.c
index 34e2659c3189..e2e4705e3fe0 100644
--- a/drivers/video/fbdev/matrox/i2c-matroxfb.c
+++ b/drivers/video/fbdev/matrox/i2c-matroxfb.c
@@ -191,8 +191,8 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
0x1b, I2C_CLIENT_END
};
- i2c_new_probed_device(&m2info->maven.adapter,
- &maven_info, addr_list, NULL);
+ i2c_new_scanned_device(&m2info->maven.adapter,
+ &maven_info, addr_list, NULL);
}
}
return m2info;
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index ae2bcfee338a..81ad3aa1ca06 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -968,19 +968,6 @@ static void sa1100fb_task(struct work_struct *w)
#ifdef CONFIG_CPU_FREQ
/*
- * Calculate the minimum DMA period over all displays that we own.
- * This, together with the SDRAM bandwidth defines the slowest CPU
- * frequency that can be selected.
- */
-static unsigned int sa1100fb_min_dma_period(struct sa1100fb_info *fbi)
-{
- /*
- * FIXME: we need to verify _all_ consoles.
- */
- return sa1100fb_display_dma_period(&fbi->fb.var);
-}
-
-/*
* CPU clock speed change handler. We need to adjust the LCD timing
* parameters when the CPU clock is adjusted by the power management
* subsystem.
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index b939bc28d886..9c82e2a0a411 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -1576,12 +1576,12 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
if (ptr[0] & 0x10)
frame->active_aspect = ptr[1] & 0xf;
if (ptr[0] & 0x8) {
- frame->top_bar = (ptr[5] << 8) + ptr[6];
- frame->bottom_bar = (ptr[7] << 8) + ptr[8];
+ frame->top_bar = (ptr[6] << 8) | ptr[5];
+ frame->bottom_bar = (ptr[8] << 8) | ptr[7];
}
if (ptr[0] & 0x4) {
- frame->left_bar = (ptr[9] << 8) + ptr[10];
- frame->right_bar = (ptr[11] << 8) + ptr[12];
+ frame->left_bar = (ptr[10] << 8) | ptr[9];
+ frame->right_bar = (ptr[12] << 8) | ptr[11];
}
frame->scan_mode = ptr[0] & 0x3;
diff --git a/drivers/video/logo/.gitignore b/drivers/video/logo/.gitignore
index e48355f538fa..9dda1b26b2e4 100644
--- a/drivers/video/logo/.gitignore
+++ b/drivers/video/logo/.gitignore
@@ -5,3 +5,4 @@
*_vga16.c
*_clut224.c
*_gray256.c
+pnmtologo
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 16f60c1e1766..bcda657493a4 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -18,24 +18,19 @@ obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
# How to generate logo's
-pnmtologo := scripts/pnmtologo
+hostprogs-y := pnmtologo
# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
quiet_cmd_logo = LOGO $@
- cmd_logo = $(pnmtologo) \
- -t $(patsubst $*_%,%,$(notdir $(basename $<))) \
- -n $(notdir $(basename $<)) -o $@ $<
+ cmd_logo = $(obj)/pnmtologo -t $(lastword $(subst _, ,$*)) -n $* -o $@ $<
-$(obj)/%_mono.c: $(src)/%_mono.pbm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.pbm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
-$(obj)/%_vga16.c: $(src)/%_vga16.ppm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.ppm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
-$(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
- $(call if_changed,logo)
-
-$(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.pgm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
# generated C files
diff --git a/drivers/video/logo/pnmtologo.c b/drivers/video/logo/pnmtologo.c
new file mode 100644
index 000000000000..4718d7895f0b
--- /dev/null
+++ b/drivers/video/logo/pnmtologo.c
@@ -0,0 +1,514 @@
+
+/*
+ * Convert a logo in ASCII PNM format to C source suitable for inclusion in
+ * the Linux kernel
+ *
+ * (C) Copyright 2001-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * --------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+
+static const char *programname;
+static const char *filename;
+static const char *logoname = "linux_logo";
+static const char *outputname;
+static FILE *out;
+
+
+#define LINUX_LOGO_MONO 1 /* monochrome black/white */
+#define LINUX_LOGO_VGA16 2 /* 16 colors VGA text palette */
+#define LINUX_LOGO_CLUT224 3 /* 224 colors */
+#define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */
+
+static const char *logo_types[LINUX_LOGO_GRAY256+1] = {
+ [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO",
+ [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16",
+ [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224",
+ [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256"
+};
+
+#define MAX_LINUX_LOGO_COLORS 224
+
+struct color {
+ unsigned char red;
+ unsigned char green;
+ unsigned char blue;
+};
+
+static const struct color clut_vga16[16] = {
+ { 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0xaa },
+ { 0x00, 0xaa, 0x00 },
+ { 0x00, 0xaa, 0xaa },
+ { 0xaa, 0x00, 0x00 },
+ { 0xaa, 0x00, 0xaa },
+ { 0xaa, 0x55, 0x00 },
+ { 0xaa, 0xaa, 0xaa },
+ { 0x55, 0x55, 0x55 },
+ { 0x55, 0x55, 0xff },
+ { 0x55, 0xff, 0x55 },
+ { 0x55, 0xff, 0xff },
+ { 0xff, 0x55, 0x55 },
+ { 0xff, 0x55, 0xff },
+ { 0xff, 0xff, 0x55 },
+ { 0xff, 0xff, 0xff },
+};
+
+
+static int logo_type = LINUX_LOGO_CLUT224;
+static unsigned int logo_width;
+static unsigned int logo_height;
+static struct color **logo_data;
+static struct color logo_clut[MAX_LINUX_LOGO_COLORS];
+static unsigned int logo_clutsize;
+static int is_plain_pbm = 0;
+
+static void die(const char *fmt, ...)
+ __attribute__ ((noreturn)) __attribute ((format (printf, 1, 2)));
+static void usage(void) __attribute ((noreturn));
+
+
+static unsigned int get_number(FILE *fp)
+{
+ int c, val;
+
+ /* Skip leading whitespace */
+ do {
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
+ if (c == '#') {
+ /* Ignore comments 'till end of line */
+ do {
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
+ } while (c != '\n');
+ }
+ } while (isspace(c));
+
+ /* Parse decimal number */
+ val = 0;
+ while (isdigit(c)) {
+ val = 10*val+c-'0';
+ /* Some PBMs are 'broken'; GIMP, for example, exports a PBM without
+ * spaces between the digits. This is OK because a plain PBM can only
+ * contain a '1' or a '0' for each digit. */
+ if (is_plain_pbm)
+ break;
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
+ }
+ return val;
+}
+
+static unsigned int get_number255(FILE *fp, unsigned int maxval)
+{
+ unsigned int val = get_number(fp);
+ return (255*val+maxval/2)/maxval;
+}
+
+static void read_image(void)
+{
+ FILE *fp;
+ unsigned int i, j;
+ int magic;
+ unsigned int maxval;
+
+ /* open image file */
+ fp = fopen(filename, "r");
+ if (!fp)
+ die("Cannot open file %s: %s\n", filename, strerror(errno));
+
+ /* check file type and read file header */
+ magic = fgetc(fp);
+ if (magic != 'P')
+ die("%s is not a PNM file\n", filename);
+ magic = fgetc(fp);
+ switch (magic) {
+ case '1':
+ case '2':
+ case '3':
+ /* Plain PBM/PGM/PPM */
+ break;
+
+ case '4':
+ case '5':
+ case '6':
+ /* Binary PBM/PGM/PPM */
+ die("%s: Binary PNM is not supported\n"
+ "Use pnmnoraw(1) to convert it to ASCII PNM\n", filename);
+
+ default:
+ die("%s is not a PNM file\n", filename);
+ }
+ logo_width = get_number(fp);
+ logo_height = get_number(fp);
+
+ /* allocate image data */
+ logo_data = (struct color **)malloc(logo_height*sizeof(struct color *));
+ if (!logo_data)
+ die("%s\n", strerror(errno));
+ for (i = 0; i < logo_height; i++) {
+ logo_data[i] = malloc(logo_width*sizeof(struct color));
+ if (!logo_data[i])
+ die("%s\n", strerror(errno));
+ }
+
+ /* read image data */
+ switch (magic) {
+ case '1':
+ /* Plain PBM */
+ is_plain_pbm = 1;
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ logo_data[i][j].red = logo_data[i][j].green =
+ logo_data[i][j].blue = 255*(1-get_number(fp));
+ break;
+
+ case '2':
+ /* Plain PGM */
+ maxval = get_number(fp);
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ logo_data[i][j].red = logo_data[i][j].green =
+ logo_data[i][j].blue = get_number255(fp, maxval);
+ break;
+
+ case '3':
+ /* Plain PPM */
+ maxval = get_number(fp);
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ logo_data[i][j].red = get_number255(fp, maxval);
+ logo_data[i][j].green = get_number255(fp, maxval);
+ logo_data[i][j].blue = get_number255(fp, maxval);
+ }
+ break;
+ }
+
+ /* close file */
+ fclose(fp);
+}
+
+static inline int is_black(struct color c)
+{
+ return c.red == 0 && c.green == 0 && c.blue == 0;
+}
+
+static inline int is_white(struct color c)
+{
+ return c.red == 255 && c.green == 255 && c.blue == 255;
+}
+
+static inline int is_gray(struct color c)
+{
+ return c.red == c.green && c.red == c.blue;
+}
+
+static inline int is_equal(struct color c1, struct color c2)
+{
+ return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue;
+}
+
+static void write_header(void)
+{
+ /* open logo file */
+ if (outputname) {
+ out = fopen(outputname, "w");
+ if (!out)
+ die("Cannot create file %s: %s\n", outputname, strerror(errno));
+ } else {
+ out = stdout;
+ }
+
+ fputs("/*\n", out);
+ fputs(" * DO NOT EDIT THIS FILE!\n", out);
+ fputs(" *\n", out);
+ fprintf(out, " * It was automatically generated from %s\n", filename);
+ fputs(" *\n", out);
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+ fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
+ logoname);
+}
+
+static void write_footer(void)
+{
+ fputs("\n};\n\n", out);
+ fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
+ fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+ fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+ if (logo_type == LINUX_LOGO_CLUT224) {
+ fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
+ fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
+ }
+ fprintf(out, "\t.data\t\t= %s_data\n", logoname);
+ fputs("};\n\n", out);
+
+ /* close logo file */
+ if (outputname)
+ fclose(out);
+}
+
+static int write_hex_cnt;
+
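+/* Emit one byte as "0x%02x", twelve values per output line */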
+static void write_hex(unsigned char byte)
+{
+ if (write_hex_cnt % 12)
+ fprintf(out, ", 0x%02x", byte);
+ else if (write_hex_cnt)
+ fprintf(out, ",\n\t0x%02x", byte);
+ else
+ fprintf(out, "\t0x%02x", byte);
+ write_hex_cnt++;
+}
+
+static void write_logo_mono(void)
+{
+ unsigned int i, j;
+ unsigned char val, bit;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j]))
+ die("Image must be monochrome\n");
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++) {
+ for (j = 0; j < logo_width;) {
+ for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1)
+ if (logo_data[i][j].red)
+ val |= bit;
+ write_hex(val);
+ }
+ }
+
+ /* write logo structure and file footer */
+ write_footer();
+}
+
+static void write_logo_vga16(void)
+{
+ unsigned int i, j, k;
+ unsigned char val;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ if (k == 16)
+ die("Image must use the 16 console colors only\n"
+ "Use ppmquant(1) -map clut_vga16.ppm to reduce the number "
+ "of colors\n");
+ }
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
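+ /* Pack two 4-bit palette indices per byte: this pixel in the high nibble */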
+ val = k<<4;
+ if (++j < logo_width) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ val |= k;
+ }
+ write_hex(val);
+ }
+
+ /* write logo structure and file footer */
+ write_footer();
+}
+
+static void write_logo_clut224(void)
+{
+ unsigned int i, j, k;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < logo_clutsize; k++)
+ if (is_equal(logo_data[i][j], logo_clut[k]))
+ break;
+ if (k == logo_clutsize) {
+ if (logo_clutsize == MAX_LINUX_LOGO_COLORS)
+ die("Image has more than %d colors\n"
+ "Use ppmquant(1) to reduce the number of colors\n",
+ MAX_LINUX_LOGO_COLORS);
+ logo_clut[logo_clutsize++] = logo_data[i][j];
+ }
+ }
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < logo_clutsize; k++)
+ if (is_equal(logo_data[i][j], logo_clut[k]))
+ break;
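+ /* Pixel values are offset by 32; lower indices are reserved for the console palette */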
+ write_hex(k+32);
+ }
+ fputs("\n};\n\n", out);
+
+ /* write logo clut */
+ fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
+ logoname);
+ write_hex_cnt = 0;
+ for (i = 0; i < logo_clutsize; i++) {
+ write_hex(logo_clut[i].red);
+ write_hex(logo_clut[i].green);
+ write_hex(logo_clut[i].blue);
+ }
+
+ /* write logo structure and file footer */
+ write_footer();
+}
+
+static void write_logo_gray256(void)
+{
+ unsigned int i, j;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ if (!is_gray(logo_data[i][j]))
+ die("Image must be grayscale\n");
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ write_hex(logo_data[i][j].red);
+
+ /* write logo structure and file footer */
+ write_footer();
+}
+
+static void die(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+
+ exit(1);
+}
+
+static void usage(void)
+{
+ die("\n"
+ "Usage: %s [options] <filename>\n"
+ "\n"
+ "Valid options:\n"
+ " -h : display this usage information\n"
+ " -n <name> : specify logo name (default: linux_logo)\n"
+ " -o <output> : output to file <output> instead of stdout\n"
+ " -t <type> : specify logo type, one of\n"
+ " mono : monochrome black/white\n"
+ " vga16 : 16 colors VGA text palette\n"
+ " clut224 : 224 colors (default)\n"
+ " gray256 : 256 levels grayscale\n"
+ "\n", programname);
+}
+
+int main(int argc, char *argv[])
+{
+ int opt;
+
+ programname = argv[0];
+
+ opterr = 0;
+ while (1) {
+ opt = getopt(argc, argv, "hn:o:t:");
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'h':
+ usage();
+ break;
+
+ case 'n':
+ logoname = optarg;
+ break;
+
+ case 'o':
+ outputname = optarg;
+ break;
+
+ case 't':
+ if (!strcmp(optarg, "mono"))
+ logo_type = LINUX_LOGO_MONO;
+ else if (!strcmp(optarg, "vga16"))
+ logo_type = LINUX_LOGO_VGA16;
+ else if (!strcmp(optarg, "clut224"))
+ logo_type = LINUX_LOGO_CLUT224;
+ else if (!strcmp(optarg, "gray256"))
+ logo_type = LINUX_LOGO_GRAY256;
+ else
+ usage();
+ break;
+
+ default:
+ usage();
+ break;
+ }
+ }
+ if (optind != argc-1)
+ usage();
+
+ filename = argv[optind];
+
+ read_image();
+ switch (logo_type) {
+ case LINUX_LOGO_MONO:
+ write_logo_mono();
+ break;
+
+ case LINUX_LOGO_VGA16:
+ write_logo_vga16();
+ break;
+
+ case LINUX_LOGO_CLUT224:
+ write_logo_clut224();
+ break;
+
+ case LINUX_LOGO_GRAY256:
+ write_logo_gray256();
+ break;
+ }
+ exit(0);
+}
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 93d5bebf9572..1b0b11b55d2a 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -706,7 +706,7 @@ static const struct file_operations fsl_hv_fops = {
.poll = fsl_hv_poll,
.read = fsl_hv_read,
.unlocked_ioctl = fsl_hv_ioctl,
- .compat_ioctl = fsl_hv_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice fsl_hv_misc_dev = {
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
index 1b2d96b945be..e8c7fa68d3cc 100644
--- a/drivers/w1/masters/sgi_w1.c
+++ b/drivers/w1/masters/sgi_w1.c
@@ -77,15 +77,13 @@ static int sgi_w1_probe(struct platform_device *pdev)
{
struct sgi_w1_device *sdev;
struct sgi_w1_platform_data *pdata;
- struct resource *res;
sdev = devm_kzalloc(&pdev->dev, sizeof(struct sgi_w1_device),
GFP_KERNEL);
if (!sdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->mcr = devm_ioremap_resource(&pdev->dev, res);
+ sdev->mcr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->mcr))
return PTR_ERR(sdev->mcr);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index b7847636501d..687753889c34 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -74,6 +74,14 @@ config W1_SLAVE_DS2805
organized as 7 pages of 16 bytes each with 64bit
unique number. Requires OverDrive Speed to talk to.
+config W1_SLAVE_DS2430
+ tristate "256b EEPROM family support (DS2430)"
+ help
+ Say Y here if you want to use a 1-wire 256bit EEPROM
+ family device (DS2430).
+ This EEPROM is organized as one page of 32 bytes for random
+ access.
+
config W1_SLAVE_DS2431
tristate "1kb EEPROM family support (DS2431)"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 8e9655eaa478..278bcf2a9bfd 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
+obj-$(CONFIG_W1_SLAVE_DS2430) += w1_ds2430.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
new file mode 100644
index 000000000000..6fb0563fb2ae
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * w1_ds2430.c - w1 family 14 (DS2430) driver
+ *
+ * Copyright (c) 2019 Angelo Dureghello <angelo.dureghello@timesys.com>
+ *
+ * Cloned and modified from ds2431
+ * Copyright (c) 2008 Bernhard Weirich <bernhard.weirich@riedel.net>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/w1.h>
+
+#define W1_EEPROM_DS2430 0x14
+
+#define W1_F14_EEPROM_SIZE 32
+#define W1_F14_PAGE_COUNT 1
+#define W1_F14_PAGE_BITS 5
+#define W1_F14_PAGE_SIZE (1 << W1_F14_PAGE_BITS)
+#define W1_F14_PAGE_MASK 0x1F
+
+#define W1_F14_SCRATCH_BITS 5
+#define W1_F14_SCRATCH_SIZE (1 << W1_F14_SCRATCH_BITS)
+#define W1_F14_SCRATCH_MASK (W1_F14_SCRATCH_SIZE-1)
+
+#define W1_F14_READ_EEPROM 0xF0
+#define W1_F14_WRITE_SCRATCH 0x0F
+#define W1_F14_READ_SCRATCH 0xAA
+#define W1_F14_COPY_SCRATCH 0x55
+#define W1_F14_VALIDATION_KEY 0xa5
+
+#define W1_F14_TPROG_MS 11
+#define W1_F14_READ_RETRIES 10
+#define W1_F14_READ_MAXLEN W1_F14_SCRATCH_SIZE
+
+/*
+ * Checks the file size bounds and adjusts count as needed.
+ * This would not be needed if the file size didn't reset to 0 after a write.
+ */
+static inline size_t w1_f14_fix_count(loff_t off, size_t count, size_t size)
+{
+ if (off > size)
+ return 0;
+
+ if ((off + count) > size)
+ return size - off;
+
+ return count;
+}
+
+/*
+ * Reads a block from the W1 EEPROM twice and compares the results.
+ * If they match, the data is returned; otherwise the read is
+ * repeated up to W1_F14_READ_RETRIES times.
+ *
+ * count must not exceed W1_F14_READ_MAXLEN.
+ */
+static int w1_f14_readblock(struct w1_slave *sl, int off, int count, char *buf)
+{
+ u8 wrbuf[2];
+ u8 cmp[W1_F14_READ_MAXLEN];
+ int tries = W1_F14_READ_RETRIES;
+
+ do {
+ wrbuf[0] = W1_F14_READ_EEPROM;
+ wrbuf[1] = off & 0xff;
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_read_block(sl->master, buf, count);
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_read_block(sl->master, cmp, count);
+
+ if (!memcmp(cmp, buf, count))
+ return 0;
+ } while (--tries);
+
+ dev_err(&sl->dev, "proof reading failed %d times\n",
+ W1_F14_READ_RETRIES);
+
+ return -1;
+}
+
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int todo = count;
+
+ count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* read directly from the EEPROM in chunks of W1_F14_READ_MAXLEN */
+ while (todo > 0) {
+ int block_read;
+
+ if (todo >= W1_F14_READ_MAXLEN)
+ block_read = W1_F14_READ_MAXLEN;
+ else
+ block_read = todo;
+
+ if (w1_f14_readblock(sl, off, block_read, buf) < 0)
+ count = -EIO;
+
+ todo -= W1_F14_READ_MAXLEN;
+ buf += W1_F14_READ_MAXLEN;
+ off += W1_F14_READ_MAXLEN;
+ }
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
+
+/*
+ * Writes to the scratchpad and reads it back for verification.
+ * Then copies the scratchpad to EEPROM.
+ * The data must be aligned to W1_F14_SCRATCH_SIZE bytes and
+ * must be W1_F14_SCRATCH_SIZE bytes long.
+ * The master must be locked.
+ *
+ * @param sl The slave structure
+ * @param addr Address for the write
+ * @param len length must be <= (W1_F14_PAGE_SIZE - (addr & W1_F14_PAGE_MASK))
+ * @param data The data to write
+ * @return 0=Success -1=failure
+ */
+static int w1_f14_write(struct w1_slave *sl, int addr, int len, const u8 *data)
+{
+ int tries = W1_F14_READ_RETRIES;
+ u8 wrbuf[2];
+ u8 rdbuf[W1_F14_SCRATCH_SIZE + 3];
+
+retry:
+
+ /* Write the data to the scratchpad */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F14_WRITE_SCRATCH;
+ wrbuf[1] = addr & 0xff;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_write_block(sl->master, data, len);
+
+ /* Read the scratchpad and verify */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_8(sl->master, W1_F14_READ_SCRATCH);
+ w1_read_block(sl->master, rdbuf, len + 2);
+
+ /*
+ * Compare what was read against the data written.
+ * Note: on read scratchpad, the device returns two 0xff filler bytes
+ * first, which are discarded here.
+ */
+ if ((memcmp(data, &rdbuf[2], len) != 0)) {
+
+ if (--tries)
+ goto retry;
+
+ dev_err(&sl->dev,
+ "could not write to eeprom, scratchpad compare failed %d times\n",
+ W1_F14_READ_RETRIES);
+
+ return -1;
+ }
+
+ /* Copy the scratchpad to EEPROM */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F14_COPY_SCRATCH;
+ wrbuf[1] = W1_F14_VALIDATION_KEY;
+ w1_write_block(sl->master, wrbuf, 2);
+
+ /* Sleep for tprog ms to wait for the write to complete */
+ msleep(W1_F14_TPROG_MS);
+
+ /* Reset the bus to wake up the EEPROM */
+ w1_reset_bus(sl->master);
+
+ return 0;
+}
+
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int addr, len;
+ int copy;
+
+ count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Can only write data in blocks of the size of the scratchpad */
+ addr = off;
+ len = count;
+ while (len > 0) {
+
+ /* if len too short or addr not aligned */
+ if (len < W1_F14_SCRATCH_SIZE || addr & W1_F14_SCRATCH_MASK) {
+ char tmp[W1_F14_SCRATCH_SIZE];
+
+ /* read the block and update the parts to be written */
+ if (w1_f14_readblock(sl, addr & ~W1_F14_SCRATCH_MASK,
+ W1_F14_SCRATCH_SIZE, tmp)) {
+ count = -EIO;
+ goto out_up;
+ }
+
+ /* copy at most to the boundary of the PAGE or len */
+ copy = W1_F14_SCRATCH_SIZE -
+ (addr & W1_F14_SCRATCH_MASK);
+
+ if (copy > len)
+ copy = len;
+
+ memcpy(&tmp[addr & W1_F14_SCRATCH_MASK], buf, copy);
+ if (w1_f14_write(sl, addr & ~W1_F14_SCRATCH_MASK,
+ W1_F14_SCRATCH_SIZE, tmp) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ } else {
+
+ copy = W1_F14_SCRATCH_SIZE;
+ if (w1_f14_write(sl, addr, copy, buf) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ }
+ buf += copy;
+ addr += copy;
+ len -= copy;
+ }
+
+out_up:
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
+
+static BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
+
+static struct bin_attribute *w1_f14_bin_attrs[] = {
+ &bin_attr_eeprom,
+ NULL,
+};
+
+static const struct attribute_group w1_f14_group = {
+ .bin_attrs = w1_f14_bin_attrs,
+};
+
+static const struct attribute_group *w1_f14_groups[] = {
+ &w1_f14_group,
+ NULL,
+};
+
+static struct w1_family_ops w1_f14_fops = {
+ .groups = w1_f14_groups,
+};
+
+static struct w1_family w1_family_14 = {
+ .fid = W1_EEPROM_DS2430,
+ .fops = &w1_f14_fops,
+};
+module_w1_family(w1_family_14);
+
+MODULE_AUTHOR("Angelo Dureghello <angelo.dureghello@timesys.com>");
+MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256b EEPROM");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2430));
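
The BIN_ATTR_RW(eeprom, ...) attribute above exposes the whole DS2430 array as a binary sysfs file on the slave device, so the read/write paths in this driver are reached through plain file I/O. A minimal userspace sketch, assuming the usual w1 sysfs layout and a hypothetical slave ID:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];	/* W1_F14_EEPROM_SIZE bytes on a DS2430 */
	ssize_t n, i;
	int fd;

	/* the slave ID below is an example only */
	fd = open("/sys/bus/w1/devices/14-000001234567/eeprom", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));	/* serviced by eeprom_read() above */
	close(fd);
	for (i = 0; i < n; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}

A write to the same file goes through eeprom_write(), which performs the scratchpad-sized read-modify-write for unaligned offsets.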
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 58e7c100b6ad..1679e0dc869b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1485,6 +1485,7 @@ config W83627HF_WDT
NCT6791
NCT6792
NCT6102D/04D/06D
+ NCT6116D
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
@@ -1641,8 +1642,10 @@ config INDYDOG
config JZ4740_WDT
tristate "Ingenic jz4740 SoC hardware watchdog"
- depends on MACH_JZ4740 || MACH_JZ4780
+ depends on MIPS
+ depends on COMMON_CLK
select WATCHDOG_CORE
+ select MFD_SYSCON
help
Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 848db958411e..bc6f333565d3 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -221,6 +221,7 @@ static const struct file_operations acq_fops = {
.llseek = no_llseek,
.write = acq_write,
.unlocked_ioctl = acq_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = acq_open,
.release = acq_close,
};
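
Nearly all of the watchdog hunks that follow are the same one-line change: hooking .compat_ioctl up to the generic compat_ptr_ioctl() helper so 32-bit userspace can drive these character devices on a 64-bit kernel. For drivers like these, whose ioctl arguments are pointers to types with identical 32/64-bit layout, the helper is sufficient. Roughly (a sketch of the helper provided by fs/ioctl.c, not part of this patch), it does:

static long compat_ptr_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;
	/* translate the 32-bit user pointer, then reuse the native handler */
	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}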
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index 0d02bb275b3d..0e4c18a2aa42 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -220,6 +220,7 @@ static const struct file_operations advwdt_fops = {
.llseek = no_llseek,
.write = advwdt_write,
.unlocked_ioctl = advwdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = advwdt_open,
.release = advwdt_close,
};
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index c157dd3d92a3..42338c7d4540 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -362,6 +362,7 @@ static const struct file_operations ali_fops = {
.llseek = no_llseek,
.write = ali_write,
.unlocked_ioctl = ali_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ali_open,
.release = ali_release,
};
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index c8e3ab056767..5af0358f4390 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -294,6 +294,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 668a1c704f28..c087027ffd5d 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -250,6 +250,7 @@ static const struct file_operations ar7_wdt_fops = {
.owner = THIS_MODULE,
.write = ar7_wdt_write,
.unlocked_ioctl = ar7_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ar7_wdt_open,
.release = ar7_wdt_release,
.llseek = no_llseek,
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 4ec0906bf12c..7e00960651fa 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -258,11 +258,6 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- /*
- * The ast2400 wdt can run at PCLK, or 1MHz. The ast2500 only
- * runs at 1MHz. We chose to always run at 1MHz, as there's no
- * good reason to have a faster watchdog counter.
- */
wdt->wdd.info = &aspeed_wdt_info;
wdt->wdd.ops = &aspeed_wdt_ops;
wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
@@ -278,7 +273,16 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
return -EINVAL;
config = ofdid->data;
- wdt->ctrl = WDT_CTRL_1MHZ_CLK;
+ /*
+ * On clock rates:
+ * - ast2400 wdt can run at PCLK, or 1MHz
+ * - ast2500 only runs at 1MHz, hard coding bit 4 to 1
+ * - ast2600 always runs at 1MHz
+ *
+ * Set the ast2400 to run at 1MHz as it simplifies the driver.
+ */
+ if (of_device_is_compatible(np, "aspeed,ast2400-wdt"))
+ wdt->ctrl = WDT_CTRL_1MHZ_CLK;
/*
* Control reset on a per-device basis to ensure the
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 907a4545dee6..6d751eb8191d 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -213,6 +213,7 @@ static const struct file_operations at91wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = at91_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = at91_wdt_open,
.release = at91_wdt_close,
.write = at91_wdt_write,
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index 390941c65eee..abfe34dd760a 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -4,33 +4,37 @@
*
* Copyright (C) 2007 Andrew Victor
* Copyright (C) 2007 Atmel Corporation.
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
*
 * Watchdog Timer (WDT) - System peripherals registers.
* Based on AT91SAM9261 datasheet revision D.
+ * Based on SAM9X60 datasheet.
*
*/
#ifndef AT91_WDT_H
#define AT91_WDT_H
+#include <linux/bits.h>
+
#define AT91_WDT_CR 0x00 /* Watchdog Control Register */
-#define AT91_WDT_WDRSTT (1 << 0) /* Restart */
-#define AT91_WDT_KEY (0xa5 << 24) /* KEY Password */
+#define AT91_WDT_WDRSTT BIT(0) /* Restart */
+#define AT91_WDT_KEY (0xa5UL << 24) /* KEY Password */
#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */
-#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
-#define AT91_WDT_SET_WDV(x) ((x) & AT91_WDT_WDV)
-#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
-#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
-#define AT91_WDT_WDRPROC (1 << 14) /* Timer Restart */
-#define AT91_WDT_WDDIS (1 << 15) /* Watchdog Disable */
-#define AT91_WDT_WDD (0xfff << 16) /* Delta Value */
-#define AT91_WDT_SET_WDD(x) (((x) << 16) & AT91_WDT_WDD)
-#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
-#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
+#define AT91_WDT_WDV (0xfffUL << 0) /* Counter Value */
+#define AT91_WDT_SET_WDV(x) ((x) & AT91_WDT_WDV)
+#define AT91_WDT_WDFIEN BIT(12) /* Fault Interrupt Enable */
+#define AT91_WDT_WDRSTEN BIT(13) /* Reset Processor */
+#define AT91_WDT_WDRPROC BIT(14) /* Timer Restart */
+#define AT91_WDT_WDDIS BIT(15) /* Watchdog Disable */
+#define AT91_WDT_WDD (0xfffUL << 16) /* Delta Value */
+#define AT91_WDT_SET_WDD(x) (((x) << 16) & AT91_WDT_WDD)
+#define AT91_WDT_WDDBGHLT BIT(28) /* Debug Halt */
+#define AT91_WDT_WDIDLEHLT BIT(29) /* Idle Halt */
-#define AT91_WDT_SR 0x08 /* Watchdog Status Register */
-#define AT91_WDT_WDUNF (1 << 0) /* Watchdog Underflow */
-#define AT91_WDT_WDERR (1 << 1) /* Watchdog Error */
+#define AT91_WDT_SR 0x08 /* Watchdog Status Register */
+#define AT91_WDT_WDUNF BIT(0) /* Watchdog Underflow */
+#define AT91_WDT_WDERR BIT(1) /* Watchdog Error */
#endif
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 75de664ef4b0..d6dff97c280b 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -234,6 +234,7 @@ static const struct file_operations ath79_wdt_fops = {
.llseek = no_llseek,
.write = ath79_wdt_write,
.unlocked_ioctl = ath79_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ath79_wdt_open,
.release = ath79_wdt_release,
};
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index e2af37c9a266..8a043b52aa2f 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -221,6 +221,7 @@ static const struct file_operations bcm63xx_wdt_fops = {
.llseek = no_llseek,
.write = bcm63xx_wdt_write,
.unlocked_ioctl = bcm63xx_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = bcm63xx_wdt_open,
.release = bcm63xx_wdt_release,
};
diff --git a/drivers/watchdog/bd70528_wdt.c b/drivers/watchdog/bd70528_wdt.c
index bc60e036627a..0170b37e6674 100644
--- a/drivers/watchdog/bd70528_wdt.c
+++ b/drivers/watchdog/bd70528_wdt.c
@@ -97,7 +97,7 @@ EXPORT_SYMBOL(bd70528_wdt_set);
/**
* bd70528_wdt_lock - take WDT lock
*
- * @bd70528: device data for the PMIC instance we want to operate on
+ * @data: device data for the PMIC instance we want to operate on
*
* Lock WDT for arming/disarming in order to avoid race condition caused
* by WDT state changes initiated by WDT and RTC drivers.
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(bd70528_wdt_lock);
/**
* bd70528_wdt_unlock - unlock WDT lock
*
- * @bd70528: device data for the PMIC instance we want to operate on
+ * @data: device data for the PMIC instance we want to operate on
*
* Unlock WDT lock which has previously been taken by call to
* bd70528_wdt_lock.
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index f8d4e91d0383..06bd4e1a5923 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -335,8 +335,10 @@ static int cdns_wdt_probe(struct platform_device *pdev)
wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(dev, "input clock not found\n");
- return PTR_ERR(wdt->clk);
+ ret = PTR_ERR(wdt->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "input clock not found\n");
+ return ret;
}
ret = clk_prepare_enable(wdt->clk);
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index d6d53014cb68..9867a3a936df 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -187,6 +187,7 @@ static const struct file_operations cpu5wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = cpu5wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = cpu5wdt_open,
.write = cpu5wdt_write,
.release = cpu5wdt_release,
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 3a83a48abcae..f5ffa7be066e 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -371,6 +371,7 @@ static const struct file_operations eurwdt_fops = {
.llseek = no_llseek,
.write = eurwdt_write,
.unlocked_ioctl = eurwdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = eurwdt_open,
.release = eurwdt_release,
};
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index e46104c2fd94..a3c44d75d80e 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -669,6 +669,7 @@ static const struct file_operations watchdog_fops = {
.release = watchdog_release,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice watchdog_miscdev = {
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 7d5f56994f09..f6541d1b65e3 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -248,6 +248,7 @@ static const struct file_operations gef_wdt_fops = {
.llseek = no_llseek,
.write = gef_wdt_write,
.unlocked_ioctl = gef_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = gef_wdt_open,
.release = gef_wdt_release,
};
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 8d105d98908e..9914a4283cb2 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -201,6 +201,7 @@ static const struct file_operations geodewdt_fops = {
.llseek = no_llseek,
.write = geodewdt_write,
.unlocked_ioctl = geodewdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = geodewdt_open,
.release = geodewdt_release,
};
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 92fd7f33bc4d..2b65ea9451d1 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -259,6 +259,7 @@ static const struct file_operations ibwdt_fops = {
.llseek = no_llseek,
.write = ibwdt_write,
.unlocked_ioctl = ibwdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ibwdt_open,
.release = ibwdt_close,
};
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 897f7eda9e6a..4a22fe152086 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -344,6 +344,7 @@ static const struct file_operations asr_fops = {
.llseek = no_llseek,
.write = asr_write,
.unlocked_ioctl = asr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = asr_open,
.release = asr_release,
};
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 8d019a961ccc..f8d58bf0bf66 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -72,7 +72,6 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
static unsigned timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
@@ -247,13 +246,14 @@ static const struct regmap_config imx2_wdt_regmap_config = {
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct imx2_wdt_device *wdev;
struct watchdog_device *wdog;
void __iomem *base;
int ret;
u32 val;
- wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
+ wdev = devm_kzalloc(dev, sizeof(*wdev), GFP_KERNEL);
if (!wdev)
return -ENOMEM;
@@ -261,16 +261,16 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- wdev->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
+ wdev->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
&imx2_wdt_regmap_config);
if (IS_ERR(wdev->regmap)) {
- dev_err(&pdev->dev, "regmap init failed\n");
+ dev_err(dev, "regmap init failed\n");
return PTR_ERR(wdev->regmap);
}
- wdev->clk = devm_clk_get(&pdev->dev, NULL);
+ wdev->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdev->clk)) {
- dev_err(&pdev->dev, "can't get Watchdog clock\n");
+ dev_err(dev, "can't get Watchdog clock\n");
return PTR_ERR(wdev->clk);
}
@@ -280,12 +280,12 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdog->min_timeout = 1;
wdog->timeout = IMX2_WDT_DEFAULT_TIME;
wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000;
- wdog->parent = &pdev->dev;
+ wdog->parent = dev;
ret = platform_get_irq(pdev, 0);
if (ret > 0)
- if (!devm_request_irq(&pdev->dev, ret, imx2_wdt_isr, 0,
- dev_name(&pdev->dev), wdog))
+ if (!devm_request_irq(dev, ret, imx2_wdt_isr, 0,
+ dev_name(dev), wdog))
wdog->info = &imx2_wdt_pretimeout_info;
ret = clk_prepare_enable(wdev->clk);
@@ -295,13 +295,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
- wdev->ext_reset = of_property_read_bool(pdev->dev.of_node,
+ wdev->ext_reset = of_property_read_bool(dev->of_node,
"fsl,ext-reset-output");
platform_set_drvdata(pdev, wdog);
watchdog_set_drvdata(wdog, wdev);
watchdog_set_nowayout(wdog, nowayout);
watchdog_set_restart_priority(wdog, 128);
- watchdog_init_timeout(wdog, timeout, &pdev->dev);
+ watchdog_init_timeout(wdog, timeout, dev);
if (imx2_wdt_is_running(wdev)) {
imx2_wdt_set_timeout(wdog, wdog->timeout);
@@ -319,7 +319,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (ret)
goto disable_clk;
- dev_info(&pdev->dev, "timeout %d sec (nowayout=%d)\n",
+ dev_info(dev, "timeout %d sec (nowayout=%d)\n",
wdog->timeout, nowayout);
return 0;
@@ -359,9 +359,8 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
}
-#ifdef CONFIG_PM_SLEEP
/* Disable watchdog if it is active or non-active but still running */
-static int imx2_wdt_suspend(struct device *dev)
+static int __maybe_unused imx2_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -382,7 +381,7 @@ static int imx2_wdt_suspend(struct device *dev)
}
/* Enable watchdog and configure it if necessary */
-static int imx2_wdt_resume(struct device *dev)
+static int __maybe_unused imx2_wdt_resume(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -407,7 +406,6 @@ static int imx2_wdt_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
imx2_wdt_resume);
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 5ce51026989a..0a87c6f4bab2 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -17,6 +17,9 @@
#define WDOG_CS_CMD32EN BIT(13)
#define WDOG_CS_ULK BIT(11)
#define WDOG_CS_RCS BIT(10)
+#define LPO_CLK 0x1
+#define LPO_CLK_SHIFT 8
+#define WDOG_CS_CLK (LPO_CLK << LPO_CLK_SHIFT)
#define WDOG_CS_EN BIT(7)
#define WDOG_CS_UPDATE BIT(5)
@@ -41,24 +44,25 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct imx7ulp_wdt_device {
- struct notifier_block restart_handler;
struct watchdog_device wdd;
void __iomem *base;
struct clk *clk;
};
-static inline void imx7ulp_wdt_enable(void __iomem *base, bool enable)
+static void imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable)
{
- u32 val = readl(base + WDOG_CS);
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- writel(UNLOCK, base + WDOG_CNT);
+ u32 val = readl(wdt->base + WDOG_CS);
+
+ writel(UNLOCK, wdt->base + WDOG_CNT);
if (enable)
- writel(val | WDOG_CS_EN, base + WDOG_CS);
+ writel(val | WDOG_CS_EN, wdt->base + WDOG_CS);
else
- writel(val & ~WDOG_CS_EN, base + WDOG_CS);
+ writel(val & ~WDOG_CS_EN, wdt->base + WDOG_CS);
}
-static inline bool imx7ulp_wdt_is_enabled(void __iomem *base)
+static bool imx7ulp_wdt_is_enabled(void __iomem *base)
{
u32 val = readl(base + WDOG_CS);
@@ -76,18 +80,15 @@ static int imx7ulp_wdt_ping(struct watchdog_device *wdog)
static int imx7ulp_wdt_start(struct watchdog_device *wdog)
{
- struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- imx7ulp_wdt_enable(wdt->base, true);
+ imx7ulp_wdt_enable(wdog, true);
return 0;
}
static int imx7ulp_wdt_stop(struct watchdog_device *wdog)
{
- struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
-
- imx7ulp_wdt_enable(wdt->base, false);
+ imx7ulp_wdt_enable(wdog, false);
return 0;
}
@@ -106,12 +107,28 @@ static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
return 0;
}
+static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
+ unsigned long action, void *data)
+{
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+
+	imx7ulp_wdt_enable(wdog, true);
+ imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
+
+ /* wait for wdog to fire */
+ while (true)
+ ;
+
+ return NOTIFY_DONE;
+}
+
static const struct watchdog_ops imx7ulp_wdt_ops = {
.owner = THIS_MODULE,
.start = imx7ulp_wdt_start,
.stop = imx7ulp_wdt_stop,
.ping = imx7ulp_wdt_ping,
.set_timeout = imx7ulp_wdt_set_timeout,
+ .restart = imx7ulp_wdt_restart,
};
static const struct watchdog_info imx7ulp_wdt_info = {
@@ -120,7 +137,7 @@ static const struct watchdog_info imx7ulp_wdt_info = {
WDIOF_MAGICCLOSE,
};
-static inline void imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
+static void imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
{
u32 val;
@@ -131,7 +148,7 @@ static inline void imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
/* set an initial timeout value in TOVAL */
writel(timeout, base + WDOG_TOVAL);
/* enable 32bit command sequence and reconfigure */
- val = BIT(13) | BIT(8) | BIT(5);
+ val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE;
writel(val, base + WDOG_CS);
}
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 550358528084..9857bb74a723 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -152,6 +152,7 @@ static const struct file_operations indydog_fops = {
.llseek = no_llseek,
.write = indydog_write,
.unlocked_ioctl = indydog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = indydog_open,
.release = indydog_release,
};
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 2cdbd37c700c..470213abfd3d 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -134,6 +134,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
wdt_dev->parent = dev;
+ watchdog_set_nowayout(wdt_dev, WATCHDOG_NOWAYOUT);
watchdog_set_drvdata(wdt_dev, dev);
ret = devm_request_irq(dev, pdata->irq, mid_wdt_irq,
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 1c85103b750b..6ad5bf3451ec 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -412,6 +412,7 @@ static const struct file_operations intel_scu_fops = {
.llseek = no_llseek,
.write = intel_scu_write,
.unlocked_ioctl = intel_scu_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = intel_scu_open,
.release = intel_scu_release,
};
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index a9ccdb9a9159..6bf68d4750de 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -202,6 +202,7 @@ static const struct file_operations iop_wdt_fops = {
.llseek = no_llseek,
.write = iop_wdt_write,
.unlocked_ioctl = iop_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = iop_wdt_open,
.release = iop_wdt_release,
};
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 2fe1a3c499ed..2fed40d14007 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -345,6 +345,7 @@ static const struct file_operations it8712f_wdt_fops = {
.llseek = no_llseek,
.write = it8712f_wdt_write,
.unlocked_ioctl = it8712f_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = it8712f_wdt_open,
.release = it8712f_wdt_release,
};
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 9067998759e3..09886616fd21 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -163,6 +163,7 @@ static const struct file_operations ixp4xx_wdt_fops = {
.llseek = no_llseek,
.write = ixp4xx_wdt_write,
.unlocked_ioctl = ixp4xx_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ixp4xx_wdt_open,
.release = ixp4xx_wdt_release,
};
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index c6052ae54f32..bdf9564efa29 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -5,6 +5,7 @@
*/
#include <linux/mfd/ingenic-tcu.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -17,19 +18,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
-
-#include <asm/mach-jz4740/timer.h>
-
-#define JZ_WDT_CLOCK_PCLK 0x1
-#define JZ_WDT_CLOCK_RTC 0x2
-#define JZ_WDT_CLOCK_EXT 0x4
-
-#define JZ_WDT_CLOCK_DIV_1 (0 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_4 (1 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_16 (2 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_64 (3 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_256 (4 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_1024 (5 << TCU_TCSR_PRESCALE_LSB)
+#include <linux/regmap.h>
#define DEFAULT_HEARTBEAT 5
#define MAX_HEARTBEAT 2048
@@ -49,15 +38,17 @@ MODULE_PARM_DESC(heartbeat,
struct jz4740_wdt_drvdata {
struct watchdog_device wdt;
- void __iomem *base;
- struct clk *rtc_clk;
+ struct regmap *map;
+ struct clk *clk;
+ unsigned long clk_rate;
};
static int jz4740_wdt_ping(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writew(0x0, drvdata->base + TCU_REG_WDT_TCNT);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCNT, 0);
+
return 0;
}
@@ -65,35 +56,17 @@ static int jz4740_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int new_timeout)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- unsigned int rtc_clk_rate;
- unsigned int timeout_value;
- unsigned short clock_div = JZ_WDT_CLOCK_DIV_1;
- u8 tcer;
-
- rtc_clk_rate = clk_get_rate(drvdata->rtc_clk);
-
- timeout_value = rtc_clk_rate * new_timeout;
- while (timeout_value > 0xffff) {
- if (clock_div == JZ_WDT_CLOCK_DIV_1024) {
- /* Requested timeout too high;
- * use highest possible value. */
- timeout_value = 0xffff;
- break;
- }
- timeout_value >>= 2;
- clock_div += (1 << TCU_TCSR_PRESCALE_LSB);
- }
+ u16 timeout_value = (u16)(drvdata->clk_rate * new_timeout);
+ unsigned int tcer;
- tcer = readb(drvdata->base + TCU_REG_WDT_TCER);
- writeb(0x0, drvdata->base + TCU_REG_WDT_TCER);
- writew(clock_div, drvdata->base + TCU_REG_WDT_TCSR);
+ regmap_read(drvdata->map, TCU_REG_WDT_TCER, &tcer);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCER, 0);
- writew((u16)timeout_value, drvdata->base + TCU_REG_WDT_TDR);
- writew(0x0, drvdata->base + TCU_REG_WDT_TCNT);
- writew(clock_div | JZ_WDT_CLOCK_RTC, drvdata->base + TCU_REG_WDT_TCSR);
+ regmap_write(drvdata->map, TCU_REG_WDT_TDR, timeout_value);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCNT, 0);
if (tcer & TCU_WDT_TCER_TCEN)
- writeb(TCU_WDT_TCER_TCEN, drvdata->base + TCU_REG_WDT_TCER);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCER, TCU_WDT_TCER_TCEN);
wdt_dev->timeout = new_timeout;
return 0;
@@ -102,16 +75,20 @@ static int jz4740_wdt_set_timeout(struct watchdog_device *wdt_dev,
static int jz4740_wdt_start(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- u8 tcer;
+ unsigned int tcer;
+ int ret;
- tcer = readb(drvdata->base + TCU_REG_WDT_TCER);
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ regmap_read(drvdata->map, TCU_REG_WDT_TCER, &tcer);
- jz4740_timer_enable_watchdog();
jz4740_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
/* Start watchdog if it wasn't started already */
if (!(tcer & TCU_WDT_TCER_TCEN))
- writeb(TCU_WDT_TCER_TCEN, drvdata->base + TCU_REG_WDT_TCER);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCER, TCU_WDT_TCER_TCEN);
return 0;
}
@@ -120,8 +97,8 @@ static int jz4740_wdt_stop(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writeb(0x0, drvdata->base + TCU_REG_WDT_TCER);
- jz4740_timer_disable_watchdog();
+ regmap_write(drvdata->map, TCU_REG_WDT_TCER, 0);
+ clk_disable_unprepare(drvdata->clk);
return 0;
}
@@ -162,33 +139,46 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct jz4740_wdt_drvdata *drvdata;
struct watchdog_device *jz4740_wdt;
+ long rate;
+ int ret;
drvdata = devm_kzalloc(dev, sizeof(struct jz4740_wdt_drvdata),
GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
- heartbeat = DEFAULT_HEARTBEAT;
+ drvdata->clk = devm_clk_get(&pdev->dev, "wdt");
+ if (IS_ERR(drvdata->clk)) {
+ dev_err(&pdev->dev, "cannot find WDT clock\n");
+ return PTR_ERR(drvdata->clk);
+ }
+
+ /* Set smallest clock possible */
+ rate = clk_round_rate(drvdata->clk, 1);
+ if (rate < 0)
+ return rate;
+ ret = clk_set_rate(drvdata->clk, rate);
+ if (ret)
+ return ret;
+
+ drvdata->clk_rate = rate;
jz4740_wdt = &drvdata->wdt;
jz4740_wdt->info = &jz4740_wdt_info;
jz4740_wdt->ops = &jz4740_wdt_ops;
- jz4740_wdt->timeout = heartbeat;
jz4740_wdt->min_timeout = 1;
- jz4740_wdt->max_timeout = MAX_HEARTBEAT;
+ jz4740_wdt->max_timeout = 0xffff / rate;
+ jz4740_wdt->timeout = clamp(heartbeat,
+ jz4740_wdt->min_timeout,
+ jz4740_wdt->max_timeout);
jz4740_wdt->parent = dev;
watchdog_set_nowayout(jz4740_wdt, nowayout);
watchdog_set_drvdata(jz4740_wdt, drvdata);
- drvdata->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(drvdata->base))
- return PTR_ERR(drvdata->base);
-
- drvdata->rtc_clk = devm_clk_get(dev, "rtc");
- if (IS_ERR(drvdata->rtc_clk)) {
- dev_err(dev, "cannot find RTC clock\n");
- return PTR_ERR(drvdata->rtc_clk);
+ drvdata->map = device_node_to_regmap(dev->parent->of_node);
+ if (!drvdata->map) {
+ dev_err(dev, "regmap not found\n");
+ return -EINVAL;
}
return devm_watchdog_register_device(dev, &drvdata->wdt);
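
With the conversion to the TCU regmap and a real clk, the timeout arithmetic becomes a straight multiply against the rounded-down clock rate. A worked example with a purely hypothetical rate: if clk_round_rate(clk, 1) settles on 750 Hz, max_timeout becomes 0xffff / 750 = 87 s, the default 5 s heartbeat programs TDR = 750 * 5 = 3750 counts, and the heartbeat module parameter is clamped into [1, 87] at probe time.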
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 752d03620f0a..22f335e1e164 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -183,6 +183,7 @@ static const struct file_operations m54xx_wdt_fops = {
.llseek = no_llseek,
.write = m54xx_wdt_write,
.unlocked_ioctl = m54xx_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = m54xx_wdt_open,
.release = m54xx_wdt_release,
};
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index cef2baf59dda..80ff94688487 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -361,6 +361,7 @@ static const struct file_operations zf_fops = {
.llseek = no_llseek,
.write = zf_write,
.unlocked_ioctl = zf_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = zf_open,
.release = zf_close,
};
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index ed18238c5407..8973f98bc6a5 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -168,3 +168,4 @@ module_mcb_driver(men_z069_driver);
MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z069");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index a86faa5000f1..d387bad377c4 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -227,6 +227,7 @@ static const struct file_operations mixcomwd_fops = {
.llseek = no_llseek,
.write = mixcomwd_write,
.unlocked_ioctl = mixcomwd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = mixcomwd_open,
.release = mixcomwd_release,
};
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 25a92857b217..8aa1cb4a295f 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -181,6 +181,7 @@ static const struct file_operations mtx1_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = mtx1_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = mtx1_wdt_open,
.write = mtx1_wdt_write,
.release = mtx1_wdt_release,
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index 74bf7144a970..0bc72dd69b70 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -241,6 +241,7 @@ static const struct file_operations mv64x60_wdt_fops = {
.llseek = no_llseek,
.write = mv64x60_wdt_write,
.unlocked_ioctl = mv64x60_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = mv64x60_wdt_open,
.release = mv64x60_wdt_release,
};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 5f0082e300bd..d7a560e348d5 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -267,6 +267,7 @@ static const struct file_operations nv_tco_fops = {
.llseek = no_llseek,
.write = nv_tco_write,
.unlocked_ioctl = nv_tco_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = nv_tco_open,
.release = nv_tco_release,
};
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 2af1a8b3f973..73fbfc99083b 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -473,6 +473,7 @@ static const struct file_operations pc87413_fops = {
.llseek = no_llseek,
.write = pc87413_write,
.unlocked_ioctl = pc87413_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pc87413_open,
.release = pc87413_release,
};
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index c3c93e00b320..7a0587fdc52c 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -752,6 +752,7 @@ static const struct file_operations pcwd_fops = {
.llseek = no_llseek,
.write = pcwd_write,
.unlocked_ioctl = pcwd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pcwd_open,
.release = pcwd_close,
};
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index e30c1f762045..81508a42a90c 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -646,6 +646,7 @@ static const struct file_operations pcipcwd_fops = {
.llseek = no_llseek,
.write = pcipcwd_write,
.unlocked_ioctl = pcipcwd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pcipcwd_open,
.release = pcipcwd_release,
};
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 6727f8ab2d18..2f44af1831d0 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -550,6 +550,7 @@ static const struct file_operations usb_pcwd_fops = {
.llseek = no_llseek,
.write = usb_pcwd_write,
.unlocked_ioctl = usb_pcwd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = usb_pcwd_open,
.release = usb_pcwd_release,
};
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index 205c3c68fca1..a98abd0d3146 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -214,6 +214,7 @@ static const struct file_operations pikawdt_fops = {
.release = pikawdt_release,
.write = pikawdt_write,
.unlocked_ioctl = pikawdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice pikawdt_miscdev = {
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
index aa53babf2bab..4097d076aab8 100644
--- a/drivers/watchdog/pnx833x_wdt.c
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -215,6 +215,7 @@ static const struct file_operations pnx833x_wdt_fops = {
.llseek = no_llseek,
.write = pnx833x_wdt_write,
.unlocked_ioctl = pnx833x_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pnx833x_wdt_open,
.release = pnx833x_wdt_release,
};
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index a8a4b3a41a90..1dfede0abf18 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -245,6 +245,7 @@ static const struct file_operations rc32434_wdt_fops = {
.llseek = no_llseek,
.write = rc32434_wdt_write,
.unlocked_ioctl = rc32434_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = rc32434_wdt_open,
.release = rc32434_wdt_release,
};
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 2e608ae6cbc7..57187efeb86f 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -199,6 +199,7 @@ static const struct file_operations rdc321x_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = rdc321x_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = rdc321x_wdt_open,
.write = rdc321x_wdt_write,
.release = rdc321x_wdt_release,
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index b35f7be20c00..dc3c06a92f93 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -163,6 +163,7 @@ static const struct file_operations riowd_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = riowd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = riowd_open,
.write = riowd_write,
.release = riowd_release,
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index cbd8c957182f..9b93be00109f 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -141,6 +141,7 @@ static const struct file_operations sa1100dog_fops = {
.llseek = no_llseek,
.write = sa1100dog_write,
.unlocked_ioctl = sa1100dog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sa1100dog_open,
.release = sa1100dog_release,
};
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 202fc8d8ca5f..da2dad00d473 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -237,6 +237,7 @@ static const struct file_operations sbwdog_fops = {
.llseek = no_llseek,
.write = sbwdog_write,
.unlocked_ioctl = sbwdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sbwdog_open,
.release = sbwdog_release,
};
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index c3151642694c..f2cbe6d880a8 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -280,6 +280,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index 12cdee7d5069..520b8dd77ed4 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -194,9 +194,8 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (wdt_set_timeout(new_timeout))
return -EINVAL;
-
- /* Fall through */
}
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, (int __user *)arg);
default:
@@ -211,6 +210,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
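
The sbc7240 change above only moves the fall-through comment outside the closing brace of the WDIOC_SETTIMEOUT block: GCC's -Wimplicit-fallthrough only honours such a comment when it immediately precedes the next case label, not when a brace sits in between. Condensed from the hunk above, the resulting shape is:

	case WDIOC_SETTIMEOUT:
		if (wdt_set_timeout(new_timeout))
			return -EINVAL;
		/* Fall through */	/* directly before the next label */
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, (int __user *)arg);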
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 86828c28843f..5e3a9ddb952e 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -156,6 +156,7 @@ static const struct file_operations epx_c3_fops = {
.llseek = no_llseek,
.write = epx_c3_write,
.unlocked_ioctl = epx_c3_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = epx_c3_open,
.release = epx_c3_release,
};
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 3822a60a8d2b..1b20b33879c4 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -186,6 +186,7 @@ static const struct file_operations fitpc2_wdt_fops = {
.llseek = no_llseek,
.write = fitpc2_wdt_write,
.unlocked_ioctl = fitpc2_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = fitpc2_wdt_open,
.release = fitpc2_wdt_release,
};
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 960385a766b3..9673eb12dacd 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -307,6 +307,7 @@ static const struct file_operations sc1200wdt_fops = {
.llseek = no_llseek,
.write = sc1200wdt_write,
.unlocked_ioctl = sc1200wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sc1200wdt_open,
.release = sc1200wdt_release,
};
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index a612128c5f80..fbe79bcc9297 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -336,6 +336,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 3612f1df381b..83949a385f62 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -337,6 +337,7 @@ static const struct file_operations sch311x_wdt_fops = {
.llseek = no_llseek,
.write = sch311x_wdt_write,
.unlocked_ioctl = sch311x_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sch311x_wdt_open,
.release = sch311x_wdt_close,
};
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index 46268309ee9b..c94098acb78f 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -201,6 +201,7 @@ static const struct file_operations scx200_wdt_fops = {
.llseek = no_llseek,
.write = scx200_wdt_write,
.unlocked_ioctl = scx200_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = scx200_wdt_open,
.release = scx200_wdt_release,
};
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index f5713030d0f7..43de56acd767 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -505,6 +505,7 @@ static const struct file_operations wb_smsc_wdt_fops = {
.llseek = no_llseek,
.write = wb_smsc_wdt_write,
.unlocked_ioctl = wb_smsc_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wb_smsc_wdt_open,
.release = wb_smsc_wdt_release,
};
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index 0bb17b046140..65cb55f3916f 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -327,10 +327,9 @@ static int sprd_wdt_probe(struct platform_device *pdev)
static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
{
- struct watchdog_device *wdd = dev_get_drvdata(dev);
struct sprd_wdt *wdt = dev_get_drvdata(dev);
- if (watchdog_active(wdd))
+ if (watchdog_active(&wdt->wdd))
sprd_wdt_stop(&wdt->wdd);
sprd_wdt_disable(wdt);
@@ -339,7 +338,6 @@ static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
{
- struct watchdog_device *wdd = dev_get_drvdata(dev);
struct sprd_wdt *wdt = dev_get_drvdata(dev);
int ret;
@@ -347,7 +345,7 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
if (ret)
return ret;
- if (watchdog_active(wdd)) {
+ if (watchdog_active(&wdt->wdd)) {
ret = sprd_wdt_start(&wdt->wdd);
if (ret) {
sprd_wdt_disable(wdt);
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 38b31e9947aa..fdf533fe0bb2 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -49,7 +49,7 @@ static int wdt_cfg_leave = 0xAA;/* key to lock configuration space */
enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
- nct6795, nct6796, nct6102 };
+ nct6795, nct6796, nct6102, nct6116 };
static int timeout; /* in seconds */
module_param(timeout, int, 0);
@@ -94,6 +94,7 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
#define NCT6775_ID 0xb4
#define NCT6776_ID 0xc3
#define NCT6102_ID 0xc4
+#define NCT6116_ID 0xd2
#define NCT6779_ID 0xc5
#define NCT6791_ID 0xc8
#define NCT6792_ID 0xc9
@@ -211,6 +212,7 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
case nct6795:
case nct6796:
case nct6102:
+ case nct6116:
/*
* These chips have a fixed WDTO# output pin (W83627UHG),
* or support more than one WDTO# output pin.
@@ -417,6 +419,12 @@ static int wdt_find(int addr)
cr_wdt_control = NCT6102D_WDT_CONTROL;
cr_wdt_csr = NCT6102D_WDT_CSR;
break;
+ case NCT6116_ID:
+		ret = nct6116;
+ cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
+ cr_wdt_control = NCT6102D_WDT_CONTROL;
+ cr_wdt_csr = NCT6102D_WDT_CSR;
+ break;
case 0xff:
ret = -ENODEV;
break;
@@ -482,6 +490,7 @@ static int __init wdt_init(void)
"NCT6795",
"NCT6796",
"NCT6102",
+ "NCT6116",
};
/* Apply system-specific quirks */
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index 6eb5185d6ea6..6b3b667e6f23 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -304,6 +304,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 16e9cbe72acc..5212e68c6b01 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -446,6 +446,7 @@ static const struct file_operations wdt_fops = {
.llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdt_open,
.release = wdt_release,
};
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index 6d2071a0590d..a6925847f76f 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -230,6 +230,7 @@ static const struct file_operations wafwdt_fops = {
.llseek = no_llseek,
.write = wafwdt_write,
.unlocked_ioctl = wafwdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wafwdt_open,
.release = wafwdt_close,
};
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index dbd2ad4c9294..4b2a85438478 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -34,7 +34,6 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/hrtimer.h> /* For hrtimers */
#include <linux/kernel.h> /* For printk/panic/... */
-#include <linux/kref.h> /* For data references */
#include <linux/kthread.h> /* For kthread_work */
#include <linux/miscdevice.h> /* For handling misc devices */
#include <linux/module.h> /* For module stuff/... */
@@ -52,14 +51,14 @@
/*
* struct watchdog_core_data - watchdog core internal data
- * @kref: Reference count.
+ * @dev: The watchdog's internal device
* @cdev: The watchdog's Character device.
* @wdd: Pointer to watchdog device.
* @lock: Lock for watchdog core.
* @status: Watchdog core internal status bits.
*/
struct watchdog_core_data {
- struct kref kref;
+ struct device dev;
struct cdev cdev;
struct watchdog_device *wdd;
struct mutex lock;
@@ -158,7 +157,8 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd)
ktime_t t = watchdog_next_keepalive(wdd);
if (t > 0)
- hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
+ hrtimer_start(&wd_data->timer, t,
+ HRTIMER_MODE_REL_HARD);
} else {
hrtimer_cancel(&wd_data->timer);
}
@@ -177,7 +177,7 @@ static int __watchdog_ping(struct watchdog_device *wdd)
if (ktime_after(earliest_keepalive, now)) {
hrtimer_start(&wd_data->timer,
ktime_sub(earliest_keepalive, now),
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_HARD);
return 0;
}
@@ -452,7 +452,26 @@ static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
}
-static DEVICE_ATTR_RO(nowayout);
+
+static ssize_t nowayout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+ /* nowayout cannot be disabled once set */
+ if (test_bit(WDOG_NO_WAY_OUT, &wdd->status) && !value)
+ return -EPERM;
+ watchdog_set_nowayout(wdd, value);
+ return len;
+}
+static DEVICE_ATTR_RW(nowayout);
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -839,7 +858,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
file->private_data = wd_data;
if (!hw_running)
- kref_get(&wd_data->kref);
+ get_device(&wd_data->dev);
/*
* open_timeout only applies for the first open from
@@ -860,11 +879,11 @@ out_clear:
return err;
}
-static void watchdog_core_data_release(struct kref *kref)
+static void watchdog_core_data_release(struct device *dev)
{
struct watchdog_core_data *wd_data;
- wd_data = container_of(kref, struct watchdog_core_data, kref);
+ wd_data = container_of(dev, struct watchdog_core_data, dev);
kfree(wd_data);
}
@@ -924,7 +943,7 @@ done:
*/
if (!running) {
module_put(wd_data->cdev.owner);
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}
return 0;
}
@@ -933,6 +952,7 @@ static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = watchdog_open,
.release = watchdog_release,
};
@@ -943,17 +963,22 @@ static struct miscdevice watchdog_miscdev = {
.fops = &watchdog_fops,
};
+static struct class watchdog_class = {
+ .name = "watchdog",
+ .owner = THIS_MODULE,
+ .dev_groups = wdt_groups,
+};
+
/*
* watchdog_cdev_register: register watchdog character device
* @wdd: watchdog device
- * @devno: character device number
*
* Register a watchdog character device including handling the legacy
* /dev/watchdog node. /dev/watchdog is actually a miscdevice and
* thus we set it up like that.
*/
-static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
+static int watchdog_cdev_register(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data;
int err;
@@ -961,7 +986,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
if (!wd_data)
return -ENOMEM;
- kref_init(&wd_data->kref);
mutex_init(&wd_data->lock);
wd_data->wdd = wdd;
@@ -971,7 +995,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
return -ENODEV;
kthread_init_work(&wd_data->work, watchdog_ping_work);
- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
wd_data->timer.function = watchdog_timer_expired;
if (wdd->id == 0) {
@@ -990,23 +1014,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
}
}
+ device_initialize(&wd_data->dev);
+ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+ wd_data->dev.class = &watchdog_class;
+ wd_data->dev.parent = wdd->parent;
+ wd_data->dev.groups = wdd->groups;
+ wd_data->dev.release = watchdog_core_data_release;
+ dev_set_drvdata(&wd_data->dev, wdd);
+ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+
/* Fill in the data structures */
cdev_init(&wd_data->cdev, &watchdog_fops);
- wd_data->cdev.owner = wdd->ops->owner;
/* Add the device */
- err = cdev_add(&wd_data->cdev, devno, 1);
+ err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
if (err) {
pr_err("watchdog%d unable to add device %d:%d\n",
wdd->id, MAJOR(watchdog_devt), wdd->id);
if (wdd->id == 0) {
misc_deregister(&watchdog_miscdev);
old_wd_data = NULL;
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}
return err;
}
+ wd_data->cdev.owner = wdd->ops->owner;
+
/* Record time of most recent heartbeat as 'just before now'. */
wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
watchdog_set_open_deadline(wd_data);
@@ -1017,9 +1051,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
*/
if (watchdog_hw_running(wdd)) {
__module_get(wdd->ops->owner);
- kref_get(&wd_data->kref);
+ get_device(&wd_data->dev);
if (handle_boot_enabled)
- hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&wd_data->timer, 0,
+ HRTIMER_MODE_REL_HARD);
else
pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
wdd->id);
@@ -1040,7 +1075,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data = wdd->wd_data;
- cdev_del(&wd_data->cdev);
+ cdev_device_del(&wd_data->cdev, &wd_data->dev);
if (wdd->id == 0) {
misc_deregister(&watchdog_miscdev);
old_wd_data = NULL;
@@ -1059,15 +1094,9 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
hrtimer_cancel(&wd_data->timer);
kthread_cancel_work_sync(&wd_data->work);
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}
-static struct class watchdog_class = {
- .name = "watchdog",
- .owner = THIS_MODULE,
- .dev_groups = wdt_groups,
-};
-
static int watchdog_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *data)
{
@@ -1098,27 +1127,14 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
int watchdog_dev_register(struct watchdog_device *wdd)
{
- struct device *dev;
- dev_t devno;
int ret;
- devno = MKDEV(MAJOR(watchdog_devt), wdd->id);
-
- ret = watchdog_cdev_register(wdd, devno);
+ ret = watchdog_cdev_register(wdd);
if (ret)
return ret;
- dev = device_create_with_groups(&watchdog_class, wdd->parent,
- devno, wdd, wdd->groups,
- "watchdog%d", wdd->id);
- if (IS_ERR(dev)) {
- watchdog_cdev_unregister(wdd);
- return PTR_ERR(dev);
- }
-
ret = watchdog_register_pretimeout(wdd);
if (ret) {
- device_destroy(&watchdog_class, devno);
watchdog_cdev_unregister(wdd);
return ret;
}
@@ -1126,7 +1142,8 @@ int watchdog_dev_register(struct watchdog_device *wdd)
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
- ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
+ ret = devm_register_reboot_notifier(&wdd->wd_data->dev,
+ &wdd->reboot_nb);
if (ret) {
pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
wdd->id, ret);
@@ -1148,7 +1165,6 @@ int watchdog_dev_register(struct watchdog_device *wdd)
void watchdog_dev_unregister(struct watchdog_device *wdd)
{
watchdog_unregister_pretimeout(wdd);
- device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
watchdog_cdev_unregister(wdd);
}
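
Since nowayout is now DEVICE_ATTR_RW, userspace can latch nowayout on an already-registered watchdog; the store handler above accepts 0 or 1 and refuses to clear the bit once set. A minimal sketch, assuming the device registered as watchdog0:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/watchdog/watchdog0/nowayout", O_WRONLY);

	if (fd < 0)
		return 1;
	/* "1" latches nowayout; a later write of "0" fails with EPERM */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}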
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e7cf41aa26c3..b069349b52f5 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -202,7 +202,7 @@ static int wdat_wdt_enable_reboot(struct wdat_wdt *wdat)
* WDAT specification says that the watchdog is required to reboot
* the system when it fires. However, it also states that it is
 * recommended to make it configurable through hardware register. We
- * enable reboot now if it is configrable, just in case.
+ * enable reboot now if it is configurable, just in case.
*/
ret = wdat_wdt_run_action(wdat, ACPI_WDAT_SET_REBOOT, 0, NULL);
if (ret && ret != -EOPNOTSUPP) {
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index 6ad7edb4a712..184a06a74f83 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -472,6 +472,7 @@ static const struct file_operations wdrtas_fops = {
.llseek = no_llseek,
.write = wdrtas_write,
.unlocked_ioctl = wdrtas_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdrtas_open,
.release = wdrtas_close,
};
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 7d278b37e083..f9054cb0f8e2 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -523,6 +523,7 @@ static const struct file_operations wdt_fops = {
.llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdt_open,
.release = wdt_release,
};
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index eb729d704836..e60993d0767e 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -181,6 +181,7 @@ static const struct file_operations watchdog_fops = {
.llseek = no_llseek,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = watchdog_open,
.release = watchdog_release,
};
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 5c52c73e1839..066a4fb4d75b 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -422,6 +422,7 @@ static const struct file_operations wdt977_fops = {
.llseek = no_llseek,
.write = wdt977_write,
.unlocked_ioctl = wdt977_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdt977_open,
.release = wdt977_release,
};
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 66303ab95685..e528024faa41 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -566,6 +566,7 @@ static const struct file_operations wdtpci_fops = {
.llseek = no_llseek,
.write = wdtpci_write,
.unlocked_ioctl = wdtpci_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdtpci_open,
.release = wdtpci_release,
};
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5bae515c8e25..4f2e78a5e4db 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -374,7 +374,6 @@ static void xen_online_page(struct page *page, unsigned int order)
mutex_lock(&balloon_mutex);
for (i = 0; i < size; i++) {
p = pfn_to_page(start_pfn + i);
- __online_page_set_limits(p);
balloon_append(p);
}
mutex_unlock(&balloon_mutex);
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 2f8b949c3eeb..91e44c04f787 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -21,15 +21,8 @@ struct gntdev_dmabuf_priv;
struct gntdev_priv {
/* Maps with visible offsets in the file descriptor. */
struct list_head maps;
- /*
- * Maps that are not visible; will be freed on munmap.
- * Only populated if populate_freeable_maps == 1
- */
- struct list_head freeable_maps;
/* lock protects maps and freeable_maps. */
struct mutex lock;
- struct mm_struct *mm;
- struct mmu_notifier mn;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/* Device for which DMA memory is allocated. */
@@ -49,6 +42,7 @@ struct gntdev_unmap_notify {
};
struct gntdev_grant_map {
+ struct mmu_interval_notifier notifier;
struct list_head next;
struct vm_area_struct *vma;
int index;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 81401f386c9c..a04ddf2a68af 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -63,7 +63,6 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
static atomic_t pages_mapped = ATOMIC_INIT(0);
static int use_ptemod;
-#define populate_freeable_maps use_ptemod
static int unmap_grant_pages(struct gntdev_grant_map *map,
int offset, int pages);
@@ -249,12 +248,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
evtchn_put(map->notify.event);
}
- if (populate_freeable_maps && priv) {
- mutex_lock(&priv->lock);
- list_del(&map->next);
- mutex_unlock(&priv->lock);
- }
-
if (map->pages && !use_ptemod)
unmap_grant_pages(map, 0, map->count);
gntdev_free_map(map);
@@ -444,16 +437,9 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
pr_debug("gntdev_vma_close %p\n", vma);
if (use_ptemod) {
- /* It is possible that an mmu notifier could be running
- * concurrently, so take priv->lock to ensure that the vma won't
- * vanishing during the unmap_grant_pages call, since we will
- * spin here until that completes. Such a concurrent call will
- * not do any unmapping, since that has been done prior to
- * closing the vma, but it may still iterate the unmap_ops list.
- */
- mutex_lock(&priv->lock);
+ WARN_ON(map->vma != vma);
+ mmu_interval_notifier_remove(&map->notifier);
map->vma = NULL;
- mutex_unlock(&priv->lock);
}
vma->vm_private_data = NULL;
gntdev_put_map(priv, map);
@@ -475,109 +461,44 @@ static const struct vm_operations_struct gntdev_vmops = {
/* ------------------------------------------------------------------ */
-static bool in_range(struct gntdev_grant_map *map,
- unsigned long start, unsigned long end)
-{
- if (!map->vma)
- return false;
- if (map->vma->vm_start >= end)
- return false;
- if (map->vma->vm_end <= start)
- return false;
-
- return true;
-}
-
-static int unmap_if_in_range(struct gntdev_grant_map *map,
- unsigned long start, unsigned long end,
- bool blockable)
+static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
+ struct gntdev_grant_map *map =
+ container_of(mn, struct gntdev_grant_map, notifier);
unsigned long mstart, mend;
int err;
- if (!in_range(map, start, end))
- return 0;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- if (!blockable)
- return -EAGAIN;
+ /*
+ * If the VMA is split or otherwise changed the notifier is not
+ * updated, but we don't want to process VA's outside the modified
+ * VMA. FIXME: It would be much more understandable to just prevent
+ * modifying the VMA in the first place.
+ */
+ if (map->vma->vm_start >= range->end ||
+ map->vma->vm_end <= range->start)
+ return true;
- mstart = max(start, map->vma->vm_start);
- mend = min(end, map->vma->vm_end);
+ mstart = max(range->start, map->vma->vm_start);
+ mend = min(range->end, map->vma->vm_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
map->index, map->count,
map->vma->vm_start, map->vma->vm_end,
- start, end, mstart, mend);
+ range->start, range->end, mstart, mend);
err = unmap_grant_pages(map,
(mstart - map->vma->vm_start) >> PAGE_SHIFT,
(mend - mstart) >> PAGE_SHIFT);
WARN_ON(err);
- return 0;
-}
-
-static int mn_invl_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct gntdev_grant_map *map;
- int ret = 0;
-
- if (mmu_notifier_range_blockable(range))
- mutex_lock(&priv->lock);
- else if (!mutex_trylock(&priv->lock))
- return -EAGAIN;
-
- list_for_each_entry(map, &priv->maps, next) {
- ret = unmap_if_in_range(map, range->start, range->end,
- mmu_notifier_range_blockable(range));
- if (ret)
- goto out_unlock;
- }
- list_for_each_entry(map, &priv->freeable_maps, next) {
- ret = unmap_if_in_range(map, range->start, range->end,
- mmu_notifier_range_blockable(range));
- if (ret)
- goto out_unlock;
- }
-
-out_unlock:
- mutex_unlock(&priv->lock);
-
- return ret;
-}
-
-static void mn_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct gntdev_grant_map *map;
- int err;
-
- mutex_lock(&priv->lock);
- list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- pr_debug("map %d+%d (%lx %lx)\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end);
- err = unmap_grant_pages(map, /* offset */ 0, map->count);
- WARN_ON(err);
- }
- list_for_each_entry(map, &priv->freeable_maps, next) {
- if (!map->vma)
- continue;
- pr_debug("map %d+%d (%lx %lx)\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end);
- err = unmap_grant_pages(map, /* offset */ 0, map->count);
- WARN_ON(err);
- }
- mutex_unlock(&priv->lock);
+ return true;
}
-static const struct mmu_notifier_ops gntdev_mmu_ops = {
- .release = mn_release,
- .invalidate_range_start = mn_invl_range_start,
+static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
+ .invalidate = gntdev_invalidate,
};
/* ------------------------------------------------------------------ */
@@ -592,7 +513,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
return -ENOMEM;
INIT_LIST_HEAD(&priv->maps);
- INIT_LIST_HEAD(&priv->freeable_maps);
mutex_init(&priv->lock);
#ifdef CONFIG_XEN_GNTDEV_DMABUF
@@ -604,17 +524,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
}
#endif
- if (use_ptemod) {
- priv->mm = get_task_mm(current);
- if (!priv->mm) {
- kfree(priv);
- return -ENOMEM;
- }
- priv->mn.ops = &gntdev_mmu_ops;
- ret = mmu_notifier_register(&priv->mn, priv->mm);
- mmput(priv->mm);
- }
-
if (ret) {
kfree(priv);
return ret;
@@ -644,16 +553,12 @@ static int gntdev_release(struct inode *inode, struct file *flip)
list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map);
}
- WARN_ON(!list_empty(&priv->freeable_maps));
mutex_unlock(&priv->lock);
#ifdef CONFIG_XEN_GNTDEV_DMABUF
gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif
- if (use_ptemod)
- mmu_notifier_unregister(&priv->mn, priv->mm);
-
kfree(priv);
return 0;
}
@@ -714,8 +619,6 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
if (map) {
list_del(&map->next);
- if (populate_freeable_maps)
- list_add_tail(&map->next, &priv->freeable_maps);
err = 0;
}
mutex_unlock(&priv->lock);
@@ -1087,11 +990,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
if (use_ptemod && map->vma)
goto unlock_out;
- if (use_ptemod && priv->mm != vma->vm_mm) {
- pr_warn("Huh? Other mm?\n");
- goto unlock_out;
- }
-
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
@@ -1102,10 +1000,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_flags |= VM_DONTCOPY;
vma->vm_private_data = map;
-
- if (use_ptemod)
- map->vma = vma;
-
if (map->flags) {
if ((vma->vm_flags & VM_WRITE) &&
(map->flags & GNTMAP_readonly))
@@ -1116,8 +1010,28 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map->flags |= GNTMAP_readonly;
}
+ if (use_ptemod) {
+ map->vma = vma;
+ err = mmu_interval_notifier_insert_locked(
+ &map->notifier, vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
+ if (err)
+ goto out_unlock_put;
+ }
mutex_unlock(&priv->lock);
+ /*
+ * gntdev takes the address of the PTE in find_grant_ptes() and passes
+ * it to the hypervisor in gntdev_map_grant_pages(). The purpose of
+ * the notifier is to prevent the hypervisor pointer to the PTE from
+ * going stale.
+ *
+ * Since this vma's mappings can't be touched without the mmap_sem,
+ * and we are holding it now, there is no need for the notifier_range
+ * locking pattern.
+ */
+ mmu_interval_read_begin(&map->notifier);
+
if (use_ptemod) {
map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -1166,8 +1080,11 @@ out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
if (use_ptemod) {
- map->vma = NULL;
unmap_grant_pages(map, 0, map->count);
+ if (map->vma) {
+ mmu_interval_notifier_remove(&map->notifier);
+ map->vma = NULL;
+ }
}
gntdev_put_map(priv, map);
return err;
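The gntdev.c changes above register that per-map notifier with mmu_interval_notifier_insert_locked() while gntdev_mmap() holds priv->lock and mmap_sem, sample the sequence once with mmu_interval_read_begin(), and remove the notifier again in gntdev_vma_close() and on the error path. For reference, a sketch of the general collision-retry pattern this API is designed for (hypothetical names; gntdev can skip the retry loop because, as the new comment notes, it only touches the PTEs while holding mmap_sem):

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* 'struct my_map' and 'my_map_mmu_ops' as sketched after gntdev-common.h above */

static int my_map_register(struct my_map *map, struct vm_area_struct *vma)
{
	/* caller already holds mmap_sem, hence the _locked variant */
	return mmu_interval_notifier_insert_locked(&map->notifier, vma->vm_mm,
						   vma->vm_start,
						   vma->vm_end - vma->vm_start,
						   &my_map_mmu_ops);
}

static void my_map_setup_pages(struct my_map *map)
{
	unsigned long seq;

	do {
		seq = mmu_interval_read_begin(&map->notifier);
		/*
		 * (Re)build the device/hypervisor view of the pages here;
		 * the retry check below normally runs under the driver lock
		 * that ->invalidate() also takes before tearing things down.
		 */
	} while (mmu_interval_read_retry(&map->notifier, seq));
}

static void my_map_unregister(struct my_map *map)
{
	/* blocks until any running ->invalidate() callback has returned */
	mmu_interval_notifier_remove(&map->notifier);
}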
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 5e30602fdbad..59e85e408c23 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -74,7 +74,7 @@ static int xen_allocate_irq(struct pci_dev *pdev)
"xen-platform-pci", pdev);
}
-static int platform_pci_resume(struct pci_dev *pdev)
+static int platform_pci_resume(struct device *dev)
{
int err;
@@ -83,7 +83,7 @@ static int platform_pci_resume(struct pci_dev *pdev)
err = xen_set_callback_via(callback_via);
if (err) {
- dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+ dev_err(dev, "platform_pci_resume failure!\n");
return err;
}
return 0;
@@ -168,13 +168,17 @@ static const struct pci_device_id platform_pci_tbl[] = {
{0,}
};
+static struct dev_pm_ops platform_pm_ops = {
+ .resume_noirq = platform_pci_resume,
+};
+
static struct pci_driver platform_driver = {
.name = DRV_NAME,
.probe = platform_pci_probe,
.id_table = platform_pci_tbl,
-#ifdef CONFIG_PM
- .resume_early = platform_pci_resume,
-#endif
+ .driver = {
+ .pm = &platform_pm_ops,
+ },
};
builtin_pci_driver(platform_driver);
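The platform-pci change above converts the deprecated pci_driver .resume_early hook, which took a pci_dev, into a dev_pm_ops table hung off the embedded device_driver, with the callback now taking a struct device. A hedged sketch of the same conversion with hypothetical names:

#include <linux/pci.h>
#include <linux/pm.h>

static int mydrv_resume_noirq(struct device *dev)
{
	/* recover the pci_dev only if the callback still needs it */
	struct pci_dev *pdev = to_pci_dev(dev);

	dev_dbg(dev, "resuming %s\n", pci_name(pdev));
	return 0;
}

static const struct dev_pm_ops mydrv_pm_ops = {
	.resume_noirq = mydrv_resume_noirq,
};

static struct pci_driver mydrv_driver = {
	.name = "mydrv",
	.driver = {
		.pm = &mydrv_pm_ops,
	},
};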
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index bd3a10dfac15..b6d27762c6f8 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size) &&
+ if (dma_capable(dev, dev_addr, size, true) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
swiotlb_force != SWIOTLB_FORCE)
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (unlikely(!dma_capable(dev, dev_addr, size))) {
+ if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
swiotlb_tbl_unmap_single(dev, map, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
@@ -405,7 +405,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
done:
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+ xen_dma_sync_for_device(dev_addr, phys, size, dir);
return dev_addr;
}
@@ -425,7 +425,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+ xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
@@ -439,7 +439,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
phys_addr_t paddr = xen_bus_to_phys(dma_addr);
if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+ xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -455,7 +455,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+ xen_dma_sync_for_device(dma_addr, paddr, size, dir);
}
/*